Snap for 6198741 from 66c62ceb97ac937df0463173e342734a4aee332e to sdk-release

Change-Id: I5b5c92439e8af564116b26845681aba97d5a603f
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..fbfa7d1
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+**/*.pyc
diff --git a/Android.bp b/Android.bp
index 097d80a..fca5616 100644
--- a/Android.bp
+++ b/Android.bp
@@ -91,6 +91,79 @@
 }
 
 genrule {
+  name: "aftltool_py",
+  srcs: ["aftltool",],
+  out: ["aftltool.py"],
+  cmd: "cp $(in) $(out)",
+}
+
+python_library_host {
+    name: "aftl_proto",
+    srcs: [
+        "proto/**/*.py",
+    ],
+    version: {
+        py2: {
+            enabled: true,
+            // This needs to be false due to b/146057182#comment5.
+            embedded_launcher: false,
+        },
+        py3: {
+            enabled: false,
+        },
+    },
+}
+
+python_binary_host {
+    name: "aftltool",
+    srcs: [
+        ":aftltool_py",
+        ":avbtool_py",
+    ],
+    libs: [
+        "aftl_proto",
+    ],
+    main: "aftltool.py",
+    required: ["fec"],
+    version: {
+        py2: {
+            enabled: true,
+            embedded_launcher: false,
+        },
+        py3: {
+            enabled: false,
+        },
+    },
+}
+
+python_test_host {
+    name: "aftltool_test",
+    main: "aftltool_test.py",
+    srcs: [
+        ":aftltool_py",
+        ":avbtool_py",
+        "aftltool_test.py",
+    ],
+    libs: [
+        "aftl_proto",
+    ],
+    data: [
+        "test/data/testkey_rsa4096.pem",
+    ],
+    test_suites: ["general-tests"],
+    version: {
+        py2: {
+            enabled: true,
+            // This needs to be false due to b/146057182#comment5.
+            embedded_launcher: false,
+        },
+        py3: {
+            enabled: false,
+        },
+    },
+}
+
+genrule {
   name: "avbtool_py",
   srcs: ["avbtool",],
   out: ["avbtool.py"],
@@ -175,6 +248,22 @@
 }
 
 cc_library_host_static {
+    name: "libavb_aftl_host",
+    defaults: [
+        "avb_defaults",
+        "avb_sources"],
+    header_libs: ["avb_headers"],
+    export_header_lib_headers: ["avb_headers"],
+    cflags: [
+        "-fno-stack-protector",
+    ],
+    srcs: [
+      "libavb_aftl/avb_aftl_validate.c",
+      "libavb_aftl/avb_aftl_util.c",
+    ],
+}
+
+cc_library_host_static {
     name: "libavb_atx_host",
     defaults: ["avb_defaults"],
     header_libs: [
@@ -225,6 +314,7 @@
     static_libs: [
         "libavb",
         "libavb_ab_host",
+        "libavb_aftl_host",
         "libavb_atx_host",
         "libavb_things_example",
         "libgmock_host",
@@ -241,6 +331,7 @@
     ],
     srcs: [
         "test/avb_ab_flow_unittest.cc",
+        "test/avb_aftl_validate_unittest.cc",
         "test/avb_atx_validate_unittest.cc",
         "test/avb_atx_slot_verify_unittest.cc",
         "test/avb_slot_verify_unittest.cc",
diff --git a/OWNERS b/OWNERS
index b93049c..0883d94 100644
--- a/OWNERS
+++ b/OWNERS
@@ -2,3 +2,5 @@
 samitolvanen@google.com
 zeuthen@google.com
 dkrahn@google.com
+danielaustin@google.com
+jpm@google.com
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
index 61948d2..5789f69 100644
--- a/PREUPLOAD.cfg
+++ b/PREUPLOAD.cfg
@@ -1,5 +1,6 @@
 [Builtin Hooks]
 clang_format = true
+pylint = true
 
 [Builtin Hooks Options]
 clang_format = --commit ${PREUPLOAD_COMMIT} --style file --extensions c,h,cc
diff --git a/TEST_MAPPING b/TEST_MAPPING
index a3ede88..b5d800a 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -3,6 +3,10 @@
     {
       "name": "libavb_host_unittest",
       "host": true
+    },
+    {
+      "name": "aftltool_test",
+      "host": true
     }
   ]
 }
diff --git a/aftl_integration_test.py b/aftl_integration_test.py
new file mode 100755
index 0000000..396b56c
--- /dev/null
+++ b/aftl_integration_test.py
@@ -0,0 +1,171 @@
+#!/usr/bin/env python
+
+# Copyright 2019, The Android Open Source Project
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation
+# files (the "Software"), to deal in the Software without
+# restriction, including without limitation the rights to use, copy,
+# modify, merge, publish, distribute, sublicense, and/or sell copies
+# of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+#
+"""Integration tests for avbtool with AFTL.
+
+The test cases directly interact with a transparency log. However,
+before using this script the following environment variables
+need to be set:
+
+  AFTL_HOST: host:port of the transparency log to test with.
+  AFTL_PUBKEY: Transparency log public key in PEM format.
+  AFTL_VBMETA_IMAGE: VBMeta image that should be used for submission to AFTL.
+  AFTL_MANUFACTURER_KEY: Manufacturer signing key used to sign submissions
+      to the transparency log in PEM format.
+"""
+
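# An illustrative sketch of running this suite; every value below is a
# placeholder:
#
#   AFTL_HOST=log.example.com:9000 \
#   AFTL_PUBKEY=/path/to/aftl_pubkey.pem \
#   AFTL_VBMETA_IMAGE=/path/to/vbmeta.img \
#   AFTL_MANUFACTURER_KEY=/path/to/manufacturer_key.pem \
#   python aftl_integration_test.py
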
+import os
+import unittest
+
+import aftltool
+import avbtool
+
+
+class AFTLIntegrationTest(unittest.TestCase):
+  """Test suite for testing aftltool with a AFTL."""
+
+  def setUp(self):
+    """Sets up the test bed for the unit tests."""
+    super(AFTLIntegrationTest, self).setUp()
+    self.aftltool = aftltool.Aftl()
+    self.output_filename = 'vbmeta_icp.img'
+
+    self.aftl_host = os.environ.get('AFTL_HOST')
+    self.aftl_pubkey = os.environ.get('AFTL_PUBKEY')
+    self.vbmeta_image = os.environ.get('AFTL_VBMETA_IMAGE')
+    self.manufacturer_key = os.environ.get('AFTL_MANUFACTURER_KEY')
+
+    if (not self.aftl_host or not self.aftl_pubkey or not self.vbmeta_image
+        or not self.manufacturer_key):
+      self.fail('Environment variables not correctly set up. See the '
+                'description of this test case for details.')
+
+    self.make_icp_default_params = {
+        'vbmeta_image_path': self.vbmeta_image,
+        'output': None,
+        'signing_helper': None,
+        'signing_helper_with_files': None,
+        'version_incremental': '1',
+        'transparency_log_servers': [self.aftl_host],
+        'transparency_log_pub_keys': [self.aftl_pubkey],
+        'manufacturer_key': self.manufacturer_key,
+        'padding_size': 0
+    }
+
+  def tearDown(self):
+    """Tears down the test bed for the unit tests."""
+    try:
+      os.remove(self.output_filename)
+    except OSError:
+      pass
+    super(AFTLIntegrationTest, self).tearDown()
+
+  def _read_icp_from_vbmeta_blob(self):
+    """Reads the ICP from the output file.
+
+    Returns:
+      AftlDescriptor for the ICP included in the given vbmeta image.
+    """
+    image = avbtool.ImageHandler(self.output_filename)
+
+    # pylint: disable=protected-access
+    (footer, header, _, _) = self.aftltool._parse_image(image)
+    offset = 0
+    if footer:
+      offset = footer.vbmeta_offset
+    image.seek(offset)
+    vbmeta_blob = image.read(header.SIZE +
+                             header.authentication_data_block_size +
+                             header.auxiliary_data_block_size)
+    image.seek(offset + len(vbmeta_blob))
+    # TODO(jpm): Fix AftlDescriptor so that the length of it can be derived
+    # without having to read the whole descriptor.
+    icp_bytes = image.read(100000)
+    self.assertGreater(len(icp_bytes), 0)
+
+    icp_blob = aftltool.AftlDescriptor(icp_bytes)
+    self.assertTrue(icp_blob.is_valid())
+    return icp_blob
+
+  def _make_icp_from_vbmeta(self):
+    """Submits vbmeta to AFTL and fetches inclusion proofs.
+
+    Returns:
+      True if make_icp_from_vbmeta command succeeds; otherwise False.
+    """
+    with open(self.output_filename, 'wb') as output_file:
+      self.make_icp_default_params['output'] = output_file
+      result = self.aftltool.make_icp_from_vbmeta(
+          **self.make_icp_default_params)
+    return result
+
+  def test_make_icp_with_one_transparency_log(self):
+    """Tests integration of aftltool with one AFTL."""
+    # Submits vbmeta to AFTL and fetches ICP.
+    result = self._make_icp_from_vbmeta()
+    self.assertTrue(result)
+
+    # Reads back the vbmeta image with the ICP.
+    icp_blob = self._read_icp_from_vbmeta_blob()
+
+    # Checks ICP proof blob for correctness.
+    icp_header = icp_blob.icp_header
+    self.assertIsNotNone(icp_header)
+    self.assertEqual(icp_header.magic, 'AFTL')
+    self.assertEqual(icp_header.icp_count, 1)
+
+    self.assertEqual(len(icp_blob.icp_entries), 1)
+    for icp in icp_blob.icp_entries:
+      self.assertEqual(icp.log_url, self.aftl_host)
+      self.assertTrue(icp.verify_icp(self.aftl_pubkey))
+
+  def test_make_icp_with_two_transparency_log(self):
+    """Tests integration of aftltool with two AFTLs."""
+    # Reconfigures default parameters with two transparency logs.
+    self.make_icp_default_params['transparency_log_servers'] = [
+        self.aftl_host, self.aftl_host]
+    self.make_icp_default_params['transparency_log_pub_keys'] = [
+        self.aftl_pubkey, self.aftl_pubkey]
+
+    # Submits vbmeta to two AFTLs and fetches their ICPs.
+    result = self._make_icp_from_vbmeta()
+    self.assertTrue(result)
+
+    # Reads back the vbmeta image with the ICP.
+    icp_blob = self._read_icp_from_vbmeta_blob()
+
+    # Checks ICP proof blob for correctness.
+    icp_header = icp_blob.icp_header
+    self.assertIsNotNone(icp_header)
+    self.assertEqual(icp_header.magic, 'AFTL')
+    self.assertEqual(icp_header.icp_count, 2)
+
+    self.assertEqual(len(icp_blob.icp_entries), 2)
+    for icp in icp_blob.icp_entries:
+      self.assertEqual(icp.log_url, self.aftl_host)
+      self.assertTrue(icp.verify_icp(self.aftl_pubkey))
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/aftltool b/aftltool
new file mode 100755
index 0000000..9998836
--- /dev/null
+++ b/aftltool
@@ -0,0 +1,1572 @@
+#!/usr/bin/env python
+
+# Copyright 2020, The Android Open Source Project
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation
+# files (the "Software"), to deal in the Software without
+# restriction, including without limitation the rights to use, copy,
+# modify, merge, publish, distribute, sublicense, and/or sell copies
+# of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+#
+"""Command-line tool for AFTL support for Android Verified Boot images."""
+
+from __future__ import division
+
+import argparse
+import binascii
+import hashlib
+import multiprocessing
+import os
+import Queue  # pylint: disable=bad-python3-import
+import struct
+import subprocess
+import sys
+import tempfile
+import time
+
+
+import avbtool
+import proto.aftl_pb2
+import proto.api_pb2
+import proto.crypto.sigpb
+
+# Android Firmware Transparency Log Data Structures
+
+
+class AftlError(Exception):
+  """Application-specific errors.
+
+  These errors represent issues for which a stack-trace should not be
+  presented.
+
+  Attributes:
+    message: Error message.
+  """
+
+  def __init__(self, message):
+    Exception.__init__(self, message)
+
+
+def rsa_key_read_pem_bytes(key_path):
+  """Reads the bytes out of the passed in PEM file.
+
+  Arguments:
+    key_path: A string containing the path to the PEM file.
+
+  Returns:
+    A bytearray containing the DER encoded bytes in the PEM file.
+
+  Raises:
+    AftlError: If openssl cannot decode the PEM file.
+  """
+  # Use openssl to decode the PEM file.
+  args = ['openssl', 'rsa', '-in', key_path, '-pubout', '-outform', 'DER']
+  p = subprocess.Popen(args,
+                       stdin=subprocess.PIPE,
+                       stdout=subprocess.PIPE,
+                       stderr=subprocess.PIPE)
+  (pout, perr) = p.communicate()
+  retcode = p.wait()
+  if retcode != 0:
+    raise AftlError('Error decoding: {}'.format(perr))
+  return bytearray(pout)
+
+
+def check_signature(log_root, log_root_sig,
+                    transparency_log_pub_key):
+  """Validates the signature provided by the transparency log.
+
+  Arguments:
+    log_root: The transparency log_root data structure.
+    log_root_sig: The signature of the transparency log_root data structure.
+    transparency_log_pub_key: The file path to the transparency log public key.
+
+  Returns:
+    True if the signature check passes, otherwise False.
+  """
+
+  logsig_tmp = tempfile.NamedTemporaryFile()
+  logsig_tmp.write(log_root_sig)
+  logsig_tmp.flush()
+  logroot_tmp = tempfile.NamedTemporaryFile()
+  logroot_tmp.write(log_root)
+  logroot_tmp.flush()
+
+  p = subprocess.Popen(['openssl', 'dgst', '-sha256', '-verify',
+                        transparency_log_pub_key,
+                        '-signature', logsig_tmp.name, logroot_tmp.name],
+                       stdin=subprocess.PIPE,
+                       stdout=subprocess.PIPE,
+                       stderr=subprocess.PIPE)
+
+  (_, openssl_err) = p.communicate()
+  retcode = p.wait()
+  if not retcode:
+    return True
+  sys.stderr.write('Error validating log_root signature with openssl: {}\n'
+                   .format(openssl_err))
+  return False
+
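
# As a sketch of the above: check_signature() shells out to the equivalent of
#
#   openssl dgst -sha256 -verify <log_pub.pem> -signature <log_root.sig> \
#       <log_root>
#
# so a matching signature is what 'openssl dgst -sha256 -sign' with the
# log's private key would produce over the serialized log_root.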
+
+# AFTL Merkle Tree Functionality
+def rfc6962_hash_leaf(leaf):
+  """RFC6962 hashing function for hashing leaves of a Merkle tree.
+
+  Arguments:
+    leaf: A bytearray containing the Merkle tree leaf to be hashed.
+
+  Returns:
+    A bytearray containing the RFC6962 SHA256 hash of the leaf.
+  """
+  hasher = hashlib.sha256()
+  # RFC6962 states a '0' byte should be prepended to the data.
+  # This is done in conjunction with the '1' byte for non-leaf
+  # nodes for 2nd preimage attack resistance.
+  hasher.update(b'\x00')
+  hasher.update(leaf)
+  return hasher.digest()
+
+
+def rfc6962_hash_children(l, r):
+  """Calculates the inner Merkle tree node hash of child nodes l and r.
+
+  Arguments:
+    l: A bytearray containing the left child node to be hashed.
+    r: A bytearray containing the right child node to be hashed.
+
+  Returns:
+    A bytearray containing the RFC6962 SHA256 hash of 1|l|r.
+  """
+  hasher = hashlib.sha256()
+  # RFC6962 states a '1' byte should be prepended to the concatenated data.
+  # This is done in conjunction with the '0' byte for leaf
+  # nodes for 2nd preimage attack resistance.
+  hasher.update(b'\x01')
+  hasher.update(l)
+  hasher.update(r)
+  return hasher.digest()
+
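
# An illustrative sketch (not used by aftltool itself) of the domain
# separation above: for a two-leaf tree, the root is the interior hash of
# the two leaf hashes.
def _example_rfc6962_hashing():
  leaf_a = rfc6962_hash_leaf(b'hello')
  leaf_b = rfc6962_hash_leaf(b'world')
  # The 0x00 prefix makes a leaf hash differ from a plain SHA256 of the
  # same data.
  assert leaf_a != hashlib.sha256(b'hello').digest()
  return rfc6962_hash_children(leaf_a, leaf_b)
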
+
+def chain_border_right(seed, proof):
+  """Computes a subtree hash along the left-side tree border.
+
+  Arguments:
+    seed: A bytearray containing the starting hash.
+    proof: A list of bytearrays representing the hashes in the inclusion proof.
+
+  Returns:
+    A bytearray containing the left-side subtree hash.
+  """
+  for h in proof:
+    seed = rfc6962_hash_children(h, seed)
+  return seed
+
+
+def chain_inner(seed, proof, leaf_index):
+  """Computes a subtree hash on or below the tree's right border.
+
+  Arguments:
+    seed: A bytearray containing the starting hash.
+    proof: A list of bytearrays representing the hashes in the inclusion proof.
+    leaf_index: The current leaf index.
+
+  Returns:
+    A bytearray containing the subtree hash.
+  """
+  for i, h in enumerate(proof):
+    if leaf_index >> i & 1 == 0:
+      seed = rfc6962_hash_children(seed, h)
+    else:
+      seed = rfc6962_hash_children(h, seed)
+  return seed
+
+
+def root_from_icp(leaf_index, tree_size, proof, leaf_hash):
+  """Calculates the expected Merkle tree root hash.
+
+  Arguments:
+    leaf_index: The current leaf index.
+    tree_size: The number of nodes in the Merkle tree.
+    proof: A list of bytearrays containing the inclusion proof.
+    leaf_hash: A bytearray containing the initial leaf hash.
+
+  Returns:
+    A bytearray containing the calculated Merkle tree root hash.
+
+  Raises:
+    AftlError: If invalid parameters are passed in.
+  """
+  if leaf_index < 0:
+    raise AftlError('Invalid leaf_index value: {}'.format(leaf_index))
+  if tree_size < 0:
+    raise AftlError('Invalid tree_size value: {}'.format(tree_size))
+  if leaf_index >= tree_size:
+    err_str = 'leaf_index cannot be equal to or larger than tree_size: {}, {}'
+    raise AftlError(err_str.format(leaf_index, tree_size))
+  if proof is None:
+    raise AftlError('Inclusion proof not provided.')
+  if leaf_hash is None:
+    raise AftlError('No leaf hash provided.')
+  # Calculate the point to split the proof into two parts.
+  # The split is where the paths to leaves diverge.
+  inner = (leaf_index ^ (tree_size - 1)).bit_length()
+  result = chain_inner(leaf_hash, proof[:inner], leaf_index)
+  result = chain_border_right(result, proof[inner:])
+  return result
+
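
# An illustrative sketch (not used by aftltool itself): for leaf 0 of a
# two-leaf tree, the inclusion proof is just the sibling leaf hash, and
# root_from_icp() reproduces the root computed directly.
def _example_root_from_icp():
  leaf0 = rfc6962_hash_leaf(b'leaf-0')
  leaf1 = rfc6962_hash_leaf(b'leaf-1')
  expected_root = rfc6962_hash_children(leaf0, leaf1)
  calc_root = root_from_icp(leaf_index=0, tree_size=2, proof=[leaf1],
                            leaf_hash=leaf0)
  assert calc_root == expected_root
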
+
+class AftlIcpHeader(object):
+  """A class for the transparency log inclusion proof header.
+
+  Attributes:
+    magic: Magic for identifying the ICP header.
+    required_icp_version_major: The major version of AVB that wrote the entry.
+    required_icp_version_minor: The minor version of AVB that wrote the entry.
+    aftl_descriptor_size: Total size of the AftlDescriptor that contains
+        this header.
+    icp_count: Number of inclusion proofs represented in this structure.
+  """
+
+  SIZE = 18  # The size of the structure, in bytes
+  MAGIC = 'AFTL'
+  FORMAT_STRING = ('!4s2L'  # magic, major & minor version
+                   'L'      # descriptor size
+                   'H')     # number of inclusion proof entries
+
+  def __init__(self, data=None):
+    """Initializes a new transparency header object.
+
+    Arguments:
+      data: If not None, must be a bytearray of size |SIZE|.
+
+    Raises:
+      AftlError: If invalid structure for AftlIcpHeader.
+    """
+    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE
+
+    if data:
+      (self.magic, self.required_icp_version_major,
+       self.required_icp_version_minor, self.aftl_descriptor_size,
+       self.icp_count) = struct.unpack(self.FORMAT_STRING, data)
+    else:
+      self.magic = self.MAGIC
+      self.required_icp_version_major = avbtool.AVB_VERSION_MAJOR
+      self.required_icp_version_minor = avbtool.AVB_VERSION_MINOR
+      self.aftl_descriptor_size = self.SIZE
+      self.icp_count = 0
+    if not self.is_valid():
+      raise AftlError('Invalid structure for AftlIcpHeader')
+
+  def save(self, output):
+    """Serializes the transparency header |SIZE| to disk.
+
+    Arguments:
+      output: The object to write the header to.
+
+    Raises:
+      AftlError: If invalid structure for AftlIcpHeader.
+    """
+    output.write(self.encode())
+
+  def encode(self):
+    """Serializes the header |SIZE| to a bytearray().
+
+    Returns:
+      A bytearray() with the encoded header.
+
+    Raises:
+      AftlError: If invalid structure for AftlIcpHeader.
+    """
+    if not self.is_valid():
+      raise AftlError('Invalid structure for AftlIcpHeader')
+    return struct.pack(self.FORMAT_STRING, self.magic,
+                       self.required_icp_version_major,
+                       self.required_icp_version_minor,
+                       self.aftl_descriptor_size,
+                       self.icp_count)
+
+  def is_valid(self):
+    """Ensures that values in an AftlIcpHeader structure are sane.
+
+    Returns:
+      True if the values in the AftlIcpHeader are sane, False otherwise.
+    """
+    if self.magic != AftlIcpHeader.MAGIC:
+      sys.stderr.write(
+          'ICP Header: magic value mismatch: {}\n'.format(self.magic))
+      return False
+
+    if self.required_icp_version_major > avbtool.AVB_VERSION_MAJOR:
+      sys.stderr.write('ICP header: major version mismatch: {}\n'.format(
+          self.required_icp_version_major))
+      return False
+
+    if self.required_icp_version_minor > avbtool.AVB_VERSION_MINOR:
+      sys.stderr.write('ICP header: minor version mismatch: {}\n'.format(
+          self.required_icp_version_minor))
+      return False
+
+    if self.aftl_descriptor_size < self.SIZE:
+      sys.stderr.write('ICP Header: Invalid descriptor size: {}\n'.format(
+          self.aftl_descriptor_size))
+      return False
+
+    if self.icp_count < 0 or self.icp_count > 65535:
+      sys.stderr.write(
+          'ICP header: ICP entry count out of range: {}\n'.format(
+              self.icp_count))
+      return False
+    return True
+
+  def print_desc(self, o):
+    """Print the descriptor.
+
+    Arguments:
+      o: The object to write the output to.
+    """
+    o.write('    Major version:      {}\n'.format(
+        self.required_icp_version_major))
+    o.write('    Minor version:      {}\n'.format(
+        self.required_icp_version_minor))
+    o.write('    Descriptor size:    {}\n'.format(
+        self.aftl_descriptor_size))
+    o.write('    ICP entries count:  {}\n'.format(
+        self.icp_count))
+
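
# An illustrative sketch (not part of aftltool's API): the 18-byte header
# round-trips through encode(); '!4s2LLH' packs the magic, both version
# fields, the descriptor size, and the entry count in network byte order.
def _example_icp_header_roundtrip():
  header = AftlIcpHeader()
  header.icp_count = 2
  decoded = AftlIcpHeader(header.encode())
  assert decoded.magic == 'AFTL' and decoded.icp_count == 2

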
+class AftlIcpEntry(object):
+  """A class for the transparency log inclusion proof entries.
+
+  The data that represents each of the components of the ICP entry are stored
+  immediately following the ICP entry header. The format is log_url,
+  SignedLogRoot, and inclusion proof hashes.
+
+  Attributes:
+    log_url_size: Length of the string representing the transparency log URL.
+    leaf_index: Leaf index in the transparency log representing this entry.
+    log_root_descriptor_size: Size of the transparency log's SignedLogRoot.
+    fw_info_leaf_size: Size of the FirmwareInfo leaf passed to the log.
+    log_root_sig_size: Size in bytes of the log_root_signature
+    proof_hash_count: Number of hashes comprising the inclusion proof.
+    inc_proof_size: The total size of the inclusion proof, in bytes.
+    log_url: The URL for the transparency log that generated this inclusion
+        proof.
+    log_root_descriptor: The data comprising the signed tree head structure.
+    fw_info_leaf: The data comprising the FirmwareInfo leaf.
+    log_root_signature: The data comprising the log root signature.
+    proofs: The hashes comprising the inclusion proof.
+
+  """
+  SIZE = 27  # The size of the structure, in bytes
+  FORMAT_STRING = ('!L'   # transparency log server url size
+                   'Q'    # leaf index
+                   'L'    # log root descriptor size
+                   'L'    # firmware info leaf size
+                   'H'    # log root signature size
+                   'B'    # number of hashes in the inclusion proof
+                   'L')   # size of the inclusion proof in bytes
+  # The variable-length log_url, log_root_descriptor, fw_info leaf,
+  # log root signature, and proofs elements follow the fixed-size portion
+  # and are handled by the encode & save functions.
+
+  def __init__(self, data=None):
+    """Initializes a new ICP entry object.
+
+    Arguments:
+      data: If not None, must be a bytearray of size >= |SIZE|.
+
+    Raises:
+      AftlError: If data does not represent a well-formed AftlIcpEntry.
+    """
+    # Assert the header structure is of a sane size.
+    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE
+
+    if data:
+      # Deserialize the header from the data descriptor.
+      (self.log_url_size, self.leaf_index, self.log_root_descriptor_size,
+       self.fw_info_leaf_size, self.log_root_sig_size, self.proof_hash_count,
+       self.inc_proof_size) = struct.unpack(
+           self.FORMAT_STRING, data[0:self.SIZE])
+
+      # Deserialize ICP entry components from the data descriptor.
+      expected_format_string = '{}s{}s{}s{}s{}s'.format(
+          self.log_url_size,
+          self.log_root_descriptor_size,
+          self.fw_info_leaf_size,
+          self.log_root_sig_size,
+          self.inc_proof_size)
+
+      (self.log_url, log_root_descriptor_bytes, self.fw_info_leaf,
+       self.log_root_signature, proof_bytes) = struct.unpack(
+           expected_format_string, data[self.SIZE:self.get_expected_size()])
+      self.log_root_descriptor = TrillianLogRootDescriptor(
+          log_root_descriptor_bytes)
+      self.proofs = []
+      if self.proof_hash_count > 0:
+        proof_idx = 0
+        hash_size = self.inc_proof_size // self.proof_hash_count
+        for _ in range(self.proof_hash_count):
+          proof = proof_bytes[proof_idx:(proof_idx+hash_size)]
+          self.proofs.append(proof)
+          proof_idx += hash_size
+    else:
+      self.log_url_size = 0
+      self.leaf_index = 0
+      self.fw_info_leaf_size = 0
+      self.log_root_sig_size = 0
+      self.proof_hash_count = 0
+      self.inc_proof_size = 0
+      self.log_url = ''
+      self.log_root_descriptor = TrillianLogRootDescriptor()
+      log_root_descriptor_size = self.log_root_descriptor.get_expected_size()
+      self.log_root_descriptor_size = log_root_descriptor_size
+      self.fw_info_leaf = ''
+      self.log_root_signature = ''
+      self.proofs = []
+    if not self.is_valid():
+      raise AftlError('Invalid structure for AftlIcpEntry')
+
+  def set_log_url(self, log_url):
+    """Sets the log_url and log_url_size elements in the AftlIcpEntry.
+
+    Arguments:
+      log_url: The string representing the transparency log URL.
+    """
+    self.log_url = log_url
+    self.log_url_size = len(log_url)
+
+  def set_log_root_descriptor(self, log_root_descriptor):
+    """Sets signed_root_descriptor and signed_root_descriptor_size.
+
+    Arguments:
+      log_root_descriptor: A TrillianLogRootDescriptor containing the
+      log_root for the transparency log.
+    """
+    if not isinstance(log_root_descriptor, TrillianLogRootDescriptor):
+      raise AftlError('Invalid data type passed to set_log_root_descriptor: '
+                      'Received {}.\n'.format(type(log_root_descriptor)))
+    self.log_root_descriptor = log_root_descriptor
+    self.log_root_descriptor_size = log_root_descriptor.get_expected_size()
+
+  def set_proofs(self, proofs):
+    """Sets the proof_hash_count, proofs, and inc_proof_size.
+
+    Arguments:
+      proofs: A bytearray of concatenated hashes comprising the inclusion proof.
+    """
+    self.proof_hash_count = 0
+    self.proofs = proofs
+    inc_proof_size = 0
+    for proof in proofs:
+      inc_proof_size += len(proof)
+      self.proof_hash_count += 1
+    self.inc_proof_size = inc_proof_size
+
+  def verify_icp(self, transparency_log_pub_key):
+    """Verifies the contained inclusion proof given the public log key.
+
+    Arguments:
+      transparency_log_pub_key: The trusted public key for the log.
+
+    Returns:
+      True if the calculated root hash matches the descriptor's root hash and
+      the log_root signature verifies; False otherwise.
+    """
+    leaf_hash = rfc6962_hash_leaf(self.fw_info_leaf)
+    calc_root = root_from_icp(self.leaf_index,
+                              self.log_root_descriptor.tree_size,
+                              self.proofs,
+                              leaf_hash)
+    if ((calc_root == self.log_root_descriptor.root_hash) and
+        check_signature(
+            self.log_root_descriptor.log_root,
+            self.log_root_signature,
+            transparency_log_pub_key)):
+      return True
+    return False
+
+  def save(self, output):
+    """Serializes the transparency header |SIZE| and data to disk.
+
+    Arguments:
+      output: The object to write the header to.
+
+    Raises:
+      AftlError: If invalid entry structure.
+    """
+    output.write(self.encode())
+
+  def encode(self):
+    """Serializes the header |SIZE| and data to a bytearray().
+
+    Returns:
+      A bytearray() with the encoded header.
+
+    Raises:
+      AftlError: If invalid entry structure.
+    """
+    proof_bytes = bytearray()
+    if not self.is_valid():
+      raise AftlError('Invalid AftlIcpEntry structure')
+
+    expected_format_string = '{}{}s{}s{}s{}s{}s'.format(
+        self.FORMAT_STRING,
+        self.log_url_size,
+        self.log_root_descriptor_size,
+        self.fw_info_leaf_size,
+        self.log_root_sig_size,
+        self.inc_proof_size)
+
+    for proof in self.proofs:
+      proof_bytes.extend(proof)
+
+    return struct.pack(expected_format_string,
+                       self.log_url_size, self.leaf_index,
+                       self.log_root_descriptor_size, self.fw_info_leaf_size,
+                       self.log_root_sig_size, self.proof_hash_count,
+                       self.inc_proof_size, self.log_url,
+                       self.log_root_descriptor.encode(),
+                       str(self.fw_info_leaf),
+                       str(self.log_root_signature),
+                       str(proof_bytes))
+
+  # TODO(danielaustin): Add unit test.
+  def translate_response(self, transparency_log, afi_response):
+    """Translates an AddFirmwareInfoResponse object to an AftlIcpEntry.
+
+    Arguments:
+      transparency_log: String representing the transparency log URL.
+      afi_response: The AddFirmwareInfoResponse object to translate.
+    """
+    self.set_log_url(transparency_log)
+    self.leaf_index = afi_response.fw_info_proof.proof.leaf_index
+    self.log_root_descriptor = TrillianLogRootDescriptor(
+        afi_response.fw_info_proof.sth.log_root)
+    self.log_root_signature = afi_response.fw_info_proof.sth.log_root_signature
+    self.log_root_sig_size = len(self.log_root_signature)
+    self.log_root_descriptor_size = self.log_root_descriptor.get_expected_size()
+
+    proof_hashes = afi_response.fw_info_proof.proof.hashes
+    self.set_proofs(proof_hashes)
+
+  def get_expected_size(self):
+    """Gets the expected size of the full entry out of the header.
+
+    Returns:
+      The expected size of the AftlIcpEntry from the header.
+    """
+    return (self.SIZE + self.log_url_size + self.log_root_descriptor_size +
+            self.fw_info_leaf_size + self.log_root_sig_size +
+            self.inc_proof_size)
+
+  def is_valid(self):
+    """Ensures that values in an AftlIcpEntry structure are sane.
+
+    Returns:
+      True if the values in the AftlIcpEntry are sane, False otherwise.
+    """
+    if ((self.log_url and self.log_url_size != len(self.log_url))
+        or (not self.log_url and self.log_url_size != 0)):
+      sys.stderr.write('ICP entry: invalid URL size: {}\n'
+                       .format(self.log_url_size))
+      return False
+
+    if self.leaf_index < 0:
+      sys.stderr.write('ICP entry: leaf index out of range: '
+                       '{}\n'.format(self.leaf_index))
+      return False
+
+    if (not self.log_root_descriptor or
+        not self.log_root_descriptor.is_valid()):
+      sys.stderr.write('ICP entry: invalid TrillianLogRootDescriptor\n')
+      return False
+
+    if (self.log_root_descriptor_size !=
+        self.log_root_descriptor.get_expected_size()):
+      sys.stderr.write('ICP entry: invalid signed root descriptor size: '
+                       '{}, should be {}\n'.format(
+                           self.log_root_descriptor_size,
+                           self.log_root_descriptor.get_expected_size()))
+      return False
+
+    if ((self.fw_info_leaf and self.fw_info_leaf_size != len(self.fw_info_leaf))
+        or (not self.fw_info_leaf and self.fw_info_leaf_size != 0)):
+      sys.stderr.write('ICP entry: invalid FirmwareInfo size: {}\n'
+                       .format(self.fw_info_leaf_size))
+      return False
+
+    if self.proof_hash_count < 0:
+      sys.stderr.write('ICP entry: invalid proof count: {}\n'.format(
+          self.proof_hash_count))
+      return False
+
+    inc_proof_size = 0
+    if self.proofs:
+      for proof in self.proofs:
+        inc_proof_size += len(proof)
+      if self.inc_proof_size != inc_proof_size:
+        sys.stderr.write('ICP entry: invalid transparency log proof size: ')
+        sys.stderr.write('{}, calculated {}\n'.format(self.inc_proof_size,
+                                                      inc_proof_size))
+        return False
+    elif self.inc_proof_size != 0:
+      sys.stderr.write('ICP entry: invalid transparency log proof size '
+                       '(should be 0): {}'.format(self.inc_proof_size))
+      return False
+
+    return True
+
+  def print_desc(self, o):
+    """Print the descriptor.
+
+    Arguments:
+      o: The object to write the output to.
+    """
+    o.write('    Transparency Log:   {}\n'.format(self.log_url))
+    o.write('    Leaf index:         {}\n'.format(self.leaf_index))
+    o.write('    ICP hashes:         ')
+    for i, proof_hash in enumerate(self.proofs):
+      if i != 0:
+        o.write(' ' * 24)
+      o.write('{}\n'.format(binascii.hexlify(proof_hash)))
+
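
# An illustrative sketch (not part of aftltool's API): an entry's wire size
# is the fixed |SIZE| bytes plus its five variable-length fields, so a
# freshly constructed entry survives an encode/decode round trip. The log
# URL is a placeholder.
def _example_icp_entry_roundtrip():
  entry = AftlIcpEntry()
  entry.set_log_url('log.example.com')
  decoded = AftlIcpEntry(entry.encode())
  assert decoded.log_url == 'log.example.com'
  assert decoded.get_expected_size() == entry.get_expected_size()
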
+
+class TrillianLogRootDescriptor(object):
+  """A class representing the Trillian log_root descriptor.
+
+  Taken from Trillian definitions:
+  https://github.com/google/trillian/blob/master/trillian.proto#L255
+
+  Attributes:
+    version: The version number of the descriptor. Currently only version=1 is
+        supported.
+    tree_size: The size of the tree.
+    root_hash_size: The size of the root hash in bytes. Valid values are between
+        0 and 128.
+    root_hash: The root hash as bytearray().
+    timestamp: The timestamp in nanoseconds.
+    revision: The revision number as long.
+    metadata_size: The size of the metadata in bytes. Valid values are between
+        0 and 65535.
+    metadata: The metadata as bytearray().
+  """
+  FORMAT_STRING_PART_1 = ('!H'  # version
+                          'Q'   # tree_size
+                          'B'   # root_hash_size
+                         )
+
+  FORMAT_STRING_PART_2 = ('!Q'  # timestamp
+                          'Q'   # revision
+                          'H'   # metadata_size
+                         )
+
+  def __init__(self, data=None):
+    """Initializes a new TrillianLogRoot descriptor."""
+    if data:
+      # Parses first part of the log_root descriptor.
+      data_length = struct.calcsize(self.FORMAT_STRING_PART_1)
+      (self.version, self.tree_size, self.root_hash_size) = struct.unpack(
+          self.FORMAT_STRING_PART_1, data[0:data_length])
+      data = data[data_length:]
+
+      # Parses the root_hash bytes if the size indicates existence.
+      if self.root_hash_size > 0:
+        self.root_hash = data[0:self.root_hash_size]
+        data = data[self.root_hash_size:]
+      else:
+        self.root_hash = bytearray()
+
+      # Parses second part of the log_root descriptor.
+      data_length = struct.calcsize(self.FORMAT_STRING_PART_2)
+      (self.timestamp, self.revision, self.metadata_size) = struct.unpack(
+          self.FORMAT_STRING_PART_2, data[0:data_length])
+      data = data[data_length:]
+
+      # Parses the metadata if the size indicates existence.
+      if self.metadata_size > 0:
+        self.metadata = data[0:self.metadata_size]
+      else:
+        self.metadata = bytearray()
+    else:
+      self.version = 1
+      self.tree_size = 0
+      self.root_hash_size = 0
+      self.root_hash = bytearray()
+      self.timestamp = 0
+      self.revision = 0
+      self.metadata_size = 0
+      self.metadata = bytearray()
+
+    if not self.is_valid():
+      raise AftlError('Invalid structure for TrillianLogRootDescriptor.')
+
+  def get_expected_size(self):
+    """Calculates the expected size of the TrillianLogRootDescriptor.
+
+    Returns:
+      The expected size of the TrillianLogRootDescriptor.
+    """
+    return (struct.calcsize(self.FORMAT_STRING_PART_1) + self.root_hash_size +
+            struct.calcsize(self.FORMAT_STRING_PART_2) + self.metadata_size)
+
+  def encode(self):
+    """Serializes the TrillianLogDescriptor to a bytearray().
+
+    Returns:
+      A bytearray() with the encoded header.
+
+    Raises:
+      AftlError: If invalid entry structure.
+    """
+    if not self.is_valid():
+      raise AftlError('Invalid structure for TrillianLogRootDescriptor.')
+
+    expected_format_string = '{}{}s{}{}s'.format(
+        self.FORMAT_STRING_PART_1,
+        self.root_hash_size,
+        self.FORMAT_STRING_PART_2[1:],
+        self.metadata_size)
+
+    return struct.pack(expected_format_string,
+                       self.version, self.tree_size, self.root_hash_size,
+                       str(self.root_hash), self.timestamp, self.revision,
+                       self.metadata_size, str(self.metadata))
+
+  def is_valid(self):
+    """Ensures that values in the descritor are sane.
+
+    Returns:
+      True if the values are sane; otherwise False.
+    """
+    cls = self.__class__.__name__
+    if self.version != 1:
+      sys.stderr.write('{}: Bad version value {}.'.format(cls, self.version))
+      return False
+    if self.tree_size < 0:
+      sys.stderr.write('{}: Bad tree_size value {}.'.format(cls,
+                                                            self.tree_size))
+      return False
+    if self.root_hash_size < 0 or self.root_hash_size > 128:
+      sys.stderr.write('{}: Bad root_hash_size value {}.'.format(
+          cls, self.root_hash_size))
+      return False
+    if len(self.root_hash) != self.root_hash_size:
+      sys.stderr.write('{}: root_hash_size {} does not match the length of '
+                       'root_hash {}.'.format(cls, self.root_hash_size,
+                                              len(self.root_hash)))
+      return False
+    if self.timestamp < 0:
+      sys.stderr.write('{}: Bad timestamp value {}.'.format(cls,
+                                                            self.timestamp))
+      return False
+    if self.revision < 0:
+      sys.stderr.write('{}: Bad revision value {}.'.format(cls, self.revision))
+      return False
+    if self.metadata_size < 0 or self.metadata_size > 65535:
+      sys.stderr.write('{}: Bad metadata_size value {}.'.format(
+          cls, self.metadata_size))
+      return False
+    if len(self.metadata) != self.metadata_size:
+      sys.stderr.write('{}: metadata_size {} does not match the length of '
+                       'metadata {}.'.format(cls, self.metadata_size,
+                                             len(self.metadata)))
+      return False
+    return True
+
+  def print_desc(self, o):
+    """Print the descriptor.
+
+    Arguments:
+      o: The object to write the output to.
+    """
+    i = ' ' * 8
+    o.write('{}Version:          {}\n'.format(i, self.version))
+    o.write('{}Tree size:        {}\n'.format(i, self.tree_size))
+    o.write('{}Root hash size:   {}\n'.format(i, self.root_hash_size))
+    if self.root_hash_size > 0:
+      o.write('{}Root hash:        {}\n'.format(
+          i, binascii.hexlify(self.root_hash)))
+    o.write('{}Timestamp (ns):   {}\n'.format(i, self.timestamp))
+    o.write('{}Revision:         {}\n'.format(i, self.revision))
+    o.write('{}Metadata size:    {}\n'.format(i, self.metadata_size))
+    if self.metadata_size > 0:
+      o.write('{}Metadata:         {}\n'.format(i, self.metadata))
+
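
# An illustrative sketch (not part of aftltool's API): the serialized size
# is 11 bytes for part 1 ('!HQB'), the root hash, 18 bytes for part 2
# ('!QQH'), and the metadata, so an empty descriptor encodes to 29 bytes.
def _example_log_root_descriptor_size():
  descriptor = TrillianLogRootDescriptor()
  assert descriptor.get_expected_size() == 11 + 0 + 18 + 0
  assert len(descriptor.encode()) == 29
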
+
+class AftlDescriptor(object):
+  """A class for the transparency log inclusion proof descriptor.
+
+  This encapsulates an AFTL ICP section with all information required to
+  validate an inclusion proof.
+
+  Attributes:
+    icp_header: A header for the section.
+    icp_entries: A list of AftlIcpEntry objects representing the inclusion
+        proofs.
+  """
+
+  def __init__(self, data=None):
+    """Initializes a new AftlDescriptor section.
+
+    Arguments:
+      data: If not None, must be a bytearray representing an AftlDescriptor.
+
+    Raises:
+      AftlError: If the data does not represent a well-formed AftlDescriptor.
+    """
+    if data:
+      icp_header_bytes = data[0:AftlIcpHeader.SIZE]
+      self.icp_header = AftlIcpHeader(icp_header_bytes)
+      if not self.icp_header.is_valid():
+        raise AftlError('Invalid ICP header.')
+      icp_count = self.icp_header.icp_count
+
+      # Jump past the header for entry deserialization.
+      icp_index = AftlIcpHeader.SIZE
+      # Validate each entry.
+      self.icp_entries = []
+      # add_icp_entry() increments icp_count and aftl_descriptor_size in the
+      # header, so reset them before re-adding the parsed entries below.
+      self.icp_header.icp_count = 0
+      self.icp_header.aftl_descriptor_size = AftlIcpHeader.SIZE
+      for i in range(icp_count):
+        # Get the entry header from the AftlDescriptor.
+        cur_icp_entry = AftlIcpEntry(data[icp_index:])
+        cur_icp_entry_size = cur_icp_entry.get_expected_size()
+        # Now validate the entry structure.
+        if not cur_icp_entry.is_valid():
+          raise AftlError('Validation of ICP entry {} failed.'.format(i))
+        self.add_icp_entry(cur_icp_entry)
+        icp_index += cur_icp_entry_size
+    else:
+      self.icp_header = AftlIcpHeader()
+      self.icp_entries = []
+    if not self.is_valid():
+      raise AftlError('Malformed AFTLDescriptor')
+
+  def add_icp_entry(self, avb_icp_entry):
+    """Adds a new AftlIcpEntry to the AftlDescriptor, updating fields as needed.
+
+    Arguments:
+      avb_icp_entry: An AftlIcpEntry structure.
+    """
+
+    # Append the entry and update the header bookkeeping to match.
+    self.icp_entries.append(avb_icp_entry)
+    self.icp_header.icp_count += 1
+    self.icp_header.aftl_descriptor_size += avb_icp_entry.get_expected_size()
+
+  def save(self, output):
+    """Serializes the AftlDescriptor to disk.
+
+    Arguments:
+      output: The object to write the descriptor to.
+
+    Raises:
+      AftlError: If invalid descriptor structure.
+    """
+    output.write(self.encode())
+
+  def encode(self):
+    """Serialize the AftlDescriptor to a bytearray().
+
+    Returns:
+      A bytearray() with the encoded header.
+
+    Raises:
+      AftlError: If invalid descriptor structure.
+    """
+    # The header and entries are guaranteed to be valid when encode is called.
+    # Check the entire structure as a whole.
+    if not self.is_valid():
+      raise AftlError('Invalid AftlDescriptor structure.')
+
+    icp_descriptor = bytearray()
+    icp_descriptor.extend(self.icp_header.encode())
+    for icp_entry in self.icp_entries:
+      icp_descriptor.extend(icp_entry.encode())
+    return icp_descriptor
+
+  def is_valid(self):
+    """Ensures that values in the AftlDescriptor are sane.
+
+    Returns:
+      True if the values in the AftlDescriptor are sane, False otherwise.
+    """
+    if not self.icp_header.is_valid():
+      return False
+
+    if self.icp_header.icp_count != len(self.icp_entries):
+      return False
+
+    for icp_entry in self.icp_entries:
+      if not icp_entry.is_valid():
+        return False
+    return True
+
+  def print_desc(self, o):
+    """Print the descriptor.
+
+    Arguments:
+      o: The object to write the output to.
+    """
+    o.write('Android Firmware Transparency Descriptor:\n')
+    o.write('  Header:\n')
+    self.icp_header.print_desc(o)
+    for i, icp_entry in enumerate(self.icp_entries):
+      o.write('  Entry #{}:\n'.format(i + 1))
+      icp_entry.print_desc(o)
+      o.write('    Log Root Descriptor:\n')
+      icp_entry.log_root_descriptor.print_desc(o)
+
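
# An illustrative sketch (not part of aftltool's API): add_icp_entry() keeps
# the header bookkeeping in sync with the entry list, and the encoded blob
# parses back into an equivalent descriptor.
def _example_aftl_descriptor_roundtrip():
  descriptor = AftlDescriptor()
  descriptor.add_icp_entry(AftlIcpEntry())
  decoded = AftlDescriptor(descriptor.encode())
  assert decoded.icp_header.icp_count == 1
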
+
+class AftlCommunication(object):
+  """Class to abstract the communication layer with the transparency log."""
+
+  def __init__(self, transparency_log):
+    """Initializes the object.
+
+    Arguments:
+      transparency_log: String containing the URL of a transparency log server.
+
+    """
+    self.transparency_log = transparency_log
+
+  def AddFirmwareInfo(self, request):
+    """Calls the AddFirmwareInfo RPC on the AFTL server.
+
+    Arguments:
+      request: An AddFirmwareInfoRequest message.
+
+    Returns:
+      An AddFirmwareInfoResponse message.
+
+    Raises:
+      AftlError: If grpc or the proto modules cannot be loaded, or if there
+        is an error communicating with the log.
+    """
+    raise NotImplementedError(
+        'AddFirmwareInfo() needs to be implemented by subclass.')
+
+
+class AftlGrpcCommunication(AftlCommunication):
+  """Class that implements GRPC communication to the AFTL server."""
+
+  def AddFirmwareInfo(self, request):
+    """Calls the AddFirmwareInfo RPC on the AFTL server
+
+    Arguments:
+      request: An AddFirmwareInfoRequest message.
+
+    Returns:
+      An AddFirmwareInfoResponse message.
+
+    Raises:
+      AftlError: If grpc or the proto modules cannot be loaded, or if there
+        is an error communicating with the log.
+    """
+    # Import grpc now to avoid a global dependency, as it otherwise breaks
+    # running unittests with atest.
+    try:
+      import grpc
+      import proto.api_pb2_grpc
+    except ImportError as e:
+      err_str = 'grpc can be installed with "pip install grpcio".\n'
+      raise AftlError('Failed to import module: ({}).\n{}'.format(e, err_str))
+
+    # Set up the gRPC channel with the transparency log.
+    sys.stdout.write('Preparing to request inclusion proof from {}. This '
+                     'could take ~30 seconds to complete.\n'.format(
+                         self.transparency_log))
+    channel = grpc.insecure_channel(self.transparency_log)
+    stub = proto.api_pb2_grpc.AFTLogStub(channel)
+
+    # Attempt to transmit to the transparency log.
+    sys.stdout.write('ICP is about to be requested from transparency log '
+                     'with domain {}.\n'.format(self.transparency_log))
+    try:
+      # TODO(danielaustin): Set a reasonable timeout deadline here.
+      response = stub.AddFirmwareInfo(request)
+    except grpc.RpcError as e:
+      raise AftlError('Error: grpc failure ({})'.format(e))
+    return response
+
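
# An illustrative sketch (not part of aftltool's API): tests can stay off
# the network by subclassing AftlCommunication with a canned response; a
# real response would need a fully populated fw_info_proof.
class _FakeAftlCommunication(AftlCommunication):

  def __init__(self, transparency_log, canned_response):
    super(_FakeAftlCommunication, self).__init__(transparency_log)
    self.canned_response = canned_response

  def AddFirmwareInfo(self, request):
    return self.canned_response
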
+
+class Aftl(avbtool.Avb):
+  """Business logic for aftltool command-line tool."""
+
+  def info_image_icp(self, image_filename, output):
+    """Implements the 'info_image_icp' command.
+
+    Arguments:
+      image_filename: Image file to get information from.
+      output: Output file to write human-readable information to (file object).
+    """
+    image = avbtool.ImageHandler(image_filename)
+    o = output
+    (footer, header, _, _) = self._parse_image(image)
+
+    offset = 0
+    if footer:
+      offset = footer.vbmeta_offset
+    image.seek(offset +
+               header.SIZE +
+               header.authentication_data_block_size +
+               header.auxiliary_data_block_size)
+
+    # Parse the header out to get the AftlDescriptor size.
+    tmp_header_bytes = image.read(AftlIcpHeader.SIZE)
+    try:
+      tmp_header = AftlIcpHeader(tmp_header_bytes)
+    except AftlError:
+      sys.stderr.write('This image does not contain a valid AftlDescriptor.\n')
+      return
+    # Reset to the beginning of the AftlDescriptor.
+    image.seek(offset +
+               header.SIZE +
+               header.authentication_data_block_size +
+               header.auxiliary_data_block_size)
+    icp_bytes = image.read(tmp_header.aftl_descriptor_size)
+
+    icp_descriptor = AftlDescriptor(icp_bytes)
+    icp_descriptor.print_desc(o)
+
+  def request_inclusion_proof(self, transparency_log, vbmeta_descriptor,
+                              version_inc, manufacturer_key_path,
+                              signing_helper, signing_helper_with_files,
+                              aftl_comms=None):
+    """Packages and sends a request to the specified transparency log.
+
+    Arguments:
+      transparency_log: String containing the URL of a transparency log server.
+      vbmeta_descriptor: A bytearray with the vbmeta descriptor.
+      version_inc: Subcomponent of the build fingerprint.
+      manufacturer_key_path: Path to key used to sign messages sent to the
+        transparency log servers.
+      signing_helper: Program which signs a hash and returns a signature.
+      signing_helper_with_files: Same as signing_helper but uses files instead.
+      aftl_comms: An instance of an AftlCommunication subclass. The default
+        is to use AftlGrpcCommunication.
+
+    Returns:
+      An AftlIcpEntry with the inclusion proof for the log entry.
+
+    Raises:
+      AftlError: If grpc or the proto modules cannot be loaded, if there is an
+         error communicating with the log, if the manufacturer_key_path
+         cannot be decoded, or if the log submission cannot be signed.
+    """
+    # Calculate the hash of the vbmeta image.
+    hasher = hashlib.sha256()
+    hasher.update(vbmeta_descriptor)
+    vbmeta_hash = hasher.digest()
+    # Extract the key data from the PEM file.
+    manufacturer_key_data = rsa_key_read_pem_bytes(manufacturer_key_path)
+    # Calculate the hash of the manufacturer key data.
+    hasher = hashlib.sha256()
+    hasher.update(manufacturer_key_data)
+    m_key_hash = hasher.digest()
+    # Create an AddFirmwareInfoRequest protobuf for transmission to the
+    # transparency log.
+    fw_info = proto.aftl_pb2.FirmwareInfo(vbmeta_hash=vbmeta_hash,
+                                          version_incremental=version_inc,
+                                          manufacturer_key_hash=m_key_hash)
+    signed_fw_info = bytearray()
+    # AFTL supports SHA256_RSA4096 for now; more will be available.
+    algorithm_name = 'SHA256_RSA4096'
+    sig_num_bytes = 0
+    alg_padding = ''
+    try:
+      alg = avbtool.ALGORITHMS[algorithm_name]
+      sig_num_bytes = alg.signature_num_bytes
+      alg_padding = alg.padding
+    except KeyError:
+      raise AftlError('Unknown algorithm with name {}'.format(algorithm_name))
+
+    hasher = hashlib.sha256()
+    hasher.update(fw_info.SerializeToString())
+    fw_info_hash = hasher.digest()
+    padding_and_hash = str(bytearray(alg_padding)) + fw_info_hash
+    try:
+      signed_fw_info = avbtool.raw_sign(signing_helper,
+                                        signing_helper_with_files,
+                                        algorithm_name,
+                                        sig_num_bytes,
+                                        manufacturer_key_path,
+                                        padding_and_hash)
+    except avbtool.AvbError as e:
+      raise AftlError('Failed to sign FirmwareInfo with '
+                      '--manufacturer_key: {}'.format(e))
+    fw_info_sig = proto.crypto.sigpb.sigpb_pb2.DigitallySigned(
+        hash_algorithm='SHA256',
+        signature_algorithm='RSA',
+        signature=str(signed_fw_info))
+
+    sfw_info = proto.aftl_pb2.SignedFirmwareInfo(info=fw_info,
+                                                 info_signature=fw_info_sig)
+    request = proto.api_pb2.AddFirmwareInfoRequest(vbmeta=bytes(
+        str(vbmeta_descriptor)), fw_info=sfw_info)
+
+    # Submit signed FirmwareInfo to the server.
+    if not aftl_comms:
+      aftl_comms = AftlGrpcCommunication(transparency_log)
+    response = aftl_comms.AddFirmwareInfo(request)
+
+    # Return an AftlIcpEntry representing this response.
+    icp_entry = AftlIcpEntry()
+    icp_entry.fw_info_leaf = fw_info
+    icp_entry.translate_response(transparency_log, response)
+    return icp_entry
+
+  def make_icp_from_vbmeta(self, vbmeta_image_path, output,
+                           signing_helper, signing_helper_with_files,
+                           version_incremental, transparency_log_servers,
+                           transparency_log_pub_keys, manufacturer_key,
+                           padding_size):
+    """Generates a vbmeta image with inclusion proof given a vbmeta image.
+
+    The descriptor (struct AftlDescriptor) contains the information required to
+    validate an inclusion proof for a specific vbmeta image. It consists
+    of a header (struct AftlIcpHeader) and zero or more entry structures
+    (struct AftlIcpEntry) that contain the vbmeta leaf hash, tree size,
+    root hash, inclusion proof hashes, and the signature for the root hash.
+
+    The vbmeta image, its hash, the version_incremental part of the build
+    fingerprint, and the hash of the manufacturer key are sent to the
+    transparency log, with the message signed by the manufacturer key.
+    An inclusion proof is calculated and returned. This inclusion proof is
+    then packaged in an AftlDescriptor structure. The existing vbmeta data is
+    copied to a new file, appended with the AftlDescriptor data, and written to
+    output. Validation of the inclusion proof does not require
+    communication with the transparency log.
+
+    Arguments:
+      vbmeta_image_path: Path to a vbmeta image file.
+      output: File to write the results to.
+      signing_helper: Program which signs a hash and returns a signature.
+      signing_helper_with_files: Same as signing_helper but uses files instead.
+      version_incremental: A string representing the subcomponent of the
+        build fingerprint used to identify the vbmeta in the transparency log.
+      transparency_log_servers: List of strings containing URLs of transparency
+        log servers where inclusion proofs are requested from.
+      transparency_log_pub_keys: List of paths to PEM files containing trusted
+        public keys that correspond with the transparency_logs. There must be
+        the same number of keys as log servers and they must be in the same
+        order, that is, transparency_log_pub_keys[n] corresponds to
+        transparency_log_servers[n].
+      manufacturer_key: Path to PEM file containing the key used to sign
+        messages sent to the transparency log servers.
+      padding_size: If not 0, pads output so size is a multiple of the number.
+
+    Returns:
+      True if the inclusion proofs could be fetched from the transparency log
+      servers and could be successfully validated, False otherwise.
+
+    Raises:
+      AftlError: If any parameters are invalid, communication with the log
+      fails or the structures are malformed.
+    """
+    # TODO(danielaustin): Determine the best way to handle chained vbmeta
+    # structures. Currently, we only put the main one in the transparency
+    # log.
+
+    # Validates command line parameters.
+    if len(transparency_log_servers) != len(transparency_log_pub_keys):
+      raise AftlError('Transparency log count and public key count mismatch: '
+                      '{} servers and {} public keys'.format(
+                          len(transparency_log_servers),
+                          len(transparency_log_pub_keys)))
+
+    # Retrieves vbmeta structure from given partition image.
+    image = avbtool.ImageHandler(vbmeta_image_path)
+    (footer, header, _, _) = self._parse_image(image)
+    offset = 0
+    if footer:
+      offset = footer.vbmeta_offset
+    image.seek(offset)
+    vbmeta_image = image.read(header.SIZE +
+                              header.authentication_data_block_size +
+                              header.auxiliary_data_block_size)
+
+    # Fetches inclusion proofs for vbmeta structure from all transparency logs.
+    icp_entries = []
+    for i, transparency_log in enumerate(transparency_log_servers):
+      try:
+        icp_entry = self.request_inclusion_proof(transparency_log, vbmeta_image,
+                                                 version_incremental,
+                                                 manufacturer_key,
+                                                 signing_helper,
+                                                 signing_helper_with_files)
+        # TODO(danielaustin): Update icp_entry to validate if the vbmeta image
+        # matches with the ICP stored data, and store the correct ICP
+        # in the icp_entry.
+        if not icp_entry.verify_icp(transparency_log_pub_keys[i]):
+          sys.stderr.write('The ICP from {} could not be verified.\n'.format(
+              transparency_log))
+          continue
+        icp_entries.append(icp_entry)
+      except AftlError as e:
+        sys.stderr.write('AftlError: {}\n'.format(e))
+        # The inclusion proof request failed.
+        # Continue and see if another will succeed.
+        continue
+    if not icp_entries:
+      sys.stderr.write('No inclusion proofs could be validated from any log.\n')
+      return False
+
+    # Prepares the AFTL descriptor to be appended to the vbmeta image.
+    aftl_descriptor = AftlDescriptor()
+    for icp_entry in icp_entries:
+      aftl_descriptor.add_icp_entry(icp_entry)
+    if not aftl_descriptor.is_valid():
+      sys.stderr.write('Resulting AftlDescriptor structure is malformed.\n')
+      return False
+
+    # Writes the original vbmeta image, followed by the AftlDescriptor.
+    if footer:  # Checks if it is a chained partition.
+      # TODO(danielaustin): Add support for chained partitions like system.img
+      # using similar functionality as implemented in append_vbmeta_image().
+      sys.stderr.write('Image has a footer and ICP for this format is not '
+                       'implemented.\n')
+      return False
+
+    # Writes vbmeta image with inclusion proof into a new vbmeta image.
+    output.seek(0)
+    output.write(vbmeta_image)
+    encoded_aftl_descriptor = aftl_descriptor.encode()
+    output.write(encoded_aftl_descriptor)
+
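+    # Pads the output with NUL bytes up to the next multiple of padding_size.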
+    if padding_size > 0:
+      descriptor_size = len(vbmeta_image) + len(encoded_aftl_descriptor)
+      padded_size = avbtool.round_to_multiple(descriptor_size, padding_size)
+      padding_needed = padded_size - descriptor_size
+      output.write('\0' * padding_needed)
+
+    return True
+
+  def _load_test_process_function(self, vbmeta_image_path,
+                                  transparency_log_server,
+                                  transparency_log_pub_key, manufacturer_key,
+                                  process_number, submission_count,
+                                  preserve_icp_images, result_queue):
+    """Function to be used by multiprocessing.Process.
+
+    Arguments:
+      vbmeta_image_path: Path to a vbmeta image file.
+      transparency_log_server: A string in host:port format of the
+        transparency log server from which an inclusion proof is requested.
+      transparency_log_pub_key: Path to a PEM file containing the trusted
+        public key that corresponds with the transparency_log_server.
+      manufacturer_key: Path to a PEM file containing the key used to sign
+        messages sent to the transparency log servers.
+      process_number: The number identifying the process executing this
+        function.
+      submission_count: Number of submissions to perform per process.
+      preserve_icp_images: Boolean to indicate if the generated vbmeta
+        image files with inclusion proofs should be preserved.
+      result_queue: Multiprocessing.Queue object for posting execution results.
+    """
+    for count in range(0, submission_count):
+      version_incremental = 'aftl_load_testing_{}_{}'.format(process_number,
+                                                             count)
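+      # A unique version_incremental per submission keeps the individual
+      # submissions distinguishable in the transparency log.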
+      output_file = '{}_icp.img'.format(version_incremental)
+      output = open(output_file, 'wb')
+
+      # Instrumented section.
+      start_time = time.time()
+      result = self.make_icp_from_vbmeta(
+          vbmeta_image_path=vbmeta_image_path,
+          output=output,
+          signing_helper=None,
+          signing_helper_with_files=None,
+          version_incremental=version_incremental,
+          transparency_log_servers=[transparency_log_server],
+          transparency_log_pub_keys=[transparency_log_pub_key],
+          manufacturer_key=manufacturer_key,
+          padding_size=0)
+      end_time = time.time()
+
+      output.close()
+      if not preserve_icp_images:
+        os.unlink(output_file)
+
+      # Puts the result onto the result queue.
+      execution_time = end_time - start_time
+      result_queue.put((start_time, end_time, execution_time,
+                        version_incremental, result))
+
+  def load_test_aftl(self, vbmeta_image_path, output, transparency_log_server,
+                     transparency_log_pub_key, manufacturer_key,
+                     process_count, submission_count, stats_filename,
+                     preserve_icp_images):
+    """Performs multi-threaded load test on a given AFTL and prints stats.
+
+    Arguments:
+      vbmeta_image_path: Path to a vbmeta image file.
+      output: File to write the report to.
+      transparency_log_server: A string in host:port format of the
+        transparency log server from which an inclusion proof is requested.
+      transparency_log_pub_key: Path to a PEM file containing the trusted
+        public key that corresponds with the transparency_log_server.
+      manufacturer_key: Path to a PEM file containing the key used to sign
+        messages sent to the transparency log servers.
+      process_count: Number of processes used for parallel testing.
+      submission_count: Number of submissions to perform per process.
+      stats_filename: Path to the stats file to write the raw execution data to.
+      preserve_icp_images: Boolean to indicate if the generated vbmeta
+        image files with inclusion proofs should be preserved.
+
+    Returns:
+      True if the load test succeeded without errors; otherwise False.
+    """
+    if process_count < 1 or submission_count < 1:
+      sys.stderr.write('Values for --processes/--submissions '
+                       'must be at least 1.\n')
+      return False
+
+    if not stats_filename:
+      stats_filename = 'load_test_p{}_s{}.csv'.format(process_count,
+                                                      submission_count)
+    try:
+      stats_file = open(stats_filename, 'w')
+      stats_file.write('start_time,end_time,execution_time,version_incremental,'
+                       'result\n')
+    except IOError as e:
+      sys.stderr.write('Could not open stats file {}: {}\n'.format(
+          stats_filename, e))
+      return False
+
+    # Launch all the processes with their workloads.
+    result_queue = multiprocessing.Queue()
+    processes = set()
+    execution_times = []
+    results = []
+    for i in range(0, process_count):
+      p = multiprocessing.Process(
+          target=self._load_test_process_function,
+          args=(vbmeta_image_path, transparency_log_server,
+                transparency_log_pub_key, manufacturer_key, i, submission_count,
+                preserve_icp_images, result_queue))
+      p.start()
+      processes.add(p)
+
+    while processes:
+      # Processes the results queue and writes these to a stats file.
+      try:
+        (start_time, end_time, execution_time, version_incremental,
+         result) = result_queue.get(timeout=1)
+        stats_file.write('{},{},{},{},{}\n'.format(start_time, end_time,
+                                                   execution_time,
+                                                   version_incremental, result))
+        execution_times.append(execution_time)
+        results.append(result)
+      except Queue.Empty:
+        pass
+
+      # Checks if processes are still alive; if not, cleans them up. join()
+      # would have been nicer but we want to continuously stream out the stats
+      # to the file.
+      for p in processes.copy():
+        if not p.is_alive():
+          processes.remove(p)
+
+    # Prepares stats.
+    executions = sorted(execution_times)
+    execution_count = len(execution_times)
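+    # With an even number of samples the median is the mean of the two middle
+    # values; with an odd number it is the middle value itself.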
+    median = 0
+    if execution_count % 2 == 0:
+      median = (executions[execution_count // 2 - 1]
+                + executions[execution_count // 2]) / 2
+    else:
+      median = executions[execution_count // 2]
+
+    # Outputs the stats report.
+    o = output
+    o.write('Load testing results:\n')
+    o.write('  Processes:               {}\n'.format(process_count))
+    o.write('  Submissions per process: {}\n'.format(submission_count))
+    o.write('\n')
+    o.write('  Submissions:\n')
+    o.write('    Total:                 {}\n'.format(len(executions)))
+    o.write('    Succeeded:             {}\n'.format(results.count(True)))
+    o.write('    Failed:                {}\n'.format(results.count(False)))
+    o.write('\n')
+    o.write('  Submission execution durations:\n')
+    o.write('    Average:               {:.2f} sec\n'.format(
+        sum(execution_times) / execution_count))
+    o.write('    Median:                {:.2f} sec\n'.format(median))
+    o.write('    Min:                   {:.2f} sec\n'.format(min(executions)))
+    o.write('    Max:                   {:.2f} sec\n'.format(max(executions)))
+
+    # Close the stats file.
+    stats_file.close()
+    return True
+
+
+class AftlTool(avbtool.AvbTool):
+  """Object for aftltool command-line tool."""
+
+  def __init__(self):
+    """Initializer method."""
+    self.aftl = Aftl()
+    super(AftlTool, self).__init__()
+
+  def make_icp_from_vbmeta(self, args):
+    """Implements the 'make_icp_from_vbmeta' sub-command."""
+    args = self._fixup_common_args(args)
+    self.aftl.make_icp_from_vbmeta(args.vbmeta_image_path,
+                                   args.output,
+                                   args.signing_helper,
+                                   args.signing_helper_with_files,
+                                   args.version_incremental,
+                                   args.transparency_log_servers,
+                                   args.transparency_log_pub_keys,
+                                   args.manufacturer_key,
+                                   args.padding_size)
+
+  def info_image_icp(self, args):
+    """Implements the 'info_image_icp' sub-command."""
+    self.aftl.info_image_icp(args.vbmeta_image_path.name, args.output)
+
+  def load_test_aftl(self, args):
+    """Implements the 'load_test_aftl' sub-command."""
+    self.aftl.load_test_aftl(args.vbmeta_image_path,
+                             args.output,
+                             args.transparency_log_server,
+                             args.transparency_log_pub_key,
+                             args.manufacturer_key,
+                             args.processes,
+                             args.submissions,
+                             args.stats_file,
+                             args.preserve_icp_images)
+
+  def run(self, argv):
+    """Command-line processor.
+
+    Arguments:
+      argv: Pass sys.argv from main.
+    """
+    parser = argparse.ArgumentParser()
+    subparsers = parser.add_subparsers(title='subcommands')
+
+    # Command: make_icp_from_vbmeta
+    sub_parser = subparsers.add_parser('make_icp_from_vbmeta',
+                                       help='Makes an ICP-enhanced vbmeta '
+                                       'image from an existing vbmeta image.')
+    sub_parser.add_argument('--output',
+                            help='Output file name.',
+                            type=argparse.FileType('wb'),
+                            default=sys.stdout)
+    sub_parser.add_argument('--vbmeta_image_path',
+                            help='Path to a generated vbmeta image file.',
+                            required=True)
+    sub_parser.add_argument('--version_incremental',
+                            help='Current build ID.',
+                            required=True)
+    sub_parser.add_argument('--manufacturer_key',
+                            help='Path to the PEM file containing the '
+                            'manufacturer key for use with the log.',
+                            required=True)
+    sub_parser.add_argument('--transparency_log_servers',
+                            help='List of transparency log servers in '
+                            'host:port format. This must not be None and must '
+                            'be the same size as transparency_log_pub_keys. '
+                            'Also, transparency_log_servers[n] must correspond '
+                            'to transparency_log_pub_keys[n] for all values n.',
+                            nargs='*',
+                            required=True)
+    sub_parser.add_argument('--transparency_log_pub_keys',
+                            help='Paths to PEM files containing transparency '
+                            'log server key(s). This must not be None and must '
+                            'be the same size as transparency_log_servers. '
+                            'Also, transparency_log_pub_keys[n] must '
+                            'correspond to transparency_log_servers[n] for all '
+                            'values n.',
+                            nargs='*',
+                            required=True)
+    sub_parser.add_argument('--padding_size',
+                            metavar='NUMBER',
+                            help='If non-zero, pads output with NUL bytes so '
+                            'its size is a multiple of NUMBER '
+                            '(default: 0).',
+                            type=avbtool.parse_number,
+                            default=0)
+    self._add_common_args(sub_parser)
+    sub_parser.set_defaults(func=self.make_icp_from_vbmeta)
+
+    # Command: info_image_icp
+    sub_parser = subparsers.add_parser(
+        'info_image_icp',
+        help='Show information about AFTL ICPs in vbmeta or footer.')
+    sub_parser.add_argument('--vbmeta_image_path',
+                            help='Path to vbmeta image for AFTL information.',
+                            type=argparse.FileType('rb'),
+                            required=True)
+    sub_parser.add_argument('--output',
+                            help='Write info to file.',
+                            type=argparse.FileType('wt'),
+                            default=sys.stdout)
+    sub_parser.set_defaults(func=self.info_image_icp)
+
+    # Command: load_test_aftl
+    sub_parser = subparsers.add_parser(
+        'load_test_aftl',
+        help='Perform load testing against one AFTL log server. Note: This '
+        'MUST NOT be performed against a production system.')
+    sub_parser.add_argument('--vbmeta_image_path',
+                            help='Path to a generated vbmeta image file.',
+                            required=True)
+    sub_parser.add_argument('--output',
+                            help='Write report to file.',
+                            type=argparse.FileType('wt'),
+                            default=sys.stdout)
+    sub_parser.add_argument('--manufacturer_key',
+                            help='Path to the PEM file containing the '
+                            'manufacturer key for use with the log.',
+                            required=True)
+    sub_parser.add_argument('--transparency_log_server',
+                            help='Transparency log server to test against in '
+                            'host:port format.',
+                            required=True)
+    sub_parser.add_argument('--transparency_log_pub_key',
+                            help='Path to PEM file containing the '
+                            'transparency log server key.',
+                            required=True)
+    sub_parser.add_argument('--processes',
+                            help='Number of parallel processes to use for '
+                            'testing (default: 1).',
+                            type=avbtool.parse_number,
+                            default=1)
+    sub_parser.add_argument('--submissions',
+                            help='Number of submissions to perform against the '
+                            'log per process (default: 1).',
+                            type=avbtool.parse_number,
+                            default=1)
+    sub_parser.add_argument('--stats_file',
+                            help='Path to the stats file to write the raw '
+                            'execution data to (default: '
+                            'load_test_p[processes]_s[submissions].csv).')
+    sub_parser.add_argument('--preserve_icp_images',
+                            help='Boolean flag to indicate if the generated '
+                            'vbmeta image files with inclusion proofs should '
+                            'be preserved.',
+                            action='store_true')
+    sub_parser.set_defaults(func=self.load_test_aftl)
+
+    args = parser.parse_args(argv[1:])
+    try:
+      args.func(args)
+    except AftlError:
+      # Indicate failure to signal to calling tools.
+      sys.exit(1)
+
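+# A minimal example invocation (hypothetical host and key file names; the
+# flags mirror the argparse definitions above):
+#
+#   aftltool make_icp_from_vbmeta \
+#       --vbmeta_image_path vbmeta.img \
+#       --output vbmeta_icp.img \
+#       --version_incremental 1 \
+#       --manufacturer_key manufacturer_key.pem \
+#       --transparency_log_servers log.example.com:9000 \
+#       --transparency_log_pub_keys transparency_log_pub_key.pem
+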
+if __name__ == '__main__':
+  tool = AftlTool()
+  tool.run(sys.argv)
diff --git a/aftltool.py b/aftltool.py
new file mode 120000
index 0000000..39f589b
--- /dev/null
+++ b/aftltool.py
@@ -0,0 +1 @@
+aftltool
\ No newline at end of file
diff --git a/aftltool_integration_test.py b/aftltool_integration_test.py
new file mode 100755
index 0000000..af6d627
--- /dev/null
+++ b/aftltool_integration_test.py
@@ -0,0 +1,171 @@
+#!/usr/bin/env python
+
+# Copyright 2019, The Android Open Source Project
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation
+# files (the "Software"), to deal in the Software without
+# restriction, including without limitation the rights to use, copy,
+# modify, merge, publish, distribute, sublicense, and/or sell copies
+# of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+#
+"""Integration tests for aftltool with AFTL.
+
+The test cases directly interact with a transparency log. However,
+before using this script the following environment variables
+need to be set:
+
+  AFTL_HOST: host:port of the transparency log to test with.
+  AFTL_PUBKEY: Transparency log public key in PEM format.
+  AFTL_VBMETA_IMAGE: VBMeta image that should be used for submission to AFTL.
+  AFTL_MANUFACTURER_KEY: Manufacturer signing key used to sign submissions
+      to the transparency log in PEM format.
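+
+For example (hypothetical values):
+
+  export AFTL_HOST=log.example.com:9000
+  export AFTL_PUBKEY=transparency_log_pub_key.pem
+  export AFTL_VBMETA_IMAGE=vbmeta.img
+  export AFTL_MANUFACTURER_KEY=manufacturer_key.pem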
+"""
+
+import os
+import unittest
+
+import aftltool
+import avbtool
+
+
+class AFTLIntegrationTest(unittest.TestCase):
+  """Test suite for testing aftltool with a AFTL."""
+
+  def setUp(self):
+    """Sets up the test bed for the unit tests."""
+    super(AFTLIntegrationTest, self).setUp()
+    self.aftltool = aftltool.Aftl()
+    self.output_filename = 'vbmeta_icp.img'
+
+    self.aftl_host = os.environ.get('AFTL_HOST')
+    self.aftl_pubkey = os.environ.get('AFTL_PUBKEY')
+    self.vbmeta_image = os.environ.get('AFTL_VBMETA_IMAGE')
+    self.manufacturer_key = os.environ.get('AFTL_MANUFACTURER_KEY')
+
+    if (not self.aftl_host or not self.aftl_pubkey or not self.vbmeta_image
+        or not self.manufacturer_key):
+      self.fail('Environment variables not correctly set up. See the module '
+                'docstring for details.')
+
+    self.make_icp_default_params = {
+        'vbmeta_image_path': self.vbmeta_image,
+        'output': None,
+        'signing_helper': None,
+        'signing_helper_with_files': None,
+        'version_incremental': '1',
+        'transparency_log_servers': [self.aftl_host],
+        'transparency_log_pub_keys': [self.aftl_pubkey],
+        'manufacturer_key': self.manufacturer_key,
+        'padding_size': 0
+    }
+
+  def tearDown(self):
+    """Tears down the test bed for the unit tests."""
+    try:
+      os.remove(self.output_filename)
+    except OSError:
+      pass
+    super(AFTLIntegrationTest, self).tearDown()
+
+  def _read_icp_from_vbmeta_blob(self):
+    """Reads the ICP from the output file.
+
+    Returns:
+      AftlDescriptor for the ICP included in the given vbmeta image.
+    """
+    image = avbtool.ImageHandler(self.output_filename)
+
+    # pylint: disable=protected-access
+    (footer, header, _, _) = self.aftltool._parse_image(image)
+    offset = 0
+    if footer:
+      offset = footer.vbmeta_offset
+    image.seek(offset)
+    vbmeta_blob = image.read(header.SIZE +
+                             header.authentication_data_block_size +
+                             header.auxiliary_data_block_size)
+    image.seek(offset + len(vbmeta_blob))
+    # TODO(jpm): Fix AftlDescriptor so that the length of it can be derived
+    # without having to read the whole descriptor.
+    icp_bytes = image.read(100000)
+    self.assertGreater(len(icp_bytes), 0)
+
+    icp_blob = aftltool.AftlDescriptor(icp_bytes)
+    self.assertTrue(icp_blob.is_valid())
+    return icp_blob
+
+  def _make_icp_from_vbmeta(self):
+    """Submits vbmeta to AFTL and fetches inclusion proofs.
+
+    Returns:
+      True if make_icp_from_vbmeta command succeeds; otherwise False.
+    """
+    with open(self.output_filename, 'wb') as output_file:
+      self.make_icp_default_params['output'] = output_file
+      result = self.aftltool.make_icp_from_vbmeta(
+          **self.make_icp_default_params)
+    return result
+
+  def test_make_icp_with_one_transparency_log(self):
+    """Tests integration of aftltool with one AFTL."""
+    # Submits vbmeta to AFTL and fetches ICP.
+    result = self._make_icp_from_vbmeta()
+    self.assertTrue(result)
+
+    # Reads back the vbmeta image with the ICP.
+    icp_blob = self._read_icp_from_vbmeta_blob()
+
+    # Checks ICP proof blob for correctness.
+    icp_header = icp_blob.icp_header
+    self.assertIsNotNone(icp_header)
+    self.assertEqual(icp_header.magic, 'AFTL')
+    self.assertEqual(icp_header.icp_count, 1)
+
+    self.assertEqual(len(icp_blob.icp_entries), 1)
+    for icp in icp_blob.icp_entries:
+      self.assertEqual(icp.log_url, self.aftl_host)
+      self.assertTrue(icp.verify_icp(self.aftl_pubkey))
+
+  def test_make_icp_with_two_transparency_logs(self):
+    """Tests integration of aftltool with two AFTLs."""
+    # Reconfigures default parameters with two transparency logs.
+    self.make_icp_default_params['transparency_log_servers'] = [
+        self.aftl_host, self.aftl_host]
+    self.make_icp_default_params['transparency_log_pub_keys'] = [
+        self.aftl_pubkey, self.aftl_pubkey]
+
+    # Submits vbmeta to two AFTLs and fetches their ICPs.
+    result = self._make_icp_from_vbmeta()
+    self.assertTrue(result)
+
+    # Reads back the vbmeta image with the ICP.
+    icp_blob = self._read_icp_from_vbmeta_blob()
+
+    # Checks ICP proof blob for correctness.
+    icp_header = icp_blob.icp_header
+    self.assertIsNotNone(icp_header)
+    self.assertEqual(icp_header.magic, 'AFTL')
+    self.assertEqual(icp_header.icp_count, 2)
+
+    self.assertEqual(len(icp_blob.icp_entries), 2)
+    for icp in icp_blob.icp_entries:
+      self.assertEqual(icp.log_url, self.aftl_host)
+      self.assertTrue(icp.verify_icp(self.aftl_pubkey))
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/aftltool_test.py b/aftltool_test.py
new file mode 100755
index 0000000..e3e983a
--- /dev/null
+++ b/aftltool_test.py
@@ -0,0 +1,917 @@
+#!/usr/bin/env python
+
+# Copyright 2019, The Android Open Source Project
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation
+# files (the "Software"), to deal in the Software without
+# restriction, including without limitation the rights to use, copy,
+# modify, merge, publish, distribute, sublicense, and/or sell copies
+# of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+#
+"""Unit tests for aftltool."""
+
+# pylint: disable=unused-import
+from __future__ import print_function
+
+import binascii
+import io
+import os
+import sys
+import unittest
+
+import aftltool
+import avbtool
+import proto.aftl_pb2
+import proto.api_pb2
+import proto.trillian_pb2
+
+
+class AftltoolTestCase(unittest.TestCase):
+
+  def setUp(self):
+    """Sets up the test bed for the unit tests."""
+    super(AftltoolTestCase, self).setUp()
+
+    # Redirects the stderr to /dev/null when running the unittests. The reason
+    # is that soong interprets any output on stderr as an error and marks the
+    # unit test as failed although the test itself succeeded.
+    self.stderr = sys.stderr
+    self.null = open(os.devnull, 'wb')
+    sys.stderr = self.null
+
+    # Test AftlIcpEntry #1
+    self.test_tl_url_1 = 'aftl-test-server.google.com'
+
+    self.test_sth_1 = aftltool.TrillianLogRootDescriptor()
+    self.test_sth_1.tree_size = 2
+    self.test_sth_1.root_hash_size = 32
+    self.test_sth_1.root_hash = bytearray('f' * 32)
+    self.test_sth_1.timestamp = 0x1234567890ABCDEF
+    self.test_sth_1.revision = 0xFEDCBA0987654321
+
+    self.test_sth_1_bytes = bytearray(
+        '\x00\x01'                          # version
+        '\x00\x00\x00\x00\x00\x00\x00\x02'  # tree_size
+        '\x20'                              # root_hash_size
+        + 'f' * 32 +                        # root_hash
+        '\x12\x34\x56\x78\x90\xAB\xCD\xEF'  # timestamp
+        '\xFE\xDC\xBA\x09\x87\x65\x43\x21'  # revision
+        '\x00\x00'                          # metadata_size
+        ''                                  # metadata (empty)
+    )
+
+    # Fills each structure with an easily observable pattern for validation.
+    self.test_proof_hashes_1 = []
+    self.test_proof_hashes_1.append(bytearray('b' * 32))
+    self.test_proof_hashes_1.append(bytearray('c' * 32))
+    self.test_proof_hashes_1.append(bytearray('d' * 32))
+    self.test_proof_hashes_1.append(bytearray('e' * 32))
+
+    # Valid test AftlIcpEntry #1.
+    self.test_entry_1 = aftltool.AftlIcpEntry()
+    self.test_entry_1.set_log_url(self.test_tl_url_1)
+    self.test_entry_1.leaf_index = 1
+    self.test_entry_1.set_log_root_descriptor(self.test_sth_1)
+    self.test_entry_1.set_proofs(self.test_proof_hashes_1)
+    self.test_entry_1.log_root_signature = 'g' * 512
+    self.test_entry_1.log_root_sig_size = 512
+
+    self.test_entry_1_bytes = bytearray(
+        '\x00\x00\x00\x1b'                  # Transparency log url size.
+        '\x00\x00\x00\x00\x00\x00\x00\x01'  # Leaf index.
+        '\x00\x00\x00\x3d'                  # Log root descriptor size.
+        '\x00\x00\x00\x00'                  # Firmware info leaf size.
+        '\x02\x00'                          # Log root signature size.
+        '\x04'                              # Number of hashes in ICP.
+        '\x00\x00\x00\x80'                  # Size of ICP in bytes.
+        'aftl-test-server.google.com'       # Transparency log url.
+        + self.test_sth_1_bytes
+        + 'g' * 512                         # Log root signature.
+        + 'b' * 32                          # Hashes...
+        + 'c' * 32
+        + 'd' * 32
+        + 'e' * 32)
+
+    # Valid test AftlIcpEntry #2.
+    self.test_tl_url_2 = 'aftl-test-server.google.ch'
+
+    self.test_sth_2 = aftltool.TrillianLogRootDescriptor()
+    self.test_sth_2.tree_size = 4
+    self.test_sth_2.root_hash_size = 32
+    self.test_sth_2.root_hash = bytearray('e' * 32)
+    self.test_sth_2.timestamp = 6
+    self.test_sth_2.revision = 7
+    self.test_sth_2.metadata_size = 2
+    self.test_sth_2.metadata = '12'
+
+    self.test_sth_2_bytes = bytearray(
+        '\x00\x01'                          # version
+        '\x00\x00\x00\x00\x00\x00\x00\x04'  # tree_size
+        '\x20'                              # root_hash_size
+        + 'e' * 32 +                        # root_hash
+        '\x00\x00\x00\x00\x00\x00\x00\x06'  # timestamp
+        '\x00\x00\x00\x00\x00\x00\x00\x07'  # revision
+        '\x00\x02'                          # metadata_size
+        '12'                                # metadata
+    )
+
+    # Fills each structure with an easily observable pattern for validation.
+    self.test_proof_hashes_2 = []
+    self.test_proof_hashes_2.append(bytearray('g' * 32))
+    self.test_proof_hashes_2.append(bytearray('h' * 32))
+
+    self.test_entry_2 = aftltool.AftlIcpEntry()
+    self.test_entry_2.set_log_url(self.test_tl_url_2)
+    self.test_entry_2.leaf_index = 2
+    self.test_entry_2.set_log_root_descriptor(self.test_sth_2)
+    self.test_entry_2.log_root_signature = bytearray('d' * 512)
+    self.test_entry_2.log_root_sig_size = 512
+    self.test_entry_2.set_proofs(self.test_proof_hashes_2)
+
+    self.test_entry_2_bytes = bytearray(
+        '\x00\x00\x00\x1a'                  # Transparency log url size.
+        '\x00\x00\x00\x00\x00\x00\x00\x02'  # Leaf index.
+        '\x00\x00\x00\x3f'                  # Log root descriptor size.
+        '\x00\x00\x00\x00'                  # Firmware info leaf size.
+        '\x02\x00'                          # Log root signature size.
+        '\x02'                              # Number of hashes in ICP.
+        '\x00\x00\x00\x40'                  # Size of ICP in bytes.
+        'aftl-test-server.google.ch'        # Transparency log url.
+        + self.test_sth_2_bytes             # Log root
+        + 'd' * 512                         # Log root signature.
+        + 'g' * 32                          # Hashes...
+        + 'h' * 32)
+
+    # Valid test AftlDescriptor made out of AftlEntry #1 and #2.
+    self.test_aftl_desc = aftltool.AftlDescriptor()
+    self.test_aftl_desc.add_icp_entry(self.test_entry_1)
+    self.test_aftl_desc.add_icp_entry(self.test_entry_2)
+
+    self.test_expected_aftl_descriptor_bytes = bytearray(
+        # AftlIcpHeader
+        'AFTL'                              # Magic.
+        '\x00\x00\x00\x01'                  # Major version.
+        '\x00\x00\x00\x01'                  # Minor version.
+        '\x00\x00\x00\x12'                  # Descriptor size.
+        '\x00\x02'                          # Number of ICP entries.
+        + self.test_entry_1_bytes
+        + self.test_entry_2_bytes)
+
+    # Sets up test data.
+    # pylint: disable=no-member
+    self.test_afi_resp = proto.api_pb2.AddFirmwareInfoResponse()
+    self.test_afi_resp.fw_info_proof.proof.leaf_index = 6263
+    hashes = [
+        '3ad99869646980c0a51d637a9791f892d12e0bc83f6bac5d305a9e289e7f7e8b',
+        '2e5c664d2aee64f71cb4d292e787d0eae7ca9ed80d1e08abb41d26baca386c05',
+        'a671dd99f8d97e9155cc2f0a9dc776a112a5ec5b821ec71571bb258ac790717a',
+        '78046b839595e4e49ad4b0c73f92bf4803aacd4a3351181086509d057ef0d7a9',
+        'c0a7e013f03e7c69e9402070e113dadb345868cf144ccb174fabc384b5605abf',
+        'dc36e5dbe36abe9f4ad10f14170aa0148b6fe3fcaba9df43deaf4dede01b02e8',
+        'b063e7fb665370a361718208756c363dc5206e2e9af9b4d847d81289cdae30de',
+        'a69ea5ba88a221103636d3f4245c800570eb86ad9276121481521f97d0a04a81']
+    for h in hashes:
+      self.test_afi_resp.fw_info_proof.proof.hashes.append(
+          binascii.unhexlify(h))
+    self.test_afi_resp.fw_info_proof.sth.key_hint = binascii.unhexlify(
+        '5af859abce8fe1ea')
+    self.test_afi_resp.fw_info_proof.sth.log_root = binascii.unhexlify(
+        '000100000000000018782053b182b55dc1377197c938637f50093131daea4'
+        'd0696b1eae5b8a014bfde884a15edb28f1fc7954400000000000013a50000'
+    )
+    self.test_afi_resp.vbmeta_proof.sth.log_root_signature = binascii.unhexlify(
+        'c264bc7986a1cf56364ca4dd04989f45515cb8764d05b4fb2b880172585ea404'
+        '2105f95a0e0471fb6e0f8c762b14b2e526fb78eaddcc61484917795a12f6ab3b'
+        '557b5571d492d07d7950595f9ad8647a606c7c633f4697c5eb59c272aeca0419'
+        '397c70a3b9b51537537c4ea6b49d356110e70a9286902f814cc6afbeafe612e4'
+        '9e180146140e902bdd9e9dae66b37b4943150a9571949027a648db88a4eea3ad'
+        'f930b4fa6a183e97b762ab0e55a3a26aa6b0fd44d30531e2541ecb94bf645e62'
+        '59e8e3151e7c3b51a09fe24557ce2fd2c0ecdada7ce99c390d2ef10e5d075801'
+        '7c10d49c55cdee930959cc35f0104e04f296591eeb5defbc9ebb237da7b204ca'
+        'a4608cb98d6bc3a01f18585a04441caf8ec7a35aa2d35f7483b92b14fd0f4a41'
+        '3a91133545579309adc593222ca5032a103b00d8fcaea911936dbec11349e4dd'
+        '419b091ea7d1130570d70e2589dd9445fd77fd7492507e1c87736847b9741cc6'
+        '236868af42558ff6e833e12010c8ede786e43ada40ff488f5f1870d1619887d7'
+        '66a24ad0a06a47cc14e2f7db07361be191172adf3155f49713807c7c265f5a84'
+        '040fc84246ccf7913e44721f0043cea05ee774e457e13206775eee992620c3f9'
+        'd2b2584f58aac19e4afe35f0a17df699c45729f94101083f9fc4302659a7e6e0'
+        'e7eb36f8d1ca0be2c9010160d329bd2d17bb707b010fdd63c30b667a0b886cf9'
+    )
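+    # A JSON-encoded FirmwareInfo leaf as it would be returned by the log,
+    # carrying the vbmeta hash, the version_incremental string and the
+    # manufacturer key hash.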
+    self.test_afi_resp.fw_info_leaf = (
+        '{\"timestamp\":{\"seconds\":1580115370,\"nanos\":621454825},\"Va'
+        'lue\":{\"FwInfo\":{\"info\":{\"info\":{\"vbmeta_hash\":\"ViNzEQS'
+        '/oc/bJ13yl40fk/cvXw90bxHQbzCRxgHDIGc=\",\"version_incremental\":'
+        '\"1\",\"manufacturer_key_hash\":\"yBCrUOdjvaAh4git5EgqWa5neegUao'
+        'XeLlB67+N8ObY=\"}}}}}')
+
+  def tearDown(self):
+    """Tears down the test bed for the unit tests."""
+    # Reconnects stderr back to the normal stderr; see setUp() for details.
+    sys.stderr = self.stderr
+
+    super(AftltoolTestCase, self).tearDown()
+
+
+class AftltoolTest(AftltoolTestCase):
+
+  def setUp(self):
+    """Sets up the test bed for the unit tests."""
+    super(AftltoolTest, self).setUp()
+
+    self.test_url = 'test'
+    self.test_sth = aftltool.TrillianLogRootDescriptor()
+    self.test_sth.leaf_hash = bytearray('leaf' * 8)
+    self.test_sth.tree_size = 2
+    self.test_sth.root_hash = bytearray('root' * 8)
+    self.test_sth.root_hash_size = 32
+    self.test_sth.log_root_sig = bytearray('root_sig' * 64)
+    self.test_proofs = 'proofs'
+
+  def _validate_icp_entry_with_setters(
+      self, log_url, leaf_index, log_root_descriptor, proofs):
+    """Create an ICP entry structure and attempt to validate it.
+
+    Returns:
+      True if the tests pass, False otherwise.
+    """
+    icp_entry = aftltool.AftlIcpEntry()
+    icp_entry.leaf_index = leaf_index
+    icp_entry.set_log_url(log_url)
+    icp_entry.set_log_root_descriptor(log_root_descriptor)
+    icp_entry.set_proofs(proofs)
+    return icp_entry.is_valid()
+
+  def _validate_icp_entry_without_setters(
+      self, log_url, log_url_size, leaf_index, log_root_descriptor,
+      log_root_descriptor_size, proof_hash_count, proofs, inc_proof_size):
+    """Create an ICP entry structure and attempt to validate it.
+
+    Returns:
+      True if the tests pass, False otherwise.
+    """
+    icp_entry = aftltool.AftlIcpEntry()
+    icp_entry.log_url = log_url
+    icp_entry.log_url_size = log_url_size
+    icp_entry.leaf_index = leaf_index
+    icp_entry.log_root_descriptor = log_root_descriptor
+    icp_entry.log_root_descriptor_size = log_root_descriptor_size
+    icp_entry.proof_hash_count = proof_hash_count
+    icp_entry.proofs = proofs
+    icp_entry.inc_proof_size = inc_proof_size
+    return icp_entry.is_valid()
+
+  def test_default_icp_entry(self):
+    """Tests default ICP entry structure."""
+    icp_entry = aftltool.AftlIcpEntry()
+    self.assertTrue(icp_entry.is_valid())
+
+  def test_icp_entry_valid(self):
+    """Tests valid ICP entry structures."""
+    self.assertTrue(
+        self._validate_icp_entry_without_setters(
+            self.test_url, len(self.test_url), 2, self.test_sth,
+            self.test_sth.get_expected_size(), 2, self.test_proofs,
+            len(self.test_proofs)))
+
+    self.assertTrue(
+        self._validate_icp_entry_with_setters(
+            self.test_url, 2, self.test_sth, self.test_proofs))
+
+    self.assertTrue(
+        self._validate_icp_entry_without_setters(
+            self.test_url, len(self.test_url), 2, self.test_sth,
+            self.test_sth.get_expected_size(), 2, self.test_proofs,
+            len(self.test_proofs)))
+
+    self.assertTrue(
+        self._validate_icp_entry_with_setters(
+            self.test_url, 2, self.test_sth, self.test_proofs))
+
+  def test_icp_entry_invalid_log_url(self):
+    """Tests ICP entry with invalid log_url / log_url_size combination."""
+    self.assertFalse(
+        self._validate_icp_entry_without_setters(
+            None, 10, 2, self.test_sth, self.test_sth.get_expected_size(),
+            2, self.test_proofs, len(self.test_proofs)))
+
+    self.assertFalse(
+        self._validate_icp_entry_without_setters(
+            '', 10, 2, self.test_sth, self.test_sth.get_expected_size(),
+            2, self.test_proofs, len(self.test_proofs)))
+
+    self.assertFalse(
+        self._validate_icp_entry_without_setters(
+            self.test_url, -2, 2, self.test_sth,
+            self.test_sth.get_expected_size(),
+            2, self.test_proofs, len(self.test_proofs)))
+
+    self.assertFalse(
+        self._validate_icp_entry_without_setters(
+            self.test_url, len(self.test_url) - 3, 2, self.test_sth,
+            self.test_sth.get_expected_size(), 2, self.test_proofs,
+            len(self.test_proofs)))
+
+  def test_icp_entry_invalid_leaf_index(self):
+    """Tests ICP entry with invalid leaf_index."""
+    self.assertFalse(
+        self._validate_icp_entry_without_setters(
+            self.test_url, len(self.test_url), -1, self.test_sth,
+            self.test_sth.get_expected_size(), 2, self.test_proofs,
+            len(self.test_proofs)))
+
+  def test_icp_entry_invalid_sth(self):
+    """Tests ICP entry with invalid STH / STH length."""
+    self.assertFalse(
+        self._validate_icp_entry_without_setters(
+            self.test_url, len(self.test_url), 2, None, 3,
+            2, self.test_proofs, len(self.test_proofs)))
+
+    self.assertFalse(
+        self._validate_icp_entry_without_setters(
+            self.test_url, len(self.test_url), 2, '', 3,
+            2, self.test_proofs, len(self.test_proofs)))
+
+    self.assertFalse(
+        self._validate_icp_entry_without_setters(
+            self.test_url, len(self.test_url), 2, bytearray(), 3,
+            2, self.test_proofs, len(self.test_proofs)))
+
+    self.assertFalse(
+        self._validate_icp_entry_without_setters(
+            self.test_url, len(self.test_url), 2, self.test_sth, -2,
+            2, self.test_proofs, len(self.test_proofs)))
+
+    self.assertFalse(
+        self._validate_icp_entry_without_setters(
+            self.test_url, len(self.test_url), 2,
+            self.test_sth, self.test_sth.get_expected_size() + 14,
+            2, self.test_proofs, len(self.test_proofs)))
+
+  def test_icp_entry_invalid_proof_hash_count(self):
+    """Tests ICP entry with invalid proof_hash_count."""
+    self.assertFalse(
+        self._validate_icp_entry_without_setters(
+            self.test_url, len(self.test_url), 2, self.test_sth,
+            self.test_sth.get_expected_size(), -2, self.test_proofs,
+            len(self.test_proofs)))
+
+  def test_icp_entry_invalid_proofs(self):
+    """Tests ICP entry with invalid proofs / proof size."""
+    self.assertFalse(
+        self._validate_icp_entry_without_setters(
+            self.test_url, len(self.test_url), 2, self.test_sth,
+            self.test_sth.get_expected_size(), 2, [], len(self.test_proofs)))
+
+    self.assertFalse(
+        self._validate_icp_entry_without_setters(
+            self.test_url, len(self.test_url), 2, self.test_sth,
+            self.test_sth.get_expected_size(), 2, '', len(self.test_proofs)))
+
+    self.assertFalse(
+        self._validate_icp_entry_without_setters(
+            self.test_url, len(self.test_url), 2, self.test_sth,
+            self.test_sth.get_expected_size(), 2, bytearray(),
+            len(self.test_proofs)))
+
+    self.assertFalse(
+        self._validate_icp_entry_without_setters(
+            self.test_url, len(self.test_url), 2, self.test_sth,
+            self.test_sth.get_expected_size(), 2, self.test_proofs,
+            len(self.test_proofs) - 3))
+
+  def test_merkle_root_hash(self):
+    """Tests validation of inclusion proof and the merkle tree calculations.
+
+    The test vectors have been taken from the Trillian tests:
+    https://github.com/google/trillian/blob/v1.3.3/merkle/log_verifier_test.go
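+
+    Per RFC 6962, the leaf hash is computed as SHA-256(0x00 || leaf) and an
+    interior node hash as SHA-256(0x01 || left || right).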
+    """
+
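+    # Each tuple is (leaf index (1-based), tree size, inclusion proof hashes).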
+    inclusion_proofs = [
+        (1,
+         8,
+         [
+             binascii.unhexlify('96a296d224f285c67bee93c30f8a3091'
+                                '57f0daa35dc5b87e410b78630a09cfc7'),
+             binascii.unhexlify('5f083f0a1a33ca076a95279832580db3'
+                                'e0ef4584bdff1f54c8a360f50de3031e'),
+             binascii.unhexlify('6b47aaf29ee3c2af9af889bc1fb9254d'
+                                'abd31177f16232dd6aab035ca39bf6e4')
+         ]),
+        (6,
+         8,
+         [
+             binascii.unhexlify('bc1a0643b12e4d2d7c77918f44e0f4f7'
+                                '9a838b6cf9ec5b5c283e1f4d88599e6b'),
+             binascii.unhexlify('ca854ea128ed050b41b35ffc1b87b8eb'
+                                '2bde461e9e3b5596ece6b9d5975a0ae0'),
+             binascii.unhexlify('d37ee418976dd95753c1c73862b9398f'
+                                'a2a2cf9b4ff0fdfe8b30cd95209614b7')
+         ]),
+        (3,
+         3,
+         [
+             binascii.unhexlify('fac54203e7cc696cf0dfcb42c92a1d9d'
+                                'baf70ad9e621f4bd8d98662f00e3c125')
+         ]),
+        (2,
+         5,
+         [
+             binascii.unhexlify('6e340b9cffb37a989ca544e6bb780a2c'
+                                '78901d3fb33738768511a30617afa01d'),
+             binascii.unhexlify('5f083f0a1a33ca076a95279832580db3'
+                                'e0ef4584bdff1f54c8a360f50de3031e'),
+             binascii.unhexlify('bc1a0643b12e4d2d7c77918f44e0f4f7'
+                                '9a838b6cf9ec5b5c283e1f4d88599e6b')
+         ]
+        )
+    ]
+
+    leaves = [
+        binascii.unhexlify(''),
+        binascii.unhexlify('00'),
+        binascii.unhexlify('10'),
+        binascii.unhexlify('2021'),
+        binascii.unhexlify('3031'),
+        binascii.unhexlify('40414243'),
+        binascii.unhexlify('5051525354555657'),
+        binascii.unhexlify('606162636465666768696a6b6c6d6e6f'),
+    ]
+
+    roots = [
+        binascii.unhexlify('6e340b9cffb37a989ca544e6bb780a2c'
+                           '78901d3fb33738768511a30617afa01d'),
+        binascii.unhexlify('fac54203e7cc696cf0dfcb42c92a1d9d'
+                           'baf70ad9e621f4bd8d98662f00e3c125'),
+        binascii.unhexlify('aeb6bcfe274b70a14fb067a5e5578264'
+                           'db0fa9b51af5e0ba159158f329e06e77'),
+        binascii.unhexlify('d37ee418976dd95753c1c73862b9398f'
+                           'a2a2cf9b4ff0fdfe8b30cd95209614b7'),
+        binascii.unhexlify('4e3bbb1f7b478dcfe71fb631631519a3'
+                           'bca12c9aefca1612bfce4c13a86264d4'),
+        binascii.unhexlify('76e67dadbcdf1e10e1b74ddc608abd2f'
+                           '98dfb16fbce75277b5232a127f2087ef'),
+        binascii.unhexlify('ddb89be403809e325750d3d263cd7892'
+                           '9c2942b7942a34b77e122c9594a74c8c'),
+        binascii.unhexlify('5dc9da79a70659a9ad559cb701ded9a2'
+                           'ab9d823aad2f4960cfe370eff4604328'),
+    ]
+
+    for icp in inclusion_proofs:
+      leaf_id = icp[0] - 1
+      leaf_hash = aftltool.rfc6962_hash_leaf(leaves[leaf_id])
+      root_hash = aftltool.root_from_icp(leaf_id, icp[1], icp[2], leaf_hash)
+      self.assertEqual(root_hash, roots[icp[1] - 1])
+
+
+class AftlDescriptorTest(AftltoolTestCase):
+
+  def test__init__(self):
+    """Tests the constructor."""
+    # Calls constructor without data.
+    d = aftltool.AftlDescriptor()
+    self.assertTrue(isinstance(d.icp_header, aftltool.AftlIcpHeader))
+    self.assertEqual(d.icp_header.icp_count, 0)
+    self.assertEqual(d.icp_entries, [])
+    self.assertTrue(d.is_valid())
+
+    # Calls constructor with data.
+    d = aftltool.AftlDescriptor(self.test_expected_aftl_descriptor_bytes)
+    self.assertTrue(isinstance(d.icp_header, aftltool.AftlIcpHeader))
+    self.assertEqual(d.icp_header.icp_count, 2)
+    self.assertEqual(len(d.icp_entries), 2)
+    for entry in d.icp_entries:
+      self.assertTrue(isinstance(entry, aftltool.AftlIcpEntry))
+    self.assertTrue(d.is_valid())
+
+  def test_add_icp_entry(self):
+    """Tests the add_icp_entry method."""
+    d = aftltool.AftlDescriptor()
+
+    # Adds 1st ICP.
+    d.add_icp_entry(self.test_entry_1)
+    self.assertEqual(d.icp_header.icp_count, 1)
+    self.assertEqual(len(d.icp_entries), 1)
+    self.assertTrue(d.is_valid())
+
+    # Adds 2nd ICP.
+    d.add_icp_entry(self.test_entry_2)
+    self.assertEqual(d.icp_header.icp_count, 2)
+    self.assertEqual(len(d.icp_entries), 2)
+    self.assertTrue(d.is_valid())
+
+  def test_save(self):
+    """Tests save method."""
+    buf = io.BytesIO()
+    self.test_aftl_desc.save(buf)
+    self.assertEqual(buf.getvalue(), self.test_expected_aftl_descriptor_bytes)
+
+  def test_encode(self):
+    """Tests encode method."""
+    desc_bytes = self.test_aftl_desc.encode()
+    self.assertEqual(desc_bytes, self.test_expected_aftl_descriptor_bytes)
+
+  def test_is_valid(self):
+    """Tests is_valid method."""
+    d = aftltool.AftlDescriptor()
+    d.add_icp_entry(self.test_entry_1)
+    d.add_icp_entry(self.test_entry_2)
+
+    # Force invalid icp header
+    old_magic = d.icp_header.magic
+    d.icp_header.magic = 'YOLO'
+    self.assertFalse(d.is_valid())
+    d.icp_header.magic = old_magic
+    self.assertTrue(d.is_valid())
+
+    # Force count mismatch between header and actual entries.
+    old_icp_count = d.icp_header.icp_count
+    d.icp_header.icp_count = 1
+    self.assertFalse(d.is_valid())
+    d.icp_header.icp_count = old_icp_count
+    self.assertTrue(d.is_valid())
+
+    # Force invalid icp_entry.
+    old_log_url_size = d.icp_entries[0].log_url_size
+    d.icp_entries[0].log_url_size = 0
+    self.assertFalse(d.is_valid())
+    d.icp_entries[0].log_url_size = old_log_url_size
+    self.assertTrue(d.is_valid())
+
+  def test_print_desc(self):
+    """Tests print_desc method."""
+    buf = io.BytesIO()
+    self.test_aftl_desc.print_desc(buf)
+    desc = buf.getvalue()
+
+    # Cursory check whether the printed description contains something useful.
+    self.assertGreater(len(desc), 0)
+    self.assertTrue('Log Root Descriptor:' in desc)
+
+
+class AftlIcpHeaderTest(AftltoolTestCase):
+  """Test suite for testing the AftlIcpHeader descriptor."""
+
+  def setUp(self):
+    """Sets up the test bed for the unit tests."""
+    super(AftlIcpHeaderTest, self).setUp()
+
+    self.test_header_valid = aftltool.AftlIcpHeader()
+    self.test_header_valid.icp_count = 1
+
+    self.test_header_invalid = aftltool.AftlIcpHeader()
+    self.test_header_invalid.icp_count = -34
+
+    self.test_header_bytes = bytearray('\x41\x46\x54\x4c\x00\x00\x00\x01'
+                                       '\x00\x00\x00\x01\x00\x00\x00\x12'
+                                       '\x00\x01')
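+    # The bytes above decode as: magic 'AFTL', major version 1, minor
+    # version 1, descriptor size 18 (0x12) and an ICP count of 1.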
+
+  def test__init__(self):
+    """Tests default ICP header structure."""
+
+    # Calls constructor without data.
+    header = aftltool.AftlIcpHeader()
+    self.assertEqual(header.magic, 'AFTL')
+    self.assertEqual(header.required_icp_version_major,
+                     avbtool.AVB_VERSION_MAJOR)
+    self.assertEqual(header.required_icp_version_minor,
+                     avbtool.AVB_VERSION_MINOR)
+    self.assertEqual(header.aftl_descriptor_size, aftltool.AftlIcpHeader.SIZE)
+    self.assertEqual(header.icp_count, 0)
+    self.assertTrue(header.is_valid())
+
+    # Calls constructor with data.
+    header = aftltool.AftlIcpHeader(self.test_header_bytes)
+    self.assertEqual(header.magic, 'AFTL')
+    self.assertEqual(header.required_icp_version_major, 1)
+    self.assertEqual(header.required_icp_version_minor, 1)
+    self.assertEqual(header.aftl_descriptor_size, aftltool.AftlIcpHeader.SIZE)
+    self.assertEqual(header.icp_count, 1)
+    self.assertTrue(header.is_valid())
+
+  def test_save(self):
+    """Tests ICP header save method."""
+    buf = io.BytesIO()
+    self.test_header_valid.save(buf)
+    self.assertEqual(buf.getvalue(), self.test_header_bytes)
+
+  def test_encode(self):
+    """Tests ICP header encoding."""
+    # Valid header.
+    header_bytes = self.test_header_valid.encode()
+    self.assertEqual(header_bytes, self.test_header_bytes)
+
+    # Invalid header
+    with self.assertRaises(aftltool.AftlError):
+      header_bytes = self.test_header_invalid.encode()
+
+  def test_is_valid(self):
+    """Tests the is_valid method."""
+    # Valid default structure.
+    header = aftltool.AftlIcpHeader()
+    self.assertTrue(header.is_valid())
+
+    # Invalid magic.
+    header = aftltool.AftlIcpHeader()
+    header.magic = 'YOLO'
+    self.assertFalse(header.is_valid())
+
+    # Valid ICP count.
+    self.assertTrue(self.test_header_valid.is_valid())
+
+    # Invalid ICP count.
+    self.assertFalse(self.test_header_invalid.is_valid())
+
+    header = aftltool.AftlIcpHeader()
+    header.icp_count = 10000000
+    self.assertFalse(header.is_valid())
+
+    # Invalid ICP major version.
+    header = aftltool.AftlIcpHeader()
+    header.required_icp_version_major = avbtool.AVB_VERSION_MAJOR + 1
+    self.assertFalse(header.is_valid())
+
+    # Invalid ICP minor version.
+    header = aftltool.AftlIcpHeader()
+    header.required_icp_version_minor = avbtool.AVB_VERSION_MINOR + 1
+    self.assertFalse(header.is_valid())
+
+  def test_print_desc(self):
+    """Tests print_desc method."""
+    buf = io.BytesIO()
+    self.test_header_valid.print_desc(buf)
+    desc = buf.getvalue()
+
+    # Cursory check whether the printed description contains something useful.
+    self.assertGreater(len(desc), 0)
+    self.assertTrue('Major version:' in desc)
+
+
+class TrillianLogRootDescriptorTest(AftltoolTestCase):
+  """Test suite for testing the TrillianLogRootDescriptorTest descriptor."""
+
+  def setUp(self):
+    """Sets up the test bed for the unit tests."""
+    super(TrillianLogRootDescriptorTest, self).setUp()
+
+    # Creates basic log root without metadata fields.
+    base_log_root = (
+        '0001'                              # version
+        '00000000000002e5'                  # tree_size
+        '20'                                # root_hash_size
+        '2d614759ad408a111a3351c0cb33c099'  # root_hash
+        '422c30a5c5104788a343332bde2b387b'
+        '15e1c97e3b4bd239'                  # timestamp
+        '00000000000002e4'                  # revision
+    )
+
+    # Creates valid log roots w/ and w/o metadata.
+    self.test_log_root_bytes_wo_metadata = binascii.unhexlify(
+        base_log_root + '0000')
+    self.test_log_root_bytes_with_metadata = binascii.unhexlify(
+        base_log_root + '00023132')
+
+  def test__init__(self):
+    """Tests constructor."""
+    # Calls constructor without data.
+    d = aftltool.TrillianLogRootDescriptor()
+    self.assertTrue(d.is_valid())
+    self.assertEqual(d.version, 1)
+    self.assertEqual(d.tree_size, 0)
+    self.assertEqual(d.root_hash_size, 0)
+    self.assertEqual(d.root_hash, bytearray())
+    self.assertEqual(d.timestamp, 0)
+    self.assertEqual(d.revision, 0)
+    self.assertEqual(d.metadata_size, 0)
+    self.assertEqual(d.metadata, bytearray())
+
+    # Calls constructor with log_root w/o metadata
+    d = aftltool.TrillianLogRootDescriptor(self.test_log_root_bytes_wo_metadata)
+    self.assertTrue(d.is_valid())
+    self.assertEqual(d.version, 1)
+    self.assertEqual(d.tree_size, 741)
+    self.assertEqual(d.root_hash_size, 32)
+    self.assertEqual(d.root_hash,
+                     binascii.unhexlify('2d614759ad408a111a3351c0cb33c099'
+                                        '422c30a5c5104788a343332bde2b387b'))
+    self.assertEqual(d.timestamp, 1576762888554271289)
+    self.assertEqual(d.revision, 740)
+    self.assertEqual(d.metadata_size, 0)
+    self.assertEqual(d.metadata, bytearray())
+
+    # Calls constructor with log_root with metadata
+    d = aftltool.TrillianLogRootDescriptor(
+        self.test_log_root_bytes_with_metadata)
+    self.assertEqual(d.metadata_size, 2)
+    self.assertEqual(d.metadata, bytearray('12'))
+
+  def test_get_expected_size(self):
+    """Tests get_expected_size method."""
+    # Default constructor.
+    d = aftltool.TrillianLogRootDescriptor()
+    self.assertEqual(d.get_expected_size(), 11 + 18)
+
+    # Log root without metadata.
+    d = aftltool.TrillianLogRootDescriptor(self.test_log_root_bytes_wo_metadata)
+    self.assertEqual(d.get_expected_size(), 11 + 18 + 32)
+
+    # Log root with metadata.
+    d = aftltool.TrillianLogRootDescriptor(
+        self.test_log_root_bytes_with_metadata)
+    self.assertEqual(d.get_expected_size(), 11 + 18 + 32 + 2)
+
+  def test_encode(self):
+    """Tests encode method."""
+    # Log root from default constructor.
+    d = aftltool.TrillianLogRootDescriptor()
+    expected_bytes = (
+        '0001'                              # version
+        '0000000000000000'                  # tree_size
+        '00'                                # root_hash_size
+        ''                                  # root_hash (empty)
+        '0000000000000000'                  # timestamp
+        '0000000000000000'                  # revision
+        '0000'                              # metadata size
+        ''                                  # metadata (empty)
+    )
+    self.assertEqual(d.encode(), binascii.unhexlify(expected_bytes))
+
+    # Log root without metadata.
+    d = aftltool.TrillianLogRootDescriptor(self.test_log_root_bytes_wo_metadata)
+    self.assertEqual(d.encode(), self.test_log_root_bytes_wo_metadata)
+
+    # Log root with metadata.
+    d = aftltool.TrillianLogRootDescriptor(
+        self.test_log_root_bytes_with_metadata)
+    self.assertEqual(d.encode(), self.test_log_root_bytes_with_metadata)
+
+  def test_is_valid(self):
+    """Tests the is_valid method."""
+    d = aftltool.TrillianLogRootDescriptor()
+    self.assertTrue(d.is_valid())
+
+    # Invalid version.
+    d = aftltool.TrillianLogRootDescriptor()
+    d.version = 2
+    self.assertFalse(d.is_valid())
+
+    # Invalid tree_size.
+    d = aftltool.TrillianLogRootDescriptor()
+    d.tree_size = -1
+    self.assertFalse(d.is_valid())
+
+    # Invalid root_hash_size.
+    d = aftltool.TrillianLogRootDescriptor()
+    d.root_hash_size = -1
+    self.assertFalse(d.is_valid())
+    d.root_hash_size = 300
+    self.assertFalse(d.is_valid())
+
+    # Invalid/valid root_hash_size / root_hash combination.
+    d = aftltool.TrillianLogRootDescriptor()
+    d.root_hash_size = 4
+    d.root_hash = '123'
+    self.assertFalse(d.is_valid())
+    d.root_hash = '1234'
+    self.assertTrue(d.is_valid())
+
+    # Invalid timestamp.
+    d = aftltool.TrillianLogRootDescriptor()
+    d.timestamp = -1
+    self.assertFalse(d.is_valid())
+
+    # Invalid revision.
+    d = aftltool.TrillianLogRootDescriptor()
+    d.revision = -1
+    self.assertFalse(d.is_valid())
+
+    # Invalid metadata_size.
+    d = aftltool.TrillianLogRootDescriptor()
+    d.metadata_size = -1
+    self.assertFalse(d.is_valid())
+    d.metadata_size = 70000
+    self.assertFalse(d.is_valid())
+
+    # Invalid/valid metadata_size / metadata combination.
+    d = aftltool.TrillianLogRootDescriptor()
+    d.metadata_size = 4
+    d.metadata = '123'
+    self.assertFalse(d.is_valid())
+    d.metadata = '1234'
+    self.assertTrue(d.is_valid())
+
+  def test_print_desc(self):
+    """Tests print_desc method."""
+    # Log root without metadata
+    buf = io.BytesIO()
+    d = aftltool.TrillianLogRootDescriptor(self.test_log_root_bytes_wo_metadata)
+    d.print_desc(buf)
+    desc = buf.getvalue()
+
+    # Cursory check whether the printed description contains something useful.
+    self.assertGreater(len(desc), 0)
+    self.assertTrue('Version:' in desc)
+    self.assertFalse('Metadata:' in desc)
+
+    # Log root with metadata
+    buf = io.BytesIO()
+    d = aftltool.TrillianLogRootDescriptor(
+        self.test_log_root_bytes_with_metadata)
+    d.print_desc(buf)
+    desc = buf.getvalue()
+
+    # Cursory check whether the printed description contains something useful.
+    self.assertGreater(len(desc), 0)
+    self.assertTrue('Version:' in desc)
+    self.assertTrue('Metadata:' in desc)
+
+
+class AftlMockCommunication(aftltool.AftlCommunication):
+  """Testing Mock implementation of AftlCommunication."""
+
+  def __init__(self, transparency_log, canned_response):
+    """Initializes the object.
+
+    Arguments:
+      transparency_log: String containing the URL of a transparency log server.
+      canned_response: AddFirmwareInfoResponse to return or the Exception to
+        raise.
+    """
+    super(AftlMockCommunication, self).__init__(transparency_log)
+    self.request = None
+    self.canned_response = canned_response
+
+  def AddFirmwareInfo(self, request):
+    """Records the request and returns the canned response."""
+    self.request = request
+
+    if isinstance(self.canned_response, aftltool.AftlError):
+      raise self.canned_response
+    return self.canned_response
+
+
+class AftlTest(AftltoolTestCase):
+
+  def setUp(self):
+    """Sets up the test bed for the unit tests."""
+    super(AftlTest, self).setUp()
+    self.mock_aftl_host = 'test.foo.bar:9000'
+
+  # pylint: disable=no-member
+  def test_request_inclusion_proof(self):
+    """Tests the request_inclusion_proof method."""
+    aftl_comms = AftlMockCommunication(self.mock_aftl_host, self.test_afi_resp)
+    aftl = aftltool.Aftl()
+    icp = aftl.request_inclusion_proof(self.mock_aftl_host,
+                                       'a'*1024, 'version_inc',
+                                       'test/data/testkey_rsa4096.pem',
+                                       None, None,
+                                       aftl_comms=aftl_comms)
+    self.assertEqual(icp.leaf_index,
+                     self.test_afi_resp.fw_info_proof.proof.leaf_index)
+    self.assertEqual(icp.proof_hash_count,
+                     len(self.test_afi_resp.fw_info_proof.proof.hashes))
+    self.assertEqual(icp.log_url, self.mock_aftl_host)
+    self.assertEqual(
+        icp.log_root_descriptor.root_hash, binascii.unhexlify(
+            '53b182b55dc1377197c938637f50093131daea4d0696b1eae5b8a014bfde884a'))
+
+    self.assertEqual(icp.fw_info_leaf.version_incremental, 'version_inc')
+    # To calculate the hash of an RSA key, use the following command:
+    # openssl rsa -in test/data/testkey_rsa4096.pem -pubout \
+    #    -outform DER | sha256sum
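+    # An equivalent sketch in Python, assuming |pub_der| holds the DER bytes:
+    #   binascii.hexlify(hashlib.sha256(pub_der).digest())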
+    self.assertEqual(icp.fw_info_leaf.manufacturer_key_hash, binascii.unhexlify(
+        '9841073d16a7abbe21059e026da71976373d8f74fdb91cc46aa0a7d622b925b9'))
+
+    self.assertEqual(icp.log_root_signature,
+                     self.test_afi_resp.fw_info_proof.sth.log_root_signature)
+    self.assertEqual(icp.proofs, self.test_afi_resp.fw_info_proof.proof.hashes)
+
+  # pylint: disable=no-member
+  def test_request_inclusion_proof_failure(self):
+    """Tests the request_inclusion_proof_method in case of a comms problem."""
+    aftl_comms = AftlMockCommunication(self.mock_aftl_host,
+                                       aftltool.AftlError('Comms error'))
+    aftl = aftltool.Aftl()
+    with self.assertRaises(aftltool.AftlError):
+      aftl.request_inclusion_proof(self.mock_aftl_host,
+                                   'a'*1024, 'version_inc',
+                                   'test/data/testkey_rsa4096.pem',
+                                   None, None,
+                                   aftl_comms=aftl_comms)
+
+
+if __name__ == '__main__':
+  unittest.main(verbosity=2)
diff --git a/avbtool b/avbtool
index 92c024c..2523449 100755
--- a/avbtool
+++ b/avbtool
@@ -24,6 +24,8 @@
 #
 """Command-line tool for working with Android Verified Boot images."""
 
+from __future__ import print_function
+
 import argparse
 import binascii
 import bisect
@@ -106,7 +108,7 @@
         hash_name='sha256',
         hash_num_bytes=32,
         signature_num_bytes=256,
-        public_key_num_bytes=8 + 2*2048/8,
+        public_key_num_bytes=8 + 2*2048//8,
         padding=[
             # PKCS1-v1_5 padding
             0x00, 0x01] + [0xff]*202 + [0x00] + [
@@ -120,7 +122,7 @@
         hash_name='sha256',
         hash_num_bytes=32,
         signature_num_bytes=512,
-        public_key_num_bytes=8 + 2*4096/8,
+        public_key_num_bytes=8 + 2*4096//8,
         padding=[
             # PKCS1-v1_5 padding
             0x00, 0x01] + [0xff]*458 + [0x00] + [
@@ -134,7 +136,7 @@
         hash_name='sha256',
         hash_num_bytes=32,
         signature_num_bytes=1024,
-        public_key_num_bytes=8 + 2*8192/8,
+        public_key_num_bytes=8 + 2*8192//8,
         padding=[
             # PKCS1-v1_5 padding
             0x00, 0x01] + [0xff]*970 + [0x00] + [
@@ -148,7 +150,7 @@
         hash_name='sha512',
         hash_num_bytes=64,
         signature_num_bytes=256,
-        public_key_num_bytes=8 + 2*2048/8,
+        public_key_num_bytes=8 + 2*2048//8,
         padding=[
             # PKCS1-v1_5 padding
             0x00, 0x01] + [0xff]*170 + [0x00] + [
@@ -162,7 +164,7 @@
         hash_name='sha512',
         hash_num_bytes=64,
         signature_num_bytes=512,
-        public_key_num_bytes=8 + 2*4096/8,
+        public_key_num_bytes=8 + 2*4096//8,
         padding=[
             # PKCS1-v1_5 padding
             0x00, 0x01] + [0xff]*426 + [0x00] + [
@@ -176,7 +178,7 @@
         hash_name='sha512',
         hash_num_bytes=64,
         signature_num_bytes=1024,
-        public_key_num_bytes=8 + 2*8192/8,
+        public_key_num_bytes=8 + 2*8192//8,
         padding=[
             # PKCS1-v1_5 padding
             0x00, 0x01] + [0xff]*938 + [0x00] + [
@@ -199,7 +201,7 @@
 def round_to_multiple(number, size):
   """Rounds a number up to nearest multiple of another number.
 
-  Args:
+  Arguments:
     number: The number to round up.
     size: The multiple to round up to.
 
@@ -216,7 +218,7 @@
 def round_to_pow2(number):
   """Rounds a number up to the next power of 2.
 
-  Args:
+  Arguments:
     number: The number to round up.
 
   Returns:
@@ -258,7 +260,7 @@
   This is the reverse of encode_long().
 
   Arguments:
-    value: A bytearray() with the encoded long.
+    blob: A bytearray() with the encoded long.
 
   Returns:
     The decoded value.
@@ -286,9 +288,8 @@
   """
   if a == 0:
     return (b, 0, 1)
-  else:
-    g, y, x = egcd(b % a, a)
-    return (g, x - (b // a) * y, y)
+  g, y, x = egcd(b % a, a)
+  return (g, x - (b // a) * y, y)
 
 
 def modinv(a, m):
@@ -309,8 +310,7 @@
   gcd, x, _ = egcd(a, m)
   if gcd != 1:
     return None  # modular inverse does not exist
-  else:
-    return x % m
+  return x % m
 
 
 def parse_number(string):
@@ -349,6 +349,9 @@
 
     Arguments:
       key_path: The path to a key file.
+
+    Raises:
+      AvbError: If RSA key parameters could not be read from file.
     """
     # We used to have something as simple as this:
     #
@@ -401,16 +404,19 @@
 
   Returns:
     A bytearray() with the |AvbRSAPublicKeyHeader|.
+
+  Raises:
+    AvbError: If given RSA key exponent is not 65537.
   """
   key = RSAPublicKey(key_path)
   if key.exponent != 65537:
     raise AvbError('Only RSA keys with exponent 65537 are supported.')
   ret = bytearray()
   # Calculate n0inv = -1/n[0] (mod 2^32)
-  b = 2L**32
+  b = 2L**32  # pylint: disable=long-suffix
   n0inv = b - modinv(key.modulus, b)
   # Calculate rr = r^2 (mod N), where r = 2^(# of key bits)
-  r = 2L**key.modulus.bit_length()
+  r = 2L**key.modulus.bit_length()  # pylint: disable=long-suffix
   rrmodn = r * r % key.modulus
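+  # n0inv and rr are the precomputed Montgomery-multiplication constants that
+  # libavb's RSA code (avb_rsa.c) expects alongside the modulus.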
   ret.extend(struct.pack('!II', key.num_bits, n0inv))
   ret.extend(encode_long(key.num_bits, key.modulus))
@@ -437,6 +443,25 @@
   raise AvbError('Unknown algorithm type {}'.format(alg_type))
 
 
+def lookup_hash_size_by_type(alg_type):
+  """Looks up hash size by type.
+
+  Arguments:
+    alg_type: The integer representing the type.
+
+  Returns:
+    The corresponding hash size.
+
+  Raises:
+    AvbError: If the algorithm cannot be found.
+  """
+  for alg_name in ALGORITHMS:
+    alg_data = ALGORITHMS[alg_name]
+    if alg_data.algorithm_type == alg_type:
+      return alg_data.hash_num_bytes
+  raise AvbError('Unsupported algorithm type {}'.format(alg_type))
+
+
 def raw_sign(signing_helper, signing_helper_with_files,
              algorithm_name, signature_num_bytes, key_path,
              raw_data_to_sign):
@@ -461,8 +486,8 @@
     signing_file = tempfile.NamedTemporaryFile()
     signing_file.write(str(raw_data_to_sign))
     signing_file.flush()
-    p = subprocess.Popen(
-      [signing_helper_with_files, algorithm_name, key_path, signing_file.name])
+    p = subprocess.Popen([
+        signing_helper_with_files, algorithm_name, key_path, signing_file.name])
     retcode = p.wait()
     if retcode != 0:
       raise AvbError('Error signing')
@@ -492,8 +517,7 @@
 
 
 def verify_vbmeta_signature(vbmeta_header, vbmeta_blob):
-  """Checks that the signature in a vbmeta blob was made by
-     the embedded public key.
+  """Checks that signature in a vbmeta blob was made by the embedded public key.
 
   Arguments:
     vbmeta_header: A AvbVBMetaHeader.
@@ -502,9 +526,13 @@
   Returns:
     True if the signature is valid and corresponds to the embedded
     public key. Also returns True if the vbmeta blob is not signed.
+
+  Raises:
+    AvbError: If there are errors calling out to the openssl command during
+        signature verification.
   """
   (_, alg) = lookup_algorithm_by_type(vbmeta_header.algorithm_type)
-  if alg.hash_name == '':
+  if not alg.hash_name:
     return True
   header_blob = vbmeta_blob[0:256]
   auth_offset = 256
@@ -540,7 +568,7 @@
   padding_and_digest.extend(computed_digest)
 
   (num_bits,) = struct.unpack('!I', pubkey_blob[0:4])
-  modulus_blob = pubkey_blob[8:8 + num_bits/8]
+  modulus_blob = pubkey_blob[8:8 + num_bits//8]
   modulus = decode_long(modulus_blob)
   exponent = 65537
 
@@ -567,19 +595,22 @@
               '\n'
               '[rsapubkey]\n'
               'n=INTEGER:%s\n'
-              'e=INTEGER:%s\n' % (hex(modulus).rstrip('L'), hex(exponent).rstrip('L')))
+              'e=INTEGER:%s\n' % (hex(modulus).rstrip('L'),
+                                  hex(exponent).rstrip('L')))
   asn1_tmpfile = tempfile.NamedTemporaryFile()
   asn1_tmpfile.write(asn1_str)
   asn1_tmpfile.flush()
   der_tmpfile = tempfile.NamedTemporaryFile()
   p = subprocess.Popen(
-      ['openssl', 'asn1parse', '-genconf', asn1_tmpfile.name, '-out', der_tmpfile.name, '-noout'])
+      ['openssl', 'asn1parse', '-genconf', asn1_tmpfile.name, '-out',
+       der_tmpfile.name, '-noout'])
   retcode = p.wait()
   if retcode != 0:
     raise AvbError('Error generating DER file')
 
   p = subprocess.Popen(
-      ['openssl', 'rsautl', '-verify', '-pubin', '-inkey', der_tmpfile.name, '-keyform', 'DER', '-raw'],
+      ['openssl', 'rsautl', '-verify', '-pubin', '-inkey', der_tmpfile.name,
+       '-keyform', 'DER', '-raw'],
       stdin=subprocess.PIPE,
       stdout=subprocess.PIPE,
       stderr=subprocess.PIPE)
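+  # 'rsautl -raw' performs the raw RSA public-key operation; the signature
+  # verifies if the recovered block equals the expected padding and digest.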
@@ -694,6 +725,9 @@
       ValueError: If data in the file is invalid.
     """
     self.filename = image_filename
+    self._num_total_blocks = 0
+    self._num_total_chunks = 0
+    self._file_pos = 0
     self._read_header()
 
   def _read_header(self):
@@ -741,7 +775,7 @@
     # image.
     offset = 0
     output_offset = 0
-    for _ in xrange(1, self._num_total_chunks + 1):
+    for _ in range(1, self._num_total_chunks + 1):
       chunk_offset = self._image.tell()
 
       header_bin = self._image.read(struct.calcsize(ImageChunk.FORMAT))
@@ -847,14 +881,14 @@
       return
 
     self._num_total_chunks += 1
-    self._num_total_blocks += num_bytes / self.block_size
+    self._num_total_blocks += num_bytes // self.block_size
     self._update_chunks_and_blocks()
 
     self._image.seek(self._sparse_end, os.SEEK_SET)
     self._image.write(struct.pack(ImageChunk.FORMAT,
                                   ImageChunk.TYPE_DONT_CARE,
                                   0,  # Reserved
-                                  num_bytes / self.block_size,
+                                  num_bytes // self.block_size,
                                   struct.calcsize(ImageChunk.FORMAT)))
     self._read_header()
 
@@ -875,14 +909,14 @@
       return
 
     self._num_total_chunks += 1
-    self._num_total_blocks += len(data) / self.block_size
+    self._num_total_blocks += len(data) // self.block_size
     self._update_chunks_and_blocks()
 
     self._image.seek(self._sparse_end, os.SEEK_SET)
     self._image.write(struct.pack(ImageChunk.FORMAT,
                                   ImageChunk.TYPE_RAW,
                                   0,  # Reserved
-                                  len(data) / self.block_size,
+                                  len(data) // self.block_size,
                                   len(data) +
                                   struct.calcsize(ImageChunk.FORMAT)))
     self._image.write(data)
@@ -903,19 +937,19 @@
 
     if not self.is_sparse:
       self._image.seek(0, os.SEEK_END)
-      self._image.write(fill_data * (size/4))
+      self._image.write(fill_data * (size//4))
       self._read_header()
       return
 
     self._num_total_chunks += 1
-    self._num_total_blocks += size / self.block_size
+    self._num_total_blocks += size // self.block_size
     self._update_chunks_and_blocks()
 
     self._image.seek(self._sparse_end, os.SEEK_SET)
     self._image.write(struct.pack(ImageChunk.FORMAT,
                                   ImageChunk.TYPE_FILL,
                                   0,  # Reserved
-                                  size / self.block_size,
+                                  size // self.block_size,
                                   4 + struct.calcsize(ImageChunk.FORMAT)))
     self._image.write(fill_data)
     self._read_header()
@@ -925,9 +959,12 @@
 
     Arguments:
       offset: Offset to seek to from the beginning of the file.
+
+    Raises:
+      RuntimeError: If the given offset is negative.
     """
     if offset < 0:
-      raise RuntimeError("Seeking with negative offset: %d" % offset)
+      raise RuntimeError('Seeking with negative offset: %d' % offset)
     self._file_pos = offset
 
   def read(self, size):
@@ -966,7 +1003,7 @@
         self._image.seek(chunk.input_offset + chunk_pos_offset)
         data.extend(self._image.read(chunk_pos_to_go))
       elif chunk.chunk_type == ImageChunk.TYPE_FILL:
-        all_data = chunk.fill_data*(chunk_pos_to_go/len(chunk.fill_data) + 2)
+        all_data = chunk.fill_data*(chunk_pos_to_go // len(chunk.fill_data) + 2)
         offset_mod = chunk_pos_offset % len(chunk.fill_data)
         data.extend(all_data[offset_mod:(offset_mod + chunk_pos_to_go)])
       else:
@@ -1032,7 +1069,7 @@
           assert chunk.chunk_type == ImageChunk.TYPE_DONT_CARE
           truncate_at = chunk.chunk_offset + struct.calcsize(ImageChunk.FORMAT)
           data_sz = 0
-        chunk_sz = num_to_keep/self.block_size
+        chunk_sz = num_to_keep // self.block_size
         total_sz = data_sz + struct.calcsize(ImageChunk.FORMAT)
         self._image.seek(chunk.chunk_offset)
         self._image.write(struct.pack(ImageChunk.FORMAT,
@@ -1049,7 +1086,7 @@
       self._num_total_chunks = chunk_idx_for_update
       self._num_total_blocks = 0
       for i in range(0, chunk_idx_for_update):
-        self._num_total_blocks += self._chunks[i].output_size / self.block_size
+        self._num_total_blocks += self._chunks[i].output_size // self.block_size
       self._update_chunks_and_blocks()
       self._image.truncate(truncate_at)
 
@@ -1128,16 +1165,22 @@
       image_dir: The directory of the file being verified.
       image_ext: The extension of the file being verified (e.g. '.img').
       expected_chain_partitions_map: A map from partition name to the
-        tuple (rollback_index_location, key_blob).
+          tuple (rollback_index_location, key_blob).
       image_containing_descriptor: The image the descriptor is in.
-      accept_zeroed_hashtree: If True, don't fail if hashtree or FEC data is zeroed out.
+      accept_zeroed_hashtree: If True, don't fail if hashtree or FEC data is
+          zeroed out.
 
     Returns:
       True if the descriptor verifies, False otherwise.
     """
+    # Delete unused parameters to silence pylint's unused-argument warning.
+    del image_dir, image_ext, expected_chain_partitions_map
+    del image_containing_descriptor, accept_zeroed_hashtree
+
     # Nothing to do.
     return True
 
+
 class AvbPropertyDescriptor(AvbDescriptor):
   """A class for property descriptors.
 
@@ -1215,9 +1258,10 @@
       image_dir: The directory of the file being verified.
       image_ext: The extension of the file being verified (e.g. '.img').
       expected_chain_partitions_map: A map from partition name to the
-        tuple (rollback_index_location, key_blob).
+          tuple (rollback_index_location, key_blob).
       image_containing_descriptor: The image the descriptor is in.
-      accept_zeroed_hashtree: If True, don't fail if hashtree or FEC data is zeroed out.
+      accept_zeroed_hashtree: If True, don't fail if hashtree or FEC data is
+          zeroed out.
 
     Returns:
       True if the descriptor verifies, False otherwise.
@@ -1225,6 +1269,7 @@
     # Nothing to do.
     return True
 
+
 class AvbHashtreeDescriptor(AvbDescriptor):
   """A class for hashtree descriptors.
 
@@ -1378,14 +1423,15 @@
       image_dir: The directory of the file being verified.
       image_ext: The extension of the file being verified (e.g. '.img').
       expected_chain_partitions_map: A map from partition name to the
-        tuple (rollback_index_location, key_blob).
+          tuple (rollback_index_location, key_blob).
       image_containing_descriptor: The image the descriptor is in.
-      accept_zeroed_hashtree: If True, don't fail if hashtree or FEC data is zeroed out.
+      accept_zeroed_hashtree: If True, don't fail if hashtree or FEC data is
+          zeroed out.
 
     Returns:
       True if the descriptor verifies, False otherwise.
     """
-    if self.partition_name == '':
+    if not self.partition_name:
       image_filename = image_containing_descriptor.filename
       image = image_containing_descriptor
     else:
@@ -1395,7 +1441,7 @@
     digest_size = len(hashlib.new(name=self.hash_algorithm).digest())
     digest_padding = round_to_pow2(digest_size) - digest_size
     (hash_level_offsets, tree_size) = calc_hash_level_offsets(
-      self.image_size, self.data_block_size, digest_size + digest_padding)
+        self.image_size, self.data_block_size, digest_size + digest_padding)
     root_digest, hash_tree = generate_hash_tree(image, self.image_size,
                                                 self.data_block_size,
                                                 self.hash_algorithm, self.salt,
@@ -1403,7 +1449,7 @@
                                                 hash_level_offsets,
                                                 tree_size)
     # The root digest must match unless it is not embedded in the descriptor.
-    if len(self.root_digest) != 0 and root_digest != self.root_digest:
+    if self.root_digest and root_digest != self.root_digest:
       sys.stderr.write('hashtree of {} does not match descriptor\n'.
                        format(image_filename))
       return False
@@ -1412,20 +1458,21 @@
     hash_tree_ondisk = image.read(self.tree_size)
     is_zeroed = (self.tree_size == 0) or (hash_tree_ondisk[0:8] == 'ZeRoHaSH')
     if is_zeroed and accept_zeroed_hashtree:
-      print ('{}: skipping verification since hashtree is zeroed and --accept_zeroed_hashtree was given'
-             .format(self.partition_name))
+      print('{}: skipping verification since hashtree is zeroed and '
+            '--accept_zeroed_hashtree was given'
+            .format(self.partition_name))
     else:
       if hash_tree != hash_tree_ondisk:
         sys.stderr.write('hashtree of {} contains invalid data\n'.
                          format(image_filename))
         return False
-      print ('{}: Successfully verified {} hashtree of {} for image of {} bytes'
-             .format(self.partition_name, self.hash_algorithm, image.filename,
-                     self.image_size))
-    # TODO: we could also verify that the FEC stored in the image is
-    # correct but this a) currently requires the 'fec' binary; and b)
-    # takes a long time; and c) is not strictly needed for
-    # verification purposes as we've already verified the root hash.
+      print('{}: Successfully verified {} hashtree of {} for image of {} bytes'
+            .format(self.partition_name, self.hash_algorithm, image.filename,
+                    self.image_size))
+    # TODO(zeuthen): we could also verify that the FEC stored in the image is
+    # correct but this a) currently requires the 'fec' binary; and b) takes a
+    # long time; and c) is not strictly needed for verification purposes as
+    # we've already verified the root hash.
     return True
 
 
@@ -1542,14 +1589,15 @@
       image_dir: The directory of the file being verified.
       image_ext: The extension of the file being verified (e.g. '.img').
       expected_chain_partitions_map: A map from partition name to the
-        tuple (rollback_index_location, key_blob).
+          tuple (rollback_index_location, key_blob).
       image_containing_descriptor: The image the descriptor is in.
-      accept_zeroed_hashtree: If True, don't fail if hashtree or FEC data is zeroed out.
+      accept_zeroed_hashtree: If True, don't fail if hashtree or FEC data is
+          zeroed out.
 
     Returns:
       True if the descriptor verifies, False otherwise.
     """
-    if self.partition_name == '':
+    if not self.partition_name:
       image_filename = image_containing_descriptor.filename
       image = image_containing_descriptor
     else:
@@ -1561,13 +1609,13 @@
     ha.update(data)
     digest = ha.digest()
     # The digest must match unless there is no digest in the descriptor.
-    if len(self.digest) != 0 and digest != self.digest:
+    if self.digest and digest != self.digest:
       sys.stderr.write('{} digest of {} does not match digest in descriptor\n'.
                        format(self.hash_algorithm, image_filename))
       return False
-    print ('{}: Successfully verified {} hash of {} for image of {} bytes'
-           .format(self.partition_name, self.hash_algorithm, image.filename,
-                   self.image_size))
+    print('{}: Successfully verified {} hash of {} for image of {} bytes'
+          .format(self.partition_name, self.hash_algorithm, image.filename,
+                  self.image_size))
     return True
 
 
@@ -1654,9 +1702,10 @@
       image_dir: The directory of the file being verified.
       image_ext: The extension of the file being verified (e.g. '.img').
       expected_chain_partitions_map: A map from partition name to the
-        tuple (rollback_index_location, key_blob).
+          tuple (rollback_index_location, key_blob).
       image_containing_descriptor: The image the descriptor is in.
-      accept_zeroed_hashtree: If True, don't fail if hashtree or FEC data is zeroed out.
+      accept_zeroed_hashtree: If True, don't fail if hashtree or FEC data is
+          zeroed out.
 
     Returns:
       True if the descriptor verifies, False otherwise.
@@ -1664,6 +1713,7 @@
     # Nothing to verify.
     return True
 
+
 class AvbChainPartitionDescriptor(AvbDescriptor):
   """A class for chained partition descriptors.
 
@@ -1758,9 +1808,10 @@
       image_dir: The directory of the file being verified.
       image_ext: The extension of the file being verified (e.g. '.img').
       expected_chain_partitions_map: A map from partition name to the
-        tuple (rollback_index_location, key_blob).
+          tuple (rollback_index_location, key_blob).
       image_containing_descriptor: The image the descriptor is in.
-      accept_zeroed_hashtree: If True, don't fail if hashtree or FEC data is zeroed out.
+      accept_zeroed_hashtree: If True, don't fail if hashtree or FEC data is
+          zeroed out.
 
     Returns:
       True if the descriptor verifies, False otherwise.
@@ -1788,8 +1839,8 @@
                        format(self.partition_name))
       return False
 
-    print ('{}: Successfully verified chain partition descriptor matches '
-           'expected data'.format(self.partition_name))
+    print('{}: Successfully verified chain partition descriptor matches '
+          'expected data'.format(self.partition_name))
 
     return True
 
@@ -1889,9 +1940,42 @@
 class AvbVBMetaHeader(object):
   """A class for parsing and writing AVB vbmeta images.
 
+  The attributes correspond to the |AvbVBMetaImageHeader| struct defined in
+  avb_vbmeta_image.h.
+
   Attributes:
-    The attributes correspond to the |AvbVBMetaImageHeader| struct defined in
-    avb_vbmeta_image.h.
+    magic: Four bytes equal to "AVB0" (AVB_MAGIC).
+    required_libavb_version_major: The major version of libavb required for this
+        header.
+    required_libavb_version_minor: The minor version of libavb required for this
+        header.
+    authentication_data_block_size: The size of the signature block.
+    auxiliary_data_block_size: The size of the auxiliary data block.
+    algorithm_type: The verification algorithm used, see |AvbAlgorithmType|
+        enum.
+    hash_offset: Offset into the "Authentication data" block of hash data.
+    hash_size: Length of the hash data.
+    signature_offset: Offset into the "Authentication data" block of signature
+        data.
+    signature_size: Length of the signature data.
+    public_key_offset: Offset into the "Auxiliary data" block of public key
+        data.
+    public_key_size: Length of the public key data.
+    public_key_metadata_offset: Offset into the "Auxiliary data" block of public
+        key metadata.
+    public_key_metadata_size: Length of the public key metadata. Must be set to
+        zero if there is no public key metadata.
+    descriptors_offset: Offset into the "Auxiliary data" block of descriptor
+        data.
+    descriptors_size: Length of descriptor data.
+    rollback_index: The rollback index which can be used to prevent rollback to
+        older versions.
+    flags: Flags from the AvbVBMetaImageFlags enumeration. This must be set to
+        zero if the vbmeta image is not a top-level image.
+    release_string: The release string from avbtool, e.g. "avbtool 1.0.0" or
+        "avbtool 1.0.0 xyz_board Git-234abde89". Is guaranteed to be NUL
+        terminated. Applications must not make assumptions about how this
+        string is formatted.
   """
 
   SIZE = 256
@@ -2136,7 +2220,8 @@
         raise AvbError('Hash-tree and FEC data must be adjacent.')
       zero_fec_start_offset = ht_desc.fec_offset
       zero_fec_num_bytes = ht_desc.fec_size
-    zero_end_offset = zero_ht_start_offset + zero_ht_num_bytes + zero_fec_num_bytes
+    zero_end_offset = (zero_ht_start_offset + zero_ht_num_bytes
+                       + zero_fec_num_bytes)
     image.seek(zero_end_offset)
     data = image.read(image.image_size - zero_end_offset)
 
@@ -2174,7 +2259,7 @@
                      'block size {}.'.format(partition_size,
                                              image.block_size))
 
-    (footer, vbmeta_header, descriptors, _) = self._parse_image(image)
+    (footer, _, _, _) = self._parse_image(image)
 
     if not footer:
       raise AvbError('Given image does not have a footer.')
@@ -2184,7 +2269,8 @@
 
     vbmeta_end_offset = footer.vbmeta_offset + footer.vbmeta_size
     if vbmeta_end_offset % image.block_size != 0:
-      vbmeta_end_offset += image.block_size - (vbmeta_end_offset % image.block_size)
+      vbmeta_end_offset += image.block_size - (vbmeta_end_offset
+                                               % image.block_size)
 
     if partition_size < vbmeta_end_offset + 1*image.block_size:
       raise AvbError('Requested size of {} is too small for an image '
@@ -2290,21 +2376,26 @@
     if num_printed == 0:
       o.write('    (none)\n')
 
-  def verify_image(self, image_filename, key_path, expected_chain_partitions, follow_chain_partitions,
-                   accept_zeroed_hashtree):
+  def verify_image(self, image_filename, key_path, expected_chain_partitions,
+                   follow_chain_partitions, accept_zeroed_hashtree):
     """Implements the 'verify_image' command.
 
     Arguments:
       image_filename: Image file to get information from (file object).
-      key_path: None or check that embedded public key matches key at given path.
+      key_path: None or check that embedded public key matches key at given
+          path.
       expected_chain_partitions: List of chain partitions to check or None.
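+          Each entry has the form PART_NAME:ROLLBACK_SLOT:KEY_PATH.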
-      follow_chain_partitions: If True, will follows chain partitions even when not
-                               specified with the --expected_chain_partition option
-      accept_zeroed_hashtree: If True, don't fail if hashtree or FEC data is zeroed out.
+      follow_chain_partitions:
+          If True, follows chain partitions even when not specified with
+          the --expected_chain_partition option.
+      accept_zeroed_hashtree: If True, don't fail if hashtree or FEC data is
+          zeroed out.
+
+    Raises:
+      AvbError: If verification of the image fails.
     """
     expected_chain_partitions_map = {}
     if expected_chain_partitions:
-      used_locations = {}
       for cp in expected_chain_partitions:
         cp_tokens = cp.split(':')
         if len(cp_tokens) != 3:
@@ -2313,27 +2404,31 @@
         rollback_index_location = int(cp_tokens[1])
         file_path = cp_tokens[2]
         pk_blob = open(file_path).read()
-        expected_chain_partitions_map[partition_name] = (rollback_index_location, pk_blob)
+        expected_chain_partitions_map[partition_name] = (
+            rollback_index_location, pk_blob)
 
     image_dir = os.path.dirname(image_filename)
     image_ext = os.path.splitext(image_filename)[1]
 
     key_blob = None
     if key_path:
-      print 'Verifying image {} using key at {}'.format(image_filename, key_path)
+      print('Verifying image {} using key at {}'.format(image_filename,
+                                                        key_path))
       key_blob = encode_rsa_key(key_path)
     else:
-      print 'Verifying image {} using embedded public key'.format(image_filename)
+      print('Verifying image {} using embedded public key'.format(
+          image_filename))
 
     image = ImageHandler(image_filename)
-    (footer, header, descriptors, image_size) = self._parse_image(image)
+    (footer, header, descriptors, _) = self._parse_image(image)
     offset = 0
     if footer:
       offset = footer.vbmeta_offset
 
     image.seek(offset)
-    vbmeta_blob = image.read(header.SIZE + header.authentication_data_block_size +
-                             header.auxiliary_data_block_size)
+    vbmeta_blob = image.read(header.SIZE
+                             + header.authentication_data_block_size
+                             + header.auxiliary_data_block_size)
 
     alg_name, _ = lookup_algorithm_by_type(header.algorithm_type)
     if not verify_vbmeta_signature(header, vbmeta_blob):
@@ -2345,36 +2440,41 @@
       key_offset = AvbVBMetaHeader.SIZE
       key_offset += header.authentication_data_block_size
       key_offset += header.public_key_offset
-      key_blob_in_vbmeta = vbmeta_blob[key_offset:key_offset + header.public_key_size]
+      key_blob_in_vbmeta = vbmeta_blob[key_offset:key_offset
+                                       + header.public_key_size]
       if key_blob != key_blob_in_vbmeta:
         raise AvbError('Embedded public key does not match given key.')
 
     if footer:
-      print ('vbmeta: Successfully verified footer and {} vbmeta struct in {}'
-             .format(alg_name, image.filename))
+      print('vbmeta: Successfully verified footer and {} vbmeta struct in {}'
+            .format(alg_name, image.filename))
     else:
-      print ('vbmeta: Successfully verified {} vbmeta struct in {}'
-             .format(alg_name, image.filename))
+      print('vbmeta: Successfully verified {} vbmeta struct in {}'
+            .format(alg_name, image.filename))
 
     for desc in descriptors:
-      if (isinstance(desc, AvbChainPartitionDescriptor) and follow_chain_partitions and
-          expected_chain_partitions_map.get(desc.partition_name) == None):
+      if (isinstance(desc, AvbChainPartitionDescriptor)
+          and follow_chain_partitions
+          and expected_chain_partitions_map.get(desc.partition_name) is None):
         # In this case we're processing a chain descriptor but don't have a
         # --expect_chain_partition ... however --follow_chain_partitions was
         # specified so we shouldn't error out in desc.verify().
-        print ('{}: Chained but ROLLBACK_SLOT (which is {}) and KEY (which has sha1 {}) not specified'
+        print('{}: Chained but ROLLBACK_SLOT (which is {}) '
+              'and KEY (which has sha1 {}) not specified'
               .format(desc.partition_name, desc.rollback_index_location,
                       hashlib.sha1(desc.public_key).hexdigest()))
-      else:
-        if not desc.verify(image_dir, image_ext, expected_chain_partitions_map, image,
-                           accept_zeroed_hashtree):
-          raise AvbError('Error verifying descriptor.')
-      # Honor --follow_chain_partitions - add '--' to make the output more readable.
-      if isinstance(desc, AvbChainPartitionDescriptor) and follow_chain_partitions:
-        print '--'
-        chained_image_filename = os.path.join(image_dir, desc.partition_name + image_ext)
-        self.verify_image(chained_image_filename, key_path, None, False, accept_zeroed_hashtree)
-
+      elif not desc.verify(image_dir, image_ext, expected_chain_partitions_map,
+                           image, accept_zeroed_hashtree):
+        raise AvbError('Error verifying descriptor.')
+      # Honor --follow_chain_partitions - add '--' to make the output more
+      # readable.
+      if (isinstance(desc, AvbChainPartitionDescriptor)
+          and follow_chain_partitions):
+        print('--')
+        chained_image_filename = os.path.join(image_dir,
+                                              desc.partition_name + image_ext)
+        self.verify_image(chained_image_filename, key_path, None, False,
+                          accept_zeroed_hashtree)
 
   def calculate_vbmeta_digest(self, image_filename, hash_algorithm, output):
     """Implements the 'calculate_vbmeta_digest' command.
@@ -2389,7 +2489,7 @@
     image_ext = os.path.splitext(image_filename)[1]
 
     image = ImageHandler(image_filename)
-    (footer, header, descriptors, image_size) = self._parse_image(image)
+    (footer, header, descriptors, _) = self._parse_image(image)
     offset = 0
     if footer:
       offset = footer.vbmeta_offset
@@ -2403,9 +2503,10 @@
 
     for desc in descriptors:
       if isinstance(desc, AvbChainPartitionDescriptor):
-        ch_image_filename = os.path.join(image_dir, desc.partition_name + image_ext)
+        ch_image_filename = os.path.join(image_dir,
+                                         desc.partition_name + image_ext)
         ch_image = ImageHandler(ch_image_filename)
-        (ch_footer, ch_header, ch_descriptors, ch_image_size) = self._parse_image(ch_image)
+        (ch_footer, ch_header, _, _) = self._parse_image(ch_image)
         ch_offset = 0
         ch_size = (ch_header.SIZE + ch_header.authentication_data_block_size +
                    ch_header.auxiliary_data_block_size)
@@ -2416,8 +2517,7 @@
         hasher.update(ch_vbmeta_blob)
 
     digest = hasher.digest()
-    output.write('{}\n'.format(digest.encode('hex')))
-
+    output.write('{}\n'.format(binascii.hexlify(digest)))
 
   def calculate_kernel_cmdline(self, image_filename, hashtree_disabled, output):
     """Implements the 'calculate_kernel_cmdline' command.
@@ -2437,7 +2537,8 @@
     cmdline_descriptors = []
     for desc in descriptors:
       if isinstance(desc, AvbChainPartitionDescriptor):
-        ch_image_filename = os.path.join(image_dir, desc.partition_name + image_ext)
+        ch_image_filename = os.path.join(image_dir,
+                                         desc.partition_name + image_ext)
         ch_image = ImageHandler(ch_image_filename)
         _, _, ch_descriptors, _ = self._parse_image(ch_image)
         for ch_desc in ch_descriptors:
@@ -2449,17 +2550,19 @@
     kernel_cmdline_snippets = []
     for desc in cmdline_descriptors:
       use_cmdline = True
-      if (desc.flags & AvbKernelCmdlineDescriptor.FLAGS_USE_ONLY_IF_HASHTREE_NOT_DISABLED) != 0:
+      if ((desc.flags &
+           AvbKernelCmdlineDescriptor.FLAGS_USE_ONLY_IF_HASHTREE_NOT_DISABLED)
+          != 0):
         if hashtree_disabled:
           use_cmdline = False
-      if (desc.flags & AvbKernelCmdlineDescriptor.FLAGS_USE_ONLY_IF_HASHTREE_DISABLED) != 0:
+      if (desc.flags &
+          AvbKernelCmdlineDescriptor.FLAGS_USE_ONLY_IF_HASHTREE_DISABLED) != 0:
         if not hashtree_disabled:
           use_cmdline = False
       if use_cmdline:
         kernel_cmdline_snippets.append(desc.kernel_cmdline)
     output.write(' '.join(kernel_cmdline_snippets))
 
-
   def _parse_image(self, image):
     """Gets information about an image.
 
@@ -2547,14 +2650,14 @@
 
     c = 'dm="1 vroot none ro 1,'
     c += '0'  # start
-    c += ' {}'.format((ht.image_size / 512))  # size (# sectors)
+    c += ' {}'.format((ht.image_size // 512))  # size (# sectors)
     c += ' verity {}'.format(ht.dm_verity_version)  # type and version
     c += ' PARTUUID=$(ANDROID_SYSTEM_PARTUUID)'  # data_dev
     c += ' PARTUUID=$(ANDROID_SYSTEM_PARTUUID)'  # hash_dev
     c += ' {}'.format(ht.data_block_size)  # data_block
     c += ' {}'.format(ht.hash_block_size)  # hash_block
-    c += ' {}'.format(ht.image_size / ht.data_block_size)  # #blocks
-    c += ' {}'.format(ht.image_size / ht.data_block_size)  # hash_offset
+    c += ' {}'.format(ht.image_size // ht.data_block_size)  # #blocks
+    c += ' {}'.format(ht.image_size // ht.data_block_size)  # hash_offset
     c += ' {}'.format(ht.hash_algorithm)  # hash_alg
     c += ' {}'.format(str(ht.root_digest).encode('hex'))  # root_digest
     c += ' {}'.format(str(ht.salt).encode('hex'))  # salt
@@ -2567,8 +2670,8 @@
       # Note that fec_blocks is the size that FEC covers, *not* the
       # size of the FEC data. Since we use FEC for everything up until
       # the FEC data, it's the same as the offset.
-      c += ' fec_blocks {}'.format(ht.fec_offset/ht.data_block_size)
-      c += ' fec_start {}'.format(ht.fec_offset/ht.data_block_size)
+      c += ' fec_blocks {}'.format(ht.fec_offset // ht.data_block_size)
+      c += ' fec_start {}'.format(ht.fec_offset // ht.data_block_size)
     else:
       c += ' 2'  # number of optional args
       c += ' $(ANDROID_VERITY_MODE)'
@@ -2666,10 +2769,10 @@
           (_, image_header, _, _) = self._parse_image(ImageHandler(image.name))
           tmp_header.bump_required_libavb_version_minor(
               image_header.required_libavb_version_minor)
-        print '1.{}'.format(tmp_header.required_libavb_version_minor)
+        print('1.{}'.format(tmp_header.required_libavb_version_minor))
       else:
         # Descriptors aside, all vbmeta features are supported in 1.0.
-        print '1.0'
+        print('1.0')
       return
 
     if not output:
@@ -2694,6 +2797,7 @@
       padding_needed = padded_size - len(vbmeta_blob)
       output.write('\0' * padding_needed)
 
+
   def _generate_vbmeta_blob(self, algorithm_name, key_path,
                             public_key_metadata_path, descriptors,
                             chain_partitions,
@@ -2793,6 +2897,7 @@
         idx = prop.find(':')
         if idx == -1:
           raise AvbError('Malformed property "{}".'.format(prop))
+        # pylint: disable=redefined-variable-type
         desc = AvbPropertyDescriptor()
         desc.key = prop[0:idx]
         desc.value = prop[(idx + 1):]
@@ -2852,7 +2957,7 @@
             descriptors_dict[key] = desc.encode()
           else:
             encoded_descriptors.extend(desc.encode())
-      for key in sorted(descriptors_dict.keys()):
+      for key in sorted(descriptors_dict):
         encoded_descriptors.extend(descriptors_dict[key])
 
     # Load public key metadata blob, if requested.
@@ -2873,6 +2978,7 @@
             algorithm_name))
 
     # Override release string, if requested.
+    # pylint: disable=unicode-builtin
     if isinstance(release_string, (str, unicode)):
       h.release_string = release_string
 
@@ -3090,7 +3196,7 @@
 
     # If we're asked to calculate minimum required libavb version, we're done.
     if print_required_libavb_version:
-      print '1.{}'.format(required_libavb_version_minor)
+      print('1.{}'.format(required_libavb_version_minor))
       return
 
     # First, calculate the maximum image size such that an image
@@ -3105,7 +3211,7 @@
 
     # If we're asked to only calculate the maximum image size, we're done.
     if calc_max_image_size:
-      print '{}'.format(max_image_size)
+      print('{}'.format(max_image_size))
       return
 
     image = ImageHandler(image_filename)
@@ -3143,16 +3249,15 @@
 
       digest_size = len(hashlib.new(name=hash_algorithm).digest())
       if salt:
-        salt = salt.decode('hex')
+        salt = binascii.unhexlify(salt)
+      elif salt is None and not use_persistent_digest:
+        # If salt is not explicitly specified, choose a hash that's the same
+        # size as the hash size. Don't populate a random salt if this
+        # descriptor is being created to use a persistent digest on device.
+        hash_size = digest_size
+        salt = open('/dev/urandom').read(hash_size)
       else:
-        if salt is None and not use_persistent_digest:
-          # If salt is not explicitly specified, choose a hash that's the same
-          # size as the hash size. Don't populate a random salt if this
-          # descriptor is being created to use a persistent digest on device.
-          hash_size = digest_size
-          salt = open('/dev/urandom').read(hash_size)
-        else:
-          salt = ''
+        salt = ''
 
       hasher = hashlib.new(name=hash_algorithm, string=salt)
       # TODO(zeuthen): might want to read this in chunks to avoid
@@ -3246,7 +3351,8 @@
                           release_string, append_to_release_string,
                           output_vbmeta_image, do_not_append_vbmeta_image,
                           print_required_libavb_version,
-                          use_persistent_root_digest, do_not_use_ab, no_hashtree):
+                          use_persistent_root_digest, do_not_use_ab,
+                          no_hashtree):
     """Implements the 'add_hashtree_footer' command.
 
     See https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity for
@@ -3300,7 +3406,7 @@
 
     # If we're asked to calculate minimum required libavb version, we're done.
     if print_required_libavb_version:
-      print '1.{}'.format(required_libavb_version_minor)
+      print('1.{}'.format(required_libavb_version_minor))
       return
 
     digest_size = len(hashlib.new(name=hash_algorithm).digest())
@@ -3327,7 +3433,7 @@
 
     # If we're asked to only calculate the maximum image size, we're done.
     if calc_max_image_size:
-      print '{}'.format(max_image_size)
+      print('{}'.format(max_image_size))
       return
 
     image = ImageHandler(image_filename)
@@ -3337,11 +3443,10 @@
         raise AvbError('Partition size of {} is not a multiple of the image '
                        'block size {}.'.format(partition_size,
                                                image.block_size))
-    else:
-      if image.image_size % image.block_size != 0:
-        raise AvbError('File size of {} is not a multiple of the image '
-                       'block size {}.'.format(image.image_size,
-                                               image.block_size))
+    elif image.image_size % image.block_size != 0:
+      raise AvbError('File size of {} is not a multiple of the image '
+                     'block size {}.'.format(image.image_size,
+                                             image.block_size))
 
     # If there's already a footer, truncate the image to its original
     # size. This way 'avbtool add_hashtree_footer' is idempotent
@@ -3376,16 +3481,15 @@
                                               partition_size))
 
       if salt:
-        salt = salt.decode('hex')
+        salt = binascii.unhexlify(salt)
+      elif salt is None and not use_persistent_root_digest:
+        # If salt is not explicitly specified, choose a hash that's the same
+        # size as the hash size. Don't populate a random salt if this
+        # descriptor is being created to use a persistent digest on device.
+        hash_size = digest_size
+        salt = open('/dev/urandom').read(hash_size)
       else:
-        if salt is None and not use_persistent_root_digest:
-          # If salt is not explicitly specified, choose a hash that's the same
-          # size as the hash size. Don't populate a random salt if this
-          # descriptor is being created to use a persistent digest on device.
-          hash_size = digest_size
-          salt = open('/dev/urandom').read(hash_size)
-        else:
-          salt = ''
+        salt = ''
 
       # Hashes are stored upside down so we need to calculate hash
       # offsets in advance.
@@ -3549,7 +3653,7 @@
       padding_and_hash = bytearray()
       algorithm_name = 'SHA512_RSA4096'
       alg = ALGORITHMS[algorithm_name]
-      hasher = hashlib.sha512()
+      hasher = hashlib.sha512()  # pylint: disable=redefined-variable-type
       padding_and_hash.extend(alg.padding)
       hasher.update(signed_data)
       padding_and_hash.extend(hasher.digest())
@@ -3577,7 +3681,7 @@
     Raises:
       AvbError: If an argument is incorrect.
     """
-    EXPECTED_PRODUCT_ID_SIZE = 16
+    EXPECTED_PRODUCT_ID_SIZE = 16  # pylint: disable=invalid-name
     if len(product_id) != EXPECTED_PRODUCT_ID_SIZE:
       raise AvbError('Invalid Product ID length.')
     output.write(struct.pack('<I', 1))  # Format Version
@@ -3604,7 +3708,7 @@
     Raises:
       AvbError: If an argument is incorrect.
     """
-    EXPECTED_CERTIFICATE_SIZE = 1620
+    EXPECTED_CERTIFICATE_SIZE = 1620  # pylint: disable=invalid-name
     if len(intermediate_key_certificate) != EXPECTED_CERTIFICATE_SIZE:
       raise AvbError('Invalid intermediate key certificate length.')
     if len(product_key_certificate) != EXPECTED_CERTIFICATE_SIZE:
@@ -3644,8 +3748,8 @@
     Raises:
       AvbError: If an argument is incorrect.
     """
-    EXPECTED_CERTIFICATE_SIZE = 1620
-    EXPECTED_CHALLENGE_SIZE = 16
+    EXPECTED_CERTIFICATE_SIZE = 1620  # pylint: disable=invalid-name
+    EXPECTED_CHALLENGE_SIZE = 16  # pylint: disable=invalid-name
     if len(intermediate_key_certificate) != EXPECTED_CERTIFICATE_SIZE:
       raise AvbError('Invalid intermediate key certificate length.')
     if len(unlock_key_certificate) != EXPECTED_CERTIFICATE_SIZE:
@@ -3694,7 +3798,7 @@
   num_levels = 0
   size = image_size
   while size > block_size:
-    num_blocks = (size + block_size - 1) / block_size
+    num_blocks = (size + block_size - 1) // block_size
     level_size = round_to_multiple(num_blocks * digest_size, block_size)
 
     level_sizes.append(level_size)
@@ -3720,7 +3824,7 @@
 def calc_fec_data_size(image_size, num_roots):
   """Calculates how much space FEC data will take.
 
-  Args:
+  Arguments:
     image_size: The size of the image.
     num_roots: Number of roots.
 
@@ -3746,7 +3850,7 @@
 def generate_fec_data(image_filename, num_roots):
   """Generate FEC codes for an image.
 
-  Args:
+  Arguments:
     image_filename: The filename of the image.
     num_roots: Number of roots.
 
@@ -3775,7 +3879,7 @@
                        digest_padding, hash_level_offsets, tree_size):
   """Generates a Merkle-tree for a file.
 
-  Args:
+  Arguments:
     image: The image, as a file.
     image_size: The size of the image.
     block_size: The block size, e.g. 4096.
@@ -3984,12 +4088,14 @@
     sub_parser.add_argument('--padding_size',
                             metavar='NUMBER',
                             help='If non-zero, pads output with NUL bytes so '
-                                 'its size is a multiple of NUMBER (default: 0)',
+                                 'its size is a multiple of NUMBER '
+                                 '(default: 0)',
                             type=parse_number,
                             default=0)
     self._add_common_args(sub_parser)
     sub_parser.set_defaults(func=self.make_vbmeta_image)
 
+
     sub_parser = subparsers.add_parser('add_hash_footer',
                                        help='Add hashes and footer to image.')
     sub_parser.add_argument('--image',
@@ -4037,8 +4143,9 @@
                             type=argparse.FileType('rb'))
     sub_parser.set_defaults(func=self.append_vbmeta_image)
 
-    sub_parser = subparsers.add_parser('add_hashtree_footer',
-                                       help='Add hashtree and footer to image.')
+    sub_parser = subparsers.add_parser(
+        'add_hashtree_footer',
+        help='Add hashtree and footer to image.')
     sub_parser.add_argument('--image',
                             help='Image to add hashtree to',
                             type=argparse.FileType('rab+'))
@@ -4066,9 +4173,10 @@
     sub_parser.add_argument('--generate_fec',
                             help=argparse.SUPPRESS,
                             action='store_true')
-    sub_parser.add_argument('--do_not_generate_fec',
-                            help='Do not generate forward-error-correction codes',
-                            action='store_true')
+    sub_parser.add_argument(
+        '--do_not_generate_fec',
+        help='Do not generate forward-error-correction codes',
+        action='store_true')
     sub_parser.add_argument('--fec_num_roots',
                             help='Number of roots for FEC (default: 2)',
                             type=parse_number,
@@ -4119,8 +4227,9 @@
                             required=True)
     sub_parser.set_defaults(func=self.zero_hashtree)
 
-    sub_parser = subparsers.add_parser('extract_vbmeta_image',
-                                       help='Extracts vbmeta from an image with a footer.')
+    sub_parser = subparsers.add_parser(
+        'extract_vbmeta_image',
+        help='Extracts vbmeta from an image with a footer.')
     sub_parser.add_argument('--image',
                             help='Image with footer',
                             type=argparse.FileType('rb'),
@@ -4131,7 +4240,8 @@
     sub_parser.add_argument('--padding_size',
                             metavar='NUMBER',
                             help='If non-zero, pads output with NUL bytes so '
-                                 'its size is a multiple of NUMBER (default: 0)',
+                                 'its size is a multiple of NUMBER '
+                                 '(default: 0)',
                             type=parse_number,
                             default=0)
     sub_parser.set_defaults(func=self.extract_vbmeta_image)
@@ -4175,13 +4285,15 @@
                             help='Expected chain partition',
                             metavar='PART_NAME:ROLLBACK_SLOT:KEY_PATH',
                             action='append')
-    sub_parser.add_argument('--follow_chain_partitions',
-                            help=('Follows chain partitions even when not '
-                                  'specified with the --expected_chain_partition option'),
-                            action='store_true')
-    sub_parser.add_argument('--accept_zeroed_hashtree',
-                            help=('Accept images where the hashtree or FEC data is zeroed out'),
-                            action='store_true')
+    sub_parser.add_argument(
+        '--follow_chain_partitions',
+        help=('Follows chain partitions even when not '
+              'specified with the --expected_chain_partition option'),
+        action='store_true')
+    sub_parser.add_argument(
+        '--accept_zeroed_hashtree',
+        help=('Accept images where the hashtree or FEC data is zeroed out'),
+        action='store_true')
     sub_parser.set_defaults(func=self.verify_image)
 
     sub_parser = subparsers.add_parser(
@@ -4349,12 +4461,12 @@
     try:
       args.func(args)
     except AvbError as e:
-      sys.stderr.write('{}: {}\n'.format(argv[0], e.message))
+      sys.stderr.write('{}: {}\n'.format(argv[0], str(e)))
       sys.exit(1)
 
   def version(self, _):
     """Implements the 'version' sub-command."""
-    print get_release_string()
+    print(get_release_string())
 
   def extract_public_key(self, args):
     """Implements the 'extract_public_key' sub-command."""
@@ -4415,30 +4527,31 @@
       sys.stderr.write('The --generate_fec option is deprecated since FEC '
                        'is now generated by default. Use the option '
                        '--do_not_generate_fec to not generate FEC.\n')
-    self.avb.add_hashtree_footer(args.image.name if args.image else None,
-                                 args.partition_size,
-                                 args.partition_name,
-                                 not args.do_not_generate_fec, args.fec_num_roots,
-                                 args.hash_algorithm, args.block_size,
-                                 args.salt, args.chain_partition, args.algorithm,
-                                 args.key, args.public_key_metadata,
-                                 args.rollback_index, args.flags, args.prop,
-                                 args.prop_from_file,
-                                 args.kernel_cmdline,
-                                 args.setup_rootfs_from_kernel,
-                                 args.setup_as_rootfs_from_kernel,
-                                 args.include_descriptors_from_image,
-                                 args.calc_max_image_size,
-                                 args.signing_helper,
-                                 args.signing_helper_with_files,
-                                 args.internal_release_string,
-                                 args.append_to_release_string,
-                                 args.output_vbmeta_image,
-                                 args.do_not_append_vbmeta_image,
-                                 args.print_required_libavb_version,
-                                 args.use_persistent_digest,
-                                 args.do_not_use_ab,
-                                 args.no_hashtree)
+    self.avb.add_hashtree_footer(
+        args.image.name if args.image else None,
+        args.partition_size,
+        args.partition_name,
+        not args.do_not_generate_fec, args.fec_num_roots,
+        args.hash_algorithm, args.block_size,
+        args.salt, args.chain_partition, args.algorithm,
+        args.key, args.public_key_metadata,
+        args.rollback_index, args.flags, args.prop,
+        args.prop_from_file,
+        args.kernel_cmdline,
+        args.setup_rootfs_from_kernel,
+        args.setup_as_rootfs_from_kernel,
+        args.include_descriptors_from_image,
+        args.calc_max_image_size,
+        args.signing_helper,
+        args.signing_helper_with_files,
+        args.internal_release_string,
+        args.append_to_release_string,
+        args.output_vbmeta_image,
+        args.do_not_append_vbmeta_image,
+        args.print_required_libavb_version,
+        args.use_persistent_digest,
+        args.do_not_use_ab,
+        args.no_hashtree)
 
   def erase_footer(self, args):
     """Implements the 'erase_footer' sub-command."""
@@ -4479,7 +4592,8 @@
 
   def calculate_kernel_cmdline(self, args):
     """Implements the 'calculate_kernel_cmdline' sub-command."""
-    self.avb.calculate_kernel_cmdline(args.image.name, args.hashtree_disabled, args.output)
+    self.avb.calculate_kernel_cmdline(args.image.name, args.hashtree_disabled,
+                                      args.output)
 
   def make_atx_certificate(self, args):
     """Implements the 'make_atx_certificate' sub-command."""
diff --git a/avbtool.py b/avbtool.py
new file mode 120000
index 0000000..16657b9
--- /dev/null
+++ b/avbtool.py
@@ -0,0 +1 @@
+avbtool
\ No newline at end of file
diff --git a/libavb_aftl/avb_aftl_types.h b/libavb_aftl/avb_aftl_types.h
new file mode 100644
index 0000000..2dfe054
--- /dev/null
+++ b/libavb_aftl/avb_aftl_types.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifdef AVB_INSIDE_LIBAVB_AFTL_H
+#error "You can't include avb_aftl_types.h in the public header libavb_aftl.h."
+#endif
+
+#ifndef AVB_COMPILATION
+#error "Never include this file, it may only be used from internal avb code."
+#endif
+
+#ifndef AVB_AFTL_TYPES_H_
+#define AVB_AFTL_TYPES_H_
+
+#include <libavb/libavb.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Hash and signature sizes supported. Hash is SHA256, signature is RSA4096. */
+#define AFTL_HASH_SIZE 32
+#define AFTL_SIGNATURE_SIZE 512
+
+/* Data structure containing AFTL header information. */
+typedef struct AftlIcpHeader {
+  uint32_t magic;
+  uint32_t required_icp_version_major;
+  uint32_t required_icp_version_minor;
+  uint32_t aftl_descriptor_size; /* Total size of the AftlDescriptor. */
+  uint16_t icp_count;
+} AftlIcpHeader;
+
+/* Data structure containing a Trillian LogRootDescriptor, from
+   https://github.com/google/trillian/blob/master/trillian.proto#L255
+   The log_root_signature is calculated over this structure. */
+typedef struct TrillianLogRootDescriptor {
+  uint16_t version;
+  uint64_t tree_size;
+  uint8_t root_hash_size;
+  uint8_t* root_hash;
+  uint64_t timestamp;
+  uint64_t revision;
+  uint16_t metadata_size;
+  uint8_t* metadata;
+} TrillianLogRootDescriptor;
+
+/* Data structure containing the firmware image info stored in the
+   transparency log. This is defined in
+   https://android.googlesource.com/platform/external/avb/+/master/proto/aftl.proto
+ */
+typedef struct FirmwareInfo {
+  uint32_t vbmeta_hash_size;
+  uint8_t* vbmeta_hash;
+  uint32_t version_incremental_size;
+  uint8_t* version_incremental;
+  uint32_t platform_key_size;
+  uint8_t* platform_key;
+  uint32_t manufacturer_key_hash_size;
+  uint8_t* manufacturer_key_hash;
+  uint32_t description_size;
+  uint8_t* description;
+} FirmwareInfo;
+
+/* Data structure containing AFTL inclusion proof data from a single
+   transparency log. */
+typedef struct AftlIcpEntry {
+  uint32_t log_url_size;
+  uint64_t leaf_index;
+  uint32_t log_root_descriptor_size;
+  uint32_t fw_info_leaf_size;
+  uint32_t log_root_sig_size;
+  uint8_t proof_hash_count;
+  uint32_t inc_proof_size;
+  uint8_t* log_url;
+  TrillianLogRootDescriptor log_root_descriptor;
+  FirmwareInfo fw_info_leaf;
+  uint8_t* log_root_signature;
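+  /* Inclusion proof hashes: proof_hash_count entries of AFTL_HASH_SIZE
+     bytes each, stored as a trailing flexible array member. */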
+  uint8_t proofs[/*proof_hash_count*/][AFTL_HASH_SIZE];
+} AftlIcpEntry;
+
+/* Main data structure for an AFTL descriptor. */
+typedef struct AftlDescriptor {
+  AftlIcpHeader header;
+  AftlIcpEntry entries[/*icp_count*/];
+} AftlDescriptor;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* AVB_AFTL_TYPES_H_ */
diff --git a/libavb_aftl/avb_aftl_util.c b/libavb_aftl/avb_aftl_util.c
new file mode 100644
index 0000000..aaaaa85
--- /dev/null
+++ b/libavb_aftl/avb_aftl_util.c
@@ -0,0 +1,389 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <libavb/avb_crypto.h>
+#include <libavb/avb_rsa.h>
+#include <libavb/avb_sha.h>
+#include <libavb/avb_util.h>
+
+#include "avb_aftl_types.h"
+#include "avb_aftl_util.h"
+#include "avb_aftl_validate.h"
+
+/* Performs a SHA256 hash operation on data. */
+bool avb_aftl_sha256(uint8_t* data,
+                     uint64_t length,
+                     uint8_t hash[AFTL_HASH_SIZE]) {
+  AvbSHA256Ctx context;
+  uint8_t* tmp;
+
+  if ((data == NULL) && (length != 0)) return false;
+
+  avb_sha256_init(&context);
+  avb_sha256_update(&context, data, length);
+  tmp = avb_sha256_final(&context);
+  avb_memcpy(hash, tmp, AFTL_HASH_SIZE);
+  return true;
+}
+
+/* Calculates a SHA256 hash of the TrillianLogRootDescriptor in icp_entry.
+
+   The hash is calculated over the entire TrillianLogRootDescriptor
+   structure. Some of the fields in this implementation are dynamically
+   allocated, and so the data needs to be reconstructed so that the hash
+   can be properly calculated. The TrillianLogRootDescriptor is defined
+   here: https://github.com/google/trillian/blob/master/trillian.proto#L255 */
+bool avb_aftl_hash_log_root_descriptor(AftlIcpEntry* icp_entry, uint8_t* hash) {
+  uint8_t* buffer;
+  uint8_t* lrd_offset; /* Byte offset into the descriptor. */
+  uint32_t tlrd_size;
+  bool retval;
+
+  avb_assert(icp_entry != NULL && hash != NULL);
+
+  /* Size of the non-pointer elements of the TrillianLogRootDescriptor. */
+  tlrd_size = sizeof(uint16_t) * 2 + sizeof(uint64_t) * 3 + sizeof(uint8_t);
+  /* Ensure the log_root_descriptor size is correct. */
+  if (icp_entry->log_root_descriptor_size > AFTL_MAX_LOG_ROOT_DESCRIPTOR_SIZE) {
+    avb_error("Invalid log root descriptor size.\n");
+    return false;
+  }
+  if (icp_entry->log_root_descriptor_size !=
+      (tlrd_size + icp_entry->log_root_descriptor.root_hash_size +
+       icp_entry->log_root_descriptor.metadata_size)) {
+    avb_error("Log root descriptor size doesn't match fields.\n");
+    return false;
+  }
+  /* If the root_hash is missing, its size must be zero. */
+  if (!icp_entry->log_root_descriptor.root_hash &&
+      (icp_entry->log_root_descriptor.root_hash_size != 0)) {
+    avb_error("Invalid tree root hash values.\n");
+    return false;
+  }
+
+  /* If the metadata is missing, its size must be zero. */
+  if (!icp_entry->log_root_descriptor.metadata &&
+      (icp_entry->log_root_descriptor.metadata_size != 0)) {
+    avb_error("Invalid log root descriptor metadata values.\n");
+    return false;
+  }
+  buffer = (uint8_t*)avb_malloc(icp_entry->log_root_descriptor_size);
+  if (buffer == NULL) {
+    avb_error("Allocation failure in avb_aftl_hash_log_root_descriptor.\n");
+    return false;
+  }
+  lrd_offset = buffer;
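+  /* The hash is computed over the fields serialized back to back in wire
+     order: version | tree_size | root_hash_size | root_hash | timestamp |
+     revision | metadata_size | metadata. */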
+  /* Copy in the version, tree_size and root hash length. */
+  avb_memcpy(
+      lrd_offset, &(icp_entry->log_root_descriptor.version), sizeof(uint16_t));
+  lrd_offset += sizeof(uint16_t);
+  avb_memcpy(lrd_offset,
+             &(icp_entry->log_root_descriptor.tree_size),
+             sizeof(uint64_t));
+  lrd_offset += sizeof(uint64_t);
+  avb_memcpy(lrd_offset,
+             &(icp_entry->log_root_descriptor.root_hash_size),
+             sizeof(uint8_t));
+  lrd_offset += sizeof(uint8_t);
+  /* Copy the root hash. */
+  if (icp_entry->log_root_descriptor.root_hash_size > 0) {
+    avb_memcpy(lrd_offset,
+               icp_entry->log_root_descriptor.root_hash,
+               icp_entry->log_root_descriptor.root_hash_size);
+  }
+  lrd_offset += icp_entry->log_root_descriptor.root_hash_size;
+  /* Copy in the timestamp, revision, and the metadata length. */
+  avb_memcpy(lrd_offset,
+             &(icp_entry->log_root_descriptor.timestamp),
+             sizeof(uint64_t));
+  lrd_offset += sizeof(uint64_t);
+
+  avb_memcpy(
+      lrd_offset, &(icp_entry->log_root_descriptor.revision), sizeof(uint64_t));
+  lrd_offset += sizeof(uint64_t);
+
+  avb_memcpy(lrd_offset,
+             &(icp_entry->log_root_descriptor.metadata_size),
+             sizeof(uint16_t));
+  lrd_offset += sizeof(uint16_t);
+
+  /* Copy the metadata if it exists. */
+  if (icp_entry->log_root_descriptor.metadata_size > 0) {
+    avb_memcpy(lrd_offset,
+               icp_entry->log_root_descriptor.metadata,
+               icp_entry->log_root_descriptor.metadata_size);
+  }
+  /* Hash the result and clean up. */
+  retval = avb_aftl_sha256(buffer, icp_entry->log_root_descriptor_size, hash);
+  avb_free(buffer);
+  return retval;
+}
+
+/* Computes a leaf hash as detailed by https://tools.ietf.org/html/rfc6962. */
+bool avb_aftl_rfc6962_hash_leaf(uint8_t* leaf,
+                                uint64_t leaf_size,
+                                uint8_t* hash) {
+  uint8_t* buffer;
+  bool retval;
+
+  avb_assert(leaf != NULL && hash != NULL);
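+  /* leaf_size + 1 below must not overflow. */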
+  avb_assert(leaf_size != AFTL_ULONG_MAX);
+
+  buffer = (uint8_t*)avb_malloc(leaf_size + 1);
+
+  if (buffer == NULL) {
+    avb_error("Allocation failure in avb_aftl_rfc6962_hash_leaf.\n");
+    return false;
+  }
+  /* Prefix the data with a '0' for 2nd preimage attack resistance. */
+  buffer[0] = 0;
+
+  if (leaf_size > 0) avb_memcpy(buffer + 1, leaf, leaf_size);
+
+  retval = avb_aftl_sha256(buffer, leaf_size + 1, hash);
+  avb_free(buffer);
+  return retval;
+}
+
+/* Computes an inner hash as detailed by https://tools.ietf.org/html/rfc6962. */
+bool avb_aftl_rfc6962_hash_children(uint8_t* left_child,
+                                    uint64_t left_child_size,
+                                    uint8_t* right_child,
+                                    uint64_t right_child_size,
+                                    uint8_t* hash) {
+  uint8_t* buffer;
+  uint64_t data_size;
+  bool retval;
+
+  avb_assert(left_child != NULL && right_child != NULL && hash != NULL);
+
+  /* Check for integer overflow. */
+  avb_assert(left_child_size < AFTL_ULONG_MAX - right_child_size);
+
+  data_size = left_child_size + right_child_size + 1;
+  buffer = (uint8_t*)avb_malloc(data_size);
+  if (buffer == NULL) {
+    avb_error("Allocation failure in avb_aftl_rfc6962_hash_children.\n");
+    return false;
+  }
+
+  /* Prefix the data with '1' for 2nd preimage attack resistance. */
+  buffer[0] = 1;
+
+  /* Copy the left child data, if it exists. */
+  if (left_child_size > 0) avb_memcpy(buffer + 1, left_child, left_child_size);
+  /* Copy the right child data, if it exists. */
+  if (right_child_size > 0)
+    avb_memcpy(buffer + 1 + left_child_size, right_child, right_child_size);
+
+  /* Hash the concatenated data and clean up. */
+  retval = avb_aftl_sha256(buffer, data_size, hash);
+  avb_free(buffer);
+  return retval;
+}
+
+/* Computes a subtree hash along the tree's right border. */
+bool avb_aftl_chain_border_right(uint8_t* seed,
+                                 uint64_t seed_size,
+                                 uint8_t* proof,
+                                 uint32_t proof_entry_count,
+                                 uint8_t* hash) {
+  size_t i;
+  uint8_t* tmp;
+  uint8_t* tmp_hash;
+  bool retval = true; /* Remains true when proof_entry_count is zero. */
+
+  avb_assert(seed_size == AFTL_HASH_SIZE);
+  avb_assert(seed != NULL && proof != NULL && hash != NULL);
+
+  tmp = seed;
+  tmp_hash = (uint8_t*)avb_malloc(AFTL_HASH_SIZE);
+  if (tmp_hash == NULL) {
+    avb_error("Allocation failure in avb_aftl_chain_border_right.\n");
+    return false;
+  }
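+  /* Along the right border every proof entry is a left sibling, so the
+     running hash is always passed as the right child. */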
+  for (i = 0; i < proof_entry_count; i++) {
+    retval = avb_aftl_rfc6962_hash_children(proof + (i * AFTL_HASH_SIZE),
+                                            AFTL_HASH_SIZE,
+                                            tmp,
+                                            AFTL_HASH_SIZE,
+                                            tmp_hash);
+    if (!retval) {
+      avb_error("Failed to hash Merkle tree children.\n");
+      break;
+    }
+    tmp = tmp_hash;
+  }
+
+  if (retval) avb_memcpy(hash, tmp, AFTL_HASH_SIZE);
+
+  avb_free(tmp_hash);
+  return retval;
+}
+
+/* Computes a subtree hash on or below the tree's right border. */
+bool avb_aftl_chain_inner(uint8_t* seed,
+                          uint64_t seed_size,
+                          uint8_t* proof,
+                          uint32_t proof_entry_count,
+                          uint64_t leaf_index,
+                          uint8_t* hash) {
+  size_t i;
+  uint8_t* tmp;
+  uint8_t* tmp_hash;
+  bool retval = true; /* Remains true when proof_entry_count is zero. */
+
+  avb_assert(seed_size == AFTL_HASH_SIZE);
+  avb_assert(seed != NULL && proof != NULL && hash != NULL);
+
+  tmp = seed;
+  tmp_hash = (uint8_t*)avb_malloc(AFTL_HASH_SIZE);
+  if (tmp_hash == NULL) {
+    avb_error("Allocation failure in avb_aftl_chain_inner.\n");
+    return false;
+  }
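+  /* Bit i of leaf_index gives the position of the running hash at level i:
+     0 means it is a left child and the proof hash is its right sibling,
+     1 means the reverse. */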
+  for (i = 0; i < proof_entry_count; i++) {
+    if ((leaf_index >> i & 1) == 0) {
+      retval = avb_aftl_rfc6962_hash_children(tmp,
+                                              seed_size,
+                                              proof + (i * AFTL_HASH_SIZE),
+                                              AFTL_HASH_SIZE,
+                                              tmp_hash);
+    } else {
+      retval = avb_aftl_rfc6962_hash_children(proof + (i * AFTL_HASH_SIZE),
+                                              AFTL_HASH_SIZE,
+                                              tmp,
+                                              seed_size,
+                                              tmp_hash);
+    }
+    if (!retval) {
+      avb_error("Failed to hash Merkle tree children.\n");
+      break;
+    }
+    tmp = tmp_hash;
+  }
+  if (retval) avb_memcpy(hash, tmp, AFTL_HASH_SIZE);
+  avb_free(tmp_hash);
+  return retval;
+}
+
+/* Counts leading zeros. Used in Merkle tree hash validation. */
+unsigned int avb_aftl_count_leading_zeros(uint64_t val) {
+  unsigned int r = 0;
+  if (val == 0) return 64;
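+  /* Binary search over the bit width: if the top k bits are all zero,
+     shift left by k and add k to the count, halving k at each step. */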
+  if (!(val & 0xffffffff00000000u)) {
+    val <<= 32;
+    r += 32;
+  }
+  if (!(val & 0xffff000000000000u)) {
+    val <<= 16;
+    r += 16;
+  }
+  if (!(val & 0xff00000000000000u)) {
+    val <<= 8;
+    r += 8;
+  }
+  if (!(val & 0xf000000000000000u)) {
+    val <<= 4;
+    r += 4;
+  }
+  if (!(val & 0xc000000000000000u)) {
+    val <<= 2;
+    r += 2;
+  }
+  if (!(val & 0x8000000000000000u)) {
+    val <<= 1;
+    r += 1;
+  }
+
+  return r;
+}
+
+/* Calculates the expected Merkle tree hash. */
+bool avb_aftl_root_from_icp(uint64_t leaf_index,
+                            uint64_t tree_size,
+                            uint8_t proof[][AFTL_HASH_SIZE],
+                            uint32_t proof_entry_count,
+                            uint8_t* leaf_hash,
+                            uint64_t leaf_hash_size,
+                            uint8_t* root_hash) {
+  uint64_t inner_proof_size;
+  uint64_t border_proof_size;
+  size_t i;
+  uint8_t hash[AFTL_HASH_SIZE];
+  uint8_t* inner_proof;
+  uint8_t* border_proof;
+  bool retval;
+
+  avb_assert(proof_entry_count != 0);
+  avb_assert(leaf_hash_size != 0);
+  avb_assert(proof != NULL && leaf_hash != NULL && root_hash != NULL);
+
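+  /* Per RFC 6962, the "inner" part of the proof covers the levels at which
+     the paths from leaf_index and from the last leaf (tree_size - 1)
+     differ, i.e. the bit length of leaf_index ^ (tree_size - 1). */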
+  /* This cannot overflow. */
+  inner_proof_size =
+      64 - avb_aftl_count_leading_zeros(leaf_index ^ (tree_size - 1));
+
+  /* Check for integer underflow. */
+  if ((proof_entry_count - inner_proof_size) > proof_entry_count) {
+    avb_error("Invalid proof entry count value.\n");
+    return false;
+  }
+  border_proof_size = proof_entry_count - inner_proof_size;
+  /* Split the proof into two parts based on the calculated pivot point. */
+  inner_proof = (uint8_t*)avb_malloc(inner_proof_size * AFTL_HASH_SIZE);
+  if (inner_proof == NULL) {
+    avb_error("Allocation failure in avb_aftl_root_from_icp.\n");
+    return false;
+  }
+  border_proof = (uint8_t*)avb_malloc(border_proof_size * AFTL_HASH_SIZE);
+  if (border_proof == NULL) {
+    avb_free(inner_proof);
+    avb_error("Allocation failure in avb_aftl_root_from_icp.\n");
+    return false;
+  }
+
+  for (i = 0; i < inner_proof_size; i++) {
+    avb_memcpy(inner_proof + (AFTL_HASH_SIZE * i), proof[i], AFTL_HASH_SIZE);
+  }
+  for (i = 0; i < border_proof_size; i++) {
+    avb_memcpy(border_proof + (AFTL_HASH_SIZE * i),
+               proof[inner_proof_size + i],
+               AFTL_HASH_SIZE);
+  }
+
+  /* Calculate the root hash and store it in root_hash. */
+  retval = avb_aftl_chain_inner(leaf_hash,
+                                leaf_hash_size,
+                                inner_proof,
+                                inner_proof_size,
+                                leaf_index,
+                                hash);
+  if (retval)
+    retval = avb_aftl_chain_border_right(
+        hash, AFTL_HASH_SIZE, border_proof, border_proof_size, root_hash);
+
+  avb_free(inner_proof);
+  avb_free(border_proof);
+  return retval;
+}
diff --git a/libavb_aftl/avb_aftl_util.h b/libavb_aftl/avb_aftl_util.h
new file mode 100644
index 0000000..ae79643
--- /dev/null
+++ b/libavb_aftl/avb_aftl_util.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifdef AVB_INSIDE_LIBAVB_AFTL_H
+#error "You can't include avb_aftl_util.h in the public header libavb_aftl.h."
+#endif
+
+#ifndef AVB_COMPILATION
+#error "Never include this file, it may only be used from internal avb code."
+#endif
+
+#ifndef AVB_AFTL_UTIL_H_
+#define AVB_AFTL_UTIL_H_
+
+#include "avb_aftl_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Performs a SHA256 hash operation on data. */
+bool avb_aftl_sha256(uint8_t* data,                 /* Data to be hashed. */
+                     uint64_t length,               /* Size of data. */
+                     uint8_t hash[AFTL_HASH_SIZE]); /* Resulting SHA256 hash. */
+
+/* Calculates a SHA256 hash of the TrillianLogRootDescriptor in icp_entry. */
+bool avb_aftl_hash_log_root_descriptor(
+    AftlIcpEntry* icp_entry, /* The icp_entry containing the descriptor. */
+    uint8_t* hash);          /* The resulting hash of the descriptor data. */
+
+/* RFC 6962 Hashing function for leaves of a Merkle tree. */
+bool avb_aftl_rfc6962_hash_leaf(
+    uint8_t* leaf,      /* The Merkle tree leaf data to be hashed. */
+    uint64_t leaf_size, /* Size of the leaf data. */
+    uint8_t* hash);     /* Resulting RFC 6962 hash of the leaf data. */
+
+/* Computes an inner hash as detailed by https://tools.ietf.org/html/rfc6962. */
+bool avb_aftl_rfc6962_hash_children(
+    uint8_t* left_child,           /* The left child node data. */
+    uint64_t left_child_size,      /* Size of the left child node data. */
+    uint8_t* right_child,          /* The right child node data. */
+    uint64_t right_child_size,     /* Size of the right child node data. */
+    uint8_t hash[AFTL_HASH_SIZE]); /* Resulting hash of the children. */
+
+/* Computes a subtree hash along the tree's right border. */
+bool avb_aftl_chain_border_right(
+    uint8_t* seed,              /* Data containing the starting hash. */
+    uint64_t seed_size,         /* Size of the starting hash data. */
+    uint8_t* proof,             /* The hashes in the inclusion proof. */
+    uint32_t proof_entry_count, /* Number of inclusion proof entries. */
+    uint8_t* hash);             /* Resulting subtree hash. */
+
+/* Computes a subtree hash on or below the tree's right border. */
+bool avb_aftl_chain_inner(
+    uint8_t* seed,              /* Data containing the starting hash. */
+    uint64_t seed_size,         /* Size of the starting hash data. */
+    uint8_t* proof,             /* The hashes in the inclusion proof. */
+    uint32_t proof_entry_count, /* Number of inclusion proof entries. */
+    uint64_t leaf_index,        /* The current Merkle tree leaf index. */
+    uint8_t* hash);             /* Resulting subtree hash. */
+
+/* Counts leading zeros. Used in Merkle tree hash validation. */
+unsigned int avb_aftl_count_leading_zeros(
+    uint64_t val); /* Value to count leading zeros of. */
+
+/* Calculates the expected Merkle tree hash. */
+bool avb_aftl_root_from_icp(
+    uint64_t leaf_index,             /* The leaf index in the Merkle tree. */
+    uint64_t tree_size,              /* The size of the Merkle tree. */
+    uint8_t proof[][AFTL_HASH_SIZE], /* Inclusion proof hash data. */
+    uint32_t proof_entry_count,      /* Number of inclusion proof hashes. */
+    uint8_t* leaf_hash,              /* The leaf hash to prove inclusion of. */
+    uint64_t leaf_hash_size,         /* Size of the leaf hash. */
+    uint8_t* root_hash);             /* The resulting tree root hash. */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* AVB_AFTL_UTIL_H_ */
diff --git a/libavb_aftl/avb_aftl_validate.c b/libavb_aftl/avb_aftl_validate.c
new file mode 100644
index 0000000..83b3f78
--- /dev/null
+++ b/libavb_aftl/avb_aftl_validate.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <libavb/avb_crypto.h>
+#include <libavb/avb_rsa.h>
+#include <libavb/avb_sha.h>
+#include <libavb/avb_util.h>
+
+#include "avb_aftl_types.h"
+#include "avb_aftl_util.h"
+#include "avb_aftl_validate.h"
+
+/* Verifies that the logged VBMeta hash matches the one on device. */
+bool avb_aftl_verify_vbmeta_hash(uint8_t* vbmeta,
+                                 size_t vbmeta_size,
+                                 AftlIcpEntry* icp_entry) {
+  uint8_t vbmeta_hash[AFTL_HASH_SIZE];
+
+  avb_assert(vbmeta != NULL && icp_entry != NULL);
+
+  if (!avb_aftl_sha256(vbmeta, vbmeta_size, vbmeta_hash)) return false;
+
+  /* Only SHA256 hashes are currently supported. If the vbmeta hash
+     size is not AFTL_HASH_SIZE, return false. */
+  if (icp_entry->fw_info_leaf.vbmeta_hash_size != AFTL_HASH_SIZE) {
+    avb_error("Invalid VBMeta hash size.\n");
+    return false;
+  }
+  /* Return whether the calculated VBMeta hash matches the stored one. */
+  return avb_safe_memcmp(vbmeta_hash,
+                         icp_entry->fw_info_leaf.vbmeta_hash,
+                         AFTL_HASH_SIZE) == 0;
+}
+
+/* Extracts the raw data from the FirmwareInfo structure. */
+static bool get_raw_fw_image_info(AftlIcpEntry* icp_entry,
+                                  uint8_t* buffer,
+                                  size_t buffer_size) {
+  uint64_t offset;
+  uint64_t calc_fw_image_size;
+
+  avb_assert(icp_entry != NULL && buffer != NULL);
+
+  if (icp_entry->fw_info_leaf_size != buffer_size) {
+    avb_error("Invalid size passed to get_raw_fw_image_info.\n");
+    return false;
+  }
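+  /* Recompute the expected leaf size from the individual field sizes,
+     using overflow-checked additions. */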
+  calc_fw_image_size = icp_entry->fw_info_leaf.vbmeta_hash_size;
+  if (calc_fw_image_size != AFTL_HASH_SIZE) {
+    avb_error("Invalid vbmeta hash size.\n");
+    return false;
+  }
+  if (!avb_safe_add_to(&calc_fw_image_size,
+                       icp_entry->fw_info_leaf.version_incremental_size)) {
+    avb_error("Invalid version incremental size.\n");
+    return false;
+  }
+  if (!avb_safe_add_to(&calc_fw_image_size,
+                       icp_entry->fw_info_leaf.platform_key_size)) {
+    avb_error("Invalid platform key size.\n");
+    return false;
+  }
+  if (!avb_safe_add_to(&calc_fw_image_size,
+                       icp_entry->fw_info_leaf.manufacturer_key_hash_size)) {
+    avb_error("Invalid manufacturer key hash size.\n");
+    return false;
+  }
+  if (!avb_safe_add_to(&calc_fw_image_size,
+                       icp_entry->fw_info_leaf.description_size)) {
+    avb_error("Invalid description size.\n");
+    return false;
+  }
+
+  offset = 0;
+
+  if (icp_entry->fw_info_leaf_size != calc_fw_image_size) {
+    avb_error("Invalid FirmwareInfo leaf size.\n");
+    return false;
+  }
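+  /* Copy the FirmwareInfo fields into buffer in their serialized order:
+     vbmeta_hash | version_incremental | platform_key |
+     manufacturer_key_hash | description. */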
+  if (icp_entry->fw_info_leaf.vbmeta_hash != NULL) {
+    avb_memcpy(buffer,
+               icp_entry->fw_info_leaf.vbmeta_hash,
+               icp_entry->fw_info_leaf.vbmeta_hash_size);
+    offset = icp_entry->fw_info_leaf.vbmeta_hash_size;
+  }
+
+  if (icp_entry->fw_info_leaf.version_incremental != NULL) {
+    avb_memcpy(buffer + offset,
+               icp_entry->fw_info_leaf.version_incremental,
+               icp_entry->fw_info_leaf.version_incremental_size);
+    if (!avb_safe_add_to(&offset,
+                         icp_entry->fw_info_leaf.version_incremental_size)) {
+      avb_error("Invalid version incremental size.\n");
+      return false;
+    }
+  }
+
+  if (icp_entry->fw_info_leaf.platform_key != NULL) {
+    avb_memcpy(buffer + offset,
+               icp_entry->fw_info_leaf.platform_key,
+               icp_entry->fw_info_leaf.platform_key_size);
+    if (!avb_safe_add_to(&offset, icp_entry->fw_info_leaf.platform_key_size)) {
+      avb_error("Invalid platform key size.\n");
+      return false;
+    }
+  }
+
+  if (icp_entry->fw_info_leaf.manufacturer_key_hash != NULL) {
+    avb_memcpy(buffer + offset,
+               icp_entry->fw_info_leaf.manufacturer_key_hash,
+               icp_entry->fw_info_leaf.manufacturer_key_hash_size);
+    if (!avb_safe_add_to(&offset,
+                         icp_entry->fw_info_leaf.manufacturer_key_hash_size)) {
+      avb_error("Invalid manufacturer key hash size.\n");
+      return false;
+    }
+  }
+
+  if (icp_entry->fw_info_leaf.description != NULL) {
+    avb_memcpy(buffer + offset,
+               icp_entry->fw_info_leaf.description,
+               icp_entry->fw_info_leaf.description_size);
+    if (!avb_safe_add_to(&offset, icp_entry->fw_info_leaf.description_size)) {
+      avb_error("Invalid description size.\n");
+      return false;
+    }
+  }
+  return true;
+}
+
+/* Verifies the Merkle tree root hash. */
+bool avb_aftl_verify_icp_root_hash(AftlIcpEntry* icp_entry) {
+  uint8_t leaf_hash[AFTL_HASH_SIZE];
+  uint8_t result_hash[AFTL_HASH_SIZE];
+  uint8_t* buffer;
+
+  avb_assert(icp_entry != NULL);
+  if (icp_entry->fw_info_leaf_size > AFTL_MAX_FW_INFO_LEAF_SIZE) {
+    avb_error("Invalid FirmwareInfo leaf size\n");
+    return false;
+  }
+  buffer = (uint8_t*)avb_malloc(icp_entry->fw_info_leaf_size);
+  if (buffer == NULL) {
+    avb_error("Allocation failure in avb_aftl_verify_icp_root_hash\n");
+    return false;
+  }
+  /* Extract the raw data from the FirmwareInfo leaf */
+  if (!get_raw_fw_image_info(icp_entry, buffer, icp_entry->fw_info_leaf_size)) {
+    avb_free(buffer);
+    return false;
+  }
+  /* Calculate the RFC 6962 hash of the seed entry. */
+  if (!avb_aftl_rfc6962_hash_leaf(
+          buffer, icp_entry->fw_info_leaf_size, leaf_hash)) {
+    avb_free(buffer);
+    return false;
+  }
+  avb_free(buffer);
+  /* Calculate the Merkle tree's root hash. */
+  if (!avb_aftl_root_from_icp(icp_entry->leaf_index,
+                              icp_entry->log_root_descriptor.tree_size,
+                              icp_entry->proofs,
+                              icp_entry->proof_hash_count,
+                              leaf_hash,
+                              AFTL_HASH_SIZE,
+                              result_hash))
+    return false;
+  /* Return whether the calculated root hash matches the stored one. */
+  return (avb_safe_memcmp(result_hash,
+                          icp_entry->log_root_descriptor.root_hash,
+                          AFTL_HASH_SIZE) == 0);
+}
+
+/* Verifies the log root signature for the transparency log submission. */
+bool avb_aftl_verify_entry_signature(const uint8_t* key,
+                                     size_t key_num_bytes,
+                                     AftlIcpEntry* icp_entry) {
+  uint8_t* sig;
+  size_t sig_num_bytes;
+  uint8_t log_root_hash[AFTL_HASH_SIZE];
+  size_t log_root_hash_num_bytes;
+  const AvbAlgorithmData* algorithm_data;
+
+  avb_assert(key != NULL && icp_entry != NULL);
+
+  /* Extract the log root signature from the AftlIcpEntry. */
+  sig = icp_entry->log_root_signature;
+  if (sig == NULL) {
+    avb_error("Invalid log root signature.\n");
+    return false;
+  }
+  sig_num_bytes = icp_entry->log_root_sig_size;
+  log_root_hash_num_bytes = AFTL_HASH_SIZE;
+
+  /* Calculate the SHA256 of the TrillianLogRootDescriptor. */
+  if (!avb_aftl_hash_log_root_descriptor(icp_entry, log_root_hash))
+    return false;
+
+  /* algorithm_data is used to calculate the padding for signature
+     verification. */
+  algorithm_data = avb_get_algorithm_data(AVB_ALGORITHM_TYPE_SHA256_RSA4096);
+  if (algorithm_data == NULL) {
+    avb_error("Failed to get algorithm data.\n");
+    return false;
+  }
+  return avb_rsa_verify(key,
+                        key_num_bytes,
+                        sig,
+                        sig_num_bytes,
+                        log_root_hash,
+                        log_root_hash_num_bytes,
+                        algorithm_data->padding,
+                        algorithm_data->padding_len);
+}
diff --git a/libavb_aftl/avb_aftl_validate.h b/libavb_aftl/avb_aftl_validate.h
new file mode 100644
index 0000000..29b3b32
--- /dev/null
+++ b/libavb_aftl/avb_aftl_validate.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if !defined(AVB_INSIDE_LIBAVB_AFTL_H) && !defined(AVB_COMPILATION)
+#error "Never include this file directly, include libavb_aftl/libavb_aftl.h."
+#endif
+
+#ifndef AVB_AFTL_VALIDATE_H_
+#define AVB_AFTL_VALIDATE_H_
+
+#include <libavb/libavb.h>
+#include "avb_aftl_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Maximum value of a 64-bit unsigned integer. */
+#define AFTL_ULONG_MAX 0xfffffffffffffffful
+/* Upper bound on the size of a serialized FirmwareInfo leaf. */
+#define AFTL_MAX_FW_INFO_LEAF_SIZE 16384
+/* Upper bound on the size of a serialized TrillianLogRootDescriptor:
+   the maximum uint16 metadata size plus a SHA256 root hash. */
+#define AFTL_MAX_LOG_ROOT_DESCRIPTOR_SIZE (65535 + AFTL_HASH_SIZE)
+
+/* Verifies that the logged vbmeta hash matches the one on device. */
+bool avb_aftl_verify_vbmeta_hash(
+    uint8_t* vbmeta,          /* Buffer containing the vbmeta data. */
+    size_t vbmeta_size,       /* Size of the vbmeta buffer. */
+    AftlIcpEntry* icp_entry); /* Pointer to the AftlIcpEntry to verify. */
+
+/* Verifies the Merkle tree root hash. */
+bool avb_aftl_verify_icp_root_hash(
+    AftlIcpEntry* icp_entry); /* Pointer to the AftlIcpEntry to verify. */
+
+/* Verifies the log root signature for the transparency log submission. */
+bool avb_aftl_verify_entry_signature(
+    const uint8_t* key,       /* Transparency log public key data. */
+    size_t key_num_bytes,     /* Size of the key data. */
+    AftlIcpEntry* icp_entry); /* Pointer to the AftlIcpEntry to verify. */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* AVB_AFTL_VALIDATE_H_ */
diff --git a/libavb_aftl/avb_ops_aftl.h b/libavb_aftl/avb_ops_aftl.h
new file mode 100644
index 0000000..a2f5577
--- /dev/null
+++ b/libavb_aftl/avb_ops_aftl.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if !defined(AVB_INSIDE_LIBAVB_AFTL_H) && !defined(AVB_COMPILATION)
+#error "Never include this file directly, include libavb_aftl/libavb_aftl.h."
+#endif
+
+#ifndef AVB_AFTL_OPS_H_
+#define AVB_AFTL_OPS_H_
+
+#include <libavb/libavb.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct AvbAftlOps;
+typedef struct AvbAftlOps AvbAftlOps;
+
+/* An extension to AvbOps required by the new AFTL validation flow.
+   TODO(danielaustin): update the AFTL readme link once it is submitted. */
+struct AvbAftlOps {
+  /* Operations from libavb. */
+  AvbOps* ops;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* AVB_AFTL_OPS_H_ */
diff --git a/libavb_aftl/libavb_aftl.h b/libavb_aftl/libavb_aftl.h
new file mode 100644
index 0000000..395c74c
--- /dev/null
+++ b/libavb_aftl/libavb_aftl.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef LIBAVB_AFTL_H_
+#define LIBAVB_AFTL_H_
+
+#include <libavb/libavb.h>
+
+/* The AVB_INSIDE_LIBAVB_AFTL_H preprocessor symbol is used to ensure
+ * that library users include only this file. All public interfaces,
+ * and only public interfaces, must be included here.
+ */
+
+#define AVB_INSIDE_LIBAVB_AFTL_H
+#include "avb_ops_aftl.h"
+#undef AVB_INSIDE_LIBAVB_AFTL_H
+
+#endif /* LIBAVB_AFTL_H_ */
diff --git a/proto/README.md b/proto/README.md
new file mode 100644
index 0000000..f40db7a
--- /dev/null
+++ b/proto/README.md
@@ -0,0 +1,38 @@
+# Android Firmware Transparency Log Proto Definitions
+---
+
+This directory contains the proto definitions required to communicate with an
+AFTL server. Two of them (api.proto and aftl.proto) define the messages needed
+to communicate with the AFTL Trillian personality; the remainder are their
+dependencies. The original repository and purpose of each proto file are as
+follows:
+
+* aftl.proto
+   <!-- TODO(danielaustin): Add detailed message descriptions. -->
+   Contains messages used by the AFTL frontend and the Trillian log.
+* api.proto
+   <!-- TODO(danielaustin): Add detailed message descriptions. -->
+   Contains the messages to communicate through the AFTL personality.
+* crypto/keyspb/keyspb.proto
+   From https://github.com/google/trillian
+   Dependency of trillian.proto
+   Contains the PublicKey message definition used by Tree.
+* crypto/sigpb/sigpb.proto
+   From https://github.com/google/trillian
+   Dependency of trillian.proto and aftl.proto
+   For trillian.proto, contains the DigitallySigned message used by Tree and
+   SignedEntryTimestamp. For aftl.proto, contains the DigitallySigned message
+   used by SignedFirmwareInfo.
+* trillian.proto
+   From https://github.com/google/trillian
+   Dependency of aftl.proto
+   For aftl.proto, contains message definitions for SignedLogRoot.
+* aftl_google/api/annotations.proto
+   From https://github.com/googleapis/googleapis
+   Used to get access to google.api.http options.
+* aftl_google/api/http.proto
+   From https://github.com/googleapis/googleapis
+   Dependency of aftl_google/api/annotations.proto
+   Contains the HttpRule message that extends MethodOptions.
+* aftl_google/rpc/status.proto
+   From https://github.com/googleapis/googleapis
+   Contains the Status message definition used for error reporting.
diff --git a/proto/__init__.py b/proto/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/proto/__init__.py
diff --git a/proto/aftl.proto b/proto/aftl.proto
new file mode 100644
index 0000000..41f1148
--- /dev/null
+++ b/proto/aftl.proto
@@ -0,0 +1,112 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package aftl;
+option go_package = "proto";
+
+import "trillian.proto";
+import "crypto/sigpb/sigpb.proto";
+import "google/protobuf/timestamp.proto";
+
+// These messages are used both by the frontend API and the Trillian log.
+message FirmwareInfo {
+  // This is the SHA256 hash of vbmeta.
+  bytes vbmeta_hash = 1;
+
+  // Subcomponent of the build fingerprint as defined at
+  // https://source.android.com/compatibility/android-cdd#3_2_2_build_parameters.
+  // For example, a Pixel device with the following build fingerprint
+  // google/crosshatch/crosshatch:9/PQ3A.190605.003/5524043:user/release-keys,
+  // would have 5524043 for the version incremental.
+  string version_incremental = 2;
+
+  // Public key of the platform. This is the same key used to sign the vbmeta.
+  bytes platform_key = 3;
+
+  // SHA256 of the manufacturer public key (DER-encoded, x509
+  // subjectPublicKeyInfo format). The public key MUST already be in the list
+  // of root keys known and trusted by the AFTL.
+  // Internal: This field is required to be able to identify which manufacturer
+  // this request is coming from.
+  bytes manufacturer_key_hash = 4;
+
+  // Free form description field. It can be used to annotate this message with
+  // further context on the build (e.g., carrier specific build).
+  string description = 5;
+}
+
+message SignedFirmwareInfo {
+  FirmwareInfo info = 1;
+
+  // Signature of the info field, using manufacturer_pub_key.
+  // For the signature, info is first serialized to JSON. It is not
+  // expected to be able to reconstruct the info field from scratch.
+  // When verifying the inclusion proof associated with the info, it is
+  // expected that the leaf is provided.
+  sigpb.DigitallySigned info_signature = 2;
+}
+
+message FirmwareImageInfo {
+  // This is the SHA256 hash of vbmeta.
+  bytes vbmeta_hash = 1;
+
+  // SHA256 hash of the complete binary image. In case of Pixel, this would be
+  // the hash of the ZIP file that is offered for download at:
+  // https://developers.google.com/android/images
+  bytes hash = 2;
+
+  // Build fingerprint, e.g. in case of Pixel
+  // google/crosshatch/crosshatch:9/PQ3A.190605.003/5524043:user/release-keys
+  // See https://source.android.com/compatibility/android-cdd.html#3_2_2_build_parameters
+  // for the expected format of this field.
+  string build_fingerprint = 3;
+}
+
+message SignedFirmwareImageInfo {
+  FirmwareImageInfo image_info = 1;
+  sigpb.DigitallySigned image_info_signature = 2;
+}
+
+
+message InclusionProof {
+  trillian.Proof proof = 1;
+  trillian.SignedLogRoot sth = 2;
+}
+
+// Trillian-specific data types
+message Leaf {
+  int32 version = 1;
+
+  // Timestamp when the entry was added to the log.
+  google.protobuf.Timestamp timestamp = 2;
+
+  oneof value {
+    bytes vbmeta = 3;
+    FirmwareInfoAnnotation fw_info = 4;
+    FirmwareImageInfoAnnotation fw_image_info = 5;
+  }
+}
+
+message FirmwareInfoAnnotation {
+  SignedFirmwareInfo info = 1;
+}
+
+message FirmwareImageInfoAnnotation {
+  SignedFirmwareImageInfo info = 1;
+
+  // URL of the firmware image in the Cloud Storage bucket populated by AFTL.
+  string url = 2;
+}
diff --git a/proto/aftl_google/__init__.py b/proto/aftl_google/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/proto/aftl_google/__init__.py
diff --git a/proto/aftl_google/api/__init__.py b/proto/aftl_google/api/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/proto/aftl_google/api/__init__.py
diff --git a/proto/aftl_google/api/annotations.proto b/proto/aftl_google/api/annotations.proto
new file mode 100644
index 0000000..8e90cc8
--- /dev/null
+++ b/proto/aftl_google/api/annotations.proto
@@ -0,0 +1,31 @@
+// Copyright (c) 2015, Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "aftl_google/api/http.proto";
+import "google/protobuf/descriptor.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
+option java_multiple_files = true;
+option java_outer_classname = "AnnotationsProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+extend google.protobuf.MethodOptions {
+  // See `HttpRule`.
+  HttpRule http = 72295728;
+}
diff --git a/proto/aftl_google/api/annotations_pb2.py b/proto/aftl_google/api/annotations_pb2.py
new file mode 100644
index 0000000..2982db1
--- /dev/null
+++ b/proto/aftl_google/api/annotations_pb2.py
@@ -0,0 +1,47 @@
+# pylint: skip-file
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: aftl_google/api/annotations.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from proto.aftl_google.api import http_pb2 as aftl__google_dot_api_dot_http__pb2
+from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='aftl_google/api/annotations.proto',
+  package='google.api',
+  syntax='proto3',
+  serialized_options=_b('\n\016com.google.apiB\020AnnotationsProtoP\001ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\242\002\004GAPI'),
+  serialized_pb=_b('\n!aftl_google/api/annotations.proto\x12\ngoogle.api\x1a\x1a\x61\x66tl_google/api/http.proto\x1a google/protobuf/descriptor.proto:E\n\x04http\x12\x1e.google.protobuf.MethodOptions\x18\xb0\xca\xbc\" \x01(\x0b\x32\x14.google.api.HttpRuleBn\n\x0e\x63om.google.apiB\x10\x41nnotationsProtoP\x01ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\xa2\x02\x04GAPIb\x06proto3')
+  ,
+  dependencies=[aftl__google_dot_api_dot_http__pb2.DESCRIPTOR,google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
+
+
+HTTP_FIELD_NUMBER = 72295728
+http = _descriptor.FieldDescriptor(
+  name='http', full_name='google.api.http', index=0,
+  number=72295728, type=11, cpp_type=10, label=1,
+  has_default_value=False, default_value=None,
+  message_type=None, enum_type=None, containing_type=None,
+  is_extension=True, extension_scope=None,
+  serialized_options=None, file=DESCRIPTOR)
+
+DESCRIPTOR.extensions_by_name['http'] = http
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+http.message_type = aftl__google_dot_api_dot_http__pb2._HTTPRULE
+google_dot_protobuf_dot_descriptor__pb2.MethodOptions.RegisterExtension(http)
+
+DESCRIPTOR._options = None
+# @@protoc_insertion_point(module_scope)
diff --git a/proto/aftl_google/api/annotations_pb2_grpc.py b/proto/aftl_google/api/annotations_pb2_grpc.py
new file mode 100644
index 0000000..73636b2
--- /dev/null
+++ b/proto/aftl_google/api/annotations_pb2_grpc.py
@@ -0,0 +1,4 @@
+# pylint: skip-file
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
diff --git a/proto/aftl_google/api/http.proto b/proto/aftl_google/api/http.proto
new file mode 100644
index 0000000..b2977f5
--- /dev/null
+++ b/proto/aftl_google/api/http.proto
@@ -0,0 +1,376 @@
+// Copyright 2019 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.api;
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
+option java_multiple_files = true;
+option java_outer_classname = "HttpProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+// Defines the HTTP configuration for an API service. It contains a list of
+// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
+// to one or more HTTP REST API methods.
+message Http {
+  // A list of HTTP configuration rules that apply to individual API methods.
+  //
+  // **NOTE:** All service configuration rules follow "last one wins" order.
+  repeated HttpRule rules = 1;
+
+  // When set to true, URL path parameters will be fully URI-decoded except in
+  // cases of single segment matches in reserved expansion, where "%2F" will be
+  // left encoded.
+  //
+  // The default behavior is to not decode RFC 6570 reserved characters in multi
+  // segment matches.
+  bool fully_decode_reserved_expansion = 2;
+}
+
+// # gRPC Transcoding
+//
+// gRPC Transcoding is a feature for mapping between a gRPC method and one or
+// more HTTP REST endpoints. It allows developers to build a single API service
+// that supports both gRPC APIs and REST APIs. Many systems, including [Google
+// APIs](https://github.com/googleapis/googleapis),
+// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC
+// Gateway](https://github.com/grpc-ecosystem/grpc-gateway),
+// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature
+// and use it for large scale production services.
+//
+// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies
+// how different portions of the gRPC request message are mapped to the URL
+// path, URL query parameters, and HTTP request body. It also controls how the
+// gRPC response message is mapped to the HTTP response body. `HttpRule` is
+// typically specified as an `google.api.http` annotation on the gRPC method.
+//
+// Each mapping specifies a URL path template and an HTTP method. The path
+// template may refer to one or more fields in the gRPC request message, as long
+// as each field is a non-repeated field with a primitive (non-message) type.
+// The path template controls how fields of the request message are mapped to
+// the URL path.
+//
+// Example:
+//
+//     service Messaging {
+//       rpc GetMessage(GetMessageRequest) returns (Message) {
+//         option (google.api.http) = {
+//             get: "/v1/{name=messages/*}"
+//         };
+//       }
+//     }
+//     message GetMessageRequest {
+//       string name = 1; // Mapped to URL path.
+//     }
+//     message Message {
+//       string text = 1; // The resource content.
+//     }
+//
+// This enables an HTTP REST to gRPC mapping as below:
+//
+// HTTP | gRPC
+// -----|-----
+// `GET /v1/messages/123456`  | `GetMessage(name: "messages/123456")`
+//
+// Any fields in the request message which are not bound by the path template
+// automatically become HTTP query parameters if there is no HTTP request body.
+// For example:
+//
+//     service Messaging {
+//       rpc GetMessage(GetMessageRequest) returns (Message) {
+//         option (google.api.http) = {
+//             get:"/v1/messages/{message_id}"
+//         };
+//       }
+//     }
+//     message GetMessageRequest {
+//       message SubMessage {
+//         string subfield = 1;
+//       }
+//       string message_id = 1; // Mapped to URL path.
+//       int64 revision = 2;    // Mapped to URL query parameter `revision`.
+//       SubMessage sub = 3;    // Mapped to URL query parameter `sub.subfield`.
+//     }
+//
+// This enables a HTTP JSON to RPC mapping as below:
+//
+// HTTP | gRPC
+// -----|-----
+// `GET /v1/messages/123456?revision=2&sub.subfield=foo` |
+// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield:
+// "foo"))`
+//
+// Note that fields which are mapped to URL query parameters must have a
+// primitive type or a repeated primitive type or a non-repeated message type.
+// In the case of a repeated type, the parameter can be repeated in the URL
+// as `...?param=A&param=B`. In the case of a message type, each field of the
+// message is mapped to a separate parameter, such as
+// `...?foo.a=A&foo.b=B&foo.c=C`.
+//
+// For HTTP methods that allow a request body, the `body` field
+// specifies the mapping. Consider a REST update method on the
+// message resource collection:
+//
+//     service Messaging {
+//       rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
+//         option (google.api.http) = {
+//           patch: "/v1/messages/{message_id}"
+//           body: "message"
+//         };
+//       }
+//     }
+//     message UpdateMessageRequest {
+//       string message_id = 1; // mapped to the URL
+//       Message message = 2;   // mapped to the body
+//     }
+//
+// The following HTTP JSON to RPC mapping is enabled, where the
+// representation of the JSON in the request body is determined by
+// protos JSON encoding:
+//
+// HTTP | gRPC
+// -----|-----
+// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
+// "123456" message { text: "Hi!" })`
+//
+// The special name `*` can be used in the body mapping to define that
+// every field not bound by the path template should be mapped to the
+// request body.  This enables the following alternative definition of
+// the update method:
+//
+//     service Messaging {
+//       rpc UpdateMessage(Message) returns (Message) {
+//         option (google.api.http) = {
+//           patch: "/v1/messages/{message_id}"
+//           body: "*"
+//         };
+//       }
+//     }
+//     message Message {
+//       string message_id = 1;
+//       string text = 2;
+//     }
+//
+//
+// The following HTTP JSON to RPC mapping is enabled:
+//
+// HTTP | gRPC
+// -----|-----
+// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
+// "123456" text: "Hi!")`
+//
+// Note that when using `*` in the body mapping, it is not possible to
+// have HTTP parameters, as all fields not bound by the path end in
+// the body. This makes this option more rarely used in practice when
+// defining REST APIs. The common usage of `*` is in custom methods
+// which don't use the URL at all for transferring data.
+//
+// It is possible to define multiple HTTP methods for one RPC by using
+// the `additional_bindings` option. Example:
+//
+//     service Messaging {
+//       rpc GetMessage(GetMessageRequest) returns (Message) {
+//         option (google.api.http) = {
+//           get: "/v1/messages/{message_id}"
+//           additional_bindings {
+//             get: "/v1/users/{user_id}/messages/{message_id}"
+//           }
+//         };
+//       }
+//     }
+//     message GetMessageRequest {
+//       string message_id = 1;
+//       string user_id = 2;
+//     }
+//
+// This enables the following two alternative HTTP JSON to RPC mappings:
+//
+// HTTP | gRPC
+// -----|-----
+// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
+// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id:
+// "123456")`
+//
+// ## Rules for HTTP mapping
+//
+// 1. Leaf request fields (recursive expansion of nested messages in the
+//    request message) are classified into three categories:
+//    - Fields referred to by the path template. They are passed via the URL
+//      path.
+//    - Fields referred to by the [HttpRule.body][google.api.HttpRule.body].
+//      They are passed via the HTTP request body.
+//    - All other fields are passed via the URL query parameters, and the
+//      parameter name is the field path in the request message. A repeated
+//      field can be represented as multiple query parameters under the same
+//      name.
+// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL
+//    query parameter; all fields are passed via the URL path and the HTTP
+//    request body.
+// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no
+//    HTTP request body; all fields are passed via the URL path and URL query
+//    parameters.
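+//
+// For instance, in the following sketch (the `CreateBook` method and its
+// messages are hypothetical), each leaf request field falls into exactly one
+// of the three categories above:
+//
+//     service Library {
+//       rpc CreateBook(CreateBookRequest) returns (Book) {
+//         option (google.api.http) = {
+//           post: "/v1/{parent=shelves/*}/books"
+//           body: "book"
+//         };
+//       }
+//     }
+//     message CreateBookRequest {
+//       string parent = 1;       // path template -> URL path
+//       Book book = 2;           // `body` -> HTTP request body
+//       bool validate_only = 3;  // neither -> URL query parameter
+//     }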
+//
+// ### Path template syntax
+//
+//     Template = "/" Segments [ Verb ] ;
+//     Segments = Segment { "/" Segment } ;
+//     Segment  = "*" | "**" | LITERAL | Variable ;
+//     Variable = "{" FieldPath [ "=" Segments ] "}" ;
+//     FieldPath = IDENT { "." IDENT } ;
+//     Verb     = ":" LITERAL ;
+//
+// The syntax `*` matches a single URL path segment. The syntax `**` matches
+// zero or more URL path segments, which must be the last part of the URL path
+// except the `Verb`.
+//
+// The syntax `Variable` matches part of the URL path as specified by its
+// template. A variable template must not contain other variables. If a variable
+// matches a single path segment, its template may be omitted, e.g. `{var}`
+// is equivalent to `{var=*}`.
+//
+// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL`
+// contains any reserved character, such characters should be percent-encoded
+// before the matching.
+//
+// If a variable contains exactly one path segment, such as `"{var}"` or
+// `"{var=*}"`, when such a variable is expanded into a URL path on the client
+// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The
+// server side does the reverse decoding. Such variables show up in the
+// [Discovery
+// Document](https://developers.google.com/discovery/v1/reference/apis) as
+// `{var}`.
+//
+// If a variable contains multiple path segments, such as `"{var=foo/*}"`
+// or `"{var=**}"`, when such a variable is expanded into a URL path on the
+// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded.
+// The server side does the reverse decoding, except "%2F" and "%2f" are left
+// unchanged. Such variables show up in the
+// [Discovery
+// Document](https://developers.google.com/discovery/v1/reference/apis) as
+// `{+var}`.
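+//
+// A few illustrative templates (not drawn from this file) that exercise the
+// grammar above:
+//
+//     /v1/messages/{message_id}         // single segment variable
+//     /v1/{name=shelves/*/books/*}      // variable spanning multiple segments
+//     /v1/{name=operations/**}:cancel   // `**` followed by a verb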
+//
+// ## Using gRPC API Service Configuration
+//
+// gRPC API Service Configuration (service config) is a configuration language
+// for configuring a gRPC service to become a user-facing product. The
+// service config is simply the YAML representation of the `google.api.Service`
+// proto message.
+//
+// As an alternative to annotating your proto file, you can configure gRPC
+// transcoding in your service config YAML files. You do this by specifying a
+// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same
+// effect as the proto annotation. This can be particularly useful if you
+// have a proto that is reused in multiple services. Note that any transcoding
+// specified in the service config will override any matching transcoding
+// configuration in the proto.
+//
+// Example:
+//
+//     http:
+//       rules:
+//         # Selects a gRPC method and applies HttpRule to it.
+//         - selector: example.v1.Messaging.GetMessage
+//           get: /v1/messages/{message_id}/{sub.subfield}
+//
+// ## Special notes
+//
+// When gRPC Transcoding is used to map gRPC methods to JSON REST endpoints,
+// the
+// proto to JSON conversion must follow the [proto3
+// specification](https://developers.google.com/protocol-buffers/docs/proto3#json).
+//
+// While the single segment variable follows the semantics of
+// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String
+// Expansion, the multi segment variable **does not** follow RFC 6570 Section
+// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion
+// does not expand special characters like `?` and `#`, which would lead
+// to invalid URLs. As a result, gRPC Transcoding uses a custom encoding
+// for multi segment variables.
+//
+// The path variables **must not** refer to any repeated or mapped field,
+// because client libraries are not capable of handling such variable expansion.
+//
+// The path variables **must not** capture the leading "/" character. The reason
+// is that the most common use case "{var}" does not capture the leading "/"
+// character. For consistency, all path variables must share the same behavior.
+//
+// Repeated message fields must not be mapped to URL query parameters, because
+// no client library can support such complicated mapping.
+//
+// If an API needs to use a JSON array for request or response body, it can map
+// the request or response body to a repeated field. However, some gRPC
+// Transcoding implementations may not support this feature.
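+//
+// For example (an illustrative sketch; `ListMessages` and its messages are
+// hypothetical), a list method could map its JSON array response directly to
+// a repeated field:
+//
+//     service Messaging {
+//       rpc ListMessages(ListMessagesRequest) returns (ListMessagesResponse) {
+//         option (google.api.http) = {
+//           get: "/v1/messages"
+//           response_body: "messages"
+//         };
+//       }
+//     }
+//     message ListMessagesResponse {
+//       repeated Message messages = 1;  // rendered as a JSON array
+//     }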
+message HttpRule {
+  // Selects a method to which this rule applies.
+  //
+  // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+  string selector = 1;
+
+  // Determines the URL pattern that is matched by this rule. This pattern can
+  // be used with any of the {get|put|post|delete|patch} methods. A custom
+  // method can be defined using the 'custom' field.
+  oneof pattern {
+    // Maps to HTTP GET. Used for listing and getting information about
+    // resources.
+    string get = 2;
+
+    // Maps to HTTP PUT. Used for replacing a resource.
+    string put = 3;
+
+    // Maps to HTTP POST. Used for creating a resource or performing an action.
+    string post = 4;
+
+    // Maps to HTTP DELETE. Used for deleting a resource.
+    string delete = 5;
+
+    // Maps to HTTP PATCH. Used for updating a resource.
+    string patch = 6;
+
+    // The custom pattern is used for specifying an HTTP method that is not
+    // included in the `pattern` field, such as HEAD, or "*" to leave the
+    // HTTP method unspecified for this rule. The wild-card rule is useful
+    // for services that provide content to Web (HTML) clients.
+    CustomHttpPattern custom = 8;
+  }
+
+  // The name of the request field whose value is mapped to the HTTP request
+  // body, or `*` for mapping all request fields not captured by the path
+  // pattern to the HTTP body, or omitted for not having any HTTP request body.
+  //
+  // NOTE: the referred field must be present at the top-level of the request
+  // message type.
+  string body = 7;
+
+  // Optional. The name of the response field whose value is mapped to the HTTP
+  // response body. When omitted, the entire response message will be used
+  // as the HTTP response body.
+  //
+  // NOTE: The referred field must be present at the top-level of the response
+  // message type.
+  string response_body = 12;
+
+  // Additional HTTP bindings for the selector. Nested bindings must
+  // not contain an `additional_bindings` field themselves (that is,
+  // the nesting may only be one level deep).
+  repeated HttpRule additional_bindings = 11;
+}
+
+// A custom pattern is used for defining a custom HTTP verb.
+message CustomHttpPattern {
+  // The name of this custom HTTP verb.
+  string kind = 1;
+
+  // The path matched by this custom verb.
+  string path = 2;
+}
diff --git a/proto/aftl_google/api/http_pb2.py b/proto/aftl_google/api/http_pb2.py
new file mode 100644
index 0000000..8cd65f3
--- /dev/null
+++ b/proto/aftl_google/api/http_pb2.py
@@ -0,0 +1,251 @@
+# pylint: skip-file
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: aftl_google/api/http.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='aftl_google/api/http.proto',
+  package='google.api',
+  syntax='proto3',
+  serialized_options=_b('\n\016com.google.apiB\tHttpProtoP\001ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\370\001\001\242\002\004GAPI'),
+  serialized_pb=_b('\n\x1a\x61\x66tl_google/api/http.proto\x12\ngoogle.api\"T\n\x04Http\x12#\n\x05rules\x18\x01 \x03(\x0b\x32\x14.google.api.HttpRule\x12\'\n\x1f\x66ully_decode_reserved_expansion\x18\x02 \x01(\x08\"\x81\x02\n\x08HttpRule\x12\x10\n\x08selector\x18\x01 \x01(\t\x12\r\n\x03get\x18\x02 \x01(\tH\x00\x12\r\n\x03put\x18\x03 \x01(\tH\x00\x12\x0e\n\x04post\x18\x04 \x01(\tH\x00\x12\x10\n\x06\x64\x65lete\x18\x05 \x01(\tH\x00\x12\x0f\n\x05patch\x18\x06 \x01(\tH\x00\x12/\n\x06\x63ustom\x18\x08 \x01(\x0b\x32\x1d.google.api.CustomHttpPatternH\x00\x12\x0c\n\x04\x62ody\x18\x07 \x01(\t\x12\x15\n\rresponse_body\x18\x0c \x01(\t\x12\x31\n\x13\x61\x64\x64itional_bindings\x18\x0b \x03(\x0b\x32\x14.google.api.HttpRuleB\t\n\x07pattern\"/\n\x11\x43ustomHttpPattern\x12\x0c\n\x04kind\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\tBj\n\x0e\x63om.google.apiB\tHttpProtoP\x01ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\xf8\x01\x01\xa2\x02\x04GAPIb\x06proto3')
+)
+
+
+
+
+_HTTP = _descriptor.Descriptor(
+  name='Http',
+  full_name='google.api.Http',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='rules', full_name='google.api.Http.rules', index=0,
+      number=1, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='fully_decode_reserved_expansion', full_name='google.api.Http.fully_decode_reserved_expansion', index=1,
+      number=2, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=42,
+  serialized_end=126,
+)
+
+
+_HTTPRULE = _descriptor.Descriptor(
+  name='HttpRule',
+  full_name='google.api.HttpRule',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='selector', full_name='google.api.HttpRule.selector', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='get', full_name='google.api.HttpRule.get', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='put', full_name='google.api.HttpRule.put', index=2,
+      number=3, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='post', full_name='google.api.HttpRule.post', index=3,
+      number=4, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='delete', full_name='google.api.HttpRule.delete', index=4,
+      number=5, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='patch', full_name='google.api.HttpRule.patch', index=5,
+      number=6, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='custom', full_name='google.api.HttpRule.custom', index=6,
+      number=8, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='body', full_name='google.api.HttpRule.body', index=7,
+      number=7, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='response_body', full_name='google.api.HttpRule.response_body', index=8,
+      number=12, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='additional_bindings', full_name='google.api.HttpRule.additional_bindings', index=9,
+      number=11, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+    _descriptor.OneofDescriptor(
+      name='pattern', full_name='google.api.HttpRule.pattern',
+      index=0, containing_type=None, fields=[]),
+  ],
+  serialized_start=129,
+  serialized_end=386,
+)
+
+
+_CUSTOMHTTPPATTERN = _descriptor.Descriptor(
+  name='CustomHttpPattern',
+  full_name='google.api.CustomHttpPattern',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='kind', full_name='google.api.CustomHttpPattern.kind', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='path', full_name='google.api.CustomHttpPattern.path', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=388,
+  serialized_end=435,
+)
+
+_HTTP.fields_by_name['rules'].message_type = _HTTPRULE
+_HTTPRULE.fields_by_name['custom'].message_type = _CUSTOMHTTPPATTERN
+_HTTPRULE.fields_by_name['additional_bindings'].message_type = _HTTPRULE
+_HTTPRULE.oneofs_by_name['pattern'].fields.append(
+  _HTTPRULE.fields_by_name['get'])
+_HTTPRULE.fields_by_name['get'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
+_HTTPRULE.oneofs_by_name['pattern'].fields.append(
+  _HTTPRULE.fields_by_name['put'])
+_HTTPRULE.fields_by_name['put'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
+_HTTPRULE.oneofs_by_name['pattern'].fields.append(
+  _HTTPRULE.fields_by_name['post'])
+_HTTPRULE.fields_by_name['post'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
+_HTTPRULE.oneofs_by_name['pattern'].fields.append(
+  _HTTPRULE.fields_by_name['delete'])
+_HTTPRULE.fields_by_name['delete'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
+_HTTPRULE.oneofs_by_name['pattern'].fields.append(
+  _HTTPRULE.fields_by_name['patch'])
+_HTTPRULE.fields_by_name['patch'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
+_HTTPRULE.oneofs_by_name['pattern'].fields.append(
+  _HTTPRULE.fields_by_name['custom'])
+_HTTPRULE.fields_by_name['custom'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
+DESCRIPTOR.message_types_by_name['Http'] = _HTTP
+DESCRIPTOR.message_types_by_name['HttpRule'] = _HTTPRULE
+DESCRIPTOR.message_types_by_name['CustomHttpPattern'] = _CUSTOMHTTPPATTERN
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+Http = _reflection.GeneratedProtocolMessageType('Http', (_message.Message,), {
+  'DESCRIPTOR' : _HTTP,
+  '__module__' : 'aftl_google.api.http_pb2'
+  # @@protoc_insertion_point(class_scope:google.api.Http)
+  })
+_sym_db.RegisterMessage(Http)
+
+HttpRule = _reflection.GeneratedProtocolMessageType('HttpRule', (_message.Message,), {
+  'DESCRIPTOR' : _HTTPRULE,
+  '__module__' : 'aftl_google.api.http_pb2'
+  # @@protoc_insertion_point(class_scope:google.api.HttpRule)
+  })
+_sym_db.RegisterMessage(HttpRule)
+
+CustomHttpPattern = _reflection.GeneratedProtocolMessageType('CustomHttpPattern', (_message.Message,), {
+  'DESCRIPTOR' : _CUSTOMHTTPPATTERN,
+  '__module__' : 'aftl_google.api.http_pb2'
+  # @@protoc_insertion_point(class_scope:google.api.CustomHttpPattern)
+  })
+_sym_db.RegisterMessage(CustomHttpPattern)
+
+
+DESCRIPTOR._options = None
+# @@protoc_insertion_point(module_scope)
diff --git a/proto/aftl_google/api/http_pb2_grpc.py b/proto/aftl_google/api/http_pb2_grpc.py
new file mode 100644
index 0000000..73636b2
--- /dev/null
+++ b/proto/aftl_google/api/http_pb2_grpc.py
@@ -0,0 +1,4 @@
+# pylint: skip-file
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
diff --git a/proto/aftl_google/rpc/__init__.py b/proto/aftl_google/rpc/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/proto/aftl_google/rpc/__init__.py
diff --git a/proto/aftl_google/rpc/status.proto b/proto/aftl_google/rpc/status.proto
new file mode 100644
index 0000000..b0daa36
--- /dev/null
+++ b/proto/aftl_google/rpc/status.proto
@@ -0,0 +1,94 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.rpc;
+
+import "google/protobuf/any.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/rpc/status;status";
+option java_multiple_files = true;
+option java_outer_classname = "StatusProto";
+option java_package = "com.google.rpc";
+option objc_class_prefix = "RPC";
+
+// The `Status` type defines a logical error model that is suitable for
+// different programming environments, including REST APIs and RPC APIs. It is
+// used by [gRPC](https://github.com/grpc). The error model is designed to be:
+//
+// - Simple to use and understand for most users
+// - Flexible enough to meet unexpected needs
+//
+// # Overview
+//
+// The `Status` message contains three pieces of data: error code, error
+// message, and error details. The error code should be an enum value of
+// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes
+// if needed.  The error message should be a developer-facing English message
+// that helps developers *understand* and *resolve* the error. If a localized
+// user-facing error message is needed, put the localized message in the error
+// details or localize it in the client. The optional error details may contain
+// arbitrary information about the error. There is a predefined set of error
+// detail types in the package `google.rpc` that can be used for common error
+// conditions.
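+//
+// As an illustration (the values below are hypothetical), a `Status`
+// describing a bad request could look like this in proto text format:
+//
+//     code: 3  # google.rpc.Code.INVALID_ARGUMENT
+//     message: "Name field is required."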
+//
+// # Language mapping
+//
+// The `Status` message is the logical representation of the error model, but it
+// is not necessarily the actual wire format. When the `Status` message is
+// exposed in different client libraries and different wire protocols, it can be
+// mapped differently. For example, it will likely be mapped to some exceptions
+// in Java, but more likely mapped to some error codes in C.
+//
+// # Other uses
+//
+// The error model and the `Status` message can be used in a variety of
+// environments, either with or without APIs, to provide a
+// consistent developer experience across different environments.
+//
+// Example uses of this error model include:
+//
+// - Partial errors. If a service needs to return partial errors to the client,
+//     it may embed the `Status` in the normal response to indicate the partial
+//     errors.
+//
+// - Workflow errors. A typical workflow has multiple steps. Each step may
+//     have a `Status` message for error reporting.
+//
+// - Batch operations. If a client uses batch requests and batch responses, the
+//     `Status` message should be used directly inside the batch response, one
+//     for each error sub-response.
+//
+// - Asynchronous operations. If an API call embeds asynchronous operation
+//     results in its response, the status of those operations should be
+//     represented directly using the `Status` message.
+//
+// - Logging. If some API errors are stored in logs, the message `Status` could
+//     be used directly after any stripping needed for security/privacy reasons.
+message Status {
+  // The status code, which should be an enum value of
+  // [google.rpc.Code][google.rpc.Code].
+  int32 code = 1;
+
+  // A developer-facing error message, which should be in English. Any
+  // user-facing error message should be localized and sent in the
+  // [google.rpc.Status.details][google.rpc.Status.details] field, or localized
+  // by the client.
+  string message = 2;
+
+  // A list of messages that carry the error details.  There is a common set of
+  // message types for APIs to use.
+  repeated google.protobuf.Any details = 3;
+}
diff --git a/proto/aftl_google/rpc/status_pb2.py b/proto/aftl_google/rpc/status_pb2.py
new file mode 100644
index 0000000..28e7c88
--- /dev/null
+++ b/proto/aftl_google/rpc/status_pb2.py
@@ -0,0 +1,89 @@
+# pylint: skip-file
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: aftl_google/rpc/status.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='aftl_google/rpc/status.proto',
+  package='google.rpc',
+  syntax='proto3',
+  serialized_options=_b('\n\016com.google.rpcB\013StatusProtoP\001Z7google.golang.org/genproto/googleapis/rpc/status;status\242\002\003RPC'),
+  serialized_pb=_b('\n\x1c\x61\x66tl_google/rpc/status.proto\x12\ngoogle.rpc\x1a\x19google/protobuf/any.proto\"N\n\x06Status\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0f\n\x07message\x18\x02 \x01(\t\x12%\n\x07\x64\x65tails\x18\x03 \x03(\x0b\x32\x14.google.protobuf.AnyB^\n\x0e\x63om.google.rpcB\x0bStatusProtoP\x01Z7google.golang.org/genproto/googleapis/rpc/status;status\xa2\x02\x03RPCb\x06proto3')
+  ,
+  dependencies=[google_dot_protobuf_dot_any__pb2.DESCRIPTOR,])
+
+
+
+
+_STATUS = _descriptor.Descriptor(
+  name='Status',
+  full_name='google.rpc.Status',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='code', full_name='google.rpc.Status.code', index=0,
+      number=1, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='message', full_name='google.rpc.Status.message', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='details', full_name='google.rpc.Status.details', index=2,
+      number=3, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=71,
+  serialized_end=149,
+)
+
+_STATUS.fields_by_name['details'].message_type = google_dot_protobuf_dot_any__pb2._ANY
+DESCRIPTOR.message_types_by_name['Status'] = _STATUS
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+Status = _reflection.GeneratedProtocolMessageType('Status', (_message.Message,), {
+  'DESCRIPTOR' : _STATUS,
+  '__module__' : 'aftl_google.rpc.status_pb2'
+  # @@protoc_insertion_point(class_scope:google.rpc.Status)
+  })
+_sym_db.RegisterMessage(Status)
+
+
+DESCRIPTOR._options = None
+# @@protoc_insertion_point(module_scope)
diff --git a/proto/aftl_google/rpc/status_pb2_grpc.py b/proto/aftl_google/rpc/status_pb2_grpc.py
new file mode 100644
index 0000000..73636b2
--- /dev/null
+++ b/proto/aftl_google/rpc/status_pb2_grpc.py
@@ -0,0 +1,4 @@
+# pylint: skip-file
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
diff --git a/proto/aftl_pb2.py b/proto/aftl_pb2.py
new file mode 100644
index 0000000..ed811ae
--- /dev/null
+++ b/proto/aftl_pb2.py
@@ -0,0 +1,469 @@
+# pylint: skip-file
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: aftl.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+import trillian_pb2 as trillian__pb2
+from crypto.sigpb import sigpb_pb2 as crypto_dot_sigpb_dot_sigpb__pb2
+from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='aftl.proto',
+  package='aftl',
+  syntax='proto3',
+  serialized_options=_b('Z\005proto'),
+  serialized_pb=_b('\n\naftl.proto\x12\x04\x61\x66tl\x1a\x0etrillian.proto\x1a\x18\x63rypto/sigpb/sigpb.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x8a\x01\n\x0c\x46irmwareInfo\x12\x13\n\x0bvbmeta_hash\x18\x01 \x01(\x0c\x12\x1b\n\x13version_incremental\x18\x02 \x01(\t\x12\x14\n\x0cplatform_key\x18\x03 \x01(\x0c\x12\x1d\n\x15manufacturer_key_hash\x18\x04 \x01(\x0c\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\"f\n\x12SignedFirmwareInfo\x12 \n\x04info\x18\x01 \x01(\x0b\x32\x12.aftl.FirmwareInfo\x12.\n\x0einfo_signature\x18\x02 \x01(\x0b\x32\x16.sigpb.DigitallySigned\"Q\n\x11\x46irmwareImageInfo\x12\x13\n\x0bvbmeta_hash\x18\x01 \x01(\x0c\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\x12\x19\n\x11\x62uild_fingerprint\x18\x03 \x01(\t\"|\n\x17SignedFirmwareImageInfo\x12+\n\nimage_info\x18\x01 \x01(\x0b\x32\x17.aftl.FirmwareImageInfo\x12\x34\n\x14image_info_signature\x18\x02 \x01(\x0b\x32\x16.sigpb.DigitallySigned\"V\n\x0eInclusionProof\x12\x1e\n\x05proof\x18\x01 \x01(\x0b\x32\x0f.trillian.Proof\x12$\n\x03sth\x18\x02 \x01(\x0b\x32\x17.trillian.SignedLogRoot\"\xce\x01\n\x04Leaf\x12\x0f\n\x07version\x18\x01 \x01(\x05\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x10\n\x06vbmeta\x18\x03 \x01(\x0cH\x00\x12/\n\x07\x66w_info\x18\x04 \x01(\x0b\x32\x1c.aftl.FirmwareInfoAnnotationH\x00\x12:\n\rfw_image_info\x18\x05 \x01(\x0b\x32!.aftl.FirmwareImageInfoAnnotationH\x00\x42\x07\n\x05value\"@\n\x16\x46irmwareInfoAnnotation\x12&\n\x04info\x18\x01 \x01(\x0b\x32\x18.aftl.SignedFirmwareInfo\"W\n\x1b\x46irmwareImageInfoAnnotation\x12+\n\x04info\x18\x01 \x01(\x0b\x32\x1d.aftl.SignedFirmwareImageInfo\x12\x0b\n\x03url\x18\x02 \x01(\tB\x07Z\x05protob\x06proto3')
+  ,
+  dependencies=[trillian__pb2.DESCRIPTOR,crypto_dot_sigpb_dot_sigpb__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
+
+
+
+
+_FIRMWAREINFO = _descriptor.Descriptor(
+  name='FirmwareInfo',
+  full_name='aftl.FirmwareInfo',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='vbmeta_hash', full_name='aftl.FirmwareInfo.vbmeta_hash', index=0,
+      number=1, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='version_incremental', full_name='aftl.FirmwareInfo.version_incremental', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='platform_key', full_name='aftl.FirmwareInfo.platform_key', index=2,
+      number=3, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='manufacturer_key_hash', full_name='aftl.FirmwareInfo.manufacturer_key_hash', index=3,
+      number=4, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='description', full_name='aftl.FirmwareInfo.description', index=4,
+      number=5, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=96,
+  serialized_end=234,
+)
+
+
+_SIGNEDFIRMWAREINFO = _descriptor.Descriptor(
+  name='SignedFirmwareInfo',
+  full_name='aftl.SignedFirmwareInfo',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='info', full_name='aftl.SignedFirmwareInfo.info', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='info_signature', full_name='aftl.SignedFirmwareInfo.info_signature', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=236,
+  serialized_end=338,
+)
+
+
+_FIRMWAREIMAGEINFO = _descriptor.Descriptor(
+  name='FirmwareImageInfo',
+  full_name='aftl.FirmwareImageInfo',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='vbmeta_hash', full_name='aftl.FirmwareImageInfo.vbmeta_hash', index=0,
+      number=1, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='hash', full_name='aftl.FirmwareImageInfo.hash', index=1,
+      number=2, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='build_fingerprint', full_name='aftl.FirmwareImageInfo.build_fingerprint', index=2,
+      number=3, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=340,
+  serialized_end=421,
+)
+
+
+_SIGNEDFIRMWAREIMAGEINFO = _descriptor.Descriptor(
+  name='SignedFirmwareImageInfo',
+  full_name='aftl.SignedFirmwareImageInfo',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='image_info', full_name='aftl.SignedFirmwareImageInfo.image_info', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='image_info_signature', full_name='aftl.SignedFirmwareImageInfo.image_info_signature', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=423,
+  serialized_end=547,
+)
+
+
+_INCLUSIONPROOF = _descriptor.Descriptor(
+  name='InclusionProof',
+  full_name='aftl.InclusionProof',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='proof', full_name='aftl.InclusionProof.proof', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='sth', full_name='aftl.InclusionProof.sth', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=549,
+  serialized_end=635,
+)
+
+
+_LEAF = _descriptor.Descriptor(
+  name='Leaf',
+  full_name='aftl.Leaf',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='version', full_name='aftl.Leaf.version', index=0,
+      number=1, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='timestamp', full_name='aftl.Leaf.timestamp', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='vbmeta', full_name='aftl.Leaf.vbmeta', index=2,
+      number=3, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='fw_info', full_name='aftl.Leaf.fw_info', index=3,
+      number=4, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='fw_image_info', full_name='aftl.Leaf.fw_image_info', index=4,
+      number=5, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+    _descriptor.OneofDescriptor(
+      name='value', full_name='aftl.Leaf.value',
+      index=0, containing_type=None, fields=[]),
+  ],
+  serialized_start=638,
+  serialized_end=844,
+)
+
+
+_FIRMWAREINFOANNOTATION = _descriptor.Descriptor(
+  name='FirmwareInfoAnnotation',
+  full_name='aftl.FirmwareInfoAnnotation',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='info', full_name='aftl.FirmwareInfoAnnotation.info', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=846,
+  serialized_end=910,
+)
+
+
+_FIRMWAREIMAGEINFOANNOTATION = _descriptor.Descriptor(
+  name='FirmwareImageInfoAnnotation',
+  full_name='aftl.FirmwareImageInfoAnnotation',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='info', full_name='aftl.FirmwareImageInfoAnnotation.info', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='url', full_name='aftl.FirmwareImageInfoAnnotation.url', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=912,
+  serialized_end=999,
+)
+
+_SIGNEDFIRMWAREINFO.fields_by_name['info'].message_type = _FIRMWAREINFO
+_SIGNEDFIRMWAREINFO.fields_by_name['info_signature'].message_type = crypto_dot_sigpb_dot_sigpb__pb2._DIGITALLYSIGNED
+_SIGNEDFIRMWAREIMAGEINFO.fields_by_name['image_info'].message_type = _FIRMWAREIMAGEINFO
+_SIGNEDFIRMWAREIMAGEINFO.fields_by_name['image_info_signature'].message_type = crypto_dot_sigpb_dot_sigpb__pb2._DIGITALLYSIGNED
+_INCLUSIONPROOF.fields_by_name['proof'].message_type = trillian__pb2._PROOF
+_INCLUSIONPROOF.fields_by_name['sth'].message_type = trillian__pb2._SIGNEDLOGROOT
+_LEAF.fields_by_name['timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_LEAF.fields_by_name['fw_info'].message_type = _FIRMWAREINFOANNOTATION
+_LEAF.fields_by_name['fw_image_info'].message_type = _FIRMWAREIMAGEINFOANNOTATION
+_LEAF.oneofs_by_name['value'].fields.append(
+  _LEAF.fields_by_name['vbmeta'])
+_LEAF.fields_by_name['vbmeta'].containing_oneof = _LEAF.oneofs_by_name['value']
+_LEAF.oneofs_by_name['value'].fields.append(
+  _LEAF.fields_by_name['fw_info'])
+_LEAF.fields_by_name['fw_info'].containing_oneof = _LEAF.oneofs_by_name['value']
+_LEAF.oneofs_by_name['value'].fields.append(
+  _LEAF.fields_by_name['fw_image_info'])
+_LEAF.fields_by_name['fw_image_info'].containing_oneof = _LEAF.oneofs_by_name['value']
+_FIRMWAREINFOANNOTATION.fields_by_name['info'].message_type = _SIGNEDFIRMWAREINFO
+_FIRMWAREIMAGEINFOANNOTATION.fields_by_name['info'].message_type = _SIGNEDFIRMWAREIMAGEINFO
+DESCRIPTOR.message_types_by_name['FirmwareInfo'] = _FIRMWAREINFO
+DESCRIPTOR.message_types_by_name['SignedFirmwareInfo'] = _SIGNEDFIRMWAREINFO
+DESCRIPTOR.message_types_by_name['FirmwareImageInfo'] = _FIRMWAREIMAGEINFO
+DESCRIPTOR.message_types_by_name['SignedFirmwareImageInfo'] = _SIGNEDFIRMWAREIMAGEINFO
+DESCRIPTOR.message_types_by_name['InclusionProof'] = _INCLUSIONPROOF
+DESCRIPTOR.message_types_by_name['Leaf'] = _LEAF
+DESCRIPTOR.message_types_by_name['FirmwareInfoAnnotation'] = _FIRMWAREINFOANNOTATION
+DESCRIPTOR.message_types_by_name['FirmwareImageInfoAnnotation'] = _FIRMWAREIMAGEINFOANNOTATION
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+FirmwareInfo = _reflection.GeneratedProtocolMessageType('FirmwareInfo', (_message.Message,), {
+  'DESCRIPTOR' : _FIRMWAREINFO,
+  '__module__' : 'aftl_pb2'
+  # @@protoc_insertion_point(class_scope:aftl.FirmwareInfo)
+  })
+_sym_db.RegisterMessage(FirmwareInfo)
+
+SignedFirmwareInfo = _reflection.GeneratedProtocolMessageType('SignedFirmwareInfo', (_message.Message,), {
+  'DESCRIPTOR' : _SIGNEDFIRMWAREINFO,
+  '__module__' : 'aftl_pb2'
+  # @@protoc_insertion_point(class_scope:aftl.SignedFirmwareInfo)
+  })
+_sym_db.RegisterMessage(SignedFirmwareInfo)
+
+FirmwareImageInfo = _reflection.GeneratedProtocolMessageType('FirmwareImageInfo', (_message.Message,), {
+  'DESCRIPTOR' : _FIRMWAREIMAGEINFO,
+  '__module__' : 'aftl_pb2'
+  # @@protoc_insertion_point(class_scope:aftl.FirmwareImageInfo)
+  })
+_sym_db.RegisterMessage(FirmwareImageInfo)
+
+SignedFirmwareImageInfo = _reflection.GeneratedProtocolMessageType('SignedFirmwareImageInfo', (_message.Message,), {
+  'DESCRIPTOR' : _SIGNEDFIRMWAREIMAGEINFO,
+  '__module__' : 'aftl_pb2'
+  # @@protoc_insertion_point(class_scope:aftl.SignedFirmwareImageInfo)
+  })
+_sym_db.RegisterMessage(SignedFirmwareImageInfo)
+
+InclusionProof = _reflection.GeneratedProtocolMessageType('InclusionProof', (_message.Message,), {
+  'DESCRIPTOR' : _INCLUSIONPROOF,
+  '__module__' : 'aftl_pb2'
+  # @@protoc_insertion_point(class_scope:aftl.InclusionProof)
+  })
+_sym_db.RegisterMessage(InclusionProof)
+
+Leaf = _reflection.GeneratedProtocolMessageType('Leaf', (_message.Message,), {
+  'DESCRIPTOR' : _LEAF,
+  '__module__' : 'aftl_pb2'
+  # @@protoc_insertion_point(class_scope:aftl.Leaf)
+  })
+_sym_db.RegisterMessage(Leaf)
+
+FirmwareInfoAnnotation = _reflection.GeneratedProtocolMessageType('FirmwareInfoAnnotation', (_message.Message,), {
+  'DESCRIPTOR' : _FIRMWAREINFOANNOTATION,
+  '__module__' : 'aftl_pb2'
+  # @@protoc_insertion_point(class_scope:aftl.FirmwareInfoAnnotation)
+  })
+_sym_db.RegisterMessage(FirmwareInfoAnnotation)
+
+FirmwareImageInfoAnnotation = _reflection.GeneratedProtocolMessageType('FirmwareImageInfoAnnotation', (_message.Message,), {
+  'DESCRIPTOR' : _FIRMWAREIMAGEINFOANNOTATION,
+  '__module__' : 'aftl_pb2'
+  # @@protoc_insertion_point(class_scope:aftl.FirmwareImageInfoAnnotation)
+  })
+_sym_db.RegisterMessage(FirmwareImageInfoAnnotation)
+
+
+DESCRIPTOR._options = None
+# @@protoc_insertion_point(module_scope)
diff --git a/proto/aftl_pb2_grpc.py b/proto/aftl_pb2_grpc.py
new file mode 100644
index 0000000..a894352
--- /dev/null
+++ b/proto/aftl_pb2_grpc.py
@@ -0,0 +1,3 @@
+# pylint: skip-file
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
diff --git a/proto/api.proto b/proto/api.proto
new file mode 100644
index 0000000..4c66333
--- /dev/null
+++ b/proto/api.proto
@@ -0,0 +1,87 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package aftl;
+option go_package = "proto";
+
+import "aftl.proto";
+
+message AddFirmwareInfoRequest {
+  // VBMeta structure as described in
+  // https://android.googlesource.com/platform/external/avb/+/master/README.md.
+  // In case of chained partitions, each VBMeta is added via a separate call.
+  // The default gRPC payload size limit is about 4MB. We expect vbmeta to be
+  // on the order of 1kB.
+  bytes vbmeta = 1;
+
+  SignedFirmwareInfo fw_info = 2;
+}
+
+message AddFirmwareInfoResponse {
+  // Inclusion proof and the leaf that was added to the log, which contains
+  // information on the firmware.
+  // The complete leaf is required to validate the inclusion proof.
+  // For on-device verification, only these first two fields are required to
+  // validate the inclusion.
+  InclusionProof fw_info_proof = 1;
+  bytes          fw_info_leaf = 2;
+
+  // Inclusion proof and leaf that were added to the log, containing the full
+  // vbmeta partition.
+  // These fields are NOT required for validation but can still be recorded by a
+  // vendor to prove that the complete VBMeta was submitted.
+  InclusionProof vbmeta_proof = 3;
+  bytes          vbmeta_leaf = 4;
+}
+
+message AddFirmwareImageRequest {
+
+  SignedFirmwareImageInfo fw_image_info = 1;
+
+  // Bytes of the binary image. These are not signed, as their final
+  // hash value is already signed in fw_image_info.hash.
+  // This field is ignored if origin_url is set in any of the requests.
+  bytes image_chunk = 2;
+
+  // Origin location of the image. It is used to get a copy of the binary
+  // image from another server (e.g., Google Cloud Storage).
+  string origin_url = 3;
+}
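+
+// As an illustrative sketch (hypothetical values), a client that copies the
+// image from another server rather than streaming it would send a single
+// request with origin_url set and image_chunk left empty, e.g. in proto text
+// format:
+//
+//     fw_image_info { image_info { build_fingerprint: "example/1" } }
+//     origin_url: "https://example.com/firmware.img"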
+
+message AddFirmwareImageResponse {
+
+  // Inclusion proof and leaf for the firmware image. The leaf contains the URL
+  // where the image was stored.
+  // Vendors are not required to keep this information, but they can record it
+  // to verify the correctness of the log.
+  InclusionProof fw_image_info_proof = 1;
+  Leaf           fw_image_info_leaf = 2;
+}
+
+service AFTLog {
+
+  // Insert a new VBMeta structure into the log.
+  // This request will effectively create two log entries:
+  //  - VBMeta itself
+  //  - Vendor annotations, including a reference to the VBMeta leaf.
+  rpc AddFirmwareInfo(AddFirmwareInfoRequest) returns (AddFirmwareInfoResponse) {}
+
+  // Upload (or copy) the complete firmware image.
+  rpc AddFirmwareImage(stream AddFirmwareImageRequest) returns (AddFirmwareImageResponse) {}
+
+  // TODO GetProofByHash, GetSthConsistency, GetEntries, GetRootKeys
+}
+
diff --git a/proto/api_pb2.py b/proto/api_pb2.py
new file mode 100644
index 0000000..f8f9a01
--- /dev/null
+++ b/proto/api_pb2.py
@@ -0,0 +1,279 @@
+# pylint: skip-file
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: api.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+import aftl_pb2 as aftl__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='api.proto',
+  package='aftl',
+  syntax='proto3',
+  serialized_options=_b('Z\005proto'),
+  serialized_pb=_b('\n\tapi.proto\x12\x04\x61\x66tl\x1a\naftl.proto\"S\n\x16\x41\x64\x64\x46irmwareInfoRequest\x12\x0e\n\x06vbmeta\x18\x01 \x01(\x0c\x12)\n\x07\x66w_info\x18\x02 \x01(\x0b\x32\x18.aftl.SignedFirmwareInfo\"\x9d\x01\n\x17\x41\x64\x64\x46irmwareInfoResponse\x12+\n\rfw_info_proof\x18\x01 \x01(\x0b\x32\x14.aftl.InclusionProof\x12\x14\n\x0c\x66w_info_leaf\x18\x02 \x01(\x0c\x12*\n\x0cvbmeta_proof\x18\x03 \x01(\x0b\x32\x14.aftl.InclusionProof\x12\x13\n\x0bvbmeta_leaf\x18\x04 \x01(\x0c\"x\n\x17\x41\x64\x64\x46irmwareImageRequest\x12\x34\n\rfw_image_info\x18\x01 \x01(\x0b\x32\x1d.aftl.SignedFirmwareImageInfo\x12\x13\n\x0bimage_chunk\x18\x02 \x01(\x0c\x12\x12\n\norigin_url\x18\x03 \x01(\t\"u\n\x18\x41\x64\x64\x46irmwareImageResponse\x12\x31\n\x13\x66w_image_info_proof\x18\x01 \x01(\x0b\x32\x14.aftl.InclusionProof\x12&\n\x12\x66w_image_info_leaf\x18\x02 \x01(\x0b\x32\n.aftl.Leaf2\xb1\x01\n\x06\x41\x46TLog\x12P\n\x0f\x41\x64\x64\x46irmwareInfo\x12\x1c.aftl.AddFirmwareInfoRequest\x1a\x1d.aftl.AddFirmwareInfoResponse\"\x00\x12U\n\x10\x41\x64\x64\x46irmwareImage\x12\x1d.aftl.AddFirmwareImageRequest\x1a\x1e.aftl.AddFirmwareImageResponse\"\x00(\x01\x42\x07Z\x05protob\x06proto3')
+  ,
+  dependencies=[aftl__pb2.DESCRIPTOR,])
+
+
+
+
+_ADDFIRMWAREINFOREQUEST = _descriptor.Descriptor(
+  name='AddFirmwareInfoRequest',
+  full_name='aftl.AddFirmwareInfoRequest',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='vbmeta', full_name='aftl.AddFirmwareInfoRequest.vbmeta', index=0,
+      number=1, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='fw_info', full_name='aftl.AddFirmwareInfoRequest.fw_info', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=31,
+  serialized_end=114,
+)
+
+
+_ADDFIRMWAREINFORESPONSE = _descriptor.Descriptor(
+  name='AddFirmwareInfoResponse',
+  full_name='aftl.AddFirmwareInfoResponse',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='fw_info_proof', full_name='aftl.AddFirmwareInfoResponse.fw_info_proof', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='fw_info_leaf', full_name='aftl.AddFirmwareInfoResponse.fw_info_leaf', index=1,
+      number=2, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='vbmeta_proof', full_name='aftl.AddFirmwareInfoResponse.vbmeta_proof', index=2,
+      number=3, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='vbmeta_leaf', full_name='aftl.AddFirmwareInfoResponse.vbmeta_leaf', index=3,
+      number=4, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=117,
+  serialized_end=274,
+)
+
+
+_ADDFIRMWAREIMAGEREQUEST = _descriptor.Descriptor(
+  name='AddFirmwareImageRequest',
+  full_name='aftl.AddFirmwareImageRequest',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='fw_image_info', full_name='aftl.AddFirmwareImageRequest.fw_image_info', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='image_chunk', full_name='aftl.AddFirmwareImageRequest.image_chunk', index=1,
+      number=2, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='origin_url', full_name='aftl.AddFirmwareImageRequest.origin_url', index=2,
+      number=3, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=276,
+  serialized_end=396,
+)
+
+
+_ADDFIRMWAREIMAGERESPONSE = _descriptor.Descriptor(
+  name='AddFirmwareImageResponse',
+  full_name='aftl.AddFirmwareImageResponse',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='fw_image_info_proof', full_name='aftl.AddFirmwareImageResponse.fw_image_info_proof', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='fw_image_info_leaf', full_name='aftl.AddFirmwareImageResponse.fw_image_info_leaf', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=398,
+  serialized_end=515,
+)
+
+_ADDFIRMWAREINFOREQUEST.fields_by_name['fw_info'].message_type = aftl__pb2._SIGNEDFIRMWAREINFO
+_ADDFIRMWAREINFORESPONSE.fields_by_name['fw_info_proof'].message_type = aftl__pb2._INCLUSIONPROOF
+_ADDFIRMWAREINFORESPONSE.fields_by_name['vbmeta_proof'].message_type = aftl__pb2._INCLUSIONPROOF
+_ADDFIRMWAREIMAGEREQUEST.fields_by_name['fw_image_info'].message_type = aftl__pb2._SIGNEDFIRMWAREIMAGEINFO
+_ADDFIRMWAREIMAGERESPONSE.fields_by_name['fw_image_info_proof'].message_type = aftl__pb2._INCLUSIONPROOF
+_ADDFIRMWAREIMAGERESPONSE.fields_by_name['fw_image_info_leaf'].message_type = aftl__pb2._LEAF
+DESCRIPTOR.message_types_by_name['AddFirmwareInfoRequest'] = _ADDFIRMWAREINFOREQUEST
+DESCRIPTOR.message_types_by_name['AddFirmwareInfoResponse'] = _ADDFIRMWAREINFORESPONSE
+DESCRIPTOR.message_types_by_name['AddFirmwareImageRequest'] = _ADDFIRMWAREIMAGEREQUEST
+DESCRIPTOR.message_types_by_name['AddFirmwareImageResponse'] = _ADDFIRMWAREIMAGERESPONSE
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+AddFirmwareInfoRequest = _reflection.GeneratedProtocolMessageType('AddFirmwareInfoRequest', (_message.Message,), {
+  'DESCRIPTOR' : _ADDFIRMWAREINFOREQUEST,
+  '__module__' : 'api_pb2'
+  # @@protoc_insertion_point(class_scope:aftl.AddFirmwareInfoRequest)
+  })
+_sym_db.RegisterMessage(AddFirmwareInfoRequest)
+
+AddFirmwareInfoResponse = _reflection.GeneratedProtocolMessageType('AddFirmwareInfoResponse', (_message.Message,), {
+  'DESCRIPTOR' : _ADDFIRMWAREINFORESPONSE,
+  '__module__' : 'api_pb2'
+  # @@protoc_insertion_point(class_scope:aftl.AddFirmwareInfoResponse)
+  })
+_sym_db.RegisterMessage(AddFirmwareInfoResponse)
+
+AddFirmwareImageRequest = _reflection.GeneratedProtocolMessageType('AddFirmwareImageRequest', (_message.Message,), {
+  'DESCRIPTOR' : _ADDFIRMWAREIMAGEREQUEST,
+  '__module__' : 'api_pb2'
+  # @@protoc_insertion_point(class_scope:aftl.AddFirmwareImageRequest)
+  })
+_sym_db.RegisterMessage(AddFirmwareImageRequest)
+
+AddFirmwareImageResponse = _reflection.GeneratedProtocolMessageType('AddFirmwareImageResponse', (_message.Message,), {
+  'DESCRIPTOR' : _ADDFIRMWAREIMAGERESPONSE,
+  '__module__' : 'api_pb2'
+  # @@protoc_insertion_point(class_scope:aftl.AddFirmwareImageResponse)
+  })
+_sym_db.RegisterMessage(AddFirmwareImageResponse)
+
+
+DESCRIPTOR._options = None
+
+_AFTLOG = _descriptor.ServiceDescriptor(
+  name='AFTLog',
+  full_name='aftl.AFTLog',
+  file=DESCRIPTOR,
+  index=0,
+  serialized_options=None,
+  serialized_start=518,
+  serialized_end=695,
+  methods=[
+  _descriptor.MethodDescriptor(
+    name='AddFirmwareInfo',
+    full_name='aftl.AFTLog.AddFirmwareInfo',
+    index=0,
+    containing_service=None,
+    input_type=_ADDFIRMWAREINFOREQUEST,
+    output_type=_ADDFIRMWAREINFORESPONSE,
+    serialized_options=None,
+  ),
+  _descriptor.MethodDescriptor(
+    name='AddFirmwareImage',
+    full_name='aftl.AFTLog.AddFirmwareImage',
+    index=1,
+    containing_service=None,
+    input_type=_ADDFIRMWAREIMAGEREQUEST,
+    output_type=_ADDFIRMWAREIMAGERESPONSE,
+    serialized_options=None,
+  ),
+])
+_sym_db.RegisterServiceDescriptor(_AFTLOG)
+
+DESCRIPTOR.services_by_name['AFTLog'] = _AFTLOG
+
+# @@protoc_insertion_point(module_scope)
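The generated api_pb2 module above behaves like any protobuf module: messages are constructed, serialized, and re-parsed through the usual API. A minimal round-trip sketch, assuming the module is importable as api_pb2 (the sys.path layout is an assumption):

    import api_pb2

    # fw_info is an aftl.SignedFirmwareInfo submessage (defined in aftl_pb2).
    req = api_pb2.AddFirmwareInfoRequest()
    req.fw_info.SetInParent()  # mark the (empty) submessage as present

    # Serialize to wire format and parse it back.
    blob = req.SerializeToString()
    parsed = api_pb2.AddFirmwareInfoRequest.FromString(blob)
    assert parsed == req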
diff --git a/proto/api_pb2_grpc.py b/proto/api_pb2_grpc.py
new file mode 100644
index 0000000..b1834c7
--- /dev/null
+++ b/proto/api_pb2_grpc.py
@@ -0,0 +1,67 @@
+# pylint: skip-file
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
+import api_pb2 as api__pb2
+
+
+class AFTLogStub(object):
+  # missing associated documentation comment in .proto file
+
+  def __init__(self, channel):
+    """Constructor.
+
+    Args:
+      channel: A grpc.Channel.
+    """
+    self.AddFirmwareInfo = channel.unary_unary(
+        '/aftl.AFTLog/AddFirmwareInfo',
+        request_serializer=api__pb2.AddFirmwareInfoRequest.SerializeToString,
+        response_deserializer=api__pb2.AddFirmwareInfoResponse.FromString,
+        )
+    self.AddFirmwareImage = channel.stream_unary(
+        '/aftl.AFTLog/AddFirmwareImage',
+        request_serializer=api__pb2.AddFirmwareImageRequest.SerializeToString,
+        response_deserializer=api__pb2.AddFirmwareImageResponse.FromString,
+        )
+
+
+class AFTLogServicer(object):
+  # missing associated documentation comment in .proto file
+
+  def AddFirmwareInfo(self, request, context):
+    """Insert a new VBMeta structure into the log.
+    This request will effectively create 2 log entries:
+    - VBMeta itself
+    - Vendor annotations, including a reference to the VBMeta leaf.
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def AddFirmwareImage(self, request_iterator, context):
+    """Upload (or copy) the complete firmware image.
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+
+def add_AFTLogServicer_to_server(servicer, server):
+  rpc_method_handlers = {
+      'AddFirmwareInfo': grpc.unary_unary_rpc_method_handler(
+          servicer.AddFirmwareInfo,
+          request_deserializer=api__pb2.AddFirmwareInfoRequest.FromString,
+          response_serializer=api__pb2.AddFirmwareInfoResponse.SerializeToString,
+      ),
+      'AddFirmwareImage': grpc.stream_unary_rpc_method_handler(
+          servicer.AddFirmwareImage,
+          request_deserializer=api__pb2.AddFirmwareImageRequest.FromString,
+          response_serializer=api__pb2.AddFirmwareImageResponse.SerializeToString,
+      ),
+  }
+  generic_handler = grpc.method_handlers_generic_handler(
+      'aftl.AFTLog', rpc_method_handlers)
+  server.add_generic_rpc_handlers((generic_handler,))
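AddFirmwareInfo is a unary-unary RPC while AddFirmwareImage is stream-unary, so a client passes a single request to the former and a request iterator to the latter. A client-side sketch against a hypothetical log endpoint (the address and payloads are assumptions, and the servicer above is a stub that would answer UNIMPLEMENTED):

    import grpc
    import api_pb2
    import api_pb2_grpc

    channel = grpc.insecure_channel('localhost:8080')  # endpoint is an assumption
    stub = api_pb2_grpc.AFTLogStub(channel)

    # Unary-unary: one request in, one AddFirmwareInfoResponse out.
    info_response = stub.AddFirmwareInfo(api_pb2.AddFirmwareInfoRequest())

    # Stream-unary: yield one AddFirmwareImageRequest per image chunk.
    def chunks():
        for piece in (b'chunk-1', b'chunk-2'):
            yield api_pb2.AddFirmwareImageRequest(image_chunk=piece)

    image_response = stub.AddFirmwareImage(chunks())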
diff --git a/proto/crypto/__init__.py b/proto/crypto/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/proto/crypto/__init__.py
diff --git a/proto/crypto/keyspb/__init__.py b/proto/crypto/keyspb/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/proto/crypto/keyspb/__init__.py
diff --git a/proto/crypto/keyspb/keyspb.proto b/proto/crypto/keyspb/keyspb.proto
new file mode 100644
index 0000000..03a8313
--- /dev/null
+++ b/proto/crypto/keyspb/keyspb.proto
@@ -0,0 +1,94 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+option go_package = "github.com/google/trillian/crypto/keyspb";
+
+package keyspb;
+
+// Specification for a private key.
+message Specification {
+  // ECDSA defines parameters for an ECDSA key.
+  message ECDSA {
+    // The supported elliptic curves.
+    enum Curve {
+      DEFAULT_CURVE = 0;  // Curve will be chosen by Trillian.
+      P256 = 1;
+      P384 = 2;
+      P521 = 3;
+    }
+
+    // The elliptic curve to use.
+    // Optional. If not set, the default curve will be used.
+    Curve curve = 1;
+  }
+
+  // RSA defines parameters for an RSA key.
+  message RSA {
+    // Size of the keys in bits. Must be sufficiently large to allow two primes
+    // to be generated.
+    // Optional. If not set, the key size will be chosen by Trillian.
+    int32 bits = 1;
+  }
+
+  // Ed25519 defines (empty) parameters for an Ed25519 private key.
+  message Ed25519 {
+  }
+
+  // The type of parameters provided determines the algorithm used for the key.
+  oneof params {
+    // The parameters for an ECDSA key.
+    ECDSA ecdsa_params = 1;
+
+    // The parameters for an RSA key.
+    RSA rsa_params = 2;
+
+    // The parameters for an Ed25519 key.
+    Ed25519 ed25519_params = 3;
+  }
+}
+
+// PEMKeyFile identifies a private key stored in a PEM-encoded file.
+message PEMKeyFile {
+  // File path of the private key.
+  string path = 1;
+
+  // Password for decrypting the private key.
+  // If empty, indicates that the private key is not encrypted.
+  string password = 2;
+}
+
+// PrivateKey is a private key, used for generating signatures.
+message PrivateKey {
+  // The key in DER-encoded form.
+  // The specific format (e.g. PKCS8) is not specified.
+  bytes der = 1;
+}
+
+// PublicKey is a public key, used for verifying signatures.
+message PublicKey {
+  // The key in DER-encoded PKIX form.
+  bytes der = 1;
+}
+
+// PKCS11Config identifies a private key accessed using PKCS #11.
+message PKCS11Config {
+  // The label of the PKCS#11 token.
+  string token_label = 1;
+  // The PIN for the specific token.
+  string pin = 2;
+  // The PEM public key associated with the private key to be used.
+  string public_key = 3;
+}
diff --git a/proto/crypto/keyspb/keyspb_pb2.py b/proto/crypto/keyspb/keyspb_pb2.py
new file mode 100644
index 0000000..eba4099
--- /dev/null
+++ b/proto/crypto/keyspb/keyspb_pb2.py
@@ -0,0 +1,421 @@
+# pylint: skip-file
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: crypto/keyspb/keyspb.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='crypto/keyspb/keyspb.proto',
+  package='keyspb',
+  syntax='proto3',
+  serialized_options=_b('Z(github.com/google/trillian/crypto/keyspb'),
+  serialized_pb=_b('\n\x1a\x63rypto/keyspb/keyspb.proto\x12\x06keyspb\"\xcd\x02\n\rSpecification\x12\x33\n\x0c\x65\x63\x64sa_params\x18\x01 \x01(\x0b\x32\x1b.keyspb.Specification.ECDSAH\x00\x12/\n\nrsa_params\x18\x02 \x01(\x0b\x32\x19.keyspb.Specification.RSAH\x00\x12\x37\n\x0e\x65\x64\x32\x35\x35\x31\x39_params\x18\x03 \x01(\x0b\x32\x1d.keyspb.Specification.Ed25519H\x00\x1as\n\x05\x45\x43\x44SA\x12\x30\n\x05\x63urve\x18\x01 \x01(\x0e\x32!.keyspb.Specification.ECDSA.Curve\"8\n\x05\x43urve\x12\x11\n\rDEFAULT_CURVE\x10\x00\x12\x08\n\x04P256\x10\x01\x12\x08\n\x04P384\x10\x02\x12\x08\n\x04P521\x10\x03\x1a\x13\n\x03RSA\x12\x0c\n\x04\x62its\x18\x01 \x01(\x05\x1a\t\n\x07\x45\x64\x32\x35\x35\x31\x39\x42\x08\n\x06params\",\n\nPEMKeyFile\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x10\n\x08password\x18\x02 \x01(\t\"\x19\n\nPrivateKey\x12\x0b\n\x03\x64\x65r\x18\x01 \x01(\x0c\"\x18\n\tPublicKey\x12\x0b\n\x03\x64\x65r\x18\x01 \x01(\x0c\"D\n\x0cPKCS11Config\x12\x13\n\x0btoken_label\x18\x01 \x01(\t\x12\x0b\n\x03pin\x18\x02 \x01(\t\x12\x12\n\npublic_key\x18\x03 \x01(\tB*Z(github.com/google/trillian/crypto/keyspbb\x06proto3')
+)
+
+
+
+_SPECIFICATION_ECDSA_CURVE = _descriptor.EnumDescriptor(
+  name='Curve',
+  full_name='keyspb.Specification.ECDSA.Curve',
+  filename=None,
+  file=DESCRIPTOR,
+  values=[
+    _descriptor.EnumValueDescriptor(
+      name='DEFAULT_CURVE', index=0, number=0,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='P256', index=1, number=1,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='P384', index=2, number=2,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='P521', index=3, number=3,
+      serialized_options=None,
+      type=None),
+  ],
+  containing_type=None,
+  serialized_options=None,
+  serialized_start=274,
+  serialized_end=330,
+)
+_sym_db.RegisterEnumDescriptor(_SPECIFICATION_ECDSA_CURVE)
+
+
+_SPECIFICATION_ECDSA = _descriptor.Descriptor(
+  name='ECDSA',
+  full_name='keyspb.Specification.ECDSA',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='curve', full_name='keyspb.Specification.ECDSA.curve', index=0,
+      number=1, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+    _SPECIFICATION_ECDSA_CURVE,
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=215,
+  serialized_end=330,
+)
+
+_SPECIFICATION_RSA = _descriptor.Descriptor(
+  name='RSA',
+  full_name='keyspb.Specification.RSA',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='bits', full_name='keyspb.Specification.RSA.bits', index=0,
+      number=1, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=332,
+  serialized_end=351,
+)
+
+_SPECIFICATION_ED25519 = _descriptor.Descriptor(
+  name='Ed25519',
+  full_name='keyspb.Specification.Ed25519',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=353,
+  serialized_end=362,
+)
+
+_SPECIFICATION = _descriptor.Descriptor(
+  name='Specification',
+  full_name='keyspb.Specification',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='ecdsa_params', full_name='keyspb.Specification.ecdsa_params', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='rsa_params', full_name='keyspb.Specification.rsa_params', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='ed25519_params', full_name='keyspb.Specification.ed25519_params', index=2,
+      number=3, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[_SPECIFICATION_ECDSA, _SPECIFICATION_RSA, _SPECIFICATION_ED25519, ],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+    _descriptor.OneofDescriptor(
+      name='params', full_name='keyspb.Specification.params',
+      index=0, containing_type=None, fields=[]),
+  ],
+  serialized_start=39,
+  serialized_end=372,
+)
+
+
+_PEMKEYFILE = _descriptor.Descriptor(
+  name='PEMKeyFile',
+  full_name='keyspb.PEMKeyFile',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='path', full_name='keyspb.PEMKeyFile.path', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='password', full_name='keyspb.PEMKeyFile.password', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=374,
+  serialized_end=418,
+)
+
+
+_PRIVATEKEY = _descriptor.Descriptor(
+  name='PrivateKey',
+  full_name='keyspb.PrivateKey',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='der', full_name='keyspb.PrivateKey.der', index=0,
+      number=1, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=420,
+  serialized_end=445,
+)
+
+
+_PUBLICKEY = _descriptor.Descriptor(
+  name='PublicKey',
+  full_name='keyspb.PublicKey',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='der', full_name='keyspb.PublicKey.der', index=0,
+      number=1, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=447,
+  serialized_end=471,
+)
+
+
+_PKCS11CONFIG = _descriptor.Descriptor(
+  name='PKCS11Config',
+  full_name='keyspb.PKCS11Config',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='token_label', full_name='keyspb.PKCS11Config.token_label', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='pin', full_name='keyspb.PKCS11Config.pin', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='public_key', full_name='keyspb.PKCS11Config.public_key', index=2,
+      number=3, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=473,
+  serialized_end=541,
+)
+
+_SPECIFICATION_ECDSA.fields_by_name['curve'].enum_type = _SPECIFICATION_ECDSA_CURVE
+_SPECIFICATION_ECDSA.containing_type = _SPECIFICATION
+_SPECIFICATION_ECDSA_CURVE.containing_type = _SPECIFICATION_ECDSA
+_SPECIFICATION_RSA.containing_type = _SPECIFICATION
+_SPECIFICATION_ED25519.containing_type = _SPECIFICATION
+_SPECIFICATION.fields_by_name['ecdsa_params'].message_type = _SPECIFICATION_ECDSA
+_SPECIFICATION.fields_by_name['rsa_params'].message_type = _SPECIFICATION_RSA
+_SPECIFICATION.fields_by_name['ed25519_params'].message_type = _SPECIFICATION_ED25519
+_SPECIFICATION.oneofs_by_name['params'].fields.append(
+  _SPECIFICATION.fields_by_name['ecdsa_params'])
+_SPECIFICATION.fields_by_name['ecdsa_params'].containing_oneof = _SPECIFICATION.oneofs_by_name['params']
+_SPECIFICATION.oneofs_by_name['params'].fields.append(
+  _SPECIFICATION.fields_by_name['rsa_params'])
+_SPECIFICATION.fields_by_name['rsa_params'].containing_oneof = _SPECIFICATION.oneofs_by_name['params']
+_SPECIFICATION.oneofs_by_name['params'].fields.append(
+  _SPECIFICATION.fields_by_name['ed25519_params'])
+_SPECIFICATION.fields_by_name['ed25519_params'].containing_oneof = _SPECIFICATION.oneofs_by_name['params']
+DESCRIPTOR.message_types_by_name['Specification'] = _SPECIFICATION
+DESCRIPTOR.message_types_by_name['PEMKeyFile'] = _PEMKEYFILE
+DESCRIPTOR.message_types_by_name['PrivateKey'] = _PRIVATEKEY
+DESCRIPTOR.message_types_by_name['PublicKey'] = _PUBLICKEY
+DESCRIPTOR.message_types_by_name['PKCS11Config'] = _PKCS11CONFIG
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+Specification = _reflection.GeneratedProtocolMessageType('Specification', (_message.Message,), {
+
+  'ECDSA' : _reflection.GeneratedProtocolMessageType('ECDSA', (_message.Message,), {
+    'DESCRIPTOR' : _SPECIFICATION_ECDSA,
+    '__module__' : 'crypto.keyspb.keyspb_pb2'
+    # @@protoc_insertion_point(class_scope:keyspb.Specification.ECDSA)
+    })
+  ,
+
+  'RSA' : _reflection.GeneratedProtocolMessageType('RSA', (_message.Message,), {
+    'DESCRIPTOR' : _SPECIFICATION_RSA,
+    '__module__' : 'crypto.keyspb.keyspb_pb2'
+    # @@protoc_insertion_point(class_scope:keyspb.Specification.RSA)
+    })
+  ,
+
+  'Ed25519' : _reflection.GeneratedProtocolMessageType('Ed25519', (_message.Message,), {
+    'DESCRIPTOR' : _SPECIFICATION_ED25519,
+    '__module__' : 'crypto.keyspb.keyspb_pb2'
+    # @@protoc_insertion_point(class_scope:keyspb.Specification.Ed25519)
+    })
+  ,
+  'DESCRIPTOR' : _SPECIFICATION,
+  '__module__' : 'crypto.keyspb.keyspb_pb2'
+  # @@protoc_insertion_point(class_scope:keyspb.Specification)
+  })
+_sym_db.RegisterMessage(Specification)
+_sym_db.RegisterMessage(Specification.ECDSA)
+_sym_db.RegisterMessage(Specification.RSA)
+_sym_db.RegisterMessage(Specification.Ed25519)
+
+PEMKeyFile = _reflection.GeneratedProtocolMessageType('PEMKeyFile', (_message.Message,), {
+  'DESCRIPTOR' : _PEMKEYFILE,
+  '__module__' : 'crypto.keyspb.keyspb_pb2'
+  # @@protoc_insertion_point(class_scope:keyspb.PEMKeyFile)
+  })
+_sym_db.RegisterMessage(PEMKeyFile)
+
+PrivateKey = _reflection.GeneratedProtocolMessageType('PrivateKey', (_message.Message,), {
+  'DESCRIPTOR' : _PRIVATEKEY,
+  '__module__' : 'crypto.keyspb.keyspb_pb2'
+  # @@protoc_insertion_point(class_scope:keyspb.PrivateKey)
+  })
+_sym_db.RegisterMessage(PrivateKey)
+
+PublicKey = _reflection.GeneratedProtocolMessageType('PublicKey', (_message.Message,), {
+  'DESCRIPTOR' : _PUBLICKEY,
+  '__module__' : 'crypto.keyspb.keyspb_pb2'
+  # @@protoc_insertion_point(class_scope:keyspb.PublicKey)
+  })
+_sym_db.RegisterMessage(PublicKey)
+
+PKCS11Config = _reflection.GeneratedProtocolMessageType('PKCS11Config', (_message.Message,), {
+  'DESCRIPTOR' : _PKCS11CONFIG,
+  '__module__' : 'crypto.keyspb.keyspb_pb2'
+  # @@protoc_insertion_point(class_scope:keyspb.PKCS11Config)
+  })
+_sym_db.RegisterMessage(PKCS11Config)
+
+
+DESCRIPTOR._options = None
+# @@protoc_insertion_point(module_scope)
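Because Specification places ecdsa_params, rsa_params and ed25519_params in a single params oneof, setting one member clears the others, and the chosen algorithm can be read back with WhichOneof. A small sketch, assuming the module is importable under the crypto.keyspb package as in trillian_pb2.py below:

    from crypto.keyspb import keyspb_pb2

    spec = keyspb_pb2.Specification()
    spec.rsa_params.bits = 4096
    assert spec.WhichOneof('params') == 'rsa_params'

    # Setting another member of the oneof clears rsa_params.
    spec.ecdsa_params.curve = keyspb_pb2.Specification.ECDSA.P256
    assert spec.WhichOneof('params') == 'ecdsa_params'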
diff --git a/proto/crypto/keyspb/keyspb_pb2_grpc.py b/proto/crypto/keyspb/keyspb_pb2_grpc.py
new file mode 100644
index 0000000..73636b2
--- /dev/null
+++ b/proto/crypto/keyspb/keyspb_pb2_grpc.py
@@ -0,0 +1,4 @@
+# pylint: skip-file
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
diff --git a/proto/crypto/sigpb/__init__.py b/proto/crypto/sigpb/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/proto/crypto/sigpb/__init__.py
diff --git a/proto/crypto/sigpb/sigpb.proto b/proto/crypto/sigpb/sigpb.proto
new file mode 100644
index 0000000..3e333d3
--- /dev/null
+++ b/proto/crypto/sigpb/sigpb.proto
@@ -0,0 +1,57 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+option go_package = "github.com/google/trillian/crypto/sigpb";
+
+package sigpb;
+
+// Protocol buffer encoding of the TLS DigitallySigned type, from RFC 5246 §4.7.
+message DigitallySigned {
+  // HashAlgorithm defines the approved methods for object hashing.
+  //
+  // Supported hash algorithms. The numbering space is the same as for TLS,
+  // given in RFC 5246 §7.4.1.4.1 and at:
+  // http://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-18
+  enum HashAlgorithm {
+    // No hash algorithm is used.
+    NONE = 0;
+    // SHA256 is used.
+    SHA256 = 4;
+  }
+
+  // SignatureAlgorithm defines the algorithm used to sign the object.
+  //
+  // Supported signature algorithms. The numbering space is the same as for TLS,
+  // given in RFC 5246 §7.4.1.4.1 and at:
+  // http://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-16
+  enum SignatureAlgorithm {
+    // Anonymous signature scheme.
+    ANONYMOUS = 0;
+    // RSA signature scheme.
+    RSA = 1;
+    // ECDSA signature scheme.
+    ECDSA = 3;
+    // Ed25519 signature scheme.
+    ED25519 = 7;
+  }
+
+  // hash_algorithm contains the hash algorithm used.
+  HashAlgorithm hash_algorithm = 1;
+  // signature_algorithm contains the signing algorithm used.
+  SignatureAlgorithm signature_algorithm = 2;
+  // signature contains the object signature.
+  bytes signature = 3;
+}
diff --git a/proto/crypto/sigpb/sigpb_pb2.py b/proto/crypto/sigpb/sigpb_pb2.py
new file mode 100644
index 0000000..b00d42a
--- /dev/null
+++ b/proto/crypto/sigpb/sigpb_pb2.py
@@ -0,0 +1,144 @@
+# pylint: skip-file
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: crypto/sigpb/sigpb.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='crypto/sigpb/sigpb.proto',
+  package='sigpb',
+  syntax='proto3',
+  serialized_options=_b('Z\'github.com/google/trillian/crypto/sigpb'),
+  serialized_pb=_b('\n\x18\x63rypto/sigpb/sigpb.proto\x12\x05sigpb\"\x97\x02\n\x0f\x44igitallySigned\x12<\n\x0ehash_algorithm\x18\x01 \x01(\x0e\x32$.sigpb.DigitallySigned.HashAlgorithm\x12\x46\n\x13signature_algorithm\x18\x02 \x01(\x0e\x32).sigpb.DigitallySigned.SignatureAlgorithm\x12\x11\n\tsignature\x18\x03 \x01(\x0c\"%\n\rHashAlgorithm\x12\x08\n\x04NONE\x10\x00\x12\n\n\x06SHA256\x10\x04\"D\n\x12SignatureAlgorithm\x12\r\n\tANONYMOUS\x10\x00\x12\x07\n\x03RSA\x10\x01\x12\t\n\x05\x45\x43\x44SA\x10\x03\x12\x0b\n\x07\x45\x44\x32\x35\x35\x31\x39\x10\x07\x42)Z\'github.com/google/trillian/crypto/sigpbb\x06proto3')
+)
+
+
+
+_DIGITALLYSIGNED_HASHALGORITHM = _descriptor.EnumDescriptor(
+  name='HashAlgorithm',
+  full_name='sigpb.DigitallySigned.HashAlgorithm',
+  filename=None,
+  file=DESCRIPTOR,
+  values=[
+    _descriptor.EnumValueDescriptor(
+      name='NONE', index=0, number=0,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='SHA256', index=1, number=4,
+      serialized_options=None,
+      type=None),
+  ],
+  containing_type=None,
+  serialized_options=None,
+  serialized_start=208,
+  serialized_end=245,
+)
+_sym_db.RegisterEnumDescriptor(_DIGITALLYSIGNED_HASHALGORITHM)
+
+_DIGITALLYSIGNED_SIGNATUREALGORITHM = _descriptor.EnumDescriptor(
+  name='SignatureAlgorithm',
+  full_name='sigpb.DigitallySigned.SignatureAlgorithm',
+  filename=None,
+  file=DESCRIPTOR,
+  values=[
+    _descriptor.EnumValueDescriptor(
+      name='ANONYMOUS', index=0, number=0,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='RSA', index=1, number=1,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='ECDSA', index=2, number=3,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='ED25519', index=3, number=7,
+      serialized_options=None,
+      type=None),
+  ],
+  containing_type=None,
+  serialized_options=None,
+  serialized_start=247,
+  serialized_end=315,
+)
+_sym_db.RegisterEnumDescriptor(_DIGITALLYSIGNED_SIGNATUREALGORITHM)
+
+
+_DIGITALLYSIGNED = _descriptor.Descriptor(
+  name='DigitallySigned',
+  full_name='sigpb.DigitallySigned',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='hash_algorithm', full_name='sigpb.DigitallySigned.hash_algorithm', index=0,
+      number=1, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='signature_algorithm', full_name='sigpb.DigitallySigned.signature_algorithm', index=1,
+      number=2, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='signature', full_name='sigpb.DigitallySigned.signature', index=2,
+      number=3, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+    _DIGITALLYSIGNED_HASHALGORITHM,
+    _DIGITALLYSIGNED_SIGNATUREALGORITHM,
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=36,
+  serialized_end=315,
+)
+
+_DIGITALLYSIGNED.fields_by_name['hash_algorithm'].enum_type = _DIGITALLYSIGNED_HASHALGORITHM
+_DIGITALLYSIGNED.fields_by_name['signature_algorithm'].enum_type = _DIGITALLYSIGNED_SIGNATUREALGORITHM
+_DIGITALLYSIGNED_HASHALGORITHM.containing_type = _DIGITALLYSIGNED
+_DIGITALLYSIGNED_SIGNATUREALGORITHM.containing_type = _DIGITALLYSIGNED
+DESCRIPTOR.message_types_by_name['DigitallySigned'] = _DIGITALLYSIGNED
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+DigitallySigned = _reflection.GeneratedProtocolMessageType('DigitallySigned', (_message.Message,), {
+  'DESCRIPTOR' : _DIGITALLYSIGNED,
+  '__module__' : 'crypto.sigpb.sigpb_pb2'
+  # @@protoc_insertion_point(class_scope:sigpb.DigitallySigned)
+  })
+_sym_db.RegisterMessage(DigitallySigned)
+
+
+DESCRIPTOR._options = None
+# @@protoc_insertion_point(module_scope)
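The two enums reuse the TLS numbering (SHA256 = 4, ECDSA = 3, and so on), which is why the values are sparse. A minimal construction sketch, assuming the module is importable under crypto.sigpb; the signature bytes are a placeholder:

    from crypto.sigpb import sigpb_pb2

    ds = sigpb_pb2.DigitallySigned(
        hash_algorithm=sigpb_pb2.DigitallySigned.SHA256,      # TLS value 4
        signature_algorithm=sigpb_pb2.DigitallySigned.ECDSA,  # TLS value 3
        signature=b'\x00' * 64)                               # placeholder bytes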
diff --git a/proto/crypto/sigpb/sigpb_pb2_grpc.py b/proto/crypto/sigpb/sigpb_pb2_grpc.py
new file mode 100644
index 0000000..73636b2
--- /dev/null
+++ b/proto/crypto/sigpb/sigpb_pb2_grpc.py
@@ -0,0 +1,4 @@
+# pylint: skip-file
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
diff --git a/proto/trillian.proto b/proto/trillian.proto
new file mode 100644
index 0000000..e14522f
--- /dev/null
+++ b/proto/trillian.proto
@@ -0,0 +1,316 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+option java_multiple_files = true;
+option java_package = "com.google.trillian.proto";
+option java_outer_classname = "TrillianProto";
+option go_package = "github.com/google/trillian";
+
+package trillian;
+
+import "crypto/keyspb/keyspb.proto";
+import "crypto/sigpb/sigpb.proto";
+import "google/protobuf/any.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+
+// LogRootFormat specifies the fields that are covered by the
+// SignedLogRoot signature, as well as their ordering and formats.
+enum LogRootFormat {
+  LOG_ROOT_FORMAT_UNKNOWN = 0;
+  LOG_ROOT_FORMAT_V1 = 1;
+}
+
+// MapRootFormat specifies the fields that are covered by the
+// SignedMapRoot signature, as well as their ordering and formats.
+enum MapRootFormat {
+  MAP_ROOT_FORMAT_UNKNOWN = 0;
+  MAP_ROOT_FORMAT_V1 = 1;
+}
+
+// What goes in here?
+// Things that are exposed through the public Trillian APIs.
+
+// Defines the way empty / node / leaf hashes are constructed, incorporating
+// preimage protection, which can be application-specific.
+enum HashStrategy {
+  // Hash strategy cannot be determined. Included to enable detection of
+  // mismatched proto versions being used. Represents an invalid value.
+  UNKNOWN_HASH_STRATEGY = 0;
+
+  // Certificate Transparency strategy: leaf hash prefix = 0x00, node prefix =
+  // 0x01, empty hash is digest([]byte{}), as defined in the specification.
+  RFC6962_SHA256 = 1;
+
+  // Sparse Merkle Tree strategy: leaf hash prefix = 0x00, node prefix = 0x01,
+  // empty branch is recursively computed from empty leaf nodes.
+  // NOT secure in a multi-tree environment. For testing only.
+  TEST_MAP_HASHER = 2;
+
+  // Append-only log strategy where leaf nodes are defined as the ObjectHash.
+  // All other properties are equal to RFC6962_SHA256.
+  OBJECT_RFC6962_SHA256 = 3;
+
+  // The CONIKS sparse tree hasher with SHA512_256 as the hash algorithm.
+  CONIKS_SHA512_256 = 4;
+
+  // The CONIKS sparse tree hasher with SHA256 as the hash algorithm.
+  CONIKS_SHA256 = 5;
+}
+
+// State of the tree.
+enum TreeState {
+  // Tree state cannot be determined. Included to enable detection of
+  // mismatched proto versions being used. Represents an invalid value.
+  UNKNOWN_TREE_STATE = 0;
+
+  // Active trees are able to respond to both read and write requests.
+  ACTIVE = 1;
+
+  // Frozen trees are only able to respond to read requests; writing to a frozen
+  // tree is forbidden. Trees should not be frozen when there are entries
+  // in the queue that have not yet been integrated. See the DRAINING
+  // state for this case.
+  FROZEN = 2;
+
+  // Deprecated: now tracked in Tree.deleted.
+  DEPRECATED_SOFT_DELETED = 3 [deprecated = true];
+
+  // Deprecated: now tracked in Tree.deleted.
+  DEPRECATED_HARD_DELETED = 4 [deprecated = true];
+
+  // A tree that is draining will continue to integrate queued entries.
+  // No new entries should be accepted.
+  DRAINING = 5;
+}
+
+// Type of the tree.
+enum TreeType {
+  // Tree type cannot be determined. Included to enable detection of mismatched
+  // proto versions being used. Represents an invalid value.
+  UNKNOWN_TREE_TYPE = 0;
+
+  // Tree represents a verifiable log.
+  LOG = 1;
+
+  // Tree represents a verifiable map.
+  MAP = 2;
+
+  // Tree represents a verifiable pre-ordered log, i.e., a log whose entries are
+  // placed according to sequence numbers assigned outside of Trillian.
+  PREORDERED_LOG = 3;
+}
+
+// Represents a tree, which may be either a verifiable log or map.
+// Readonly attributes are assigned at tree creation, after which they may not
+// be modified.
+//
+// Note: Many APIs within the rest of the code require these objects to
+// be provided. For safety they should be obtained via Admin API calls and
+// not created dynamically.
+message Tree {
+  // ID of the tree.
+  // Readonly.
+  int64 tree_id = 1;
+
+  // State of the tree.
+  // Trees are ACTIVE after creation. At any point the tree may transition
+  // between ACTIVE, DRAINING and FROZEN states.
+  TreeState tree_state = 2;
+
+  // Type of the tree.
+  // Readonly after Tree creation. Exception: Can be switched from
+  // PREORDERED_LOG to LOG if the Tree is and remains in the FROZEN state.
+  TreeType tree_type = 3;
+
+  // Hash strategy to be used by the tree.
+  // Readonly.
+  HashStrategy hash_strategy = 4;
+
+  // Hash algorithm to be used by the tree.
+  // Readonly.
+  sigpb.DigitallySigned.HashAlgorithm hash_algorithm = 5;
+
+  // Signature algorithm to be used by the tree.
+  // Readonly.
+  sigpb.DigitallySigned.SignatureAlgorithm signature_algorithm = 6;
+
+  reserved 18;  // Signature cipher suite (removed)
+  reserved 7;   // DuplicatePolicy (removed)
+
+  // Display name of the tree.
+  // Optional.
+  string display_name = 8;
+
+  // Description of the tree.
+  // Optional.
+  string description = 9;
+
+  reserved 10;  // create_time_millis_since_epoch (removed)
+  reserved 11;  // update_time_millis_since_epoch (removed)
+
+  // Identifies the private key used for signing tree heads and entry
+  // timestamps.
+  // This can be any type of message to accommodate different key management
+  // systems, e.g. PEM files, HSMs, etc.
+  // Private keys are write-only: they're never returned by RPCs.
+  // The private_key message can be changed after a tree is created, but the
+  // underlying key must remain the same - this is to enable migrating a key
+  // from one provider to another.
+  google.protobuf.Any private_key = 12;
+
+  // Storage-specific settings.
+  // Varies according to the storage implementation backing Trillian.
+  google.protobuf.Any storage_settings = 13;
+
+  // The public key used for verifying tree heads and entry timestamps.
+  // Readonly.
+  keyspb.PublicKey public_key = 14;
+
+  // Interval after which a new signed root is produced even if there have been
+  // no submissions. If zero, this behavior is disabled.
+  google.protobuf.Duration max_root_duration = 15;
+
+  // Time of tree creation.
+  // Readonly.
+  google.protobuf.Timestamp create_time = 16;
+
+  // Time of last tree update.
+  // Readonly (automatically assigned on updates).
+  google.protobuf.Timestamp update_time = 17;
+
+  // If true, the tree has been deleted.
+  // Deleted trees may be undeleted during a certain time window, after which
+  // they're permanently deleted (and unrecoverable).
+  // Readonly.
+  bool deleted = 19;
+
+  // Time of tree deletion, if any.
+  // Readonly.
+  google.protobuf.Timestamp delete_time = 20;
+}
+
+message SignedEntryTimestamp {
+  int64 timestamp_nanos = 1;
+  int64 log_id = 2;
+  sigpb.DigitallySigned signature = 3;
+}
+
+// SignedLogRoot represents a commitment by a Log to a particular tree.
+message SignedLogRoot {
+  // Deleted: TimestampNanos moved to LogRoot.
+  reserved 1;
+  // Deleted: RootHash moved to LogRoot.
+  reserved 2;
+  // Deleted: TreeSize moved to LogRoot.
+  reserved 3;
+  // Deleted: Signature replaced by LogRootSignature.
+  reserved 4;
+  // Deleted: LogID is associated with the public key that validates signature.
+  reserved 5;
+  // Deleted: TreeRevision moved to LogRoot.
+  reserved 6;
+
+  // key_hint is a hint to identify the public key for signature verification.
+  // key_hint is not authenticated and may be incorrect or missing, in which
+  // case all known public keys may be used to verify the signature.
+  // When directly communicating with a Trillian gRPC server, the key_hint will
+  // typically contain the LogID encoded as a big-endian 64-bit integer;
+  // however, in other contexts the key_hint is likely to have different
+  // contents (e.g. it could be a GUID, a URL + TreeID, or it could be
+  // derived from the public key itself).
+  bytes key_hint = 7;
+
+  // log_root holds the TLS-serialization of the following structure (described
+  // in RFC5246 notation). Clients should validate log_root_signature with
+  // VerifySignedLogRoot before deserializing log_root.
+  // enum { v1(1), (65535)} Version;
+  // struct {
+  //   uint64 tree_size;
+  //   opaque root_hash<0..128>;
+  //   uint64 timestamp_nanos;
+  //   uint64 revision;
+  //   opaque metadata<0..65535>;
+  // } LogRootV1;
+  // struct {
+  //   Version version;
+  //   select(version) {
+  //     case v1: LogRootV1;
+  //   }
+  // } LogRoot;
+  //
+  // A serialized v1 log root will therefore be laid out as:
+  //
+  // +---+---+---+---+---+---+---+---+---+---+---+---+---+---+-....--+
+  // | ver=1 |          tree_size            |len|    root_hashlen   |
+  // +---+---+---+---+---+---+---+---+---+---+---+---+---+---+-....--+
+  //
+  // +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+  // |        timestamp_nanos        |      revision                 |
+  // +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+  //
+  // +---+---+---+---+---+-....---+
+  // |  len  |    metadata        |
+  // +---+---+---+---+---+-....---+
+  //
+  // (with all integers encoded big-endian).
+  bytes log_root = 8;
+
+  // log_root_signature is the raw signature over log_root.
+  bytes log_root_signature = 9;
+}
+
+// SignedMapRoot represents a commitment by a Map to a particular tree.
+message SignedMapRoot {
+  reserved 1;  // Deprecated: Was timestamp_nanos. Use map_root.
+  reserved 2;  // Deprecated: Was root_hash. Use map_root.
+  reserved 3;  // Deprecated: Was MapperMetadata. Use map_root.
+  reserved 5;  // Deprecated: Was map_id. Use signature.
+  reserved 6;  // Deprecated: Was map_revision. Use map_root.
+  reserved 7;  // Deprecated: Was metadata Any. Use map_root.
+  reserved 8;  // Deprecated: Was metadata bytes. Use map_root.
+
+  // map_root holds the TLS-serialization of the following structure (described
+  // in RFC5246 notation). Clients should validate signature with
+  // VerifySignedMapRoot before deserializing map_root.
+  // enum { v1(1), (65535)} Version;
+  // struct {
+  //   opaque root_hash<0..128>;
+  //   uint64 timestamp_nanos;
+  //   uint64 revision;
+  //   opaque metadata<0..65535>;
+  // } MapRootV1;
+  // struct {
+  //   Version version;
+  //   select(version) {
+  //     case v1: MapRootV1;
+  //   }
+  // } MapRoot;
+  bytes map_root = 9;
+  // signature is the raw signature over map_root.
+  bytes signature = 4;
+}
+
+// Proof holds a consistency or inclusion proof for a Merkle tree, as returned
+// by the API.
+message Proof {
+  // leaf_index indicates the requested leaf index when this message is used for
+  // a leaf inclusion proof.  This field is set to zero when this message is
+  // used for a consistency proof.
+  int64 leaf_index = 1;
+  reserved 2; // Contained internal node details (removed)
+  repeated bytes hashes = 3;
+}
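The SignedLogRoot comment above fully determines the byte layout of a v1 log root, so it can be unpacked with fixed-width big-endian reads plus two length-prefixed fields. A sketch of such a parser (the helper name is hypothetical, and real clients must verify log_root_signature before trusting any of these fields):

    import struct

    def parse_log_root_v1(log_root):
        # version (uint16), tree_size (uint64), root_hash length (uint8).
        version, tree_size, hash_len = struct.unpack_from('>HQB', log_root, 0)
        if version != 1:
            raise ValueError('unsupported LogRoot version: %d' % version)
        offset = 11
        root_hash = log_root[offset:offset + hash_len]
        offset += hash_len
        # timestamp_nanos (uint64), revision (uint64), metadata length (uint16).
        timestamp_nanos, revision, meta_len = struct.unpack_from(
            '>QQH', log_root, offset)
        offset += 18
        metadata = log_root[offset:offset + meta_len]
        return tree_size, root_hash, timestamp_nanos, revision, metadata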
diff --git a/proto/trillian_pb2.py b/proto/trillian_pb2.py
new file mode 100644
index 0000000..4c7ddd6
--- /dev/null
+++ b/proto/trillian_pb2.py
@@ -0,0 +1,576 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: trillian.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf.internal import enum_type_wrapper
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from crypto.keyspb import keyspb_pb2 as crypto_dot_keyspb_dot_keyspb__pb2
+from crypto.sigpb import sigpb_pb2 as crypto_dot_sigpb_dot_sigpb__pb2
+from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
+from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
+from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='trillian.proto',
+  package='trillian',
+  syntax='proto3',
+  serialized_options=_b('\n\031com.google.trillian.protoB\rTrillianProtoP\001Z\032github.com/google/trillian'),
+  serialized_pb=_b('\n\x0etrillian.proto\x12\x08trillian\x1a\x1a\x63rypto/keyspb/keyspb.proto\x1a\x18\x63rypto/sigpb/sigpb.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xbb\x05\n\x04Tree\x12\x0f\n\x07tree_id\x18\x01 \x01(\x03\x12\'\n\ntree_state\x18\x02 \x01(\x0e\x32\x13.trillian.TreeState\x12%\n\ttree_type\x18\x03 \x01(\x0e\x32\x12.trillian.TreeType\x12-\n\rhash_strategy\x18\x04 \x01(\x0e\x32\x16.trillian.HashStrategy\x12<\n\x0ehash_algorithm\x18\x05 \x01(\x0e\x32$.sigpb.DigitallySigned.HashAlgorithm\x12\x46\n\x13signature_algorithm\x18\x06 \x01(\x0e\x32).sigpb.DigitallySigned.SignatureAlgorithm\x12\x14\n\x0c\x64isplay_name\x18\x08 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\t \x01(\t\x12)\n\x0bprivate_key\x18\x0c \x01(\x0b\x32\x14.google.protobuf.Any\x12.\n\x10storage_settings\x18\r \x01(\x0b\x32\x14.google.protobuf.Any\x12%\n\npublic_key\x18\x0e \x01(\x0b\x32\x11.keyspb.PublicKey\x12\x34\n\x11max_root_duration\x18\x0f \x01(\x0b\x32\x19.google.protobuf.Duration\x12/\n\x0b\x63reate_time\x18\x10 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x11 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07\x64\x65leted\x18\x13 \x01(\x08\x12/\n\x0b\x64\x65lete_time\x18\x14 \x01(\x0b\x32\x1a.google.protobuf.TimestampJ\x04\x08\x12\x10\x13J\x04\x08\x07\x10\x08J\x04\x08\n\x10\x0bJ\x04\x08\x0b\x10\x0c\"j\n\x14SignedEntryTimestamp\x12\x17\n\x0ftimestamp_nanos\x18\x01 \x01(\x03\x12\x0e\n\x06log_id\x18\x02 \x01(\x03\x12)\n\tsignature\x18\x03 \x01(\x0b\x32\x16.sigpb.DigitallySigned\"s\n\rSignedLogRoot\x12\x10\n\x08key_hint\x18\x07 \x01(\x0c\x12\x10\n\x08log_root\x18\x08 \x01(\x0c\x12\x1a\n\x12log_root_signature\x18\t \x01(\x0cJ\x04\x08\x01\x10\x02J\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07\"^\n\rSignedMapRoot\x12\x10\n\x08map_root\x18\t \x01(\x0c\x12\x11\n\tsignature\x18\x04 \x01(\x0cJ\x04\x08\x01\x10\x02J\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04J\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07J\x04\x08\x07\x10\x08J\x04\x08\x08\x10\t\"1\n\x05Proof\x12\x12\n\nleaf_index\x18\x01 \x01(\x03\x12\x0e\n\x06hashes\x18\x03 \x03(\x0cJ\x04\x08\x02\x10\x03*D\n\rLogRootFormat\x12\x1b\n\x17LOG_ROOT_FORMAT_UNKNOWN\x10\x00\x12\x16\n\x12LOG_ROOT_FORMAT_V1\x10\x01*D\n\rMapRootFormat\x12\x1b\n\x17MAP_ROOT_FORMAT_UNKNOWN\x10\x00\x12\x16\n\x12MAP_ROOT_FORMAT_V1\x10\x01*\x97\x01\n\x0cHashStrategy\x12\x19\n\x15UNKNOWN_HASH_STRATEGY\x10\x00\x12\x12\n\x0eRFC6962_SHA256\x10\x01\x12\x13\n\x0fTEST_MAP_HASHER\x10\x02\x12\x19\n\x15OBJECT_RFC6962_SHA256\x10\x03\x12\x15\n\x11\x43ONIKS_SHA512_256\x10\x04\x12\x11\n\rCONIKS_SHA256\x10\x05*\x8b\x01\n\tTreeState\x12\x16\n\x12UNKNOWN_TREE_STATE\x10\x00\x12\n\n\x06\x41\x43TIVE\x10\x01\x12\n\n\x06\x46ROZEN\x10\x02\x12\x1f\n\x17\x44\x45PRECATED_SOFT_DELETED\x10\x03\x1a\x02\x08\x01\x12\x1f\n\x17\x44\x45PRECATED_HARD_DELETED\x10\x04\x1a\x02\x08\x01\x12\x0c\n\x08\x44RAINING\x10\x05*G\n\x08TreeType\x12\x15\n\x11UNKNOWN_TREE_TYPE\x10\x00\x12\x07\n\x03LOG\x10\x01\x12\x07\n\x03MAP\x10\x02\x12\x12\n\x0ePREORDERED_LOG\x10\x03\x42H\n\x19\x63om.google.trillian.protoB\rTrillianProtoP\x01Z\x1agithub.com/google/trillianb\x06proto3')
+  ,
+  dependencies=[crypto_dot_keyspb_dot_keyspb__pb2.DESCRIPTOR,crypto_dot_sigpb_dot_sigpb__pb2.DESCRIPTOR,google_dot_protobuf_dot_any__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
+
+_LOGROOTFORMAT = _descriptor.EnumDescriptor(
+  name='LogRootFormat',
+  full_name='trillian.LogRootFormat',
+  filename=None,
+  file=DESCRIPTOR,
+  values=[
+    _descriptor.EnumValueDescriptor(
+      name='LOG_ROOT_FORMAT_UNKNOWN', index=0, number=0,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='LOG_ROOT_FORMAT_V1', index=1, number=1,
+      serialized_options=None,
+      type=None),
+  ],
+  containing_type=None,
+  serialized_options=None,
+  serialized_start=1248,
+  serialized_end=1316,
+)
+_sym_db.RegisterEnumDescriptor(_LOGROOTFORMAT)
+
+LogRootFormat = enum_type_wrapper.EnumTypeWrapper(_LOGROOTFORMAT)
+_MAPROOTFORMAT = _descriptor.EnumDescriptor(
+  name='MapRootFormat',
+  full_name='trillian.MapRootFormat',
+  filename=None,
+  file=DESCRIPTOR,
+  values=[
+    _descriptor.EnumValueDescriptor(
+      name='MAP_ROOT_FORMAT_UNKNOWN', index=0, number=0,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='MAP_ROOT_FORMAT_V1', index=1, number=1,
+      serialized_options=None,
+      type=None),
+  ],
+  containing_type=None,
+  serialized_options=None,
+  serialized_start=1318,
+  serialized_end=1386,
+)
+_sym_db.RegisterEnumDescriptor(_MAPROOTFORMAT)
+
+MapRootFormat = enum_type_wrapper.EnumTypeWrapper(_MAPROOTFORMAT)
+_HASHSTRATEGY = _descriptor.EnumDescriptor(
+  name='HashStrategy',
+  full_name='trillian.HashStrategy',
+  filename=None,
+  file=DESCRIPTOR,
+  values=[
+    _descriptor.EnumValueDescriptor(
+      name='UNKNOWN_HASH_STRATEGY', index=0, number=0,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='RFC6962_SHA256', index=1, number=1,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='TEST_MAP_HASHER', index=2, number=2,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='OBJECT_RFC6962_SHA256', index=3, number=3,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='CONIKS_SHA512_256', index=4, number=4,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='CONIKS_SHA256', index=5, number=5,
+      serialized_options=None,
+      type=None),
+  ],
+  containing_type=None,
+  serialized_options=None,
+  serialized_start=1389,
+  serialized_end=1540,
+)
+_sym_db.RegisterEnumDescriptor(_HASHSTRATEGY)
+
+HashStrategy = enum_type_wrapper.EnumTypeWrapper(_HASHSTRATEGY)
+_TREESTATE = _descriptor.EnumDescriptor(
+  name='TreeState',
+  full_name='trillian.TreeState',
+  filename=None,
+  file=DESCRIPTOR,
+  values=[
+    _descriptor.EnumValueDescriptor(
+      name='UNKNOWN_TREE_STATE', index=0, number=0,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='ACTIVE', index=1, number=1,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='FROZEN', index=2, number=2,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='DEPRECATED_SOFT_DELETED', index=3, number=3,
+      serialized_options=_b('\010\001'),
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='DEPRECATED_HARD_DELETED', index=4, number=4,
+      serialized_options=_b('\010\001'),
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='DRAINING', index=5, number=5,
+      serialized_options=None,
+      type=None),
+  ],
+  containing_type=None,
+  serialized_options=None,
+  serialized_start=1543,
+  serialized_end=1682,
+)
+_sym_db.RegisterEnumDescriptor(_TREESTATE)
+
+TreeState = enum_type_wrapper.EnumTypeWrapper(_TREESTATE)
+_TREETYPE = _descriptor.EnumDescriptor(
+  name='TreeType',
+  full_name='trillian.TreeType',
+  filename=None,
+  file=DESCRIPTOR,
+  values=[
+    _descriptor.EnumValueDescriptor(
+      name='UNKNOWN_TREE_TYPE', index=0, number=0,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='LOG', index=1, number=1,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='MAP', index=2, number=2,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='PREORDERED_LOG', index=3, number=3,
+      serialized_options=None,
+      type=None),
+  ],
+  containing_type=None,
+  serialized_options=None,
+  serialized_start=1684,
+  serialized_end=1755,
+)
+_sym_db.RegisterEnumDescriptor(_TREETYPE)
+
+TreeType = enum_type_wrapper.EnumTypeWrapper(_TREETYPE)
+LOG_ROOT_FORMAT_UNKNOWN = 0
+LOG_ROOT_FORMAT_V1 = 1
+MAP_ROOT_FORMAT_UNKNOWN = 0
+MAP_ROOT_FORMAT_V1 = 1
+UNKNOWN_HASH_STRATEGY = 0
+RFC6962_SHA256 = 1
+TEST_MAP_HASHER = 2
+OBJECT_RFC6962_SHA256 = 3
+CONIKS_SHA512_256 = 4
+CONIKS_SHA256 = 5
+UNKNOWN_TREE_STATE = 0
+ACTIVE = 1
+FROZEN = 2
+DEPRECATED_SOFT_DELETED = 3
+DEPRECATED_HARD_DELETED = 4
+DRAINING = 5
+UNKNOWN_TREE_TYPE = 0
+LOG = 1
+MAP = 2
+PREORDERED_LOG = 3
+
+
+
+_TREE = _descriptor.Descriptor(
+  name='Tree',
+  full_name='trillian.Tree',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='tree_id', full_name='trillian.Tree.tree_id', index=0,
+      number=1, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='tree_state', full_name='trillian.Tree.tree_state', index=1,
+      number=2, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='tree_type', full_name='trillian.Tree.tree_type', index=2,
+      number=3, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='hash_strategy', full_name='trillian.Tree.hash_strategy', index=3,
+      number=4, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='hash_algorithm', full_name='trillian.Tree.hash_algorithm', index=4,
+      number=5, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='signature_algorithm', full_name='trillian.Tree.signature_algorithm', index=5,
+      number=6, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='display_name', full_name='trillian.Tree.display_name', index=6,
+      number=8, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='description', full_name='trillian.Tree.description', index=7,
+      number=9, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='private_key', full_name='trillian.Tree.private_key', index=8,
+      number=12, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='storage_settings', full_name='trillian.Tree.storage_settings', index=9,
+      number=13, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='public_key', full_name='trillian.Tree.public_key', index=10,
+      number=14, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='max_root_duration', full_name='trillian.Tree.max_root_duration', index=11,
+      number=15, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='create_time', full_name='trillian.Tree.create_time', index=12,
+      number=16, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='update_time', full_name='trillian.Tree.update_time', index=13,
+      number=17, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='deleted', full_name='trillian.Tree.deleted', index=14,
+      number=19, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='delete_time', full_name='trillian.Tree.delete_time', index=15,
+      number=20, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=175,
+  serialized_end=874,
+)
+
+
+_SIGNEDENTRYTIMESTAMP = _descriptor.Descriptor(
+  name='SignedEntryTimestamp',
+  full_name='trillian.SignedEntryTimestamp',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='timestamp_nanos', full_name='trillian.SignedEntryTimestamp.timestamp_nanos', index=0,
+      number=1, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='log_id', full_name='trillian.SignedEntryTimestamp.log_id', index=1,
+      number=2, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='signature', full_name='trillian.SignedEntryTimestamp.signature', index=2,
+      number=3, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=876,
+  serialized_end=982,
+)
+
+
+_SIGNEDLOGROOT = _descriptor.Descriptor(
+  name='SignedLogRoot',
+  full_name='trillian.SignedLogRoot',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='key_hint', full_name='trillian.SignedLogRoot.key_hint', index=0,
+      number=7, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='log_root', full_name='trillian.SignedLogRoot.log_root', index=1,
+      number=8, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='log_root_signature', full_name='trillian.SignedLogRoot.log_root_signature', index=2,
+      number=9, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=984,
+  serialized_end=1099,
+)
+
+
+_SIGNEDMAPROOT = _descriptor.Descriptor(
+  name='SignedMapRoot',
+  full_name='trillian.SignedMapRoot',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='map_root', full_name='trillian.SignedMapRoot.map_root', index=0,
+      number=9, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='signature', full_name='trillian.SignedMapRoot.signature', index=1,
+      number=4, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1101,
+  serialized_end=1195,
+)
+
+
+_PROOF = _descriptor.Descriptor(
+  name='Proof',
+  full_name='trillian.Proof',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='leaf_index', full_name='trillian.Proof.leaf_index', index=0,
+      number=1, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='hashes', full_name='trillian.Proof.hashes', index=1,
+      number=3, type=12, cpp_type=9, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1197,
+  serialized_end=1246,
+)
+
+_TREE.fields_by_name['tree_state'].enum_type = _TREESTATE
+_TREE.fields_by_name['tree_type'].enum_type = _TREETYPE
+_TREE.fields_by_name['hash_strategy'].enum_type = _HASHSTRATEGY
+_TREE.fields_by_name['hash_algorithm'].enum_type = crypto_dot_sigpb_dot_sigpb__pb2._DIGITALLYSIGNED_HASHALGORITHM
+_TREE.fields_by_name['signature_algorithm'].enum_type = crypto_dot_sigpb_dot_sigpb__pb2._DIGITALLYSIGNED_SIGNATUREALGORITHM
+_TREE.fields_by_name['private_key'].message_type = google_dot_protobuf_dot_any__pb2._ANY
+_TREE.fields_by_name['storage_settings'].message_type = google_dot_protobuf_dot_any__pb2._ANY
+_TREE.fields_by_name['public_key'].message_type = crypto_dot_keyspb_dot_keyspb__pb2._PUBLICKEY
+_TREE.fields_by_name['max_root_duration'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
+_TREE.fields_by_name['create_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_TREE.fields_by_name['update_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_TREE.fields_by_name['delete_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_SIGNEDENTRYTIMESTAMP.fields_by_name['signature'].message_type = crypto_dot_sigpb_dot_sigpb__pb2._DIGITALLYSIGNED
+DESCRIPTOR.message_types_by_name['Tree'] = _TREE
+DESCRIPTOR.message_types_by_name['SignedEntryTimestamp'] = _SIGNEDENTRYTIMESTAMP
+DESCRIPTOR.message_types_by_name['SignedLogRoot'] = _SIGNEDLOGROOT
+DESCRIPTOR.message_types_by_name['SignedMapRoot'] = _SIGNEDMAPROOT
+DESCRIPTOR.message_types_by_name['Proof'] = _PROOF
+DESCRIPTOR.enum_types_by_name['LogRootFormat'] = _LOGROOTFORMAT
+DESCRIPTOR.enum_types_by_name['MapRootFormat'] = _MAPROOTFORMAT
+DESCRIPTOR.enum_types_by_name['HashStrategy'] = _HASHSTRATEGY
+DESCRIPTOR.enum_types_by_name['TreeState'] = _TREESTATE
+DESCRIPTOR.enum_types_by_name['TreeType'] = _TREETYPE
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+Tree = _reflection.GeneratedProtocolMessageType('Tree', (_message.Message,), {
+  'DESCRIPTOR' : _TREE,
+  '__module__' : 'trillian_pb2'
+  # @@protoc_insertion_point(class_scope:trillian.Tree)
+  })
+_sym_db.RegisterMessage(Tree)
+
+SignedEntryTimestamp = _reflection.GeneratedProtocolMessageType('SignedEntryTimestamp', (_message.Message,), {
+  'DESCRIPTOR' : _SIGNEDENTRYTIMESTAMP,
+  '__module__' : 'trillian_pb2'
+  # @@protoc_insertion_point(class_scope:trillian.SignedEntryTimestamp)
+  })
+_sym_db.RegisterMessage(SignedEntryTimestamp)
+
+SignedLogRoot = _reflection.GeneratedProtocolMessageType('SignedLogRoot', (_message.Message,), {
+  'DESCRIPTOR' : _SIGNEDLOGROOT,
+  '__module__' : 'trillian_pb2'
+  # @@protoc_insertion_point(class_scope:trillian.SignedLogRoot)
+  })
+_sym_db.RegisterMessage(SignedLogRoot)
+
+SignedMapRoot = _reflection.GeneratedProtocolMessageType('SignedMapRoot', (_message.Message,), {
+  'DESCRIPTOR' : _SIGNEDMAPROOT,
+  '__module__' : 'trillian_pb2'
+  # @@protoc_insertion_point(class_scope:trillian.SignedMapRoot)
+  })
+_sym_db.RegisterMessage(SignedMapRoot)
+
+Proof = _reflection.GeneratedProtocolMessageType('Proof', (_message.Message,), {
+  'DESCRIPTOR' : _PROOF,
+  '__module__' : 'trillian_pb2'
+  # @@protoc_insertion_point(class_scope:trillian.Proof)
+  })
+_sym_db.RegisterMessage(Proof)
+
+
+DESCRIPTOR._options = None
+_TREESTATE.values_by_name["DEPRECATED_SOFT_DELETED"]._options = None
+_TREESTATE.values_by_name["DEPRECATED_HARD_DELETED"]._options = None
+# @@protoc_insertion_point(module_scope)
diff --git a/proto/trillian_pb2_grpc.py b/proto/trillian_pb2_grpc.py
new file mode 100644
index 0000000..a894352
--- /dev/null
+++ b/proto/trillian_pb2_grpc.py
@@ -0,0 +1,3 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
diff --git a/pylintrc b/pylintrc
new file mode 100644
index 0000000..7d6abe3
--- /dev/null
+++ b/pylintrc
@@ -0,0 +1,489 @@
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+[MASTER]
+
+# Specify a configuration file.
+#rcfile=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Profiled execution.
+profile=no
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=CVS,.svn,.git
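+# Skip the generated protobuf/gRPC stubs (e.g. proto/trillian_pb2.py); they
+# are machine-generated and not meant to pass style checks.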
+ignore-patterns=.*pb2\.py,.*pb2_grpc\.py
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+    pylint.extensions.bad_builtin,
+    pylint.extensions.check_elif,
+    pylint.extensions.docstyle,
+    pylint.extensions.emptystring,
+    pylint.extensions.overlapping_exceptions,
+    pylint.extensions.redefined_variable_type,
+
+# Use multiple processes to speed up Pylint.  A value of 0 autodetects available
+# processors.
+jobs=0
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loaded into the active Python interpreter and may
+# run arbitrary code
+extension-pkg-whitelist=
+
+# Allow optimization of some AST trees. This will activate a peephole AST
+# optimizer, which will apply various small optimizations. For instance, it can
+# be used to obtain the result of joining multiple strings with the addition
+# operator. Joining a lot of strings can lead to a maximum recursion error in
+# Pylint and this flag can prevent that. It has one side effect, the resulting
+# AST will be different from the real one.
+optimize-ast=no
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
+confidence=
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifiers separated by comma (,) or put this option
+# multiple times. See also the "--disable" option for examples.
+enable=
+    apply-builtin,
+    backtick,
+    bad-python3-import,
+    basestring-builtin,
+    buffer-builtin,
+    cmp-builtin,
+    cmp-method,
+    coerce-builtin,
+    coerce-method,
+    delslice-method,
+    deprecated-itertools-function,
+    deprecated-str-translate-call,
+    deprecated-string-function,
+    deprecated-types-field,
+    dict-items-not-iterating,
+    dict-iter-method,
+    dict-keys-not-iterating,
+    dict-values-not-iterating,
+    dict-view-method,
+    div-method,
+    exception-message-attribute,
+    execfile-builtin,
+    file-builtin,
+    filter-builtin-not-iterating,
+    getslice-method,
+    hex-method,
+    idiv-method,
+    import-star-module-level,
+    indexing-exception,
+    input-builtin,
+    intern-builtin,
+    invalid-str-codec,
+    long-builtin,
+    long-suffix,
+    map-builtin-not-iterating,
+    metaclass-assignment,
+    next-method-called,
+    next-method-defined,
+    nonzero-method,
+    oct-method,
+    old-division,
+    old-ne-operator,
+    old-octal-literal,
+    old-raise-syntax,
+    parameter-unpacking,
+    print-statement,
+    raising-string,
+    range-builtin-not-iterating,
+    raw_input-builtin,
+    rdiv-method,
+    reduce-builtin,
+    reload-builtin,
+    round-builtin,
+    setslice-method,
+    standarderror-builtin,
+    sys-max-int,
+    unichr-builtin,
+    unicode-builtin,
+    unpacking-in-except,
+    using-cmp-argument,
+    xrange-builtin,
+    zip-builtin-not-iterating,
+
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once). You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use "--disable=all --enable=classes
+# --disable=W"
+# We leave many of the style warnings to judgement/peer review.
+# useless-object-inheritance: We disable this for Python 2 compatibility.
+disable=
+    fixme,
+    file-ignored,
+    invalid-name,
+    locally-disabled,
+    locally-enabled,
+    missing-docstring,
+    no-self-use,
+    star-args,
+    too-few-public-methods,
+    too-many-arguments,
+    too-many-branches,
+    too-many-instance-attributes,
+    too-many-lines,
+    too-many-locals,
+    too-many-public-methods,
+    too-many-return-statements,
+    too-many-statements,
+    useless-object-inheritance,
+
+
+[REPORTS]
+
+# Set the output format. Available formats are text, parseable, colorized, msvs
+# (visual studio) and html. You can also give a reporter class, eg
+# mypackage.mymodule.MyReporterClass.
+output-format=text
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file name "pylint_global.[txt|html]".
+files-output=no
+
+# Tells whether to display a full report or only the messages
+reports=no
+
+# Activate the evaluation score.
+score=no
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables errors, warning, statement which
+# respectively contain the number of errors / warnings messages and the total
+# number of statements analyzed. This is used by the global evaluation report
+# (RP0004).
+#evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details
+#msg-template=
+
+
+[SIMILARITIES]
+
+# Minimum lines number of a similarity.
+min-similarity-lines=20
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in a mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis). It
+# supports qualified module names, as well as Unix pattern matching.
+ignored-modules=
+
+# List of class names for which member attributes should not be checked
+# (useful for classes with dynamically set attributes). This supports
+# qualified names.
+ignored-classes=hashlib,numpy
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E1101 when accessed. Python regular
+# expressions are accepted.
+generated-members=
+
+
+[SPELLING]
+
+# Spelling dictionary name. Available dictionaries: none. To make it work,
+# install the python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains a private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to the private dictionary indicated by
+# the --spelling-private-dict-file option instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=_|unused_
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid defining new builtins when possible.
+additional-builtins=
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,_cb
+
+
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=80
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+# List of optional constructs for which whitespace checking is disabled. `dict-
+# separator` is used to allow tabulation in dicts, etc.: {1  : 1,\n222: 2}.
+# `trailing-comma` allows a space between comma and closing bracket: (a, ).
+# `empty-line` allows space-only lines.
+no-space-check=trailing-comma,dict-separator
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
+# tab).
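+# Two-space indents here, matching the style already used by avbtool.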
+indent-string='  '
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=LF
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+[BASIC]
+
+# List of builtin function names that should not be used, separated by a comma
+bad-functions=map,filter,input
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,x,_
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Include a hint for the correct naming format with invalid-name
+include-naming-hint=no
+
+# Regular expression matching correct function names
+function-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for function names
+function-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct variable names
+variable-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for variable names
+variable-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct constant names
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Naming hint for constant names
+const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Regular expression matching correct attribute names
+attr-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for attribute names
+attr-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct argument names
+argument-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for argument names
+argument-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct class attribute names
+class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Naming hint for class attribute names
+class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Regular expression matching correct inline iteration names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Naming hint for inline iteration names
+inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
+
+# Regular expression matching correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Naming hint for class names
+class-name-hint=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression matching correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Naming hint for module names
+module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression which should only match correct method names
+method-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for method names
+method-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=^_
+
+# Minimum line length for functions/classes that require docstrings; shorter
+# ones are exempt.
+docstring-min-length=10
+
+
+[ELIF]
+
+# Maximum number of nested blocks for function / method body
+max-nested-blocks=5
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Argument names that match this expression will be ignored. Defaults to names
+# with a leading underscore.
+ignored-argument-names=_.*
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of branches for function / method body
+max-branches=12
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Maximum number of parents for a class (see R0901).
+max-parents=10
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+# Maximum number of boolean expressions in an if statement
+max-bool-expr=5
+
+
+[CLASSES]
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,_fields,_replace,_source,_make
+
+
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,TERMIOS,Bastion,rexec,optparse
+
+# Create a graph of all (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
diff --git a/test/avb_aftl_validate_unittest.cc b/test/avb_aftl_validate_unittest.cc
new file mode 100644
index 0000000..e27dbc1
--- /dev/null
+++ b/test/avb_aftl_validate_unittest.cc
@@ -0,0 +1,431 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <gtest/gtest.h>
+
+#include <libavb_aftl/libavb_aftl.h>
+
+#include "avb_unittest_util.h"
+#include "libavb_aftl/avb_aftl_types.h"
+#include "libavb_aftl/avb_aftl_util.h"
+#include "libavb_aftl/avb_aftl_validate.h"
+
+namespace {
+
+const char kAftlKeyBytesPath[] = "test/data/aftl_key_bytes.bin";
+const char kAftlLogSigPath[] = "test/data/aftl_log_sig.bin";
+
+}  // namespace
+
+namespace avb {
+
+/* Extend BaseAvbToolTest to take advantage of common checks and tooling. */
+class AvbAftlValidateTest : public BaseAvbToolTest {
+ public:
+  AvbAftlValidateTest()
+      : icp_entry_(NULL), key_bytes_(NULL), log_sig_bytes_(NULL) {}
+  ~AvbAftlValidateTest() {}
+  void SetUp() override {
+    uint32_t i;
+    BaseAvbToolTest::SetUp();
+
+    /* Read in test data from the key and log_sig binaries. */
+    base::GetFileSize(base::FilePath(kAftlKeyBytesPath), &key_size_);
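+    /* A valid key blob is expected to be 1032 bytes: the 8-byte
+     * AvbRSAPublicKeyHeader followed by the 512-byte RSA-4096 modulus and
+     * the 512-byte precomputed r^2 value. */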
+    if (key_size_ != 1032) return;
+    key_bytes_ = (uint8_t*)avb_malloc(key_size_);
+    if (!key_bytes_) return;
+    base::ReadFile(
+        base::FilePath(kAftlKeyBytesPath), (char*)key_bytes_, key_size_);
+    base::GetFileSize(base::FilePath(kAftlLogSigPath), &log_sig_size_);
+    if (log_sig_size_ != 512) return;
+    log_sig_bytes_ = (uint8_t*)avb_malloc(log_sig_size_);
+    if (!log_sig_bytes_) return;
+    base::ReadFile(
+        base::FilePath(kAftlLogSigPath), (char*)log_sig_bytes_, log_sig_size_);
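+    /* Allocate the entry with room for one trailing proof hash; the
+     * proofs[0] copy and proof_hash_count = 1 below rely on these extra
+     * AFTL_HASH_SIZE bytes. */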
+    icp_entry_ =
+        (AftlIcpEntry*)avb_malloc(sizeof(AftlIcpEntry) + AFTL_HASH_SIZE);
+    if (!icp_entry_) return;
+    icp_entry_->log_root_descriptor.version = 1;
+    icp_entry_->log_root_descriptor.tree_size = 3;
+    icp_entry_->log_root_descriptor.root_hash_size = AFTL_HASH_SIZE;
+    icp_entry_->log_root_descriptor.timestamp = 322325503;
+    icp_entry_->log_root_descriptor.revision = 0;
+    icp_entry_->log_root_descriptor.metadata_size = 0;
+    icp_entry_->log_root_descriptor.metadata = NULL;
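+    /* 29 is the fixed-size portion of a Trillian LogRootV1: version (2) +
+     * tree_size (8) + root hash length prefix (1) + timestamp (8) +
+     * revision (8) + metadata length prefix (2). */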
+    icp_entry_->log_root_descriptor_size =
+        icp_entry_->log_root_descriptor.root_hash_size +
+        icp_entry_->log_root_descriptor.metadata_size + 29;
+
+    icp_entry_->fw_info_leaf_size = AFTL_HASH_SIZE * 2 + 16;
+    icp_entry_->fw_info_leaf.vbmeta_hash_size = AFTL_HASH_SIZE;
+    icp_entry_->fw_info_leaf.vbmeta_hash = (uint8_t*)avb_malloc(AFTL_HASH_SIZE);
+    if (!icp_entry_->fw_info_leaf.vbmeta_hash) {
+      return;
+    }
+    memcpy(icp_entry_->fw_info_leaf.vbmeta_hash,
+           "\x65\xec\x58\x83\x43\x62\x8e\x81\x4d\xc7\x75\xa8\xcb\x77\x1f\x46"
+           "\x81\xcc\x79\x6f\xba\x32\xf0\x68\xc7\x17\xce\x2e\xe2\x14\x4d\x39",
+           AFTL_HASH_SIZE);
+
+    icp_entry_->fw_info_leaf.version_incremental_size = 4;
+    icp_entry_->fw_info_leaf.version_incremental =
+        (uint8_t*)avb_malloc(icp_entry_->fw_info_leaf.version_incremental_size);
+    memcpy(icp_entry_->fw_info_leaf.version_incremental,
+           "test",
+           icp_entry_->fw_info_leaf.version_incremental_size);
+    icp_entry_->fw_info_leaf.platform_key_size = 8;
+    icp_entry_->fw_info_leaf.platform_key =
+        (uint8_t*)avb_malloc(icp_entry_->fw_info_leaf.platform_key_size);
+    memcpy(icp_entry_->fw_info_leaf.platform_key,
+           "aaaaaaaa",
+           icp_entry_->fw_info_leaf.platform_key_size);
+    icp_entry_->fw_info_leaf.manufacturer_key_hash_size = AFTL_HASH_SIZE;
+    icp_entry_->fw_info_leaf.manufacturer_key_hash =
+        (uint8_t*)avb_malloc(AFTL_HASH_SIZE);
+    icp_entry_->fw_info_leaf.description_size = 4;
+    icp_entry_->fw_info_leaf.description =
+        (uint8_t*)avb_malloc(icp_entry_->fw_info_leaf.description_size);
+    memcpy(icp_entry_->fw_info_leaf.description,
+           "test",
+           icp_entry_->fw_info_leaf.description_size);
+    for (i = 0; i < AFTL_HASH_SIZE; i++) {
+      icp_entry_->fw_info_leaf.manufacturer_key_hash[i] = 0;
+    }
+    icp_entry_->leaf_index = 2;
+
+    memcpy(icp_entry_->proofs[0],
+           "\xfa\xc5\x42\x03\xe7\xcc\x69\x6c\xf0\xdf\xcb\x42\xc9\x2a\x1d\x9d"
+           "\xba\xf7\x0a\xd9\xe6\x21\xf4\xbd\x8d\x98\x66\x2f\x00\xe3\xc1\x25",
+           AFTL_HASH_SIZE);
+    icp_entry_->proof_hash_count = 1;
+    icp_entry_->log_root_descriptor.root_hash =
+        (uint8_t*)avb_malloc(AFTL_HASH_SIZE);
+    if (!icp_entry_->log_root_descriptor.root_hash) return;
+    memcpy(icp_entry_->log_root_descriptor.root_hash,
+           "\x44\x14\xe4\x45\x03\x3d\xf6\x00\x6b\xd1\xf0\x1a\x14\x18\x8a\x79"
+           "\x1f\xdd\x09\x46\x4e\xdc\x70\x16\x03\x2c\x9f\x85\x5f\x28\x10\x88",
+           AFTL_HASH_SIZE);
+  }
+
+  void TearDown() override {
+    if (icp_entry_ != NULL) {
+      if (icp_entry_->fw_info_leaf.vbmeta_hash != NULL)
+        avb_free(icp_entry_->fw_info_leaf.vbmeta_hash);
+      if (icp_entry_->fw_info_leaf.version_incremental != NULL)
+        avb_free(icp_entry_->fw_info_leaf.version_incremental);
+      if (icp_entry_->fw_info_leaf.platform_key != NULL)
+        avb_free(icp_entry_->fw_info_leaf.platform_key);
+      if (icp_entry_->fw_info_leaf.manufacturer_key_hash != NULL)
+        avb_free(icp_entry_->fw_info_leaf.manufacturer_key_hash);
+      if (icp_entry_->fw_info_leaf.description != NULL)
+        avb_free(icp_entry_->fw_info_leaf.description);
+      if (icp_entry_->log_root_descriptor.root_hash != NULL)
+        avb_free(icp_entry_->log_root_descriptor.root_hash);
+      avb_free(icp_entry_);
+    }
+    avb_free(key_bytes_);
+    avb_free(log_sig_bytes_);
+    BaseAvbToolTest::TearDown();
+  }
+
+ protected:
+  AftlIcpEntry* icp_entry_;
+  uint8_t* key_bytes_;
+  uint8_t* log_sig_bytes_;
+  int64_t key_size_;
+  int64_t log_sig_size_;
+};
+
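+/* The log root signature loaded from kAftlLogSigPath is expected to be a
+ * 512-byte RSA-4096 signature over the log root descriptor, verified here
+ * against the log public key from kAftlKeyBytesPath. */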
+TEST_F(AvbAftlValidateTest, AvbAftlVerifySignature) {
+  icp_entry_->log_root_sig_size = AFTL_SIGNATURE_SIZE;
+  icp_entry_->log_root_signature = (uint8_t*)avb_malloc(AFTL_SIGNATURE_SIZE);
+  memcpy(icp_entry_->log_root_signature, log_sig_bytes_, AFTL_SIGNATURE_SIZE);
+  EXPECT_EQ(true,
+            avb_aftl_verify_entry_signature(key_bytes_, key_size_, icp_entry_));
+  avb_free(icp_entry_->log_root_signature);
+}
+
+TEST_F(AvbAftlValidateTest, AvbAftlHashLogRootDescriptor) {
+  uint8_t hash[AFTL_HASH_SIZE];
+
+  /* icp_entry_'s log root descriptor fields were initialized in SetUp. */
+  avb_aftl_hash_log_root_descriptor(icp_entry_, hash);
+  EXPECT_EQ("5bd0f3af4b7584536438169b6eaa4f84577f7590f3a4da2f6f68476caa5828b4",
+            mem_to_hexstring(hash, AFTL_HASH_SIZE));
+}
+
+TEST_F(AvbAftlValidateTest, AvbAftlVerifyIcpRootHash) {
+  /* The proof and root hash in icp_entry_ were initialized in SetUp. */
+  EXPECT_EQ(true, avb_aftl_verify_icp_root_hash(icp_entry_));
+}
+
+TEST_F(AvbAftlValidateTest, AftlVerifyVbmetaHash) {
+  GenerateVBMetaImage("vbmeta.img",
+                      "SHA256_RSA4096",
+                      0,
+                      base::FilePath("test/data/testkey_rsa4096.pem"));
+
+  EXPECT_EQ(true,
+            avb_aftl_verify_vbmeta_hash(
+                vbmeta_image_.data(), vbmeta_image_.size(), icp_entry_));
+}
+
+TEST_F(AvbAftlValidateTest, AvbAftlRootFromIcp) {
+  /* Test vectors mirror the Trillian log verifier tests:
+     https://github.com/google/trillian/blob/master/merkle/log_verifier_test.go
+  */
+  uint64_t leaf_index;
+  uint64_t tree_size;
+  uint8_t proof[3][AFTL_HASH_SIZE];
+  uint8_t leaf_hash[AFTL_HASH_SIZE];
+  uint8_t hash[AFTL_HASH_SIZE];
+
+  leaf_index = 0;
+  tree_size = 8;
+  avb_aftl_rfc6962_hash_leaf((uint8_t*)"", 0, leaf_hash);
+  memcpy(proof[0],
+         "\x96\xa2\x96\xd2\x24\xf2\x85\xc6\x7b\xee\x93\xc3\x0f\x8a\x30\x91"
+         "\x57\xf0\xda\xa3\x5d\xc5\xb8\x7e\x41\x0b\x78\x63\x0a\x09\xcf\xc7",
+         AFTL_HASH_SIZE);
+  memcpy(proof[1],
+         "\x5f\x08\x3f\x0a\x1a\x33\xca\x07\x6a\x95\x27\x98\x32\x58\x0d\xb3"
+         "\xe0\xef\x45\x84\xbd\xff\x1f\x54\xc8\xa3\x60\xf5\x0d\xe3\x03\x1e",
+         AFTL_HASH_SIZE);
+  memcpy(proof[2],
+         "\x6b\x47\xaa\xf2\x9e\xe3\xc2\xaf\x9a\xf8\x89\xbc\x1f\xb9\x25\x4d"
+         "\xab\xd3\x11\x77\xf1\x62\x32\xdd\x6a\xab\x03\x5c\xa3\x9b\xf6\xe4",
+         AFTL_HASH_SIZE);
+  avb_aftl_root_from_icp(
+      leaf_index, tree_size, proof, 3, leaf_hash, AFTL_HASH_SIZE, hash);
+  EXPECT_EQ("5dc9da79a70659a9ad559cb701ded9a2ab9d823aad2f4960cfe370eff4604328",
+            mem_to_hexstring(hash, AFTL_HASH_SIZE))
+      << "Failed on test #1";
+
+  leaf_index = 5;
+  tree_size = 8;
+  avb_aftl_rfc6962_hash_leaf((uint8_t*)"@ABC", 4, leaf_hash);
+  memcpy(proof[0],
+         "\xbc\x1a\x06\x43\xb1\x2e\x4d\x2d\x7c\x77\x91\x8f\x44\xe0\xf4\xf7"
+         "\x9a\x83\x8b\x6c\xf9\xec\x5b\x5c\x28\x3e\x1f\x4d\x88\x59\x9e\x6b",
+         AFTL_HASH_SIZE);
+  memcpy(proof[1],
+         "\xca\x85\x4e\xa1\x28\xed\x05\x0b\x41\xb3\x5f\xfc\x1b\x87\xb8\xeb"
+         "\x2b\xde\x46\x1e\x9e\x3b\x55\x96\xec\xe6\xb9\xd5\x97\x5a\x0a\xe0",
+         AFTL_HASH_SIZE);
+  memcpy(proof[2],
+         "\xd3\x7e\xe4\x18\x97\x6d\xd9\x57\x53\xc1\xc7\x38\x62\xb9\x39\x8f"
+         "\xa2\xa2\xcf\x9b\x4f\xf0\xfd\xfe\x8b\x30\xcd\x95\x20\x96\x14\xb7",
+         AFTL_HASH_SIZE);
+  avb_aftl_root_from_icp(
+      leaf_index, tree_size, proof, 3, leaf_hash, AFTL_HASH_SIZE, hash);
+  EXPECT_EQ("5dc9da79a70659a9ad559cb701ded9a2ab9d823aad2f4960cfe370eff4604328",
+            mem_to_hexstring(hash, AFTL_HASH_SIZE))
+      << "Failed on test #2";
+
+  leaf_index = 2;
+  tree_size = 3;
+  avb_aftl_rfc6962_hash_leaf((uint8_t*)"\x10", 1, leaf_hash);
+  memcpy(proof[0],
+         "\xfa\xc5\x42\x03\xe7\xcc\x69\x6c\xf0\xdf\xcb\x42\xc9\x2a\x1d\x9d"
+         "\xba\xf7\x0a\xd9\xe6\x21\xf4\xbd\x8d\x98\x66\x2f\x00\xe3\xc1\x25",
+         AFTL_HASH_SIZE);
+  avb_aftl_root_from_icp(
+      leaf_index, tree_size, proof, 1, leaf_hash, AFTL_HASH_SIZE, hash);
+  EXPECT_EQ("aeb6bcfe274b70a14fb067a5e5578264db0fa9b51af5e0ba159158f329e06e77",
+            mem_to_hexstring(hash, AFTL_HASH_SIZE))
+      << "Failed on test #3";
+
+  leaf_index = 1;
+  tree_size = 5;
+  avb_aftl_rfc6962_hash_leaf((uint8_t*)"\x00", 1, leaf_hash);
+  memcpy(proof[0],
+         "\x6e\x34\x0b\x9c\xff\xb3\x7a\x98\x9c\xa5\x44\xe6\xbb\x78\x0a\x2c"
+         "\x78\x90\x1d\x3f\xb3\x37\x38\x76\x85\x11\xa3\x06\x17\xaf\xa0\x1d",
+         AFTL_HASH_SIZE);
+  memcpy(proof[1],
+         "\x5f\x08\x3f\x0a\x1a\x33\xca\x07\x6a\x95\x27\x98\x32\x58\x0d\xb3"
+         "\xe0\xef\x45\x84\xbd\xff\x1f\x54\xc8\xa3\x60\xf5\x0d\xe3\x03\x1e",
+         AFTL_HASH_SIZE);
+  memcpy(proof[2],
+         "\xbc\x1a\x06\x43\xb1\x2e\x4d\x2d\x7c\x77\x91\x8f\x44\xe0\xf4\xf7"
+         "\x9a\x83\x8b\x6c\xf9\xec\x5b\x5c\x28\x3e\x1f\x4d\x88\x59\x9e\x6b",
+         AFTL_HASH_SIZE);
+  avb_aftl_root_from_icp(
+      leaf_index, tree_size, proof, 3, leaf_hash, AFTL_HASH_SIZE, hash);
+  EXPECT_EQ("4e3bbb1f7b478dcfe71fb631631519a3bca12c9aefca1612bfce4c13a86264d4",
+            mem_to_hexstring(hash, AFTL_HASH_SIZE))
+      << "Failed on test #4";
+}
+
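+/* chain_inner folds each proof hash into the running hash along the inner
+ * portion of a Merkle audit path; each bit of leaf_index selects whether the
+ * proof hash is combined as the left or the right child. */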
+TEST_F(AvbAftlValidateTest, AvbAftlChainInner) {
+  uint8_t hash[AFTL_HASH_SIZE];
+  uint8_t seed[AFTL_HASH_SIZE];
+  uint8_t proof[4][AFTL_HASH_SIZE];
+  uint64_t i;
+
+  for (i = 0; i < AFTL_HASH_SIZE; i++) {
+    hash[i] = 0;
+  }
+
+  memcpy(seed, "1234567890abcdefghijklmnopqrstuv", AFTL_HASH_SIZE);
+  memcpy(proof[0], "abcdefghijklmnopqrstuvwxyz123456", AFTL_HASH_SIZE);
+  avb_aftl_chain_inner(seed, AFTL_HASH_SIZE, (uint8_t*)proof, 1, 0, hash);
+  EXPECT_EQ("9cb6af81b146b6a81d911d26f4c0d467265a3385d6caf926d5515e58efd161a3",
+            mem_to_hexstring(hash, AFTL_HASH_SIZE))
+      << "Failed with seed: "
+      << "\"1234567890abcdefghijklmnopqrstuv\", proof ["
+      << "\"abcdefghijklmnopqrstuvwxyz123456\"], and leaf_index 0";
+  memcpy(proof[1], "7890abcdefghijklmnopqrstuvwxyz12", AFTL_HASH_SIZE);
+  avb_aftl_chain_inner(seed, AFTL_HASH_SIZE, (uint8_t*)proof, 2, 0, hash);
+  EXPECT_EQ("368d8213cd7d62335a84b3a3d75c8a0302c0d63c93cbbd22c5396dc4c75ba019",
+            mem_to_hexstring(hash, AFTL_HASH_SIZE))
+      << "Failed with seed: "
+      << "\"1234567890abcdefghijklmnopqrstuv\", proof ["
+      << "\"abcdefghijklmnopqrstuvwxyz123456\", "
+         "\"7890abcdefghijklmnopqrstuvwxyz12\"],"
+      << " and leaf_index 0";
+  avb_aftl_chain_inner(seed, AFTL_HASH_SIZE, (uint8_t*)proof, 2, 1, hash);
+  EXPECT_EQ("78418158eb5943c50ec581b41f105ba9aecc1b9e7aba3ea2e93021cbd5bd166e",
+            mem_to_hexstring(hash, AFTL_HASH_SIZE))
+      << "Failed with seed: "
+      << "\"1234567890abcdefghijklmnopqrstuv\", proof ["
+      << "\"abcdefghijklmnopqrstuvwxyz123456\", "
+         "\"7890abcdefghijklmnopqrstuvwxyz12\"],"
+      << " and leaf_index 1";
+  memcpy(proof[2], "abcdefghijklmn0pqrstuvwxyz123456", AFTL_HASH_SIZE);
+  memcpy(proof[3], "7890abcdefgh1jklmnopqrstuvwxyz12", AFTL_HASH_SIZE);
+  avb_aftl_chain_inner(seed, AFTL_HASH_SIZE, (uint8_t*)proof, 4, 1, hash);
+  EXPECT_EQ("83309c48fb92707f5788b6dd4c9a89042dff20856ad9529b7fb8e5cdf47c04f8",
+            mem_to_hexstring(hash, AFTL_HASH_SIZE))
+      << "Failed with seed: "
+      << "\"1234567890abcdefghijklmnopqrstuv\", proof ["
+      << "\"abcdefghijklmnopqrstuvwxyz123456\", "
+         "\"7890abcdefghijklmnopqrstuvwxyz12\","
+      << "\"abcdefghijklmnopqrstuvwxyz123456\", "
+         "\"7890abcdefghijklmnopqrstuvwxyz12\"]"
+      << " and leaf_index 1";
+  avb_aftl_chain_inner(seed, AFTL_HASH_SIZE, (uint8_t*)proof, 4, 3, hash);
+  EXPECT_EQ("13e5f7e441dc4dbea659acbc989ac33222f4447546e3dac36b0e0c9977d52b97",
+            mem_to_hexstring(hash, AFTL_HASH_SIZE))
+      << "Failed with seed: "
+      << "\"1234567890abcdefghijklmnopqrstuv\", proof ["
+      << "\"abcdefghijklmnopqrstuvwxyz123456\", "
+         "\"7890abcdefghijklmnopqrstuvwxyz12\","
+      << "\"abcdefghijklmnopqrstuvwxyz123456\", "
+         "\"7890abcdefghijklmnopqrstuvwxyz12\"]"
+      << " and leaf_index 3";
+}
+
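+/* chain_border_right handles the right border of the audit path, where each
+ * proof hash is always combined as the left child of the running hash. */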
+TEST_F(AvbAftlValidateTest, AvbAftlChainBorderRight) {
+  uint8_t hash[AFTL_HASH_SIZE];
+  uint8_t seed[AFTL_HASH_SIZE];
+  uint8_t proof[2][AFTL_HASH_SIZE];
+  uint64_t i;
+
+  for (i = 0; i < AFTL_HASH_SIZE; i++) {
+    hash[i] = 0;
+  }
+
+  memcpy(seed, "1234567890abcdefghijklmnopqrstuv", AFTL_HASH_SIZE);
+  memcpy(proof[0], "abcdefghijklmnopqrstuvwxyz123456", AFTL_HASH_SIZE);
+  avb_aftl_chain_border_right(seed, AFTL_HASH_SIZE, (uint8_t*)proof, 1, hash);
+  EXPECT_EQ("363aa8a62b784be38392ab69ade1aac2562f8989ce8986bec685d2957d657310",
+            mem_to_hexstring(hash, AFTL_HASH_SIZE))
+      << "Failed with seed: "
+      << "\"1234567890abcdefghijklmnopqrstuv\" and proof "
+         "[\"abcdefghijklmnopqrstuvwxyz123456\"]";
+  memcpy(proof[1], "7890abcdefghijklmnopqrstuvwxyz12", AFTL_HASH_SIZE);
+  avb_aftl_chain_border_right(seed, AFTL_HASH_SIZE, (uint8_t*)proof, 2, hash);
+  EXPECT_EQ("618fc58c45faea808e0bbe0f82afbe7687f4db2608824120e8ade507cbce221f",
+            mem_to_hexstring(hash, AFTL_HASH_SIZE))
+      << "Failed with seed: "
+      << "\"1234567890abcdefghijklmnopqrstuv\" and proof ["
+      << "\"abcdefghijklmnopqrstuvwxyz123456\", "
+         "\"7890abcdefghijklmnopqrstuvwxyz12\"]";
+}
+
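+/* Interior node hashing per RFC 6962: SHA-256(0x01 || left || right). */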
+TEST_F(AvbAftlValidateTest, AvbAftlRFC6962HashChildren) {
+  uint8_t hash[AFTL_HASH_SIZE];
+
+  avb_aftl_rfc6962_hash_children((uint8_t*)"", 0, (uint8_t*)"", 0, hash);
+  EXPECT_EQ("4bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a",
+            mem_to_hexstring(hash, AFTL_HASH_SIZE))
+      << "Failed on inputs \"\" and \"\"";
+
+  avb_aftl_rfc6962_hash_children((uint8_t*)"abcd", 4, (uint8_t*)"", 0, hash);
+  EXPECT_EQ("b75eb7b06e69c1c49597fba37398e0f5ba319c7164ed67bb19b41e9d576313b9",
+            mem_to_hexstring(hash, AFTL_HASH_SIZE))
+      << "Failed on inputs \"abcd\" and \"\"";
+
+  avb_aftl_rfc6962_hash_children((uint8_t*)"", 0, (uint8_t*)"efgh", 4, hash);
+  EXPECT_EQ("8d65f3e92e3853cee633345caca3e035f01c2e44815371985baed2c45c10ca40",
+            mem_to_hexstring(hash, AFTL_HASH_SIZE))
+      << "Failed on inputs \"\" and \"efgh\"";
+
+  avb_aftl_rfc6962_hash_children(
+      (uint8_t*)"abcd", 4, (uint8_t*)"efgh", 4, hash);
+  EXPECT_EQ("41561b1297f692dad705e28ece8bf47060fba1abeeebda0aa67c43570a36bf79",
+            mem_to_hexstring(hash, AFTL_HASH_SIZE))
+      << "Failed on inputs \"abcd\" and \"efgh\"";
+}
+
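+/* Leaf hashing per RFC 6962: SHA-256(0x00 || leaf). */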
+TEST_F(AvbAftlValidateTest, AvbAftlRFC6962HashLeaf) {
+  uint8_t hash[AFTL_HASH_SIZE];
+  avb_aftl_rfc6962_hash_leaf((uint8_t*)"", 0, hash);
+  EXPECT_EQ("6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d",
+            mem_to_hexstring(hash, AFTL_HASH_SIZE))
+      << "Failed on input \"\"";
+  avb_aftl_rfc6962_hash_leaf((uint8_t*)"abcdefg", 7, hash);
+  EXPECT_EQ("6b43f785b72386e132b275bc918c25dbc687ab8427836bef6ce4509b64f4f54d",
+            mem_to_hexstring(hash, AFTL_HASH_SIZE))
+      << "Failed on input \"abcdefg\"";
+}
+
+TEST_F(AvbAftlValidateTest, AvbAftlSha256) {
+  /* Computed with:
+   *
+   * $ echo -n foobar |sha256sum
+   * c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2
+   */
+  uint8_t hash[AFTL_HASH_SIZE];
+  avb_aftl_sha256(NULL, 0, hash);
+  EXPECT_EQ("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+            mem_to_hexstring(hash, AFTL_HASH_SIZE));
+  avb_aftl_sha256((uint8_t*)"foobar", 6, hash);
+  EXPECT_EQ("c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2",
+            mem_to_hexstring(hash, AFTL_HASH_SIZE));
+}
+
+TEST_F(AvbAftlValidateTest, AvbAftlCountLeadingZeros) {
+  /* Spot checks to ensure aftl_count_leading_zeros is correct. */
+  EXPECT_EQ(52ull, avb_aftl_count_leading_zeros(4095))
+      << "Failed on input 4095";
+  EXPECT_EQ(12ull, avb_aftl_count_leading_zeros(0xfffffffffffff))
+      << "Failed on input 0xfffffffffffff";
+  EXPECT_EQ(64ull, avb_aftl_count_leading_zeros(0)) << "Failed on input 0";
+  EXPECT_EQ(0ull, avb_aftl_count_leading_zeros(0xffffffffffffffff))
+      << "Failed on input 0xffffffffffffffff";
+}
+
+} /* namespace avb */
diff --git a/test/data/aftl_key_bytes.bin b/test/data/aftl_key_bytes.bin
new file mode 100644
index 0000000..f2e8fbd
--- /dev/null
+++ b/test/data/aftl_key_bytes.bin
Binary files differ
diff --git a/test/data/aftl_log_sig.bin b/test/data/aftl_log_sig.bin
new file mode 100644
index 0000000..3d3e7d9
--- /dev/null
+++ b/test/data/aftl_log_sig.bin
Binary files differ