DO NOT MERGE - Merge PPRL.190305.001 into master

Bug: 127812889
Change-Id: I80e0f58f11d57ef493b4e122a2ad311406e7c403
diff --git a/.clang-format b/.clang-format
deleted file mode 120000
index f412743..0000000
--- a/.clang-format
+++ /dev/null
@@ -1 +0,0 @@
-../../build/tools/brillo-clang-format
\ No newline at end of file
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..c1244fe
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,38 @@
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+# This is the .clang-format file used by all Brillo projects, conforming to the
+# style guide defined by Brillo. To use this file, create a *relative* symlink
+# in your project pointing to this file, as this repository is expected to be
+# present in all manifests.
+#
+# See go/brillo-c++-style for details about the style guide.
+#
+
+# WARN: We do not symlink this file to the original file because their
+# locations are different in AOSP and CrOS. Keep this file in sync with the
+# original file if possible.
+
+BasedOnStyle: Google
+AllowShortFunctionsOnASingleLine: Inline
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false
+BinPackArguments: false
+BinPackParameters: false
+CommentPragmas: NOLINT:.*
+DerivePointerAlignment: false
+PointerAlignment: Left
+TabWidth: 2
diff --git a/.gitignore b/.gitignore
index ced5927..db4c370 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,7 +4,6 @@
 /delta_generator
 /html/
 /test_http_server
-/update_engine
 /update_engine.dbusclient.h
 /update_engine.dbusserver.h
 /update_engine_client
diff --git a/Android.bp b/Android.bp
index c3d164b..dac1acd 100644
--- a/Android.bp
+++ b/Android.bp
@@ -1,3 +1,19 @@
+//
+// Copyright (C) 2015 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
 // AIDL interface between libupdate_engine and framework.jar
 filegroup {
     name: "libupdate_engine_aidl",
@@ -5,4 +21,706 @@
         "binder_bindings/android/os/IUpdateEngine.aidl",
         "binder_bindings/android/os/IUpdateEngineCallback.aidl",
     ],
+    path: "binder_bindings",
+}
+
+cc_defaults {
+    name: "ue_defaults",
+
+    cflags: [
+        "-DBASE_VER=576279",
+        "-DUSE_BINDER=1",
+        "-DUSE_CHROME_NETWORK_PROXY=0",
+        "-DUSE_CHROME_KIOSK_APP=0",
+        "-DUSE_HWID_OVERRIDE=0",
+        "-DUSE_MTD=0",
+        "-DUSE_OMAHA=0",
+        "-D_FILE_OFFSET_BITS=64",
+        "-D_POSIX_C_SOURCE=199309L",
+        "-Wa,--noexecstack",
+        "-Wall",
+        "-Werror",
+        "-Wextra",
+        "-Wformat=2",
+        "-Wno-psabi",
+        "-Wno-unused-parameter",
+        "-ffunction-sections",
+        "-fstack-protector-strong",
+        "-fvisibility=hidden",
+    ],
+    cppflags: [
+        "-Wnon-virtual-dtor",
+        "-fno-strict-aliasing",
+    ],
+    include_dirs: ["system"],
+    local_include_dirs: ["client_library/include"],
+    static_libs: ["libgtest_prod"],
+    shared_libs: [
+        "libbrillo-stream",
+        "libbrillo",
+        "libchrome",
+    ],
+    ldflags: ["-Wl,--gc-sections"],
+
+    product_variables: {
+        pdk: {
+            enabled: false,
+        },
+    },
+
+    target: {
+        android: {
+            cflags: [
+                "-DUSE_FEC=1",
+            ],
+        },
+        host: {
+            cflags: [
+                "-DUSE_FEC=0",
+            ],
+        },
+        darwin: {
+            enabled: false,
+        },
+    },
+}
+
+// update_metadata-protos (type: static_library)
+// ========================================================
+// Protobufs.
+cc_defaults {
+    name: "update_metadata-protos_exports",
+
+    shared_libs: ["libprotobuf-cpp-lite"],
+}
+
+cc_library_static {
+    name: "update_metadata-protos",
+    host_supported: true,
+    recovery_available: true,
+
+    srcs: ["update_engine/update_metadata.proto"],
+    cflags: [
+        "-Wall",
+        "-Werror",
+    ],
+    proto: {
+        canonical_path_from_root: false,
+        export_proto_headers: true,
+    },
+}
+
+// libpayload_consumer (type: static_library)
+// ========================================================
+// The payload application component and common dependencies.
+cc_defaults {
+    name: "libpayload_consumer_exports",
+    defaults: ["update_metadata-protos_exports"],
+
+    static_libs: [
+        "update_metadata-protos",
+        "libxz",
+        "libbz",
+        "libbspatch",
+        "libbrotli",
+        "libfec_rs",
+        "libpuffpatch",
+        "libverity_tree",
+    ],
+    shared_libs: [
+        "libbase",
+        "libcrypto",
+        "libfec",
+    ],
+}
+
+cc_library_static {
+    name: "libpayload_consumer",
+    defaults: [
+        "ue_defaults",
+        "libpayload_consumer_exports",
+    ],
+    host_supported: true,
+    recovery_available: true,
+
+    srcs: [
+        "common/action_processor.cc",
+        "common/boot_control_stub.cc",
+        "common/clock.cc",
+        "common/constants.cc",
+        "common/cpu_limiter.cc",
+        "common/error_code_utils.cc",
+        "common/file_fetcher.cc",
+        "common/hash_calculator.cc",
+        "common/http_common.cc",
+        "common/http_fetcher.cc",
+        "common/hwid_override.cc",
+        "common/multi_range_http_fetcher.cc",
+        "common/platform_constants_android.cc",
+        "common/prefs.cc",
+        "common/proxy_resolver.cc",
+        "common/subprocess.cc",
+        "common/terminator.cc",
+        "common/utils.cc",
+        "payload_consumer/bzip_extent_writer.cc",
+        "payload_consumer/cached_file_descriptor.cc",
+        "payload_consumer/delta_performer.cc",
+        "payload_consumer/download_action.cc",
+        "payload_consumer/extent_reader.cc",
+        "payload_consumer/extent_writer.cc",
+        "payload_consumer/file_descriptor.cc",
+        "payload_consumer/file_descriptor_utils.cc",
+        "payload_consumer/file_writer.cc",
+        "payload_consumer/filesystem_verifier_action.cc",
+        "payload_consumer/install_plan.cc",
+        "payload_consumer/mount_history.cc",
+        "payload_consumer/payload_constants.cc",
+        "payload_consumer/payload_metadata.cc",
+        "payload_consumer/payload_verifier.cc",
+        "payload_consumer/postinstall_runner_action.cc",
+        "payload_consumer/verity_writer_android.cc",
+        "payload_consumer/xz_extent_writer.cc",
+        "payload_consumer/fec_file_descriptor.cc",
+    ],
+}
+
+// libupdate_engine_boot_control (type: static_library)
+// ========================================================
+// A BootControl class implementation using Android's HIDL boot_control HAL.
+cc_defaults {
+    name: "libupdate_engine_boot_control_exports",
+    defaults: ["update_metadata-protos_exports"],
+
+    static_libs: ["update_metadata-protos"],
+    shared_libs: [
+        "libbootloader_message",
+        "libfs_mgr",
+        "libhwbinder",
+        "libhidlbase",
+        "liblp",
+        "libutils",
+        "android.hardware.boot@1.0",
+    ],
+}
+
+cc_library_static {
+    name: "libupdate_engine_boot_control",
+    defaults: [
+        "ue_defaults",
+        "libupdate_engine_boot_control_exports",
+    ],
+    recovery_available: true,
+
+    srcs: [
+        "boot_control_android.cc",
+        "dynamic_partition_control_android.cc",
+    ],
+}
+
+// libupdate_engine_android (type: static_library)
+// ========================================================
+// The main daemon static_library used in Android (non-Brillo). This only has a
+// loop to apply payloads provided by the upper layer via a Binder interface.
+cc_defaults {
+    name: "libupdate_engine_android_exports",
+    defaults: [
+        "ue_defaults",
+        "libpayload_consumer_exports",
+        "libupdate_engine_boot_control_exports",
+    ],
+
+    static_libs: [
+        "libpayload_consumer",
+        "libupdate_engine_boot_control",
+    ],
+    shared_libs: [
+        "libandroid_net",
+        "libbase",
+        "libbinder",
+        "libbinderwrapper",
+        "libbootloader_message",
+        "libbrillo-binder",
+        "libcurl",
+        "libcutils",
+        "liblog",
+        "libmetricslogger",
+        "libssl",
+        "libutils",
+    ],
+}
+
+cc_library_static {
+    name: "libupdate_engine_android",
+    defaults: [
+        "ue_defaults",
+        "libupdate_engine_android_exports",
+    ],
+
+    // TODO(deymo): Remove external/cros/system_api/dbus once the strings are moved
+    // out of the DBus interface.
+    include_dirs: ["external/cros/system_api/dbus"],
+
+    aidl: {
+        local_include_dirs: ["binder_bindings"],
+        export_aidl_headers: true,
+    },
+
+    srcs: [
+        ":libupdate_engine_aidl",
+        "binder_service_android.cc",
+        "certificate_checker.cc",
+        "daemon.cc",
+        "daemon_state_android.cc",
+        "hardware_android.cc",
+        "libcurl_http_fetcher.cc",
+        "metrics_reporter_android.cc",
+        "metrics_utils.cc",
+        "network_selector_android.cc",
+        "update_attempter_android.cc",
+        "update_boot_flags_action.cc",
+        "update_status_utils.cc",
+    ],
+}
+
+// update_engine (type: executable)
+// ========================================================
+// update_engine daemon.
+cc_binary {
+    name: "update_engine",
+    defaults: [
+        "ue_defaults",
+        "libupdate_engine_android_exports",
+    ],
+
+    static_libs: ["libupdate_engine_android"],
+    required: ["cacerts_google"],
+
+    srcs: ["main.cc"],
+    init_rc: ["update_engine.rc"],
+}
+
+// update_engine_sideload (type: executable)
+// ========================================================
+// An executable equivalent to the update_engine daemon that installs an update
+// from a local file directly instead of running in the background. Used in the
+// recovery image.
+cc_binary {
+    name: "update_engine_sideload",
+    defaults: [
+        "ue_defaults",
+        "update_metadata-protos_exports",
+        "libupdate_engine_boot_control_exports",
+        "libpayload_consumer_exports",
+    ],
+    recovery: true,
+
+    cflags: ["-D_UE_SIDELOAD"],
+    // TODO(deymo): Remove external/cros/system_api/dbus once the strings are moved
+    // out of the DBus interface.
+    include_dirs: ["external/cros/system_api/dbus"],
+
+    srcs: [
+        "hardware_android.cc",
+        "metrics_reporter_stub.cc",
+        "metrics_utils.cc",
+        "network_selector_stub.cc",
+        "sideload_main.cc",
+        "update_attempter_android.cc",
+        "update_boot_flags_action.cc",
+        "update_status_utils.cc",
+    ],
+
+    // Use commonly used shared libraries. libprotobuf-cpp-lite.so is filtered
+    // out, as installing it separately does not seem beneficial given its size.
+    // Note that we explicitly request the recovery variants of these libraries,
+    // so that the expected files are used and installed.
+    shared_libs: [
+        "libbase",
+        "liblog",
+    ],
+    static_libs: [
+        "libpayload_consumer",
+        "libupdate_engine_boot_control",
+        "update_metadata-protos",
+
+        // We add the static versions of the shared libraries that are not
+        // installed to the recovery image due to size concerns. We also need to
+        // include all the static library dependencies of those static libraries.
+        "libevent",
+        "libmodpb64",
+        "libgtest_prod",
+        "libprotobuf-cpp-lite",
+        "libbrillo-stream",
+        "libbrillo",
+        "libchrome",
+    ],
+    target: {
+        recovery: {
+            exclude_shared_libs: [
+                "libprotobuf-cpp-lite",
+                "libhwbinder",
+                "libbrillo-stream",
+                "libbrillo",
+                "libchrome",
+            ],
+        },
+    },
+
+    required: ["android.hardware.boot@1.0-impl-wrapper.recovery"],
+}
+
+// libupdate_engine_client (type: shared_library)
+// ========================================================
+cc_library_shared {
+    name: "libupdate_engine_client",
+
+    cflags: [
+        "-Wall",
+        "-Werror",
+        "-Wno-unused-parameter",
+        "-DUSE_BINDER=1",
+    ],
+    export_include_dirs: ["client_library/include"],
+    include_dirs: [
+        // TODO(deymo): Remove "external/cros/system_api/dbus" when dbus is not used.
+        "external/cros/system_api/dbus",
+        "system",
+    ],
+
+    aidl: {
+        local_include_dirs: ["binder_bindings"],
+    },
+
+    shared_libs: [
+        "libchrome",
+        "libbrillo",
+        "libbinder",
+        "libbrillo-binder",
+        "libutils",
+    ],
+
+    srcs: [
+        "binder_bindings/android/brillo/IUpdateEngine.aidl",
+        "binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl",
+        "client_library/client.cc",
+        "client_library/client_binder.cc",
+        "parcelable_update_engine_status.cc",
+        "update_status_utils.cc",
+    ],
+}
+
+// update_engine_client (type: executable)
+// ========================================================
+// update_engine console client.
+cc_binary {
+    name: "update_engine_client",
+    defaults: ["ue_defaults"],
+
+    // TODO(deymo): Remove external/cros/system_api/dbus once the strings are moved
+    // out of the DBus interface.
+    include_dirs: ["external/cros/system_api/dbus"],
+
+    shared_libs: [
+        "libbinder",
+        "libbinderwrapper",
+        "libbrillo-binder",
+        "libutils",
+    ],
+
+    aidl: {
+        local_include_dirs: ["binder_bindings"],
+    },
+
+    srcs: [
+        ":libupdate_engine_aidl",
+        "common/error_code_utils.cc",
+        "update_engine_client_android.cc",
+        "update_status_utils.cc",
+    ],
+}
+
+// libpayload_generator (type: static_library)
+// ========================================================
+// Server-side code. This is used for delta_generator and unittests, but not
+// for any client code.
+cc_defaults {
+    name: "libpayload_generator_exports",
+    defaults: [
+        "libpayload_consumer_exports",
+        "update_metadata-protos_exports",
+    ],
+
+    static_libs: [
+        "libavb",
+        "libbrotli",
+        "libbsdiff",
+        "libdivsufsort",
+        "libdivsufsort64",
+        "liblzma",
+        "libpayload_consumer",
+        "libpuffdiff",
+        "libverity_tree",
+        "update_metadata-protos",
+    ],
+    shared_libs: [
+        "libbase",
+        "libext2fs",
+    ],
+}
+
+cc_library_static {
+    name: "libpayload_generator",
+    defaults: [
+        "ue_defaults",
+        "libpayload_generator_exports",
+    ],
+    host_supported: true,
+
+    srcs: [
+        "payload_generator/ab_generator.cc",
+        "payload_generator/annotated_operation.cc",
+        "payload_generator/blob_file_writer.cc",
+        "payload_generator/block_mapping.cc",
+        "payload_generator/boot_img_filesystem.cc",
+        "payload_generator/bzip.cc",
+        "payload_generator/cycle_breaker.cc",
+        "payload_generator/deflate_utils.cc",
+        "payload_generator/delta_diff_generator.cc",
+        "payload_generator/delta_diff_utils.cc",
+        "payload_generator/ext2_filesystem.cc",
+        "payload_generator/extent_ranges.cc",
+        "payload_generator/extent_utils.cc",
+        "payload_generator/full_update_generator.cc",
+        "payload_generator/graph_types.cc",
+        "payload_generator/graph_utils.cc",
+        "payload_generator/inplace_generator.cc",
+        "payload_generator/mapfile_filesystem.cc",
+        "payload_generator/payload_file.cc",
+        "payload_generator/payload_generation_config_android.cc",
+        "payload_generator/payload_generation_config.cc",
+        "payload_generator/payload_signer.cc",
+        "payload_generator/raw_filesystem.cc",
+        "payload_generator/squashfs_filesystem.cc",
+        "payload_generator/tarjan.cc",
+        "payload_generator/topological_sort.cc",
+        "payload_generator/xz_android.cc",
+    ],
+}
+
+// delta_generator (type: executable)
+// ========================================================
+// Server-side delta generator.
+cc_binary_host {
+    name: "delta_generator",
+    defaults: [
+        "ue_defaults",
+        "libpayload_generator_exports",
+        "libpayload_consumer_exports",
+    ],
+
+    static_libs: [
+        "libavb_host_sysdeps",
+        "libpayload_consumer",
+        "libpayload_generator",
+    ],
+
+    srcs: ["payload_generator/generate_delta_main.cc"],
+}
+
+cc_test {
+    name: "ue_unittest_delta_generator",
+    defaults: [
+        "ue_defaults",
+        "libpayload_generator_exports",
+        "libpayload_consumer_exports",
+    ],
+
+    static_libs: [
+        "libpayload_consumer",
+        "libpayload_generator",
+    ],
+
+    srcs: ["payload_generator/generate_delta_main.cc"],
+
+    gtest: false,
+    stem: "delta_generator",
+    relative_install_path: "update_engine_unittests",
+    no_named_install_directory: true,
+}
+
+// test_http_server (type: executable)
+// ========================================================
+// Test HTTP Server.
+cc_test {
+    name: "test_http_server",
+    defaults: ["ue_defaults"],
+    srcs: [
+        "common/http_common.cc",
+        "test_http_server.cc",
+    ],
+
+    gtest: false,
+    relative_install_path: "update_engine_unittests",
+    no_named_install_directory: true,
+}
+
+// test_subprocess (type: executable)
+// ========================================================
+// Test helper subprocess program.
+cc_test {
+    name: "test_subprocess",
+    defaults: ["ue_defaults"],
+    srcs: ["test_subprocess.cc"],
+
+    gtest: false,
+    relative_install_path: "update_engine_unittests",
+    no_named_install_directory: true,
+}
+
+// Public keys for unittests.
+// ========================================================
+genrule {
+    name: "ue_unittest_keys",
+    cmd: "openssl rsa -in $(location unittest_key.pem) -pubout -out $(location unittest_key.pub.pem) &&" +
+        "openssl rsa -in $(location unittest_key2.pem) -pubout -out $(location unittest_key2.pub.pem)",
+    srcs: [
+        "unittest_key.pem",
+        "unittest_key2.pem",
+    ],
+    out: [
+        "unittest_key.pub.pem",
+        "unittest_key2.pub.pem",
+    ],
+}
+
+// Sample images for unittests.
+// ========================================================
+// Extract sample images from the compressed sample_images.tar.bz2 file used by
+// the unittests.
+genrule {
+    name: "ue_unittest_disk_imgs",
+    cmd: "tar -jxf $(in) -C $(genDir)/gen disk_ext2_1k.img disk_ext2_4k.img disk_ext2_4k_empty.img disk_ext2_unittest.img",
+    srcs: ["sample_images/sample_images.tar.bz2"],
+    out: [
+        "gen/disk_ext2_1k.img",
+        "gen/disk_ext2_4k.img",
+        "gen/disk_ext2_4k_empty.img",
+        "gen/disk_ext2_unittest.img",
+    ],
+}
+
+// update_engine_unittests (type: executable)
+// ========================================================
+// Main unittest file.
+cc_test {
+    name: "update_engine_unittests",
+    defaults: [
+        "ue_defaults",
+        "libpayload_generator_exports",
+        "libupdate_engine_android_exports",
+    ],
+    required: [
+        "test_http_server",
+        "test_subprocess",
+        "ue_unittest_delta_generator",
+    ],
+
+    static_libs: [
+        "libpayload_generator",
+        "libbrillo-test-helpers",
+        "libgmock",
+        "libchrome_test_helpers",
+        "libupdate_engine_android",
+    ],
+    shared_libs: [
+        "libhidltransport",
+    ],
+
+    data: [
+        ":ue_unittest_disk_imgs",
+        ":ue_unittest_keys",
+        "unittest_key.pem",
+        "unittest_key2.pem",
+        "update_engine.conf",
+    ],
+
+    srcs: [
+        "boot_control_android_unittest.cc",
+        "certificate_checker_unittest.cc",
+        "common/action_pipe_unittest.cc",
+        "common/action_processor_unittest.cc",
+        "common/action_unittest.cc",
+        "common/cpu_limiter_unittest.cc",
+        "common/fake_prefs.cc",
+        "common/file_fetcher_unittest.cc",
+        "common/hash_calculator_unittest.cc",
+        "common/http_fetcher_unittest.cc",
+        "common/hwid_override_unittest.cc",
+        "common/mock_http_fetcher.cc",
+        "common/prefs_unittest.cc",
+        "common/proxy_resolver_unittest.cc",
+        "common/subprocess_unittest.cc",
+        "common/terminator_unittest.cc",
+        "common/test_utils.cc",
+        "common/utils_unittest.cc",
+        "payload_consumer/bzip_extent_writer_unittest.cc",
+        "payload_consumer/cached_file_descriptor_unittest.cc",
+        "payload_consumer/delta_performer_integration_test.cc",
+        "payload_consumer/delta_performer_unittest.cc",
+        "payload_consumer/extent_reader_unittest.cc",
+        "payload_consumer/extent_writer_unittest.cc",
+        "payload_consumer/fake_file_descriptor.cc",
+        "payload_consumer/file_descriptor_utils_unittest.cc",
+        "payload_consumer/file_writer_unittest.cc",
+        "payload_consumer/filesystem_verifier_action_unittest.cc",
+        "payload_consumer/postinstall_runner_action_unittest.cc",
+        "payload_consumer/verity_writer_android_unittest.cc",
+        "payload_consumer/xz_extent_writer_unittest.cc",
+        "payload_generator/ab_generator_unittest.cc",
+        "payload_generator/blob_file_writer_unittest.cc",
+        "payload_generator/block_mapping_unittest.cc",
+        "payload_generator/boot_img_filesystem_unittest.cc",
+        "payload_generator/cycle_breaker_unittest.cc",
+        "payload_generator/deflate_utils_unittest.cc",
+        "payload_generator/delta_diff_utils_unittest.cc",
+        "payload_generator/ext2_filesystem_unittest.cc",
+        "payload_generator/extent_ranges_unittest.cc",
+        "payload_generator/extent_utils_unittest.cc",
+        "payload_generator/fake_filesystem.cc",
+        "payload_generator/full_update_generator_unittest.cc",
+        "payload_generator/graph_utils_unittest.cc",
+        "payload_generator/inplace_generator_unittest.cc",
+        "payload_generator/mapfile_filesystem_unittest.cc",
+        "payload_generator/payload_file_unittest.cc",
+        "payload_generator/payload_generation_config_android_unittest.cc",
+        "payload_generator/payload_generation_config_unittest.cc",
+        "payload_generator/payload_signer_unittest.cc",
+        "payload_generator/squashfs_filesystem_unittest.cc",
+        "payload_generator/tarjan_unittest.cc",
+        "payload_generator/topological_sort_unittest.cc",
+        "payload_generator/zip_unittest.cc",
+        "testrunner.cc",
+        "update_attempter_android_unittest.cc",
+    ],
+}
+
+// Brillo update payload generation script
+// ========================================================
+cc_prebuilt_binary {
+    name: "brillo_update_payload",
+    device_supported: false,
+    host_supported: true,
+
+    srcs: ["scripts/brillo_update_payload"],
+    required: [
+        "delta_generator",
+        "shflags",
+        "simg2img",
+    ],
+
+    target: {
+        darwin: {
+            enabled: false,
+        },
+    },
 }
diff --git a/Android.mk b/Android.mk
deleted file mode 100644
index 8f2c8fa..0000000
--- a/Android.mk
+++ /dev/null
@@ -1,1037 +0,0 @@
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-ifneq ($(TARGET_BUILD_PDK),true)
-
-LOCAL_PATH := $(my-dir)
-
-# Default values for the USE flags. Override these USE flags from your product
-# by setting BRILLO_USE_* values. Note that we define local variables like
-# local_use_* to prevent leaking our default setting for other packages.
-local_use_binder := $(if $(BRILLO_USE_BINDER),$(BRILLO_USE_BINDER),1)
-local_use_hwid_override := \
-    $(if $(BRILLO_USE_HWID_OVERRIDE),$(BRILLO_USE_HWID_OVERRIDE),0)
-local_use_mtd := $(if $(BRILLO_USE_MTD),$(BRILLO_USE_MTD),0)
-local_use_chrome_network_proxy := 0
-local_use_chrome_kiosk_app := 0
-
-# IoT devices use Omaha for updates.
-local_use_omaha := $(if $(filter true,$(PRODUCT_IOT)),1,0)
-
-ue_common_cflags := \
-    -DUSE_BINDER=$(local_use_binder) \
-    -DUSE_CHROME_NETWORK_PROXY=$(local_use_chrome_network_proxy) \
-    -DUSE_CHROME_KIOSK_APP=$(local_use_chrome_kiosk_app) \
-    -DUSE_HWID_OVERRIDE=$(local_use_hwid_override) \
-    -DUSE_MTD=$(local_use_mtd) \
-    -DUSE_OMAHA=$(local_use_omaha) \
-    -D_FILE_OFFSET_BITS=64 \
-    -D_POSIX_C_SOURCE=199309L \
-    -Wa,--noexecstack \
-    -Wall \
-    -Werror \
-    -Wextra \
-    -Wformat=2 \
-    -Wno-psabi \
-    -Wno-unused-parameter \
-    -ffunction-sections \
-    -fstack-protector-strong \
-    -fvisibility=hidden
-ue_common_cppflags := \
-    -Wnon-virtual-dtor \
-    -fno-strict-aliasing
-ue_common_ldflags := \
-    -Wl,--gc-sections
-ue_common_c_includes := \
-    $(LOCAL_PATH)/client_library/include \
-    system
-ue_common_shared_libraries := \
-    libbrillo-stream \
-    libbrillo \
-    libchrome
-ue_common_static_libraries := \
-    libgtest_prod \
-
-# update_metadata-protos (type: static_library)
-# ========================================================
-# Protobufs.
-ue_update_metadata_protos_exported_static_libraries := \
-    update_metadata-protos
-ue_update_metadata_protos_exported_shared_libraries := \
-    libprotobuf-cpp-lite
-
-ue_update_metadata_protos_src_files := \
-    update_metadata.proto
-
-# Build for the host.
-include $(CLEAR_VARS)
-LOCAL_MODULE := update_metadata-protos
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-LOCAL_IS_HOST_MODULE := true
-generated_sources_dir := $(call local-generated-sources-dir)
-LOCAL_EXPORT_C_INCLUDE_DIRS := $(generated_sources_dir)/proto/system
-LOCAL_SRC_FILES := $(ue_update_metadata_protos_src_files)
-LOCAL_CFLAGS := -Wall -Werror
-include $(BUILD_HOST_STATIC_LIBRARY)
-
-# Build for the target.
-include $(CLEAR_VARS)
-LOCAL_MODULE := update_metadata-protos
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-generated_sources_dir := $(call local-generated-sources-dir)
-LOCAL_EXPORT_C_INCLUDE_DIRS := $(generated_sources_dir)/proto/system
-LOCAL_SRC_FILES := $(ue_update_metadata_protos_src_files)
-LOCAL_CFLAGS := -Wall -Werror
-include $(BUILD_STATIC_LIBRARY)
-
-# libpayload_consumer (type: static_library)
-# ========================================================
-# The payload application component and common dependencies.
-ue_libpayload_consumer_exported_static_libraries := \
-    update_metadata-protos \
-    libxz \
-    libbz \
-    libbspatch \
-    libbrotli \
-    libpuffpatch \
-    $(ue_update_metadata_protos_exported_static_libraries)
-ue_libpayload_consumer_exported_shared_libraries := \
-    libcrypto \
-    $(ue_update_metadata_protos_exported_shared_libraries)
-
-ue_libpayload_consumer_src_files := \
-    common/action_processor.cc \
-    common/boot_control_stub.cc \
-    common/clock.cc \
-    common/constants.cc \
-    common/cpu_limiter.cc \
-    common/error_code_utils.cc \
-    common/file_fetcher.cc \
-    common/hash_calculator.cc \
-    common/http_common.cc \
-    common/http_fetcher.cc \
-    common/hwid_override.cc \
-    common/multi_range_http_fetcher.cc \
-    common/platform_constants_android.cc \
-    common/prefs.cc \
-    common/subprocess.cc \
-    common/terminator.cc \
-    common/utils.cc \
-    payload_consumer/bzip_extent_writer.cc \
-    payload_consumer/cached_file_descriptor.cc \
-    payload_consumer/delta_performer.cc \
-    payload_consumer/download_action.cc \
-    payload_consumer/extent_reader.cc \
-    payload_consumer/extent_writer.cc \
-    payload_consumer/file_descriptor.cc \
-    payload_consumer/file_descriptor_utils.cc \
-    payload_consumer/file_writer.cc \
-    payload_consumer/filesystem_verifier_action.cc \
-    payload_consumer/install_plan.cc \
-    payload_consumer/mount_history.cc \
-    payload_consumer/payload_constants.cc \
-    payload_consumer/payload_metadata.cc \
-    payload_consumer/payload_verifier.cc \
-    payload_consumer/postinstall_runner_action.cc \
-    payload_consumer/xz_extent_writer.cc
-
-ifeq ($(HOST_OS),linux)
-# Build for the host.
-include $(CLEAR_VARS)
-LOCAL_MODULE := libpayload_consumer
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := \
-    $(ue_common_c_includes)
-LOCAL_STATIC_LIBRARIES := \
-    update_metadata-protos \
-    $(ue_common_static_libraries) \
-    $(ue_libpayload_consumer_exported_static_libraries) \
-    $(ue_update_metadata_protos_exported_static_libraries)
-LOCAL_SHARED_LIBRARIES := \
-    $(ue_common_shared_libraries) \
-    $(ue_libpayload_consumer_exported_shared_libraries) \
-    $(ue_update_metadata_protos_exported_shared_libraries)
-LOCAL_SRC_FILES := $(ue_libpayload_consumer_src_files)
-include $(BUILD_HOST_STATIC_LIBRARY)
-endif  # HOST_OS == linux
-
-# Build for the target.
-include $(CLEAR_VARS)
-LOCAL_MODULE := libpayload_consumer
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := \
-    $(ue_common_c_includes)
-LOCAL_STATIC_LIBRARIES := \
-    update_metadata-protos \
-    $(ue_common_static_libraries) \
-    $(ue_libpayload_consumer_exported_static_libraries:-host=) \
-    $(ue_update_metadata_protos_exported_static_libraries)
-LOCAL_SHARED_LIBRARIES := \
-    $(ue_common_shared_libraries) \
-    $(ue_libpayload_consumer_exported_shared_libraries:-host=) \
-    $(ue_update_metadata_protos_exported_shared_libraries)
-LOCAL_SRC_FILES := $(ue_libpayload_consumer_src_files)
-include $(BUILD_STATIC_LIBRARY)
-
-# libupdate_engine_boot_control (type: static_library)
-# ========================================================
-# A BootControl class implementation using Android's HIDL boot_control HAL.
-ue_libupdate_engine_boot_control_exported_static_libraries := \
-    update_metadata-protos \
-    $(ue_update_metadata_protos_exported_static_libraries)
-
-ue_libupdate_engine_boot_control_exported_shared_libraries := \
-    libhwbinder \
-    libhidlbase \
-    libutils \
-    android.hardware.boot@1.0 \
-    $(ue_update_metadata_protos_exported_shared_libraries)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := libupdate_engine_boot_control
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := \
-    $(ue_common_c_includes) \
-    bootable/recovery
-LOCAL_STATIC_LIBRARIES := \
-    $(ue_common_static_libraries) \
-    $(ue_libupdate_engine_boot_control_exported_static_libraries)
-LOCAL_SHARED_LIBRARIES := \
-    $(ue_common_shared_libraries) \
-    $(ue_libupdate_engine_boot_control_exported_shared_libraries)
-LOCAL_SRC_FILES := \
-    boot_control_android.cc
-include $(BUILD_STATIC_LIBRARY)
-
-ifeq ($(local_use_omaha),1)
-
-# libupdate_engine (type: static_library)
-# ========================================================
-# The main daemon static_library with all the code used to check for updates
-# with Omaha and expose a DBus daemon.
-ue_libupdate_engine_exported_c_includes := \
-    external/cros/system_api/dbus
-ue_libupdate_engine_exported_static_libraries := \
-    libpayload_consumer \
-    update_metadata-protos \
-    libbz \
-    libfs_mgr \
-    libbase \
-    liblog \
-    $(ue_libpayload_consumer_exported_static_libraries) \
-    $(ue_update_metadata_protos_exported_static_libraries) \
-    libupdate_engine_boot_control \
-    $(ue_libupdate_engine_boot_control_exported_static_libraries)
-ue_libupdate_engine_exported_shared_libraries := \
-    libmetrics \
-    libexpat \
-    libbrillo-policy \
-    libcurl \
-    libcutils \
-    libssl \
-    $(ue_libpayload_consumer_exported_shared_libraries) \
-    $(ue_update_metadata_protos_exported_shared_libraries) \
-    $(ue_libupdate_engine_boot_control_exported_shared_libraries)
-ifeq ($(local_use_binder),1)
-ue_libupdate_engine_exported_shared_libraries += \
-    libbinder \
-    libbinderwrapper \
-    libbrillo-binder \
-    libutils
-endif  # local_use_binder == 1
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := libupdate_engine
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_EXPORT_C_INCLUDE_DIRS := $(ue_libupdate_engine_exported_c_includes)
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := \
-    $(ue_common_c_includes) \
-    $(ue_libupdate_engine_exported_c_includes) \
-    bootable/recovery
-LOCAL_STATIC_LIBRARIES := \
-    libpayload_consumer \
-    update_metadata-protos \
-    $(ue_common_static_libraries) \
-    $(ue_libupdate_engine_exported_static_libraries:-host=) \
-    $(ue_libpayload_consumer_exported_static_libraries:-host=) \
-    $(ue_update_metadata_protos_exported_static_libraries)
-LOCAL_SHARED_LIBRARIES := \
-    $(ue_common_shared_libraries) \
-    $(ue_libupdate_engine_exported_shared_libraries:-host=) \
-    $(ue_libpayload_consumer_exported_shared_libraries:-host=) \
-    $(ue_update_metadata_protos_exported_shared_libraries)
-LOCAL_SRC_FILES := \
-    certificate_checker.cc \
-    common_service.cc \
-    connection_manager_android.cc \
-    connection_utils.cc \
-    daemon.cc \
-    hardware_android.cc \
-    image_properties_android.cc \
-    libcurl_http_fetcher.cc \
-    metrics_reporter_omaha.cc \
-    metrics_utils.cc \
-    omaha_request_action.cc \
-    omaha_request_params.cc \
-    omaha_response_handler_action.cc \
-    omaha_utils.cc \
-    p2p_manager.cc \
-    payload_state.cc \
-    power_manager_android.cc \
-    proxy_resolver.cc \
-    real_system_state.cc \
-    update_attempter.cc \
-    update_manager/android_things_policy.cc \
-    update_manager/api_restricted_downloads_policy_impl.cc \
-    update_manager/boxed_value.cc \
-    update_manager/default_policy.cc \
-    update_manager/enough_slots_ab_updates_policy_impl.cc \
-    update_manager/evaluation_context.cc \
-    update_manager/interactive_update_policy_impl.cc \
-    update_manager/next_update_check_policy_impl.cc \
-    update_manager/official_build_check_policy_impl.cc \
-    update_manager/policy.cc \
-    update_manager/real_config_provider.cc \
-    update_manager/real_device_policy_provider.cc \
-    update_manager/real_random_provider.cc \
-    update_manager/real_system_provider.cc \
-    update_manager/real_time_provider.cc \
-    update_manager/real_updater_provider.cc \
-    update_manager/state_factory.cc \
-    update_manager/update_manager.cc \
-    update_status_utils.cc \
-    utils_android.cc
-ifeq ($(local_use_binder),1)
-LOCAL_AIDL_INCLUDES += $(LOCAL_PATH)/binder_bindings
-LOCAL_SRC_FILES += \
-    binder_bindings/android/brillo/IUpdateEngine.aidl \
-    binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl \
-    binder_service_brillo.cc \
-    parcelable_update_engine_status.cc
-endif  # local_use_binder == 1
-ifeq ($(local_use_chrome_network_proxy),1)
-LOCAL_SRC_FILES += \
-    chrome_browser_proxy_resolver.cc
-endif  # local_use_chrome_network_proxy == 1
-include $(BUILD_STATIC_LIBRARY)
-
-else  # local_use_omaha == 1
-
-ifneq ($(local_use_binder),1)
-$(error USE_BINDER is disabled but is required in non-Brillo devices.)
-endif  # local_use_binder == 1
-
-# libupdate_engine_android (type: static_library)
-# ========================================================
-# The main daemon static_library used in Android (non-Brillo). This only has a
-# loop to apply payloads provided by the upper layer via a Binder interface.
-ue_libupdate_engine_android_exported_static_libraries := \
-    libpayload_consumer \
-    libfs_mgr \
-    libbase \
-    liblog \
-    $(ue_libpayload_consumer_exported_static_libraries) \
-    libupdate_engine_boot_control \
-    $(ue_libupdate_engine_boot_control_exported_static_libraries)
-ue_libupdate_engine_android_exported_shared_libraries := \
-    $(ue_libpayload_consumer_exported_shared_libraries) \
-    $(ue_libupdate_engine_boot_control_exported_shared_libraries) \
-    libandroid_net \
-    libbinder \
-    libbinderwrapper \
-    libbrillo-binder \
-    libcutils \
-    libcurl \
-    libmetricslogger \
-    libssl \
-    libutils
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := libupdate_engine_android
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := \
-    $(ue_common_c_includes) \
-    bootable/recovery
-#TODO(deymo): Remove external/cros/system_api/dbus once the strings are moved
-# out of the DBus interface.
-LOCAL_C_INCLUDES += \
-    external/cros/system_api/dbus
-LOCAL_STATIC_LIBRARIES := \
-    $(ue_common_static_libraries) \
-    $(ue_libupdate_engine_android_exported_static_libraries:-host=)
-LOCAL_SHARED_LIBRARIES += \
-    $(ue_common_shared_libraries) \
-    $(ue_libupdate_engine_android_exported_shared_libraries:-host=)
-LOCAL_AIDL_INCLUDES := $(LOCAL_PATH)/binder_bindings
-LOCAL_SRC_FILES += \
-    binder_bindings/android/os/IUpdateEngine.aidl \
-    binder_bindings/android/os/IUpdateEngineCallback.aidl \
-    binder_service_android.cc \
-    certificate_checker.cc \
-    daemon.cc \
-    daemon_state_android.cc \
-    hardware_android.cc \
-    libcurl_http_fetcher.cc \
-    metrics_reporter_android.cc \
-    metrics_utils.cc \
-    network_selector_android.cc \
-    proxy_resolver.cc \
-    update_attempter_android.cc \
-    update_status_utils.cc \
-    utils_android.cc
-include $(BUILD_STATIC_LIBRARY)
-
-endif  # local_use_omaha == 1
-
-# update_engine (type: executable)
-# ========================================================
-# update_engine daemon.
-include $(CLEAR_VARS)
-LOCAL_MODULE := update_engine
-LOCAL_MODULE_CLASS := EXECUTABLES
-LOCAL_REQUIRED_MODULES := \
-    cacerts_google
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := \
-    $(ue_common_c_includes)
-LOCAL_SHARED_LIBRARIES := \
-    $(ue_common_shared_libraries)
-LOCAL_STATIC_LIBRARIES := \
-    $(ue_common_static_libraries)
-LOCAL_SRC_FILES := \
-    main.cc
-
-ifeq ($(local_use_omaha),1)
-LOCAL_C_INCLUDES += \
-    $(ue_libupdate_engine_exported_c_includes)
-LOCAL_STATIC_LIBRARIES += \
-    libupdate_engine \
-    $(ue_libupdate_engine_exported_static_libraries:-host=)
-LOCAL_SHARED_LIBRARIES += \
-    $(ue_libupdate_engine_exported_shared_libraries:-host=)
-else  # local_use_omaha == 1
-LOCAL_STATIC_LIBRARIES += \
-    libupdate_engine_android \
-    $(ue_libupdate_engine_android_exported_static_libraries:-host=)
-LOCAL_SHARED_LIBRARIES += \
-    $(ue_libupdate_engine_android_exported_shared_libraries:-host=)
-endif  # local_use_omaha == 1
-
-LOCAL_INIT_RC := update_engine.rc
-include $(BUILD_EXECUTABLE)
-
-# update_engine_sideload (type: executable)
-# ========================================================
-# A static binary equivalent to update_engine daemon that installs an update
-# from a local file directly instead of running in the background.
-include $(CLEAR_VARS)
-LOCAL_MODULE := update_engine_sideload
-LOCAL_FORCE_STATIC_EXECUTABLE := true
-LOCAL_MODULE_PATH := $(TARGET_RECOVERY_ROOT_OUT)/sbin
-LOCAL_MODULE_CLASS := EXECUTABLES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := \
-    $(ue_common_cflags) \
-    -D_UE_SIDELOAD
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := \
-    $(ue_common_c_includes) \
-    bootable/recovery
-#TODO(deymo): Remove external/cros/system_api/dbus once the strings are moved
-# out of the DBus interface.
-LOCAL_C_INCLUDES += \
-    external/cros/system_api/dbus
-LOCAL_SRC_FILES := \
-    boot_control_recovery.cc \
-    hardware_android.cc \
-    metrics_reporter_stub.cc \
-    metrics_utils.cc \
-    network_selector_stub.cc \
-    proxy_resolver.cc \
-    sideload_main.cc \
-    update_attempter_android.cc \
-    update_status_utils.cc \
-    utils_android.cc
-LOCAL_STATIC_LIBRARIES := \
-    libfs_mgr \
-    libbase \
-    liblog \
-    libpayload_consumer \
-    update_metadata-protos \
-    $(ue_common_static_libraries) \
-    $(ue_libpayload_consumer_exported_static_libraries:-host=) \
-    $(ue_update_metadata_protos_exported_static_libraries)
-# We add the static versions of the shared libraries since we are forcing this
-# binary to be a static binary, so we also need to include all the static
-# library dependencies of these static libraries.
-LOCAL_STATIC_LIBRARIES += \
-    $(ue_common_shared_libraries) \
-    libbase \
-    liblog \
-    $(ue_libpayload_consumer_exported_shared_libraries:-host=) \
-    $(ue_update_metadata_protos_exported_shared_libraries) \
-    libevent \
-    libmodpb64 \
-    libgtest_prod
-
-ifeq ($(strip $(PRODUCT_STATIC_BOOT_CONTROL_HAL)),)
-# No static boot_control HAL defined, so no sideload support. We use a fake
-# boot_control HAL to allow compiling update_engine_sideload for test purposes.
-ifeq ($(strip $(AB_OTA_UPDATER)),true)
-$(warning No PRODUCT_STATIC_BOOT_CONTROL_HAL configured but AB_OTA_UPDATER is \
-true, no update sideload support.)
-endif  # AB_OTA_UPDATER == true
-LOCAL_SRC_FILES += \
-    boot_control_recovery_stub.cc
-else  # PRODUCT_STATIC_BOOT_CONTROL_HAL != ""
-LOCAL_STATIC_LIBRARIES += \
-    $(PRODUCT_STATIC_BOOT_CONTROL_HAL)
-endif  # PRODUCT_STATIC_BOOT_CONTROL_HAL != ""
-
-include $(BUILD_EXECUTABLE)
-
-# libupdate_engine_client (type: shared_library)
-# ========================================================
-include $(CLEAR_VARS)
-LOCAL_MODULE := libupdate_engine_client
-LOCAL_CFLAGS := \
-    -Wall \
-    -Werror \
-    -Wno-unused-parameter \
-    -DUSE_BINDER=$(local_use_binder)
-LOCAL_CPP_EXTENSION := .cc
-# TODO(deymo): Remove "external/cros/system_api/dbus" when dbus is not used.
-LOCAL_C_INCLUDES := \
-    $(LOCAL_PATH)/client_library/include \
-    external/cros/system_api/dbus \
-    system
-LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/client_library/include
-LOCAL_SHARED_LIBRARIES := \
-    libchrome \
-    libbrillo
-LOCAL_SRC_FILES := \
-    client_library/client.cc \
-    update_status_utils.cc
-
-# We only support binder IPC mechanism in Android.
-ifeq ($(local_use_binder),1)
-LOCAL_AIDL_INCLUDES := $(LOCAL_PATH)/binder_bindings
-LOCAL_SHARED_LIBRARIES += \
-    libbinder \
-    libbrillo-binder \
-    libutils
-LOCAL_SRC_FILES += \
-    binder_bindings/android/brillo/IUpdateEngine.aidl \
-    binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl \
-    client_library/client_binder.cc \
-    parcelable_update_engine_status.cc
-endif  # local_use_binder == 1
-
-include $(BUILD_SHARED_LIBRARY)
-
-# update_engine_client (type: executable)
-# ========================================================
-# update_engine console client.
-include $(CLEAR_VARS)
-LOCAL_MODULE := update_engine_client
-LOCAL_MODULE_CLASS := EXECUTABLES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := $(ue_common_c_includes)
-LOCAL_SHARED_LIBRARIES := $(ue_common_shared_libraries)
-LOCAL_STATIC_LIBRARIES := $(ue_common_static_libraries)
-ifeq ($(local_use_omaha),1)
-LOCAL_SHARED_LIBRARIES += \
-    libupdate_engine_client
-LOCAL_SRC_FILES := \
-    update_engine_client.cc \
-    common/error_code_utils.cc \
-    omaha_utils.cc
-else  # local_use_omaha == 1
-#TODO(deymo): Remove external/cros/system_api/dbus once the strings are moved
-# out of the DBus interface.
-LOCAL_C_INCLUDES += \
-    external/cros/system_api/dbus
-LOCAL_SHARED_LIBRARIES += \
-    libbinder \
-    libbinderwrapper \
-    libbrillo-binder \
-    libutils
-LOCAL_AIDL_INCLUDES := $(LOCAL_PATH)/binder_bindings
-LOCAL_SRC_FILES := \
-    binder_bindings/android/os/IUpdateEngine.aidl \
-    binder_bindings/android/os/IUpdateEngineCallback.aidl \
-    common/error_code_utils.cc \
-    update_engine_client_android.cc \
-    update_status_utils.cc
-endif  # local_use_omaha == 1
-include $(BUILD_EXECUTABLE)
-
-# libpayload_generator (type: static_library)
-# ========================================================
-# server-side code. This is used for delta_generator and unittests but not
-# for any client code.
-ue_libpayload_generator_exported_static_libraries := \
-    libbsdiff \
-    libdivsufsort \
-    libdivsufsort64 \
-    libbrotli \
-    liblzma \
-    libpayload_consumer \
-    libpuffdiff \
-    libz \
-    update_metadata-protos \
-    $(ue_libpayload_consumer_exported_static_libraries) \
-    $(ue_update_metadata_protos_exported_static_libraries)
-ue_libpayload_generator_exported_shared_libraries := \
-    libext2fs \
-    $(ue_libpayload_consumer_exported_shared_libraries) \
-    $(ue_update_metadata_protos_exported_shared_libraries)
-
-ue_libpayload_generator_src_files := \
-    payload_generator/ab_generator.cc \
-    payload_generator/annotated_operation.cc \
-    payload_generator/blob_file_writer.cc \
-    payload_generator/block_mapping.cc \
-    payload_generator/bzip.cc \
-    payload_generator/cycle_breaker.cc \
-    payload_generator/deflate_utils.cc \
-    payload_generator/delta_diff_generator.cc \
-    payload_generator/delta_diff_utils.cc \
-    payload_generator/ext2_filesystem.cc \
-    payload_generator/extent_ranges.cc \
-    payload_generator/extent_utils.cc \
-    payload_generator/full_update_generator.cc \
-    payload_generator/graph_types.cc \
-    payload_generator/graph_utils.cc \
-    payload_generator/inplace_generator.cc \
-    payload_generator/mapfile_filesystem.cc \
-    payload_generator/payload_file.cc \
-    payload_generator/payload_generation_config.cc \
-    payload_generator/payload_signer.cc \
-    payload_generator/raw_filesystem.cc \
-    payload_generator/squashfs_filesystem.cc \
-    payload_generator/tarjan.cc \
-    payload_generator/topological_sort.cc \
-    payload_generator/xz_android.cc
-
-ifeq ($(HOST_OS),linux)
-# Build for the host.
-include $(CLEAR_VARS)
-LOCAL_MODULE := libpayload_generator
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := $(ue_common_c_includes)
-LOCAL_STATIC_LIBRARIES := \
-    libbsdiff \
-    libdivsufsort \
-    libdivsufsort64 \
-    liblzma \
-    libpayload_consumer \
-    libpuffdiff \
-    update_metadata-protos \
-    $(ue_common_static_libraries) \
-    $(ue_libpayload_consumer_exported_static_libraries) \
-    $(ue_update_metadata_protos_exported_static_libraries)
-LOCAL_SHARED_LIBRARIES := \
-    $(ue_common_shared_libraries) \
-    $(ue_libpayload_generator_exported_shared_libraries) \
-    $(ue_libpayload_consumer_exported_shared_libraries) \
-    $(ue_update_metadata_protos_exported_shared_libraries)
-LOCAL_SRC_FILES := $(ue_libpayload_generator_src_files)
-include $(BUILD_HOST_STATIC_LIBRARY)
-endif  # HOST_OS == linux
-
-# Build for the target.
-include $(CLEAR_VARS)
-LOCAL_MODULE := libpayload_generator
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := $(ue_common_c_includes)
-LOCAL_STATIC_LIBRARIES := \
-    libbsdiff \
-    libdivsufsort \
-    libdivsufsort64 \
-    libpayload_consumer \
-    update_metadata-protos \
-    liblzma \
-    $(ue_common_static_libraries) \
-    $(ue_libpayload_consumer_exported_static_libraries:-host=) \
-    $(ue_update_metadata_protos_exported_static_libraries)
-LOCAL_SHARED_LIBRARIES := \
-    $(ue_common_shared_libraries) \
-    $(ue_libpayload_generator_exported_shared_libraries:-host=) \
-    $(ue_libpayload_consumer_exported_shared_libraries:-host=) \
-    $(ue_update_metadata_protos_exported_shared_libraries)
-LOCAL_SRC_FILES := $(ue_libpayload_generator_src_files)
-include $(BUILD_STATIC_LIBRARY)
-
-# delta_generator (type: executable)
-# ========================================================
-# server-side delta generator.
-ue_delta_generator_src_files := \
-    payload_generator/generate_delta_main.cc
-
-ifeq ($(HOST_OS),linux)
-# Build for the host.
-include $(CLEAR_VARS)
-LOCAL_MODULE := delta_generator
-LOCAL_MODULE_CLASS := EXECUTABLES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := $(ue_common_c_includes)
-LOCAL_STATIC_LIBRARIES := \
-    libpayload_consumer \
-    libpayload_generator \
-    $(ue_common_static_libraries) \
-    $(ue_libpayload_consumer_exported_static_libraries) \
-    $(ue_libpayload_generator_exported_static_libraries)
-LOCAL_SHARED_LIBRARIES := \
-    $(ue_common_shared_libraries) \
-    $(ue_libpayload_consumer_exported_shared_libraries) \
-    $(ue_libpayload_generator_exported_shared_libraries)
-LOCAL_SRC_FILES := $(ue_delta_generator_src_files)
-include $(BUILD_HOST_EXECUTABLE)
-endif  # HOST_OS == linux
-
-# Build for the target.
-include $(CLEAR_VARS)
-LOCAL_MODULE := ue_unittest_delta_generator
-LOCAL_MODULE_PATH := $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests
-LOCAL_MODULE_STEM := delta_generator
-LOCAL_MODULE_CLASS := EXECUTABLES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := $(ue_common_c_includes)
-LOCAL_STATIC_LIBRARIES := \
-    libpayload_consumer \
-    libpayload_generator \
-    $(ue_common_static_libraries) \
-    $(ue_libpayload_consumer_exported_static_libraries:-host=) \
-    $(ue_libpayload_generator_exported_static_libraries:-host=)
-LOCAL_SHARED_LIBRARIES := \
-    $(ue_common_shared_libraries) \
-    $(ue_libpayload_consumer_exported_shared_libraries:-host=) \
-    $(ue_libpayload_generator_exported_shared_libraries:-host=)
-LOCAL_SRC_FILES := $(ue_delta_generator_src_files)
-include $(BUILD_EXECUTABLE)
-
-# Private and public keys for unittests.
-# ========================================================
-# Generate a module that installs a prebuilt private key and a module that
-# installs a public key generated from the private key.
-#
-# $(1): The path to the private key in pem format.
-define ue-unittest-keys
-    $(eval include $(CLEAR_VARS)) \
-    $(eval LOCAL_MODULE := ue_$(1).pem) \
-    $(eval LOCAL_MODULE_CLASS := ETC) \
-    $(eval LOCAL_SRC_FILES := $(1).pem) \
-    $(eval LOCAL_MODULE_PATH := \
-        $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests) \
-    $(eval LOCAL_MODULE_STEM := $(1).pem) \
-    $(eval include $(BUILD_PREBUILT)) \
-    \
-    $(eval include $(CLEAR_VARS)) \
-    $(eval LOCAL_MODULE := ue_$(1).pub.pem) \
-    $(eval LOCAL_MODULE_CLASS := ETC) \
-    $(eval LOCAL_MODULE_PATH := \
-        $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests) \
-    $(eval LOCAL_MODULE_STEM := $(1).pub.pem) \
-    $(eval include $(BUILD_SYSTEM)/base_rules.mk) \
-    $(eval $(LOCAL_BUILT_MODULE) : $(LOCAL_PATH)/$(1).pem ; \
-        openssl rsa -in $$< -pubout -out $$@)
-endef
-
-$(call ue-unittest-keys,unittest_key)
-$(call ue-unittest-keys,unittest_key2)
-
-# Sample images for unittests.
-# ========================================================
-# Generate a prebuilt module that installs a sample image from the compressed
-# sample_images.tar.bz2 file used by the unittests.
-#
-# $(1): The filename in the sample_images.tar.bz2
-define ue-unittest-sample-image
-    $(eval include $(CLEAR_VARS)) \
-    $(eval LOCAL_MODULE := ue_unittest_$(1)) \
-    $(eval LOCAL_MODULE_CLASS := EXECUTABLES) \
-    $(eval LOCAL_MODULE_PATH := \
-        $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests/gen) \
-    $(eval LOCAL_MODULE_STEM := $(1)) \
-    $(eval include $(BUILD_SYSTEM)/base_rules.mk) \
-    $(eval $(LOCAL_BUILT_MODULE) : \
-        $(LOCAL_PATH)/sample_images/sample_images.tar.bz2 ; \
-        tar -jxf $$< -C $$(dir $$@) $$(notdir $$@) && touch $$@)
-endef
-
-$(call ue-unittest-sample-image,disk_ext2_1k.img)
-$(call ue-unittest-sample-image,disk_ext2_4k.img)
-$(call ue-unittest-sample-image,disk_ext2_4k_empty.img)
-$(call ue-unittest-sample-image,disk_ext2_unittest.img)
-
-# update_engine.conf
-# ========================================================
-include $(CLEAR_VARS)
-LOCAL_MODULE := ue_unittest_update_engine.conf
-LOCAL_MODULE_CLASS := ETC
-LOCAL_MODULE_PATH := $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests
-LOCAL_MODULE_STEM := update_engine.conf
-LOCAL_SRC_FILES := update_engine.conf
-include $(BUILD_PREBUILT)
-
-# test_http_server (type: executable)
-# ========================================================
-# Test HTTP Server.
-include $(CLEAR_VARS)
-LOCAL_MODULE := test_http_server
-LOCAL_MODULE_PATH := $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests
-LOCAL_MODULE_CLASS := EXECUTABLES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := $(ue_common_c_includes)
-LOCAL_SHARED_LIBRARIES := $(ue_common_shared_libraries)
-LOCAL_STATIC_LIBRARIES := $(ue_common_static_libraries)
-LOCAL_SRC_FILES := \
-    common/http_common.cc \
-    test_http_server.cc
-include $(BUILD_EXECUTABLE)
-
-# test_subprocess (type: executable)
-# ========================================================
-# Test helper subprocess program.
-include $(CLEAR_VARS)
-LOCAL_MODULE := test_subprocess
-LOCAL_MODULE_PATH := $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests
-LOCAL_MODULE_CLASS := EXECUTABLES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := $(ue_common_c_includes)
-LOCAL_SHARED_LIBRARIES := $(ue_common_shared_libraries)
-LOCAL_STATIC_LIBRARIES := $(ue_common_static_libraries)
-LOCAL_SRC_FILES := test_subprocess.cc
-include $(BUILD_EXECUTABLE)
-
-# update_engine_unittests (type: executable)
-# ========================================================
-# Main unittest file.
-include $(CLEAR_VARS)
-LOCAL_MODULE := update_engine_unittests
-LOCAL_REQUIRED_MODULES := \
-    test_http_server \
-    test_subprocess \
-    ue_unittest_delta_generator \
-    ue_unittest_disk_ext2_1k.img \
-    ue_unittest_disk_ext2_4k.img \
-    ue_unittest_disk_ext2_4k_empty.img \
-    ue_unittest_disk_ext2_unittest.img \
-    ue_unittest_key.pem \
-    ue_unittest_key.pub.pem \
-    ue_unittest_key2.pem \
-    ue_unittest_key2.pub.pem \
-    ue_unittest_update_engine.conf
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := \
-    $(ue_common_c_includes) \
-    $(ue_libupdate_engine_exported_c_includes)
-LOCAL_STATIC_LIBRARIES := \
-    libpayload_generator \
-    libbrillo-test-helpers \
-    libgmock \
-    libchrome_test_helpers \
-    $(ue_common_static_libraries) \
-    $(ue_libpayload_generator_exported_static_libraries:-host=)
-LOCAL_SHARED_LIBRARIES := \
-    $(ue_common_shared_libraries) \
-    $(ue_libpayload_generator_exported_shared_libraries:-host=)
-LOCAL_SRC_FILES := \
-    certificate_checker_unittest.cc \
-    common/action_pipe_unittest.cc \
-    common/action_processor_unittest.cc \
-    common/action_unittest.cc \
-    common/cpu_limiter_unittest.cc \
-    common/fake_prefs.cc \
-    common/file_fetcher_unittest.cc \
-    common/hash_calculator_unittest.cc \
-    common/http_fetcher_unittest.cc \
-    common/hwid_override_unittest.cc \
-    common/mock_http_fetcher.cc \
-    common/prefs_unittest.cc \
-    common/subprocess_unittest.cc \
-    common/terminator_unittest.cc \
-    common/test_utils.cc \
-    common/utils_unittest.cc \
-    payload_consumer/bzip_extent_writer_unittest.cc \
-    payload_consumer/cached_file_descriptor_unittest.cc \
-    payload_consumer/delta_performer_integration_test.cc \
-    payload_consumer/delta_performer_unittest.cc \
-    payload_consumer/extent_reader_unittest.cc \
-    payload_consumer/extent_writer_unittest.cc \
-    payload_consumer/fake_file_descriptor.cc \
-    payload_consumer/file_descriptor_utils_unittest.cc \
-    payload_consumer/file_writer_unittest.cc \
-    payload_consumer/filesystem_verifier_action_unittest.cc \
-    payload_consumer/postinstall_runner_action_unittest.cc \
-    payload_consumer/xz_extent_writer_unittest.cc \
-    payload_generator/ab_generator_unittest.cc \
-    payload_generator/blob_file_writer_unittest.cc \
-    payload_generator/block_mapping_unittest.cc \
-    payload_generator/cycle_breaker_unittest.cc \
-    payload_generator/deflate_utils_unittest.cc \
-    payload_generator/delta_diff_utils_unittest.cc \
-    payload_generator/ext2_filesystem_unittest.cc \
-    payload_generator/extent_ranges_unittest.cc \
-    payload_generator/extent_utils_unittest.cc \
-    payload_generator/fake_filesystem.cc \
-    payload_generator/full_update_generator_unittest.cc \
-    payload_generator/graph_utils_unittest.cc \
-    payload_generator/inplace_generator_unittest.cc \
-    payload_generator/mapfile_filesystem_unittest.cc \
-    payload_generator/payload_file_unittest.cc \
-    payload_generator/payload_generation_config_unittest.cc \
-    payload_generator/payload_signer_unittest.cc \
-    payload_generator/squashfs_filesystem_unittest.cc \
-    payload_generator/tarjan_unittest.cc \
-    payload_generator/topological_sort_unittest.cc \
-    payload_generator/zip_unittest.cc \
-    proxy_resolver_unittest.cc \
-    testrunner.cc
-ifeq ($(local_use_omaha),1)
-LOCAL_C_INCLUDES += \
-    $(ue_libupdate_engine_exported_c_includes)
-LOCAL_STATIC_LIBRARIES += \
-    libupdate_engine \
-    $(ue_libupdate_engine_exported_static_libraries:-host=)
-LOCAL_SHARED_LIBRARIES += \
-    $(ue_libupdate_engine_exported_shared_libraries:-host=)
-LOCAL_SRC_FILES += \
-    common_service_unittest.cc \
-    fake_system_state.cc \
-    image_properties_android_unittest.cc \
-    metrics_reporter_omaha_unittest.cc \
-    metrics_utils_unittest.cc \
-    omaha_request_action_unittest.cc \
-    omaha_request_params_unittest.cc \
-    omaha_response_handler_action_unittest.cc \
-    omaha_utils_unittest.cc \
-    p2p_manager_unittest.cc \
-    payload_consumer/download_action_unittest.cc \
-    payload_state_unittest.cc \
-    parcelable_update_engine_status_unittest.cc \
-    update_attempter_unittest.cc \
-    update_manager/android_things_policy_unittest.cc \
-    update_manager/boxed_value_unittest.cc \
-    update_manager/chromeos_policy.cc \
-    update_manager/chromeos_policy_unittest.cc \
-    update_manager/enterprise_device_policy_impl.cc \
-    update_manager/evaluation_context_unittest.cc \
-    update_manager/generic_variables_unittest.cc \
-    update_manager/next_update_check_policy_impl_unittest.cc \
-    update_manager/out_of_box_experience_policy_impl.cc \
-    update_manager/policy_test_utils.cc \
-    update_manager/prng_unittest.cc \
-    update_manager/real_device_policy_provider_unittest.cc \
-    update_manager/real_random_provider_unittest.cc \
-    update_manager/real_system_provider_unittest.cc \
-    update_manager/real_time_provider_unittest.cc \
-    update_manager/real_updater_provider_unittest.cc \
-    update_manager/umtest_utils.cc \
-    update_manager/update_manager_unittest.cc \
-    update_manager/variable_unittest.cc
-else  # local_use_omaha == 1
-LOCAL_STATIC_LIBRARIES += \
-    libupdate_engine_android \
-    $(ue_libupdate_engine_android_exported_static_libraries:-host=)
-LOCAL_SHARED_LIBRARIES += \
-    $(ue_libupdate_engine_android_exported_shared_libraries:-host=)
-LOCAL_SRC_FILES += \
-    update_attempter_android_unittest.cc
-endif  # local_use_omaha == 1
-include $(BUILD_NATIVE_TEST)
-
-# Update payload signing public key.
-# ========================================================
-ifeq ($(PRODUCT_IOT),true)
-include $(CLEAR_VARS)
-LOCAL_MODULE := brillo-update-payload-key
-LOCAL_MODULE_CLASS := ETC
-LOCAL_MODULE_PATH := $(TARGET_OUT_ETC)/update_engine
-LOCAL_MODULE_STEM := update-payload-key.pub.pem
-LOCAL_SRC_FILES := update_payload_key/brillo-update-payload-key.pub.pem
-LOCAL_BUILT_MODULE_STEM := update_payload_key/brillo-update-payload-key.pub.pem
-include $(BUILD_PREBUILT)
-endif  # PRODUCT_IOT
-
-# Brillo update payload generation script
-# ========================================================
-ifeq ($(HOST_OS),linux)
-include $(CLEAR_VARS)
-LOCAL_SRC_FILES := scripts/brillo_update_payload
-LOCAL_MODULE := brillo_update_payload
-LOCAL_MODULE_CLASS := EXECUTABLES
-LOCAL_IS_HOST_MODULE := true
-LOCAL_MODULE_TAGS := optional
-LOCAL_REQUIRED_MODULES := \
-    delta_generator \
-    shflags \
-    simg2img
-include $(BUILD_PREBUILT)
-endif  # HOST_OS == linux
-
-endif  # ifneq ($(TARGET_BUILD_PDK),true)
diff --git a/CPPLINT.cfg b/CPPLINT.cfg
index 3dd0f35..f7dde21 100644
--- a/CPPLINT.cfg
+++ b/CPPLINT.cfg
@@ -1,3 +1,4 @@
 # This should be kept in sync with platform2/CPPLINT.cfg
 set noparent
+
 filter=-build/include_order,+build/include_alpha,-build/header_guard
diff --git a/OWNERS b/OWNERS
index 0bf7587..07ee38e 100644
--- a/OWNERS
+++ b/OWNERS
@@ -7,3 +7,4 @@
 # Chromium OS maintainers:
 benchan@google.com
 ahassani@google.com
+xiaochu@google.com
diff --git a/PRESUBMIT.cfg b/PRESUBMIT.cfg
index 3b8b271..f2c7831 100644
--- a/PRESUBMIT.cfg
+++ b/PRESUBMIT.cfg
@@ -3,5 +3,6 @@
 hook1=../../../platform2/common-mk/gyplint.py ${PRESUBMIT_FILES}
 
 [Hook Overrides]
+clang_format_check: true
 cros_license_check: false
 aosp_license_check: true
diff --git a/UpdateEngine.conf b/UpdateEngine.conf
index 58cca09..9490096 100644
--- a/UpdateEngine.conf
+++ b/UpdateEngine.conf
@@ -53,6 +53,9 @@
            send_member="SetUpdateOverCellularPermission"/>
     <allow send_destination="org.chromium.UpdateEngine"
            send_interface="org.chromium.UpdateEngineInterface"
+           send_member="SetUpdateOverCellularTarget"/>
+    <allow send_destination="org.chromium.UpdateEngine"
+           send_interface="org.chromium.UpdateEngineInterface"
            send_member="GetUpdateOverCellularPermission"/>
     <allow send_destination="org.chromium.UpdateEngine"
            send_interface="org.chromium.UpdateEngineInterface"
@@ -73,4 +76,12 @@
            send_interface="org.chromium.UpdateEngineInterface"
            send_member="GetStatus"/>
   </policy>
+  <policy user="dlcservice">
+    <allow send_destination="org.chromium.UpdateEngine"
+           send_interface="org.chromium.UpdateEngineInterface"
+           send_member="GetStatus"/>
+    <allow send_destination="org.chromium.UpdateEngine"
+           send_interface="org.chromium.UpdateEngineInterface"
+           send_member="AttemptInstall"/>
+  </policy>
 </busconfig>
diff --git a/WATCHLISTS b/WATCHLISTS
deleted file mode 100644
index bcce0de..0000000
--- a/WATCHLISTS
+++ /dev/null
@@ -1,14 +0,0 @@
-# See http://dev.chromium.org/developers/contributing-code/watchlists for
-# a description of this file's format.
-# Please keep these keys in alphabetical order.
-
-{
-  'WATCHLIST_DEFINITIONS': {
-    'all': {
-      'filepath': '.',
-    },
-  },
-  'WATCHLISTS': {
-    'all': ['adlr@chromium.org', 'petkov@chromium.org']
-  },
-}
diff --git a/binder_bindings/android/brillo/IUpdateEngine.aidl b/binder_bindings/android/brillo/IUpdateEngine.aidl
index e549a4d..56e1524 100644
--- a/binder_bindings/android/brillo/IUpdateEngine.aidl
+++ b/binder_bindings/android/brillo/IUpdateEngine.aidl
@@ -34,6 +34,8 @@
   void SetP2PUpdatePermission(in boolean enabled);
   boolean GetP2PUpdatePermission();
   void SetUpdateOverCellularPermission(in boolean enabled);
+  void SetUpdateOverCellularTarget(in String target_version,
+                                   in long target_size);
   boolean GetUpdateOverCellularPermission();
   long GetDurationSinceUpdate();
   String GetPrevVersion();
diff --git a/binder_service_android.cc b/binder_service_android.cc
index 1702ead..137694a 100644
--- a/binder_service_android.cc
+++ b/binder_service_android.cc
@@ -37,8 +37,7 @@
 
 BinderUpdateEngineAndroidService::BinderUpdateEngineAndroidService(
     ServiceDelegateAndroidInterface* service_delegate)
-    : service_delegate_(service_delegate) {
-}
+    : service_delegate_(service_delegate) {}
 
 void BinderUpdateEngineAndroidService::SendStatusUpdate(
     const UpdateEngineStatus& update_engine_status) {
diff --git a/binder_service_android.h b/binder_service_android.h
index 694b80a..d8c4e9c 100644
--- a/binder_service_android.h
+++ b/binder_service_android.h
@@ -40,9 +40,7 @@
       ServiceDelegateAndroidInterface* service_delegate);
   ~BinderUpdateEngineAndroidService() override = default;
 
-  const char* ServiceName() const {
-    return "android.os.UpdateEngineService";
-  }
+  const char* ServiceName() const { return "android.os.UpdateEngineService"; }
 
   // ServiceObserverInterface overrides.
   void SendStatusUpdate(
diff --git a/binder_service_brillo.cc b/binder_service_brillo.cc
index 3f01e42..cc74763 100644
--- a/binder_service_brillo.cc
+++ b/binder_service_brillo.cc
@@ -25,12 +25,12 @@
 
 #include "update_engine/update_status_utils.h"
 
+using android::sp;
 using android::String16;
 using android::String8;
 using android::binder::Status;
 using android::brillo::IUpdateEngineStatusCallback;
 using android::brillo::ParcelableUpdateEngineStatus;
-using android::sp;
 using brillo::ErrorPtr;
 using std::string;
 using update_engine::UpdateEngineStatus;
@@ -153,6 +153,13 @@
       &UpdateEngineService::SetUpdateOverCellularPermission, enabled);
 }
 
+Status BinderUpdateEngineBrilloService::SetUpdateOverCellularTarget(
+    const String16& target_version, int64_t target_size) {
+  return CallCommonHandler(&UpdateEngineService::SetUpdateOverCellularTarget,
+                           NormalString(target_version),
+                           target_size);
+}
+
 Status BinderUpdateEngineBrilloService::GetUpdateOverCellularPermission(
     bool* out_cellular_permission) {
   return CallCommonHandler(
diff --git a/binder_service_brillo.h b/binder_service_brillo.h
index c802fca..d0d0dc9 100644
--- a/binder_service_brillo.h
+++ b/binder_service_brillo.h
@@ -75,6 +75,8 @@
       bool* out_p2p_permission) override;
   android::binder::Status SetUpdateOverCellularPermission(
       bool enabled) override;
+  android::binder::Status SetUpdateOverCellularTarget(
+      const android::String16& target_version, int64_t target_size) override;
   android::binder::Status GetUpdateOverCellularPermission(
       bool* out_cellular_permission) override;
   android::binder::Status GetDurationSinceUpdate(
diff --git a/boot_control_android.cc b/boot_control_android.cc
index 8c1603b..8909cd9 100644
--- a/boot_control_android.cc
+++ b/boot_control_android.cc
@@ -16,24 +16,36 @@
 
 #include "update_engine/boot_control_android.h"
 
+#include <memory>
+#include <utility>
+#include <vector>
+
 #include <base/bind.h>
-#include <base/files/file_util.h>
 #include <base/logging.h>
 #include <base/strings/string_util.h>
+#include <bootloader_message/bootloader_message.h>
 #include <brillo/message_loops/message_loop.h>
+#include <fs_mgr.h>
+#include <fs_mgr_overlayfs.h>
 
 #include "update_engine/common/utils.h"
-#include "update_engine/utils_android.h"
+#include "update_engine/dynamic_partition_control_android.h"
 
 using std::string;
 
+using android::dm::DmDeviceState;
+using android::fs_mgr::Partition;
+using android::hardware::hidl_string;
 using android::hardware::Return;
 using android::hardware::boot::V1_0::BoolResult;
 using android::hardware::boot::V1_0::CommandResult;
 using android::hardware::boot::V1_0::IBootControl;
-using android::hardware::hidl_string;
+using Slot = chromeos_update_engine::BootControlInterface::Slot;
+using PartitionMetadata =
+    chromeos_update_engine::BootControlInterface::PartitionMetadata;
 
 namespace {
+
 auto StoreResultCallback(CommandResult* dest) {
   return [dest](const CommandResult& result) { *dest = result; };
 }
@@ -45,7 +57,7 @@
 
 // Factory defined in boot_control.h.
 std::unique_ptr<BootControlInterface> CreateBootControl() {
-  std::unique_ptr<BootControlAndroid> boot_control(new BootControlAndroid());
+  auto boot_control = std::make_unique<BootControlAndroid>();
   if (!boot_control->Init()) {
     return nullptr;
   }
@@ -63,9 +75,15 @@
 
   LOG(INFO) << "Loaded boot control hidl hal.";
 
+  dynamic_control_ = std::make_unique<DynamicPartitionControlAndroid>();
+
   return true;
 }
 
+void BootControlAndroid::Cleanup() {
+  dynamic_control_->Cleanup();
+}
+
 unsigned int BootControlAndroid::GetNumSlots() const {
   return module_->getNumberSlots();
 }
@@ -74,41 +92,9 @@
   return module_->getCurrentSlot();
 }
 
-bool BootControlAndroid::GetPartitionDevice(const string& partition_name,
-                                            Slot slot,
-                                            string* device) const {
-  // We can't use fs_mgr to look up |partition_name| because fstab
-  // doesn't list every slot partition (it uses the slotselect option
-  // to mask the suffix).
-  //
-  // We can however assume that there's an entry for the /misc mount
-  // point and use that to get the device file for the misc
-  // partition. This helps us locate the disk that |partition_name|
-  // resides on. From there we'll assume that a by-name scheme is used
-  // so we can just replace the trailing "misc" by the given
-  // |partition_name| and suffix corresponding to |slot|, e.g.
-  //
-  //   /dev/block/platform/soc.0/7824900.sdhci/by-name/misc ->
-  //   /dev/block/platform/soc.0/7824900.sdhci/by-name/boot_a
-  //
-  // If needed, it's possible to relax the by-name assumption in the
-  // future by trawling /sys/block looking for the appropriate sibling
-  // of misc and then finding an entry in /dev matching the sysfs
-  // entry.
-
-  base::FilePath misc_device;
-  if (!utils::DeviceForMountPoint("/misc", &misc_device))
-    return false;
-
-  if (!utils::IsSymlink(misc_device.value().c_str())) {
-    LOG(ERROR) << "Device file " << misc_device.value() << " for /misc "
-               << "is not a symlink.";
-    return false;
-  }
-
-  string suffix;
+bool BootControlAndroid::GetSuffix(Slot slot, string* suffix) const {
   auto store_suffix_cb = [&suffix](hidl_string cb_suffix) {
-    suffix = cb_suffix.c_str();
+    *suffix = cb_suffix.c_str();
   };
   Return<void> ret = module_->getSuffix(slot, store_suffix_cb);
 
@@ -117,9 +103,123 @@
                << SlotName(slot);
     return false;
   }
+  return true;
+}
 
-  base::FilePath path = misc_device.DirName().Append(partition_name + suffix);
-  if (!base::PathExists(path)) {
+bool BootControlAndroid::IsSuperBlockDevice(
+    const base::FilePath& device_dir,
+    Slot slot,
+    const string& partition_name_suffix) const {
+  string source_device =
+      device_dir.Append(fs_mgr_get_super_partition_name(slot)).value();
+  auto source_metadata = dynamic_control_->LoadMetadataBuilder(
+      source_device, slot, BootControlInterface::kInvalidSlot);
+  return source_metadata != nullptr &&
+         source_metadata->HasBlockDevice(partition_name_suffix);
+}
+
+BootControlAndroid::DynamicPartitionDeviceStatus
+BootControlAndroid::GetDynamicPartitionDevice(
+    const base::FilePath& device_dir,
+    const string& partition_name_suffix,
+    Slot slot,
+    string* device) const {
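+  // The name of the super partition may depend on |slot| (e.g. on retrofit
+  // devices, where the super metadata lives on slotted partitions).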
+  string super_device =
+      device_dir.Append(fs_mgr_get_super_partition_name(slot)).value();
+
+  auto builder = dynamic_control_->LoadMetadataBuilder(
+      super_device, slot, BootControlInterface::kInvalidSlot);
+
+  if (builder == nullptr) {
+    LOG(ERROR) << "No metadata in slot "
+               << BootControlInterface::SlotName(slot);
+    return DynamicPartitionDeviceStatus::ERROR;
+  }
+
+  if (builder->FindPartition(partition_name_suffix) == nullptr) {
+    LOG(INFO) << partition_name_suffix
+              << " is not in super partition metadata.";
+
+    Slot current_slot = GetCurrentSlot();
+    if (IsSuperBlockDevice(device_dir, current_slot, partition_name_suffix)) {
+      LOG(ERROR) << "The static partition " << partition_name_suffix
+                 << " is a block device for current metadata ("
+                 << fs_mgr_get_super_partition_name(current_slot) << ", slot "
+                 << BootControlInterface::SlotName(current_slot)
+                 << "). It cannot be used as a logical partition.";
+      return DynamicPartitionDeviceStatus::ERROR;
+    }
+
+    return DynamicPartitionDeviceStatus::TRY_STATIC;
+  }
+
+  DmDeviceState state = dynamic_control_->GetState(partition_name_suffix);
+
+  // Device is mapped in the previous GetPartitionDevice() call. Just return
+  // the path.
+  if (state == DmDeviceState::ACTIVE) {
+    if (dynamic_control_->GetDmDevicePathByName(partition_name_suffix,
+                                                device)) {
+      LOG(INFO) << partition_name_suffix
+                << " is mapped on device mapper: " << *device;
+      return DynamicPartitionDeviceStatus::SUCCESS;
+    }
+    LOG(ERROR) << partition_name_suffix << " is mapped but path is unknown.";
+    return DynamicPartitionDeviceStatus::ERROR;
+  }
+
+  if (state == DmDeviceState::INVALID) {
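+    // The partition is not mapped yet; map it now. Only a non-current slot
+    // (i.e. the target slot) needs to be writable, since that is the slot
+    // being written during the update.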
+    bool force_writable = slot != GetCurrentSlot();
+    if (dynamic_control_->MapPartitionOnDeviceMapper(super_device,
+                                                     partition_name_suffix,
+                                                     slot,
+                                                     force_writable,
+                                                     device)) {
+      return DynamicPartitionDeviceStatus::SUCCESS;
+    }
+    return DynamicPartitionDeviceStatus::ERROR;
+  }
+
+  LOG(ERROR) << partition_name_suffix
+             << " is mapped on device mapper but state is unknown: "
+             << static_cast<std::underlying_type_t<DmDeviceState>>(state);
+  return DynamicPartitionDeviceStatus::ERROR;
+}
+
+bool BootControlAndroid::GetPartitionDevice(const string& partition_name,
+                                            Slot slot,
+                                            string* device) const {
+  string suffix;
+  if (!GetSuffix(slot, &suffix)) {
+    return false;
+  }
+  const string partition_name_suffix = partition_name + suffix;
+
+  string device_dir_str;
+  if (!dynamic_control_->GetDeviceDir(&device_dir_str)) {
+    return false;
+  }
+  base::FilePath device_dir(device_dir_str);
+
+  // When looking up target partition devices, treat them as static if the
+  // current payload doesn't encode them as dynamic partitions. This may happen
+  // when applying a retrofit update on top of a dynamic-partitions-enabled
+  // build.
+  if (dynamic_control_->IsDynamicPartitionsEnabled() &&
+      (slot == GetCurrentSlot() || is_target_dynamic_)) {
+    switch (GetDynamicPartitionDevice(
+        device_dir, partition_name_suffix, slot, device)) {
+      case DynamicPartitionDeviceStatus::SUCCESS:
+        return true;
+      case DynamicPartitionDeviceStatus::TRY_STATIC:
+        break;
+      case DynamicPartitionDeviceStatus::ERROR:  // fallthrough
+      default:
+        return false;
+    }
+  }
+
+  base::FilePath path = device_dir.Append(partition_name_suffix);
+  if (!dynamic_control_->DeviceExists(path.value())) {
     LOG(ERROR) << "Device file " << path.value() << " does not exist.";
     return false;
   }
@@ -132,8 +232,7 @@
   Return<BoolResult> ret = module_->isSlotBootable(slot);
   if (!ret.isOk()) {
     LOG(ERROR) << "Unable to determine if slot " << SlotName(slot)
-               << " is bootable: "
-               << ret.description();
+               << " is bootable: " << ret.description();
     return false;
   }
   if (ret == BoolResult::INVALID_SLOT) {
@@ -148,8 +247,7 @@
   auto ret = module_->setSlotAsUnbootable(slot, StoreResultCallback(&result));
   if (!ret.isOk()) {
     LOG(ERROR) << "Unable to call MarkSlotUnbootable for slot "
-               << SlotName(slot) << ": "
-               << ret.description();
+               << SlotName(slot) << ": " << ret.description();
     return false;
   }
   if (!result.success) {
@@ -179,8 +277,7 @@
   CommandResult result;
   auto ret = module_->markBootSuccessful(StoreResultCallback(&result));
   if (!ret.isOk()) {
-    LOG(ERROR) << "Unable to call MarkBootSuccessful: "
-               << ret.description();
+    LOG(ERROR) << "Unable to call MarkBootSuccessful: " << ret.description();
     return false;
   }
   if (!result.success) {
@@ -191,4 +288,160 @@
          brillo::MessageLoop::kTaskIdNull;
 }
 
+namespace {
+
+bool UpdatePartitionMetadata(DynamicPartitionControlInterface* dynamic_control,
+                             Slot source_slot,
+                             Slot target_slot,
+                             const string& target_suffix,
+                             const PartitionMetadata& partition_metadata) {
+  string device_dir_str;
+  if (!dynamic_control->GetDeviceDir(&device_dir_str)) {
+    return false;
+  }
+  base::FilePath device_dir(device_dir_str);
+  auto source_device =
+      device_dir.Append(fs_mgr_get_super_partition_name(source_slot)).value();
+
+  auto builder = dynamic_control->LoadMetadataBuilder(
+      source_device, source_slot, target_slot);
+  if (builder == nullptr) {
+    // TODO(elsk): allow reconstructing metadata from partition_metadata
+    // in recovery sideload.
+    LOG(ERROR) << "No metadata at "
+               << BootControlInterface::SlotName(source_slot);
+    return false;
+  }
+
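+  // Remove all existing groups (and their partitions) that belong to the
+  // target slot; they are re-created below from the payload's metadata.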
+  std::vector<string> groups = builder->ListGroups();
+  for (const auto& group_name : groups) {
+    if (base::EndsWith(
+            group_name, target_suffix, base::CompareCase::SENSITIVE)) {
+      LOG(INFO) << "Removing group " << group_name;
+      builder->RemoveGroupAndPartitions(group_name);
+    }
+  }
+
+  uint64_t total_size = 0;
+  for (const auto& group : partition_metadata.groups) {
+    total_size += group.size;
+  }
+
+  string expr;
+  uint64_t allocatable_space = builder->AllocatableSpace();
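+  // On non-retrofit (launch) devices a single super partition serves both
+  // slots, so the groups of one slot may only claim half of the allocatable
+  // space.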
+  if (!dynamic_control->IsDynamicPartitionsRetrofit()) {
+    allocatable_space /= 2;
+    expr = "half of ";
+  }
+  if (total_size > allocatable_space) {
+    LOG(ERROR) << "The maximum size of all groups with suffix " << target_suffix
+               << " (" << total_size << ") has exceeded " << expr
+               << " allocatable space for dynamic partitions "
+               << allocatable_space << ".";
+    return false;
+  }
+
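+  // Re-create each target group and its partitions with the sizes requested
+  // by the payload.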
+  for (const auto& group : partition_metadata.groups) {
+    auto group_name_suffix = group.name + target_suffix;
+    if (!builder->AddGroup(group_name_suffix, group.size)) {
+      LOG(ERROR) << "Cannot add group " << group_name_suffix << " with size "
+                 << group.size;
+      return false;
+    }
+    LOG(INFO) << "Added group " << group_name_suffix << " with size "
+              << group.size;
+
+    for (const auto& partition : group.partitions) {
+      auto partition_name_suffix = partition.name + target_suffix;
+      Partition* p = builder->AddPartition(
+          partition_name_suffix, group_name_suffix, LP_PARTITION_ATTR_READONLY);
+      if (!p) {
+        LOG(ERROR) << "Cannot add partition " << partition_name_suffix
+                   << " to group " << group_name_suffix;
+        return false;
+      }
+      if (!builder->ResizePartition(p, partition.size)) {
+        LOG(ERROR) << "Cannot resize partition " << partition_name_suffix
+                   << " to size " << partition.size << ". Not enough space?";
+        return false;
+      }
+      LOG(INFO) << "Added partition " << partition_name_suffix << " to group "
+                << group_name_suffix << " with size " << partition.size;
+    }
+  }
+
+  auto target_device =
+      device_dir.Append(fs_mgr_get_super_partition_name(target_slot)).value();
+  return dynamic_control->StoreMetadata(
+      target_device, builder.get(), target_slot);
+}
+
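+// Unmap every partition listed in |partition_metadata| (with |target_suffix|
+// appended) from device-mapper, waiting for each removal to complete.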
+bool UnmapTargetPartitions(DynamicPartitionControlInterface* dynamic_control,
+                           const string& target_suffix,
+                           const PartitionMetadata& partition_metadata) {
+  for (const auto& group : partition_metadata.groups) {
+    for (const auto& partition : group.partitions) {
+      if (!dynamic_control->UnmapPartitionOnDeviceMapper(
+              partition.name + target_suffix, true /* wait */)) {
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
+}  // namespace
+
+bool BootControlAndroid::InitPartitionMetadata(
+    Slot target_slot,
+    const PartitionMetadata& partition_metadata,
+    bool update_metadata) {
+  if (fs_mgr_overlayfs_is_setup()) {
+    // Non-DAP devices can use overlayfs as well.
+    LOG(WARNING)
+        << "overlayfs overrides are active and can interfere with our "
+           "resources.\n"
+        << "run adb enable-verity to deactivate if required and try again.";
+  }
+  if (!dynamic_control_->IsDynamicPartitionsEnabled()) {
+    return true;
+  }
+
+  auto source_slot = GetCurrentSlot();
+  if (target_slot == source_slot) {
+    LOG(ERROR) << "Cannot call InitPartitionMetadata on current slot.";
+    return false;
+  }
+
+  // Although the current build supports dynamic partitions, the given payload
+  // doesn't use them for the target partitions. This could happen when
+  // applying a retrofit update. Skip updating the partition metadata for the
+  // target slot.
+  is_target_dynamic_ = !partition_metadata.groups.empty();
+  if (!is_target_dynamic_) {
+    return true;
+  }
+
+  if (!update_metadata) {
+    return true;
+  }
+
+  string target_suffix;
+  if (!GetSuffix(target_slot, &target_suffix)) {
+    return false;
+  }
+
+  // Unmap all the target dynamic partitions because they would become
+  // inconsistent with the new metadata.
+  if (!UnmapTargetPartitions(
+          dynamic_control_.get(), target_suffix, partition_metadata)) {
+    return false;
+  }
+
+  return UpdatePartitionMetadata(dynamic_control_.get(),
+                                 source_slot,
+                                 target_slot,
+                                 target_suffix,
+                                 partition_metadata);
+}
+
 }  // namespace chromeos_update_engine
diff --git a/boot_control_android.h b/boot_control_android.h
index 1de0e41..a6f33be 100644
--- a/boot_control_android.h
+++ b/boot_control_android.h
@@ -17,11 +17,16 @@
 #ifndef UPDATE_ENGINE_BOOT_CONTROL_ANDROID_H_
 #define UPDATE_ENGINE_BOOT_CONTROL_ANDROID_H_
 
+#include <map>
+#include <memory>
 #include <string>
 
 #include <android/hardware/boot/1.0/IBootControl.h>
+#include <base/files/file_util.h>
+#include <liblp/builder.h>
 
 #include "update_engine/common/boot_control.h"
+#include "update_engine/dynamic_partition_control_interface.h"
 
 namespace chromeos_update_engine {
 
@@ -46,9 +51,41 @@
   bool MarkSlotUnbootable(BootControlInterface::Slot slot) override;
   bool SetActiveBootSlot(BootControlInterface::Slot slot) override;
   bool MarkBootSuccessfulAsync(base::Callback<void(bool)> callback) override;
+  bool InitPartitionMetadata(Slot slot,
+                             const PartitionMetadata& partition_metadata,
+                             bool update_metadata) override;
+  void Cleanup() override;
 
  private:
   ::android::sp<::android::hardware::boot::V1_0::IBootControl> module_;
+  std::unique_ptr<DynamicPartitionControlInterface> dynamic_control_;
+
+  friend class BootControlAndroidTest;
+
+  // Wrapper method of IBootControl::getSuffix().
+  bool GetSuffix(Slot slot, std::string* out) const;
+
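+  // Result of GetDynamicPartitionDevice(): SUCCESS means |device| holds the
+  // mapped dynamic partition path, TRY_STATIC means the caller should fall
+  // back to the static by-name device, and ERROR aborts the lookup.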
+  enum class DynamicPartitionDeviceStatus {
+    SUCCESS,
+    ERROR,
+    TRY_STATIC,
+  };
+
+  DynamicPartitionDeviceStatus GetDynamicPartitionDevice(
+      const base::FilePath& device_dir,
+      const std::string& partition_name_suffix,
+      Slot slot,
+      std::string* device) const;
+
+  // Returns true if |partition_name_suffix| is listed as a block device in
+  // the super partition metadata of slot |slot|.
+  bool IsSuperBlockDevice(const base::FilePath& device_dir,
+                          Slot slot,
+                          const std::string& partition_name_suffix) const;
+
+  // Whether the target partitions should be loaded as dynamic partitions. Set
+  // by InitPartitionMetadata() for each update.
+  bool is_target_dynamic_{false};
 
   DISALLOW_COPY_AND_ASSIGN(BootControlAndroid);
 };
diff --git a/boot_control_android_unittest.cc b/boot_control_android_unittest.cc
new file mode 100644
index 0000000..bb9903e
--- /dev/null
+++ b/boot_control_android_unittest.cc
@@ -0,0 +1,853 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/boot_control_android.h"
+
+#include <set>
+#include <vector>
+
+#include <base/logging.h>
+#include <base/strings/string_util.h>
+#include <fs_mgr.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <libdm/dm.h>
+
+#include "update_engine/mock_boot_control_hal.h"
+#include "update_engine/mock_dynamic_partition_control.h"
+
+using android::dm::DmDeviceState;
+using android::fs_mgr::MetadataBuilder;
+using android::hardware::Void;
+using std::string;
+using testing::_;
+using testing::AnyNumber;
+using testing::Contains;
+using testing::Eq;
+using testing::Invoke;
+using testing::Key;
+using testing::MakeMatcher;
+using testing::Matcher;
+using testing::MatcherInterface;
+using testing::MatchResultListener;
+using testing::NiceMock;
+using testing::Not;
+using testing::Return;
+
+namespace chromeos_update_engine {
+
+constexpr const uint32_t kMaxNumSlots = 2;
+constexpr const char* kSlotSuffixes[kMaxNumSlots] = {"_a", "_b"};
+constexpr const char* kFakeDevicePath = "/fake/dev/path/";
+constexpr const char* kFakeDmDevicePath = "/fake/dm/dev/path/";
+constexpr const uint32_t kFakeMetadataSize = 65536;
+constexpr const char* kDefaultGroup = "foo";
+
+// A map describing the size of each partition.
+// "{name, size}"
+using PartitionSizes = std::map<string, uint64_t>;
+
+// "{name_a, size}"
+using PartitionSuffixSizes = std::map<string, uint64_t>;
+
+using PartitionMetadata = BootControlInterface::PartitionMetadata;
+
+// The C++ standard only allows unsigned long long (not uint64_t, which may be
+// an alias of unsigned long) as the parameter of user-defined integer literal
+// operators.
+constexpr unsigned long long operator"" _MiB(unsigned long long x) {  // NOLINT
+  return x << 20;
+}
+constexpr unsigned long long operator"" _GiB(unsigned long long x) {  // NOLINT
+  return x << 30;
+}
+
+constexpr uint64_t kDefaultGroupSize = 5_GiB;
+// Super device size. 1 MiB for metadata.
+constexpr uint64_t kDefaultSuperSize = kDefaultGroupSize * 2 + 1_MiB;
+
+template <typename U, typename V>
+std::ostream& operator<<(std::ostream& os, const std::map<U, V>& param) {
+  os << "{";
+  bool first = true;
+  for (const auto& pair : param) {
+    if (!first)
+      os << ", ";
+    os << pair.first << ":" << pair.second;
+    first = false;
+  }
+  return os << "}";
+}
+
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const std::vector<T>& param) {
+  os << "[";
+  bool first = true;
+  for (const auto& e : param) {
+    if (!first)
+      os << ", ";
+    os << e;
+    first = false;
+  }
+  return os << "]";
+}
+
+std::ostream& operator<<(std::ostream& os,
+                         const PartitionMetadata::Partition& p) {
+  return os << "{" << p.name << ", " << p.size << "}";
+}
+
+std::ostream& operator<<(std::ostream& os, const PartitionMetadata::Group& g) {
+  return os << "{" << g.name << ", " << g.size << ", " << g.partitions << "}";
+}
+
+std::ostream& operator<<(std::ostream& os, const PartitionMetadata& m) {
+  return os << m.groups;
+}
+
+inline string GetDevice(const string& name) {
+  return kFakeDevicePath + name;
+}
+
+inline string GetDmDevice(const string& name) {
+  return kFakeDmDevicePath + name;
+}
+
+// TODO(elsk): fs_mgr_get_super_partition_name should be mocked.
+inline string GetSuperDevice(uint32_t slot) {
+  return GetDevice(fs_mgr_get_super_partition_name(slot));
+}
+
+struct TestParam {
+  uint32_t source;
+  uint32_t target;
+};
+std::ostream& operator<<(std::ostream& os, const TestParam& param) {
+  return os << "{source: " << param.source << ", target:" << param.target
+            << "}";
+}
+
+// To support legacy tests, auto-convert {name_a: size} map to
+// PartitionMetadata.
+PartitionMetadata partitionSuffixSizesToMetadata(
+    const PartitionSuffixSizes& partition_sizes) {
+  PartitionMetadata metadata;
+  for (const char* suffix : kSlotSuffixes) {
+    metadata.groups.push_back(
+        {string(kDefaultGroup) + suffix, kDefaultGroupSize, {}});
+  }
+  for (const auto& pair : partition_sizes) {
+    for (size_t suffix_idx = 0; suffix_idx < kMaxNumSlots; ++suffix_idx) {
+      if (base::EndsWith(pair.first,
+                         kSlotSuffixes[suffix_idx],
+                         base::CompareCase::SENSITIVE)) {
+        metadata.groups[suffix_idx].partitions.push_back(
+            {pair.first, pair.second});
+      }
+    }
+  }
+  return metadata;
+}
+
+// To support legacy tests, auto-convert {name: size} map to PartitionMetadata.
+PartitionMetadata partitionSizesToMetadata(
+    const PartitionSizes& partition_sizes) {
+  PartitionMetadata metadata;
+  metadata.groups.push_back({string{kDefaultGroup}, kDefaultGroupSize, {}});
+  for (const auto& pair : partition_sizes) {
+    metadata.groups[0].partitions.push_back({pair.first, pair.second});
+  }
+  return metadata;
+}
+
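+// Build an in-memory MetadataBuilder mirroring |metadata|, used as the return
+// value of the mocked LoadMetadataBuilder() in these tests.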
+std::unique_ptr<MetadataBuilder> NewFakeMetadata(
+    const PartitionMetadata& metadata) {
+  auto builder =
+      MetadataBuilder::New(kDefaultSuperSize, kFakeMetadataSize, kMaxNumSlots);
+  EXPECT_NE(nullptr, builder);
+  if (builder == nullptr)
+    return nullptr;
+  EXPECT_GE(builder->AllocatableSpace(), kDefaultGroupSize * 2);
+  for (const auto& group : metadata.groups) {
+    EXPECT_TRUE(builder->AddGroup(group.name, group.size));
+    for (const auto& partition : group.partitions) {
+      auto p = builder->AddPartition(partition.name, group.name, 0 /* attr */);
+      EXPECT_TRUE(p && builder->ResizePartition(p, partition.size));
+    }
+  }
+  return builder;
+}
+
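+// gmock matcher checking that a MetadataBuilder contains every partition of
+// the expected PartitionMetadata with the expected size and group assignment.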
+class MetadataMatcher : public MatcherInterface<MetadataBuilder*> {
+ public:
+  explicit MetadataMatcher(const PartitionSuffixSizes& partition_sizes)
+      : partition_metadata_(partitionSuffixSizesToMetadata(partition_sizes)) {}
+  explicit MetadataMatcher(const PartitionMetadata& partition_metadata)
+      : partition_metadata_(partition_metadata) {}
+
+  bool MatchAndExplain(MetadataBuilder* metadata,
+                       MatchResultListener* listener) const override {
+    bool success = true;
+    for (const auto& group : partition_metadata_.groups) {
+      for (const auto& partition : group.partitions) {
+        auto p = metadata->FindPartition(partition.name);
+        if (p == nullptr) {
+          if (!success)
+            *listener << "; ";
+          *listener << "No partition " << partition.name;
+          success = false;
+          continue;
+        }
+        if (p->size() != partition.size) {
+          if (!success)
+            *listener << "; ";
+          *listener << "Partition " << partition.name << " has size "
+                    << p->size() << ", expected " << partition.size;
+          success = false;
+        }
+        if (p->group_name() != group.name) {
+          if (!success)
+            *listener << "; ";
+          *listener << "Partition " << partition.name << " has group "
+                    << p->group_name() << ", expected " << group.name;
+          success = false;
+        }
+      }
+    }
+    return success;
+  }
+
+  void DescribeTo(std::ostream* os) const override {
+    *os << "expect: " << partition_metadata_;
+  }
+
+  void DescribeNegationTo(std::ostream* os) const override {
+    *os << "expect not: " << partition_metadata_;
+  }
+
+ private:
+  PartitionMetadata partition_metadata_;
+};
+
+inline Matcher<MetadataBuilder*> MetadataMatches(
+    const PartitionSuffixSizes& partition_sizes) {
+  return MakeMatcher(new MetadataMatcher(partition_sizes));
+}
+
+inline Matcher<MetadataBuilder*> MetadataMatches(
+    const PartitionMetadata& partition_metadata) {
+  return MakeMatcher(new MetadataMatcher(partition_metadata));
+}
+
+MATCHER_P(HasGroup, group, " has group " + group) {
+  auto groups = arg->ListGroups();
+  return std::find(groups.begin(), groups.end(), group) != groups.end();
+}
+
+class BootControlAndroidTest : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    // Fake-initialize bootctl_ with a mocked HAL and dynamic partition
+    // control.
+    bootctl_.module_ = new NiceMock<MockBootControlHal>();
+    bootctl_.dynamic_control_ =
+        std::make_unique<NiceMock<MockDynamicPartitionControl>>();
+
+    ON_CALL(module(), getNumberSlots()).WillByDefault(Invoke([] {
+      return kMaxNumSlots;
+    }));
+    ON_CALL(module(), getSuffix(_, _))
+        .WillByDefault(Invoke([](auto slot, auto cb) {
+          EXPECT_LE(slot, kMaxNumSlots);
+          cb(slot < kMaxNumSlots ? kSlotSuffixes[slot] : "");
+          return Void();
+        }));
+
+    ON_CALL(dynamicControl(), IsDynamicPartitionsEnabled())
+        .WillByDefault(Return(true));
+    ON_CALL(dynamicControl(), IsDynamicPartitionsRetrofit())
+        .WillByDefault(Return(false));
+    ON_CALL(dynamicControl(), DeviceExists(_)).WillByDefault(Return(true));
+    ON_CALL(dynamicControl(), GetDeviceDir(_))
+        .WillByDefault(Invoke([](auto path) {
+          *path = kFakeDevicePath;
+          return true;
+        }));
+    ON_CALL(dynamicControl(), GetDmDevicePathByName(_, _))
+        .WillByDefault(Invoke([](auto partition_name_suffix, auto device) {
+          *device = GetDmDevice(partition_name_suffix);
+          return true;
+        }));
+  }
+
+  // Return the mocked HAL module.
+  NiceMock<MockBootControlHal>& module() {
+    return static_cast<NiceMock<MockBootControlHal>&>(*bootctl_.module_);
+  }
+
+  // Return the mocked DynamicPartitionControlInterface.
+  NiceMock<MockDynamicPartitionControl>& dynamicControl() {
+    return static_cast<NiceMock<MockDynamicPartitionControl>&>(
+        *bootctl_.dynamic_control_);
+  }
+
+  // Set the fake metadata to return when LoadMetadataBuilder is called on
+  // |slot|.
+  void SetMetadata(uint32_t slot, const PartitionSuffixSizes& sizes) {
+    SetMetadata(slot, partitionSuffixSizesToMetadata(sizes));
+  }
+
+  void SetMetadata(uint32_t slot, const PartitionMetadata& metadata) {
+    EXPECT_CALL(dynamicControl(),
+                LoadMetadataBuilder(GetSuperDevice(slot), slot, _))
+        .Times(AnyNumber())
+        .WillRepeatedly(Invoke([metadata](auto, auto, auto) {
+          return NewFakeMetadata(metadata);
+        }));
+  }
+
+  // Expect that UnmapPartitionOnDeviceMapper is called on target() metadata
+  // slot with each partition in |partitions|.
+  void ExpectUnmap(const std::set<string>& partitions) {
+    // Fail when UnmapPartitionOnDeviceMapper is called with unexpected
+    // arguments.
+    ON_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(_, _))
+        .WillByDefault(Return(false));
+
+    for (const auto& partition : partitions) {
+      EXPECT_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(partition, _))
+          .WillOnce(Invoke([this](auto partition, auto) {
+            mapped_devices_.erase(partition);
+            return true;
+          }));
+    }
+  }
+
+  void ExpectDevicesAreMapped(const std::set<string>& partitions) {
+    ASSERT_EQ(partitions.size(), mapped_devices_.size());
+    for (const auto& partition : partitions) {
+      EXPECT_THAT(mapped_devices_, Contains(Key(Eq(partition))))
+          << "Expect that " << partition << " is mapped, but it is not.";
+    }
+  }
+
+  void ExpectStoreMetadata(const PartitionSuffixSizes& partition_sizes) {
+    ExpectStoreMetadataMatch(MetadataMatches(partition_sizes));
+  }
+
+  virtual void ExpectStoreMetadataMatch(
+      const Matcher<MetadataBuilder*>& matcher) {
+    EXPECT_CALL(dynamicControl(),
+                StoreMetadata(GetSuperDevice(target()), matcher, target()))
+        .WillOnce(Return(true));
+  }
+
+  uint32_t source() { return slots_.source; }
+
+  uint32_t target() { return slots_.target; }
+
+  // Return partition names with suffix of source().
+  string S(const string& name) { return name + kSlotSuffixes[source()]; }
+
+  // Return partition names with suffix of target().
+  string T(const string& name) { return name + kSlotSuffixes[target()]; }
+
+  // Set source and target slots to use before testing.
+  void SetSlots(const TestParam& slots) {
+    slots_ = slots;
+
+    ON_CALL(module(), getCurrentSlot()).WillByDefault(Invoke([this] {
+      return source();
+    }));
+    // Should not store metadata to source slot.
+    EXPECT_CALL(dynamicControl(),
+                StoreMetadata(GetSuperDevice(source()), _, source()))
+        .Times(0);
+    // Should not load metadata from target slot.
+    EXPECT_CALL(dynamicControl(),
+                LoadMetadataBuilder(GetSuperDevice(target()), target(), _))
+        .Times(0);
+  }
+
+  bool InitPartitionMetadata(uint32_t slot,
+                             PartitionSizes partition_sizes,
+                             bool update_metadata = true) {
+    auto m = partitionSizesToMetadata(partition_sizes);
+    LOG(INFO) << m;
+    return bootctl_.InitPartitionMetadata(slot, m, update_metadata);
+  }
+
+  BootControlAndroid bootctl_;  // BootControlAndroid under test.
+  TestParam slots_;
+  // Devices mapped through MapPartitionOnDeviceMapper.
+  std::map<string, string> mapped_devices_;
+};
+
+class BootControlAndroidTestP
+    : public BootControlAndroidTest,
+      public ::testing::WithParamInterface<TestParam> {
+ public:
+  void SetUp() override {
+    BootControlAndroidTest::SetUp();
+    SetSlots(GetParam());
+  }
+};
+
+// Test resize case. Grow if target metadata contains a partition with a size
+// less than expected.
+TEST_P(BootControlAndroidTestP, NeedGrowIfSizeNotMatchWhenResizing) {
+  SetMetadata(source(),
+              {{S("system"), 2_GiB},
+               {S("vendor"), 1_GiB},
+               {T("system"), 2_GiB},
+               {T("vendor"), 1_GiB}});
+  ExpectStoreMetadata({{S("system"), 2_GiB},
+                       {S("vendor"), 1_GiB},
+                       {T("system"), 3_GiB},
+                       {T("vendor"), 1_GiB}});
+  ExpectUnmap({T("system"), T("vendor")});
+
+  EXPECT_TRUE(
+      InitPartitionMetadata(target(), {{"system", 3_GiB}, {"vendor", 1_GiB}}));
+}
+
+// Test resize case. Shrink if target metadata contains a partition with a size
+// greater than expected.
+TEST_P(BootControlAndroidTestP, NeedShrinkIfSizeNotMatchWhenResizing) {
+  SetMetadata(source(),
+              {{S("system"), 2_GiB},
+               {S("vendor"), 1_GiB},
+               {T("system"), 2_GiB},
+               {T("vendor"), 1_GiB}});
+  ExpectStoreMetadata({{S("system"), 2_GiB},
+                       {S("vendor"), 1_GiB},
+                       {T("system"), 2_GiB},
+                       {T("vendor"), 150_MiB}});
+  ExpectUnmap({T("system"), T("vendor")});
+
+  EXPECT_TRUE(InitPartitionMetadata(target(),
+                                    {{"system", 2_GiB}, {"vendor", 150_MiB}}));
+}
+
+// Test adding partitions on the first run.
+TEST_P(BootControlAndroidTestP, AddPartitionToEmptyMetadata) {
+  SetMetadata(source(), PartitionSuffixSizes{});
+  ExpectStoreMetadata({{T("system"), 2_GiB}, {T("vendor"), 1_GiB}});
+  ExpectUnmap({T("system"), T("vendor")});
+
+  EXPECT_TRUE(
+      InitPartitionMetadata(target(), {{"system", 2_GiB}, {"vendor", 1_GiB}}));
+}
+
+// Test subsequent add case.
+TEST_P(BootControlAndroidTestP, AddAdditionalPartition) {
+  SetMetadata(source(), {{S("system"), 2_GiB}, {T("system"), 2_GiB}});
+  ExpectStoreMetadata(
+      {{S("system"), 2_GiB}, {T("system"), 2_GiB}, {T("vendor"), 1_GiB}});
+  ExpectUnmap({T("system"), T("vendor")});
+
+  EXPECT_TRUE(
+      InitPartitionMetadata(target(), {{"system", 2_GiB}, {"vendor", 1_GiB}}));
+}
+
+// Test delete one partition.
+TEST_P(BootControlAndroidTestP, DeletePartition) {
+  SetMetadata(source(),
+              {{S("system"), 2_GiB},
+               {S("vendor"), 1_GiB},
+               {T("system"), 2_GiB},
+               {T("vendor"), 1_GiB}});
+  // No T("vendor")
+  ExpectStoreMetadata(
+      {{S("system"), 2_GiB}, {S("vendor"), 1_GiB}, {T("system"), 2_GiB}});
+  ExpectUnmap({T("system")});
+
+  EXPECT_TRUE(InitPartitionMetadata(target(), {{"system", 2_GiB}}));
+}
+
+// Test delete all partitions.
+TEST_P(BootControlAndroidTestP, DeleteAll) {
+  SetMetadata(source(),
+              {{S("system"), 2_GiB},
+               {S("vendor"), 1_GiB},
+               {T("system"), 2_GiB},
+               {T("vendor"), 1_GiB}});
+  ExpectStoreMetadata({{S("system"), 2_GiB}, {S("vendor"), 1_GiB}});
+
+  EXPECT_TRUE(InitPartitionMetadata(target(), {}));
+}
+
+// Test corrupt source metadata case.
+TEST_P(BootControlAndroidTestP, CorruptedSourceMetadata) {
+  EXPECT_CALL(dynamicControl(),
+              LoadMetadataBuilder(GetSuperDevice(source()), source(), _))
+      .WillOnce(Invoke([](auto, auto, auto) { return nullptr; }));
+  ExpectUnmap({T("system")});
+
+  EXPECT_FALSE(InitPartitionMetadata(target(), {{"system", 1_GiB}}))
+      << "Should not be able to continue with corrupt source metadata";
+}
+
+// Test that InitPartitionMetadata fails if there is not enough space on the
+// device.
+TEST_P(BootControlAndroidTestP, NotEnoughSpace) {
+  SetMetadata(source(),
+              {{S("system"), 3_GiB},
+               {S("vendor"), 2_GiB},
+               {T("system"), 0},
+               {T("vendor"), 0}});
+  EXPECT_FALSE(
+      InitPartitionMetadata(target(), {{"system", 3_GiB}, {"vendor", 3_GiB}}))
+      << "Should not be able to fit 11GiB data into 10GiB space";
+}
+
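+// Test that InitPartitionMetadata fails if the requested partitions fit in
+// the super device as a whole but exceed the half that belongs to one slot.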
+TEST_P(BootControlAndroidTestP, NotEnoughSpaceForSlot) {
+  SetMetadata(source(),
+              {{S("system"), 1_GiB},
+               {S("vendor"), 1_GiB},
+               {T("system"), 0},
+               {T("vendor"), 0}});
+  EXPECT_FALSE(
+      InitPartitionMetadata(target(), {{"system", 3_GiB}, {"vendor", 3_GiB}}))
+      << "Should not be able to grow over size of super / 2";
+}
+
+// Test applying retrofit update on a build with dynamic partitions enabled.
+TEST_P(BootControlAndroidTestP,
+       ApplyRetrofitUpdateOnDynamicPartitionsEnabledBuild) {
+  SetMetadata(source(),
+              {{S("system"), 2_GiB},
+               {S("vendor"), 1_GiB},
+               {T("system"), 2_GiB},
+               {T("vendor"), 1_GiB}});
+  // Should not try to unmap any target partition.
+  EXPECT_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(_, _)).Times(0);
+  // Should not store metadata to target slot.
+  EXPECT_CALL(dynamicControl(),
+              StoreMetadata(GetSuperDevice(target()), _, target()))
+      .Times(0);
+
+  // Not calling through BootControlAndroidTest::InitPartitionMetadata(), since
+  // we don't want any default group in the PartitionMetadata.
+  EXPECT_TRUE(bootctl_.InitPartitionMetadata(target(), {}, true));
+
+  // Should use dynamic source partitions.
+  EXPECT_CALL(dynamicControl(), GetState(S("system")))
+      .Times(1)
+      .WillOnce(Return(DmDeviceState::ACTIVE));
+  string system_device;
+  EXPECT_TRUE(bootctl_.GetPartitionDevice("system", source(), &system_device));
+  EXPECT_EQ(GetDmDevice(S("system")), system_device);
+
+  // Should use static target partitions without querying dynamic control.
+  EXPECT_CALL(dynamicControl(), GetState(T("system"))).Times(0);
+  EXPECT_TRUE(bootctl_.GetPartitionDevice("system", target(), &system_device));
+  EXPECT_EQ(GetDevice(T("system")), system_device);
+
+  // Static partition "bar".
+  EXPECT_CALL(dynamicControl(), GetState(S("bar"))).Times(0);
+  std::string bar_device;
+  EXPECT_TRUE(bootctl_.GetPartitionDevice("bar", source(), &bar_device));
+  EXPECT_EQ(GetDevice(S("bar")), bar_device);
+
+  EXPECT_CALL(dynamicControl(), GetState(T("bar"))).Times(0);
+  EXPECT_TRUE(bootctl_.GetPartitionDevice("bar", target(), &bar_device));
+  EXPECT_EQ(GetDevice(T("bar")), bar_device);
+}
+
+TEST_P(BootControlAndroidTestP, GetPartitionDeviceWhenResumingUpdate) {
+  // Both of the two slots contain valid partition metadata, since this is
+  // resuming an update.
+  SetMetadata(source(),
+              {{S("system"), 2_GiB},
+               {S("vendor"), 1_GiB},
+               {T("system"), 2_GiB},
+               {T("vendor"), 1_GiB}});
+  SetMetadata(target(),
+              {{S("system"), 2_GiB},
+               {S("vendor"), 1_GiB},
+               {T("system"), 2_GiB},
+               {T("vendor"), 1_GiB}});
+  EXPECT_CALL(dynamicControl(),
+              StoreMetadata(GetSuperDevice(target()), _, target()))
+      .Times(0);
+  EXPECT_TRUE(InitPartitionMetadata(
+      target(), {{"system", 2_GiB}, {"vendor", 1_GiB}}, false));
+
+  // Dynamic partition "system".
+  EXPECT_CALL(dynamicControl(), GetState(S("system")))
+      .Times(1)
+      .WillOnce(Return(DmDeviceState::ACTIVE));
+  string system_device;
+  EXPECT_TRUE(bootctl_.GetPartitionDevice("system", source(), &system_device));
+  EXPECT_EQ(GetDmDevice(S("system")), system_device);
+
+  EXPECT_CALL(dynamicControl(), GetState(T("system")))
+      .Times(1)
+      .WillOnce(Return(DmDeviceState::ACTIVE));
+  EXPECT_TRUE(bootctl_.GetPartitionDevice("system", target(), &system_device));
+  EXPECT_EQ(GetDmDevice(T("system")), system_device);
+
+  // Static partition "bar".
+  EXPECT_CALL(dynamicControl(), GetState(S("bar"))).Times(0);
+  std::string bar_device;
+  EXPECT_TRUE(bootctl_.GetPartitionDevice("bar", source(), &bar_device));
+  EXPECT_EQ(GetDevice(S("bar")), bar_device);
+
+  EXPECT_CALL(dynamicControl(), GetState(T("bar"))).Times(0);
+  EXPECT_TRUE(bootctl_.GetPartitionDevice("bar", target(), &bar_device));
+  EXPECT_EQ(GetDevice(T("bar")), bar_device);
+}
+
+INSTANTIATE_TEST_CASE_P(BootControlAndroidTest,
+                        BootControlAndroidTestP,
+                        testing::Values(TestParam{0, 1}, TestParam{1, 0}));
+
+const PartitionSuffixSizes update_sizes_0() {
+  // Initial state is 0 for "other" slot.
+  return {
+      {"grown_a", 2_GiB},
+      {"shrunk_a", 1_GiB},
+      {"same_a", 100_MiB},
+      {"deleted_a", 150_MiB},
+      // no added_a
+      {"grown_b", 200_MiB},
+      // simulate system_other
+      {"shrunk_b", 0},
+      {"same_b", 0},
+      {"deleted_b", 0},
+      // no added_b
+  };
+}
+
+const PartitionSuffixSizes update_sizes_1() {
+  return {
+      {"grown_a", 2_GiB},
+      {"shrunk_a", 1_GiB},
+      {"same_a", 100_MiB},
+      {"deleted_a", 150_MiB},
+      // no added_a
+      {"grown_b", 3_GiB},
+      {"shrunk_b", 150_MiB},
+      {"same_b", 100_MiB},
+      {"added_b", 150_MiB},
+      // no deleted_b
+  };
+}
+
+const PartitionSuffixSizes update_sizes_2() {
+  return {
+      {"grown_a", 4_GiB},
+      {"shrunk_a", 100_MiB},
+      {"same_a", 100_MiB},
+      {"deleted_a", 64_MiB},
+      // no added_a
+      {"grown_b", 3_GiB},
+      {"shrunk_b", 150_MiB},
+      {"same_b", 100_MiB},
+      {"added_b", 150_MiB},
+      // no deleted_b
+  };
+}
+
+// Test case for the first update after the device is manufactured, in which
+// case the "other" slot is likely of size "0" (except system, which is
+// non-zero because of the system_other partition).
+TEST_F(BootControlAndroidTest, SimulatedFirstUpdate) {
+  SetSlots({0, 1});
+
+  SetMetadata(source(), update_sizes_0());
+  SetMetadata(target(), update_sizes_0());
+  ExpectStoreMetadata(update_sizes_1());
+  ExpectUnmap({"grown_b", "shrunk_b", "same_b", "added_b"});
+
+  EXPECT_TRUE(InitPartitionMetadata(target(),
+                                    {{"grown", 3_GiB},
+                                     {"shrunk", 150_MiB},
+                                     {"same", 100_MiB},
+                                     {"added", 150_MiB}}));
+}
+
+// After first update, test for the second update. In the second update, the
+// "added" partition is deleted and "deleted" partition is re-added.
+TEST_F(BootControlAndroidTest, SimulatedSecondUpdate) {
+  SetSlots({1, 0});
+
+  SetMetadata(source(), update_sizes_1());
+  SetMetadata(target(), update_sizes_0());
+
+  ExpectStoreMetadata(update_sizes_2());
+  ExpectUnmap({"grown_a", "shrunk_a", "same_a", "deleted_a"});
+
+  EXPECT_TRUE(InitPartitionMetadata(target(),
+                                    {{"grown", 4_GiB},
+                                     {"shrunk", 100_MiB},
+                                     {"same", 100_MiB},
+                                     {"deleted", 64_MiB}}));
+}
+
+TEST_F(BootControlAndroidTest, ApplyingToCurrentSlot) {
+  SetSlots({1, 1});
+  EXPECT_FALSE(InitPartitionMetadata(target(), {}))
+      << "Should not be able to apply to current slot.";
+}
+
+class BootControlAndroidGroupTestP : public BootControlAndroidTestP {
+ public:
+  void SetUp() override {
+    BootControlAndroidTestP::SetUp();
+    SetMetadata(
+        source(),
+        {.groups = {SimpleGroup(S("android"), 3_GiB, S("system"), 2_GiB),
+                    SimpleGroup(S("oem"), 2_GiB, S("vendor"), 1_GiB),
+                    SimpleGroup(T("android"), 3_GiB, T("system"), 0),
+                    SimpleGroup(T("oem"), 2_GiB, T("vendor"), 0)}});
+  }
+
+  // Return a simple group with only one partition.
+  PartitionMetadata::Group SimpleGroup(const string& group,
+                                       uint64_t group_size,
+                                       const string& partition,
+                                       uint64_t partition_size) {
+    return {.name = group,
+            .size = group_size,
+            .partitions = {{.name = partition, .size = partition_size}}};
+  }
+
+  void ExpectStoreMetadata(const PartitionMetadata& partition_metadata) {
+    ExpectStoreMetadataMatch(MetadataMatches(partition_metadata));
+  }
+
+  // Expect that target slot is stored with target groups.
+  void ExpectStoreMetadataMatch(
+      const Matcher<MetadataBuilder*>& matcher) override {
+    BootControlAndroidTestP::ExpectStoreMetadataMatch(AllOf(
+        MetadataMatches(PartitionMetadata{
+            .groups = {SimpleGroup(S("android"), 3_GiB, S("system"), 2_GiB),
+                       SimpleGroup(S("oem"), 2_GiB, S("vendor"), 1_GiB)}}),
+        matcher));
+  }
+};
+
+// Allow resizing partitions within a group.
+TEST_P(BootControlAndroidGroupTestP, ResizeWithinGroup) {
+  ExpectStoreMetadata(PartitionMetadata{
+      .groups = {SimpleGroup(T("android"), 3_GiB, T("system"), 3_GiB),
+                 SimpleGroup(T("oem"), 2_GiB, T("vendor"), 2_GiB)}});
+  ExpectUnmap({T("system"), T("vendor")});
+
+  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
+      target(),
+      PartitionMetadata{
+          .groups = {SimpleGroup("android", 3_GiB, "system", 3_GiB),
+                     SimpleGroup("oem", 2_GiB, "vendor", 2_GiB)}},
+      true));
+}
+
+TEST_P(BootControlAndroidGroupTestP, NotEnoughSpaceForGroup) {
+  EXPECT_FALSE(bootctl_.InitPartitionMetadata(
+      target(),
+      PartitionMetadata{
+          .groups = {SimpleGroup("android", 3_GiB, "system", 1_GiB),
+                     SimpleGroup("oem", 2_GiB, "vendor", 3_GiB)}},
+      true))
+      << "Should not be able to grow over maximum size of group";
+}
+
+TEST_P(BootControlAndroidGroupTestP, GroupTooBig) {
+  EXPECT_FALSE(bootctl_.InitPartitionMetadata(
+      target(),
+      PartitionMetadata{.groups = {{.name = "android", .size = 3_GiB},
+                                   {.name = "oem", .size = 3_GiB}}},
+      true))
+      << "Should not be able to grow over size of super / 2";
+}
+
+TEST_P(BootControlAndroidGroupTestP, AddPartitionToGroup) {
+  ExpectStoreMetadata(PartitionMetadata{
+      .groups = {
+          {.name = T("android"),
+           .size = 3_GiB,
+           .partitions = {{.name = T("system"), .size = 2_GiB},
+                          {.name = T("product_services"), .size = 1_GiB}}}}});
+  ExpectUnmap({T("system"), T("vendor"), T("product_services")});
+
+  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
+      target(),
+      PartitionMetadata{
+          .groups = {{.name = "android",
+                      .size = 3_GiB,
+                      .partitions = {{.name = "system", .size = 2_GiB},
+                                     {.name = "product_services",
+                                      .size = 1_GiB}}},
+                     SimpleGroup("oem", 2_GiB, "vendor", 2_GiB)}},
+      true));
+}
+
+TEST_P(BootControlAndroidGroupTestP, RemovePartitionFromGroup) {
+  ExpectStoreMetadata(PartitionMetadata{
+      .groups = {{.name = T("android"), .size = 3_GiB, .partitions = {}}}});
+  ExpectUnmap({T("vendor")});
+
+  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
+      target(),
+      PartitionMetadata{
+          .groups = {{.name = "android", .size = 3_GiB, .partitions = {}},
+                     SimpleGroup("oem", 2_GiB, "vendor", 2_GiB)}},
+      true));
+}
+
+TEST_P(BootControlAndroidGroupTestP, AddGroup) {
+  ExpectStoreMetadata(PartitionMetadata{
+      .groups = {
+          SimpleGroup(T("new_group"), 2_GiB, T("new_partition"), 2_GiB)}});
+  ExpectUnmap({T("system"), T("vendor"), T("new_partition")});
+
+  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
+      target(),
+      PartitionMetadata{
+          .groups = {SimpleGroup("android", 2_GiB, "system", 2_GiB),
+                     SimpleGroup("oem", 1_GiB, "vendor", 1_GiB),
+                     SimpleGroup("new_group", 2_GiB, "new_partition", 2_GiB)}},
+      true));
+}
+
+TEST_P(BootControlAndroidGroupTestP, RemoveGroup) {
+  ExpectStoreMetadataMatch(Not(HasGroup(T("oem"))));
+  ExpectUnmap({T("system")});
+  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
+      target(),
+      PartitionMetadata{
+          .groups = {SimpleGroup("android", 2_GiB, "system", 2_GiB)}},
+      true));
+}
+
+TEST_P(BootControlAndroidGroupTestP, ResizeGroup) {
+  ExpectStoreMetadata(PartitionMetadata{
+      .groups = {SimpleGroup(T("android"), 2_GiB, T("system"), 2_GiB),
+                 SimpleGroup(T("oem"), 3_GiB, T("vendor"), 3_GiB)}});
+  ExpectUnmap({T("system"), T("vendor")});
+
+  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
+      target(),
+      PartitionMetadata{
+          .groups = {SimpleGroup("android", 2_GiB, "system", 2_GiB),
+                     SimpleGroup("oem", 3_GiB, "vendor", 3_GiB)}},
+      true));
+}
+
+INSTANTIATE_TEST_CASE_P(BootControlAndroidTest,
+                        BootControlAndroidGroupTestP,
+                        testing::Values(TestParam{0, 1}, TestParam{1, 0}));
+
+}  // namespace chromeos_update_engine
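
Note on the size literals used throughout the tests above: the _MiB / _GiB suffixes are user-defined literals defined elsewhere in this unittest. A minimal sketch of what such definitions look like, offered as an assumption for readers unfamiliar with the suffix syntax, not as the exact upstream code:

    #include <cstdint>

    // Assumed shape of the size literals used in the tests above.
    constexpr uint64_t operator"" _MiB(unsigned long long x) {
      return x << 20;  // mebibytes -> bytes
    }
    constexpr uint64_t operator"" _GiB(unsigned long long x) {
      return x << 30;  // gibibytes -> bytes
    }

    static_assert(100_MiB == 100ull * 1024 * 1024, "sanity check");
    static_assert(4_GiB == 4ull * 1024 * 1024 * 1024, "sanity check");
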
diff --git a/boot_control_chromeos.cc b/boot_control_chromeos.cc
index aa94d3c..ccba316 100644
--- a/boot_control_chromeos.cc
+++ b/boot_control_chromeos.cc
@@ -16,7 +16,9 @@
 
 #include "update_engine/boot_control_chromeos.h"
 
+#include <memory>
 #include <string>
+#include <utility>
 
 #include <base/bind.h>
 #include <base/files/file_path.h>
@@ -41,6 +43,12 @@
 const char* kAndroidPartitionNameKernel = "boot";
 const char* kAndroidPartitionNameRoot = "system";
 
+const char kDlcInstallRootDirectoryEncrypted[] = "/home/chronos/dlc";
+const char kPartitionNamePrefixDlc[] = "dlc_";
+const char kPartitionNameDlcA[] = "dlc_a";
+const char kPartitionNameDlcB[] = "dlc_b";
+const char kPartitionNameDlcImage[] = "dlc.img";
+
 // Returns the currently booted rootfs partition. "/dev/sda3", for example.
 string GetBootDevice() {
   char boot_path[PATH_MAX];
@@ -116,9 +124,8 @@
   }
   if (current_slot_ >= num_slots_) {
     LOG(ERROR) << "Couldn't find the slot number corresponding to the "
-                  "partition " << boot_device
-               << ", number of slots: " << num_slots_
-               << ". This device is not updateable.";
+               << "partition " << boot_device << ", number of slots: "
+               << num_slots_ << ". This device is not updateable.";
     num_slots_ = 1;
     current_slot_ = BootControlInterface::kInvalidSlot;
     return false;
@@ -141,6 +148,26 @@
 bool BootControlChromeOS::GetPartitionDevice(const string& partition_name,
                                              unsigned int slot,
                                              string* device) const {
+  // Partition name prefixed with |kPartitionNamePrefixDlc| is a DLC module.
+  if (base::StartsWith(partition_name,
+                       kPartitionNamePrefixDlc,
+                       base::CompareCase::SENSITIVE)) {
+    // Extract DLC module ID from partition_name (DLC module ID is the string
+    // after |kPartitionNamePrefixDlc| in partition_name).
+    const auto dlc_module_id =
+        partition_name.substr(strlen(kPartitionNamePrefixDlc));
+    if (dlc_module_id.empty()) {
+      LOG(ERROR) << " partition name does not contain DLC module ID:"
+                 << partition_name;
+      return false;
+    }
+    *device = base::FilePath(kDlcInstallRootDirectoryEncrypted)
+                  .Append(dlc_module_id)
+                  .Append(slot == 0 ? kPartitionNameDlcA : kPartitionNameDlcB)
+                  .Append(kPartitionNameDlcImage)
+                  .value();
+    return true;
+  }
   int partition_num = GetPartitionNumber(partition_name, slot);
   if (partition_num < 0)
     return false;
@@ -275,8 +302,7 @@
 }
 
 int BootControlChromeOS::GetPartitionNumber(
-    const string partition_name,
-    BootControlInterface::Slot slot) const {
+    const string partition_name, BootControlInterface::Slot slot) const {
   if (slot >= num_slots_) {
     LOG(ERROR) << "Invalid slot number: " << slot << ", we only have "
                << num_slots_ << " slot(s)";
@@ -300,4 +326,13 @@
   return -1;
 }
 
+bool BootControlChromeOS::InitPartitionMetadata(
+    Slot slot,
+    const PartitionMetadata& partition_metadata,
+    bool update_metadata) {
+  return true;
+}
+
+void BootControlChromeOS::Cleanup() {}
+
 }  // namespace chromeos_update_engine
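
For reference, the DLC branch added to BootControlChromeOS::GetPartitionDevice() above maps a partition name of the form dlc_<id> to an image file under /home/chronos/dlc. A self-contained sketch of that mapping, with an illustrative module ID and helper name that are not part of the change:

    #include <iostream>
    #include <string>

    // Mirrors the constants introduced in boot_control_chromeos.cc.
    const char kDlcInstallRootDirectoryEncrypted[] = "/home/chronos/dlc";
    const char kPartitionNamePrefixDlc[] = "dlc_";

    // Hypothetical helper: returns the image path for a DLC partition name,
    // or an empty string if the name is not a valid DLC partition.
    std::string DlcImagePathForPartition(const std::string& partition_name,
                                         unsigned int slot) {
      const std::string prefix(kPartitionNamePrefixDlc);
      if (partition_name.compare(0, prefix.size(), prefix) != 0)
        return "";
      const std::string dlc_module_id = partition_name.substr(prefix.size());
      if (dlc_module_id.empty())
        return "";
      return std::string(kDlcInstallRootDirectoryEncrypted) + "/" +
             dlc_module_id + (slot == 0 ? "/dlc_a/" : "/dlc_b/") + "dlc.img";
    }

    int main() {
      // "dlc_my-module" on slot 0 -> /home/chronos/dlc/my-module/dlc_a/dlc.img
      std::cout << DlcImagePathForPartition("dlc_my-module", 0) << std::endl;
      return 0;
    }
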
diff --git a/boot_control_chromeos.h b/boot_control_chromeos.h
index a1d57fe..f3682e9 100644
--- a/boot_control_chromeos.h
+++ b/boot_control_chromeos.h
@@ -50,6 +50,10 @@
   bool MarkSlotUnbootable(BootControlInterface::Slot slot) override;
   bool SetActiveBootSlot(BootControlInterface::Slot slot) override;
   bool MarkBootSuccessfulAsync(base::Callback<void(bool)> callback) override;
+  bool InitPartitionMetadata(Slot slot,
+                             const PartitionMetadata& partition_metadata,
+                             bool update_metadata) override;
+  void Cleanup() override;
 
  private:
   friend class BootControlChromeOSTest;
diff --git a/boot_control_recovery.cc b/boot_control_recovery.cc
deleted file mode 100644
index b74f4aa..0000000
--- a/boot_control_recovery.cc
+++ /dev/null
@@ -1,181 +0,0 @@
-//
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/boot_control_recovery.h"
-
-#include <base/bind.h>
-#include <base/files/file_util.h>
-#include <base/logging.h>
-#include <base/strings/string_util.h>
-#include <brillo/message_loops/message_loop.h>
-
-#include "update_engine/common/utils.h"
-#include "update_engine/utils_android.h"
-
-using std::string;
-
-#ifndef _UE_SIDELOAD
-#error "BootControlRecovery should only be used for update_engine_sideload."
-#endif
-
-// When called from update_engine_sideload, we don't attempt to dynamically load
-// the right boot_control HAL, instead we use the only HAL statically linked in
-// via the PRODUCT_STATIC_BOOT_CONTROL_HAL make variable and access the module
-// struct directly.
-extern const hw_module_t HAL_MODULE_INFO_SYM;
-
-namespace chromeos_update_engine {
-
-namespace boot_control {
-
-// Factory defined in boot_control.h.
-std::unique_ptr<BootControlInterface> CreateBootControl() {
-  std::unique_ptr<BootControlRecovery> boot_control(new BootControlRecovery());
-  if (!boot_control->Init()) {
-    return nullptr;
-  }
-  return std::move(boot_control);
-}
-
-}  // namespace boot_control
-
-bool BootControlRecovery::Init() {
-  const hw_module_t* hw_module;
-  int ret;
-
-  // For update_engine_sideload, we simulate the hw_get_module() by accessing it
-  // from the current process directly.
-  hw_module = &HAL_MODULE_INFO_SYM;
-  ret = 0;
-  if (!hw_module ||
-      strcmp(BOOT_CONTROL_HARDWARE_MODULE_ID, hw_module->id) != 0) {
-    ret = -EINVAL;
-  }
-  if (ret != 0) {
-    LOG(ERROR) << "Error loading boot_control HAL implementation.";
-    return false;
-  }
-
-  module_ = reinterpret_cast<boot_control_module_t*>(
-      const_cast<hw_module_t*>(hw_module));
-  module_->init(module_);
-
-  LOG(INFO) << "Loaded boot_control HAL "
-            << "'" << hw_module->name << "' "
-            << "version " << (hw_module->module_api_version >> 8) << "."
-            << (hw_module->module_api_version & 0xff) << " "
-            << "authored by '" << hw_module->author << "'.";
-  return true;
-}
-
-unsigned int BootControlRecovery::GetNumSlots() const {
-  return module_->getNumberSlots(module_);
-}
-
-BootControlInterface::Slot BootControlRecovery::GetCurrentSlot() const {
-  return module_->getCurrentSlot(module_);
-}
-
-bool BootControlRecovery::GetPartitionDevice(const string& partition_name,
-                                             Slot slot,
-                                             string* device) const {
-  // We can't use fs_mgr to look up |partition_name| because fstab
-  // doesn't list every slot partition (it uses the slotselect option
-  // to mask the suffix).
-  //
-  // We can however assume that there's an entry for the /misc mount
-  // point and use that to get the device file for the misc
-  // partition. This helps us locate the disk that |partition_name|
-  // resides on. From there we'll assume that a by-name scheme is used
-  // so we can just replace the trailing "misc" by the given
-  // |partition_name| and suffix corresponding to |slot|, e.g.
-  //
-  //   /dev/block/platform/soc.0/7824900.sdhci/by-name/misc ->
-  //   /dev/block/platform/soc.0/7824900.sdhci/by-name/boot_a
-  //
-  // If needed, it's possible to relax the by-name assumption in the
-  // future by trawling /sys/block looking for the appropriate sibling
-  // of misc and then finding an entry in /dev matching the sysfs
-  // entry.
-
-  base::FilePath misc_device;
-  if (!utils::DeviceForMountPoint("/misc", &misc_device))
-    return false;
-
-  if (!utils::IsSymlink(misc_device.value().c_str())) {
-    LOG(ERROR) << "Device file " << misc_device.value() << " for /misc "
-               << "is not a symlink.";
-    return false;
-  }
-
-  const char* suffix = module_->getSuffix(module_, slot);
-  if (suffix == nullptr) {
-    LOG(ERROR) << "boot_control impl returned no suffix for slot "
-               << SlotName(slot);
-    return false;
-  }
-
-  base::FilePath path = misc_device.DirName().Append(partition_name + suffix);
-  if (!base::PathExists(path)) {
-    LOG(ERROR) << "Device file " << path.value() << " does not exist.";
-    return false;
-  }
-
-  *device = path.value();
-  return true;
-}
-
-bool BootControlRecovery::IsSlotBootable(Slot slot) const {
-  int ret = module_->isSlotBootable(module_, slot);
-  if (ret < 0) {
-    LOG(ERROR) << "Unable to determine if slot " << SlotName(slot)
-               << " is bootable: " << strerror(-ret);
-    return false;
-  }
-  return ret == 1;
-}
-
-bool BootControlRecovery::MarkSlotUnbootable(Slot slot) {
-  int ret = module_->setSlotAsUnbootable(module_, slot);
-  if (ret < 0) {
-    LOG(ERROR) << "Unable to mark slot " << SlotName(slot)
-               << " as bootable: " << strerror(-ret);
-    return false;
-  }
-  return ret == 0;
-}
-
-bool BootControlRecovery::SetActiveBootSlot(Slot slot) {
-  int ret = module_->setActiveBootSlot(module_, slot);
-  if (ret < 0) {
-    LOG(ERROR) << "Unable to set the active slot to slot " << SlotName(slot)
-               << ": " << strerror(-ret);
-  }
-  return ret == 0;
-}
-
-bool BootControlRecovery::MarkBootSuccessfulAsync(
-    base::Callback<void(bool)> callback) {
-  int ret = module_->markBootSuccessful(module_);
-  if (ret < 0) {
-    LOG(ERROR) << "Unable to mark boot successful: " << strerror(-ret);
-  }
-  return brillo::MessageLoop::current()->PostTask(
-             FROM_HERE, base::Bind(callback, ret == 0)) !=
-         brillo::MessageLoop::kTaskIdNull;
-}
-
-}  // namespace chromeos_update_engine
diff --git a/boot_control_recovery.h b/boot_control_recovery.h
deleted file mode 100644
index 3a83caa..0000000
--- a/boot_control_recovery.h
+++ /dev/null
@@ -1,63 +0,0 @@
-//
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_BOOT_CONTROL_RECOVERY_H_
-#define UPDATE_ENGINE_BOOT_CONTROL_RECOVERY_H_
-
-#include <string>
-
-#include <hardware/boot_control.h>
-#include <hardware/hardware.h>
-
-#include "update_engine/common/boot_control.h"
-
-namespace chromeos_update_engine {
-
-// The Android recovery implementation of the BootControlInterface. This
-// implementation uses the legacy libhardware's boot_control HAL to access the
-// bootloader by linking against it statically. This should only be used in
-// recovery.
-class BootControlRecovery : public BootControlInterface {
- public:
-  BootControlRecovery() = default;
-  ~BootControlRecovery() = default;
-
-  // Load boot_control HAL implementation using libhardware and
-  // initializes it. Returns false if an error occurred.
-  bool Init();
-
-  // BootControlInterface overrides.
-  unsigned int GetNumSlots() const override;
-  BootControlInterface::Slot GetCurrentSlot() const override;
-  bool GetPartitionDevice(const std::string& partition_name,
-                          BootControlInterface::Slot slot,
-                          std::string* device) const override;
-  bool IsSlotBootable(BootControlInterface::Slot slot) const override;
-  bool MarkSlotUnbootable(BootControlInterface::Slot slot) override;
-  bool SetActiveBootSlot(BootControlInterface::Slot slot) override;
-  bool MarkBootSuccessfulAsync(base::Callback<void(bool)> callback) override;
-
- private:
-  // NOTE: There is no way to release/unload HAL implementations so
-  // this is essentially leaked on object destruction.
-  boot_control_module_t* module_;
-
-  DISALLOW_COPY_AND_ASSIGN(BootControlRecovery);
-};
-
-}  // namespace chromeos_update_engine
-
-#endif  // UPDATE_ENGINE_BOOT_CONTROL_RECOVERY_H_
diff --git a/certificate_checker.cc b/certificate_checker.cc
index 6e886e7..938c66f 100644
--- a/certificate_checker.cc
+++ b/certificate_checker.cc
@@ -59,8 +59,7 @@
 
 CertificateChecker::CertificateChecker(PrefsInterface* prefs,
                                        OpenSSLWrapper* openssl_wrapper)
-    : prefs_(prefs), openssl_wrapper_(openssl_wrapper) {
-}
+    : prefs_(prefs), openssl_wrapper_(openssl_wrapper) {}
 
 CertificateChecker::~CertificateChecker() {
   if (cert_checker_singleton_ == this)
@@ -128,7 +127,9 @@
                                           ServerToCheck server_to_check) {
   CHECK(cert_checker_singleton_ != nullptr);
   return cert_checker_singleton_->CheckCertificateChange(
-      preverify_ok, x509_ctx, server_to_check) ? 1 : 0;
+             preverify_ok, x509_ctx, server_to_check)
+             ? 1
+             : 0;
 }
 
 bool CertificateChecker::CheckCertificateChange(int preverify_ok,
@@ -147,10 +148,8 @@
   unsigned int digest_length;
   uint8_t digest[EVP_MAX_MD_SIZE];
 
-  if (!openssl_wrapper_->GetCertificateDigest(x509_ctx,
-                                              &depth,
-                                              &digest_length,
-                                              digest)) {
+  if (!openssl_wrapper_->GetCertificateDigest(
+          x509_ctx, &depth, &digest_length, digest)) {
     LOG(WARNING) << "Failed to generate digest of X509 certificate "
                  << "from update server.";
     NotifyCertificateChecked(server_to_check, CertificateCheckResult::kValid);
@@ -161,9 +160,10 @@
   // prefs.
   string digest_string = base::HexEncode(digest, digest_length);
 
-  string storage_key =
-      base::StringPrintf("%s-%d-%d", kPrefsUpdateServerCertificate,
-                         static_cast<int>(server_to_check), depth);
+  string storage_key = base::StringPrintf("%s-%d-%d",
+                                          kPrefsUpdateServerCertificate,
+                                          static_cast<int>(server_to_check),
+                                          depth);
   string stored_digest;
   // If there's no stored certificate, we just store the current one and return.
   if (!prefs_->GetString(storage_key, &stored_digest)) {
@@ -195,8 +195,7 @@
 }
 
 void CertificateChecker::NotifyCertificateChecked(
-    ServerToCheck server_to_check,
-    CertificateCheckResult result) {
+    ServerToCheck server_to_check, CertificateCheckResult result) {
   if (observer_)
     observer_->CertificateChecked(server_to_check, result);
 }
diff --git a/certificate_checker_unittest.cc b/certificate_checker_unittest.cc
index 66b92d6..15d6555 100644
--- a/certificate_checker_unittest.cc
+++ b/certificate_checker_unittest.cc
@@ -27,12 +27,12 @@
 #include "update_engine/common/mock_prefs.h"
 #include "update_engine/mock_certificate_checker.h"
 
+using std::string;
+using ::testing::_;
 using ::testing::DoAll;
 using ::testing::Return;
 using ::testing::SetArgPointee;
 using ::testing::SetArrayArgument;
-using ::testing::_;
-using std::string;
 
 namespace chromeos_update_engine {
 
@@ -54,9 +54,7 @@
     cert_checker.SetObserver(&observer_);
   }
 
-  void TearDown() override {
-    cert_checker.SetObserver(nullptr);
-  }
+  void TearDown() override { cert_checker.SetObserver(nullptr); }
 
   MockPrefs prefs_;
   MockOpenSSLWrapper openssl_wrapper_;
@@ -77,16 +75,15 @@
 // check certificate change, new
 TEST_F(CertificateCheckerTest, NewCertificate) {
   EXPECT_CALL(openssl_wrapper_, GetCertificateDigest(nullptr, _, _, _))
-      .WillOnce(DoAll(
-          SetArgPointee<1>(depth_),
-          SetArgPointee<2>(length_),
-          SetArrayArgument<3>(digest_, digest_ + 4),
-          Return(true)));
+      .WillOnce(DoAll(SetArgPointee<1>(depth_),
+                      SetArgPointee<2>(length_),
+                      SetArrayArgument<3>(digest_, digest_ + 4),
+                      Return(true)));
   EXPECT_CALL(prefs_, GetString(cert_key_, _)).WillOnce(Return(false));
   EXPECT_CALL(prefs_, SetString(cert_key_, digest_hex_)).WillOnce(Return(true));
-  EXPECT_CALL(observer_,
-              CertificateChecked(server_to_check_,
-                                 CertificateCheckResult::kValid));
+  EXPECT_CALL(
+      observer_,
+      CertificateChecked(server_to_check_, CertificateCheckResult::kValid));
   ASSERT_TRUE(
       cert_checker.CheckCertificateChange(1, nullptr, server_to_check_));
 }
@@ -94,17 +91,16 @@
 // check certificate change, unchanged
 TEST_F(CertificateCheckerTest, SameCertificate) {
   EXPECT_CALL(openssl_wrapper_, GetCertificateDigest(nullptr, _, _, _))
-      .WillOnce(DoAll(
-          SetArgPointee<1>(depth_),
-          SetArgPointee<2>(length_),
-          SetArrayArgument<3>(digest_, digest_ + 4),
-          Return(true)));
+      .WillOnce(DoAll(SetArgPointee<1>(depth_),
+                      SetArgPointee<2>(length_),
+                      SetArrayArgument<3>(digest_, digest_ + 4),
+                      Return(true)));
   EXPECT_CALL(prefs_, GetString(cert_key_, _))
       .WillOnce(DoAll(SetArgPointee<1>(digest_hex_), Return(true)));
   EXPECT_CALL(prefs_, SetString(_, _)).Times(0);
-  EXPECT_CALL(observer_,
-              CertificateChecked(server_to_check_,
-                                 CertificateCheckResult::kValid));
+  EXPECT_CALL(
+      observer_,
+      CertificateChecked(server_to_check_, CertificateCheckResult::kValid));
   ASSERT_TRUE(
       cert_checker.CheckCertificateChange(1, nullptr, server_to_check_));
 }
@@ -112,11 +108,10 @@
 // check certificate change, changed
 TEST_F(CertificateCheckerTest, ChangedCertificate) {
   EXPECT_CALL(openssl_wrapper_, GetCertificateDigest(nullptr, _, _, _))
-      .WillOnce(DoAll(
-          SetArgPointee<1>(depth_),
-          SetArgPointee<2>(length_),
-          SetArrayArgument<3>(digest_, digest_ + 4),
-          Return(true)));
+      .WillOnce(DoAll(SetArgPointee<1>(depth_),
+                      SetArgPointee<2>(length_),
+                      SetArrayArgument<3>(digest_, digest_ + 4),
+                      Return(true)));
   EXPECT_CALL(prefs_, GetString(cert_key_, _))
       .WillOnce(DoAll(SetArgPointee<1>(diff_digest_hex_), Return(true)));
   EXPECT_CALL(observer_,
@@ -129,8 +124,9 @@
 
 // check certificate change, failed
 TEST_F(CertificateCheckerTest, FailedCertificate) {
-  EXPECT_CALL(observer_, CertificateChecked(server_to_check_,
-                                            CertificateCheckResult::kFailed));
+  EXPECT_CALL(
+      observer_,
+      CertificateChecked(server_to_check_, CertificateCheckResult::kFailed));
   EXPECT_CALL(prefs_, GetString(_, _)).Times(0);
   EXPECT_CALL(openssl_wrapper_, GetCertificateDigest(_, _, _, _)).Times(0);
   ASSERT_FALSE(
diff --git a/chrome_browser_proxy_resolver.cc b/chrome_browser_proxy_resolver.cc
index 5beecc1..bfb58f7 100644
--- a/chrome_browser_proxy_resolver.cc
+++ b/chrome_browser_proxy_resolver.cc
@@ -28,8 +28,7 @@
 namespace chromeos_update_engine {
 
 ChromeBrowserProxyResolver::ChromeBrowserProxyResolver()
-    : next_request_id_(kProxyRequestIdNull + 1),
-      weak_ptr_factory_(this) {}
+    : next_request_id_(kProxyRequestIdNull + 1), weak_ptr_factory_(this) {}
 
 ChromeBrowserProxyResolver::~ChromeBrowserProxyResolver() = default;
 
@@ -37,9 +36,11 @@
     const std::string& url, const ProxiesResolvedFn& callback) {
   const ProxyRequestId id = next_request_id_++;
   brillo::http::GetChromeProxyServersAsync(
-      DBusConnection::Get()->GetDBus(), url,
+      DBusConnection::Get()->GetDBus(),
+      url,
       base::Bind(&ChromeBrowserProxyResolver::OnGetChromeProxyServers,
-                 weak_ptr_factory_.GetWeakPtr(), id));
+                 weak_ptr_factory_.GetWeakPtr(),
+                 id));
   pending_callbacks_[id] = callback;
   return id;
 }
@@ -49,7 +50,8 @@
 }
 
 void ChromeBrowserProxyResolver::OnGetChromeProxyServers(
-    ProxyRequestId request_id, bool success,
+    ProxyRequestId request_id,
+    bool success,
     const std::vector<std::string>& proxies) {
   // If |success| is false, |proxies| will still hold the direct proxy option
   // which is what we do in our error case.
diff --git a/chrome_browser_proxy_resolver.h b/chrome_browser_proxy_resolver.h
index fcf85b6..10a55fb 100644
--- a/chrome_browser_proxy_resolver.h
+++ b/chrome_browser_proxy_resolver.h
@@ -24,7 +24,7 @@
 
 #include <base/memory/weak_ptr.h>
 
-#include "update_engine/proxy_resolver.h"
+#include "update_engine/common/proxy_resolver.h"
 
 namespace chromeos_update_engine {
 
diff --git a/client_library/client.cc b/client_library/client.cc
index 9a42696..b05df90 100644
--- a/client_library/client.cc
+++ b/client_library/client.cc
@@ -31,13 +31,13 @@
 unique_ptr<UpdateEngineClient> UpdateEngineClient::CreateInstance() {
 #if USE_BINDER
   auto update_engine_client_impl = new internal::BinderUpdateEngineClient{};
-#else  // !USE_BINDER
+#else   // !USE_BINDER
   auto update_engine_client_impl = new internal::DBusUpdateEngineClient{};
 #endif  // USE_BINDER
   auto ret = unique_ptr<UpdateEngineClient>{update_engine_client_impl};
 
   if (!update_engine_client_impl->Init()) {
-      ret.reset();
+    ret.reset();
   }
 
   return ret;
diff --git a/client_library/client_binder.cc b/client_library/client_binder.cc
index 54b33ed..588bc64 100644
--- a/client_library/client_binder.cc
+++ b/client_library/client_binder.cc
@@ -25,12 +25,12 @@
 #include "update_engine/parcelable_update_engine_status.h"
 #include "update_engine/update_status_utils.h"
 
-using android::binder::Status;
-using android::brillo::ParcelableUpdateEngineStatus;
 using android::getService;
 using android::OK;
 using android::String16;
 using android::String8;
+using android::binder::Status;
+using android::brillo::ParcelableUpdateEngineStatus;
 using chromeos_update_engine::StringToUpdateStatus;
 using std::string;
 using update_engine::UpdateAttemptFlags;
@@ -39,10 +39,11 @@
 namespace internal {
 
 bool BinderUpdateEngineClient::Init() {
-  if (!binder_watcher_.Init()) return false;
+  if (!binder_watcher_.Init())
+    return false;
 
   return getService(String16{"android.brillo.UpdateEngineService"},
-      &service_) == OK;
+                    &service_) == OK;
 }
 
 bool BinderUpdateEngineClient::AttemptUpdate(const string& in_app_version,
@@ -58,6 +59,11 @@
       .isOk();
 }
 
+bool BinderUpdateEngineClient::AttemptInstall(
+    const string& omaha_url, const std::vector<string>& dlc_module_ids) {
+  return false;
+}
+
 bool BinderUpdateEngineClient::GetStatus(int64_t* out_last_checked_time,
                                          double* out_progress,
                                          UpdateStatus* out_update_status,
@@ -167,8 +173,7 @@
 bool BinderUpdateEngineClient::RegisterStatusUpdateHandler(
     StatusUpdateHandler* handler) {
   if (!status_callback_.get()) {
-    status_callback_ =
-        new BinderUpdateEngineClient::StatusUpdateCallback(this);
+    status_callback_ = new BinderUpdateEngineClient::StatusUpdateCallback(this);
     if (!service_->RegisterStatusCallback(status_callback_).isOk()) {
       return false;
     }
@@ -182,13 +187,16 @@
   string new_version;
   int64_t new_size;
 
-  if (!GetStatus(&last_checked_time, &progress, &update_status,
-                 &new_version, &new_size)) {
+  if (!GetStatus(&last_checked_time,
+                 &progress,
+                 &update_status,
+                 &new_version,
+                 &new_size)) {
     handler->IPCError("Could not get status from binder service");
   }
 
-  handler->HandleStatusUpdate(last_checked_time, progress, update_status,
-                              new_version, new_size);
+  handler->HandleStatusUpdate(
+      last_checked_time, progress, update_status, new_version, new_size);
 
   return true;
 }
@@ -206,8 +214,9 @@
 
 bool BinderUpdateEngineClient::SetTargetChannel(const string& in_target_channel,
                                                 bool allow_powerwash) {
-  return service_->SetChannel(String16{in_target_channel.c_str()},
-                              allow_powerwash).isOk();
+  return service_
+      ->SetChannel(String16{in_target_channel.c_str()}, allow_powerwash)
+      .isOk();
 }
 
 bool BinderUpdateEngineClient::GetTargetChannel(string* out_channel) const {
diff --git a/client_library/client_binder.h b/client_library/client_binder.h
index 17f2beb..f3e4102 100644
--- a/client_library/client_binder.h
+++ b/client_library/client_binder.h
@@ -47,6 +47,9 @@
                      const std::string& omaha_url,
                      bool at_user_request) override;
 
+  bool AttemptInstall(const std::string& omaha_url,
+                      const std::vector<std::string>& dlc_module_ids) override;
+
   bool GetStatus(int64_t* out_last_checked_time,
                  double* out_progress,
                  UpdateStatus* out_update_status,
@@ -87,8 +90,8 @@
   bool GetEolStatus(int32_t* eol_status) const override;
 
  private:
-  class StatusUpdateCallback :
-      public android::brillo::BnUpdateEngineStatusCallback {
+  class StatusUpdateCallback
+      : public android::brillo::BnUpdateEngineStatusCallback {
    public:
     explicit StatusUpdateCallback(BinderUpdateEngineClient* client)
         : client_(client) {}
diff --git a/client_library/client_dbus.cc b/client_library/client_dbus.cc
index 1072836..7ca6307 100644
--- a/client_library/client_dbus.cc
+++ b/client_library/client_dbus.cc
@@ -20,6 +20,7 @@
 
 #include <dbus/bus.h>
 #include <update_engine/dbus-constants.h>
+#include <update_engine/proto_bindings/update_engine.pb.h>
 
 #include "update_engine/update_status_utils.h"
 
@@ -27,6 +28,7 @@
 using dbus::Bus;
 using org::chromium::UpdateEngineInterfaceProxy;
 using std::string;
+using std::vector;
 
 namespace update_engine {
 namespace internal {
@@ -55,6 +57,24 @@
       nullptr);
 }
 
+bool DBusUpdateEngineClient::AttemptInstall(
+    const string& omaha_url, const vector<string>& dlc_module_ids) {
+  // Convert parameters into protobuf.
+  chromeos_update_engine::DlcParameters dlc_parameters;
+  dlc_parameters.set_omaha_url(omaha_url);
+  for (const auto& dlc_module_id : dlc_module_ids) {
+    chromeos_update_engine::DlcInfo* dlc_info = dlc_parameters.add_dlc_infos();
+    dlc_info->set_dlc_id(dlc_module_id);
+  }
+  string dlc_request;
+  if (dlc_parameters.SerializeToString(&dlc_request)) {
+    return proxy_->AttemptInstall(dlc_request, nullptr /* brillo::ErrorPtr* */);
+  } else {
+    LOG(ERROR) << "Fail to serialize a protobuf to a string.";
+    return false;
+  }
+}
+
 bool DBusUpdateEngineClient::GetStatus(int64_t* out_last_checked_time,
                                        double* out_progress,
                                        UpdateStatus* out_update_status,
@@ -127,13 +147,11 @@
 }
 
 void DBusUpdateEngineClient::DBusStatusHandlersRegistered(
-    const string& interface,
-    const string& signal_name,
-    bool success) const {
+    const string& interface, const string& signal_name, bool success) const {
   if (!success) {
     for (auto handler : handlers_) {
-      handler->IPCError("Could not connect to" + signal_name +
-                        " on " + interface);
+      handler->IPCError("Could not connect to" + signal_name + " on " +
+                        interface);
     }
   } else {
     StatusUpdateHandlersRegistered(nullptr);
diff --git a/client_library/client_dbus.h b/client_library/client_dbus.h
index cec1665..a186d45 100644
--- a/client_library/client_dbus.h
+++ b/client_library/client_dbus.h
@@ -41,6 +41,9 @@
                      const std::string& omaha_url,
                      bool at_user_request) override;
 
+  bool AttemptInstall(const std::string& omaha_url,
+                      const std::vector<std::string>& dlc_module_ids) override;
+
   bool GetStatus(int64_t* out_last_checked_time,
                  double* out_progress,
                  UpdateStatus* out_update_status,
diff --git a/client_library/include/update_engine/client.h b/client_library/include/update_engine/client.h
index be87c76..1bc6111 100644
--- a/client_library/include/update_engine/client.h
+++ b/client_library/include/update_engine/client.h
@@ -20,6 +20,7 @@
 #include <cstdint>
 #include <memory>
 #include <string>
+#include <vector>
 
 #include "update_engine/status_update_handler.h"
 #include "update_engine/update_status.h"
@@ -47,6 +48,18 @@
                              const std::string& omaha_url,
                              bool at_user_request) = 0;
 
+  // Request the update_engine to install a list of DLC modules.
+  // |omaha_url|
+  //     Force update_engine to look for updates from the given server. Passing
+  //     an empty string indicates that update_engine should use its default
+  //     value. Note that update_engine will ignore this parameter in production
+  //     mode to avoid pulling untrusted updates.
+  // |dlc_module_ids|
+  //     A list of DLC module IDs.
+  virtual bool AttemptInstall(
+      const std::string& omaha_url,
+      const std::vector<std::string>& dlc_module_ids) = 0;
+
   // Returns the current status of the Update Engine.
   //
   // |out_last_checked_time|
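
As a usage illustration of the new AttemptInstall() client entry point, a minimal sketch of a caller requesting DLC installation through the client library; the include path and module IDs are assumptions, and on the binder client this call currently returns false, as shown earlier:

    #include <memory>
    #include <string>
    #include <vector>

    #include <update_engine/client.h>  // assumed install path of client.h

    int main() {
      std::unique_ptr<update_engine::UpdateEngineClient> client =
          update_engine::UpdateEngineClient::CreateInstance();
      if (!client)
        return 1;
      // An empty |omaha_url| asks update_engine to use its default server.
      std::vector<std::string> dlc_module_ids = {"dlc-foo", "dlc-bar"};
      return client->AttemptInstall("", dlc_module_ids) ? 0 : 1;
    }
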
diff --git a/client_library/include/update_engine/status_update_handler.h b/client_library/include/update_engine/status_update_handler.h
index d5b8cdb..d2fad34 100644
--- a/client_library/include/update_engine/status_update_handler.h
+++ b/client_library/include/update_engine/status_update_handler.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_STATUS_UPDATE_HANDLER_H_
-#define UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_STATUS_UPDATE_HANDLER_H_
+#ifndef UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_UPDATE_ENGINE_STATUS_UPDATE_HANDLER_H_
+#define UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_UPDATE_ENGINE_STATUS_UPDATE_HANDLER_H_
 
 #include <string>
 
@@ -44,4 +44,4 @@
 
 }  // namespace update_engine
 
-#endif  // UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_STATUS_UPDATE_HANDLER_H_
+#endif  // UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_UPDATE_ENGINE_STATUS_UPDATE_HANDLER_H_
diff --git a/client_library/include/update_engine/update_status.h b/client_library/include/update_engine/update_status.h
index 41fab48..5a3dccf 100644
--- a/client_library/include/update_engine/update_status.h
+++ b/client_library/include/update_engine/update_status.h
@@ -23,17 +23,24 @@
 
 namespace update_engine {
 
+// ATTENTION: When adding a new enum value here, always append it at the end
+// and make the corresponding adjustments in UpdateAttempter::ActionCompleted().
+// If any enum member is deprecated, the assigned values of the other members
+// must not change. See b/62842358.
 enum class UpdateStatus {
   IDLE = 0,
-  CHECKING_FOR_UPDATE,
-  UPDATE_AVAILABLE,
-  DOWNLOADING,
-  VERIFYING,
-  FINALIZING,
-  UPDATED_NEED_REBOOT,
-  REPORTING_ERROR_EVENT,
-  ATTEMPTING_ROLLBACK,
-  DISABLED,
+  CHECKING_FOR_UPDATE = 1,
+  UPDATE_AVAILABLE = 2,
+  DOWNLOADING = 3,
+  VERIFYING = 4,
+  FINALIZING = 5,
+  UPDATED_NEED_REBOOT = 6,
+  REPORTING_ERROR_EVENT = 7,
+  ATTEMPTING_ROLLBACK = 8,
+  DISABLED = 9,
+  // Broadcast this state when an update aborts because user preferences do not
+  // allow updates, e.g. over a cellular network.
+  NEED_PERMISSION_TO_UPDATE = 10,
 };
 
 // Enum of bit-wise flags for controlling how updates are attempted.
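
A short sketch of how a status consumer might handle the new NEED_PERMISSION_TO_UPDATE value; the helper and message strings are illustrative and not part of this change:

    #include <string>

    #include <update_engine/update_status.h>

    // Illustrative helper: map a status to a short user-facing string.
    std::string StatusToMessage(update_engine::UpdateStatus status) {
      switch (status) {
        case update_engine::UpdateStatus::UPDATED_NEED_REBOOT:
          return "Update applied; reboot to finish.";
        case update_engine::UpdateStatus::NEED_PERMISSION_TO_UPDATE:
          return "An update is available but blocked by user preferences "
                 "(e.g. updates over cellular are disabled).";
        default:
          return "No action needed.";
      }
    }
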
diff --git a/common/action.h b/common/action.h
index 6c88216..9e2f5ff 100644
--- a/common/action.h
+++ b/common/action.h
@@ -145,12 +145,12 @@
 };
 
 // Forward declare a couple classes we use.
-template<typename T>
+template <typename T>
 class ActionPipe;
-template<typename T>
+template <typename T>
 class ActionTraits;
 
-template<typename SubClass>
+template <typename SubClass>
 class Action : public AbstractAction {
  public:
   ~Action() override {}
@@ -162,8 +162,9 @@
   void set_in_pipe(
       // this type is a fancy way of saying: a shared_ptr to an
       // ActionPipe<InputObjectType>.
-      const std::shared_ptr<ActionPipe<
-          typename ActionTraits<SubClass>::InputObjectType>>& in_pipe) {
+      const std::shared_ptr<
+          ActionPipe<typename ActionTraits<SubClass>::InputObjectType>>&
+          in_pipe) {
     in_pipe_ = in_pipe;
   }
 
@@ -174,8 +175,9 @@
   void set_out_pipe(
       // this type is a fancy way of saying: a shared_ptr to an
       // ActionPipe<OutputObjectType>.
-      const std::shared_ptr<ActionPipe<
-          typename ActionTraits<SubClass>::OutputObjectType>>& out_pipe) {
+      const std::shared_ptr<
+          ActionPipe<typename ActionTraits<SubClass>::OutputObjectType>>&
+          out_pipe) {
     out_pipe_ = out_pipe;
   }
 
@@ -192,9 +194,7 @@
   }
 
   // Returns true iff there's an output pipe.
-  bool HasOutputPipe() const {
-    return out_pipe_.get();
-  }
+  bool HasOutputPipe() const { return out_pipe_.get(); }
 
   // Copies the object passed into the output pipe. It will be accessible to
   // the next Action via that action's input pipe (which is the same as this
diff --git a/common/action_pipe.h b/common/action_pipe.h
index 376c2f1..0c98ee1 100644
--- a/common/action_pipe.h
+++ b/common/action_pipe.h
@@ -48,10 +48,10 @@
 // for that type, no object is taken/given.
 class NoneType {};
 
-template<typename T>
+template <typename T>
 class Action;
 
-template<typename ObjectType>
+template <typename ObjectType>
 class ActionPipe {
  public:
   virtual ~ActionPipe() {}
@@ -67,7 +67,7 @@
   // Bonds two Actions together with a new ActionPipe. The ActionPipe is
   // jointly owned by the two Actions and will be automatically destroyed
   // when the last Action is destroyed.
-  template<typename FromAction, typename ToAction>
+  template <typename FromAction, typename ToAction>
   static void Bond(FromAction* from, ToAction* to) {
     std::shared_ptr<ActionPipe<ObjectType>> pipe(new ActionPipe<ObjectType>);
     from->set_out_pipe(pipe);
@@ -87,7 +87,7 @@
 };
 
 // Utility function
-template<typename FromAction, typename ToAction>
+template <typename FromAction, typename ToAction>
 void BondActions(FromAction* from, ToAction* to) {
   static_assert(
       std::is_same<typename FromAction::OutputObjectType,
diff --git a/common/action_pipe_unittest.cc b/common/action_pipe_unittest.cc
index 9bfbc83..233561d 100644
--- a/common/action_pipe_unittest.cc
+++ b/common/action_pipe_unittest.cc
@@ -28,7 +28,7 @@
 
 class ActionPipeTestAction;
 
-template<>
+template <>
 class ActionTraits<ActionPipeTestAction> {
  public:
   typedef string OutputObjectType;
@@ -46,7 +46,7 @@
   string Type() const { return "ActionPipeTestAction"; }
 };
 
-class ActionPipeTest : public ::testing::Test { };
+class ActionPipeTest : public ::testing::Test {};
 
 // This test creates two simple Actions and sends a message via an ActionPipe
 // from one to the other.
diff --git a/common/action_processor.cc b/common/action_processor.cc
index 3549e08..6e555dd 100644
--- a/common/action_processor.cc
+++ b/common/action_processor.cc
@@ -17,6 +17,7 @@
 #include "update_engine/common/action_processor.h"
 
 #include <string>
+#include <utility>
 
 #include <base/logging.h>
 
@@ -24,27 +25,30 @@
 #include "update_engine/common/error_code_utils.h"
 
 using std::string;
+using std::unique_ptr;
 
 namespace chromeos_update_engine {
 
 ActionProcessor::~ActionProcessor() {
   if (IsRunning())
     StopProcessing();
-  for (auto action : actions_)
-    action->SetProcessor(nullptr);
 }
 
-void ActionProcessor::EnqueueAction(AbstractAction* action) {
-  actions_.push_back(action);
+void ActionProcessor::EnqueueAction(unique_ptr<AbstractAction> action) {
   action->SetProcessor(this);
+  actions_.push_back(std::move(action));
+}
+
+bool ActionProcessor::IsRunning() const {
+  return current_action_ != nullptr || suspended_;
 }
 
 void ActionProcessor::StartProcessing() {
   CHECK(!IsRunning());
   if (!actions_.empty()) {
-    current_action_ = actions_.front();
-    LOG(INFO) << "ActionProcessor: starting " << current_action_->Type();
+    current_action_ = std::move(actions_.front());
     actions_.pop_front();
+    LOG(INFO) << "ActionProcessor: starting " << current_action_->Type();
     current_action_->PerformAction();
   }
 }
@@ -53,16 +57,13 @@
   CHECK(IsRunning());
   if (current_action_) {
     current_action_->TerminateProcessing();
-    current_action_->SetProcessor(nullptr);
   }
   LOG(INFO) << "ActionProcessor: aborted "
             << (current_action_ ? current_action_->Type() : "")
             << (suspended_ ? " while suspended" : "");
-  current_action_ = nullptr;
+  current_action_.reset();
   suspended_ = false;
   // Delete all the actions before calling the delegate.
-  for (auto action : actions_)
-    action->SetProcessor(nullptr);
   actions_.clear();
   if (delegate_)
     delegate_->ProcessingStopped(this);
@@ -106,17 +107,16 @@
 
 void ActionProcessor::ActionComplete(AbstractAction* actionptr,
                                      ErrorCode code) {
-  CHECK_EQ(actionptr, current_action_);
+  CHECK_EQ(actionptr, current_action_.get());
   if (delegate_)
     delegate_->ActionCompleted(this, actionptr, code);
   string old_type = current_action_->Type();
   current_action_->ActionCompleted(code);
-  current_action_->SetProcessor(nullptr);
-  current_action_ = nullptr;
+  current_action_.reset();
   LOG(INFO) << "ActionProcessor: finished "
             << (actions_.empty() ? "last action " : "") << old_type
-            << (suspended_ ? " while suspended" : "")
-            << " with code " << utils::ErrorCodeToString(code);
+            << (suspended_ ? " while suspended" : "") << " with code "
+            << utils::ErrorCodeToString(code);
   if (!actions_.empty() && code != ErrorCode::kSuccess) {
     LOG(INFO) << "ActionProcessor: Aborting processing due to failure.";
     actions_.clear();
@@ -138,7 +138,7 @@
     }
     return;
   }
-  current_action_ = actions_.front();
+  current_action_ = std::move(actions_.front());
   actions_.pop_front();
   LOG(INFO) << "ActionProcessor: starting " << current_action_->Type();
   current_action_->PerformAction();
diff --git a/common/action_processor.h b/common/action_processor.h
index c9c179e..735a106 100644
--- a/common/action_processor.h
+++ b/common/action_processor.h
@@ -18,12 +18,16 @@
 #define UPDATE_ENGINE_COMMON_ACTION_PROCESSOR_H_
 
 #include <deque>
+#include <memory>
+#include <vector>
 
 #include <base/macros.h>
 #include <brillo/errors/error.h>
 
 #include "update_engine/common/error_code.h"
 
+#include <gtest/gtest_prod.h>
+
 // The structure of these classes (Action, ActionPipe, ActionProcessor, etc.)
 // is based on the KSAction* classes from the Google Update Engine code at
 // http://code.google.com/p/update-engine/ . The author of this file sends
@@ -69,26 +73,27 @@
 
   // Returns true iff the processing was started but not yet completed nor
   // stopped.
-  bool IsRunning() const { return current_action_ != nullptr || suspended_; }
+  bool IsRunning() const;
 
   // Adds another Action to the end of the queue.
-  virtual void EnqueueAction(AbstractAction* action);
+  virtual void EnqueueAction(std::unique_ptr<AbstractAction> action);
 
   // Sets/gets the current delegate. Set to null to remove a delegate.
   ActionProcessorDelegate* delegate() const { return delegate_; }
-  void set_delegate(ActionProcessorDelegate *delegate) {
-    delegate_ = delegate;
-  }
+  void set_delegate(ActionProcessorDelegate* delegate) { delegate_ = delegate; }
 
   // Returns a pointer to the current Action that's processing.
-  AbstractAction* current_action() const {
-    return current_action_;
-  }
+  AbstractAction* current_action() const { return current_action_.get(); }
 
   // Called by an action to notify processor that it's done. Caller passes self.
+  // Note that this call may delete the action if no other object holds a
+  // reference to it, so in that case the caller should not access any of its
+  // member variables after this call.
   void ActionComplete(AbstractAction* actionptr, ErrorCode code);
 
  private:
+  FRIEND_TEST(ActionProcessorTest, ChainActionsTest);
+
   // Continue processing actions (if any) after the last action terminated with
   // the passed error code. If there are no more actions to process, the
   // processing will terminate.
@@ -96,10 +101,10 @@
 
   // Actions that have not yet begun processing, in the order in which
   // they'll be processed.
-  std::deque<AbstractAction*> actions_;
+  std::deque<std::unique_ptr<AbstractAction>> actions_;
 
   // A pointer to the currently processing Action, if any.
-  AbstractAction* current_action_{nullptr};
+  std::unique_ptr<AbstractAction> current_action_;
 
   // The ErrorCode reported by an action that was suspended but finished while
   // being suspended. This error code is stored here to be reported back to the
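
With EnqueueAction() now taking ownership through std::unique_ptr, callers that still need to reference an action after enqueueing keep a raw pointer before the move. A minimal sketch of that pattern (the helper function is illustrative, not part of the change):

    #include <memory>
    #include <utility>

    #include "update_engine/common/action_processor.h"

    namespace chromeos_update_engine {

    // Illustrative pattern: the processor owns enqueued actions, so take a raw
    // pointer before std::move() if the action must be referenced afterwards.
    void EnqueueAndRun(ActionProcessor* processor,
                       std::unique_ptr<AbstractAction> action) {
      AbstractAction* action_ptr = action.get();
      processor->EnqueueAction(std::move(action));
      processor->StartProcessing();
      // |action_ptr| stays valid while the processor owns the action, but the
      // action may be deleted once ActionComplete() runs; do not dereference
      // it after that point.
      (void)action_ptr;
    }

    }  // namespace chromeos_update_engine
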
diff --git a/common/action_processor_unittest.cc b/common/action_processor_unittest.cc
index 631e42d..4057abd 100644
--- a/common/action_processor_unittest.cc
+++ b/common/action_processor_unittest.cc
@@ -17,6 +17,7 @@
 #include "update_engine/common/action_processor.h"
 
 #include <string>
+#include <utility>
 
 #include <gtest/gtest.h>
 
@@ -31,7 +32,7 @@
 
 class ActionProcessorTestAction;
 
-template<>
+template <>
 class ActionTraits<ActionProcessorTestAction> {
  public:
   typedef string OutputObjectType;
@@ -96,12 +97,14 @@
   void SetUp() override {
     action_processor_.set_delegate(&delegate_);
     // Silence Type() calls used for logging.
-    EXPECT_CALL(mock_action_, Type()).Times(testing::AnyNumber());
+    mock_action_.reset(new testing::StrictMock<MockAction>());
+    mock_action_ptr_ = mock_action_.get();
+    action_.reset(new ActionProcessorTestAction());
+    action_ptr_ = action_.get();
+    EXPECT_CALL(*mock_action_, Type()).Times(testing::AnyNumber());
   }
 
-  void TearDown() override {
-    action_processor_.set_delegate(nullptr);
-  }
+  void TearDown() override { action_processor_.set_delegate(nullptr); }
 
  protected:
   // The ActionProcessor under test.
@@ -110,34 +113,35 @@
   MyActionProcessorDelegate delegate_{&action_processor_};
 
   // Common actions used during most tests.
-  testing::StrictMock<MockAction> mock_action_;
-  ActionProcessorTestAction action_;
+  std::unique_ptr<testing::StrictMock<MockAction>> mock_action_;
+  testing::StrictMock<MockAction>* mock_action_ptr_;
+  std::unique_ptr<ActionProcessorTestAction> action_;
+  ActionProcessorTestAction* action_ptr_;
 };
 
 TEST_F(ActionProcessorTest, SimpleTest) {
   EXPECT_FALSE(action_processor_.IsRunning());
-  action_processor_.EnqueueAction(&action_);
+  action_processor_.EnqueueAction(std::move(action_));
   EXPECT_FALSE(action_processor_.IsRunning());
-  EXPECT_FALSE(action_.IsRunning());
+  EXPECT_FALSE(action_ptr_->IsRunning());
   action_processor_.StartProcessing();
   EXPECT_TRUE(action_processor_.IsRunning());
-  EXPECT_TRUE(action_.IsRunning());
-  EXPECT_EQ(action_processor_.current_action(), &action_);
-  action_.CompleteAction();
+  EXPECT_TRUE(action_ptr_->IsRunning());
+  action_ptr_->CompleteAction();
   EXPECT_FALSE(action_processor_.IsRunning());
-  EXPECT_FALSE(action_.IsRunning());
+  EXPECT_EQ(action_processor_.current_action(), nullptr);
 }
 
 TEST_F(ActionProcessorTest, DelegateTest) {
-  action_processor_.EnqueueAction(&action_);
+  action_processor_.EnqueueAction(std::move(action_));
   action_processor_.StartProcessing();
-  action_.CompleteAction();
+  action_ptr_->CompleteAction();
   EXPECT_TRUE(delegate_.processing_done_called_);
   EXPECT_TRUE(delegate_.action_completed_called_);
 }
 
 TEST_F(ActionProcessorTest, StopProcessingTest) {
-  action_processor_.EnqueueAction(&action_);
+  action_processor_.EnqueueAction(std::move(action_));
   action_processor_.StartProcessing();
   action_processor_.StopProcessing();
   EXPECT_TRUE(delegate_.processing_stopped_called_);
@@ -150,54 +154,58 @@
   // This test doesn't use a delegate since it terminates several actions.
   action_processor_.set_delegate(nullptr);
 
-  ActionProcessorTestAction action1, action2;
-  action_processor_.EnqueueAction(&action1);
-  action_processor_.EnqueueAction(&action2);
+  auto action0 = std::make_unique<ActionProcessorTestAction>();
+  auto action1 = std::make_unique<ActionProcessorTestAction>();
+  auto action2 = std::make_unique<ActionProcessorTestAction>();
+  auto action0_ptr = action0.get();
+  auto action1_ptr = action1.get();
+  auto action2_ptr = action2.get();
+  action_processor_.EnqueueAction(std::move(action0));
+  action_processor_.EnqueueAction(std::move(action1));
+  action_processor_.EnqueueAction(std::move(action2));
+
+  EXPECT_EQ(action_processor_.actions_.size(), 3u);
+  EXPECT_EQ(action_processor_.actions_[0].get(), action0_ptr);
+  EXPECT_EQ(action_processor_.actions_[1].get(), action1_ptr);
+  EXPECT_EQ(action_processor_.actions_[2].get(), action2_ptr);
+
   action_processor_.StartProcessing();
-  EXPECT_EQ(&action1, action_processor_.current_action());
+  EXPECT_EQ(action0_ptr, action_processor_.current_action());
   EXPECT_TRUE(action_processor_.IsRunning());
-  action1.CompleteAction();
-  EXPECT_EQ(&action2, action_processor_.current_action());
+  action0_ptr->CompleteAction();
+  EXPECT_EQ(action1_ptr, action_processor_.current_action());
   EXPECT_TRUE(action_processor_.IsRunning());
-  action2.CompleteAction();
+  action1_ptr->CompleteAction();
+  EXPECT_EQ(action2_ptr, action_processor_.current_action());
+  EXPECT_TRUE(action_processor_.actions_.empty());
+  EXPECT_TRUE(action_processor_.IsRunning());
+  action2_ptr->CompleteAction();
   EXPECT_EQ(nullptr, action_processor_.current_action());
+  EXPECT_TRUE(action_processor_.actions_.empty());
   EXPECT_FALSE(action_processor_.IsRunning());
 }
 
-TEST_F(ActionProcessorTest, DtorTest) {
-  ActionProcessorTestAction action1, action2;
-  {
-    ActionProcessor action_processor;
-    action_processor.EnqueueAction(&action1);
-    action_processor.EnqueueAction(&action2);
-    action_processor.StartProcessing();
-  }
-  EXPECT_EQ(nullptr, action1.processor());
-  EXPECT_FALSE(action1.IsRunning());
-  EXPECT_EQ(nullptr, action2.processor());
-  EXPECT_FALSE(action2.IsRunning());
-}
-
 TEST_F(ActionProcessorTest, DefaultDelegateTest) {
-  // Just make sure it doesn't crash
-  action_processor_.EnqueueAction(&action_);
+  // Just make sure it doesn't crash.
+  action_processor_.EnqueueAction(std::move(action_));
   action_processor_.StartProcessing();
-  action_.CompleteAction();
+  action_ptr_->CompleteAction();
 
-  action_processor_.EnqueueAction(&action_);
+  action_.reset(new ActionProcessorTestAction());
+  action_processor_.EnqueueAction(std::move(action_));
   action_processor_.StartProcessing();
   action_processor_.StopProcessing();
 }
 
-// This test suspends and resume the action processor while running one action_.
+// This test suspends and resumes the action processor while running one action.
 TEST_F(ActionProcessorTest, SuspendResumeTest) {
-  action_processor_.EnqueueAction(&mock_action_);
+  action_processor_.EnqueueAction(std::move(mock_action_));
 
   testing::InSequence s;
-  EXPECT_CALL(mock_action_, PerformAction());
+  EXPECT_CALL(*mock_action_ptr_, PerformAction());
   action_processor_.StartProcessing();
 
-  EXPECT_CALL(mock_action_, SuspendAction());
+  EXPECT_CALL(*mock_action_ptr_, SuspendAction());
   action_processor_.SuspendProcessing();
   // Suspending the processor twice should not suspend the action twice.
   action_processor_.SuspendProcessing();
@@ -205,32 +213,31 @@
   // IsRunning should return whether there's is an action doing some work, even
   // if it is suspended.
   EXPECT_TRUE(action_processor_.IsRunning());
-  EXPECT_EQ(&mock_action_, action_processor_.current_action());
+  EXPECT_EQ(mock_action_ptr_, action_processor_.current_action());
 
-  EXPECT_CALL(mock_action_, ResumeAction());
+  EXPECT_CALL(*mock_action_ptr_, ResumeAction());
   action_processor_.ResumeProcessing();
 
   // Calling ResumeProcessing twice should not affect the action_.
   action_processor_.ResumeProcessing();
-
-  action_processor_.ActionComplete(&mock_action_, ErrorCode::kSuccess);
+  action_processor_.ActionComplete(mock_action_ptr_, ErrorCode::kSuccess);
 }
 
 // This test suspends an action that presumably doesn't support suspend/resume
 // and it finished before being resumed.
 TEST_F(ActionProcessorTest, ActionCompletedWhileSuspendedTest) {
-  action_processor_.EnqueueAction(&mock_action_);
+  action_processor_.EnqueueAction(std::move(mock_action_));
 
   testing::InSequence s;
-  EXPECT_CALL(mock_action_, PerformAction());
+  EXPECT_CALL(*mock_action_ptr_, PerformAction());
   action_processor_.StartProcessing();
 
-  EXPECT_CALL(mock_action_, SuspendAction());
+  EXPECT_CALL(*mock_action_ptr_, SuspendAction());
   action_processor_.SuspendProcessing();
 
   // Simulate the action completion while suspended. No other call to
   // |mock_action_| is expected at this point.
-  action_processor_.ActionComplete(&mock_action_, ErrorCode::kSuccess);
+  action_processor_.ActionComplete(mock_action_ptr_, ErrorCode::kSuccess);
 
   // The processing should not be done since the ActionProcessor is suspended
   // and the processing is considered to be still running until resumed.
@@ -243,15 +250,15 @@
 }
 
 TEST_F(ActionProcessorTest, StoppedWhileSuspendedTest) {
-  action_processor_.EnqueueAction(&mock_action_);
+  action_processor_.EnqueueAction(std::move(mock_action_));
 
   testing::InSequence s;
-  EXPECT_CALL(mock_action_, PerformAction());
+  EXPECT_CALL(*mock_action_ptr_, PerformAction());
   action_processor_.StartProcessing();
-  EXPECT_CALL(mock_action_, SuspendAction());
+  EXPECT_CALL(*mock_action_ptr_, SuspendAction());
   action_processor_.SuspendProcessing();
 
-  EXPECT_CALL(mock_action_, TerminateProcessing());
+  EXPECT_CALL(*mock_action_ptr_, TerminateProcessing());
   action_processor_.StopProcessing();
   // Stopping the processing should abort the current execution no matter what.
   EXPECT_TRUE(delegate_.processing_stopped_called_);
diff --git a/common/action_unittest.cc b/common/action_unittest.cc
index dcdce17..ca48bee 100644
--- a/common/action_unittest.cc
+++ b/common/action_unittest.cc
@@ -16,8 +16,11 @@
 
 #include "update_engine/common/action.h"
 
-#include <gtest/gtest.h>
 #include <string>
+#include <utility>
+
+#include <gtest/gtest.h>
+
 #include "update_engine/common/action_processor.h"
 
 using std::string;
@@ -28,7 +31,7 @@
 
 class ActionTestAction;
 
-template<>
+template <>
 class ActionTraits<ActionTestAction> {
  public:
   typedef string OutputObjectType;
@@ -51,26 +54,24 @@
   string Type() const { return "ActionTestAction"; }
 };
 
-class ActionTest : public ::testing::Test { };
+class ActionTest : public ::testing::Test {};
 
 // This test creates two simple Actions and sends a message via an ActionPipe
 // from one to the other.
 TEST(ActionTest, SimpleTest) {
-  ActionTestAction action;
-
-  EXPECT_FALSE(action.in_pipe());
-  EXPECT_FALSE(action.out_pipe());
-  EXPECT_FALSE(action.processor());
-  EXPECT_FALSE(action.IsRunning());
+  auto action = std::make_unique<ActionTestAction>();
+  auto action_ptr = action.get();
+  EXPECT_FALSE(action->in_pipe());
+  EXPECT_FALSE(action->out_pipe());
+  EXPECT_FALSE(action->processor());
+  EXPECT_FALSE(action->IsRunning());
 
   ActionProcessor action_processor;
-  action_processor.EnqueueAction(&action);
-  EXPECT_EQ(&action_processor, action.processor());
-
+  action_processor.EnqueueAction(std::move(action));
+  EXPECT_EQ(&action_processor, action_ptr->processor());
   action_processor.StartProcessing();
-  EXPECT_TRUE(action.IsRunning());
-  action.CompleteAction();
-  EXPECT_FALSE(action.IsRunning());
+  EXPECT_TRUE(action_ptr->IsRunning());
+  action_ptr->CompleteAction();
 }
 
 }  // namespace chromeos_update_engine
diff --git a/common/boot_control_interface.h b/common/boot_control_interface.h
index 659b388..392d785 100644
--- a/common/boot_control_interface.h
+++ b/common/boot_control_interface.h
@@ -18,7 +18,9 @@
 #define UPDATE_ENGINE_COMMON_BOOT_CONTROL_INTERFACE_H_
 
 #include <climits>
+#include <map>
 #include <string>
+#include <vector>
 
 #include <base/callback.h>
 #include <base/macros.h>
@@ -33,6 +35,19 @@
  public:
   using Slot = unsigned int;
 
+  struct PartitionMetadata {
+    struct Partition {
+      std::string name;
+      uint64_t size;
+    };
+    struct Group {
+      std::string name;
+      uint64_t size;
+      std::vector<Partition> partitions;
+    };
+    std::vector<Group> groups;
+  };
+
   static const Slot kInvalidSlot = UINT_MAX;
 
   virtual ~BootControlInterface() = default;
@@ -51,7 +66,9 @@
   // Determines the block device for the given partition name and slot number.
   // The |slot| number must be between 0 and GetNumSlots() - 1 and the
   // |partition_name| is a platform-specific name that identifies a partition on
-  // every slot. On success, returns true and stores the block device in
+  // every slot. In order to access the dynamic partitions in the target slot,
+  // InitPartitionMetadata() must be called (once per payload) prior to calling
+  // this function. On success, returns true and stores the block device in
   // |device|.
   virtual bool GetPartitionDevice(const std::string& partition_name,
                                   Slot slot,
@@ -77,6 +94,18 @@
   // of the operation.
   virtual bool MarkBootSuccessfulAsync(base::Callback<void(bool)> callback) = 0;
 
+  // Initializes the metadata of the underlying partitions for a given |slot|
+  // and sets up the state needed to access dynamic partitions.
+  // |partition_metadata| will be written to the specified |slot| if
+  // |update_metadata| is set.
+  virtual bool InitPartitionMetadata(
+      Slot slot,
+      const PartitionMetadata& partition_metadata,
+      bool update_metadata) = 0;
+
+  // Performs any necessary clean-up operations after the whole update.
+  virtual void Cleanup() = 0;
+
   // Return a human-readable slot name used for logging.
   static std::string SlotName(Slot slot) {
     if (slot == kInvalidSlot)
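A minimal sketch of the dynamic-partition flow declared above, assuming only the BootControlInterface declarations in this header; the group name, partition name, and sizes are illustrative, and PrepareTargetSlot is a hypothetical helper, not part of this change.

// Sketch only: populate PartitionMetadata and initialize it before resolving
// a dynamic partition device in the target slot.
#include <string>

#include "update_engine/common/boot_control_interface.h"

namespace chromeos_update_engine {

bool PrepareTargetSlot(BootControlInterface* boot_control,
                       BootControlInterface::Slot target_slot) {
  // One group holding one 1 MiB dynamic partition; real callers derive this
  // layout from the payload manifest.
  BootControlInterface::PartitionMetadata metadata;
  metadata.groups = {{"example_group", 1 << 20, {{"system", 1 << 20}}}};

  // InitPartitionMetadata() must be called (once per payload) before
  // GetPartitionDevice() can resolve dynamic partitions in the target slot.
  if (!boot_control->InitPartitionMetadata(
          target_slot, metadata, true /* update_metadata */)) {
    return false;
  }

  std::string device;
  return boot_control->GetPartitionDevice("system", target_slot, &device);
}

}  // namespace chromeos_update_engine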
diff --git a/common/boot_control_stub.cc b/common/boot_control_stub.cc
index 2de0c82..0fe8a98 100644
--- a/common/boot_control_stub.cc
+++ b/common/boot_control_stub.cc
@@ -59,4 +59,16 @@
   return false;
 }
 
+bool BootControlStub::InitPartitionMetadata(
+    Slot slot,
+    const PartitionMetadata& partition_metadata,
+    bool update_metadata) {
+  LOG(ERROR) << __FUNCTION__ << " should never be called.";
+  return false;
+}
+
+void BootControlStub::Cleanup() {
+  LOG(ERROR) << __FUNCTION__ << " should never be called.";
+}
+
 }  // namespace chromeos_update_engine
diff --git a/common/boot_control_stub.h b/common/boot_control_stub.h
index 7832adc..8dfaffc 100644
--- a/common/boot_control_stub.h
+++ b/common/boot_control_stub.h
@@ -27,7 +27,7 @@
 // typically used when e.g. an underlying HAL implementation cannot be
 // loaded or doesn't exist.
 //
-// You are gauranteed that the implementation of GetNumSlots() method
+// You are guaranteed that the implementation of GetNumSlots() method
 // always returns 0. This can be used to identify that this
 // implementation is in use.
 class BootControlStub : public BootControlInterface {
@@ -45,6 +45,10 @@
   bool MarkSlotUnbootable(BootControlInterface::Slot slot) override;
   bool SetActiveBootSlot(BootControlInterface::Slot slot) override;
   bool MarkBootSuccessfulAsync(base::Callback<void(bool)> callback) override;
+  bool InitPartitionMetadata(Slot slot,
+                             const PartitionMetadata& partition_metadata,
+                             bool update_metadata) override;
+  void Cleanup() override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(BootControlStub);
diff --git a/common/clock.cc b/common/clock.cc
index f0eff44..05c495c 100644
--- a/common/clock.cc
+++ b/common/clock.cc
@@ -36,7 +36,7 @@
   }
   struct timeval now_tv;
   now_tv.tv_sec = now_ts.tv_sec;
-  now_tv.tv_usec = now_ts.tv_nsec/base::Time::kNanosecondsPerMicrosecond;
+  now_tv.tv_usec = now_ts.tv_nsec / base::Time::kNanosecondsPerMicrosecond;
   return base::Time::FromTimeVal(now_tv);
 }
 
@@ -51,7 +51,7 @@
   }
   struct timeval now_tv;
   now_tv.tv_sec = now_ts.tv_sec;
-  now_tv.tv_usec = now_ts.tv_nsec/base::Time::kNanosecondsPerMicrosecond;
+  now_tv.tv_usec = now_ts.tv_nsec / base::Time::kNanosecondsPerMicrosecond;
   return base::Time::FromTimeVal(now_tv);
 }
 
diff --git a/common/constants.cc b/common/constants.cc
index 5941c93..310f1b2 100644
--- a/common/constants.cc
+++ b/common/constants.cc
@@ -37,6 +37,8 @@
 const char kPrefsDailyMetricsLastReportedAt[] =
     "daily-metrics-last-reported-at";
 const char kPrefsDeltaUpdateFailures[] = "delta-update-failures";
+const char kPrefsDynamicPartitionMetadataUpdated[] =
+    "dynamic-partition-metadata-updated";
 const char kPrefsFullPayloadAttemptNumber[] = "full-payload-attempt-number";
 const char kPrefsInstallDateDays[] = "install-date-days";
 const char kPrefsLastActivePingDay[] = "last-active-ping-day";
@@ -47,6 +49,7 @@
     "metrics-attempt-last-reporting-time";
 const char kPrefsMetricsCheckLastReportingTime[] =
     "metrics-check-last-reporting-time";
+const char kPrefsNoIgnoreBackoff[] = "no-ignore-backoff";
 const char kPrefsNumReboots[] = "num-reboots";
 const char kPrefsNumResponsesSeen[] = "num-responses-seen";
 const char kPrefsOmahaCohort[] = "omaha-cohort";
@@ -60,6 +63,7 @@
 const char kPrefsPostInstallSucceeded[] = "post-install-succeeded";
 const char kPrefsPreviousVersion[] = "previous-version";
 const char kPrefsResumedUpdateFailures[] = "resumed-update-failures";
+const char kPrefsRollbackHappened[] = "rollback-happened";
 const char kPrefsRollbackVersion[] = "rollback-version";
 const char kPrefsChannelOnSlotPrefix[] = "channel-on-slot-";
 const char kPrefsSystemUpdatedMarker[] = "system-updated-marker";
@@ -75,6 +79,10 @@
 const char kPrefsUpdateFirstSeenAt[] = "update-first-seen-at";
 const char kPrefsUpdateOverCellularPermission[] =
     "update-over-cellular-permission";
+const char kPrefsUpdateOverCellularTargetVersion[] =
+    "update-over-cellular-target-version";
+const char kPrefsUpdateOverCellularTargetSize[] =
+    "update-over-cellular-target-size";
 const char kPrefsUpdateServerCertificate[] = "update-server-cert";
 const char kPrefsUpdateStateNextDataLength[] = "update-state-next-data-length";
 const char kPrefsUpdateStateNextDataOffset[] = "update-state-next-data-offset";
@@ -84,9 +92,13 @@
 const char kPrefsUpdateStateSignatureBlob[] = "update-state-signature-blob";
 const char kPrefsUpdateStateSignedSHA256Context[] =
     "update-state-signed-sha-256-context";
+const char kPrefsUpdateBootTimestampStart[] = "update-boot-timestamp-start";
 const char kPrefsUpdateTimestampStart[] = "update-timestamp-start";
 const char kPrefsUrlSwitchCount[] = "url-switch-count";
-const char kPrefsWallClockWaitPeriod[] = "wall-clock-wait-period";
+const char kPrefsVerityWritten[] = "verity-written";
+const char kPrefsWallClockScatteringWaitPeriod[] = "wall-clock-wait-period";
+const char kPrefsWallClockStagingWaitPeriod[] =
+    "wall-clock-staging-wait-period";
 
 // These four fields are generated by scripts/brillo_update_payload.
 const char kPayloadPropertyFileSize[] = "FILE_SIZE";
diff --git a/common/constants.h b/common/constants.h
index 26773cf..d5a8ae3 100644
--- a/common/constants.h
+++ b/common/constants.h
@@ -41,6 +41,7 @@
 extern const char kPrefsCurrentUrlIndex[];
 extern const char kPrefsDailyMetricsLastReportedAt[];
 extern const char kPrefsDeltaUpdateFailures[];
+extern const char kPrefsDynamicPartitionMetadataUpdated[];
 extern const char kPrefsFullPayloadAttemptNumber[];
 extern const char kPrefsInstallDateDays[];
 extern const char kPrefsLastActivePingDay[];
@@ -49,6 +50,7 @@
 extern const char kPrefsManifestSignatureSize[];
 extern const char kPrefsMetricsAttemptLastReportingTime[];
 extern const char kPrefsMetricsCheckLastReportingTime[];
+extern const char kPrefsNoIgnoreBackoff[];
 extern const char kPrefsNumReboots[];
 extern const char kPrefsNumResponsesSeen[];
 extern const char kPrefsOmahaCohort[];
@@ -62,6 +64,7 @@
 extern const char kPrefsPostInstallSucceeded[];
 extern const char kPrefsPreviousVersion[];
 extern const char kPrefsResumedUpdateFailures[];
+extern const char kPrefsRollbackHappened[];
 extern const char kPrefsRollbackVersion[];
 extern const char kPrefsChannelOnSlotPrefix[];
 extern const char kPrefsSystemUpdatedMarker[];
@@ -76,6 +79,8 @@
 extern const char kPrefsUpdateDurationUptime[];
 extern const char kPrefsUpdateFirstSeenAt[];
 extern const char kPrefsUpdateOverCellularPermission[];
+extern const char kPrefsUpdateOverCellularTargetVersion[];
+extern const char kPrefsUpdateOverCellularTargetSize[];
 extern const char kPrefsUpdateServerCertificate[];
 extern const char kPrefsUpdateStateNextDataLength[];
 extern const char kPrefsUpdateStateNextDataOffset[];
@@ -84,9 +89,12 @@
 extern const char kPrefsUpdateStateSHA256Context[];
 extern const char kPrefsUpdateStateSignatureBlob[];
 extern const char kPrefsUpdateStateSignedSHA256Context[];
+extern const char kPrefsUpdateBootTimestampStart[];
 extern const char kPrefsUpdateTimestampStart[];
 extern const char kPrefsUrlSwitchCount[];
-extern const char kPrefsWallClockWaitPeriod[];
+extern const char kPrefsVerityWritten[];
+extern const char kPrefsWallClockScatteringWaitPeriod[];
+extern const char kPrefsWallClockStagingWaitPeriod[];
 
 // Keys used when storing and loading payload properties.
 extern const char kPayloadPropertyFileSize[];
@@ -164,9 +172,10 @@
 //
 // For non-official builds (e.g. typically built on a developer's
 // workstation and served via devserver) bump this since it takes time
-// for the workstation to generate the payload. For p2p, make this
-// relatively low since we want to fail fast.
-const int kDownloadLowSpeedTimeSeconds = 90;
+// for the workstation to generate the payload. For normal operation
+// and p2p, make this relatively low since we want to fail fast in
+// those cases.
+const int kDownloadLowSpeedTimeSeconds = 30;
 const int kDownloadDevModeLowSpeedTimeSeconds = 180;
 const int kDownloadP2PLowSpeedTimeSeconds = 60;
 
diff --git a/common/dlcservice.h b/common/dlcservice.h
new file mode 100644
index 0000000..9dae560
--- /dev/null
+++ b/common/dlcservice.h
@@ -0,0 +1,32 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_COMMON_DLCSERVICE_H_
+#define UPDATE_ENGINE_COMMON_DLCSERVICE_H_
+
+#include <memory>
+
+#include "update_engine/common/dlcservice_interface.h"
+
+namespace chromeos_update_engine {
+
+// This factory function creates a new DlcServiceInterface instance for the
+// current platform.
+std::unique_ptr<DlcServiceInterface> CreateDlcService();
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_COMMON_DLCSERVICE_H_
diff --git a/common/dlcservice_interface.h b/common/dlcservice_interface.h
new file mode 100644
index 0000000..aa24105
--- /dev/null
+++ b/common/dlcservice_interface.h
@@ -0,0 +1,46 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_COMMON_DLCSERVICE_INTERFACE_H_
+#define UPDATE_ENGINE_COMMON_DLCSERVICE_INTERFACE_H_
+
+#include <string>
+#include <vector>
+
+#include <base/macros.h>
+
+namespace chromeos_update_engine {
+
+// The abstract dlcservice interface defines the interaction with the
+// platform's dlcservice.
+class DlcServiceInterface {
+ public:
+  virtual ~DlcServiceInterface() = default;
+
+  // Returns true and a list of installed DLC module ids in |dlc_module_ids|.
+  // On failure it returns false.
+  virtual bool GetInstalled(std::vector<std::string>* dlc_module_ids) = 0;
+
+ protected:
+  DlcServiceInterface() = default;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(DlcServiceInterface);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_COMMON_DLCSERVICE_INTERFACE_H_
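A minimal sketch of how a caller might use this interface, assuming the CreateDlcService() factory declared in common/dlcservice.h above; LogInstalledDlcModules is a hypothetical helper for illustration only (the stub added below always reports an empty list).

// Sketch only: query the platform dlcservice for installed DLC modules.
#include <memory>
#include <string>
#include <vector>

#include <base/logging.h>

#include "update_engine/common/dlcservice.h"
#include "update_engine/common/dlcservice_interface.h"

namespace chromeos_update_engine {

void LogInstalledDlcModules() {
  std::unique_ptr<DlcServiceInterface> dlcservice = CreateDlcService();
  std::vector<std::string> dlc_module_ids;
  if (!dlcservice->GetInstalled(&dlc_module_ids)) {
    LOG(ERROR) << "Failed to query installed DLC modules.";
    return;
  }
  LOG(INFO) << "Found " << dlc_module_ids.size() << " installed DLC module(s).";
}

}  // namespace chromeos_update_engine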
diff --git a/common/dlcservice_stub.cc b/common/dlcservice_stub.cc
new file mode 100644
index 0000000..c5f9306
--- /dev/null
+++ b/common/dlcservice_stub.cc
@@ -0,0 +1,36 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/common/dlcservice_stub.h"
+
+#include <memory>
+
+using std::string;
+using std::vector;
+
+namespace chromeos_update_engine {
+
+std::unique_ptr<DlcServiceInterface> CreateDlcService() {
+  return std::make_unique<DlcServiceStub>();
+}
+
+bool DlcServiceStub::GetInstalled(std::vector<std::string>* dlc_module_ids) {
+  if (dlc_module_ids)
+    dlc_module_ids->clear();
+  return true;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/common/dlcservice_stub.h b/common/dlcservice_stub.h
new file mode 100644
index 0000000..4e12c11
--- /dev/null
+++ b/common/dlcservice_stub.h
@@ -0,0 +1,42 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_COMMON_DLCSERVICE_STUB_H_
+#define UPDATE_ENGINE_COMMON_DLCSERVICE_STUB_H_
+
+#include <string>
+#include <vector>
+
+#include "update_engine/common/dlcservice_interface.h"
+
+namespace chromeos_update_engine {
+
+// An implementation of the DlcServiceInterface that does nothing.
+class DlcServiceStub : public DlcServiceInterface {
+ public:
+  DlcServiceStub() = default;
+  ~DlcServiceStub() = default;
+
+  // DlcServiceInterface overrides.
+  bool GetInstalled(std::vector<std::string>* dlc_module_ids) override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(DlcServiceStub);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_COMMON_DLCSERVICE_STUB_H_
diff --git a/common/error_code.h b/common/error_code.h
index 0b08005..252cc42 100644
--- a/common/error_code.h
+++ b/common/error_code.h
@@ -73,13 +73,17 @@
   kFilesystemVerifierError = 47,
   kUserCanceled = 48,
   kNonCriticalUpdateInOOBE = 49,
-  // kOmahaUpdateIgnoredOverCellular = 50,
+  kOmahaUpdateIgnoredOverCellular = 50,
   kPayloadTimestampError = 51,
   kUpdatedButNotActive = 52,
+  kNoUpdate = 53,
+  kRollbackNotPossible = 54,
+  kFirstActiveOmahaPingSentPersistenceError = 55,
+  kVerityCalculationError = 56,
 
   // VERY IMPORTANT! When adding new error codes:
   //
-  // 1) Update tools/metrics/histograms/histograms.xml in Chrome.
+  // 1) Update tools/metrics/histograms/enums.xml in Chrome.
   //
   // 2) Update the assorted switch statements in update_engine which won't
   //    build until this case is added.
@@ -115,7 +119,7 @@
   // modify the implementation of ErrorCode into a properly encapsulated class.
   kDevModeFlag = 1 << 31,
 
-  // Set if resuming an interruped update.
+  // Set if resuming an interrupted update.
   kResumedFlag = 1 << 30,
 
   // Set if using a dev/test image as opposed to an MP-signed image.
diff --git a/common/error_code_utils.cc b/common/error_code_utils.cc
index 313a15f..b0bbbd4 100644
--- a/common/error_code_utils.cc
+++ b/common/error_code_utils.cc
@@ -30,12 +30,15 @@
   // doesn't support any combinations of those.
   if ((static_cast<int>(code) & static_cast<int>(ErrorCode::kSpecialFlags)) &&
       (static_cast<int>(code) & ~static_cast<int>(ErrorCode::kSpecialFlags)))
-    code = static_cast<ErrorCode>(
-        static_cast<int>(code) & ~static_cast<int>(ErrorCode::kSpecialFlags));
+    code = static_cast<ErrorCode>(static_cast<int>(code) &
+                                  ~static_cast<int>(ErrorCode::kSpecialFlags));
   switch (code) {
-    case ErrorCode::kSuccess: return "ErrorCode::kSuccess";
-    case ErrorCode::kError: return "ErrorCode::kError";
-    case ErrorCode::kOmahaRequestError: return "ErrorCode::kOmahaRequestError";
+    case ErrorCode::kSuccess:
+      return "ErrorCode::kSuccess";
+    case ErrorCode::kError:
+      return "ErrorCode::kError";
+    case ErrorCode::kOmahaRequestError:
+      return "ErrorCode::kOmahaRequestError";
     case ErrorCode::kOmahaResponseHandlerError:
       return "ErrorCode::kOmahaResponseHandlerError";
     case ErrorCode::kFilesystemCopierError:
@@ -144,10 +147,20 @@
       return "ErrorCode::kUserCanceled";
     case ErrorCode::kNonCriticalUpdateInOOBE:
       return "ErrorCode::kNonCriticalUpdateInOOBE";
+    case ErrorCode::kOmahaUpdateIgnoredOverCellular:
+      return "ErrorCode::kOmahaUpdateIgnoredOverCellular";
     case ErrorCode::kPayloadTimestampError:
       return "ErrorCode::kPayloadTimestampError";
     case ErrorCode::kUpdatedButNotActive:
       return "ErrorCode::kUpdatedButNotActive";
+    case ErrorCode::kNoUpdate:
+      return "ErrorCode::kNoUpdate";
+    case ErrorCode::kRollbackNotPossible:
+      return "ErrorCode::kRollbackNotPossible";
+    case ErrorCode::kFirstActiveOmahaPingSentPersistenceError:
+      return "ErrorCode::kFirstActiveOmahaPingSentPersistenceError";
+    case ErrorCode::kVerityCalculationError:
+      return "ErrorCode::kVerityCalculationError";
       // Don't add a default case to let the compiler warn about newly added
       // error codes which should be added here.
   }
diff --git a/common/fake_boot_control.h b/common/fake_boot_control.h
index 3eccc80..3d65075 100644
--- a/common/fake_boot_control.h
+++ b/common/fake_boot_control.h
@@ -74,6 +74,14 @@
     return true;
   }
 
+  bool InitPartitionMetadata(Slot slot,
+                             const PartitionMetadata& partition_metadata,
+                             bool update_metadata) override {
+    return true;
+  }
+
+  void Cleanup() override {}
+
   // Setters
   void SetNumSlots(unsigned int num_slots) {
     num_slots_ = num_slots;
@@ -81,9 +89,7 @@
     devices_.resize(num_slots_);
   }
 
-  void SetCurrentSlot(BootControlInterface::Slot slot) {
-    current_slot_ = slot;
-  }
+  void SetCurrentSlot(BootControlInterface::Slot slot) { current_slot_ = slot; }
 
   void SetPartitionDevice(const std::string& partition_name,
                           BootControlInterface::Slot slot,
diff --git a/common/fake_clock.h b/common/fake_clock.h
index 3d3bad8..165ec4d 100644
--- a/common/fake_clock.h
+++ b/common/fake_clock.h
@@ -26,29 +26,17 @@
  public:
   FakeClock() {}
 
-  base::Time GetWallclockTime() override {
-    return wallclock_time_;
-  }
+  base::Time GetWallclockTime() override { return wallclock_time_; }
 
-  base::Time GetMonotonicTime() override {
-    return monotonic_time_;
-  }
+  base::Time GetMonotonicTime() override { return monotonic_time_; }
 
-  base::Time GetBootTime() override {
-    return boot_time_;
-  }
+  base::Time GetBootTime() override { return boot_time_; }
 
-  void SetWallclockTime(const base::Time &time) {
-    wallclock_time_ = time;
-  }
+  void SetWallclockTime(const base::Time& time) { wallclock_time_ = time; }
 
-  void SetMonotonicTime(const base::Time &time) {
-    monotonic_time_ = time;
-  }
+  void SetMonotonicTime(const base::Time& time) { monotonic_time_ = time; }
 
-  void SetBootTime(const base::Time &time) {
-    boot_time_ = time;
-  }
+  void SetBootTime(const base::Time& time) { boot_time_ = time; }
 
  private:
   base::Time wallclock_time_;
diff --git a/common/fake_hardware.h b/common/fake_hardware.h
index f2b2c9d..3e5a66e 100644
--- a/common/fake_hardware.h
+++ b/common/fake_hardware.h
@@ -34,6 +34,24 @@
   // false.
   static const int kPowerwashCountNotSet = -1;
 
+  // Default value for crossystem tpm_kernver.
+  static const int kMinKernelKeyVersion = 3;
+
+  // Default value for crossystem tpm_fwver.
+  static const int kMinFirmwareKeyVersion = 13;
+
+  // Default value for crossystem kernel_max_rollforward. This value is the
+  // default for consumer devices and effectively means "unlimited rollforward
+  // is allowed", which is the same as the behavior prior to implementing
+  // roll forward prevention.
+  static const int kKernelMaxRollforward = 0xfffffffe;
+
+  // Default value for crossystem firmware_max_rollforward. This value is the
+  // default for consumer devices and effectively means "unlimited rollforward
+  // is allowed", which is the same as the behavior prior to implementing
+  // roll forward prevention.
+  static const int kFirmwareMaxRollforward = 0xfffffffe;
+
   FakeHardware() = default;
 
   // HardwareInterface methods.
@@ -59,15 +77,42 @@
 
   std::string GetECVersion() const override { return ec_version_; }
 
+  int GetMinKernelKeyVersion() const override {
+    return min_kernel_key_version_;
+  }
+
+  int GetMinFirmwareKeyVersion() const override {
+    return min_firmware_key_version_;
+  }
+
+  int GetMaxFirmwareKeyRollforward() const override {
+    return firmware_max_rollforward_;
+  }
+
+  bool SetMaxFirmwareKeyRollforward(int firmware_max_rollforward) override {
+    if (GetMaxFirmwareKeyRollforward() == -1)
+      return false;
+
+    firmware_max_rollforward_ = firmware_max_rollforward;
+    return true;
+  }
+
+  bool SetMaxKernelKeyRollforward(int kernel_max_rollforward) override {
+    kernel_max_rollforward_ = kernel_max_rollforward;
+    return true;
+  }
+
   int GetPowerwashCount() const override { return powerwash_count_; }
 
-  bool SchedulePowerwash() override {
+  bool SchedulePowerwash(bool is_rollback) override {
     powerwash_scheduled_ = true;
+    is_rollback_powerwash_ = is_rollback;
     return true;
   }
 
   bool CancelPowerwash() override {
     powerwash_scheduled_ = false;
+    is_rollback_powerwash_ = false;
     return true;
   }
 
@@ -87,8 +132,9 @@
     return first_active_omaha_ping_sent_;
   }
 
-  void SetFirstActiveOmahaPingSent() override {
+  bool SetFirstActiveOmahaPingSent() override {
     first_active_omaha_ping_sent_ = true;
+    return true;
   }
 
   // Setters
@@ -115,9 +161,7 @@
     oobe_timestamp_ = oobe_timestamp;
   }
 
-  void UnsetIsOOBEComplete() {
-    is_oobe_complete_ = false;
-  }
+  void UnsetIsOOBEComplete() { is_oobe_complete_ = false; }
 
   void SetHardwareClass(const std::string& hardware_class) {
     hardware_class_ = hardware_class;
@@ -127,8 +171,14 @@
     firmware_version_ = firmware_version;
   }
 
-  void SetECVersion(const std::string& ec_version) {
-    ec_version_ = ec_version;
+  void SetECVersion(const std::string& ec_version) { ec_version_ = ec_version; }
+
+  void SetMinKernelKeyVersion(int min_kernel_key_version) {
+    min_kernel_key_version_ = min_kernel_key_version;
+  }
+
+  void SetMinFirmwareKeyVersion(int min_firmware_key_version) {
+    min_firmware_key_version_ = min_firmware_key_version;
   }
 
   void SetPowerwashCount(int powerwash_count) {
@@ -139,18 +189,31 @@
     build_timestamp_ = build_timestamp;
   }
 
+  // Getters to verify state.
+  int GetMaxKernelKeyRollforward() const { return kernel_max_rollforward_; }
+
+  bool GetIsRollbackPowerwashScheduled() const {
+    return powerwash_scheduled_ && is_rollback_powerwash_;
+  }
+
  private:
   bool is_official_build_{true};
   bool is_normal_boot_mode_{true};
   bool are_dev_features_enabled_{false};
   bool is_oobe_enabled_{true};
   bool is_oobe_complete_{true};
-  base::Time oobe_timestamp_{base::Time::FromTimeT(1169280000)}; // Jan 20, 2007
+  // Jan 20, 2007
+  base::Time oobe_timestamp_{base::Time::FromTimeT(1169280000)};
   std::string hardware_class_{"Fake HWID BLAH-1234"};
   std::string firmware_version_{"Fake Firmware v1.0.1"};
   std::string ec_version_{"Fake EC v1.0a"};
+  int min_kernel_key_version_{kMinKernelKeyVersion};
+  int min_firmware_key_version_{kMinFirmwareKeyVersion};
+  int kernel_max_rollforward_{kKernelMaxRollforward};
+  int firmware_max_rollforward_{kFirmwareMaxRollforward};
   int powerwash_count_{kPowerwashCountNotSet};
   bool powerwash_scheduled_{false};
+  bool is_rollback_powerwash_{false};
   int64_t build_timestamp_{0};
   bool first_active_omaha_ping_sent_{false};
 
diff --git a/common/fake_prefs.cc b/common/fake_prefs.cc
index 5a0a3af..c446e06 100644
--- a/common/fake_prefs.cc
+++ b/common/fake_prefs.cc
@@ -27,8 +27,8 @@
 namespace {
 
 void CheckNotNull(const string& key, void* ptr) {
-  EXPECT_NE(nullptr, ptr)
-      << "Called Get*() for key \"" << key << "\" with a null parameter.";
+  EXPECT_NE(nullptr, ptr) << "Called Get*() for key \"" << key
+                          << "\" with a null parameter.";
 }
 
 }  // namespace
@@ -40,25 +40,25 @@
 }
 
 // Compile-time type-dependent constants definitions.
-template<>
+template <>
 FakePrefs::PrefType const FakePrefs::PrefConsts<string>::type =
     FakePrefs::PrefType::kString;
-template<>
-string FakePrefs::PrefValue::* const  // NOLINT(runtime/string), not static str.
+template <>
+string FakePrefs::PrefValue::*const  // NOLINT(runtime/string), not static str.
     FakePrefs::PrefConsts<string>::member = &FakePrefs::PrefValue::as_str;
 
-template<>
+template <>
 FakePrefs::PrefType const FakePrefs::PrefConsts<int64_t>::type =
     FakePrefs::PrefType::kInt64;
-template<>
-int64_t FakePrefs::PrefValue::* const FakePrefs::PrefConsts<int64_t>::member =
+template <>
+int64_t FakePrefs::PrefValue::*const FakePrefs::PrefConsts<int64_t>::member =
     &FakePrefs::PrefValue::as_int64;
 
-template<>
+template <>
 FakePrefs::PrefType const FakePrefs::PrefConsts<bool>::type =
     FakePrefs::PrefType::kBool;
-template<>
-bool FakePrefs::PrefValue::* const FakePrefs::PrefConsts<bool>::member =
+template <>
+bool FakePrefs::PrefValue::*const FakePrefs::PrefConsts<bool>::member =
     &FakePrefs::PrefValue::as_bool;
 
 bool FakePrefs::GetString(const string& key, string* value) const {
@@ -124,7 +124,7 @@
       << " but is accessed as a " << GetTypeName(type);
 }
 
-template<typename T>
+template <typename T>
 void FakePrefs::SetValue(const string& key, const T& value) {
   CheckKeyType(key, PrefConsts<T>::type);
   values_[key].type = PrefConsts<T>::type;
@@ -137,7 +137,7 @@
   }
 }
 
-template<typename T>
+template <typename T>
 bool FakePrefs::GetValue(const string& key, T* value) const {
   CheckKeyType(key, PrefConsts<T>::type);
   auto it = values_.find(key);
@@ -157,8 +157,7 @@
   auto observer_it =
       std::find(observers_for_key.begin(), observers_for_key.end(), observer);
   EXPECT_NE(observer_it, observers_for_key.end())
-      << "Trying to remove an observer instance not watching the key "
-      << key;
+      << "Trying to remove an observer instance not watching the key " << key;
   if (observer_it != observers_for_key.end())
     observers_for_key.erase(observer_it);
   if (observers_for_key.empty())
diff --git a/common/fake_prefs.h b/common/fake_prefs.h
index d194060..b1c5b71 100644
--- a/common/fake_prefs.h
+++ b/common/fake_prefs.h
@@ -72,14 +72,14 @@
   };
 
   // Class to store compile-time type-dependent constants.
-  template<typename T>
+  template <typename T>
   class PrefConsts {
    public:
     // The PrefType associated with T.
     static FakePrefs::PrefType const type;
 
     // The data member pointer to PrefValue associated with T.
-    static T FakePrefs::PrefValue::* const member;
+    static T FakePrefs::PrefValue::*const member;
   };
 
   // Returns a string representation of the PrefType useful for logging.
@@ -90,13 +90,13 @@
 
   // Helper function to set a value of the passed |key|. It sets the type based
   // on the template parameter T.
-  template<typename T>
+  template <typename T>
   void SetValue(const std::string& key, const T& value);
 
   // Helper function to get a value from the map checking for invalid calls.
   // The function fails the test if you attempt to read a value  defined as a
   // different type. Returns whether the get succeeded.
-  template<typename T>
+  template <typename T>
   bool GetValue(const std::string& key, T* value) const;
 
   // Container for all the key/value pairs.
diff --git a/common/file_fetcher.cc b/common/file_fetcher.cc
index d0a109b..3836e54 100644
--- a/common/file_fetcher.cc
+++ b/common/file_fetcher.cc
@@ -138,8 +138,9 @@
       delegate_->TransferComplete(this, true);
   } else {
     bytes_copied_ += bytes_read;
-    if (delegate_)
-      delegate_->ReceivedBytes(this, buffer_.data(), bytes_read);
+    if (delegate_ &&
+        !delegate_->ReceivedBytes(this, buffer_.data(), bytes_read))
+      return;
     ScheduleRead();
   }
 }
diff --git a/common/file_fetcher.h b/common/file_fetcher.h
index 2368b1d..fbdfc32 100644
--- a/common/file_fetcher.h
+++ b/common/file_fetcher.h
@@ -57,7 +57,7 @@
 
   // Ignore all extra headers for files.
   void SetHeader(const std::string& header_name,
-                 const std::string& header_value) override {};
+                 const std::string& header_value) override {}
 
   // Suspend the asynchronous file read.
   void Pause() override;
diff --git a/common/hardware_interface.h b/common/hardware_interface.h
index 94442d1..0140588 100644
--- a/common/hardware_interface.h
+++ b/common/hardware_interface.h
@@ -70,14 +70,41 @@
   // running a custom chrome os ec.
   virtual std::string GetECVersion() const = 0;
 
+  // Returns the minimum kernel key version that verified boot on Chrome OS
+  // will allow to boot. This is the value of crossystem tpm_kernver. Returns
+  // -1 on error, or if not running on Chrome OS.
+  virtual int GetMinKernelKeyVersion() const = 0;
+
+  // Returns the minimum firmware key version that verified boot on Chrome OS
+  // will allow to boot. This is the value of crossystem tpm_fwver. Returns
+  // -1 on error, or if not running on Chrome OS.
+  virtual int GetMinFirmwareKeyVersion() const = 0;
+
+  // Returns the maximum firmware key version that verified boot should roll
+  // forward to. This is the value of crossystem firmware_max_rollforward.
+  // Returns -1 on error, if this board does not yet support this value, or
+  // if not running on Chrome OS.
+  virtual int GetMaxFirmwareKeyRollforward() const = 0;
+
+  // Sets the maximum firmware key version that verified boot should roll
+  // forward to. This is the value of crossystem firmware_max_rollforward.
+  // This value is not available on all Chrome OS devices.
+  virtual bool SetMaxFirmwareKeyRollforward(int firmware_max_rollforward) = 0;
+
+  // Sets the maximum kernel key version that verified boot should roll
+  // forward to. This is the value of crossystem kernel_max_rollforward.
+  // Returns false if the value cannot be set, or if not running on Chrome OS.
+  virtual bool SetMaxKernelKeyRollforward(int kernel_max_rollforward) = 0;
+
   // Returns the powerwash_count from the stateful. If the file is not found
   // or is invalid, returns -1. Brand new machines out of the factory or after
   // recovery don't have this value set.
   virtual int GetPowerwashCount() const = 0;
 
   // Signals that a powerwash (stateful partition wipe) should be performed
-  // after reboot.
-  virtual bool SchedulePowerwash() = 0;
+  // after reboot. If |is_rollback| is true, additional state is preserved
+  // during shutdown that can be restored after the powerwash.
+  virtual bool SchedulePowerwash(bool is_rollback) = 0;
 
   // Cancel the powerwash operation scheduled to be performed on next boot.
   virtual bool CancelPowerwash() = 0;
@@ -100,9 +127,9 @@
   // |SetFirstActiveOmahaPingSent()|.
   virtual bool GetFirstActiveOmahaPingSent() const = 0;
 
-  // Persist the fact that first active ping was sent to omaha. It bails out if
-  // it fails.
-  virtual void SetFirstActiveOmahaPingSent() = 0;
+  // Persists the fact that the first active ping was sent to omaha. Returns
+  // false if it failed to persist it.
+  virtual bool SetFirstActiveOmahaPingSent() = 0;
 };
 
 }  // namespace chromeos_update_engine
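A minimal sketch of how the new key-version accessors compose, assuming only the HardwareInterface declarations above; the pin-to-minimum policy and the PinKernelRollforward name are illustrative, not the engine's actual rollback logic.

// Sketch only: limit kernel rollforward to the currently allowed minimum
// key version (crossystem tpm_kernver).
#include "update_engine/common/hardware_interface.h"

namespace chromeos_update_engine {

bool PinKernelRollforward(HardwareInterface* hardware) {
  int min_kernel_key_version = hardware->GetMinKernelKeyVersion();
  if (min_kernel_key_version < 0)
    return false;  // Not running on Chrome OS, or the crossystem lookup failed.
  return hardware->SetMaxKernelKeyRollforward(min_kernel_key_version);
}

}  // namespace chromeos_update_engine

The FakeHardware test double above can stand in for the real implementation when exercising a helper like this in tests.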
diff --git a/common/hash_calculator.cc b/common/hash_calculator.cc
index ebfdb6e..d010a53 100644
--- a/common/hash_calculator.cc
+++ b/common/hash_calculator.cc
@@ -95,7 +95,8 @@
   return RawHashOfBytes(data.data(), data.size(), out_hash);
 }
 
-off_t HashCalculator::RawHashOfFile(const string& name, off_t length,
+off_t HashCalculator::RawHashOfFile(const string& name,
+                                    off_t length,
                                     brillo::Blob* out_hash) {
   HashCalculator calc;
   off_t res = calc.UpdateFile(name, length);
diff --git a/common/hash_calculator.h b/common/hash_calculator.h
index 06d2cfb..b7e4d86 100644
--- a/common/hash_calculator.h
+++ b/common/hash_calculator.h
@@ -71,9 +71,9 @@
   static bool RawHashOfBytes(const void* data,
                              size_t length,
                              brillo::Blob* out_hash);
-  static bool RawHashOfData(const brillo::Blob& data,
-                            brillo::Blob* out_hash);
-  static off_t RawHashOfFile(const std::string& name, off_t length,
+  static bool RawHashOfData(const brillo::Blob& data, brillo::Blob* out_hash);
+  static off_t RawHashOfFile(const std::string& name,
+                             off_t length,
                              brillo::Blob* out_hash);
 
  private:
diff --git a/common/hash_calculator_unittest.cc b/common/hash_calculator_unittest.cc
index 233237b..e8f73d5 100644
--- a/common/hash_calculator_unittest.cc
+++ b/common/hash_calculator_unittest.cc
@@ -26,6 +26,7 @@
 #include <brillo/secure_blob.h>
 #include <gtest/gtest.h>
 
+#include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
 
 using std::string;
@@ -37,16 +38,11 @@
 // $ echo -n hi | openssl dgst -sha256 -binary |
 //   hexdump -v -e '"    " 12/1 "0x%02x, " "\n"'
 static const uint8_t kExpectedRawHash[] = {
-  0x8f, 0x43, 0x43, 0x46, 0x64, 0x8f, 0x6b, 0x96,
-  0xdf, 0x89, 0xdd, 0xa9, 0x01, 0xc5, 0x17, 0x6b,
-  0x10, 0xa6, 0xd8, 0x39, 0x61, 0xdd, 0x3c, 0x1a,
-  0xc8, 0x8b, 0x59, 0xb2, 0xdc, 0x32, 0x7a, 0xa4
-};
+    0x8f, 0x43, 0x43, 0x46, 0x64, 0x8f, 0x6b, 0x96, 0xdf, 0x89, 0xdd,
+    0xa9, 0x01, 0xc5, 0x17, 0x6b, 0x10, 0xa6, 0xd8, 0x39, 0x61, 0xdd,
+    0x3c, 0x1a, 0xc8, 0x8b, 0x59, 0xb2, 0xdc, 0x32, 0x7a, 0xa4};
 
-class HashCalculatorTest : public ::testing::Test {
- public:
-  HashCalculatorTest() {}
-};
+class HashCalculatorTest : public ::testing::Test {};
 
 TEST_F(HashCalculatorTest, SimpleTest) {
   HashCalculator calc;
@@ -54,7 +50,7 @@
   calc.Finalize();
   brillo::Blob raw_hash(std::begin(kExpectedRawHash),
                         std::end(kExpectedRawHash));
-  EXPECT_TRUE(raw_hash == calc.raw_hash());
+  EXPECT_EQ(raw_hash, calc.raw_hash());
 }
 
 TEST_F(HashCalculatorTest, MultiUpdateTest) {
@@ -64,7 +60,7 @@
   calc.Finalize();
   brillo::Blob raw_hash(std::begin(kExpectedRawHash),
                         std::end(kExpectedRawHash));
-  EXPECT_TRUE(raw_hash == calc.raw_hash());
+  EXPECT_EQ(raw_hash, calc.raw_hash());
 }
 
 TEST_F(HashCalculatorTest, ContextTest) {
@@ -78,7 +74,7 @@
   calc_next.Finalize();
   brillo::Blob raw_hash(std::begin(kExpectedRawHash),
                         std::end(kExpectedRawHash));
-  EXPECT_TRUE(raw_hash == calc_next.raw_hash());
+  EXPECT_EQ(raw_hash, calc_next.raw_hash());
 }
 
 TEST_F(HashCalculatorTest, BigTest) {
@@ -108,25 +104,21 @@
 }
 
 TEST_F(HashCalculatorTest, UpdateFileSimpleTest) {
-  string data_path;
-  ASSERT_TRUE(
-      utils::MakeTempFile("data.XXXXXX", &data_path, nullptr));
-  ScopedPathUnlinker data_path_unlinker(data_path);
-  ASSERT_TRUE(utils::WriteFile(data_path.c_str(), "hi", 2));
+  test_utils::ScopedTempFile data_file("data.XXXXXX");
+  ASSERT_TRUE(test_utils::WriteFileString(data_file.path(), "hi"));
 
-  static const int kLengths[] = { -1, 2, 10 };
-  for (size_t i = 0; i < arraysize(kLengths); i++) {
+  for (const int length : {-1, 2, 10}) {
     HashCalculator calc;
-    EXPECT_EQ(2, calc.UpdateFile(data_path, kLengths[i]));
+    EXPECT_EQ(2, calc.UpdateFile(data_file.path(), length));
     EXPECT_TRUE(calc.Finalize());
     brillo::Blob raw_hash(std::begin(kExpectedRawHash),
                           std::end(kExpectedRawHash));
-    EXPECT_TRUE(raw_hash == calc.raw_hash());
+    EXPECT_EQ(raw_hash, calc.raw_hash());
   }
 
   HashCalculator calc;
-  EXPECT_EQ(0, calc.UpdateFile(data_path, 0));
-  EXPECT_EQ(1, calc.UpdateFile(data_path, 1));
+  EXPECT_EQ(0, calc.UpdateFile(data_file.path(), 0));
+  EXPECT_EQ(1, calc.UpdateFile(data_file.path(), 1));
   EXPECT_TRUE(calc.Finalize());
   // echo -n h | openssl dgst -sha256 -binary | openssl base64
   EXPECT_EQ("qqlAJmTxpB9A67xSyZk+tmrrNmYClY/fqig7ceZNsSM=",
@@ -134,21 +126,16 @@
 }
 
 TEST_F(HashCalculatorTest, RawHashOfFileSimpleTest) {
-  string data_path;
-  ASSERT_TRUE(
-      utils::MakeTempFile("data.XXXXXX", &data_path, nullptr));
-  ScopedPathUnlinker data_path_unlinker(data_path);
-  ASSERT_TRUE(utils::WriteFile(data_path.c_str(), "hi", 2));
+  test_utils::ScopedTempFile data_file("data.XXXXXX");
+  ASSERT_TRUE(test_utils::WriteFileString(data_file.path(), "hi"));
 
-  static const int kLengths[] = { -1, 2, 10 };
-  for (size_t i = 0; i < arraysize(kLengths); i++) {
+  for (const int length : {-1, 2, 10}) {
     brillo::Blob exp_raw_hash(std::begin(kExpectedRawHash),
                               std::end(kExpectedRawHash));
     brillo::Blob raw_hash;
-    EXPECT_EQ(2, HashCalculator::RawHashOfFile(data_path,
-                                               kLengths[i],
-                                               &raw_hash));
-    EXPECT_TRUE(exp_raw_hash == raw_hash);
+    EXPECT_EQ(
+        2, HashCalculator::RawHashOfFile(data_file.path(), length, &raw_hash));
+    EXPECT_EQ(exp_raw_hash, raw_hash);
   }
 }
 
@@ -159,9 +146,7 @@
 
 TEST_F(HashCalculatorTest, AbortTest) {
   // Just make sure we don't crash and valgrind doesn't detect memory leaks
-  {
-    HashCalculator calc;
-  }
+  { HashCalculator calc; }
   {
     HashCalculator calc;
     calc.Update("h", 1);
diff --git a/common/http_common.cc b/common/http_common.cc
index d07ced3..5f234b0 100644
--- a/common/http_common.cc
+++ b/common/http_common.cc
@@ -24,34 +24,34 @@
 
 namespace chromeos_update_engine {
 
-const char *GetHttpResponseDescription(HttpResponseCode code) {
+const char* GetHttpResponseDescription(HttpResponseCode code) {
   static const struct {
     HttpResponseCode code;
     const char* description;
   } http_response_table[] = {
-    { kHttpResponseOk,                  "OK" },
-    { kHttpResponseCreated,             "Created" },
-    { kHttpResponseAccepted,            "Accepted" },
-    { kHttpResponseNonAuthInfo,         "Non-Authoritative Information" },
-    { kHttpResponseNoContent,           "No Content" },
-    { kHttpResponseResetContent,        "Reset Content" },
-    { kHttpResponsePartialContent,      "Partial Content" },
-    { kHttpResponseMultipleChoices,     "Multiple Choices" },
-    { kHttpResponseMovedPermanently,    "Moved Permanently" },
-    { kHttpResponseFound,               "Found" },
-    { kHttpResponseSeeOther,            "See Other" },
-    { kHttpResponseNotModified,         "Not Modified" },
-    { kHttpResponseUseProxy,            "Use Proxy" },
-    { kHttpResponseTempRedirect,        "Temporary Redirect" },
-    { kHttpResponseBadRequest,          "Bad Request" },
-    { kHttpResponseUnauth,              "Unauthorized" },
-    { kHttpResponseForbidden,           "Forbidden" },
-    { kHttpResponseNotFound,            "Not Found" },
-    { kHttpResponseRequestTimeout,      "Request Timeout" },
-    { kHttpResponseInternalServerError, "Internal Server Error" },
-    { kHttpResponseNotImplemented,      "Not Implemented" },
-    { kHttpResponseServiceUnavailable,  "Service Unavailable" },
-    { kHttpResponseVersionNotSupported, "HTTP Version Not Supported" },
+      {kHttpResponseOk, "OK"},
+      {kHttpResponseCreated, "Created"},
+      {kHttpResponseAccepted, "Accepted"},
+      {kHttpResponseNonAuthInfo, "Non-Authoritative Information"},
+      {kHttpResponseNoContent, "No Content"},
+      {kHttpResponseResetContent, "Reset Content"},
+      {kHttpResponsePartialContent, "Partial Content"},
+      {kHttpResponseMultipleChoices, "Multiple Choices"},
+      {kHttpResponseMovedPermanently, "Moved Permanently"},
+      {kHttpResponseFound, "Found"},
+      {kHttpResponseSeeOther, "See Other"},
+      {kHttpResponseNotModified, "Not Modified"},
+      {kHttpResponseUseProxy, "Use Proxy"},
+      {kHttpResponseTempRedirect, "Temporary Redirect"},
+      {kHttpResponseBadRequest, "Bad Request"},
+      {kHttpResponseUnauth, "Unauthorized"},
+      {kHttpResponseForbidden, "Forbidden"},
+      {kHttpResponseNotFound, "Not Found"},
+      {kHttpResponseRequestTimeout, "Request Timeout"},
+      {kHttpResponseInternalServerError, "Internal Server Error"},
+      {kHttpResponseNotImplemented, "Not Implemented"},
+      {kHttpResponseServiceUnavailable, "Service Unavailable"},
+      {kHttpResponseVersionNotSupported, "HTTP Version Not Supported"},
   };
 
   bool is_found = false;
@@ -63,17 +63,16 @@
   return (is_found ? http_response_table[i].description : "(unsupported)");
 }
 
-HttpResponseCode StringToHttpResponseCode(const char *s) {
+HttpResponseCode StringToHttpResponseCode(const char* s) {
   return static_cast<HttpResponseCode>(strtoul(s, nullptr, 10));
 }
 
-
-const char *GetHttpContentTypeString(HttpContentType type) {
+const char* GetHttpContentTypeString(HttpContentType type) {
   static const struct {
     HttpContentType type;
     const char* str;
   } http_content_type_table[] = {
-    { kHttpContentTypeTextXml, "text/xml" },
+      {kHttpContentTypeTextXml, "text/xml"},
   };
 
   bool is_found = false;
diff --git a/common/http_common.h b/common/http_common.h
index 6d444ed..7a68da0 100644
--- a/common/http_common.h
+++ b/common/http_common.h
@@ -24,39 +24,38 @@
 
 // Enumeration type for HTTP response codes.
 enum HttpResponseCode {
-  kHttpResponseUndefined           = 0,
-  kHttpResponseOk                  = 200,
-  kHttpResponseCreated             = 201,
-  kHttpResponseAccepted            = 202,
-  kHttpResponseNonAuthInfo         = 203,
-  kHttpResponseNoContent           = 204,
-  kHttpResponseResetContent        = 205,
-  kHttpResponsePartialContent      = 206,
-  kHttpResponseMultipleChoices     = 300,
-  kHttpResponseMovedPermanently    = 301,
-  kHttpResponseFound               = 302,
-  kHttpResponseSeeOther            = 303,
-  kHttpResponseNotModified         = 304,
-  kHttpResponseUseProxy            = 305,
-  kHttpResponseTempRedirect        = 307,
-  kHttpResponseBadRequest          = 400,
-  kHttpResponseUnauth              = 401,
-  kHttpResponseForbidden           = 403,
-  kHttpResponseNotFound            = 404,
-  kHttpResponseRequestTimeout      = 408,
-  kHttpResponseReqRangeNotSat      = 416,
+  kHttpResponseUndefined = 0,
+  kHttpResponseOk = 200,
+  kHttpResponseCreated = 201,
+  kHttpResponseAccepted = 202,
+  kHttpResponseNonAuthInfo = 203,
+  kHttpResponseNoContent = 204,
+  kHttpResponseResetContent = 205,
+  kHttpResponsePartialContent = 206,
+  kHttpResponseMultipleChoices = 300,
+  kHttpResponseMovedPermanently = 301,
+  kHttpResponseFound = 302,
+  kHttpResponseSeeOther = 303,
+  kHttpResponseNotModified = 304,
+  kHttpResponseUseProxy = 305,
+  kHttpResponseTempRedirect = 307,
+  kHttpResponseBadRequest = 400,
+  kHttpResponseUnauth = 401,
+  kHttpResponseForbidden = 403,
+  kHttpResponseNotFound = 404,
+  kHttpResponseRequestTimeout = 408,
+  kHttpResponseReqRangeNotSat = 416,
   kHttpResponseInternalServerError = 500,
-  kHttpResponseNotImplemented      = 501,
-  kHttpResponseServiceUnavailable  = 503,
+  kHttpResponseNotImplemented = 501,
+  kHttpResponseServiceUnavailable = 503,
   kHttpResponseVersionNotSupported = 505,
 };
 
 // Returns a standard HTTP status line string for a given response code.
-const char *GetHttpResponseDescription(HttpResponseCode code);
+const char* GetHttpResponseDescription(HttpResponseCode code);
 
 // Converts a string beginning with an HTTP error code into numerical value.
-HttpResponseCode StringToHttpResponseCode(const char *s);
-
+HttpResponseCode StringToHttpResponseCode(const char* s);
 
 // Enumeration type for HTTP Content-Type.
 enum HttpContentType {
@@ -65,7 +64,7 @@
 };
 
 // Returns a standard HTTP Content-Type string.
-const char *GetHttpContentTypeString(HttpContentType type);
+const char* GetHttpContentTypeString(HttpContentType type);
 
 }  // namespace chromeos_update_engine
 
diff --git a/common/http_fetcher.cc b/common/http_fetcher.cc
index 73c0d48..5a98dfc 100644
--- a/common/http_fetcher.cc
+++ b/common/http_fetcher.cc
@@ -29,7 +29,8 @@
   CancelProxyResolution();
 }
 
-void HttpFetcher::SetPostData(const void* data, size_t size,
+void HttpFetcher::SetPostData(const void* data,
+                              size_t size,
                               HttpContentType type) {
   post_data_set_ = true;
   post_data_.clear();
diff --git a/common/http_fetcher.h b/common/http_fetcher.h
index 3f7b2e8..2b4fc83 100644
--- a/common/http_fetcher.h
+++ b/common/http_fetcher.h
@@ -18,6 +18,7 @@
 #define UPDATE_ENGINE_COMMON_HTTP_FETCHER_H_
 
 #include <deque>
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -27,7 +28,7 @@
 #include <brillo/message_loops/message_loop.h>
 
 #include "update_engine/common/http_common.h"
-#include "update_engine/proxy_resolver.h"
+#include "update_engine/common/proxy_resolver.h"
 
 // This class is a simple wrapper around an HTTP library (libcurl). We can
 // easily mock out this interface for testing.
@@ -72,9 +73,7 @@
   void SetProxies(const std::deque<std::string>& proxies) {
     proxies_ = proxies;
   }
-  const std::string& GetCurrentProxy() const {
-    return proxies_.front();
-  }
+  const std::string& GetCurrentProxy() const { return proxies_.front(); }
   bool HasProxy() const { return !proxies_.empty(); }
   void PopProxy() { proxies_.pop_front(); }
 
@@ -186,8 +185,9 @@
  public:
   virtual ~HttpFetcherDelegate() = default;
 
-  // Called every time bytes are received.
-  virtual void ReceivedBytes(HttpFetcher* fetcher,
+  // Called every time bytes are received. Returns false if this call causes
+  // the transfer to be terminated or completed; otherwise returns true.
+  virtual bool ReceivedBytes(HttpFetcher* fetcher,
                              const void* bytes,
                              size_t length) = 0;
 
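To illustrate the new bool contract of ReceivedBytes(), a sketch of a delegate that terminates the transfer once a byte budget is exceeded and returns false so the fetcher stops scheduling reads; ByteBudgetDelegate and its budget are hypothetical, for illustration only.

// Sketch only: stop the transfer after |max_bytes_| have been received.
#include <cstddef>

#include "update_engine/common/http_fetcher.h"

namespace chromeos_update_engine {

class ByteBudgetDelegate : public HttpFetcherDelegate {
 public:
  explicit ByteBudgetDelegate(size_t max_bytes) : max_bytes_(max_bytes) {}

  bool ReceivedBytes(HttpFetcher* fetcher,
                     const void* /* bytes */,
                     size_t length) override {
    received_ += length;
    if (received_ > max_bytes_) {
      // Terminate and report false so the fetcher does not schedule further
      // reads on this (now terminating) transfer.
      fetcher->TerminateTransfer();
      return false;
    }
    return true;
  }

  void TransferComplete(HttpFetcher* /* fetcher */, bool /* ok */) override {}
  void TransferTerminated(HttpFetcher* /* fetcher */) override {}

 private:
  size_t max_bytes_;
  size_t received_{0};
};

}  // namespace chromeos_update_engine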
diff --git a/common/http_fetcher_unittest.cc b/common/http_fetcher_unittest.cc
index 867216e..237ea20 100644
--- a/common/http_fetcher_unittest.cc
+++ b/common/http_fetcher_unittest.cc
@@ -19,6 +19,7 @@
 #include <sys/socket.h>
 #include <unistd.h>
 
+#include <algorithm>
 #include <memory>
 #include <string>
 #include <utility>
@@ -44,12 +45,12 @@
 #include "update_engine/common/file_fetcher.h"
 #include "update_engine/common/http_common.h"
 #include "update_engine/common/mock_http_fetcher.h"
+#include "update_engine/common/mock_proxy_resolver.h"
 #include "update_engine/common/multi_range_http_fetcher.h"
+#include "update_engine/common/proxy_resolver.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/libcurl_http_fetcher.h"
-#include "update_engine/mock_proxy_resolver.h"
-#include "update_engine/proxy_resolver.h"
 
 using brillo::MessageLoop;
 using std::make_pair;
@@ -57,30 +58,29 @@
 using std::string;
 using std::unique_ptr;
 using std::vector;
+using testing::_;
 using testing::DoAll;
 using testing::Return;
 using testing::SaveArg;
-using testing::_;
 
 namespace {
 
-const int kBigLength           = 100000;
-const int kMediumLength        = 1000;
+const int kBigLength = 100000;
+const int kMediumLength = 1000;
 const int kFlakyTruncateLength = 29000;
-const int kFlakySleepEvery     = 3;
-const int kFlakySleepSecs      = 10;
+const int kFlakySleepEvery = 3;
+const int kFlakySleepSecs = 10;
 
 }  // namespace
 
 namespace chromeos_update_engine {
 
-static const char *kUnusedUrl = "unused://unused";
+static const char* kUnusedUrl = "unused://unused";
 
-static inline string LocalServerUrlForPath(in_port_t port,
-                                           const string& path) {
+static inline string LocalServerUrlForPath(in_port_t port, const string& path) {
   string port_str = (port ? base::StringPrintf(":%hu", port) : "");
-  return base::StringPrintf("http://127.0.0.1%s%s", port_str.c_str(),
-                            path.c_str());
+  return base::StringPrintf(
+      "http://127.0.0.1%s%s", port_str.c_str(), path.c_str());
 }
 
 //
@@ -92,24 +92,18 @@
   // This makes it an abstract class (dirty but works).
   virtual ~HttpServer() = 0;
 
-  virtual in_port_t GetPort() const {
-    return 0;
-  }
+  virtual in_port_t GetPort() const { return 0; }
 
   bool started_;
 };
 
 HttpServer::~HttpServer() {}
 
-
 class NullHttpServer : public HttpServer {
  public:
-  NullHttpServer() {
-    started_ = true;
-  }
+  NullHttpServer() { started_ = true; }
 };
 
-
 class PythonHttpServer : public HttpServer {
  public:
   PythonHttpServer() : port_(0) {
@@ -174,9 +168,7 @@
     http_server_->Kill(SIGTERM, 10);
   }
 
-  in_port_t GetPort() const override {
-    return port_;
-  }
+  in_port_t GetPort() const override { return port_; }
 
  private:
   static const char* kServerListeningMsgPrefix;
@@ -201,9 +193,7 @@
     proxy_resolver_.set_num_proxies(num_proxies);
     return NewLargeFetcher(&proxy_resolver_);
   }
-  HttpFetcher* NewLargeFetcher() {
-    return NewLargeFetcher(1);
-  }
+  HttpFetcher* NewLargeFetcher() { return NewLargeFetcher(1); }
 
   virtual HttpFetcher* NewSmallFetcher(ProxyResolver* proxy_resolver) = 0;
   HttpFetcher* NewSmallFetcher() {
@@ -218,14 +208,13 @@
   virtual bool IsMock() const = 0;
   virtual bool IsMulti() const = 0;
   virtual bool IsHttpSupported() const = 0;
+  virtual bool IsFileFetcher() const = 0;
 
   virtual void IgnoreServerAborting(HttpServer* server) const {}
 
   virtual HttpServer* CreateServer() = 0;
 
-  FakeHardware* fake_hardware() {
-    return &fake_hardware_;
-  }
+  FakeHardware* fake_hardware() { return &fake_hardware_; }
 
  protected:
   DirectProxyResolver proxy_resolver_;
@@ -251,10 +240,9 @@
   bool IsMock() const override { return true; }
   bool IsMulti() const override { return false; }
   bool IsHttpSupported() const override { return true; }
+  bool IsFileFetcher() const override { return false; }
 
-  HttpServer* CreateServer() override {
-    return new NullHttpServer;
-  }
+  HttpServer* CreateServer() override { return new NullHttpServer; }
 };
 
 class LibcurlHttpFetcherTest : public AnyHttpFetcherTest {
@@ -278,9 +266,8 @@
   }
 
   string BigUrl(in_port_t port) const override {
-    return LocalServerUrlForPath(port,
-                                 base::StringPrintf("/download/%d",
-                                                    kBigLength));
+    return LocalServerUrlForPath(
+        port, base::StringPrintf("/download/%d", kBigLength));
   }
   string SmallUrl(in_port_t port) const override {
     return LocalServerUrlForPath(port, "/foo");
@@ -292,14 +279,13 @@
   bool IsMock() const override { return false; }
   bool IsMulti() const override { return false; }
   bool IsHttpSupported() const override { return true; }
+  bool IsFileFetcher() const override { return false; }
 
   void IgnoreServerAborting(HttpServer* server) const override {
     // Nothing to do.
   }
 
-  HttpServer* CreateServer() override {
-    return new PythonHttpServer;
-  }
+  HttpServer* CreateServer() override { return new PythonHttpServer; }
 };
 
 class MultiRangeHttpFetcherTest : public LibcurlHttpFetcherTest {
@@ -342,6 +328,17 @@
   }
 
   string BigUrl(in_port_t port) const override {
+    static string big_contents = []() {
+      string buf;
+      buf.reserve(kBigLength);
+      constexpr const char* kBigUrlContent = "abcdefghij";
+      for (size_t i = 0; i < kBigLength; i += strlen(kBigUrlContent)) {
+        buf.append(kBigUrlContent,
+                   std::min(kBigLength - i, strlen(kBigUrlContent)));
+      }
+      return buf;
+    }();
+    test_utils::WriteFileString(temp_file_.path(), big_contents);
     return "file://" + temp_file_.path();
   }
   string SmallUrl(in_port_t port) const override {
@@ -355,6 +352,7 @@
   bool IsMock() const override { return false; }
   bool IsMulti() const override { return false; }
   bool IsHttpSupported() const override { return false; }
+  bool IsFileFetcher() const override { return true; }
 
   void IgnoreServerAborting(HttpServer* server) const override {}
 
@@ -364,6 +362,31 @@
   test_utils::ScopedTempFile temp_file_{"ue_file_fetcher.XXXXXX"};
 };
 
+class MultiRangeHttpFetcherOverFileFetcherTest : public FileFetcherTest {
+ public:
+  // Necessary to unhide the definition in the base class.
+  using AnyHttpFetcherTest::NewLargeFetcher;
+  HttpFetcher* NewLargeFetcher(ProxyResolver* /* proxy_resolver */) override {
+    MultiRangeHttpFetcher* ret = new MultiRangeHttpFetcher(new FileFetcher());
+    ret->ClearRanges();
+    // FileFetcher doesn't support range with unspecified length.
+    ret->AddRange(0, 1);
+    // Speed up test execution.
+    ret->set_idle_seconds(1);
+    ret->set_retry_seconds(1);
+    fake_hardware_.SetIsOfficialBuild(false);
+    return ret;
+  }
+
+  // Necessary to unhide the definition in the base class.
+  using AnyHttpFetcherTest::NewSmallFetcher;
+  HttpFetcher* NewSmallFetcher(ProxyResolver* proxy_resolver) override {
+    return NewLargeFetcher(proxy_resolver);
+  }
+
+  bool IsMulti() const override { return true; }
+};
+
 //
 // Infrastructure for type tests of HTTP fetcher.
 // See: http://code.google.com/p/googletest/wiki/AdvancedGuide#Typed_Tests
@@ -381,9 +404,7 @@
   T test_;
 
  protected:
-  HttpFetcherTest() {
-    loop_.SetAsCurrent();
-  }
+  HttpFetcherTest() { loop_.SetAsCurrent(); }
 
   void TearDown() override {
     EXPECT_EQ(0, brillo::MessageLoopRunMaxIterations(&loop_, 1));
@@ -391,7 +412,7 @@
 
  private:
   static void TypeConstraint(T* a) {
-    AnyHttpFetcherTest *b = a;
+    AnyHttpFetcherTest* b = a;
     if (b == 0)  // Silence compiler warning of unused variable.
       *b = a;
   }
@@ -401,22 +422,23 @@
 typedef ::testing::Types<LibcurlHttpFetcherTest,
                          MockHttpFetcherTest,
                          MultiRangeHttpFetcherTest,
-                         FileFetcherTest>
+                         FileFetcherTest,
+                         MultiRangeHttpFetcherOverFileFetcherTest>
     HttpFetcherTestTypes;
 TYPED_TEST_CASE(HttpFetcherTest, HttpFetcherTestTypes);
 
-
 namespace {
 class HttpFetcherTestDelegate : public HttpFetcherDelegate {
  public:
   HttpFetcherTestDelegate() = default;
 
-  void ReceivedBytes(HttpFetcher* /* fetcher */,
+  bool ReceivedBytes(HttpFetcher* /* fetcher */,
                      const void* bytes,
                      size_t length) override {
     data.append(reinterpret_cast<const char*>(bytes), length);
     // Update counters
     times_received_bytes_called_++;
+    return true;
   }
 
   void TransferComplete(HttpFetcher* fetcher, bool successful) override {
@@ -447,7 +469,6 @@
   string data;
 };
 
-
 void StartTransfer(HttpFetcher* http_fetcher, const string& url) {
   http_fetcher->BeginTransfer(url);
 }
@@ -461,10 +482,10 @@
   unique_ptr<HttpServer> server(this->test_.CreateServer());
   ASSERT_TRUE(server->started_);
 
-  this->loop_.PostTask(FROM_HERE, base::Bind(
-      StartTransfer,
-      fetcher.get(),
-      this->test_.SmallUrl(server->GetPort())));
+  this->loop_.PostTask(FROM_HERE,
+                       base::Bind(StartTransfer,
+                                  fetcher.get(),
+                                  this->test_.SmallUrl(server->GetPort())));
   this->loop_.Run();
   EXPECT_EQ(0, delegate.times_transfer_terminated_called_);
 }
@@ -477,10 +498,10 @@
   unique_ptr<HttpServer> server(this->test_.CreateServer());
   ASSERT_TRUE(server->started_);
 
-  this->loop_.PostTask(FROM_HERE, base::Bind(
-      StartTransfer,
-      fetcher.get(),
-      this->test_.BigUrl(server->GetPort())));
+  this->loop_.PostTask(
+      FROM_HERE,
+      base::Bind(
+          StartTransfer, fetcher.get(), this->test_.BigUrl(server->GetPort())));
   this->loop_.Run();
   EXPECT_EQ(0, delegate.times_transfer_terminated_called_);
 }
@@ -501,10 +522,10 @@
   unique_ptr<HttpServer> server(this->test_.CreateServer());
   ASSERT_TRUE(server->started_);
 
-  this->loop_.PostTask(FROM_HERE, base::Bind(
-      StartTransfer,
-      fetcher.get(),
-      this->test_.ErrorUrl(server->GetPort())));
+  this->loop_.PostTask(FROM_HERE,
+                       base::Bind(StartTransfer,
+                                  fetcher.get(),
+                                  this->test_.ErrorUrl(server->GetPort())));
   this->loop_.Run();
 
   // Make sure that no bytes were received.
@@ -559,18 +580,18 @@
 namespace {
 class PausingHttpFetcherTestDelegate : public HttpFetcherDelegate {
  public:
-  void ReceivedBytes(HttpFetcher* fetcher,
-                     const void* /* bytes */, size_t /* length */) override {
+  bool ReceivedBytes(HttpFetcher* fetcher,
+                     const void* /* bytes */,
+                     size_t /* length */) override {
     CHECK(!paused_);
     paused_ = true;
     fetcher->Pause();
+    return true;
   }
   void TransferComplete(HttpFetcher* fetcher, bool successful) override {
     MessageLoop::current()->BreakLoop();
   }
-  void TransferTerminated(HttpFetcher* fetcher) override {
-    ADD_FAILURE();
-  }
+  void TransferTerminated(HttpFetcher* fetcher) override { ADD_FAILURE(); }
   void Unpause() {
     CHECK(paused_);
     paused_ = false;
@@ -640,8 +661,11 @@
 namespace {
 class AbortingHttpFetcherTestDelegate : public HttpFetcherDelegate {
  public:
-  void ReceivedBytes(HttpFetcher* fetcher,
-                     const void* bytes, size_t length) override {}
+  bool ReceivedBytes(HttpFetcher* fetcher,
+                     const void* bytes,
+                     size_t length) override {
+    return true;
+  }
   void TransferComplete(HttpFetcher* fetcher, bool successful) override {
     ADD_FAILURE();  // We should never get here
     MessageLoop::current()->BreakLoop();
@@ -660,9 +684,7 @@
     once_ = false;
     fetcher_->TerminateTransfer();
   }
-  void EndLoop() {
-    MessageLoop::current()->BreakLoop();
-  }
+  void EndLoop() { MessageLoop::current()->BreakLoop(); }
   bool once_;
   bool callback_once_;
   unique_ptr<HttpFetcher> fetcher_;
@@ -673,8 +695,7 @@
   if (delegate->once_) {
     delegate->TerminateTransfer();
     *my_id = MessageLoop::current()->PostTask(
-        FROM_HERE,
-        base::Bind(AbortingTimeoutCallback, delegate, my_id));
+        FROM_HERE, base::Bind(AbortingTimeoutCallback, delegate, my_id));
   } else {
     delegate->EndLoop();
     *my_id = MessageLoop::kTaskIdNull;
@@ -696,8 +717,7 @@
   MessageLoop::TaskId task_id = MessageLoop::kTaskIdNull;
 
   task_id = this->loop_.PostTask(
-      FROM_HERE,
-      base::Bind(AbortingTimeoutCallback, &delegate, &task_id));
+      FROM_HERE, base::Bind(AbortingTimeoutCallback, &delegate, &task_id));
   delegate.fetcher_->BeginTransfer(this->test_.BigUrl(server->GetPort()));
 
   this->loop_.Run();
@@ -735,18 +755,18 @@
 namespace {
 class FlakyHttpFetcherTestDelegate : public HttpFetcherDelegate {
  public:
-  void ReceivedBytes(HttpFetcher* fetcher,
-                     const void* bytes, size_t length) override {
+  bool ReceivedBytes(HttpFetcher* fetcher,
+                     const void* bytes,
+                     size_t length) override {
     data.append(reinterpret_cast<const char*>(bytes), length);
+    return true;
   }
   void TransferComplete(HttpFetcher* fetcher, bool successful) override {
     EXPECT_TRUE(successful);
     EXPECT_EQ(kHttpResponsePartialContent, fetcher->http_response_code());
     MessageLoop::current()->BreakLoop();
   }
-  void TransferTerminated(HttpFetcher* fetcher) override {
-    ADD_FAILURE();
-  }
+  void TransferTerminated(HttpFetcher* fetcher) override { ADD_FAILURE(); }
   string data;
 };
 }  // namespace
@@ -762,15 +782,16 @@
     unique_ptr<HttpServer> server(this->test_.CreateServer());
     ASSERT_TRUE(server->started_);
 
-    this->loop_.PostTask(FROM_HERE, base::Bind(
-        &StartTransfer,
-        fetcher.get(),
-        LocalServerUrlForPath(server->GetPort(),
-                              base::StringPrintf("/flaky/%d/%d/%d/%d",
-                                                 kBigLength,
-                                                 kFlakyTruncateLength,
-                                                 kFlakySleepEvery,
-                                                 kFlakySleepSecs))));
+    this->loop_.PostTask(FROM_HERE,
+                         base::Bind(&StartTransfer,
+                                    fetcher.get(),
+                                    LocalServerUrlForPath(
+                                        server->GetPort(),
+                                        base::StringPrintf("/flaky/%d/%d/%d/%d",
+                                                           kBigLength,
+                                                           kFlakyTruncateLength,
+                                                           kFlakySleepEvery,
+                                                           kFlakySleepSecs))));
     this->loop_.Run();
 
     // verify the data we get back
@@ -799,13 +820,15 @@
     }
   }
 
-  void ReceivedBytes(HttpFetcher* fetcher,
-                     const void* bytes, size_t length) override {
+  bool ReceivedBytes(HttpFetcher* fetcher,
+                     const void* bytes,
+                     size_t length) override {
     if (server_) {
       LOG(INFO) << "Stopping server in ReceivedBytes";
       server_.reset();
       LOG(INFO) << "server stopped";
     }
+    return true;
   }
   void TransferComplete(HttpFetcher* fetcher, bool successful) override {
     EXPECT_FALSE(successful);
@@ -823,7 +846,6 @@
 };
 }  // namespace
 
-
 TYPED_TEST(HttpFetcherTest, FailureTest) {
   // This test ensures that a fetcher responds correctly when a server isn't
   // available at all.
@@ -864,10 +886,10 @@
   // expired.
   fetcher->set_low_speed_limit(kDownloadLowSpeedLimitBps, 1);
 
-  this->loop_.PostTask(FROM_HERE, base::Bind(
-      StartTransfer,
-      fetcher.get(),
-      LocalServerUrlForPath(port, "/hang")));
+  this->loop_.PostTask(
+      FROM_HERE,
+      base::Bind(
+          StartTransfer, fetcher.get(), LocalServerUrlForPath(port, "/hang")));
   this->loop_.Run();
   EXPECT_EQ(1, delegate.times_transfer_complete_called_);
   EXPECT_EQ(0, delegate.times_transfer_terminated_called_);
@@ -877,8 +899,8 @@
   bool timeout = false;
   auto callback = base::Bind([](bool* timeout) { *timeout = true; },
                              base::Unretained(&timeout));
-  this->loop_.PostDelayedTask(FROM_HERE, callback,
-                              base::TimeDelta::FromSeconds(2));
+  this->loop_.PostDelayedTask(
+      FROM_HERE, callback, base::TimeDelta::FromSeconds(2));
   EXPECT_TRUE(this->loop_.RunOnce(true));
   EXPECT_TRUE(timeout);
 }
@@ -964,18 +986,20 @@
 }
 
 namespace {
-const HttpResponseCode kRedirectCodes[] = {
-  kHttpResponseMovedPermanently, kHttpResponseFound, kHttpResponseSeeOther,
-  kHttpResponseTempRedirect
-};
+const HttpResponseCode kRedirectCodes[] = {kHttpResponseMovedPermanently,
+                                           kHttpResponseFound,
+                                           kHttpResponseSeeOther,
+                                           kHttpResponseTempRedirect};
 
 class RedirectHttpFetcherTestDelegate : public HttpFetcherDelegate {
  public:
   explicit RedirectHttpFetcherTestDelegate(bool expected_successful)
       : expected_successful_(expected_successful) {}
-  void ReceivedBytes(HttpFetcher* fetcher,
-                     const void* bytes, size_t length) override {
+  bool ReceivedBytes(HttpFetcher* fetcher,
+                     const void* bytes,
+                     size_t length) override {
     data.append(reinterpret_cast<const char*>(bytes), length);
+    return true;
   }
   void TransferComplete(HttpFetcher* fetcher, bool successful) override {
     EXPECT_EQ(expected_successful_, successful);
@@ -987,9 +1011,7 @@
     }
     MessageLoop::current()->BreakLoop();
   }
-  void TransferTerminated(HttpFetcher* fetcher) override {
-    ADD_FAILURE();
-  }
+  void TransferTerminated(HttpFetcher* fetcher) override { ADD_FAILURE(); }
   bool expected_successful_;
   string data;
 };
@@ -1003,10 +1025,11 @@
   unique_ptr<HttpFetcher> fetcher(http_fetcher);
   fetcher->set_delegate(&delegate);
 
-  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
-      StartTransfer,
-      fetcher.get(),
-      LocalServerUrlForPath(server->GetPort(), url)));
+  MessageLoop::current()->PostTask(
+      FROM_HERE,
+      base::Bind(StartTransfer,
+                 fetcher.get(),
+                 LocalServerUrlForPath(server->GetPort(), url)));
   MessageLoop::current()->Run();
   if (expected_successful) {
     // verify the data we get back
@@ -1027,9 +1050,8 @@
   ASSERT_TRUE(server->started_);
 
   for (size_t c = 0; c < arraysize(kRedirectCodes); ++c) {
-    const string url = base::StringPrintf("/redirect/%d/download/%d",
-                                          kRedirectCodes[c],
-                                          kMediumLength);
+    const string url = base::StringPrintf(
+        "/redirect/%d/download/%d", kRedirectCodes[c], kMediumLength);
     RedirectTest(server.get(), true, url, this->test_.NewLargeFetcher());
   }
 }
@@ -1072,10 +1094,12 @@
   explicit MultiHttpFetcherTestDelegate(int expected_response_code)
       : expected_response_code_(expected_response_code) {}
 
-  void ReceivedBytes(HttpFetcher* fetcher,
-                     const void* bytes, size_t length) override {
+  bool ReceivedBytes(HttpFetcher* fetcher,
+                     const void* bytes,
+                     size_t length) override {
     EXPECT_EQ(fetcher, fetcher_.get());
     data.append(reinterpret_cast<const char*>(bytes), length);
+    return true;
   }
 
   void TransferComplete(HttpFetcher* fetcher, bool successful) override {
@@ -1088,9 +1112,7 @@
     MessageLoop::current()->BreakLoop();
   }
 
-  void TransferTerminated(HttpFetcher* fetcher) override {
-    ADD_FAILURE();
-  }
+  void TransferTerminated(HttpFetcher* fetcher) override { ADD_FAILURE(); }
 
   unique_ptr<HttpFetcher> fetcher_;
   int expected_response_code_;
@@ -1112,7 +1134,9 @@
   ASSERT_TRUE(multi_fetcher);
   multi_fetcher->ClearRanges();
   for (vector<pair<off_t, off_t>>::const_iterator it = ranges.begin(),
-           e = ranges.end(); it != e; ++it) {
+                                                  e = ranges.end();
+       it != e;
+       ++it) {
     string tmp_str = base::StringPrintf("%jd+", it->first);
     if (it->second > 0) {
       base::StringAppendF(&tmp_str, "%jd", it->second);
@@ -1127,8 +1151,7 @@
   multi_fetcher->set_delegate(&delegate);
 
   MessageLoop::current()->PostTask(
-      FROM_HERE,
-      base::Bind(StartTransfer, multi_fetcher, url));
+      FROM_HERE, base::Bind(StartTransfer, multi_fetcher, url));
   MessageLoop::current()->Run();
 
   EXPECT_EQ(expected_size, delegate.data.size());
@@ -1146,6 +1169,26 @@
 
   vector<pair<off_t, off_t>> ranges;
   ranges.push_back(make_pair(0, 25));
+  ranges.push_back(make_pair(99, 17));
+  MultiTest(this->test_.NewLargeFetcher(),
+            this->test_.fake_hardware(),
+            this->test_.BigUrl(server->GetPort()),
+            ranges,
+            "abcdefghijabcdefghijabcdejabcdefghijabcdef",
+            25 + 17,
+            this->test_.IsFileFetcher() ? kHttpResponseOk
+                                        : kHttpResponsePartialContent);
+}
+
+TYPED_TEST(HttpFetcherTest, MultiHttpFetcherUnspecifiedEndTest) {
+  if (!this->test_.IsMulti() || this->test_.IsFileFetcher())
+    return;
+
+  unique_ptr<HttpServer> server(this->test_.CreateServer());
+  ASSERT_TRUE(server->started_);
+
+  vector<pair<off_t, off_t>> ranges;
+  ranges.push_back(make_pair(0, 25));
   ranges.push_back(make_pair(99, 0));
   MultiTest(this->test_.NewLargeFetcher(),
             this->test_.fake_hardware(),
@@ -1171,11 +1214,12 @@
             ranges,
             "abcdefghijabcdefghijabcd",
             24,
-            kHttpResponsePartialContent);
+            this->test_.IsFileFetcher() ? kHttpResponseOk
+                                        : kHttpResponsePartialContent);
 }
 
 TYPED_TEST(HttpFetcherTest, MultiHttpFetcherMultiEndTest) {
-  if (!this->test_.IsMulti())
+  if (!this->test_.IsMulti() || this->test_.IsFileFetcher())
     return;
 
   unique_ptr<HttpServer> server(this->test_.CreateServer());
@@ -1221,7 +1265,7 @@
 // (1) successful recovery: The offset fetch will fail twice but succeed with
 // the third proxy.
 TYPED_TEST(HttpFetcherTest, MultiHttpFetcherErrorIfOffsetRecoverableTest) {
-  if (!this->test_.IsMulti())
+  if (!this->test_.IsMulti() || this->test_.IsFileFetcher())
     return;
 
   unique_ptr<HttpServer> server(this->test_.CreateServer());
@@ -1232,9 +1276,9 @@
   ranges.push_back(make_pair(99, 0));
   MultiTest(this->test_.NewLargeFetcher(3),
             this->test_.fake_hardware(),
-            LocalServerUrlForPath(server->GetPort(),
-                                  base::StringPrintf("/error-if-offset/%d/2",
-                                                     kBigLength)),
+            LocalServerUrlForPath(
+                server->GetPort(),
+                base::StringPrintf("/error-if-offset/%d/2", kBigLength)),
             ranges,
             "abcdefghijabcdefghijabcdejabcdefghijabcdef",
             kBigLength - (99 - 25),
@@ -1244,7 +1288,7 @@
 // (2) unsuccessful recovery: The offset fetch will fail repeatedly.  The
 // fetcher will signal a (failed) completed transfer to the delegate.
 TYPED_TEST(HttpFetcherTest, MultiHttpFetcherErrorIfOffsetUnrecoverableTest) {
-  if (!this->test_.IsMulti())
+  if (!this->test_.IsMulti() || this->test_.IsFileFetcher())
     return;
 
   unique_ptr<HttpServer> server(this->test_.CreateServer());
@@ -1255,9 +1299,9 @@
   ranges.push_back(make_pair(99, 0));
   MultiTest(this->test_.NewLargeFetcher(2),
             this->test_.fake_hardware(),
-            LocalServerUrlForPath(server->GetPort(),
-                                  base::StringPrintf("/error-if-offset/%d/3",
-                                                     kBigLength)),
+            LocalServerUrlForPath(
+                server->GetPort(),
+                base::StringPrintf("/error-if-offset/%d/3", kBigLength)),
             ranges,
             "abcdefghijabcdefghijabcde",  // only received the first chunk
             25,
@@ -1271,19 +1315,22 @@
   explicit MultiHttpFetcherTerminateTestDelegate(size_t terminate_trigger_bytes)
       : terminate_trigger_bytes_(terminate_trigger_bytes) {}
 
-  void ReceivedBytes(HttpFetcher* fetcher,
+  bool ReceivedBytes(HttpFetcher* fetcher,
                      const void* bytes,
                      size_t length) override {
     LOG(INFO) << "ReceivedBytes, " << length << " bytes.";
     EXPECT_EQ(fetcher, fetcher_.get());
+    bool should_terminate = false;
     if (bytes_downloaded_ < terminate_trigger_bytes_ &&
         bytes_downloaded_ + length >= terminate_trigger_bytes_) {
       MessageLoop::current()->PostTask(
           FROM_HERE,
           base::Bind(&HttpFetcher::TerminateTransfer,
                      base::Unretained(fetcher_.get())));
+      should_terminate = true;
     }
     bytes_downloaded_ += length;
+    return !should_terminate;
   }
 
   void TransferComplete(HttpFetcher* fetcher, bool successful) override {
@@ -1337,17 +1384,17 @@
 namespace {
 class BlockedTransferTestDelegate : public HttpFetcherDelegate {
  public:
-  void ReceivedBytes(HttpFetcher* fetcher,
-                     const void* bytes, size_t length) override {
+  bool ReceivedBytes(HttpFetcher* fetcher,
+                     const void* bytes,
+                     size_t length) override {
     ADD_FAILURE();
+    return true;
   }
   void TransferComplete(HttpFetcher* fetcher, bool successful) override {
     EXPECT_FALSE(successful);
     MessageLoop::current()->BreakLoop();
   }
-  void TransferTerminated(HttpFetcher* fetcher) override {
-    ADD_FAILURE();
-  }
+  void TransferTerminated(HttpFetcher* fetcher) override { ADD_FAILURE(); }
 };
 
 void BlockedTransferTestHelper(AnyHttpFetcherTest* fetcher_test,
@@ -1365,11 +1412,13 @@
   fetcher_test->fake_hardware()->SetIsOfficialBuild(is_official_build);
   fetcher->set_delegate(&delegate);
 
-  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
-      StartTransfer,
-      fetcher.get(),
-      LocalServerUrlForPath(server->GetPort(),
-                            fetcher_test->SmallUrl(server->GetPort()))));
+  MessageLoop::current()->PostTask(
+      FROM_HERE,
+      base::Bind(
+          StartTransfer,
+          fetcher.get(),
+          LocalServerUrlForPath(server->GetPort(),
+                                fetcher_test->SmallUrl(server->GetPort()))));
   MessageLoop::current()->Run();
 }
 }  // namespace
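The delegate updates above all follow the same new contract: ReceivedBytes() now returns a bool, and returning false tells the fetcher not to deliver any further data for the current transfer. A minimal sketch of a delegate that cuts a transfer short, modeled on the MultiHttpFetcherTerminateTestDelegate pattern in this diff; the class name and byte threshold are illustrative only, and the includes assume the usual update_engine test environment:

#include <base/bind.h>
#include <base/location.h>
#include <brillo/message_loops/message_loop.h>

#include "update_engine/common/http_fetcher.h"

namespace chromeos_update_engine {

// Illustrative delegate: stops accepting data once |limit_| bytes arrived.
class StopAfterNBytesDelegate : public HttpFetcherDelegate {
 public:
  explicit StopAfterNBytesDelegate(size_t limit) : limit_(limit) {}

  bool ReceivedBytes(HttpFetcher* fetcher,
                     const void* bytes,
                     size_t length) override {
    received_ += length;
    if (received_ < limit_)
      return true;
    // Schedule the termination on the message loop, as the unit test delegate
    // does, and return false so no more bytes are delivered in the meantime.
    brillo::MessageLoop::current()->PostTask(
        FROM_HERE,
        base::Bind(&HttpFetcher::TerminateTransfer, base::Unretained(fetcher)));
    return false;
  }

  void TransferComplete(HttpFetcher* fetcher, bool successful) override {}
  void TransferTerminated(HttpFetcher* fetcher) override {
    brillo::MessageLoop::current()->BreakLoop();
  }

 private:
  const size_t limit_;
  size_t received_{0};
};

}  // namespace chromeos_update_engine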
diff --git a/common/mock_action_processor.h b/common/mock_action_processor.h
index 04275c1..4c62109 100644
--- a/common/mock_action_processor.h
+++ b/common/mock_action_processor.h
@@ -17,6 +17,10 @@
 #ifndef UPDATE_ENGINE_COMMON_MOCK_ACTION_PROCESSOR_H_
 #define UPDATE_ENGINE_COMMON_MOCK_ACTION_PROCESSOR_H_
 
+#include <deque>
+#include <memory>
+#include <utility>
+
 #include <gmock/gmock.h>
 
 #include "update_engine/common/action.h"
@@ -27,6 +31,12 @@
  public:
   MOCK_METHOD0(StartProcessing, void());
   MOCK_METHOD1(EnqueueAction, void(AbstractAction* action));
+
+  // This is a legacy workaround described in:
+  // https://github.com/google/googletest/blob/master/googlemock/docs/CookBook.md#legacy-workarounds-for-move-only-types-legacymoveonly
+  void EnqueueAction(std::unique_ptr<AbstractAction> action) override {
+    EnqueueAction(action.get());
+  }
 };
 
 }  // namespace chromeos_update_engine
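Because this gmock version cannot mock a method that takes a move-only std::unique_ptr directly, the mock exposes a raw-pointer MOCK_METHOD and forwards the real override to it. A rough sketch of how a test could exercise that forwarding; the test name is illustrative, and a real test would pass a concrete AbstractAction subclass instead of a null pointer:

#include <memory>

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include "update_engine/common/mock_action_processor.h"

namespace chromeos_update_engine {

TEST(MockActionProcessorSketch, UniquePtrCallForwardsToRawPointerMock) {
  MockActionProcessor processor;
  // Expectations are written against the raw-pointer MOCK_METHOD...
  EXPECT_CALL(processor, EnqueueAction(testing::IsNull()));
  // ...while the caller hands over ownership through the unique_ptr overload.
  // A null pointer is enough to show the forwarding in this sketch.
  processor.EnqueueAction(std::unique_ptr<AbstractAction>());
}

}  // namespace chromeos_update_engine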
diff --git a/common/mock_hardware.h b/common/mock_hardware.h
index 42fa7ba..84c0c5b 100644
--- a/common/mock_hardware.h
+++ b/common/mock_hardware.h
@@ -31,44 +31,55 @@
   MockHardware() {
     // Delegate all calls to the fake instance
     ON_CALL(*this, IsOfficialBuild())
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeHardware::IsOfficialBuild));
+        .WillByDefault(testing::Invoke(&fake_, &FakeHardware::IsOfficialBuild));
     ON_CALL(*this, IsNormalBootMode())
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeHardware::IsNormalBootMode));
+        .WillByDefault(
+            testing::Invoke(&fake_, &FakeHardware::IsNormalBootMode));
     ON_CALL(*this, AreDevFeaturesEnabled())
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeHardware::AreDevFeaturesEnabled));
+        .WillByDefault(
+            testing::Invoke(&fake_, &FakeHardware::AreDevFeaturesEnabled));
     ON_CALL(*this, IsOOBEEnabled())
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeHardware::IsOOBEEnabled));
+        .WillByDefault(testing::Invoke(&fake_, &FakeHardware::IsOOBEEnabled));
     ON_CALL(*this, IsOOBEComplete(testing::_))
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeHardware::IsOOBEComplete));
+        .WillByDefault(testing::Invoke(&fake_, &FakeHardware::IsOOBEComplete));
     ON_CALL(*this, GetHardwareClass())
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeHardware::GetHardwareClass));
+        .WillByDefault(
+            testing::Invoke(&fake_, &FakeHardware::GetHardwareClass));
     ON_CALL(*this, GetFirmwareVersion())
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeHardware::GetFirmwareVersion));
+        .WillByDefault(
+            testing::Invoke(&fake_, &FakeHardware::GetFirmwareVersion));
     ON_CALL(*this, GetECVersion())
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeHardware::GetECVersion));
+        .WillByDefault(testing::Invoke(&fake_, &FakeHardware::GetECVersion));
+    ON_CALL(*this, GetMinKernelKeyVersion())
+        .WillByDefault(
+            testing::Invoke(&fake_, &FakeHardware::GetMinKernelKeyVersion));
+    ON_CALL(*this, GetMinFirmwareKeyVersion())
+        .WillByDefault(
+            testing::Invoke(&fake_, &FakeHardware::GetMinFirmwareKeyVersion));
+    ON_CALL(*this, GetMaxFirmwareKeyRollforward())
+        .WillByDefault(testing::Invoke(
+            &fake_, &FakeHardware::GetMaxFirmwareKeyRollforward));
+    ON_CALL(*this, SetMaxFirmwareKeyRollforward())
+        .WillByDefault(testing::Invoke(
+            &fake_, &FakeHardware::SetMaxFirmwareKeyRollforward));
+    ON_CALL(*this, SetMaxKernelKeyRollforward())
+        .WillByDefault(
+            testing::Invoke(&fake_, &FakeHardware::SetMaxKernelKeyRollforward));
     ON_CALL(*this, GetPowerwashCount())
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeHardware::GetPowerwashCount));
+        .WillByDefault(
+            testing::Invoke(&fake_, &FakeHardware::GetPowerwashCount));
     ON_CALL(*this, GetNonVolatileDirectory(testing::_))
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeHardware::GetNonVolatileDirectory));
+        .WillByDefault(
+            testing::Invoke(&fake_, &FakeHardware::GetNonVolatileDirectory));
     ON_CALL(*this, GetPowerwashSafeDirectory(testing::_))
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeHardware::GetPowerwashSafeDirectory));
+        .WillByDefault(
+            testing::Invoke(&fake_, &FakeHardware::GetPowerwashSafeDirectory));
     ON_CALL(*this, GetFirstActiveOmahaPingSent())
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeHardware::GetFirstActiveOmahaPingSent()));
+        .WillByDefault(testing::Invoke(
+            &fake_, &FakeHardware::GetFirstActiveOmahaPingSent()));
     ON_CALL(*this, SetFirstActiveOmahaPingSent())
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeHardware::SetFirstActiveOmahaPingSent()));
+        .WillByDefault(testing::Invoke(
+            &fake_, &FakeHardware::SetFirstActiveOmahaPingSent()));
   }
 
   ~MockHardware() override = default;
@@ -81,15 +92,20 @@
   MOCK_CONST_METHOD0(GetHardwareClass, std::string());
   MOCK_CONST_METHOD0(GetFirmwareVersion, std::string());
   MOCK_CONST_METHOD0(GetECVersion, std::string());
+  MOCK_CONST_METHOD0(GetMinKernelKeyVersion, int());
+  MOCK_CONST_METHOD0(GetMinFirmwareKeyVersion, int());
+  MOCK_CONST_METHOD0(GetMaxFirmwareKeyRollforward, int());
+  MOCK_CONST_METHOD1(SetMaxFirmwareKeyRollforward,
+                     bool(int firmware_max_rollforward));
+  MOCK_CONST_METHOD1(SetMaxKernelKeyRollforward,
+                     bool(int kernel_max_rollforward));
   MOCK_CONST_METHOD0(GetPowerwashCount, int());
   MOCK_CONST_METHOD1(GetNonVolatileDirectory, bool(base::FilePath*));
   MOCK_CONST_METHOD1(GetPowerwashSafeDirectory, bool(base::FilePath*));
   MOCK_CONST_METHOD0(GetFirstActiveOmahaPingSent, bool());
 
   // Returns a reference to the underlying FakeHardware.
-  FakeHardware& fake() {
-    return fake_;
-  }
+  FakeHardware& fake() { return fake_; }
 
  private:
   // The underlying FakeHardware.
@@ -98,7 +114,6 @@
   DISALLOW_COPY_AND_ASSIGN(MockHardware);
 };
 
-
 }  // namespace chromeos_update_engine
 
 #endif  // UPDATE_ENGINE_COMMON_MOCK_HARDWARE_H_
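Every mocked getter and setter here is delegated to the embedded FakeHardware by default, so tests normally only touch the fake. A short sketch of that usage, assuming only what this header and FakeHardware already provide (the test name is illustrative):

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include "update_engine/common/mock_hardware.h"

namespace chromeos_update_engine {

TEST(MockHardwareSketch, DefaultsDelegateToTheFake) {
  testing::NiceMock<MockHardware> hardware;
  // Drive the underlying fake; the ON_CALL defaults installed in the
  // constructor forward the mocked getters to it.
  hardware.fake().SetIsOfficialBuild(false);
  EXPECT_FALSE(hardware.IsOfficialBuild());
}

}  // namespace chromeos_update_engine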
diff --git a/common/mock_http_fetcher.cc b/common/mock_http_fetcher.cc
index f1ae72a..10e3b9e 100644
--- a/common/mock_http_fetcher.cc
+++ b/common/mock_http_fetcher.cc
@@ -32,8 +32,8 @@
 namespace chromeos_update_engine {
 
 MockHttpFetcher::~MockHttpFetcher() {
-  CHECK(timeout_id_ == MessageLoop::kTaskIdNull) <<
-      "Call TerminateTransfer() before dtor.";
+  CHECK(timeout_id_ == MessageLoop::kTaskIdNull)
+      << "Call TerminateTransfer() before dtor.";
 }
 
 void MockHttpFetcher::BeginTransfer(const std::string& url) {
@@ -47,71 +47,49 @@
     SendData(true);
 }
 
-// Returns false on one condition: If timeout_id_ was already set
-// and it needs to be deleted by the caller. If timeout_id_ is null
-// when this function is called, this function will always return true.
-bool MockHttpFetcher::SendData(bool skip_delivery) {
-  if (fail_transfer_) {
+void MockHttpFetcher::SendData(bool skip_delivery) {
+  if (fail_transfer_ || sent_size_ == data_.size()) {
     SignalTransferComplete();
-    return timeout_id_ != MessageLoop::kTaskIdNull;
-  }
-
-  CHECK_LT(sent_size_, data_.size());
-  if (!skip_delivery) {
-    const size_t chunk_size = min(kMockHttpFetcherChunkSize,
-                                  data_.size() - sent_size_);
-    CHECK(delegate_);
-    delegate_->ReceivedBytes(this, &data_[sent_size_], chunk_size);
-    // We may get terminated in the callback.
-    if (sent_size_ == data_.size()) {
-      LOG(INFO) << "Terminated in the ReceivedBytes callback.";
-      return timeout_id_ != MessageLoop::kTaskIdNull;
-    }
-    sent_size_ += chunk_size;
-    CHECK_LE(sent_size_, data_.size());
-    if (sent_size_ == data_.size()) {
-      // We've sent all the data. Notify of success.
-      SignalTransferComplete();
-    }
+    return;
   }
 
   if (paused_) {
-    // If we're paused, we should return true if timeout_id_ is set,
-    // since we need the caller to delete it.
-    return timeout_id_ != MessageLoop::kTaskIdNull;
+    // If we're paused, we should return so no callback is scheduled.
+    return;
   }
 
-  if (timeout_id_ != MessageLoop::kTaskIdNull) {
-    // we still need a timeout if there's more data to send
-    return sent_size_ < data_.size();
-  } else if (sent_size_ < data_.size()) {
-    // we don't have a timeout source and we need one
+  // Set up the timeout callback even if the transfer is about to be
+  // completed, in order to get a call to |TransferComplete|.
+  if (timeout_id_ == MessageLoop::kTaskIdNull) {
     timeout_id_ = MessageLoop::current()->PostDelayedTask(
         FROM_HERE,
         base::Bind(&MockHttpFetcher::TimeoutCallback, base::Unretained(this)),
         base::TimeDelta::FromMilliseconds(10));
   }
-  return true;
+
+  if (!skip_delivery) {
+    const size_t chunk_size =
+        min(kMockHttpFetcherChunkSize, data_.size() - sent_size_);
+    sent_size_ += chunk_size;
+    CHECK(delegate_);
+    delegate_->ReceivedBytes(this, &data_[sent_size_ - chunk_size], chunk_size);
+  }
+  // We may get terminated and deleted right after the |ReceivedBytes| call,
+  // so we should not access any class member variable after this call.
 }
 
 void MockHttpFetcher::TimeoutCallback() {
   CHECK(!paused_);
-  if (SendData(false)) {
-    // We need to re-schedule the timeout.
-    timeout_id_ = MessageLoop::current()->PostDelayedTask(
-        FROM_HERE,
-        base::Bind(&MockHttpFetcher::TimeoutCallback, base::Unretained(this)),
-        base::TimeDelta::FromMilliseconds(10));
-  } else {
-    timeout_id_ = MessageLoop::kTaskIdNull;
-  }
+  timeout_id_ = MessageLoop::kTaskIdNull;
+  CHECK_LE(sent_size_, data_.size());
+  // Same here: we should not access any member variable after this call.
+  SendData(false);
 }
 
 // If the transfer is in progress, aborts the transfer early.
 // The transfer cannot be resumed.
 void MockHttpFetcher::TerminateTransfer() {
   LOG(INFO) << "Terminating transfer.";
-  sent_size_ = data_.size();
   // Kill any timeout, it is ok to call with kTaskIdNull.
   MessageLoop::current()->CancelTask(timeout_id_);
   timeout_id_ = MessageLoop::kTaskIdNull;
@@ -140,9 +118,7 @@
 void MockHttpFetcher::Unpause() {
   CHECK(paused_) << "You must pause before unpause.";
   paused_ = false;
-  if (sent_size_ < data_.size()) {
-    SendData(false);
-  }
+  SendData(false);
 }
 
 void MockHttpFetcher::FailTransfer(int http_response_code) {
diff --git a/common/mock_http_fetcher.h b/common/mock_http_fetcher.h
index 367802e..492e6ce 100644
--- a/common/mock_http_fetcher.h
+++ b/common/mock_http_fetcher.h
@@ -56,8 +56,8 @@
 
   // Constructor overload for string data.
   MockHttpFetcher(const char* data, size_t size, ProxyResolver* proxy_resolver)
-      : MockHttpFetcher(reinterpret_cast<const uint8_t*>(data), size,
-                        proxy_resolver) {}
+      : MockHttpFetcher(
+            reinterpret_cast<const uint8_t*>(data), size, proxy_resolver) {}
 
   // Cleans up all internal state. Does not notify delegate
   ~MockHttpFetcher() override;
@@ -77,9 +77,7 @@
   void set_max_retry_count(int max_retry_count) override {}
 
   // Dummy: no bytes were downloaded.
-  size_t GetBytesDownloaded() override {
-    return sent_size_;
-  }
+  size_t GetBytesDownloaded() override { return sent_size_; }
 
   // Begins the transfer if it hasn't already begun.
   void BeginTransfer(const std::string& url) override;
@@ -107,18 +105,13 @@
   // If set to true, this will EXPECT fail on BeginTransfer
   void set_never_use(bool never_use) { never_use_ = never_use; }
 
-  const brillo::Blob& post_data() const {
-    return post_data_;
-  }
+  const brillo::Blob& post_data() const { return post_data_; }
 
  private:
-  // Sends data to the delegate and sets up a timeout callback if needed.
-  // There must be a delegate and there must be data to send. If there is
-  // already a timeout callback, and it should be deleted by the caller,
-  // this will return false; otherwise true is returned.
-  // If skip_delivery is true, no bytes will be delivered, but the callbacks
-  // still be set if needed.
-  bool SendData(bool skip_delivery);
+  // Sends data to the delegate and sets up a timeout callback if needed. There
+  // must be a delegate. If |skip_delivery| is true, no bytes will be delivered,
+  // but the callbacks will still be set if needed.
+  void SendData(bool skip_delivery);
 
   // Callback for when our message loop timeout expires.
   void TimeoutCallback();
diff --git a/common/mock_prefs.h b/common/mock_prefs.h
index 0e639a2..2582e19 100644
--- a/common/mock_prefs.h
+++ b/common/mock_prefs.h
@@ -30,8 +30,8 @@
  public:
   MOCK_CONST_METHOD2(GetString,
                      bool(const std::string& key, std::string* value));
-  MOCK_METHOD2(SetString, bool(const std::string& key,
-                               const std::string& value));
+  MOCK_METHOD2(SetString,
+               bool(const std::string& key, const std::string& value));
   MOCK_CONST_METHOD2(GetInt64, bool(const std::string& key, int64_t* value));
   MOCK_METHOD2(SetInt64, bool(const std::string& key, const int64_t value));
 
diff --git a/mock_proxy_resolver.h b/common/mock_proxy_resolver.h
similarity index 82%
rename from mock_proxy_resolver.h
rename to common/mock_proxy_resolver.h
index bd6d04f..67de68f 100644
--- a/mock_proxy_resolver.h
+++ b/common/mock_proxy_resolver.h
@@ -14,14 +14,14 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_MOCK_PROXY_RESOLVER_H_
-#define UPDATE_ENGINE_MOCK_PROXY_RESOLVER_H_
+#ifndef UPDATE_ENGINE_COMMON_MOCK_PROXY_RESOLVER_H_
+#define UPDATE_ENGINE_COMMON_MOCK_PROXY_RESOLVER_H_
 
 #include <string>
 
 #include <gmock/gmock.h>
 
-#include "update_engine/proxy_resolver.h"
+#include "update_engine/common/proxy_resolver.h"
 
 namespace chromeos_update_engine {
 
@@ -35,4 +35,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_MOCK_PROXY_RESOLVER_H_
+#endif  // UPDATE_ENGINE_COMMON_MOCK_PROXY_RESOLVER_H_
diff --git a/common/multi_range_http_fetcher.cc b/common/multi_range_http_fetcher.cc
index 1189fde..6ce3dae 100644
--- a/common/multi_range_http_fetcher.cc
+++ b/common/multi_range_http_fetcher.cc
@@ -86,7 +86,7 @@
 }
 
 // State change: Downloading -> Downloading or Pending transfer ended
-void MultiRangeHttpFetcher::ReceivedBytes(HttpFetcher* fetcher,
+bool MultiRangeHttpFetcher::ReceivedBytes(HttpFetcher* fetcher,
                                           const void* bytes,
                                           size_t length) {
   CHECK_LT(current_index_, ranges_.size());
@@ -95,23 +95,28 @@
   size_t next_size = length;
   Range range = ranges_[current_index_];
   if (range.HasLength()) {
-    next_size = std::min(next_size,
-                         range.length() - bytes_received_this_range_);
+    next_size =
+        std::min(next_size, range.length() - bytes_received_this_range_);
   }
   LOG_IF(WARNING, next_size <= 0) << "Asked to write length <= 0";
-  if (delegate_) {
-    delegate_->ReceivedBytes(this, bytes, next_size);
-  }
+  // bytes_received_this_range_ needs to be updated regardless of the delegate_
+  // result, because it will be used to determine a successful transfer in
+  // TransferEnded().
   bytes_received_this_range_ += length;
+  if (delegate_ && !delegate_->ReceivedBytes(this, bytes, next_size))
+    return false;
+
   if (range.HasLength() && bytes_received_this_range_ >= range.length()) {
     // Terminates the current fetcher. Waits for its TransferTerminated
     // callback before starting the next range so that we don't end up
     // signalling the delegate that the whole multi-transfer is complete
     // before all fetchers are really done and cleaned up.
     pending_transfer_ended_ = true;
-    LOG(INFO) << "terminating transfer";
+    LOG(INFO) << "Terminating transfer.";
     fetcher->TerminateTransfer();
+    return false;
   }
+  return true;
 }
 
 // State change: Downloading or Pending transfer ended -> Stopped
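With ReceivedBytes() now returning false both when the delegate rejects data and when a range boundary forces the underlying fetcher to be terminated, callers still configure ranges the same way. A small sketch of a two-range setup, following the new MultiRangeHttpFetcherOverFileFetcherTest fixture earlier in this change; the FileFetcher header path is an assumption:

#include "update_engine/common/file_fetcher.h"  // Assumed location of FileFetcher.
#include "update_engine/common/multi_range_http_fetcher.h"

namespace chromeos_update_engine {

MultiRangeHttpFetcher* MakeTwoRangeFetcher() {
  // The wrapper is handed the base fetcher on construction, as in the test
  // fixture above.
  MultiRangeHttpFetcher* fetcher =
      new MultiRangeHttpFetcher(new FileFetcher());
  fetcher->ClearRanges();
  fetcher->AddRange(0, 25);   // First 25 bytes.
  fetcher->AddRange(99, 17);  // 17 bytes starting at offset 99; FileFetcher
                              // needs an explicit length for every range.
  return fetcher;
}

}  // namespace chromeos_update_engine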
diff --git a/common/multi_range_http_fetcher.h b/common/multi_range_http_fetcher.h
index 54ddfbc..f57ea7f 100644
--- a/common/multi_range_http_fetcher.h
+++ b/common/multi_range_http_fetcher.h
@@ -62,9 +62,7 @@
     ranges_.push_back(Range(offset, size));
   }
 
-  void AddRange(off_t offset) {
-    ranges_.push_back(Range(offset));
-  }
+  void AddRange(off_t offset) { ranges_.push_back(Range(offset)); }
 
   // HttpFetcher overrides.
   void SetOffset(off_t offset) override;
@@ -146,7 +144,7 @@
 
   // HttpFetcherDelegate overrides.
   // State change: Downloading -> Downloading or Pending transfer ended
-  void ReceivedBytes(HttpFetcher* fetcher,
+  bool ReceivedBytes(HttpFetcher* fetcher,
                      const void* bytes,
                      size_t length) override;
 
diff --git a/common/platform_constants_android.cc b/common/platform_constants_android.cc
index 371fe26..9d8d30e 100644
--- a/common/platform_constants_android.cc
+++ b/common/platform_constants_android.cc
@@ -32,7 +32,7 @@
 const char kOmahaResponseDeadlineFile[] = "";
 const char kNonVolatileDirectory[] = "/data/misc/update_engine";
 const char kPostinstallMountOptions[] =
-  "context=u:object_r:postinstall_file:s0";
+    "context=u:object_r:postinstall_file:s0";
 
 }  // namespace constants
 }  // namespace chromeos_update_engine
diff --git a/common/platform_constants_chromeos.cc b/common/platform_constants_chromeos.cc
index 3ebcf8a..f1ac490 100644
--- a/common/platform_constants_chromeos.cc
+++ b/common/platform_constants_chromeos.cc
@@ -22,14 +22,13 @@
 const char kOmahaDefaultProductionURL[] =
     "https://tools.google.com/service/update2";
 const char kOmahaDefaultAUTestURL[] =
-    "https://omaha.sandbox.google.com/service/update2";
+    "https://omaha-qa.sandbox.google.com/service/update2";
 const char kOmahaUpdaterID[] = "ChromeOSUpdateEngine";
 const char kOmahaPlatformName[] = "Chrome OS";
 const char kUpdatePayloadPublicKeyPath[] =
     "/usr/share/update_engine/update-payload-key.pub.pem";
 const char kCACertificatesPath[] = "/usr/share/chromeos-ca-certificates";
-const char kOmahaResponseDeadlineFile[] =
-    "/tmp/update-check-response-deadline";
+const char kOmahaResponseDeadlineFile[] = "/tmp/update-check-response-deadline";
 // This directory is wiped during powerwash.
 const char kNonVolatileDirectory[] = "/var/lib/update_engine";
 const char kPostinstallMountOptions[] = "";
diff --git a/common/prefs_unittest.cc b/common/prefs_unittest.cc
index aa2eb04..cb6fc70 100644
--- a/common/prefs_unittest.cc
+++ b/common/prefs_unittest.cc
@@ -30,13 +30,13 @@
 #include <gtest/gtest.h>
 
 using std::string;
-using testing::Eq;
 using testing::_;
+using testing::Eq;
 
 namespace {
 // Test key used along the tests.
 const char kKey[] = "test-key";
-}
+}  // namespace
 
 namespace chromeos_update_engine {
 
@@ -49,7 +49,8 @@
   }
 
   bool SetValue(const string& key, const string& value) {
-    return base::WriteFile(prefs_dir_.Append(key), value.data(),
+    return base::WriteFile(prefs_dir_.Append(key),
+                           value.data(),
                            value.length()) == static_cast<int>(value.length());
   }
 
@@ -143,16 +144,18 @@
 }
 
 TEST_F(PrefsTest, GetInt64Max) {
-  ASSERT_TRUE(SetValue(kKey, base::StringPrintf(
-      "%" PRIi64, std::numeric_limits<int64_t>::max())));
+  ASSERT_TRUE(SetValue(
+      kKey,
+      base::StringPrintf("%" PRIi64, std::numeric_limits<int64_t>::max())));
   int64_t value;
   EXPECT_TRUE(prefs_.GetInt64(kKey, &value));
   EXPECT_EQ(std::numeric_limits<int64_t>::max(), value);
 }
 
 TEST_F(PrefsTest, GetInt64Min) {
-  ASSERT_TRUE(SetValue(kKey, base::StringPrintf(
-        "%" PRIi64, std::numeric_limits<int64_t>::min())));
+  ASSERT_TRUE(SetValue(
+      kKey,
+      base::StringPrintf("%" PRIi64, std::numeric_limits<int64_t>::min())));
   int64_t value;
   EXPECT_TRUE(prefs_.GetInt64(kKey, &value));
   EXPECT_EQ(std::numeric_limits<int64_t>::min(), value);
diff --git a/proxy_resolver.cc b/common/proxy_resolver.cc
similarity index 97%
rename from proxy_resolver.cc
rename to common/proxy_resolver.cc
index 2ec59db..0591c3e 100644
--- a/proxy_resolver.cc
+++ b/common/proxy_resolver.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/proxy_resolver.h"
+#include "update_engine/common/proxy_resolver.h"
 
 #include <base/bind.h>
 #include <base/location.h>
@@ -63,5 +63,4 @@
   callback.Run(proxies);
 }
 
-
 }  // namespace chromeos_update_engine
diff --git a/proxy_resolver.h b/common/proxy_resolver.h
similarity index 95%
rename from proxy_resolver.h
rename to common/proxy_resolver.h
index 19a400f..9bd51fc 100644
--- a/proxy_resolver.h
+++ b/common/proxy_resolver.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_PROXY_RESOLVER_H_
-#define UPDATE_ENGINE_PROXY_RESOLVER_H_
+#ifndef UPDATE_ENGINE_COMMON_PROXY_RESOLVER_H_
+#define UPDATE_ENGINE_COMMON_PROXY_RESOLVER_H_
 
 #include <deque>
 #include <string>
@@ -95,4 +95,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_PROXY_RESOLVER_H_
+#endif  // UPDATE_ENGINE_COMMON_PROXY_RESOLVER_H_
diff --git a/proxy_resolver_unittest.cc b/common/proxy_resolver_unittest.cc
similarity index 97%
rename from proxy_resolver_unittest.cc
rename to common/proxy_resolver_unittest.cc
index 484aae1..101bf6b 100644
--- a/proxy_resolver_unittest.cc
+++ b/common/proxy_resolver_unittest.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/proxy_resolver.h"
+#include "update_engine/common/proxy_resolver.h"
 
 #include <deque>
 #include <string>
diff --git a/common/subprocess.cc b/common/subprocess.cc
index 4e6d352..0131f10 100644
--- a/common/subprocess.cc
+++ b/common/subprocess.cc
@@ -23,6 +23,7 @@
 
 #include <memory>
 #include <string>
+#include <utility>
 #include <vector>
 
 #include <base/bind.h>
@@ -100,7 +101,7 @@
 }  // namespace
 
 void Subprocess::Init(
-      brillo::AsynchronousSignalHandlerInterface* async_signal_handler) {
+    brillo::AsynchronousSignalHandlerInterface* async_signal_handler) {
   if (subprocess_singleton_ == this)
     return;
   CHECK(subprocess_singleton_ == nullptr);
@@ -185,9 +186,10 @@
   }
 
   pid_t pid = record->proc.pid();
-  CHECK(process_reaper_.WatchForChild(FROM_HERE, pid, base::Bind(
-      &Subprocess::ChildExitedCallback,
-      base::Unretained(this))));
+  CHECK(process_reaper_.WatchForChild(
+      FROM_HERE,
+      pid,
+      base::Bind(&Subprocess::ChildExitedCallback, base::Unretained(this))));
 
   record->stdout_fd = record->proc.GetPipe(STDOUT_FILENO);
   // Capture the subprocess output. Make our end of the pipe non-blocking.
@@ -236,10 +238,7 @@
   // The default for SynchronousExec is to use kSearchPath since the code relies
   // on that.
   return SynchronousExecFlags(
-      cmd,
-      kRedirectStderrToStdout | kSearchPath,
-      return_code,
-      stdout);
+      cmd, kRedirectStderrToStdout | kSearchPath, return_code, stdout);
 }
 
 bool Subprocess::SynchronousExecFlags(const vector<string>& cmd,
@@ -281,12 +280,19 @@
   return proc_return_code != brillo::Process::kErrorExitStatus;
 }
 
-bool Subprocess::SubprocessInFlight() {
-  for (const auto& pid_record : subprocess_records_) {
-    if (!pid_record.second->callback.is_null())
-      return true;
+void Subprocess::FlushBufferedLogsAtExit() {
+  if (!subprocess_records_.empty()) {
+    LOG(INFO) << "We are exiting, but there are still in flight subprocesses!";
+    for (auto& pid_record : subprocess_records_) {
+      SubprocessRecord* record = pid_record.second.get();
+      // Make sure we read any remaining process output.
+      OnStdoutReady(record);
+      if (!record->stdout.empty()) {
+        LOG(INFO) << "Subprocess(" << pid_record.first << ") output:\n"
+                  << record->stdout;
+      }
+    }
   }
-  return false;
 }
 
 Subprocess* Subprocess::subprocess_singleton_ = nullptr;
diff --git a/common/subprocess.h b/common/subprocess.h
index b655fb7..bc19d16 100644
--- a/common/subprocess.h
+++ b/common/subprocess.h
@@ -97,19 +97,19 @@
                                    std::string* stdout);
 
   // Gets the one instance.
-  static Subprocess& Get() {
-    return *subprocess_singleton_;
-  }
+  static Subprocess& Get() { return *subprocess_singleton_; }
 
-  // Returns true iff there is at least one subprocess we're waiting on.
-  bool SubprocessInFlight();
+  // Tries to log the output of all in-flight subprocesses. It is used right
+  // before update_engine exits, typically when a subprocess caused a system
+  // shutdown.
+  void FlushBufferedLogsAtExit();
 
  private:
   FRIEND_TEST(SubprocessTest, CancelTest);
 
   struct SubprocessRecord {
     explicit SubprocessRecord(const ExecCallback& callback)
-      : callback(callback) {}
+        : callback(callback) {}
 
     // The callback supplied by the caller.
     ExecCallback callback;
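SubprocessInFlight() is replaced by a logging helper: instead of asking whether children are still running, the shutdown path can dump whatever they have printed so far. A hypothetical shutdown hook showing the intended call; the function name is illustrative and not part of this change:

#include "update_engine/common/subprocess.h"

namespace chromeos_update_engine {

// Illustrative shutdown hook, not part of this change.
void LogChildOutputBeforeExit() {
  // Reads and logs the buffered stdout of any subprocess that is still
  // running, so its output is not lost when update_engine exits (for example
  // after a postinstall step triggered a reboot).
  Subprocess::Get().FlushBufferedLogsAtExit();
}

}  // namespace chromeos_update_engine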
diff --git a/common/subprocess_unittest.cc b/common/subprocess_unittest.cc
index c8996db..104ef41 100644
--- a/common/subprocess_unittest.cc
+++ b/common/subprocess_unittest.cc
@@ -77,8 +77,10 @@
 
 namespace {
 
-void ExpectedResults(int expected_return_code, const string& expected_output,
-                     int return_code, const string& output) {
+void ExpectedResults(int expected_return_code,
+                     const string& expected_output,
+                     int return_code,
+                     const string& output) {
   EXPECT_EQ(expected_return_code, return_code);
   EXPECT_EQ(expected_output, output);
   MessageLoop::current()->BreakLoop();
@@ -88,8 +90,8 @@
   EXPECT_EQ(0, return_code);
   const std::set<string> allowed_envs = {"LD_LIBRARY_PATH", "PATH"};
   for (const string& key_value : brillo::string_utils::Split(output, "\n")) {
-    auto key_value_pair = brillo::string_utils::SplitAtFirst(
-        key_value, "=", true);
+    auto key_value_pair =
+        brillo::string_utils::SplitAtFirst(key_value, "=", true);
     EXPECT_NE(allowed_envs.end(), allowed_envs.find(key_value_pair.first));
   }
   MessageLoop::current()->BreakLoop();
@@ -197,9 +199,7 @@
 
 TEST_F(SubprocessTest, SynchronousEchoTest) {
   vector<string> cmd = {
-      kBinPath "/sh",
-      "-c",
-      "echo -n stdout-here; echo -n stderr-there >&2"};
+      kBinPath "/sh", "-c", "echo -n stdout-here; echo -n stderr-there >&2"};
   int rc = -1;
   string stdout;
   ASSERT_TRUE(Subprocess::SynchronousExec(cmd, &rc, &stdout));
@@ -259,20 +259,24 @@
                             fifo_fd,
                             MessageLoop::WatchMode::kWatchRead,
                             false,
-                            base::Bind([](int fifo_fd, uint32_t tag) {
-                              char c;
-                              EXPECT_EQ(1, HANDLE_EINTR(read(fifo_fd, &c, 1)));
-                              EXPECT_EQ('X', c);
-                              LOG(INFO) << "Killing tag " << tag;
-                              Subprocess::Get().KillExec(tag);
-                            }, fifo_fd, tag));
+                            base::Bind(
+                                [](int fifo_fd, uint32_t tag) {
+                                  char c;
+                                  EXPECT_EQ(1,
+                                            HANDLE_EINTR(read(fifo_fd, &c, 1)));
+                                  EXPECT_EQ('X', c);
+                                  LOG(INFO) << "Killing tag " << tag;
+                                  Subprocess::Get().KillExec(tag);
+                                },
+                                fifo_fd,
+                                tag));
 
   // This test would leak a callback that runs when the child process exits
   // unless we wait for it to run.
   brillo::MessageLoopRunUntil(
-      &loop_,
-      TimeDelta::FromSeconds(120),
-      base::Bind([] { return Subprocess::Get().subprocess_records_.empty(); }));
+      &loop_, TimeDelta::FromSeconds(120), base::Bind([] {
+        return Subprocess::Get().subprocess_records_.empty();
+      }));
   EXPECT_TRUE(Subprocess::Get().subprocess_records_.empty());
   // Check that there isn't anything else to read from the pipe.
   char c;
diff --git a/common/test_utils.cc b/common/test_utils.cc
index 85f78f9..50b0962 100644
--- a/common/test_utils.cc
+++ b/common/test_utils.cc
@@ -28,7 +28,6 @@
 #include <sys/stat.h>
 #include <sys/sysmacros.h>
 #include <sys/types.h>
-#include <sys/xattr.h>
 #include <unistd.h>
 
 #include <set>
@@ -36,20 +35,26 @@
 #include <vector>
 
 #include <base/files/file_util.h>
-#include <base/format_macros.h>
 #include <base/logging.h>
-#include <base/strings/string_util.h>
-#include <base/strings/stringprintf.h>
 
 #include "update_engine/common/error_code_utils.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/file_writer.h"
 
-using base::StringPrintf;
 using std::set;
 using std::string;
 using std::vector;
 
+namespace {
+
+#ifdef __ANDROID__
+#define kLoopDevicePrefix "/dev/block/loop"
+#else
+#define kLoopDevicePrefix "/dev/loop"
+#endif  // __ANDROID__
+
+}  // namespace
+
 namespace chromeos_update_engine {
 
 void PrintTo(const Extent& extent, ::std::ostream* os) {
@@ -63,44 +68,31 @@
 namespace test_utils {
 
 const uint8_t kRandomString[] = {
-  0xf2, 0xb7, 0x55, 0x92, 0xea, 0xa6, 0xc9, 0x57,
-  0xe0, 0xf8, 0xeb, 0x34, 0x93, 0xd9, 0xc4, 0x8f,
-  0xcb, 0x20, 0xfa, 0x37, 0x4b, 0x40, 0xcf, 0xdc,
-  0xa5, 0x08, 0x70, 0x89, 0x79, 0x35, 0xe2, 0x3d,
-  0x56, 0xa4, 0x75, 0x73, 0xa3, 0x6d, 0xd1, 0xd5,
-  0x26, 0xbb, 0x9c, 0x60, 0xbd, 0x2f, 0x5a, 0xfa,
-  0xb7, 0xd4, 0x3a, 0x50, 0xa7, 0x6b, 0x3e, 0xfd,
-  0x61, 0x2b, 0x3a, 0x31, 0x30, 0x13, 0x33, 0x53,
-  0xdb, 0xd0, 0x32, 0x71, 0x5c, 0x39, 0xed, 0xda,
-  0xb4, 0x84, 0xca, 0xbc, 0xbd, 0x78, 0x1c, 0x0c,
-  0xd8, 0x0b, 0x41, 0xe8, 0xe1, 0xe0, 0x41, 0xad,
-  0x03, 0x12, 0xd3, 0x3d, 0xb8, 0x75, 0x9b, 0xe6,
-  0xd9, 0x01, 0xd0, 0x87, 0xf4, 0x36, 0xfa, 0xa7,
-  0x0a, 0xfa, 0xc5, 0x87, 0x65, 0xab, 0x9a, 0x7b,
-  0xeb, 0x58, 0x23, 0xf0, 0xa8, 0x0a, 0xf2, 0x33,
-  0x3a, 0xe2, 0xe3, 0x35, 0x74, 0x95, 0xdd, 0x3c,
-  0x59, 0x5a, 0xd9, 0x52, 0x3a, 0x3c, 0xac, 0xe5,
-  0x15, 0x87, 0x6d, 0x82, 0xbc, 0xf8, 0x7d, 0xbe,
-  0xca, 0xd3, 0x2c, 0xd6, 0xec, 0x38, 0xeb, 0xe4,
-  0x53, 0xb0, 0x4c, 0x3f, 0x39, 0x29, 0xf7, 0xa4,
-  0x73, 0xa8, 0xcb, 0x32, 0x50, 0x05, 0x8c, 0x1c,
-  0x1c, 0xca, 0xc9, 0x76, 0x0b, 0x8f, 0x6b, 0x57,
-  0x1f, 0x24, 0x2b, 0xba, 0x82, 0xba, 0xed, 0x58,
-  0xd8, 0xbf, 0xec, 0x06, 0x64, 0x52, 0x6a, 0x3f,
-  0xe4, 0xad, 0xce, 0x84, 0xb4, 0x27, 0x55, 0x14,
-  0xe3, 0x75, 0x59, 0x73, 0x71, 0x51, 0xea, 0xe8,
-  0xcc, 0xda, 0x4f, 0x09, 0xaf, 0xa4, 0xbc, 0x0e,
-  0xa6, 0x1f, 0xe2, 0x3a, 0xf8, 0x96, 0x7d, 0x30,
-  0x23, 0xc5, 0x12, 0xb5, 0xd8, 0x73, 0x6b, 0x71,
-  0xab, 0xf1, 0xd7, 0x43, 0x58, 0xa7, 0xc9, 0xf0,
-  0xe4, 0x85, 0x1c, 0xd6, 0x92, 0x50, 0x2c, 0x98,
-  0x36, 0xfe, 0x87, 0xaf, 0x43, 0x8f, 0x8f, 0xf5,
-  0x88, 0x48, 0x18, 0x42, 0xcf, 0x42, 0xc1, 0xa8,
-  0xe8, 0x05, 0x08, 0xa1, 0x45, 0x70, 0x5b, 0x8c,
-  0x39, 0x28, 0xab, 0xe9, 0x6b, 0x51, 0xd2, 0xcb,
-  0x30, 0x04, 0xea, 0x7d, 0x2f, 0x6e, 0x6c, 0x3b,
-  0x5f, 0x82, 0xd9, 0x5b, 0x89, 0x37, 0x65, 0x65,
-  0xbe, 0x9f, 0xa3, 0x5d,
+    0xf2, 0xb7, 0x55, 0x92, 0xea, 0xa6, 0xc9, 0x57, 0xe0, 0xf8, 0xeb, 0x34,
+    0x93, 0xd9, 0xc4, 0x8f, 0xcb, 0x20, 0xfa, 0x37, 0x4b, 0x40, 0xcf, 0xdc,
+    0xa5, 0x08, 0x70, 0x89, 0x79, 0x35, 0xe2, 0x3d, 0x56, 0xa4, 0x75, 0x73,
+    0xa3, 0x6d, 0xd1, 0xd5, 0x26, 0xbb, 0x9c, 0x60, 0xbd, 0x2f, 0x5a, 0xfa,
+    0xb7, 0xd4, 0x3a, 0x50, 0xa7, 0x6b, 0x3e, 0xfd, 0x61, 0x2b, 0x3a, 0x31,
+    0x30, 0x13, 0x33, 0x53, 0xdb, 0xd0, 0x32, 0x71, 0x5c, 0x39, 0xed, 0xda,
+    0xb4, 0x84, 0xca, 0xbc, 0xbd, 0x78, 0x1c, 0x0c, 0xd8, 0x0b, 0x41, 0xe8,
+    0xe1, 0xe0, 0x41, 0xad, 0x03, 0x12, 0xd3, 0x3d, 0xb8, 0x75, 0x9b, 0xe6,
+    0xd9, 0x01, 0xd0, 0x87, 0xf4, 0x36, 0xfa, 0xa7, 0x0a, 0xfa, 0xc5, 0x87,
+    0x65, 0xab, 0x9a, 0x7b, 0xeb, 0x58, 0x23, 0xf0, 0xa8, 0x0a, 0xf2, 0x33,
+    0x3a, 0xe2, 0xe3, 0x35, 0x74, 0x95, 0xdd, 0x3c, 0x59, 0x5a, 0xd9, 0x52,
+    0x3a, 0x3c, 0xac, 0xe5, 0x15, 0x87, 0x6d, 0x82, 0xbc, 0xf8, 0x7d, 0xbe,
+    0xca, 0xd3, 0x2c, 0xd6, 0xec, 0x38, 0xeb, 0xe4, 0x53, 0xb0, 0x4c, 0x3f,
+    0x39, 0x29, 0xf7, 0xa4, 0x73, 0xa8, 0xcb, 0x32, 0x50, 0x05, 0x8c, 0x1c,
+    0x1c, 0xca, 0xc9, 0x76, 0x0b, 0x8f, 0x6b, 0x57, 0x1f, 0x24, 0x2b, 0xba,
+    0x82, 0xba, 0xed, 0x58, 0xd8, 0xbf, 0xec, 0x06, 0x64, 0x52, 0x6a, 0x3f,
+    0xe4, 0xad, 0xce, 0x84, 0xb4, 0x27, 0x55, 0x14, 0xe3, 0x75, 0x59, 0x73,
+    0x71, 0x51, 0xea, 0xe8, 0xcc, 0xda, 0x4f, 0x09, 0xaf, 0xa4, 0xbc, 0x0e,
+    0xa6, 0x1f, 0xe2, 0x3a, 0xf8, 0x96, 0x7d, 0x30, 0x23, 0xc5, 0x12, 0xb5,
+    0xd8, 0x73, 0x6b, 0x71, 0xab, 0xf1, 0xd7, 0x43, 0x58, 0xa7, 0xc9, 0xf0,
+    0xe4, 0x85, 0x1c, 0xd6, 0x92, 0x50, 0x2c, 0x98, 0x36, 0xfe, 0x87, 0xaf,
+    0x43, 0x8f, 0x8f, 0xf5, 0x88, 0x48, 0x18, 0x42, 0xcf, 0x42, 0xc1, 0xa8,
+    0xe8, 0x05, 0x08, 0xa1, 0x45, 0x70, 0x5b, 0x8c, 0x39, 0x28, 0xab, 0xe9,
+    0x6b, 0x51, 0xd2, 0xcb, 0x30, 0x04, 0xea, 0x7d, 0x2f, 0x6e, 0x6c, 0x3b,
+    0x5f, 0x82, 0xd9, 0x5b, 0x89, 0x37, 0x65, 0x65, 0xbe, 0x9f, 0xa3, 0x5d,
 };
 
 string Readlink(const string& path) {
@@ -112,36 +104,6 @@
   return string(buf.begin(), buf.begin() + r);
 }
 
-bool IsXAttrSupported(const base::FilePath& dir_path) {
-  char *path = strdup(dir_path.Append("xattr_test_XXXXXX").value().c_str());
-
-  int fd = mkstemp(path);
-  if (fd == -1) {
-    PLOG(ERROR) << "Error creating temporary file in " << dir_path.value();
-    free(path);
-    return false;
-  }
-
-  if (unlink(path) != 0) {
-    PLOG(ERROR) << "Error unlinking temporary file " << path;
-    close(fd);
-    free(path);
-    return false;
-  }
-
-  int xattr_res = fsetxattr(fd, "user.xattr-test", "value", strlen("value"), 0);
-  if (xattr_res != 0) {
-    if (errno == ENOTSUP) {
-      // Leave it to call-sites to warn about non-support.
-    } else {
-      PLOG(ERROR) << "Error setting xattr on " << path;
-    }
-  }
-  close(fd);
-  free(path);
-  return xattr_res == 0;
-}
-
 bool WriteFileVector(const string& path, const brillo::Blob& data) {
   return utils::WriteFile(path.c_str(), data.data(), data.size());
 }
@@ -160,7 +122,7 @@
   TEST_AND_RETURN_FALSE_ERRNO(control_fd >= 0);
   int loop_number = ioctl(control_fd, LOOP_CTL_GET_FREE);
   IGNORE_EINTR(close(control_fd));
-  *out_lo_dev_name = StringPrintf("/dev/loop%d", loop_number);
+  *out_lo_dev_name = kLoopDevicePrefix + std::to_string(loop_number);
 
   // Double check that the loop exists and is free.
   int loop_device_fd =
@@ -230,8 +192,7 @@
   return true;
 }
 
-bool ExpectVectorsEq(const brillo::Blob& expected,
-                     const brillo::Blob& actual) {
+bool ExpectVectorsEq(const brillo::Blob& expected, const brillo::Blob& actual) {
   EXPECT_EQ(expected.size(), actual.size());
   if (expected.size() != actual.size())
     return false;
diff --git a/common/test_utils.h b/common/test_utils.h
index ddb3d34..44b7aa1 100644
--- a/common/test_utils.h
+++ b/common/test_utils.h
@@ -24,18 +24,15 @@
 // Streams used for gtest's PrintTo() functions.
 #include <iostream>  // NOLINT(readability/streams)
 #include <memory>
-#include <set>
 #include <string>
 #include <vector>
 
-#include <base/callback.h>
 #include <base/files/file_path.h>
 #include <base/files/scoped_temp_dir.h>
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
 
 #include "update_engine/common/action.h"
-#include "update_engine/common/subprocess.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/update_metadata.pb.h"
 
@@ -76,30 +73,9 @@
   return system(cmd.c_str());
 }
 
-inline int Symlink(const std::string& oldpath, const std::string& newpath) {
-  return symlink(oldpath.c_str(), newpath.c_str());
-}
-
-inline int Chmod(const std::string& path, mode_t mode) {
-  return chmod(path.c_str(), mode);
-}
-
-inline int Mkdir(const std::string& path, mode_t mode) {
-  return mkdir(path.c_str(), mode);
-}
-
-inline int Chdir(const std::string& path) {
-  return chdir(path.c_str());
-}
-
 // Reads a symlink from disk. Returns empty string on failure.
 std::string Readlink(const std::string& path);
 
-// Checks if xattr is supported in the directory specified by
-// |dir_path| which must be writable. Returns true if the feature is
-// supported, false if not or if an error occurred.
-bool IsXAttrSupported(const base::FilePath& dir_path);
-
 void FillWithData(brillo::Blob* buffer);
 
 // Compare the value of native array for download source parameter.
@@ -111,14 +87,14 @@
 class ScopedFilesystemUnmounter {
  public:
   explicit ScopedFilesystemUnmounter(const std::string& mountpoint)
-      : mountpoint_(mountpoint),
-        should_unmount_(true) {}
+      : mountpoint_(mountpoint), should_unmount_(true) {}
   ~ScopedFilesystemUnmounter() {
     if (should_unmount_) {
       utils::UnmountFilesystem(mountpoint_);
     }
   }
   void set_should_unmount(bool unmount) { should_unmount_ = unmount; }
+
  private:
   const std::string mountpoint_;
   bool should_unmount_;
@@ -149,7 +125,7 @@
     ADD_FAILURE();
   }
 
-  const std::string &dev() {
+  const std::string& dev() const {
     EXPECT_TRUE(is_bound_);
     return dev_;
   }
@@ -207,10 +183,10 @@
 
 class NoneType;
 
-template<typename T>
+template <typename T>
 class ObjectFeederAction;
 
-template<typename T>
+template <typename T>
 class ActionTraits<ObjectFeederAction<T>> {
  public:
   typedef T OutputObjectType;
@@ -219,7 +195,7 @@
 
 // This is a simple Action class for testing. It feeds an object into
 // another action.
-template<typename T>
+template <typename T>
 class ObjectFeederAction : public Action<ObjectFeederAction<T>> {
  public:
   typedef NoneType InputObjectType;
@@ -234,17 +210,16 @@
   }
   static std::string StaticType() { return "ObjectFeederAction"; }
   std::string Type() const { return StaticType(); }
-  void set_obj(const T& out_obj) {
-    out_obj_ = out_obj;
-  }
+  void set_obj(const T& out_obj) { out_obj_ = out_obj; }
+
  private:
   T out_obj_;
 };
 
-template<typename T>
+template <typename T>
 class ObjectCollectorAction;
 
-template<typename T>
+template <typename T>
 class ActionTraits<ObjectCollectorAction<T>> {
  public:
   typedef NoneType OutputObjectType;
@@ -253,7 +228,7 @@
 
 // This is a simple Action class for testing. It receives an object from
 // another action.
-template<typename T>
+template <typename T>
 class ObjectCollectorAction : public Action<ObjectCollectorAction<T>> {
  public:
   typedef T InputObjectType;
@@ -269,6 +244,7 @@
   static std::string StaticType() { return "ObjectCollectorAction"; }
   std::string Type() const { return StaticType(); }
   const T& object() const { return object_; }
+
  private:
   T object_;
 };
diff --git a/common/utils.cc b/common/utils.cc
index f651823..34d97a2 100644
--- a/common/utils.cc
+++ b/common/utils.cc
@@ -61,6 +61,7 @@
 using base::Time;
 using base::TimeDelta;
 using std::min;
+using std::numeric_limits;
 using std::pair;
 using std::string;
 using std::vector;
@@ -82,16 +83,11 @@
 // The path to the kernel's boot_id.
 const char kBootIdPath[] = "/proc/sys/kernel/random/boot_id";
 
-// A pointer to a null-terminated string containing the root directory where all
-// the temporary files should be created. If null, the system default is used
-// instead.
-const char* root_temp_dir = nullptr;
-
 // Return true if |disk_name| is an MTD or a UBI device. Note that this test is
 // simply based on the name of the device.
 bool IsMtdDeviceName(const string& disk_name) {
-  return base::StartsWith(disk_name, "/dev/ubi",
-                          base::CompareCase::SENSITIVE) ||
+  return base::StartsWith(
+             disk_name, "/dev/ubi", base::CompareCase::SENSITIVE) ||
          base::StartsWith(disk_name, "/dev/mtd", base::CompareCase::SENSITIVE);
 }
 
@@ -143,15 +139,11 @@
   }
 
   base::FilePath temp_dir;
-  if (root_temp_dir) {
-    temp_dir = base::FilePath(root_temp_dir);
-  } else {
 #ifdef __ANDROID__
-    temp_dir = base::FilePath(constants::kNonVolatileDirectory).Append("tmp");
+  temp_dir = base::FilePath(constants::kNonVolatileDirectory).Append("tmp");
 #else
-    TEST_AND_RETURN_FALSE(base::GetTempDir(&temp_dir));
+  TEST_AND_RETURN_FALSE(base::GetTempDir(&temp_dir));
 #endif  // __ANDROID__
-  }
   if (!base::PathExists(temp_dir))
     TEST_AND_RETURN_FALSE(base::CreateDirectory(temp_dir));
   *template_path = temp_dir.Append(path);
@@ -162,10 +154,6 @@
 
 namespace utils {
 
-void SetRootTempDir(const char* new_root_temp_dir) {
-  root_temp_dir = new_root_temp_dir;
-}
-
 string ParseECVersion(string input_line) {
   base::TrimWhitespaceASCII(input_line, base::TRIM_ALL, &input_line);
 
@@ -240,13 +228,15 @@
   int num_attempts = 0;
   while (bytes_written < count) {
     num_attempts++;
-    ssize_t rc = pwrite(fd, c_buf + bytes_written, count - bytes_written,
+    ssize_t rc = pwrite(fd,
+                        c_buf + bytes_written,
+                        count - bytes_written,
                         offset + bytes_written);
     // TODO(garnold) for debugging failure in chromium-os:31077; to be removed.
     if (rc < 0) {
       PLOG(ERROR) << "pwrite error; num_attempts=" << num_attempts
-                  << " bytes_written=" << bytes_written
-                  << " count=" << count << " offset=" << offset;
+                  << " bytes_written=" << bytes_written << " count=" << count
+                  << " offset=" << offset;
     }
     TEST_AND_RETURN_FALSE_ERRNO(rc >= 0);
     bytes_written += rc;
@@ -274,13 +264,13 @@
   return WriteAll(fd, buf, count);
 }
 
-bool PReadAll(int fd, void* buf, size_t count, off_t offset,
-              ssize_t* out_bytes_read) {
+bool PReadAll(
+    int fd, void* buf, size_t count, off_t offset, ssize_t* out_bytes_read) {
   char* c_buf = static_cast<char*>(buf);
   ssize_t bytes_read = 0;
   while (bytes_read < static_cast<ssize_t>(count)) {
-    ssize_t rc = pread(fd, c_buf + bytes_read, count - bytes_read,
-                       offset + bytes_read);
+    ssize_t rc =
+        pread(fd, c_buf + bytes_read, count - bytes_read, offset + bytes_read);
     TEST_AND_RETURN_FALSE_ERRNO(rc >= 0);
     if (rc == 0) {
       break;
@@ -291,7 +281,10 @@
   return true;
 }
 
-bool PReadAll(const FileDescriptorPtr& fd, void* buf, size_t count, off_t offset,
+bool PReadAll(const FileDescriptorPtr& fd,
+              void* buf,
+              size_t count,
+              off_t offset,
               ssize_t* out_bytes_read) {
   TEST_AND_RETURN_FALSE_ERRNO(fd->Seek(offset, SEEK_SET) !=
                               static_cast<off_t>(-1));
@@ -311,14 +304,14 @@
 
 // Append |nbytes| of content from |buf| to the vector pointed to by either
 // |vec_p| or |str_p|.
-static void AppendBytes(const uint8_t* buf, size_t nbytes,
+static void AppendBytes(const uint8_t* buf,
+                        size_t nbytes,
                         brillo::Blob* vec_p) {
   CHECK(buf);
   CHECK(vec_p);
   vec_p->insert(vec_p->end(), buf, buf + nbytes);
 }
-static void AppendBytes(const uint8_t* buf, size_t nbytes,
-                        string* str_p) {
+static void AppendBytes(const uint8_t* buf, size_t nbytes, string* str_p) {
   CHECK(buf);
   CHECK(str_p);
   str_p->append(buf, buf + nbytes);
@@ -396,7 +389,9 @@
   return ReadFileChunkAndAppend(path, 0, -1, out_p);
 }
 
-bool ReadFileChunk(const string& path, off_t offset, off_t size,
+bool ReadFileChunk(const string& path,
+                   off_t offset,
+                   off_t size,
                    brillo::Blob* out_p) {
   return ReadFileChunkAndAppend(path, offset, size, out_p);
 }
@@ -445,8 +440,8 @@
   const unsigned int bytes_per_line = 16;
   for (uint32_t i = 0; i < length; i += bytes_per_line) {
     const unsigned int bytes_remaining = length - i;
-    const unsigned int bytes_per_this_line = min(bytes_per_line,
-                                                 bytes_remaining);
+    const unsigned int bytes_per_this_line =
+        min(bytes_per_line, bytes_remaining);
     char header[100];
     int r = snprintf(header, sizeof(header), "0x%08x : ", i);
     TEST_AND_RETURN(r == 13);
@@ -465,8 +460,8 @@
 bool SplitPartitionName(const string& partition_name,
                         string* out_disk_name,
                         int* out_partition_num) {
-  if (!base::StartsWith(partition_name, "/dev/",
-                        base::CompareCase::SENSITIVE)) {
+  if (!base::StartsWith(
+          partition_name, "/dev/", base::CompareCase::SENSITIVE)) {
     LOG(ERROR) << "Invalid partition device name: " << partition_name;
     return false;
   }
@@ -498,8 +493,7 @@
     // Special case for MMC devices which have the following naming scheme:
     // mmcblk0p2
     size_t disk_name_len = last_nondigit_pos;
-    if (partition_name[last_nondigit_pos] != 'p' ||
-        last_nondigit_pos == 0 ||
+    if (partition_name[last_nondigit_pos] != 'p' || last_nondigit_pos == 0 ||
         !isdigit(partition_name[last_nondigit_pos - 1])) {
       disk_name_len++;
     }
@@ -507,8 +501,8 @@
   }
 
   if (out_partition_num) {
-    string partition_str = partition_name.substr(last_nondigit_pos + 1,
-                                                 partition_name_len);
+    string partition_str =
+        partition_name.substr(last_nondigit_pos + 1, partition_name_len);
     *out_partition_num = atoi(partition_str.c_str());
   }
   return true;
@@ -579,21 +573,15 @@
   }
 
   int exit_code;
-  vector<string> cmd = {
-      "ubiattach",
-      "-m",
-      base::StringPrintf("%d", volume_num),
-      "-d",
-      base::StringPrintf("%d", volume_num)
-  };
+  vector<string> cmd = {"ubiattach",
+                        "-m",
+                        base::StringPrintf("%d", volume_num),
+                        "-d",
+                        base::StringPrintf("%d", volume_num)};
   TEST_AND_RETURN_FALSE(Subprocess::SynchronousExec(cmd, &exit_code, nullptr));
   TEST_AND_RETURN_FALSE(exit_code == 0);
 
-  cmd = {
-      "ubiblock",
-      "--create",
-      volume_path
-  };
+  cmd = {"ubiblock", "--create", volume_path};
   TEST_AND_RETURN_FALSE(Subprocess::SynchronousExec(cmd, &exit_code, nullptr));
   TEST_AND_RETURN_FALSE(exit_code == 0);
 
@@ -613,7 +601,8 @@
       GetTempName(base_filename_template, &filename_template));
   DCHECK(filename || fd);
   vector<char> buf(filename_template.value().size() + 1);
-  memcpy(buf.data(), filename_template.value().data(),
+  memcpy(buf.data(),
+         filename_template.value().data(),
          filename_template.value().size());
   buf[filename_template.value().size()] = '\0';
 
@@ -647,8 +636,8 @@
 
   rc = ioctl(fd, BLKROSET, &expected_flag);
   if (rc != 0) {
-    PLOG(ERROR) << "Marking block device " << device << " as read_only="
-                << expected_flag;
+    PLOG(ERROR) << "Marking block device " << device
+                << " as read_only=" << expected_flag;
     return false;
   }
   return true;
@@ -666,13 +655,16 @@
     fstypes = {type.c_str()};
   }
   for (const char* fstype : fstypes) {
-    int rc = mount(device.c_str(), mountpoint.c_str(), fstype, mountflags,
+    int rc = mount(device.c_str(),
+                   mountpoint.c_str(),
+                   fstype,
+                   mountflags,
                    fs_mount_options.c_str());
     if (rc == 0)
       return true;
 
-    PLOG(WARNING) << "Unable to mount destination device " << device
-                  << " on " << mountpoint << " as " << fstype;
+    PLOG(WARNING) << "Unable to mount destination device " << device << " on "
+                  << mountpoint << " as " << fstype;
   }
   if (!type.empty()) {
     LOG(ERROR) << "Unable to mount " << device << " with any supported type";
@@ -730,7 +722,8 @@
 
 // Tries to parse the header of an ELF file to obtain a human-readable
 // description of it on the |output| string.
-static bool GetFileFormatELF(const uint8_t* buffer, size_t size,
+static bool GetFileFormatELF(const uint8_t* buffer,
+                             size_t size,
                              string* output) {
   // 0x00: EI_MAG - ELF magic header, 4 bytes.
   if (size < SELFMAG || memcmp(buffer, ELFMAG, SELFMAG) != 0)
@@ -774,7 +767,7 @@
   if (size < offsetof(Elf32_Ehdr, e_machine) + sizeof(hdr->e_machine))
     return true;
   uint16_t e_machine;
-  // Fix endianess regardless of the host endianess.
+  // Fix endianness regardless of the host endianness.
   if (ei_data == ELFDATA2LSB)
     e_machine = le16toh(hdr->e_machine);
   else
@@ -864,12 +857,12 @@
   Time::Exploded exp_time;
   utc_time.UTCExplode(&exp_time);
   return base::StringPrintf("%d/%d/%d %d:%02d:%02d GMT",
-                      exp_time.month,
-                      exp_time.day_of_month,
-                      exp_time.year,
-                      exp_time.hour,
-                      exp_time.minute,
-                      exp_time.second);
+                            exp_time.month,
+                            exp_time.day_of_month,
+                            exp_time.year,
+                            exp_time.hour,
+                            exp_time.minute,
+                            exp_time.second);
 }
 
 string ToString(bool b) {
@@ -878,12 +871,16 @@
 
 string ToString(DownloadSource source) {
   switch (source) {
-    case kDownloadSourceHttpsServer: return "HttpsServer";
-    case kDownloadSourceHttpServer:  return "HttpServer";
-    case kDownloadSourceHttpPeer:    return "HttpPeer";
-    case kNumDownloadSources:        return "Unknown";
-    // Don't add a default case to let the compiler warn about newly added
-    // download sources which should be added here.
+    case kDownloadSourceHttpsServer:
+      return "HttpsServer";
+    case kDownloadSourceHttpServer:
+      return "HttpServer";
+    case kDownloadSourceHttpPeer:
+      return "HttpPeer";
+    case kNumDownloadSources:
+      return "Unknown";
+      // Don't add a default case to let the compiler warn about newly added
+      // download sources which should be added here.
   }
 
   return "Unknown";
@@ -891,12 +888,16 @@
 
 string ToString(PayloadType payload_type) {
   switch (payload_type) {
-    case kPayloadTypeDelta:      return "Delta";
-    case kPayloadTypeFull:       return "Full";
-    case kPayloadTypeForcedFull: return "ForcedFull";
-    case kNumPayloadTypes:       return "Unknown";
-    // Don't add a default case to let the compiler warn about newly added
-    // payload types which should be added here.
+    case kPayloadTypeDelta:
+      return "Delta";
+    case kPayloadTypeFull:
+      return "Full";
+    case kPayloadTypeForcedFull:
+      return "ForcedFull";
+    case kNumPayloadTypes:
+      return "Unknown";
+      // Don't add a default case to let the compiler warn about newly added
+      // payload types which should be added here.
   }
 
   return "Unknown";
@@ -923,16 +924,10 @@
   return base_code;
 }
 
-Time TimeFromStructTimespec(struct timespec *ts) {
-  int64_t us = static_cast<int64_t>(ts->tv_sec) * Time::kMicrosecondsPerSecond +
-      static_cast<int64_t>(ts->tv_nsec) / Time::kNanosecondsPerMicrosecond;
-  return Time::UnixEpoch() + TimeDelta::FromMicroseconds(us);
-}
-
 string StringVectorToString(const vector<string> &vec_str) {
   string str = "[";
-  for (vector<string>::const_iterator i = vec_str.begin();
-       i != vec_str.end(); ++i) {
+  for (vector<string>::const_iterator i = vec_str.begin(); i != vec_str.end();
+       ++i) {
     if (i != vec_str.begin())
       str += ", ";
     str += '"';
@@ -958,53 +953,11 @@
                             encoded_hash.c_str());
 }
 
-bool DecodeAndStoreBase64String(const string& base64_encoded,
-                                base::FilePath *out_path) {
-  brillo::Blob contents;
-
-  out_path->clear();
-
-  if (base64_encoded.size() == 0) {
-    LOG(ERROR) << "Can't decode empty string.";
-    return false;
-  }
-
-  if (!brillo::data_encoding::Base64Decode(base64_encoded, &contents) ||
-      contents.size() == 0) {
-    LOG(ERROR) << "Error decoding base64.";
-    return false;
-  }
-
-  FILE *file = base::CreateAndOpenTemporaryFile(out_path);
-  if (file == nullptr) {
-    LOG(ERROR) << "Error creating temporary file.";
-    return false;
-  }
-
-  if (fwrite(contents.data(), 1, contents.size(), file) != contents.size()) {
-    PLOG(ERROR) << "Error writing to temporary file.";
-    if (fclose(file) != 0)
-      PLOG(ERROR) << "Error closing temporary file.";
-    if (unlink(out_path->value().c_str()) != 0)
-      PLOG(ERROR) << "Error unlinking temporary file.";
-    out_path->clear();
-    return false;
-  }
-
-  if (fclose(file) != 0) {
-    PLOG(ERROR) << "Error closing temporary file.";
-    out_path->clear();
-    return false;
-  }
-
-  return true;
-}
-
 bool ConvertToOmahaInstallDate(Time time, int *out_num_days) {
   time_t unix_time = time.ToTimeT();
   // Output of: date +"%s" --date="Jan 1, 2007 0:00 PST".
   const time_t kOmahaEpoch = 1167638400;
-  const int64_t kNumSecondsPerWeek = 7*24*3600;
+  const int64_t kNumSecondsPerWeek = 7 * 24 * 3600;
   const int64_t kNumDaysPerWeek = 7;
 
   time_t omaha_time = unix_time - kOmahaEpoch;
@@ -1034,8 +987,10 @@
   return false;
 }
 
-bool ReadExtents(const string& path, const vector<Extent>& extents,
-                 brillo::Blob* out_data, ssize_t out_data_size,
+bool ReadExtents(const string& path,
+                 const vector<Extent>& extents,
+                 brillo::Blob* out_data,
+                 ssize_t out_data_size,
                  size_t block_size) {
   brillo::Blob data(out_data_size);
   ssize_t bytes_read = 0;
@@ -1067,6 +1022,48 @@
   return true;
 }
 
+int VersionPrefix(const std::string& version) {
+  if (version.empty()) {
+    return 0;
+  }
+  vector<string> tokens = base::SplitString(
+      version, ".", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
+  int value;
+  if (tokens.empty() || !base::StringToInt(tokens[0], &value))
+    return -1;  // Target version is invalid.
+  return value;
+}
+
+void ParseRollbackKeyVersion(const string& raw_version,
+                             uint16_t* high_version,
+                             uint16_t* low_version) {
+  DCHECK(high_version);
+  DCHECK(low_version);
+  *high_version = numeric_limits<uint16_t>::max();
+  *low_version = numeric_limits<uint16_t>::max();
+
+  vector<string> parts = base::SplitString(
+      raw_version, ".", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+  if (parts.size() != 2) {
+    // The version string must have exactly one period.
+    return;
+  }
+
+  int high;
+  int low;
+  if (!(base::StringToInt(parts[0], &high) &&
+        base::StringToInt(parts[1], &low))) {
+    // At least one of the two parts could not be parsed as an integer.
+    return;
+  }
+
+  if (high >= 0 && high < numeric_limits<uint16_t>::max() && low >= 0 &&
+      low < numeric_limits<uint16_t>::max()) {
+    *high_version = static_cast<uint16_t>(high);
+    *low_version = static_cast<uint16_t>(low);
+  }
+}
+
 }  // namespace utils
 
 }  // namespace chromeos_update_engine
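For reference, here is a minimal usage sketch of the two version-parsing helpers added above (VersionPrefix and ParseRollbackKeyVersion). It is illustrative only and not part of this change; it assumes the update_engine headers and their dependencies are available to compile against.

    // Illustrative usage of utils::VersionPrefix and
    // utils::ParseRollbackKeyVersion as added in common/utils.cc above.
    #include <cstdint>
    #include <iostream>

    #include "update_engine/common/utils.h"

    int main() {
      using chromeos_update_engine::utils::ParseRollbackKeyVersion;
      using chromeos_update_engine::utils::VersionPrefix;

      // First numeric section of a dotted version string.
      std::cout << VersionPrefix("10575.39.2") << "\n";  // 10575
      std::cout << VersionPrefix("") << "\n";            // 0 (empty input)
      std::cout << VersionPrefix("x.1") << "\n";         // -1 (not a number)

      // "high.low" rollback key version; malformed input yields 0xffff for
      // both components.
      uint16_t high = 0, low = 0;
      ParseRollbackKeyVersion("2.3", &high, &low);    // high == 2, low == 3
      ParseRollbackKeyVersion("2.3.4", &high, &low);  // both become 0xffff
      return 0;
    }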
diff --git a/common/utils.h b/common/utils.h
index e4ffcf8..9160d9f 100644
--- a/common/utils.h
+++ b/common/utils.h
@@ -21,6 +21,7 @@
 #include <unistd.h>
 
 #include <algorithm>
+#include <limits>
 #include <map>
 #include <memory>
 #include <set>
@@ -43,14 +44,9 @@
 
 namespace utils {
 
-// Converts a struct timespec representing a number of seconds since
-// the Unix epoch to a base::Time. Sub-microsecond time is rounded
-// down.
-base::Time TimeFromStructTimespec(struct timespec *ts);
-
 // Formats |vec_str| as a string of the form ["<elem1>", "<elem2>"].
 // Does no escaping, only use this for presentation in error messages.
-std::string StringVectorToString(const std::vector<std::string> &vec_str);
+std::string StringVectorToString(const std::vector<std::string>& vec_str);
 
 // Calculates the p2p file id from payload hash and size
 std::string CalculateP2PFileId(const brillo::Blob& payload_hash,
@@ -85,11 +81,14 @@
 
 // Calls pread() repeatedly until count bytes are read, or EOF is reached.
 // Returns number of bytes read in *bytes_read. Returns true on success.
-bool PReadAll(int fd, void* buf, size_t count, off_t offset,
-              ssize_t* out_bytes_read);
+bool PReadAll(
+    int fd, void* buf, size_t count, off_t offset, ssize_t* out_bytes_read);
 
-bool PReadAll(const FileDescriptorPtr& fd, void* buf, size_t count,
-              off_t offset, ssize_t* out_bytes_read);
+bool PReadAll(const FileDescriptorPtr& fd,
+              void* buf,
+              size_t count,
+              off_t offset,
+              ssize_t* out_bytes_read);
 
 // Opens |path| for reading and appends its entire content to the container
 // pointed to by |out_p|. Returns true upon successfully reading all of the
@@ -98,7 +97,9 @@
 // |size| is not -1, only up to |size| bytes are read in.
 bool ReadFile(const std::string& path, brillo::Blob* out_p);
 bool ReadFile(const std::string& path, std::string* out_p);
-bool ReadFileChunk(const std::string& path, off_t offset, off_t size,
+bool ReadFileChunk(const std::string& path,
+                   off_t offset,
+                   off_t size,
                    brillo::Blob* out_p);
 
 // Invokes |cmd| in a pipe and appends its stdout to the container pointed to by
@@ -110,7 +111,7 @@
 // occurs, -1 is returned.
 off_t BlockDevSize(int fd);
 
-// Returns the size of the file at path, or the file desciptor fd. If the file
+// Returns the size of the file at path, or the file descriptor fd. If the file
 // is actually a block device, this function will automatically call
 // BlockDevSize. If the file doesn't exist or some error occurs, -1 is
 // returned.
@@ -131,13 +132,6 @@
 // only returns true if "/dev/ubi%d_0" becomes available in |timeout| seconds.
 bool TryAttachingUbiVolume(int volume_num, int timeout);
 
-// Setup the directory |new_root_temp_dir| to be used as the root directory for
-// temporary files instead of the system's default. If the directory doesn't
-// exists, it will be created when first used.
-// NOTE: The memory pointed by |new_root_temp_dir| must be available until this
-// function is called again with a different value.
-void SetRootTempDir(const char* new_root_temp_dir);
-
 // If |base_filename_template| is neither absolute (starts with "/") nor
 // explicitly relative to the current working directory (starts with "./" or
 // "../"), then it is prepended the system's temporary directory. On success,
@@ -166,8 +160,7 @@
 // {"/dev/sda", 1} => "/dev/sda1"
 // {"/dev/mmcblk2", 12} => "/dev/mmcblk2p12"
 // Returns empty string when invalid parameters are passed in
-std::string MakePartitionName(const std::string& disk_name,
-                              int partition_num);
+std::string MakePartitionName(const std::string& disk_name, int partition_num);
 
 // Similar to "MakePartitionName" but returns a name that is suitable for
 // mounting. On NAND system we can write to "/dev/ubiX_0", which is what
@@ -228,26 +221,12 @@
   HexDumpArray(vect.data(), vect.size());
 }
 
-template<typename KeyType, typename ValueType>
-bool MapContainsKey(const std::map<KeyType, ValueType>& m, const KeyType& k) {
-  return m.find(k) != m.end();
-}
-template<typename KeyType>
-bool SetContainsKey(const std::set<KeyType>& s, const KeyType& k) {
-  return s.find(k) != s.end();
-}
-
-template<typename T>
-bool VectorContainsValue(const std::vector<T>& vect, const T& value) {
-  return std::find(vect.begin(), vect.end(), value) != vect.end();
-}
-
-template<typename T>
-bool VectorIndexOf(const std::vector<T>& vect, const T& value,
+template <typename T>
+bool VectorIndexOf(const std::vector<T>& vect,
+                   const T& value,
                    typename std::vector<T>::size_type* out_index) {
-  typename std::vector<T>::const_iterator it = std::find(vect.begin(),
-                                                         vect.end(),
-                                                         value);
+  typename std::vector<T>::const_iterator it =
+      std::find(vect.begin(), vect.end(), value);
   if (it == vect.end()) {
     return false;
   } else {
@@ -288,14 +267,6 @@
 // it'll return the same value again.
 ErrorCode GetBaseErrorCode(ErrorCode code);
 
-// Decodes the data in |base64_encoded| and stores it in a temporary
-// file. Returns false if the given data is empty, not well-formed
-// base64 or if an error occurred. If true is returned, the decoded
-// data is stored in the file returned in |out_path|. The file should
-// be deleted when no longer needed.
-bool DecodeAndStoreBase64String(const std::string& base64_encoded,
-                                base::FilePath *out_path);
-
 // Converts |time| to an Omaha InstallDate which is defined as "the
 // number of PST8PDT calendar weeks since Jan 1st 2007 0:00 PST, times
 // seven" with PST8PDT defined as "Pacific Time" (e.g. UTC-07:00 if
@@ -311,7 +282,7 @@
 // into account so the result may up to one hour off. This is because
 // the glibc date and timezone routines depend on the TZ environment
 // variable and changing environment variables is not thread-safe.
-bool ConvertToOmahaInstallDate(base::Time time, int *out_num_days);
+bool ConvertToOmahaInstallDate(base::Time time, int* out_num_days);
 
 // Look for the minor version value in the passed |store| and set
 // |minor_version| to that value. Return whether the value was found and valid.
@@ -322,8 +293,10 @@
 // extents are read from the file at |path|. |out_data_size| is the size of
 // |out_data|. Returns false if the number of bytes to read given in
 // |extents| does not equal |out_data_size|.
-bool ReadExtents(const std::string& path, const std::vector<Extent>& extents,
-                 brillo::Blob* out_data, ssize_t out_data_size,
+bool ReadExtents(const std::string& path,
+                 const std::vector<Extent>& extents,
+                 brillo::Blob* out_data,
+                 ssize_t out_data_size,
                  size_t block_size);
 
 // Read the current boot identifier and store it in |boot_id|. This identifier
@@ -331,8 +304,29 @@
 // reboot. Returns whether it succeeded getting the boot_id.
 bool GetBootId(std::string* boot_id);
 
-}  // namespace utils
+// Divide |x| by |y| and round up to the nearest integer.
+constexpr uint64_t DivRoundUp(uint64_t x, uint64_t y) {
+  return (x + y - 1) / y;
+}
 
+// Round |x| up to be a multiple of |y|.
+constexpr uint64_t RoundUp(uint64_t x, uint64_t y) {
+  return DivRoundUp(x, y) * y;
+}
+
+// Returns the integer value of the first section of |version|. E.g. for
+// "10575.39." this returns 10575. Returns 0 if |version| is empty, and -1 if
+// the first section of |version| is invalid (e.g. not a number).
+int VersionPrefix(const std::string& version);
+
+// Parses a string in the form high.low, where high and low are 16-bit
+// unsigned integers. If there is more than one dot, or if either part is not
+// a valid 16-bit unsigned number, then 0xffff is returned for both.
+void ParseRollbackKeyVersion(const std::string& raw_version,
+                             uint16_t* high_version,
+                             uint16_t* low_version);
+
+}  // namespace utils
 
 // Utility class to close a file descriptor
 class ScopedFdCloser {
@@ -343,6 +337,7 @@
       *fd_ = -1;
   }
   void set_should_close(bool should_close) { should_close_ = should_close; }
+
  private:
   int* fd_;
   bool should_close_ = true;
@@ -353,8 +348,7 @@
 class ScopedPathUnlinker {
  public:
   explicit ScopedPathUnlinker(const std::string& path)
-      : path_(path),
-        should_remove_(true) {}
+      : path_(path), should_remove_(true) {}
   ~ScopedPathUnlinker() {
     if (should_remove_ && unlink(path_.c_str()) < 0) {
       PLOG(ERROR) << "Unable to unlink path " << path_;
@@ -377,7 +371,9 @@
       : processor_(processor),
         action_(action),
         code_(ErrorCode::kError),
-        should_complete_(true) {}
+        should_complete_(true) {
+    CHECK(processor_);
+  }
   ~ScopedActionCompleter() {
     if (should_complete_)
       processor_->ActionComplete(action_, code_);
@@ -398,54 +394,54 @@
 
 }  // namespace chromeos_update_engine
 
-#define TEST_AND_RETURN_FALSE_ERRNO(_x)                                        \
-  do {                                                                         \
-    bool _success = static_cast<bool>(_x);                                     \
-    if (!_success) {                                                           \
-      std::string _msg =                                                       \
-          chromeos_update_engine::utils::ErrnoNumberAsString(errno);           \
-      LOG(ERROR) << #_x " failed: " << _msg;                                   \
-      return false;                                                            \
-    }                                                                          \
+#define TEST_AND_RETURN_FALSE_ERRNO(_x)                              \
+  do {                                                               \
+    bool _success = static_cast<bool>(_x);                           \
+    if (!_success) {                                                 \
+      std::string _msg =                                             \
+          chromeos_update_engine::utils::ErrnoNumberAsString(errno); \
+      LOG(ERROR) << #_x " failed: " << _msg;                         \
+      return false;                                                  \
+    }                                                                \
   } while (0)
 
-#define TEST_AND_RETURN_FALSE(_x)                                              \
-  do {                                                                         \
-    bool _success = static_cast<bool>(_x);                                     \
-    if (!_success) {                                                           \
-      LOG(ERROR) << #_x " failed.";                                            \
-      return false;                                                            \
-    }                                                                          \
+#define TEST_AND_RETURN_FALSE(_x)          \
+  do {                                     \
+    bool _success = static_cast<bool>(_x); \
+    if (!_success) {                       \
+      LOG(ERROR) << #_x " failed.";        \
+      return false;                        \
+    }                                      \
   } while (0)
 
-#define TEST_AND_RETURN_ERRNO(_x)                                              \
-  do {                                                                         \
-    bool _success = static_cast<bool>(_x);                                     \
-    if (!_success) {                                                           \
-      std::string _msg =                                                       \
-          chromeos_update_engine::utils::ErrnoNumberAsString(errno);           \
-      LOG(ERROR) << #_x " failed: " << _msg;                                   \
-      return;                                                                  \
-    }                                                                          \
+#define TEST_AND_RETURN_ERRNO(_x)                                    \
+  do {                                                               \
+    bool _success = static_cast<bool>(_x);                           \
+    if (!_success) {                                                 \
+      std::string _msg =                                             \
+          chromeos_update_engine::utils::ErrnoNumberAsString(errno); \
+      LOG(ERROR) << #_x " failed: " << _msg;                         \
+      return;                                                        \
+    }                                                                \
   } while (0)
 
-#define TEST_AND_RETURN(_x)                                                    \
-  do {                                                                         \
-    bool _success = static_cast<bool>(_x);                                     \
-    if (!_success) {                                                           \
-      LOG(ERROR) << #_x " failed.";                                            \
-      return;                                                                  \
-    }                                                                          \
+#define TEST_AND_RETURN(_x)                \
+  do {                                     \
+    bool _success = static_cast<bool>(_x); \
+    if (!_success) {                       \
+      LOG(ERROR) << #_x " failed.";        \
+      return;                              \
+    }                                      \
   } while (0)
 
-#define TEST_AND_RETURN_FALSE_ERRCODE(_x)                                      \
-  do {                                                                         \
-    errcode_t _error = (_x);                                                   \
-    if (_error) {                                                              \
-      errno = _error;                                                          \
-      LOG(ERROR) << #_x " failed: " << _error;                                 \
-      return false;                                                            \
-    }                                                                          \
+#define TEST_AND_RETURN_FALSE_ERRCODE(_x)      \
+  do {                                         \
+    errcode_t _error = (_x);                   \
+    if (_error) {                              \
+      errno = _error;                          \
+      LOG(ERROR) << #_x " failed: " << _error; \
+      return false;                            \
+    }                                          \
   } while (0)
 
 #endif  // UPDATE_ENGINE_COMMON_UTILS_H_
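The constexpr helpers DivRoundUp() and RoundUp() declared above are usable in constant expressions. A small illustrative check, not part of this change and assuming the update_engine headers are available:

    // Compile-time sanity checks for the constexpr rounding helpers declared
    // in common/utils.h above.
    #include "update_engine/common/utils.h"

    int main() {
      using chromeos_update_engine::utils::DivRoundUp;
      using chromeos_update_engine::utils::RoundUp;

      static_assert(DivRoundUp(10, 4) == 3, "ceil(10 / 4) is 3");
      static_assert(RoundUp(10, 4) == 12, "10 rounded up to a multiple of 4");
      static_assert(RoundUp(4096, 512) == 4096, "already a multiple of 512");

      // Worked example of the Omaha InstallDate arithmetic documented above:
      // a time 10 days after Jan 1, 2007 0:00 PST falls in the second week,
      // so ConvertToOmahaInstallDate() yields (10 / 7) * 7 = 7.
      return 0;
    }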
diff --git a/common/utils_unittest.cc b/common/utils_unittest.cc
index 62f9f6c..7d1c59e 100644
--- a/common/utils_unittest.cc
+++ b/common/utils_unittest.cc
@@ -22,6 +22,7 @@
 #include <sys/stat.h>
 #include <sys/types.h>
 
+#include <limits>
 #include <string>
 #include <vector>
 
@@ -32,21 +33,22 @@
 
 #include "update_engine/common/test_utils.h"
 
+using std::numeric_limits;
 using std::string;
 using std::vector;
 
 namespace chromeos_update_engine {
 
-class UtilsTest : public ::testing::Test { };
+class UtilsTest : public ::testing::Test {};
 
 TEST(UtilsTest, CanParseECVersion) {
   // Should be able to parse any valid key value line.
   EXPECT_EQ("12345", utils::ParseECVersion("fw_version=12345"));
-  EXPECT_EQ("123456", utils::ParseECVersion(
-      "b=1231a fw_version=123456 a=fasd2"));
+  EXPECT_EQ("123456",
+            utils::ParseECVersion("b=1231a fw_version=123456 a=fasd2"));
   EXPECT_EQ("12345", utils::ParseECVersion("fw_version=12345"));
-  EXPECT_EQ("00VFA616", utils::ParseECVersion(
-      "vendor=\"sam\" fw_version=\"00VFA616\""));
+  EXPECT_EQ("00VFA616",
+            utils::ParseECVersion("vendor=\"sam\" fw_version=\"00VFA616\""));
 
   // For invalid entries, should return the empty string.
   EXPECT_EQ("", utils::ParseECVersion("b=1231a fw_version a=fasd2"));
@@ -57,13 +59,11 @@
 }
 
 TEST(UtilsTest, WriteFileReadFile) {
-  base::FilePath file;
-  EXPECT_TRUE(base::CreateTemporaryFile(&file));
-  ScopedPathUnlinker unlinker(file.value());
-  EXPECT_TRUE(utils::WriteFile(file.value().c_str(), "hello", 5));
+  test_utils::ScopedTempFile file;
+  EXPECT_TRUE(utils::WriteFile(file.path().c_str(), "hello", 5));
 
   brillo::Blob readback;
-  EXPECT_TRUE(utils::ReadFile(file.value().c_str(), &readback));
+  EXPECT_TRUE(utils::ReadFile(file.path().c_str(), &readback));
   EXPECT_EQ("hello", string(readback.begin(), readback.end()));
 }
 
@@ -73,24 +73,21 @@
 }
 
 TEST(UtilsTest, ReadFileChunk) {
-  base::FilePath file;
-  EXPECT_TRUE(base::CreateTemporaryFile(&file));
-  ScopedPathUnlinker unlinker(file.value());
+  test_utils::ScopedTempFile file;
   brillo::Blob data;
   const size_t kSize = 1024 * 1024;
   for (size_t i = 0; i < kSize; i++) {
     data.push_back(i % 255);
   }
-  EXPECT_TRUE(utils::WriteFile(file.value().c_str(), data.data(), data.size()));
+  EXPECT_TRUE(test_utils::WriteFileVector(file.path(), data));
   brillo::Blob in_data;
-  EXPECT_TRUE(utils::ReadFileChunk(file.value().c_str(), kSize, 10, &in_data));
+  EXPECT_TRUE(utils::ReadFileChunk(file.path().c_str(), kSize, 10, &in_data));
   EXPECT_TRUE(in_data.empty());
-  EXPECT_TRUE(utils::ReadFileChunk(file.value().c_str(), 0, -1, &in_data));
-  EXPECT_TRUE(data == in_data);
+  EXPECT_TRUE(utils::ReadFileChunk(file.path().c_str(), 0, -1, &in_data));
+  EXPECT_EQ(data, in_data);
   in_data.clear();
-  EXPECT_TRUE(utils::ReadFileChunk(file.value().c_str(), 10, 20, &in_data));
-  EXPECT_TRUE(brillo::Blob(data.begin() + 10, data.begin() + 10 + 20) ==
-              in_data);
+  EXPECT_TRUE(utils::ReadFileChunk(file.path().c_str(), 10, 20, &in_data));
+  EXPECT_EQ(brillo::Blob(data.begin() + 10, data.begin() + 10 + 20), in_data);
 }
 
 TEST(UtilsTest, ErrnoNumberAsStringTest) {
@@ -175,21 +172,18 @@
             utils::MakePartitionNameForMount("/dev/mmcblk0p2"));
   EXPECT_EQ("/dev/loop0", utils::MakePartitionNameForMount("/dev/loop0"));
   EXPECT_EQ("/dev/loop8", utils::MakePartitionNameForMount("/dev/loop8"));
-  EXPECT_EQ("/dev/loop12p2",
-            utils::MakePartitionNameForMount("/dev/loop12p2"));
+  EXPECT_EQ("/dev/loop12p2", utils::MakePartitionNameForMount("/dev/loop12p2"));
   EXPECT_EQ("/dev/ubiblock5_0",
             utils::MakePartitionNameForMount("/dev/ubiblock5_0"));
-  EXPECT_EQ("/dev/mtd4",
-            utils::MakePartitionNameForMount("/dev/ubi4_0"));
+  EXPECT_EQ("/dev/mtd4", utils::MakePartitionNameForMount("/dev/ubi4_0"));
   EXPECT_EQ("/dev/ubiblock3_0",
             utils::MakePartitionNameForMount("/dev/ubiblock3"));
   EXPECT_EQ("/dev/mtd2", utils::MakePartitionNameForMount("/dev/ubi2"));
-  EXPECT_EQ("/dev/ubi1_0",
-            utils::MakePartitionNameForMount("/dev/ubiblock1"));
+  EXPECT_EQ("/dev/ubi1_0", utils::MakePartitionNameForMount("/dev/ubiblock1"));
 }
 
 TEST(UtilsTest, FuzzIntTest) {
-  static const uint32_t kRanges[] = { 0, 1, 2, 20 };
+  static const uint32_t kRanges[] = {0, 1, 2, 20};
   for (uint32_t range : kRanges) {
     const int kValue = 50;
     for (int tries = 0; tries < 100; ++tries) {
@@ -255,18 +249,12 @@
   // which is not localized) so we only need to test the C locale
   EXPECT_EQ(utils::FormatTimeDelta(base::TimeDelta::FromMilliseconds(100)),
             "0.1s");
-  EXPECT_EQ(utils::FormatTimeDelta(base::TimeDelta::FromSeconds(0)),
-            "0s");
-  EXPECT_EQ(utils::FormatTimeDelta(base::TimeDelta::FromSeconds(1)),
-            "1s");
-  EXPECT_EQ(utils::FormatTimeDelta(base::TimeDelta::FromSeconds(59)),
-            "59s");
-  EXPECT_EQ(utils::FormatTimeDelta(base::TimeDelta::FromSeconds(60)),
-            "1m0s");
-  EXPECT_EQ(utils::FormatTimeDelta(base::TimeDelta::FromSeconds(61)),
-            "1m1s");
-  EXPECT_EQ(utils::FormatTimeDelta(base::TimeDelta::FromSeconds(90)),
-            "1m30s");
+  EXPECT_EQ(utils::FormatTimeDelta(base::TimeDelta::FromSeconds(0)), "0s");
+  EXPECT_EQ(utils::FormatTimeDelta(base::TimeDelta::FromSeconds(1)), "1s");
+  EXPECT_EQ(utils::FormatTimeDelta(base::TimeDelta::FromSeconds(59)), "59s");
+  EXPECT_EQ(utils::FormatTimeDelta(base::TimeDelta::FromSeconds(60)), "1m0s");
+  EXPECT_EQ(utils::FormatTimeDelta(base::TimeDelta::FromSeconds(61)), "1m1s");
+  EXPECT_EQ(utils::FormatTimeDelta(base::TimeDelta::FromSeconds(90)), "1m30s");
   EXPECT_EQ(utils::FormatTimeDelta(base::TimeDelta::FromSeconds(1205)),
             "20m5s");
   EXPECT_EQ(utils::FormatTimeDelta(base::TimeDelta::FromSeconds(3600)),
@@ -286,49 +274,7 @@
   EXPECT_EQ(utils::FormatTimeDelta(base::TimeDelta::FromSeconds(200000) +
                                    base::TimeDelta::FromMilliseconds(1)),
             "2d7h33m20.001s");
-  EXPECT_EQ(utils::FormatTimeDelta(base::TimeDelta::FromSeconds(-1)),
-            "-1s");
-}
-
-TEST(UtilsTest, TimeFromStructTimespecTest) {
-  struct timespec ts;
-
-  // Unix epoch (Thursday 00:00:00 UTC on Jan 1, 1970)
-  ts = (struct timespec) {.tv_sec = 0, .tv_nsec = 0};
-  EXPECT_EQ(base::Time::UnixEpoch(), utils::TimeFromStructTimespec(&ts));
-
-  // 42 ms after the Unix billennium (Sunday 01:46:40 UTC on September 9, 2001)
-  ts = (struct timespec) {.tv_sec = 1000 * 1000 * 1000,
-                          .tv_nsec = 42 * 1000 * 1000};
-  base::Time::Exploded exploded = (base::Time::Exploded) {
-    .year = 2001, .month = 9, .day_of_week = 0, .day_of_month = 9,
-    .hour = 1, .minute = 46, .second = 40, .millisecond = 42};
-  base::Time time;
-  EXPECT_TRUE(base::Time::FromUTCExploded(exploded, &time));
-  EXPECT_EQ(time, utils::TimeFromStructTimespec(&ts));
-}
-
-TEST(UtilsTest, DecodeAndStoreBase64String) {
-  base::FilePath path;
-
-  // Ensure we return false on empty strings or invalid base64.
-  EXPECT_FALSE(utils::DecodeAndStoreBase64String("", &path));
-  EXPECT_FALSE(utils::DecodeAndStoreBase64String("not valid base64", &path));
-
-  // Pass known base64 and check that it matches. This string was generated
-  // the following way:
-  //
-  //   $ echo "Update Engine" | base64
-  //   VXBkYXRlIEVuZ2luZQo=
-  EXPECT_TRUE(utils::DecodeAndStoreBase64String("VXBkYXRlIEVuZ2luZQo=",
-                                                &path));
-  ScopedPathUnlinker unlinker(path.value());
-  string expected_contents = "Update Engine\n";
-  string contents;
-  EXPECT_TRUE(utils::ReadFile(path.value(), &contents));
-  EXPECT_EQ(contents, expected_contents);
-  EXPECT_EQ(static_cast<off_t>(expected_contents.size()),
-            utils::FileSize(path.value()));
+  EXPECT_EQ(utils::FormatTimeDelta(base::TimeDelta::FromSeconds(-1)), "-1s");
 }
 
 TEST(UtilsTest, ConvertToOmahaInstallDate) {
@@ -349,29 +295,29 @@
   EXPECT_FALSE(utils::ConvertToOmahaInstallDate(
       base::Time::FromTimeT(omaha_epoch - 1), &value));
   EXPECT_FALSE(utils::ConvertToOmahaInstallDate(
-      base::Time::FromTimeT(omaha_epoch - 100*24*3600), &value));
+      base::Time::FromTimeT(omaha_epoch - 100 * 24 * 3600), &value));
 
   // Check that we jump from 0 to 7 exactly on the one-week mark, e.g.
   // on Jan 8, 2007 0:00 PST.
   EXPECT_TRUE(utils::ConvertToOmahaInstallDate(
-      base::Time::FromTimeT(omaha_epoch + 7*24*3600 - 1), &value));
+      base::Time::FromTimeT(omaha_epoch + 7 * 24 * 3600 - 1), &value));
   EXPECT_EQ(value, 0);
   EXPECT_TRUE(utils::ConvertToOmahaInstallDate(
-      base::Time::FromTimeT(omaha_epoch + 7*24*3600), &value));
+      base::Time::FromTimeT(omaha_epoch + 7 * 24 * 3600), &value));
   EXPECT_EQ(value, 7);
 
   // Check a couple of more values.
   EXPECT_TRUE(utils::ConvertToOmahaInstallDate(
-      base::Time::FromTimeT(omaha_epoch + 10*24*3600), &value));
+      base::Time::FromTimeT(omaha_epoch + 10 * 24 * 3600), &value));
   EXPECT_EQ(value, 7);
   EXPECT_TRUE(utils::ConvertToOmahaInstallDate(
-      base::Time::FromTimeT(omaha_epoch + 20*24*3600), &value));
+      base::Time::FromTimeT(omaha_epoch + 20 * 24 * 3600), &value));
   EXPECT_EQ(value, 14);
   EXPECT_TRUE(utils::ConvertToOmahaInstallDate(
-      base::Time::FromTimeT(omaha_epoch + 26*24*3600), &value));
+      base::Time::FromTimeT(omaha_epoch + 26 * 24 * 3600), &value));
   EXPECT_EQ(value, 21);
   EXPECT_TRUE(utils::ConvertToOmahaInstallDate(
-      base::Time::FromTimeT(omaha_epoch + 29*24*3600), &value));
+      base::Time::FromTimeT(omaha_epoch + 29 * 24 * 3600), &value));
   EXPECT_EQ(value, 28);
 
   // The date Jun 4, 2007 0:00 PDT is a Monday and is hence a point
@@ -451,6 +397,22 @@
   *ret = true;
 }
 
+static void ExpectParseRollbackKeyVersion(const string& version,
+                                          uint16_t expected_high,
+                                          uint16_t expected_low) {
+  uint16_t actual_high;
+  uint16_t actual_low;
+  utils::ParseRollbackKeyVersion(version, &actual_high, &actual_low);
+  EXPECT_EQ(expected_high, actual_high);
+  EXPECT_EQ(expected_low, actual_low);
+}
+
+static void ExpectInvalidParseRollbackKeyVersion(const string& version) {
+  ExpectParseRollbackKeyVersion(version,
+                                numeric_limits<uint16_t>::max(),
+                                numeric_limits<uint16_t>::max());
+}
+
 TEST(UtilsTest, TestMacros) {
   bool void_test = false;
   VoidMacroTestHelper(&void_test);
@@ -464,20 +426,18 @@
 }
 
 TEST(UtilsTest, RunAsRootUnmountFilesystemBusyFailureTest) {
-  string tmp_image;
-  EXPECT_TRUE(utils::MakeTempFile("img.XXXXXX", &tmp_image, nullptr));
-  ScopedPathUnlinker tmp_image_unlinker(tmp_image);
+  test_utils::ScopedTempFile tmp_image("img.XXXXXX");
 
   EXPECT_TRUE(base::CopyFile(
       test_utils::GetBuildArtifactsPath().Append("gen/disk_ext2_4k.img"),
-      base::FilePath(tmp_image)));
+      base::FilePath(tmp_image.path())));
 
   base::ScopedTempDir mnt_dir;
   EXPECT_TRUE(mnt_dir.CreateUniqueTempDir());
 
   string loop_dev;
   test_utils::ScopedLoopbackDeviceBinder loop_binder(
-      tmp_image, true, &loop_dev);
+      tmp_image.path(), true, &loop_dev);
 
   EXPECT_FALSE(utils::IsMountpoint(mnt_dir.GetPath().value()));
   // This is the actual test part. While we hold a file descriptor open for the
@@ -506,10 +466,45 @@
   EXPECT_TRUE(mnt_dir.CreateUniqueTempDir());
   EXPECT_FALSE(utils::IsMountpoint(mnt_dir.GetPath().value()));
 
-  base::FilePath file;
-  EXPECT_TRUE(base::CreateTemporaryFile(&file));
-  ScopedPathUnlinker unlinker(file.value());
-  EXPECT_FALSE(utils::IsMountpoint(file.value()));
+  test_utils::ScopedTempFile file;
+  EXPECT_FALSE(utils::IsMountpoint(file.path()));
+}
+
+TEST(UtilsTest, VersionPrefix) {
+  EXPECT_EQ(10575, utils::VersionPrefix("10575.39."));
+  EXPECT_EQ(10575, utils::VersionPrefix("10575.39"));
+  EXPECT_EQ(10575, utils::VersionPrefix("10575.x"));
+  EXPECT_EQ(10575, utils::VersionPrefix("10575."));
+  EXPECT_EQ(10575, utils::VersionPrefix("10575"));
+  EXPECT_EQ(0, utils::VersionPrefix(""));
+  EXPECT_EQ(-1, utils::VersionPrefix("x"));
+  EXPECT_EQ(-1, utils::VersionPrefix("1x"));
+  EXPECT_EQ(-1, utils::VersionPrefix("x.1"));
+}
+
+TEST(UtilsTest, ParseDottedVersion) {
+  // Valid case.
+  ExpectParseRollbackKeyVersion("2.3", 2, 3);
+  ExpectParseRollbackKeyVersion("65535.65535", 65535, 65535);
+
+  // Zero is technically allowed but never actually used.
+  ExpectParseRollbackKeyVersion("0.0", 0, 0);
+
+  // Invalid cases.
+  ExpectInvalidParseRollbackKeyVersion("");
+  ExpectInvalidParseRollbackKeyVersion("2");
+  ExpectInvalidParseRollbackKeyVersion("2.");
+  ExpectInvalidParseRollbackKeyVersion(".2");
+  ExpectInvalidParseRollbackKeyVersion("2.2.");
+  ExpectInvalidParseRollbackKeyVersion("2.2.3");
+  ExpectInvalidParseRollbackKeyVersion(".2.2");
+  ExpectInvalidParseRollbackKeyVersion("a.b");
+  ExpectInvalidParseRollbackKeyVersion("1.b");
+  ExpectInvalidParseRollbackKeyVersion("a.2");
+  ExpectInvalidParseRollbackKeyVersion("65536.65536");
+  ExpectInvalidParseRollbackKeyVersion("99999.99999");
+  ExpectInvalidParseRollbackKeyVersion("99999.1");
+  ExpectInvalidParseRollbackKeyVersion("1.99999");
 }
 
 }  // namespace chromeos_update_engine
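The test changes above replace manual base::CreateTemporaryFile() plus ScopedPathUnlinker pairs with test_utils::ScopedTempFile, which is defined in common/test_utils.h (not shown in this hunk) and also accepts an optional filename pattern. A rough RAII equivalent, under the assumption that it creates a temporary file on construction and unlinks it on destruction:

    // Rough sketch of the RAII pattern the tests now rely on; the real
    // test_utils::ScopedTempFile may differ in detail.
    #include <stdlib.h>
    #include <unistd.h>

    #include <stdexcept>
    #include <string>

    class ScopedTempFileSketch {
     public:
      ScopedTempFileSketch() {
        char tmpl[] = "/tmp/ue_test.XXXXXX";
        int fd = mkstemp(tmpl);  // Creates and opens a unique temporary file.
        if (fd < 0)
          throw std::runtime_error("mkstemp failed");
        close(fd);  // The tests reopen the file by path when needed.
        path_ = tmpl;
      }
      ~ScopedTempFileSketch() { unlink(path_.c_str()); }
      const std::string& path() const { return path_; }

     private:
      std::string path_;
    };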
diff --git a/common_service.cc b/common_service.cc
index 9f3b862..0d5ee6d 100644
--- a/common_service.cc
+++ b/common_service.cc
@@ -16,7 +16,6 @@
 
 #include "update_engine/common_service.h"
 
-#include <set>
 #include <string>
 
 #include <base/bind.h>
@@ -41,8 +40,8 @@
 using base::StringPrintf;
 using brillo::ErrorPtr;
 using brillo::string_utils::ToString;
-using std::set;
 using std::string;
+using std::vector;
 using update_engine::UpdateAttemptFlags;
 using update_engine::UpdateEngineStatus;
 
@@ -51,7 +50,11 @@
 namespace {
 // Log and set the error on the passed ErrorPtr.
 void LogAndSetError(ErrorPtr* error,
+#if BASE_VER < 576279
                     const tracked_objects::Location& location,
+#else
+                    const base::Location& location,
+#endif
                     const string& reason) {
   brillo::Error::AddTo(error,
                        location,
@@ -68,8 +71,7 @@
     "org.chromium.UpdateEngine.Error.Failed";
 
 UpdateEngineService::UpdateEngineService(SystemState* system_state)
-    : system_state_(system_state) {
-}
+    : system_state_(system_state) {}
 
 // org::chromium::UpdateEngineInterfaceInterface methods implementation.
 
@@ -105,6 +107,18 @@
   return true;
 }
 
+bool UpdateEngineService::AttemptInstall(brillo::ErrorPtr* error,
+                                         const string& omaha_url,
+                                         const vector<string>& dlc_module_ids) {
+  if (!system_state_->update_attempter()->CheckForInstall(dlc_module_ids,
+                                                          omaha_url)) {
+    // TODO(xiaochu): support more detailed error messages.
+    LogAndSetError(error, FROM_HERE, "Could not schedule install operation.");
+    return false;
+  }
+  return true;
+}
+
 bool UpdateEngineService::AttemptRollback(ErrorPtr* error, bool in_powerwash) {
   LOG(INFO) << "Attempting rollback to non-active partitions.";
 
@@ -258,22 +272,11 @@
 
 bool UpdateEngineService::SetUpdateOverCellularPermission(ErrorPtr* error,
                                                           bool in_allowed) {
-  set<string> allowed_types;
-  const policy::DevicePolicy* device_policy = system_state_->device_policy();
-
-  // The device_policy is loaded in a lazy way before an update check. Load it
-  // now from the libbrillo cache if it wasn't already loaded.
-  if (!device_policy) {
-    UpdateAttempter* update_attempter = system_state_->update_attempter();
-    if (update_attempter) {
-      update_attempter->RefreshDevicePolicy();
-      device_policy = system_state_->device_policy();
-    }
-  }
+  ConnectionManagerInterface* connection_manager =
+      system_state_->connection_manager();
 
   // Check if this setting is allowed by the device policy.
-  if (device_policy &&
-      device_policy->GetAllowedConnectionTypesForUpdate(&allowed_types)) {
+  if (connection_manager->IsAllowedConnectionTypesForUpdateSet()) {
     LogAndSetError(error,
                    FROM_HERE,
                    "Ignoring the update over cellular setting since there's "
@@ -286,7 +289,8 @@
 
   PrefsInterface* prefs = system_state_->prefs();
 
-  if (!prefs->SetBoolean(kPrefsUpdateOverCellularPermission, in_allowed)) {
+  if (!prefs ||
+      !prefs->SetBoolean(kPrefsUpdateOverCellularPermission, in_allowed)) {
     LogAndSetError(error,
                    FROM_HERE,
                    string("Error setting the update over cellular to ") +
@@ -296,24 +300,66 @@
   return true;
 }
 
-bool UpdateEngineService::GetUpdateOverCellularPermission(ErrorPtr* /* error */,
-                                                          bool* out_allowed) {
-  ConnectionManagerInterface* cm = system_state_->connection_manager();
+bool UpdateEngineService::SetUpdateOverCellularTarget(
+    brillo::ErrorPtr* error,
+    const std::string& target_version,
+    int64_t target_size) {
+  ConnectionManagerInterface* connection_manager =
+      system_state_->connection_manager();
 
-  // The device_policy is loaded in a lazy way before an update check and is
-  // used to determine if an update is allowed over cellular. Load the device
-  // policy now from the libbrillo cache if it wasn't already loaded.
-  if (!system_state_->device_policy()) {
-    UpdateAttempter* update_attempter = system_state_->update_attempter();
-    if (update_attempter)
-      update_attempter->RefreshDevicePolicy();
+  // Check if this setting is allowed by the device policy.
+  if (connection_manager->IsAllowedConnectionTypesForUpdateSet()) {
+    LogAndSetError(error,
+                   FROM_HERE,
+                   "Ignoring the update over cellular setting since there's "
+                   "a device policy enforcing this setting.");
+    return false;
   }
 
-  // Return the current setting based on the same logic used while checking for
-  // updates. A log message could be printed as the result of this test.
-  LOG(INFO) << "Checking if updates over cellular networks are allowed:";
-  *out_allowed = cm->IsUpdateAllowedOver(ConnectionType::kCellular,
-                                         ConnectionTethering::kUnknown);
+  // If the policy wasn't loaded yet, then it is still OK to change the local
+  // setting because the policy will be checked again during the update check.
+
+  PrefsInterface* prefs = system_state_->prefs();
+
+  if (!prefs ||
+      !prefs->SetString(kPrefsUpdateOverCellularTargetVersion,
+                        target_version) ||
+      !prefs->SetInt64(kPrefsUpdateOverCellularTargetSize, target_size)) {
+    LogAndSetError(
+        error, FROM_HERE, "Error setting the target for update over cellular.");
+    return false;
+  }
+  return true;
+}
+
+bool UpdateEngineService::GetUpdateOverCellularPermission(ErrorPtr* error,
+                                                          bool* out_allowed) {
+  ConnectionManagerInterface* connection_manager =
+      system_state_->connection_manager();
+
+  if (connection_manager->IsAllowedConnectionTypesForUpdateSet()) {
+    // We have device policy, so ignore the user preferences.
+    *out_allowed = connection_manager->IsUpdateAllowedOver(
+        ConnectionType::kCellular, ConnectionTethering::kUnknown);
+  } else {
+    PrefsInterface* prefs = system_state_->prefs();
+
+    if (!prefs || !prefs->Exists(kPrefsUpdateOverCellularPermission)) {
+      // Update is not allowed as user preference is not set or not available.
+      *out_allowed = false;
+      return true;
+    }
+
+    bool is_allowed;
+
+    if (!prefs->GetBoolean(kPrefsUpdateOverCellularPermission, &is_allowed)) {
+      LogAndSetError(error,
+                     FROM_HERE,
+                     "Error getting the update over cellular preference.");
+      return false;
+    }
+    *out_allowed = is_allowed;
+  }
   return true;
 }
 
@@ -360,7 +406,8 @@
 
 bool UpdateEngineService::GetLastAttemptError(ErrorPtr* /* error */,
                                               int32_t* out_last_attempt_error) {
-  ErrorCode error_code = system_state_->payload_state()->GetAttemptErrorCode();
+  ErrorCode error_code =
+      system_state_->update_attempter()->GetAttemptErrorCode();
   *out_last_attempt_error = static_cast<int>(error_code);
   return true;
 }
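The rewritten GetUpdateOverCellularPermission() above resolves the setting in a fixed order: a device policy that restricts connection types wins, otherwise the stored user preference decides, and a missing preference means updates over cellular are not allowed. A condensed, illustrative restatement (the function name and parameters are hypothetical, not part of the change):

    // Condensed restatement of the precedence implemented above.
    bool IsCellularUpdateAllowedSketch(bool policy_restricts_connection_types,
                                       bool policy_allows_cellular,
                                       bool pref_exists,
                                       bool pref_allows) {
      if (policy_restricts_connection_types)
        return policy_allows_cellular;  // Enrolled policy overrides the user.
      if (!pref_exists)
        return false;  // No preference stored: disallow by default.
      return pref_allows;  // Otherwise the user preference decides.
    }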
diff --git a/common_service.h b/common_service.h
index 544dd93..f93855d 100644
--- a/common_service.h
+++ b/common_service.h
@@ -20,6 +20,7 @@
 #include <inttypes.h>
 
 #include <string>
+#include <vector>
 
 #include <base/memory/ref_counted.h>
 #include <brillo/errors/error.h>
@@ -52,6 +53,13 @@
                      int32_t in_flags_as_int,
                      bool* out_result);
 
+  // Attempts a DLC module install operation.
+  // |omaha_url|: the URL to query for update.
+  // |dlc_module_ids|: a list of DLC module IDs.
+  bool AttemptInstall(brillo::ErrorPtr* error,
+                      const std::string& omaha_url,
+                      const std::vector<std::string>& dlc_module_ids);
+
   bool AttemptRollback(brillo::ErrorPtr* error, bool in_powerwash);
 
   // Checks if the system rollback is available by verifying if the secondary
@@ -114,6 +122,12 @@
   bool SetUpdateOverCellularPermission(brillo::ErrorPtr* error,
                                        bool in_allowed);
 
+  // If there's no device policy installed, sets the update over cellular
+  // target. Otherwise, this method returns with an error.
+  bool SetUpdateOverCellularTarget(brillo::ErrorPtr* error,
+                                   const std::string& target_version,
+                                   int64_t target_size);
+
   // Returns the current value of the update over cellular network setting,
   // either forced by the device policy if the device is enrolled or the current
   // user preference otherwise.
diff --git a/common_service_unittest.cc b/common_service_unittest.cc
index d9ef567..65202a0 100644
--- a/common_service_unittest.cc
+++ b/common_service_unittest.cc
@@ -18,6 +18,7 @@
 
 #include <gtest/gtest.h>
 #include <string>
+#include <vector>
 
 #include <brillo/errors/error.h>
 #include <policy/libpolicy.h>
@@ -28,6 +29,7 @@
 #include "update_engine/omaha_utils.h"
 
 using std::string;
+using std::vector;
 using testing::_;
 using testing::Return;
 using testing::SetArgPointee;
@@ -41,9 +43,7 @@
       : mock_update_attempter_(fake_system_state_.mock_update_attempter()),
         common_service_(&fake_system_state_) {}
 
-  void SetUp() override {
-    fake_system_state_.set_device_policy(nullptr);
-  }
+  void SetUp() override { fake_system_state_.set_device_policy(nullptr); }
 
   // Fake/mock infrastructure.
   FakeSystemState fake_system_state_;
@@ -85,6 +85,21 @@
   EXPECT_FALSE(result);
 }
 
+TEST_F(UpdateEngineServiceTest, AttemptInstall) {
+  EXPECT_CALL(*mock_update_attempter_, CheckForInstall(_, _))
+      .WillOnce(Return(true));
+
+  EXPECT_TRUE(common_service_.AttemptInstall(&error_, "", {}));
+  EXPECT_EQ(nullptr, error_);
+}
+
+TEST_F(UpdateEngineServiceTest, AttemptInstallReturnsFalse) {
+  EXPECT_CALL(*mock_update_attempter_, CheckForInstall(_, _))
+      .WillOnce(Return(false));
+
+  EXPECT_FALSE(common_service_.AttemptInstall(&error_, "", {}));
+}
+
 // SetChannel is allowed when there's no device policy (the device is not
 // enterprise enrolled).
 TEST_F(UpdateEngineServiceTest, SetChannelWithNoPolicy) {
@@ -116,7 +131,8 @@
 TEST_F(UpdateEngineServiceTest, SetChannelWithInvalidChannel) {
   EXPECT_CALL(*mock_update_attempter_, RefreshDevicePolicy());
   EXPECT_CALL(*fake_system_state_.mock_request_params(),
-              SetTargetChannel("foo-channel", true, _)).WillOnce(Return(false));
+              SetTargetChannel("foo-channel", true, _))
+      .WillOnce(Return(false));
 
   EXPECT_FALSE(common_service_.SetChannel(&error_, "foo-channel", true));
   ASSERT_NE(nullptr, error_);
diff --git a/connection_manager.cc b/connection_manager.cc
index d15faf0..7263a74 100644
--- a/connection_manager.cc
+++ b/connection_manager.cc
@@ -16,6 +16,7 @@
 
 #include "update_engine/connection_manager.h"
 
+#include <memory>
 #include <set>
 #include <string>
 
@@ -30,6 +31,7 @@
 #include "update_engine/connection_utils.h"
 #include "update_engine/shill_proxy.h"
 #include "update_engine/system_state.h"
+#include "update_engine/update_attempter.h"
 
 using org::chromium::flimflam::ManagerProxyInterface;
 using org::chromium::flimflam::ServiceProxyInterface;
@@ -44,7 +46,7 @@
   return std::unique_ptr<ConnectionManagerInterface>(
       new ConnectionManager(new ShillProxy(), system_state));
 }
-}
+}  // namespace connection_manager
 
 ConnectionManager::ConnectionManager(ShillProxyInterface* shill_proxy,
                                      SystemState* system_state)
@@ -58,16 +60,27 @@
 
     case ConnectionType::kCellular: {
       set<string> allowed_types;
+
       const policy::DevicePolicy* device_policy =
           system_state_->device_policy();
 
-      // A device_policy is loaded in a lazy way right before an update check,
-      // so the device_policy should be already loaded at this point. If it's
-      // not, return a safe value for this setting.
+      // The device_policy is loaded in a lazy way before an update check. Load
+      // it now from the libbrillo cache if it wasn't already loaded.
       if (!device_policy) {
-        LOG(INFO) << "Disabling updates over cellular networks as there's no "
-                     "device policy loaded yet.";
-        return false;
+        UpdateAttempter* update_attempter = system_state_->update_attempter();
+        if (update_attempter) {
+          update_attempter->RefreshDevicePolicy();
+          device_policy = system_state_->device_policy();
+        }
+      }
+
+      if (!device_policy) {
+        // The device policy could not be loaded (possibly due to a guest
+        // account). We do not check the local user setting here; it is
+        // checked by |OmahaRequestAction| when checking for an update.
+        LOG(INFO) << "Allowing updates over cellular as the device policy "
+                     "could not be loaded.";
+        return true;
       }
 
       if (device_policy->GetAllowedConnectionTypesForUpdate(&allowed_types)) {
@@ -81,31 +94,14 @@
 
         LOG(INFO) << "Allowing updates over cellular per device policy.";
         return true;
-      } else {
-        // There's no update setting in the device policy, using the local user
-        // setting.
-        PrefsInterface* prefs = system_state_->prefs();
-
-        if (!prefs || !prefs->Exists(kPrefsUpdateOverCellularPermission)) {
-          LOG(INFO) << "Disabling updates over cellular connection as there's "
-                       "no device policy setting nor user preference present.";
-          return false;
-        }
-
-        bool stored_value;
-        if (!prefs->GetBoolean(kPrefsUpdateOverCellularPermission,
-                               &stored_value)) {
-          return false;
-        }
-
-        if (!stored_value) {
-          LOG(INFO) << "Disabling updates over cellular connection per user "
-                       "setting.";
-          return false;
-        }
-        LOG(INFO) << "Allowing updates over cellular per user setting.";
-        return true;
       }
+
+      // If there's no update setting in the device policy, we do not check
+      // the local user setting here; it is checked by |OmahaRequestAction|
+      // when checking for an update.
+      LOG(INFO) << "Allowing updates over cellular as the device policy "
+                   "does not include an update setting.";
+      return true;
     }
 
     default:
@@ -120,6 +116,21 @@
   }
 }
 
+bool ConnectionManager::IsAllowedConnectionTypesForUpdateSet() const {
+  const policy::DevicePolicy* device_policy = system_state_->device_policy();
+  if (!device_policy) {
+    LOG(INFO) << "There's no device policy loaded yet.";
+    return false;
+  }
+
+  set<string> allowed_types;
+  if (!device_policy->GetAllowedConnectionTypesForUpdate(&allowed_types)) {
+    return false;
+  }
+
+  return true;
+}
+
 bool ConnectionManager::GetConnectionProperties(
     ConnectionType* out_type, ConnectionTethering* out_tethering) {
   dbus::ObjectPath default_service_path;
@@ -127,8 +138,11 @@
   if (!default_service_path.IsValid())
     return false;
   // Shill uses the "/" service path to indicate that it is not connected.
-  if (default_service_path.value() == "/")
-    return false;
+  if (default_service_path.value() == "/") {
+    *out_type = ConnectionType::kDisconnected;
+    *out_tethering = ConnectionTethering::kUnknown;
+    return true;
+  }
   TEST_AND_RETURN_FALSE(
       GetServicePathProperties(default_service_path, out_type, out_tethering));
   return true;
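The connection_manager.cc hunks above are easier to read as a single decision flow: refresh the device policy on demand, treat a missing policy or a missing AllowedConnectionTypesForUpdate setting as "allow" (the user preference is now checked later by |OmahaRequestAction|), and otherwise honor the policy. Below is a minimal free-function sketch of that flow, reusing only names that appear in this diff; the final explicit-listing check falls partly outside the visible context, so treat it as an approximation rather than the exact implementation.

#include <set>
#include <string>
// Sketch only: assumes the same project headers as connection_manager.cc and
// the chromeos_update_engine namespace.
bool IsCellularUpdateAllowedSketch(SystemState* system_state) {
  const policy::DevicePolicy* device_policy = system_state->device_policy();
  if (!device_policy && system_state->update_attempter()) {
    // Lazily load the policy from the libbrillo cache, as the hunk does.
    system_state->update_attempter()->RefreshDevicePolicy();
    device_policy = system_state->device_policy();
  }
  if (!device_policy)
    return true;  // Policy failed to load; user setting is checked later.

  std::set<std::string> allowed_types;
  if (!device_policy->GetAllowedConnectionTypesForUpdate(&allowed_types))
    return true;  // Policy has no setting; user setting is checked later.

  // Policy is set: allow only if cellular is explicitly listed.
  return allowed_types.count(connection_utils::StringForConnectionType(
             ConnectionType::kCellular)) != 0;
}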
diff --git a/connection_manager.h b/connection_manager.h
index e5a9d49..d8527a3 100644
--- a/connection_manager.h
+++ b/connection_manager.h
@@ -17,6 +17,7 @@
 #ifndef UPDATE_ENGINE_CONNECTION_MANAGER_H_
 #define UPDATE_ENGINE_CONNECTION_MANAGER_H_
 
+#include <memory>
 #include <string>
 
 #include <base/macros.h>
@@ -43,10 +44,11 @@
                                ConnectionTethering* out_tethering) override;
   bool IsUpdateAllowedOver(ConnectionType type,
                            ConnectionTethering tethering) const override;
+  bool IsAllowedConnectionTypesForUpdateSet() const override;
 
  private:
-  // Returns (via out_path) the default network path, or empty string if
-  // there's no network up. Returns true on success.
+  // Returns (via out_path) the default network path, or "/" if there's no
+  // network up. Returns true on success.
   bool GetDefaultServicePath(dbus::ObjectPath* out_path);
 
   bool GetServicePathProperties(const dbus::ObjectPath& path,
diff --git a/connection_manager_android.cc b/connection_manager_android.cc
index 2dd824a..9d0c57b 100644
--- a/connection_manager_android.cc
+++ b/connection_manager_android.cc
@@ -16,6 +16,8 @@
 
 #include "update_engine/connection_manager_android.h"
 
+#include <memory>
+
 namespace chromeos_update_engine {
 
 namespace connection_manager {
@@ -24,7 +26,7 @@
   return std::unique_ptr<ConnectionManagerInterface>(
       new ConnectionManagerAndroid());
 }
-}
+}  // namespace connection_manager
 
 bool ConnectionManagerAndroid::GetConnectionProperties(
     ConnectionType* out_type, ConnectionTethering* out_tethering) {
@@ -34,5 +36,8 @@
     ConnectionType type, ConnectionTethering tethering) const {
   return true;
 }
+bool ConnectionManagerAndroid::IsAllowedConnectionTypesForUpdateSet() const {
+  return false;
+}
 
 }  // namespace chromeos_update_engine
diff --git a/connection_manager_android.h b/connection_manager_android.h
index 0cd5e73..006f4ea 100644
--- a/connection_manager_android.h
+++ b/connection_manager_android.h
@@ -34,6 +34,7 @@
                                ConnectionTethering* out_tethering) override;
   bool IsUpdateAllowedOver(ConnectionType type,
                            ConnectionTethering tethering) const override;
+  bool IsAllowedConnectionTypesForUpdateSet() const override;
 
   DISALLOW_COPY_AND_ASSIGN(ConnectionManagerAndroid);
 };
diff --git a/connection_manager_interface.h b/connection_manager_interface.h
index df8eb4b..9f77989 100644
--- a/connection_manager_interface.h
+++ b/connection_manager_interface.h
@@ -46,6 +46,10 @@
   virtual bool IsUpdateAllowedOver(ConnectionType type,
                                    ConnectionTethering tethering) const = 0;
 
+  // Returns true if the allowed connection types for update are set in the
+  // device policy. Otherwise, returns false.
+  virtual bool IsAllowedConnectionTypesForUpdateSet() const = 0;
+
  protected:
   ConnectionManagerInterface() = default;
 
@@ -57,7 +61,7 @@
 // Factory function which creates a ConnectionManager.
 std::unique_ptr<ConnectionManagerInterface> CreateConnectionManager(
     SystemState* system_state);
-}
+}  // namespace connection_manager
 
 }  // namespace chromeos_update_engine
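IsAllowedConnectionTypesForUpdateSet() lets the component that decides on cellular updates distinguish "the device policy decides" from "fall back to the local user preference". Below is a hedged sketch of how a caller such as |OmahaRequestAction| might combine it with the kPrefsUpdateOverCellularPermission preference that the removed ConnectionManager code used to consult; the actual caller is outside this diff and may differ.

// Illustration only; not part of this change.
bool UserAllowsUpdatesOverCellular(ConnectionManagerInterface* connection_mgr,
                                   PrefsInterface* prefs) {
  if (connection_mgr->IsAllowedConnectionTypesForUpdateSet()) {
    // The device policy carries the setting and IsUpdateAllowedOver() has
    // already enforced it, so the user preference is irrelevant here.
    return true;
  }
  bool allowed = false;
  return prefs->Exists(kPrefsUpdateOverCellularPermission) &&
         prefs->GetBoolean(kPrefsUpdateOverCellularPermission, &allowed) &&
         allowed;
}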
 
diff --git a/connection_manager_unittest.cc b/connection_manager_unittest.cc
index e26a686..3cdaf4c 100644
--- a/connection_manager_unittest.cc
+++ b/connection_manager_unittest.cc
@@ -16,8 +16,10 @@
 
 #include "update_engine/connection_manager.h"
 
+#include <memory>
 #include <set>
 #include <string>
+#include <utility>
 
 #include <base/logging.h>
 #include <brillo/any.h>
@@ -38,9 +40,9 @@
 using org::chromium::flimflam::ServiceProxyMock;
 using std::set;
 using std::string;
+using testing::_;
 using testing::Return;
 using testing::SetArgPointee;
-using testing::_;
 
 namespace chromeos_update_engine {
 
@@ -69,13 +71,14 @@
                        const char* physical_technology,
                        const char* service_tethering);
 
-  void TestWithServiceType(
-      const char* service_type,
-      const char* physical_technology,
-      ConnectionType expected_type);
-  void TestWithServiceTethering(
-      const char* service_tethering,
-      ConnectionTethering expected_tethering);
+  void TestWithServiceType(const char* service_type,
+                           const char* physical_technology,
+                           ConnectionType expected_type);
+
+  void TestWithServiceDisconnected(ConnectionType expected_type);
+
+  void TestWithServiceTethering(const char* service_tethering,
+                                ConnectionTethering expected_tethering);
 
   brillo::FakeMessageLoop loop_{nullptr};
   FakeSystemState fake_system_state_;
@@ -135,10 +138,9 @@
                                        std::move(service_proxy_mock));
 }
 
-void ConnectionManagerTest::TestWithServiceType(
-    const char* service_type,
-    const char* physical_technology,
-    ConnectionType expected_type) {
+void ConnectionManagerTest::TestWithServiceType(const char* service_type,
+                                                const char* physical_technology,
+                                                ConnectionType expected_type) {
   SetManagerReply("/service/guest/network", true);
   SetServiceReply("/service/guest/network",
                   service_type,
@@ -154,8 +156,7 @@
 }
 
 void ConnectionManagerTest::TestWithServiceTethering(
-    const char* service_tethering,
-    ConnectionTethering expected_tethering) {
+    const char* service_tethering, ConnectionTethering expected_tethering) {
   SetManagerReply("/service/guest/network", true);
   SetServiceReply(
       "/service/guest/network", shill::kTypeWifi, nullptr, service_tethering);
@@ -168,6 +169,18 @@
       fake_shill_proxy_->GetManagerProxy());
 }
 
+void ConnectionManagerTest::TestWithServiceDisconnected(
+    ConnectionType expected_type) {
+  SetManagerReply("/", true);
+
+  ConnectionType type;
+  ConnectionTethering tethering;
+  EXPECT_TRUE(cmut_.GetConnectionProperties(&type, &tethering));
+  EXPECT_EQ(expected_type, type);
+  testing::Mock::VerifyAndClearExpectations(
+      fake_shill_proxy_->GetManagerProxy());
+}
+
 TEST_F(ConnectionManagerTest, SimpleTest) {
   TestWithServiceType(shill::kTypeEthernet, nullptr, ConnectionType::kEthernet);
   TestWithServiceType(shill::kTypeWifi, nullptr, ConnectionType::kWifi);
@@ -201,6 +214,10 @@
   TestWithServiceType("foo", nullptr, ConnectionType::kUnknown);
 }
 
+TEST_F(ConnectionManagerTest, DisconnectTest) {
+  TestWithServiceDisconnected(ConnectionType::kDisconnected);
+}
+
 TEST_F(ConnectionManagerTest, AllowUpdatesOverEthernetTest) {
   // Updates over Ethernet are allowed even if there's no policy.
   EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kEthernet,
@@ -276,16 +293,24 @@
                                         ConnectionTethering::kConfirmed));
 }
 
-TEST_F(ConnectionManagerTest, BlockUpdatesOverCellularByDefaultTest) {
-  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular,
-                                         ConnectionTethering::kUnknown));
+TEST_F(ConnectionManagerTest, AllowUpdatesOverCellularByDefaultTest) {
+  policy::MockDevicePolicy device_policy;
+  // Set an empty device policy.
+  fake_system_state_.set_device_policy(&device_policy);
+
+  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular,
+                                        ConnectionTethering::kUnknown));
 }
 
-TEST_F(ConnectionManagerTest, BlockUpdatesOverTetheredNetworkByDefaultTest) {
-  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kWifi,
-                                         ConnectionTethering::kConfirmed));
-  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kEthernet,
-                                         ConnectionTethering::kConfirmed));
+TEST_F(ConnectionManagerTest, AllowUpdatesOverTetheredNetworkByDefaultTest) {
+  policy::MockDevicePolicy device_policy;
+  // Set an empty device policy.
+  fake_system_state_.set_device_policy(&device_policy);
+
+  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kWifi,
+                                        ConnectionTethering::kConfirmed));
+  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kEthernet,
+                                        ConnectionTethering::kConfirmed));
   EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kWifi,
                                         ConnectionTethering::kSuspected));
 }
@@ -310,62 +335,27 @@
                                          ConnectionTethering::kUnknown));
 }
 
-TEST_F(ConnectionManagerTest, BlockUpdatesOver3GIfErrorInPolicyFetchTest) {
-  policy::MockDevicePolicy allow_3g_policy;
+TEST_F(ConnectionManagerTest, AllowUpdatesOver3GIfPolicyIsNotSet) {
+  policy::MockDevicePolicy device_policy;
 
-  fake_system_state_.set_device_policy(&allow_3g_policy);
-
-  set<string> allowed_set;
-  allowed_set.insert(StringForConnectionType(ConnectionType::kCellular));
+  fake_system_state_.set_device_policy(&device_policy);
 
   // Return false for GetAllowedConnectionTypesForUpdate and see
-  // that updates are still blocked for 3G despite the value being in
-  // the string set above.
-  EXPECT_CALL(allow_3g_policy, GetAllowedConnectionTypesForUpdate(_))
-      .Times(1)
-      .WillOnce(DoAll(SetArgPointee<0>(allowed_set), Return(false)));
-
-  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular,
-                                         ConnectionTethering::kUnknown));
-}
-
-TEST_F(ConnectionManagerTest, UseUserPrefForUpdatesOverCellularIfNoPolicyTest) {
-  policy::MockDevicePolicy no_policy;
-  testing::NiceMock<MockPrefs>* prefs = fake_system_state_.mock_prefs();
-
-  fake_system_state_.set_device_policy(&no_policy);
-
-  // No setting enforced by the device policy, user prefs should be used.
-  EXPECT_CALL(no_policy, GetAllowedConnectionTypesForUpdate(_))
-      .Times(3)
-      .WillRepeatedly(Return(false));
-
-  // No user pref: block.
-  EXPECT_CALL(*prefs, Exists(kPrefsUpdateOverCellularPermission))
+  // that updates are still allowed because the device policy has no such
+  // setting. Further checking is left to |OmahaRequestAction|.
+  EXPECT_CALL(device_policy, GetAllowedConnectionTypesForUpdate(_))
       .Times(1)
       .WillOnce(Return(false));
-  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular,
-                                         ConnectionTethering::kUnknown));
 
-  // Allow per user pref.
-  EXPECT_CALL(*prefs, Exists(kPrefsUpdateOverCellularPermission))
-      .Times(1)
-      .WillOnce(Return(true));
-  EXPECT_CALL(*prefs, GetBoolean(kPrefsUpdateOverCellularPermission, _))
-      .Times(1)
-      .WillOnce(DoAll(SetArgPointee<1>(true), Return(true)));
   EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular,
                                         ConnectionTethering::kUnknown));
+}
 
-  // Block per user pref.
-  EXPECT_CALL(*prefs, Exists(kPrefsUpdateOverCellularPermission))
-      .Times(1)
-      .WillOnce(Return(true));
-  EXPECT_CALL(*prefs, GetBoolean(kPrefsUpdateOverCellularPermission, _))
-      .Times(1)
-      .WillOnce(DoAll(SetArgPointee<1>(false), Return(true)));
-  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular,
-                                         ConnectionTethering::kUnknown));
+TEST_F(ConnectionManagerTest, AllowUpdatesOverCellularIfPolicyFailsToBeLoaded) {
+  fake_system_state_.set_device_policy(nullptr);
+
+  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular,
+                                        ConnectionTethering::kUnknown));
 }
 
 TEST_F(ConnectionManagerTest, StringForConnectionTypeTest) {
diff --git a/connection_utils.cc b/connection_utils.cc
index 9b6b526..aeb0163 100644
--- a/connection_utils.cc
+++ b/connection_utils.cc
@@ -18,6 +18,12 @@
 
 #include <shill/dbus-constants.h>
 
+namespace {
+// Not defined by shill since these strings are only used within update_engine.
+constexpr char kTypeDisconnected[] = "Disconnected";
+constexpr char kTypeUnknown[] = "Unknown";
+}  // namespace
+
 namespace chromeos_update_engine {
 namespace connection_utils {
 
@@ -32,6 +38,8 @@
     return ConnectionType::kBluetooth;
   } else if (type_str == shill::kTypeCellular) {
     return ConnectionType::kCellular;
+  } else if (type_str == kTypeDisconnected) {
+    return ConnectionType::kDisconnected;
   }
   return ConnectionType::kUnknown;
 }
@@ -59,10 +67,12 @@
       return shill::kTypeBluetooth;
     case ConnectionType::kCellular:
       return shill::kTypeCellular;
+    case ConnectionType::kDisconnected:
+      return kTypeDisconnected;
     case ConnectionType::kUnknown:
-      return "Unknown";
+      return kTypeUnknown;
   }
-  return "Unknown";
+  return kTypeUnknown;
 }
 
 }  // namespace connection_utils
diff --git a/connection_utils.h b/connection_utils.h
index e385517..d5133a1 100644
--- a/connection_utils.h
+++ b/connection_utils.h
@@ -22,6 +22,7 @@
 namespace chromeos_update_engine {
 
 enum class ConnectionType {
+  kDisconnected,
   kEthernet,
   kWifi,
   kWimax,
diff --git a/daemon.cc b/daemon.cc
index f016fec..d42344a 100644
--- a/daemon.cc
+++ b/daemon.cc
@@ -66,7 +66,7 @@
   // Create the Binder Service.
 #if USE_OMAHA
   binder_service_ = new BinderUpdateEngineBrilloService{real_system_state};
-#else  // !USE_OMAHA
+#else   // !USE_OMAHA
   binder_service_ = new BinderUpdateEngineAndroidService{
       daemon_state_android->service_delegate()};
 #endif  // USE_OMAHA
@@ -87,7 +87,7 @@
   dbus_adaptor_->RegisterAsync(base::Bind(&UpdateEngineDaemon::OnDBusRegistered,
                                           base::Unretained(this)));
   LOG(INFO) << "Waiting for DBus object to be registered.";
-#else  // !USE_DBUS
+#else   // !USE_DBUS
   daemon_state_->StartUpdater();
 #endif  // USE_DBUS
   return EX_OK;
diff --git a/daemon_state_android.cc b/daemon_state_android.cc
index 0960b1a..c9c09b8 100644
--- a/daemon_state_android.cc
+++ b/daemon_state_android.cc
@@ -36,7 +36,7 @@
 
   hardware_ = hardware::CreateHardware();
   if (!hardware_) {
-    LOG(ERROR) << "Error intializing the HardwareInterface.";
+    LOG(ERROR) << "Error initializing the HardwareInterface.";
     return false;
   }
 
diff --git a/dbus_bindings/org.chromium.KioskAppService.dbus-xml b/dbus_bindings/org.chromium.KioskAppService.dbus-xml
new file mode 100644
index 0000000..11b888b
--- /dev/null
+++ b/dbus_bindings/org.chromium.KioskAppService.dbus-xml
@@ -0,0 +1,10 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+
+<node name="/org/chromium/KioskAppService"
+      xmlns:tp="http://telepathy.freedesktop.org/wiki/DbusSpec#extensions-v0">
+  <interface name="org.chromium.KioskAppServiceInterface">
+    <method name="GetRequiredPlatformVersion">
+      <arg name="required_platform_version" type="s" direction="out" />
+    </method>
+  </interface>
+</node>
diff --git a/dbus_bindings/org.chromium.LibCrosService.dbus-xml b/dbus_bindings/org.chromium.LibCrosService.dbus-xml
deleted file mode 100644
index 3111c63..0000000
--- a/dbus_bindings/org.chromium.LibCrosService.dbus-xml
+++ /dev/null
@@ -1,10 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" ?>
-
-<node name="/org/chromium/LibCrosService"
-      xmlns:tp="http://telepathy.freedesktop.org/wiki/DbusSpec#extensions-v0">
-  <interface name="org.chromium.LibCrosServiceInterface">
-    <method name="GetKioskAppRequiredPlatformVersion">
-      <arg name="required_platform_version" type="s" direction="out" />
-    </method>
-  </interface>
-</node>
diff --git a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml
index 848f775..f81d4ed 100644
--- a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml
+++ b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml
@@ -19,6 +19,9 @@
       <!-- See AttemptUpdateFlags enum in update_engine/dbus-constants.h. -->
       <arg type="i" name="flags" direction="in" />
     </method>
+    <method name="AttemptInstall">
+      <arg type="s" name="dlc_request" direction="in" />
+    </method>
     <method name="AttemptRollback">
       <arg type="b" name="powerwash" direction="in" />
     </method>
@@ -67,6 +70,10 @@
     <method name="SetUpdateOverCellularPermission">
       <arg type="b" name="allowed" direction="in" />
     </method>
+    <method name="SetUpdateOverCellularTarget">
+      <arg type="s" name="target_version" direction="in" />
+      <arg type="x" name="target_size" direction="in" />
+    </method>
     <method name="GetUpdateOverCellularPermission">
       <arg type="b" name="allowed" direction="out" />
     </method>
diff --git a/dbus_service.cc b/dbus_service.cc
index 47aeec7..7296053 100644
--- a/dbus_service.cc
+++ b/dbus_service.cc
@@ -16,7 +16,12 @@
 
 #include "update_engine/dbus_service.h"
 
-#include "update_engine/dbus-constants.h"
+#include <string>
+#include <vector>
+
+#include <update_engine/dbus-constants.h>
+#include <update_engine/proto_bindings/update_engine.pb.h>
+
 #include "update_engine/dbus_connection.h"
 #include "update_engine/update_status_utils.h"
 
@@ -25,11 +30,11 @@
 using brillo::ErrorPtr;
 using chromeos_update_engine::UpdateEngineService;
 using std::string;
+using std::vector;
 using update_engine::UpdateEngineStatus;
 
 DBusUpdateEngineService::DBusUpdateEngineService(SystemState* system_state)
-    : common_(new UpdateEngineService{system_state}) {
-}
+    : common_(new UpdateEngineService{system_state}) {}
 
 // org::chromium::UpdateEngineInterfaceInterface methods implementation.
 
@@ -50,14 +55,34 @@
   bool interactive = !(flags & update_engine::kAttemptUpdateFlagNonInteractive);
   bool result;
   return common_->AttemptUpdate(
-             error,
-             in_app_version,
-             in_omaha_url,
-             interactive
-                 ? 0
-                 : update_engine::UpdateAttemptFlags::kFlagNonInteractive,
-             &result) &&
-         result;
+      error,
+      in_app_version,
+      in_omaha_url,
+      interactive ? 0 : update_engine::UpdateAttemptFlags::kFlagNonInteractive,
+      &result);
+}
+
+bool DBusUpdateEngineService::AttemptInstall(ErrorPtr* error,
+                                             const string& dlc_request) {
+  // Parse the raw parameters into protobuf.
+  DlcParameters dlc_parameters;
+  if (!dlc_parameters.ParseFromString(dlc_request)) {
+    *error = brillo::Error::Create(
+        FROM_HERE, "update_engine", "INTERNAL", "parameters are invalid.");
+    return false;
+  }
+  // Extract fields from the protobuf.
+  vector<string> dlc_module_ids;
+  for (const auto& dlc_info : dlc_parameters.dlc_infos()) {
+    if (dlc_info.dlc_id().empty()) {
+      *error = brillo::Error::Create(
+          FROM_HERE, "update_engine", "INTERNAL", "parameters are invalid.");
+      return false;
+    }
+    dlc_module_ids.push_back(dlc_info.dlc_id());
+  }
+  return common_->AttemptInstall(
+      error, dlc_parameters.omaha_url(), dlc_module_ids);
 }
 
 bool DBusUpdateEngineService::AttemptRollback(ErrorPtr* error,
@@ -133,6 +158,14 @@
   return common_->SetUpdateOverCellularPermission(error, in_allowed);
 }
 
+bool DBusUpdateEngineService::SetUpdateOverCellularTarget(
+    brillo::ErrorPtr* error,
+    const std::string& target_version,
+    int64_t target_size) {
+  return common_->SetUpdateOverCellularTarget(
+      error, target_version, target_size);
+}
+
 bool DBusUpdateEngineService::GetUpdateOverCellularPermission(
     ErrorPtr* error, bool* out_allowed) {
   return common_->GetUpdateOverCellularPermission(error, out_allowed);
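AttemptInstall() expects |dlc_request| to be a serialized DlcParameters protobuf whose repeated dlc_infos entries each carry a non-empty dlc_id. A client-side sketch of building such a request follows; only the field names used above come from this diff, the sample DLC id is hypothetical, and the message is written unqualified as in dbus_service.cc.

#include <string>
#include <update_engine/proto_bindings/update_engine.pb.h>

std::string MakeDlcInstallRequest() {
  DlcParameters params;
  params.set_omaha_url("");             // Empty: use the default Omaha URL.
  auto* info = params.add_dlc_infos();  // Repeated DlcInfo field.
  info->set_dlc_id("sample-dlc");       // Hypothetical DLC identifier.
  std::string serialized;
  params.SerializeToString(&serialized);
  return serialized;                    // Passed as |dlc_request|.
}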
diff --git a/dbus_service.h b/dbus_service.h
index b754661..134461b 100644
--- a/dbus_service.h
+++ b/dbus_service.h
@@ -49,6 +49,9 @@
                               const std::string& in_omaha_url,
                               int32_t in_flags_as_int) override;
 
+  bool AttemptInstall(brillo::ErrorPtr* error,
+                      const std::string& dlc_request) override;
+
   bool AttemptRollback(brillo::ErrorPtr* error, bool in_powerwash) override;
 
   // Checks if the system rollback is available by verifying if the secondary
@@ -114,6 +117,12 @@
   bool SetUpdateOverCellularPermission(brillo::ErrorPtr* error,
                                        bool in_allowed) override;
 
+  // If there's no device policy installed, sets the update over cellular
+  // target. Otherwise, this method returns with an error.
+  bool SetUpdateOverCellularTarget(brillo::ErrorPtr* error,
+                                   const std::string& target_version,
+                                   int64_t target_size) override;
+
   // Returns the current value of the update over cellular network setting,
   // either forced by the device policy if the device is enrolled or the current
   // user preference otherwise.
@@ -154,7 +163,7 @@
 class UpdateEngineAdaptor : public org::chromium::UpdateEngineInterfaceAdaptor,
                             public ServiceObserverInterface {
  public:
-  UpdateEngineAdaptor(SystemState* system_state);
+  explicit UpdateEngineAdaptor(SystemState* system_state);
   ~UpdateEngineAdaptor() = default;
 
   // Register the DBus object with the update engine service asynchronously.
diff --git a/dlcservice_chromeos.cc b/dlcservice_chromeos.cc
new file mode 100644
index 0000000..e95f08f
--- /dev/null
+++ b/dlcservice_chromeos.cc
@@ -0,0 +1,54 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/dlcservice_chromeos.h"
+
+#include <dlcservice/dbus-proxies.h>
+#include <dlcservice/proto_bindings/dlcservice.pb.h>
+
+#include "update_engine/dbus_connection.h"
+
+using std::string;
+using std::vector;
+
+namespace chromeos_update_engine {
+
+std::unique_ptr<DlcServiceInterface> CreateDlcService() {
+  return std::make_unique<DlcServiceChromeOS>();
+}
+
+bool DlcServiceChromeOS::GetInstalled(vector<string>* dlc_module_ids) {
+  if (!dlc_module_ids)
+    return false;
+  org::chromium::DlcServiceInterfaceProxy dlcservice_proxy(
+      DBusConnection::Get()->GetDBus());
+  string dlc_module_list_str;
+  if (!dlcservice_proxy.GetInstalled(&dlc_module_list_str, nullptr)) {
+    LOG(ERROR) << "dlcservice does not return installed DLC module list.";
+    return false;
+  }
+  dlcservice::DlcModuleList dlc_module_list;
+  if (!dlc_module_list.ParseFromString(dlc_module_list_str)) {
+    LOG(ERROR) << "Errors parsing DlcModuleList protobuf.";
+    return false;
+  }
+  for (const auto& dlc_module_info : dlc_module_list.dlc_module_infos()) {
+    dlc_module_ids->emplace_back(dlc_module_info.dlc_id());
+  }
+  return true;
+}
+
+}  // namespace chromeos_update_engine
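Together with the SystemState::dlcservice() accessor added later in this change (see fake_system_state.h), a caller can enumerate the installed modules. A short sketch, assuming the accessor is also available on the real SystemState:

#include <string>
#include <vector>

#include <base/logging.h>

#include "update_engine/system_state.h"

void LogInstalledDlcModules(chromeos_update_engine::SystemState* system_state) {
  std::vector<std::string> dlc_module_ids;
  if (!system_state->dlcservice()->GetInstalled(&dlc_module_ids))
    return;
  for (const std::string& id : dlc_module_ids)
    LOG(INFO) << "Installed DLC module: " << id;
}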
diff --git a/dlcservice_chromeos.h b/dlcservice_chromeos.h
new file mode 100644
index 0000000..8d103c1
--- /dev/null
+++ b/dlcservice_chromeos.h
@@ -0,0 +1,44 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_DLCSERVICE_CHROMEOS_H_
+#define UPDATE_ENGINE_DLCSERVICE_CHROMEOS_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "update_engine/common/dlcservice_interface.h"
+
+namespace chromeos_update_engine {
+
+// The Chrome OS implementation of the DlcServiceInterface. This class
+// interacts with dlcservice via D-Bus.
+class DlcServiceChromeOS : public DlcServiceInterface {
+ public:
+  DlcServiceChromeOS() = default;
+  ~DlcServiceChromeOS() = default;
+
+  // DlcServiceInterface overrides.
+  bool GetInstalled(std::vector<std::string>* dlc_module_ids) override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(DlcServiceChromeOS);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_DLCSERVICE_CHROMEOS_H_
diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc
new file mode 100644
index 0000000..bd34ea9
--- /dev/null
+++ b/dynamic_partition_control_android.cc
@@ -0,0 +1,225 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/dynamic_partition_control_android.h"
+
+#include <memory>
+#include <set>
+#include <string>
+
+#include <android-base/properties.h>
+#include <android-base/strings.h>
+#include <base/files/file_util.h>
+#include <base/logging.h>
+#include <bootloader_message/bootloader_message.h>
+#include <fs_mgr_dm_linear.h>
+
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/utils.h"
+
+using android::base::GetBoolProperty;
+using android::base::Join;
+using android::dm::DeviceMapper;
+using android::dm::DmDeviceState;
+using android::fs_mgr::CreateLogicalPartition;
+using android::fs_mgr::DestroyLogicalPartition;
+using android::fs_mgr::MetadataBuilder;
+using android::fs_mgr::PartitionOpener;
+
+namespace chromeos_update_engine {
+
+constexpr char kUseDynamicPartitions[] = "ro.boot.dynamic_partitions";
+constexpr char kRetrofitDynamicPartitions[] =
+    "ro.boot.dynamic_partitions_retrofit";
+constexpr uint64_t kMapTimeoutMillis = 1000;
+
+DynamicPartitionControlAndroid::~DynamicPartitionControlAndroid() {
+  CleanupInternal(false /* wait */);
+}
+
+bool DynamicPartitionControlAndroid::IsDynamicPartitionsEnabled() {
+  return GetBoolProperty(kUseDynamicPartitions, false);
+}
+
+bool DynamicPartitionControlAndroid::IsDynamicPartitionsRetrofit() {
+  return GetBoolProperty(kRetrofitDynamicPartitions, false);
+}
+
+bool DynamicPartitionControlAndroid::MapPartitionOnDeviceMapper(
+    const std::string& super_device,
+    const std::string& target_partition_name,
+    uint32_t slot,
+    bool force_writable,
+    std::string* path) {
+  if (!CreateLogicalPartition(super_device.c_str(),
+                              slot,
+                              target_partition_name,
+                              force_writable,
+                              std::chrono::milliseconds(kMapTimeoutMillis),
+                              path)) {
+    LOG(ERROR) << "Cannot map " << target_partition_name << " in "
+               << super_device << " on device mapper.";
+    return false;
+  }
+  LOG(INFO) << "Succesfully mapped " << target_partition_name
+            << " to device mapper (force_writable = " << force_writable
+            << "); device path at " << *path;
+  mapped_devices_.insert(target_partition_name);
+  return true;
+}
+
+bool DynamicPartitionControlAndroid::UnmapPartitionOnDeviceMapper(
+    const std::string& target_partition_name, bool wait) {
+  if (DeviceMapper::Instance().GetState(target_partition_name) !=
+      DmDeviceState::INVALID) {
+    if (!DestroyLogicalPartition(
+            target_partition_name,
+            std::chrono::milliseconds(wait ? kMapTimeoutMillis : 0))) {
+      LOG(ERROR) << "Cannot unmap " << target_partition_name
+                 << " from device mapper.";
+      return false;
+    }
+    LOG(INFO) << "Successfully unmapped " << target_partition_name
+              << " from device mapper.";
+  }
+  mapped_devices_.erase(target_partition_name);
+  return true;
+}
+
+void DynamicPartitionControlAndroid::CleanupInternal(bool wait) {
+  // UnmapPartitionOnDeviceMapper removes objects from mapped_devices_, hence
+  // a copy is needed for the loop.
+  std::set<std::string> mapped = mapped_devices_;
+  LOG(INFO) << "Destroying [" << Join(mapped, ", ") << "] from device mapper";
+  for (const auto& partition_name : mapped) {
+    ignore_result(UnmapPartitionOnDeviceMapper(partition_name, wait));
+  }
+}
+
+void DynamicPartitionControlAndroid::Cleanup() {
+  CleanupInternal(true /* wait */);
+}
+
+bool DynamicPartitionControlAndroid::DeviceExists(const std::string& path) {
+  return base::PathExists(base::FilePath(path));
+}
+
+android::dm::DmDeviceState DynamicPartitionControlAndroid::GetState(
+    const std::string& name) {
+  return DeviceMapper::Instance().GetState(name);
+}
+
+bool DynamicPartitionControlAndroid::GetDmDevicePathByName(
+    const std::string& name, std::string* path) {
+  return DeviceMapper::Instance().GetDmDevicePathByName(name, path);
+}
+
+std::unique_ptr<MetadataBuilder>
+DynamicPartitionControlAndroid::LoadMetadataBuilder(
+    const std::string& super_device,
+    uint32_t source_slot,
+    uint32_t target_slot) {
+  std::unique_ptr<MetadataBuilder> builder;
+
+  if (target_slot != BootControlInterface::kInvalidSlot &&
+      IsDynamicPartitionsRetrofit()) {
+    builder = MetadataBuilder::NewForUpdate(
+        PartitionOpener(), super_device, source_slot, target_slot);
+  } else {
+    builder =
+        MetadataBuilder::New(PartitionOpener(), super_device, source_slot);
+  }
+
+  if (builder == nullptr) {
+    LOG(WARNING) << "No metadata slot "
+                 << BootControlInterface::SlotName(source_slot) << " in "
+                 << super_device;
+    return nullptr;
+  }
+  LOG(INFO) << "Loaded metadata from slot "
+            << BootControlInterface::SlotName(source_slot) << " in "
+            << super_device;
+  return builder;
+}
+
+bool DynamicPartitionControlAndroid::StoreMetadata(
+    const std::string& super_device,
+    MetadataBuilder* builder,
+    uint32_t target_slot) {
+  auto metadata = builder->Export();
+  if (metadata == nullptr) {
+    LOG(ERROR) << "Cannot export metadata to slot "
+               << BootControlInterface::SlotName(target_slot) << " in "
+               << super_device;
+    return false;
+  }
+
+  if (IsDynamicPartitionsRetrofit()) {
+    if (!FlashPartitionTable(super_device, *metadata)) {
+      LOG(ERROR) << "Cannot write metadata to " << super_device;
+      return false;
+    }
+    LOG(INFO) << "Written metadata to " << super_device;
+  } else {
+    if (!UpdatePartitionTable(super_device, *metadata, target_slot)) {
+      LOG(ERROR) << "Cannot write metadata to slot "
+                 << BootControlInterface::SlotName(target_slot) << " in "
+                 << super_device;
+      return false;
+    }
+    LOG(INFO) << "Copied metadata to slot "
+              << BootControlInterface::SlotName(target_slot) << " in "
+              << super_device;
+  }
+
+  return true;
+}
+
+bool DynamicPartitionControlAndroid::GetDeviceDir(std::string* out) {
+  // We can't use fs_mgr to look up |partition_name| because fstab
+  // doesn't list every slot partition (it uses the slotselect option
+  // to mask the suffix).
+  //
+  // We can however assume that there's an entry for the /misc mount
+  // point and use that to get the device file for the misc
+  // partition. This helps us locate the disk that |partition_name|
+  // resides on. From there we'll assume that a by-name scheme is used
+  // so we can just replace the trailing "misc" by the given
+  // |partition_name| and suffix corresponding to |slot|, e.g.
+  //
+  //   /dev/block/platform/soc.0/7824900.sdhci/by-name/misc ->
+  //   /dev/block/platform/soc.0/7824900.sdhci/by-name/boot_a
+  //
+  // If needed, it's possible to relax the by-name assumption in the
+  // future by trawling /sys/block looking for the appropriate sibling
+  // of misc and then finding an entry in /dev matching the sysfs
+  // entry.
+
+  std::string err, misc_device = get_bootloader_message_blk_device(&err);
+  if (misc_device.empty()) {
+    LOG(ERROR) << "Unable to get misc block device: " << err;
+    return false;
+  }
+
+  if (!utils::IsSymlink(misc_device.c_str())) {
+    LOG(ERROR) << "Device file " << misc_device << " for /misc "
+               << "is not a symlink.";
+    return false;
+  }
+  *out = base::FilePath(misc_device).DirName().value();
+  return true;
+}
+}  // namespace chromeos_update_engine
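The GetDeviceDir() comment above describes a by-name layout in which the trailing "misc" of the /misc block device is replaced with "<partition><slot suffix>". Below is a hypothetical helper showing that substitution; the helper and its arguments are illustrative and not part of this change.

#include <string>

#include "update_engine/dynamic_partition_control_interface.h"

// e.g. device_dir ".../by-name", partition_name "boot", slot_suffix "_a"
// yields ".../by-name/boot_a", matching the example in the comment above.
bool GetPartitionDevicePath(
    chromeos_update_engine::DynamicPartitionControlInterface* dpc,
    const std::string& partition_name,
    const std::string& slot_suffix,
    std::string* out_path) {
  std::string device_dir;
  if (!dpc->GetDeviceDir(&device_dir))
    return false;
  *out_path = device_dir + "/" + partition_name + slot_suffix;
  return true;
}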
diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h
new file mode 100644
index 0000000..0ccab4e
--- /dev/null
+++ b/dynamic_partition_control_android.h
@@ -0,0 +1,65 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_ANDROID_H_
+#define UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_ANDROID_H_
+
+#include "update_engine/dynamic_partition_control_interface.h"
+
+#include <memory>
+#include <set>
+#include <string>
+
+namespace chromeos_update_engine {
+
+class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface {
+ public:
+  DynamicPartitionControlAndroid() = default;
+  ~DynamicPartitionControlAndroid();
+  bool IsDynamicPartitionsEnabled() override;
+  bool IsDynamicPartitionsRetrofit() override;
+  bool MapPartitionOnDeviceMapper(const std::string& super_device,
+                                  const std::string& target_partition_name,
+                                  uint32_t slot,
+                                  bool force_writable,
+                                  std::string* path) override;
+  bool UnmapPartitionOnDeviceMapper(const std::string& target_partition_name,
+                                    bool wait) override;
+  void Cleanup() override;
+  bool DeviceExists(const std::string& path) override;
+  android::dm::DmDeviceState GetState(const std::string& name) override;
+  bool GetDmDevicePathByName(const std::string& name,
+                             std::string* path) override;
+  std::unique_ptr<android::fs_mgr::MetadataBuilder> LoadMetadataBuilder(
+      const std::string& super_device,
+      uint32_t source_slot,
+      uint32_t target_slot) override;
+  bool StoreMetadata(const std::string& super_device,
+                     android::fs_mgr::MetadataBuilder* builder,
+                     uint32_t target_slot) override;
+  bool GetDeviceDir(std::string* path) override;
+
+ private:
+  std::set<std::string> mapped_devices_;
+
+  void CleanupInternal(bool wait);
+
+  DISALLOW_COPY_AND_ASSIGN(DynamicPartitionControlAndroid);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_ANDROID_H_
diff --git a/dynamic_partition_control_interface.h b/dynamic_partition_control_interface.h
new file mode 100644
index 0000000..86a0730
--- /dev/null
+++ b/dynamic_partition_control_interface.h
@@ -0,0 +1,98 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_
+#define UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+
+#include <base/files/file_util.h>
+#include <libdm/dm.h>
+#include <liblp/builder.h>
+
+namespace chromeos_update_engine {
+
+class DynamicPartitionControlInterface {
+ public:
+  virtual ~DynamicPartitionControlInterface() = default;
+
+  // Return true iff dynamic partitions are enabled on this device.
+  virtual bool IsDynamicPartitionsEnabled() = 0;
+
+  // Return true iff dynamic partitions are retrofitted on this device.
+  virtual bool IsDynamicPartitionsRetrofit() = 0;
+
+  // Map logical partition on device-mapper.
+  // |super_device| is the device path of the physical partition ("super").
+  // |target_partition_name| is the identifier used in metadata; for example,
+  // "vendor_a"
+  // |slot| is the selected slot to mount; for example, 0 for "_a".
+  // Returns true if mapped successfully; if so, |path| is set to the device
+  // path of the mapped logical partition.
+  virtual bool MapPartitionOnDeviceMapper(
+      const std::string& super_device,
+      const std::string& target_partition_name,
+      uint32_t slot,
+      bool force_writable,
+      std::string* path) = 0;
+
+  // Unmap logical partition on device mapper. This is the reverse operation
+  // of MapPartitionOnDeviceMapper.
+  // If |wait| is set, wait until the device is unmapped.
+  // Returns true if unmapped successfully.
+  virtual bool UnmapPartitionOnDeviceMapper(
+      const std::string& target_partition_name, bool wait) = 0;
+
+  // Do necessary cleanups before destroying the object.
+  virtual void Cleanup() = 0;
+
+  // Return true if a static partition exists at device path |path|.
+  virtual bool DeviceExists(const std::string& path) = 0;
+
+  // Returns the current state of the underlying device mapper device
+  // with given name.
+  // One of INVALID, SUSPENDED or ACTIVE.
+  virtual android::dm::DmDeviceState GetState(const std::string& name) = 0;
+
+  // Returns the path to the device mapper device node in '/dev' corresponding
+  // to 'name'. If the device does not exist, false is returned, and the path
+  // parameter is not set.
+  virtual bool GetDmDevicePathByName(const std::string& name,
+                                     std::string* path) = 0;
+
+  // Retrieve metadata from |super_device| at slot |source_slot|.
+  // On retrofit devices, if |target_slot| != kInvalidSlot, the returned
+  // metadata automatically includes block devices at |target_slot|.
+  virtual std::unique_ptr<android::fs_mgr::MetadataBuilder> LoadMetadataBuilder(
+      const std::string& super_device,
+      uint32_t source_slot,
+      uint32_t target_slot) = 0;
+
+  // Write metadata |builder| to |super_device| at slot |target_slot|.
+  virtual bool StoreMetadata(const std::string& super_device,
+                             android::fs_mgr::MetadataBuilder* builder,
+                             uint32_t target_slot) = 0;
+
+  // Return a possible location for devices listed by name.
+  virtual bool GetDeviceDir(std::string* path) = 0;
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_
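The interface above is typically exercised in a map, write, unmap sequence. A hedged sketch with placeholder super-device, partition, and slot values (error handling kept minimal):

#include <string>

#include "update_engine/dynamic_partition_control_interface.h"

bool WriteLogicalPartition(
    chromeos_update_engine::DynamicPartitionControlInterface* dpc) {
  std::string path;
  if (!dpc->MapPartitionOnDeviceMapper("/dev/block/by-name/super",  // placeholder
                                       "vendor_b",                  // placeholder
                                       /*slot=*/1,
                                       /*force_writable=*/true,
                                       &path)) {
    return false;
  }
  // ... write the payload to the device node at |path| ...
  return dpc->UnmapPartitionOnDeviceMapper("vendor_b", /*wait=*/true);
}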
diff --git a/fake_file_writer.h b/fake_file_writer.h
index 43b71c7..75507ea 100644
--- a/fake_file_writer.h
+++ b/fake_file_writer.h
@@ -56,9 +56,7 @@
     return 0;
   }
 
-  const brillo::Blob& bytes() {
-    return bytes_;
-  }
+  const brillo::Blob& bytes() { return bytes_; }
 
  private:
   // The internal store of all bytes that have been written
diff --git a/fake_p2p_manager.h b/fake_p2p_manager.h
index a8cf4ea..1f8ae95 100644
--- a/fake_p2p_manager.h
+++ b/fake_p2p_manager.h
@@ -26,31 +26,23 @@
 // A fake implementation of P2PManager.
 class FakeP2PManager : public P2PManager {
  public:
-  FakeP2PManager() :
-    is_p2p_enabled_(false),
-    ensure_p2p_running_result_(false),
-    ensure_p2p_not_running_result_(false),
-    perform_housekeeping_result_(false),
-    count_shared_files_result_(0) {}
+  FakeP2PManager()
+      : is_p2p_enabled_(false),
+        ensure_p2p_running_result_(false),
+        ensure_p2p_not_running_result_(false),
+        perform_housekeeping_result_(false),
+        count_shared_files_result_(0) {}
 
   // P2PManager overrides.
   void SetDevicePolicy(const policy::DevicePolicy* device_policy) override {}
 
-  bool IsP2PEnabled() override {
-    return is_p2p_enabled_;
-  }
+  bool IsP2PEnabled() override { return is_p2p_enabled_; }
 
-  bool EnsureP2PRunning() override {
-    return ensure_p2p_running_result_;
-  }
+  bool EnsureP2PRunning() override { return ensure_p2p_running_result_; }
 
-  bool EnsureP2PNotRunning() override {
-    return ensure_p2p_not_running_result_;
-  }
+  bool EnsureP2PNotRunning() override { return ensure_p2p_not_running_result_; }
 
-  bool PerformHousekeeping() override {
-    return perform_housekeeping_result_;
-  }
+  bool PerformHousekeeping() override { return perform_housekeeping_result_; }
 
   void LookupUrlForFile(const std::string& file_id,
                         size_t minimum_size,
@@ -59,8 +51,7 @@
     callback.Run(lookup_url_for_file_result_);
   }
 
-  bool FileShare(const std::string& file_id,
-                 size_t expected_size) override {
+  bool FileShare(const std::string& file_id, size_t expected_size) override {
     return false;
   }
 
@@ -68,31 +59,22 @@
     return base::FilePath();
   }
 
-  ssize_t FileGetSize(const std::string& file_id) override {
-    return -1;
-  }
+  ssize_t FileGetSize(const std::string& file_id) override { return -1; }
 
   ssize_t FileGetExpectedSize(const std::string& file_id) override {
     return -1;
   }
 
-  bool FileGetVisible(const std::string& file_id,
-                      bool *out_result) override {
+  bool FileGetVisible(const std::string& file_id, bool* out_result) override {
     return false;
   }
 
-  bool FileMakeVisible(const std::string& file_id) override {
-    return false;
-  }
+  bool FileMakeVisible(const std::string& file_id) override { return false; }
 
-  int CountSharedFiles() override {
-    return count_shared_files_result_;
-  }
+  int CountSharedFiles() override { return count_shared_files_result_; }
 
   // Methods for controlling what the fake returns and how it acts.
-  void SetP2PEnabled(bool is_p2p_enabled) {
-    is_p2p_enabled_ = is_p2p_enabled;
-  }
+  void SetP2PEnabled(bool is_p2p_enabled) { is_p2p_enabled_ = is_p2p_enabled; }
 
   void SetEnsureP2PRunningResult(bool ensure_p2p_running_result) {
     ensure_p2p_running_result_ = ensure_p2p_running_result;
diff --git a/fake_p2p_manager_configuration.h b/fake_p2p_manager_configuration.h
index c1cf4f2..f5b0e80 100644
--- a/fake_p2p_manager_configuration.h
+++ b/fake_p2p_manager_configuration.h
@@ -32,9 +32,7 @@
 // /var/cache/p2p, a temporary directory is used.
 class FakeP2PManagerConfiguration : public P2PManager::Configuration {
  public:
-  FakeP2PManagerConfiguration() {
-    EXPECT_TRUE(p2p_dir_.CreateUniqueTempDir());
-  }
+  FakeP2PManagerConfiguration() { EXPECT_TRUE(p2p_dir_.CreateUniqueTempDir()); }
 
   // P2PManager::Configuration override
   base::FilePath GetP2PDir() override { return p2p_dir_.GetPath(); }
@@ -45,15 +43,15 @@
   }
 
   // P2PManager::Configuration override
-  std::vector<std::string> GetP2PClientArgs(const std::string &file_id,
+  std::vector<std::string> GetP2PClientArgs(const std::string& file_id,
                                             size_t minimum_size) override {
     std::vector<std::string> formatted_command = p2p_client_cmd_format_;
     // Replace {variable} on the passed string.
     std::string str_minimum_size = std::to_string(minimum_size);
     for (std::string& arg : formatted_command) {
       base::ReplaceSubstringsAfterOffset(&arg, 0, "{file_id}", file_id);
-      base::ReplaceSubstringsAfterOffset(&arg, 0, "{minsize}",
-                                         str_minimum_size);
+      base::ReplaceSubstringsAfterOffset(
+          &arg, 0, "{minsize}", str_minimum_size);
     }
     return formatted_command;
   }
diff --git a/fake_shill_proxy.cc b/fake_shill_proxy.cc
index 17698cd..de96511 100644
--- a/fake_shill_proxy.cc
+++ b/fake_shill_proxy.cc
@@ -16,6 +16,8 @@
 
 #include "update_engine/fake_shill_proxy.h"
 
+#include <utility>
+
 using org::chromium::flimflam::ManagerProxyMock;
 using org::chromium::flimflam::ServiceProxyInterface;
 
@@ -31,8 +33,8 @@
 std::unique_ptr<ServiceProxyInterface> FakeShillProxy::GetServiceForPath(
     const dbus::ObjectPath& path) {
   auto it = service_proxy_mocks_.find(path.value());
-  CHECK(it != service_proxy_mocks_.end()) << "No ServiceProxyMock set for "
-                                          << path.value();
+  CHECK(it != service_proxy_mocks_.end())
+      << "No ServiceProxyMock set for " << path.value();
   std::unique_ptr<ServiceProxyInterface> result = std::move(it->second);
   service_proxy_mocks_.erase(it);
   return result;
diff --git a/fake_system_state.h b/fake_system_state.h
index 67ad3aa..24b1eec 100644
--- a/fake_system_state.h
+++ b/fake_system_state.h
@@ -100,6 +100,8 @@
     return power_manager_;
   }
 
+  inline DlcServiceInterface* dlcservice() override { return dlcservice_; }
+
   inline bool system_rebooted() override { return fake_system_rebooted_; }
 
   // Setters for the various members, can be used for overriding the default
@@ -116,8 +118,8 @@
 
   inline void set_connection_manager(
       ConnectionManagerInterface* connection_manager) {
-    connection_manager_ = (connection_manager ? connection_manager :
-                           &mock_connection_manager_);
+    connection_manager_ =
+        (connection_manager ? connection_manager : &mock_connection_manager_);
   }
 
   inline void set_hardware(HardwareInterface* hardware) {
@@ -134,30 +136,30 @@
   }
 
   inline void set_powerwash_safe_prefs(PrefsInterface* powerwash_safe_prefs) {
-    powerwash_safe_prefs_ = (powerwash_safe_prefs ? powerwash_safe_prefs :
-                             &mock_powerwash_safe_prefs_);
+    powerwash_safe_prefs_ =
+        (powerwash_safe_prefs ? powerwash_safe_prefs
+                              : &mock_powerwash_safe_prefs_);
   }
 
-  inline void set_payload_state(PayloadStateInterface *payload_state) {
+  inline void set_payload_state(PayloadStateInterface* payload_state) {
     payload_state_ = payload_state ? payload_state : &mock_payload_state_;
   }
 
   inline void set_update_attempter(UpdateAttempter* update_attempter) {
-    update_attempter_ = (update_attempter ? update_attempter :
-                         &mock_update_attempter_);
+    update_attempter_ =
+        (update_attempter ? update_attempter : &mock_update_attempter_);
   }
 
   inline void set_request_params(OmahaRequestParams* request_params) {
-    request_params_ = (request_params ? request_params :
-                       &mock_request_params_);
+    request_params_ = (request_params ? request_params : &mock_request_params_);
   }
 
-  inline void set_p2p_manager(P2PManager *p2p_manager) {
+  inline void set_p2p_manager(P2PManager* p2p_manager) {
     p2p_manager_ = p2p_manager ? p2p_manager : &mock_p2p_manager_;
   }
 
   inline void set_update_manager(
-      chromeos_update_manager::UpdateManager *update_manager) {
+      chromeos_update_manager::UpdateManager* update_manager) {
     update_manager_ = update_manager ? update_manager : &fake_update_manager_;
   }
 
@@ -165,6 +167,10 @@
     fake_system_rebooted_ = system_rebooted;
   }
 
+  inline void set_dlcservice(DlcServiceInterface* dlcservice) {
+    dlcservice_ = dlcservice;
+  }
+
   // Getters for the built-in default implementations. These return the actual
   // concrete type of each implementation. For additional safety, they will fail
   // whenever the requested default was overridden by a different
@@ -195,12 +201,12 @@
     return &mock_metrics_reporter_;
   }
 
-  inline testing::NiceMock<MockPrefs> *mock_prefs() {
+  inline testing::NiceMock<MockPrefs>* mock_prefs() {
     CHECK(prefs_ == &mock_prefs_);
     return &mock_prefs_;
   }
 
-  inline testing::NiceMock<MockPrefs> *mock_powerwash_safe_prefs() {
+  inline testing::NiceMock<MockPrefs>* mock_powerwash_safe_prefs() {
     CHECK(powerwash_safe_prefs_ == &mock_powerwash_safe_prefs_);
     return &mock_powerwash_safe_prefs_;
   }
@@ -261,6 +267,7 @@
   P2PManager* p2p_manager_;
   chromeos_update_manager::UpdateManager* update_manager_;
   PowerManagerInterface* power_manager_{&mock_power_manager_};
+  DlcServiceInterface* dlcservice_;
 
   // Other object pointers (not preinitialized).
   const policy::DevicePolicy* device_policy_;
diff --git a/fuzz/xml.dict b/fuzz/xml.dict
new file mode 100644
index 0000000..8449cb0
--- /dev/null
+++ b/fuzz/xml.dict
@@ -0,0 +1,125 @@
+#
+# AFL dictionary for XML
+# ----------------------
+#
+# Several basic syntax elements and attributes, modeled on libxml2.
+#
+# Created by Michal Zalewski <lcamtuf@google.com>
+#
+
+attr_encoding=" encoding=\"1\""
+attr_generic=" a=\"1\""
+attr_href=" href=\"1\""
+attr_standalone=" standalone=\"no\""
+attr_version=" version=\"1\""
+attr_xml_base=" xml:base=\"1\""
+attr_xml_id=" xml:id=\"1\""
+attr_xml_lang=" xml:lang=\"1\""
+attr_xml_space=" xml:space=\"1\""
+attr_xmlns=" xmlns=\"1\""
+
+entity_builtin="&lt;"
+entity_decimal="&#1;"
+entity_external="&a;"
+entity_hex="&#x1;"
+
+# keywords
+"ANY"
+"ATTLIST"
+"CDATA"
+"DOCTYPE"
+"ELEMENT"
+"EMPTY"
+"ENTITIES"
+"ENTITY"
+"FIXED"
+"ID"
+"IDREF"
+"IDREFS"
+"IGNORE"
+"IMPLIED"
+"INCLUDE"
+"NDATA"
+"NMTOKEN"
+"NMTOKENS"
+"NOTATION"
+"PCDATA"
+"PUBLIC"
+"REQUIRED"
+"SYSTEM"
+
+# Various tag parts
+"<"
+">"
+"/>"
+"</"
+"<?"
+"?>"
+"<!"
+"!>"
+"[]"
+"]]"
+"<![CDATA["
+"<![CDATA[]]>"
+"\"\""
+"''"
+"=\"\""
+"=''"
+
+# DTD
+"<!ATTLIST"
+"<!DOCTYPE"
+"<!ELEMENT"
+"<!ENTITY"
+"<![IGNORE["
+"<![INCLUDE["
+"<!NOTATION"
+"#CDATA"
+"#FIXED"
+"#IMPLIED"
+"#PCDATA"
+"#REQUIRED"
+
+# Encodings
+"ISO-8859-1"
+"US-ASCII"
+"UTF-8"
+"UTF-16"
+"UTF-16BE"
+"UTF-16LE"
+
+# Namespaces and schemas
+"xmlns"
+"xmlns:"
+"xmlns:xhtml=\"http://www.w3.org/1999/xhtml\""
+"xmlns:xml=\"http://www.w3.org/XML/1998/namespace\""
+"xmlns:xmlns=\"http://www.w3.org/2000/xmlns\""
+
+string_col_fallback=":fallback"
+string_col_generic=":a"
+string_col_include=":include"
+string_dashes="--"
+string_parentheses="()"
+string_percent="%a"
+string_schema=":schema"
+string_ucs4="UCS-4"
+tag_close="</a>"
+tag_open="<a>"
+tag_open_close="<a />"
+
+
+"<?xml?>"
+"http://docboo"
+"http://www.w"
+"he30"
+"he2"
+"IET"
+"FDF-10"
+"aDUCS-4OPveb:"
+"a>"
+"UT"
+"xMl"
+"/usr/share/sg"
+"ha07"
+"http://www.oa"
+"cle"
diff --git a/hardware_android.cc b/hardware_android.cc
index 947b13a..21d4659 100644
--- a/hardware_android.cc
+++ b/hardware_android.cc
@@ -16,23 +16,16 @@
 
 #include "update_engine/hardware_android.h"
 
-#include <fcntl.h>
-#include <sys/stat.h>
 #include <sys/types.h>
 
-#include <algorithm>
 #include <memory>
 
-#include <bootloader.h>
-
 #include <android-base/properties.h>
 #include <base/files/file_util.h>
-#include <base/strings/stringprintf.h>
+#include <bootloader_message/bootloader_message.h>
 
 #include "update_engine/common/hardware.h"
 #include "update_engine/common/platform_constants.h"
-#include "update_engine/common/utils.h"
-#include "update_engine/utils_android.h"
 
 using android::base::GetBoolProperty;
 using android::base::GetIntProperty;
@@ -43,12 +36,6 @@
 
 namespace {
 
-// The powerwash arguments passed to recovery. Arguments are separated by \n.
-const char kAndroidRecoveryPowerwashCommand[] =
-    "recovery\n"
-    "--wipe_data\n"
-    "--reason=wipe_data_from_ota\n";
-
 // Android properties that identify the hardware and potentially non-updatable
 // parts of the bootloader (such as the bootloader version and the baseband
 // version).
@@ -59,39 +46,6 @@
 const char kPropBootRevision[] = "ro.boot.revision";
 const char kPropBuildDateUTC[] = "ro.build.date.utc";
 
-// Write a recovery command line |message| to the BCB. The arguments to recovery
-// must be separated by '\n'. An empty string will erase the BCB.
-bool WriteBootloaderRecoveryMessage(const string& message) {
-  base::FilePath misc_device;
-  if (!utils::DeviceForMountPoint("/misc", &misc_device))
-    return false;
-
-  // Setup a bootloader_message with just the command and recovery fields set.
-  bootloader_message boot = {};
-  if (!message.empty()) {
-    strncpy(boot.command, "boot-recovery", sizeof(boot.command) - 1);
-    memcpy(boot.recovery,
-           message.data(),
-           std::min(message.size(), sizeof(boot.recovery) - 1));
-  }
-
-  int fd = HANDLE_EINTR(open(misc_device.value().c_str(), O_WRONLY | O_SYNC));
-  if (fd < 0) {
-    PLOG(ERROR) << "Opening misc";
-    return false;
-  }
-  ScopedFdCloser fd_closer(&fd);
-  // We only re-write the first part of the bootloader_message, up to and
-  // including the recovery message.
-  size_t boot_size =
-      offsetof(bootloader_message, recovery) + sizeof(boot.recovery);
-  if (!utils::WriteAll(fd, &boot, boot_size)) {
-    PLOG(ERROR) << "Writing recovery command to misc";
-    return false;
-  }
-  return true;
-}
-
 }  // namespace
 
 namespace hardware {
@@ -167,18 +121,56 @@
   return GetProperty(kPropBootBaseband, "");
 }
 
+int HardwareAndroid::GetMinKernelKeyVersion() const {
+  LOG(WARNING) << "STUB: No Kernel key version is available.";
+  return -1;
+}
+
+int HardwareAndroid::GetMinFirmwareKeyVersion() const {
+  LOG(WARNING) << "STUB: No Firmware key version is available.";
+  return -1;
+}
+
+int HardwareAndroid::GetMaxFirmwareKeyRollforward() const {
+  LOG(WARNING) << "STUB: Getting firmware_max_rollforward is not supported.";
+  return -1;
+}
+
+bool HardwareAndroid::SetMaxFirmwareKeyRollforward(
+    int firmware_max_rollforward) {
+  LOG(WARNING) << "STUB: Setting firmware_max_rollforward is not supported.";
+  return false;
+}
+
+bool HardwareAndroid::SetMaxKernelKeyRollforward(int kernel_max_rollforward) {
+  LOG(WARNING) << "STUB: Setting kernel_max_rollforward is not supported.";
+  return false;
+}
+
 int HardwareAndroid::GetPowerwashCount() const {
   LOG(WARNING) << "STUB: Assuming no factory reset was performed.";
   return 0;
 }
 
-bool HardwareAndroid::SchedulePowerwash() {
+bool HardwareAndroid::SchedulePowerwash(bool is_rollback) {
   LOG(INFO) << "Scheduling a powerwash to BCB.";
-  return WriteBootloaderRecoveryMessage(kAndroidRecoveryPowerwashCommand);
+  LOG_IF(WARNING, is_rollback) << "is_rollback was true but isn't supported.";
+  string err;
+  if (!update_bootloader_message({"--wipe_data", "--reason=wipe_data_from_ota"},
+                                 &err)) {
+    LOG(ERROR) << "Failed to update bootloader message: " << err;
+    return false;
+  }
+  return true;
 }
 
 bool HardwareAndroid::CancelPowerwash() {
-  return WriteBootloaderRecoveryMessage("");
+  string err;
+  if (!clear_bootloader_message(&err)) {
+    LOG(ERROR) << "Failed to clear bootloader message: " << err;
+    return false;
+  }
+  return true;
 }
 
 bool HardwareAndroid::GetNonVolatileDirectory(base::FilePath* path) const {
@@ -205,9 +197,10 @@
   return false;
 }
 
-void HardwareAndroid::SetFirstActiveOmahaPingSent() {
-  LOG(WARNING) << "STUB: Assuming first active omaha is never set.";
-  return;
+bool HardwareAndroid::SetFirstActiveOmahaPingSent() {
+  LOG(WARNING) << "STUB: Assuming first active omaha is set.";
+  // Report success so that a failure here doesn't cause escalation.
+  return true;
 }
 
 }  // namespace chromeos_update_engine
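
For context, the hunk above replaces the hand-rolled BCB write with the bootloader_message library helpers. A minimal standalone sketch of those two calls, assuming the same AOSP headers; the main() wrapper and exit codes are illustrative only, not update_engine code:

    // Sketch only: exercises the two bootloader_message helpers adopted above.
    #include <string>
    #include <vector>

    #include <base/logging.h>
    #include <bootloader_message/bootloader_message.h>

    int main() {
      std::string err;
      // Schedule a factory reset: recovery picks these options up from the BCB
      // in the misc partition on the next boot.
      if (!update_bootloader_message(
              {"--wipe_data", "--reason=wipe_data_from_ota"}, &err)) {
        LOG(ERROR) << "Failed to update bootloader message: " << err;
        return 1;
      }
      // Cancel the pending wipe again by clearing the BCB.
      if (!clear_bootloader_message(&err)) {
        LOG(ERROR) << "Failed to clear bootloader message: " << err;
        return 1;
      }
      return 0;
    }
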
diff --git a/hardware_android.h b/hardware_android.h
index ca90b62..5b3c99d 100644
--- a/hardware_android.h
+++ b/hardware_android.h
@@ -42,14 +42,19 @@
   std::string GetHardwareClass() const override;
   std::string GetFirmwareVersion() const override;
   std::string GetECVersion() const override;
+  int GetMinKernelKeyVersion() const override;
+  int GetMinFirmwareKeyVersion() const override;
+  int GetMaxFirmwareKeyRollforward() const override;
+  bool SetMaxFirmwareKeyRollforward(int firmware_max_rollforward) override;
+  bool SetMaxKernelKeyRollforward(int kernel_max_rollforward) override;
   int GetPowerwashCount() const override;
-  bool SchedulePowerwash() override;
+  bool SchedulePowerwash(bool is_rollback) override;
   bool CancelPowerwash() override;
   bool GetNonVolatileDirectory(base::FilePath* path) const override;
   bool GetPowerwashSafeDirectory(base::FilePath* path) const override;
   int64_t GetBuildTimestamp() const override;
   bool GetFirstActiveOmahaPingSent() const override;
-  void SetFirstActiveOmahaPingSent() override;
+  bool SetFirstActiveOmahaPingSent() override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(HardwareAndroid);
diff --git a/hardware_chromeos.cc b/hardware_chromeos.cc
index f2bb28a..8ef05b2 100644
--- a/hardware_chromeos.cc
+++ b/hardware_chromeos.cc
@@ -16,6 +16,8 @@
 
 #include "update_engine/hardware_chromeos.h"
 
+#include <utility>
+
 #include <base/files/file_path.h>
 #include <base/files/file_util.h>
 #include <base/logging.h>
@@ -59,9 +61,13 @@
 const char kPowerwashMarkerFile[] =
     "/mnt/stateful_partition/factory_install_reset";
 
-// The contents of the powerwash marker file.
+// The contents of the powerwash marker file for the non-rollback case.
 const char kPowerwashCommand[] = "safe fast keepimg reason=update_engine\n";
 
+// The contents of the powerwash marker file for the rollback case.
+const char kRollbackPowerwashCommand[] =
+    "safe fast keepimg rollback reason=update_engine\n";
+
 // UpdateManager config path.
 const char* kConfigFilePath = "/etc/update_manager.conf";
 
@@ -129,8 +135,7 @@
   struct stat statbuf;
   if (stat(kOOBECompletedMarker, &statbuf) != 0) {
     if (errno != ENOENT) {
-      PLOG(ERROR) << "Error getting information about "
-                  << kOOBECompletedMarker;
+      PLOG(ERROR) << "Error getting information about " << kOOBECompletedMarker;
     }
     return false;
   }
@@ -143,8 +148,8 @@
 static string ReadValueFromCrosSystem(const string& key) {
   char value_buffer[VB_MAX_STRING_PROPERTY];
 
-  const char* rv = VbGetSystemPropertyString(key.c_str(), value_buffer,
-                                             sizeof(value_buffer));
+  const char* rv = VbGetSystemPropertyString(
+      key.c_str(), value_buffer, sizeof(value_buffer));
   if (rv != nullptr) {
     string return_value(value_buffer);
     base::TrimWhitespaceASCII(return_value, base::TRIM_ALL, &return_value);
@@ -180,10 +185,38 @@
   return utils::ParseECVersion(input_line);
 }
 
+int HardwareChromeOS::GetMinKernelKeyVersion() const {
+  return VbGetSystemPropertyInt("tpm_kernver");
+}
+
+int HardwareChromeOS::GetMaxFirmwareKeyRollforward() const {
+  return VbGetSystemPropertyInt("firmware_max_rollforward");
+}
+
+bool HardwareChromeOS::SetMaxFirmwareKeyRollforward(
+    int firmware_max_rollforward) {
+  // Not all devices have this field yet, so first try to read it and fail if
+  // there is an error.
+  if (GetMaxFirmwareKeyRollforward() == -1)
+    return false;
+
+  return VbSetSystemPropertyInt("firmware_max_rollforward",
+                                firmware_max_rollforward) == 0;
+}
+
+int HardwareChromeOS::GetMinFirmwareKeyVersion() const {
+  return VbGetSystemPropertyInt("tpm_fwver");
+}
+
+bool HardwareChromeOS::SetMaxKernelKeyRollforward(int kernel_max_rollforward) {
+  return VbSetSystemPropertyInt("kernel_max_rollforward",
+                                kernel_max_rollforward) == 0;
+}
+
 int HardwareChromeOS::GetPowerwashCount() const {
   int powerwash_count;
-  base::FilePath marker_path = base::FilePath(kPowerwashSafeDirectory).Append(
-      kPowerwashCountMarker);
+  base::FilePath marker_path =
+      base::FilePath(kPowerwashSafeDirectory).Append(kPowerwashCountMarker);
   string contents;
   if (!utils::ReadFile(marker_path.value(), &contents))
     return -1;
@@ -193,12 +226,15 @@
   return powerwash_count;
 }
 
-bool HardwareChromeOS::SchedulePowerwash() {
+bool HardwareChromeOS::SchedulePowerwash(bool is_rollback) {
+  const char* powerwash_command =
+      is_rollback ? kRollbackPowerwashCommand : kPowerwashCommand;
   bool result = utils::WriteFile(
-      kPowerwashMarkerFile, kPowerwashCommand, strlen(kPowerwashCommand));
+      kPowerwashMarkerFile, powerwash_command, strlen(powerwash_command));
   if (result) {
     LOG(INFO) << "Created " << kPowerwashMarkerFile
-              << " to powerwash on next reboot";
+              << " to powerwash on next reboot (is_rollback=" << is_rollback
+              << ")";
   } else {
     PLOG(ERROR) << "Error in creating powerwash marker file: "
                 << kPowerwashMarkerFile;
@@ -257,7 +293,7 @@
 bool HardwareChromeOS::GetFirstActiveOmahaPingSent() const {
   int exit_code = 0;
   string active_ping_str;
-  vector<string> cmd = { "vpd_get_value", kActivePingKey };
+  vector<string> cmd = {"vpd_get_value", kActivePingKey};
   if (!Subprocess::SynchronousExec(cmd, &exit_code, &active_ping_str) ||
       exit_code) {
     LOG(ERROR) << "Failed to get vpd key for " << kActivePingKey
@@ -265,9 +301,7 @@
     return false;
   }
 
-  base::TrimWhitespaceASCII(active_ping_str,
-                            base::TRIM_ALL,
-                            &active_ping_str);
+  base::TrimWhitespaceASCII(active_ping_str, base::TRIM_ALL, &active_ping_str);
   int active_ping;
   if (active_ping_str.empty() ||
       !base::StringToInt(active_ping_str, &active_ping)) {
@@ -277,26 +311,26 @@
   return static_cast<bool>(active_ping);
 }
 
-void HardwareChromeOS::SetFirstActiveOmahaPingSent() {
+bool HardwareChromeOS::SetFirstActiveOmahaPingSent() {
   int exit_code = 0;
   string output;
   vector<string> vpd_set_cmd = {
-    "vpd", "-i", "RW_VPD", "-s", string(kActivePingKey) + "=1" };
+      "vpd", "-i", "RW_VPD", "-s", string(kActivePingKey) + "=1"};
   if (!Subprocess::SynchronousExec(vpd_set_cmd, &exit_code, &output) ||
       exit_code) {
     LOG(ERROR) << "Failed to set vpd key for " << kActivePingKey
-               << " with exit code: " << exit_code
-               << " with error: " << output;
-    return;
+               << " with exit code: " << exit_code << " with error: " << output;
+    return false;
   }
 
-  vector<string> vpd_dump_cmd = { "dump_vpd_log", "--force" };
+  vector<string> vpd_dump_cmd = {"dump_vpd_log", "--force"};
   if (!Subprocess::SynchronousExec(vpd_dump_cmd, &exit_code, &output) ||
       exit_code) {
-    LOG(ERROR) << "Failed to cache " << kActivePingKey<< " using dump_vpd_log"
-               << " with exit code: " << exit_code
-               << " with error: " << output;
+    LOG(ERROR) << "Failed to cache " << kActivePingKey << " using dump_vpd_log"
+               << " with exit code: " << exit_code << " with error: " << output;
+    return false;
   }
+  return true;
 }
 
 }  // namespace chromeos_update_engine
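
A hypothetical caller-side sketch of the new rollback-aware hooks introduced above, SchedulePowerwash(is_rollback) and SetMaxKernelKeyRollforward(); the PrepareRollback() wrapper and its policy are assumptions for illustration, not update_engine's actual rollback flow:

    #include <base/logging.h>

    #include "update_engine/common/hardware_interface.h"

    namespace chromeos_update_engine {

    // Illustrative helper, not part of this change.
    bool PrepareRollback(HardwareInterface* hardware, int target_kernel_key) {
      // Keep the TPM from rolling forward past the key version we intend to
      // boot back into.
      if (!hardware->SetMaxKernelKeyRollforward(target_kernel_key))
        LOG(WARNING) << "Could not limit kernel_max_rollforward.";
      // Request the rollback-flavored powerwash; on Chrome OS this writes the
      // marker with the extra "rollback" token, on Android it is unsupported.
      return hardware->SchedulePowerwash(/*is_rollback=*/true);
    }

    }  // namespace chromeos_update_engine
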
diff --git a/hardware_chromeos.h b/hardware_chromeos.h
index 0cf1214..8829866 100644
--- a/hardware_chromeos.h
+++ b/hardware_chromeos.h
@@ -17,6 +17,7 @@
 #ifndef UPDATE_ENGINE_HARDWARE_CHROMEOS_H_
 #define UPDATE_ENGINE_HARDWARE_CHROMEOS_H_
 
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -46,14 +47,19 @@
   std::string GetHardwareClass() const override;
   std::string GetFirmwareVersion() const override;
   std::string GetECVersion() const override;
+  int GetMinKernelKeyVersion() const override;
+  int GetMinFirmwareKeyVersion() const override;
+  int GetMaxFirmwareKeyRollforward() const override;
+  bool SetMaxFirmwareKeyRollforward(int firmware_max_rollforward) override;
+  bool SetMaxKernelKeyRollforward(int kernel_max_rollforward) override;
   int GetPowerwashCount() const override;
-  bool SchedulePowerwash() override;
+  bool SchedulePowerwash(bool is_rollback) override;
   bool CancelPowerwash() override;
   bool GetNonVolatileDirectory(base::FilePath* path) const override;
   bool GetPowerwashSafeDirectory(base::FilePath* path) const override;
   int64_t GetBuildTimestamp() const override;
   bool GetFirstActiveOmahaPingSent() const override;
-  void SetFirstActiveOmahaPingSent() override;
+  bool SetFirstActiveOmahaPingSent() override;
 
  private:
   friend class HardwareChromeOSTest;
diff --git a/image_properties_android.cc b/image_properties_android.cc
index 4dc2c02..2d418b3 100644
--- a/image_properties_android.cc
+++ b/image_properties_android.cc
@@ -23,7 +23,7 @@
 #include <android-base/properties.h>
 #include <base/logging.h>
 #include <base/strings/string_util.h>
-#include <bootloader.h>
+#include <bootloader_message/bootloader_message.h>
 #include <brillo/osrelease_reader.h>
 #include <brillo/strings/string_utils.h>
 
@@ -33,7 +33,6 @@
 #include "update_engine/common/prefs_interface.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/system_state.h"
-#include "update_engine/utils_android.h"
 
 using android::base::GetProperty;
 using std::string;
@@ -79,18 +78,23 @@
 
 // Open misc partition for read or write and output the fd in |out_fd|.
 bool OpenMisc(bool write, int* out_fd) {
-  base::FilePath misc_device;
+  string misc_device;
   int flags = write ? O_WRONLY | O_SYNC : O_RDONLY;
   if (root_prefix) {
     // Use a file for unittest and create one if doesn't exist.
-    misc_device = base::FilePath(root_prefix).Append("misc");
+    misc_device = base::FilePath(root_prefix).Append("misc").value();
     if (write)
       flags |= O_CREAT;
-  } else if (!utils::DeviceForMountPoint("/misc", &misc_device)) {
-    return false;
+  } else {
+    string err;
+    misc_device = get_bootloader_message_blk_device(&err);
+    if (misc_device.empty()) {
+      LOG(ERROR) << "Unable to get misc block device: " << err;
+      return false;
+    }
   }
 
-  int fd = HANDLE_EINTR(open(misc_device.value().c_str(), flags, 0600));
+  int fd = HANDLE_EINTR(open(misc_device.c_str(), flags, 0600));
   if (fd < 0) {
     PLOG(ERROR) << "Opening misc failed";
     return false;
diff --git a/image_properties_chromeos.cc b/image_properties_chromeos.cc
index 87c32f0..5ab8f05 100644
--- a/image_properties_chromeos.cc
+++ b/image_properties_chromeos.cc
@@ -114,7 +114,8 @@
   result.board = GetStringWithDefault(lsb_release, kLsbReleaseBoardKey, "");
   result.version = GetStringWithDefault(lsb_release, kLsbReleaseVersionKey, "");
   result.omaha_url =
-      GetStringWithDefault(lsb_release, kLsbReleaseAutoUpdateServerKey,
+      GetStringWithDefault(lsb_release,
+                           kLsbReleaseAutoUpdateServerKey,
                            constants::kOmahaDefaultProductionURL);
   // Build fingerprint not used in Chrome OS.
   result.build_fingerprint = "";
diff --git a/init/update-engine.conf b/init/update-engine.conf
index 4c05cf4..d3681db 100644
--- a/init/update-engine.conf
+++ b/init/update-engine.conf
@@ -22,7 +22,10 @@
 # also updating that reference.
 start on starting system-services
 stop on stopping system-services
-respawn
+# The default is 10 failures every 5 seconds, but with that window it is hard
+# to catch crashes that happen early in startup. So set the limit to 10
+# failures every 20 seconds, which covers the default window and more.
+respawn limit 10 20
 
 expect fork
 
diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc
index 87f30ad..ce3475d 100644
--- a/libcurl_http_fetcher.cc
+++ b/libcurl_http_fetcher.cc
@@ -164,29 +164,26 @@
   bool is_direct = (GetCurrentProxy() == kNoProxy);
   LOG(INFO) << "Using proxy: " << (is_direct ? "no" : "yes");
   if (is_direct) {
-    CHECK_EQ(curl_easy_setopt(curl_handle_,
-                              CURLOPT_PROXY,
-                              ""), CURLE_OK);
+    CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_PROXY, ""), CURLE_OK);
   } else {
-    CHECK_EQ(curl_easy_setopt(curl_handle_,
-                              CURLOPT_PROXY,
-                              GetCurrentProxy().c_str()), CURLE_OK);
+    CHECK_EQ(curl_easy_setopt(
+                 curl_handle_, CURLOPT_PROXY, GetCurrentProxy().c_str()),
+             CURLE_OK);
     // Curl seems to require us to set the protocol
     curl_proxytype type;
     if (GetProxyType(GetCurrentProxy(), &type)) {
-      CHECK_EQ(curl_easy_setopt(curl_handle_,
-                                CURLOPT_PROXYTYPE,
-                                type), CURLE_OK);
+      CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_PROXYTYPE, type),
+               CURLE_OK);
     }
   }
 
   if (post_data_set_) {
     CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_POST, 1), CURLE_OK);
-    CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_POSTFIELDS,
-                              post_data_.data()),
-             CURLE_OK);
-    CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_POSTFIELDSIZE,
-                              post_data_.size()),
+    CHECK_EQ(
+        curl_easy_setopt(curl_handle_, CURLOPT_POSTFIELDS, post_data_.data()),
+        CURLE_OK);
+    CHECK_EQ(curl_easy_setopt(
+                 curl_handle_, CURLOPT_POSTFIELDSIZE, post_data_.size()),
              CURLE_OK);
   }
 
@@ -225,7 +222,7 @@
     size_t end_offset = 0;
     if (download_length_) {
       end_offset = static_cast<size_t>(resume_offset_) + download_length_ - 1;
-      CHECK_LE((size_t) resume_offset_, end_offset);
+      CHECK_LE((size_t)resume_offset_, end_offset);
     }
 
     // Create a string representation of the desired range.
@@ -238,30 +235,30 @@
   }
 
   CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_WRITEDATA, this), CURLE_OK);
-  CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_WRITEFUNCTION,
-                            StaticLibcurlWrite), CURLE_OK);
-  CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_URL, url_.c_str()),
-           CURLE_OK);
+  CHECK_EQ(
+      curl_easy_setopt(curl_handle_, CURLOPT_WRITEFUNCTION, StaticLibcurlWrite),
+      CURLE_OK);
+  CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_URL, url_.c_str()), CURLE_OK);
 
   // If the connection drops under |low_speed_limit_bps_| (10
   // bytes/sec by default) for |low_speed_time_seconds_| (90 seconds,
   // 180 on non-official builds), reconnect.
-  CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_LOW_SPEED_LIMIT,
-                            low_speed_limit_bps_),
+  CHECK_EQ(curl_easy_setopt(
+               curl_handle_, CURLOPT_LOW_SPEED_LIMIT, low_speed_limit_bps_),
            CURLE_OK);
-  CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_LOW_SPEED_TIME,
-                            low_speed_time_seconds_),
+  CHECK_EQ(curl_easy_setopt(
+               curl_handle_, CURLOPT_LOW_SPEED_TIME, low_speed_time_seconds_),
            CURLE_OK);
-  CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_CONNECTTIMEOUT,
-                            connect_timeout_seconds_),
+  CHECK_EQ(curl_easy_setopt(
+               curl_handle_, CURLOPT_CONNECTTIMEOUT, connect_timeout_seconds_),
            CURLE_OK);
 
   // By default, libcurl doesn't follow redirections. Allow up to
   // |kDownloadMaxRedirects| redirections.
   CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_FOLLOWLOCATION, 1), CURLE_OK);
-  CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_MAXREDIRS,
-                            kDownloadMaxRedirects),
-           CURLE_OK);
+  CHECK_EQ(
+      curl_easy_setopt(curl_handle_, CURLOPT_MAXREDIRS, kDownloadMaxRedirects),
+      CURLE_OK);
 
   // Lock down the appropriate curl options for HTTP or HTTPS depending on
   // the url.
@@ -296,9 +293,9 @@
   LOG(INFO) << "Setting up curl options for HTTP";
   CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_PROTOCOLS, CURLPROTO_HTTP),
            CURLE_OK);
-  CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_REDIR_PROTOCOLS,
-                            CURLPROTO_HTTP),
-           CURLE_OK);
+  CHECK_EQ(
+      curl_easy_setopt(curl_handle_, CURLOPT_REDIR_PROTOCOLS, CURLPROTO_HTTP),
+      CURLE_OK);
 }
 
 // Security lock-down in official builds: makes sure that peer certificate
@@ -306,25 +303,24 @@
 // restricts protocols to HTTPS, restricts ciphers to HIGH.
 void LibcurlHttpFetcher::SetCurlOptionsForHttps() {
   LOG(INFO) << "Setting up curl options for HTTPS";
-  CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_SSL_VERIFYPEER, 1),
-           CURLE_OK);
-  CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_SSL_VERIFYHOST, 2),
-           CURLE_OK);
-  CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_CAPATH,
-                            constants::kCACertificatesPath),
+  CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_SSL_VERIFYPEER, 1), CURLE_OK);
+  CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_SSL_VERIFYHOST, 2), CURLE_OK);
+  CHECK_EQ(curl_easy_setopt(
+               curl_handle_, CURLOPT_CAPATH, constants::kCACertificatesPath),
            CURLE_OK);
   CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_PROTOCOLS, CURLPROTO_HTTPS),
            CURLE_OK);
-  CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_REDIR_PROTOCOLS,
-                            CURLPROTO_HTTPS),
-           CURLE_OK);
+  CHECK_EQ(
+      curl_easy_setopt(curl_handle_, CURLOPT_REDIR_PROTOCOLS, CURLPROTO_HTTPS),
+      CURLE_OK);
   CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_SSL_CIPHER_LIST, "HIGH:!ADH"),
            CURLE_OK);
   if (server_to_check_ != ServerToCheck::kNone) {
     CHECK_EQ(
         curl_easy_setopt(curl_handle_, CURLOPT_SSL_CTX_DATA, &server_to_check_),
         CURLE_OK);
-    CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_SSL_CTX_FUNCTION,
+    CHECK_EQ(curl_easy_setopt(curl_handle_,
+                              CURLOPT_SSL_CTX_FUNCTION,
                               CertificateChecker::ProcessSSLContext),
              CURLE_OK);
   }
@@ -344,8 +340,8 @@
 void LibcurlHttpFetcher::BeginTransfer(const string& url) {
   CHECK(!transfer_in_progress_);
   url_ = url;
-  auto closure = base::Bind(&LibcurlHttpFetcher::ProxiesResolved,
-                            base::Unretained(this));
+  auto closure =
+      base::Bind(&LibcurlHttpFetcher::ProxiesResolved, base::Unretained(this));
   ResolveProxiesForUrl(url_, closure);
 }
 
@@ -443,8 +439,7 @@
   // update engine performs an update check while the network is not ready
   // (e.g., right after resume). Longer term, we should check if the network
   // is online/offline and return an appropriate error code.
-  if (!sent_byte_ &&
-      http_response_code_ == 0 &&
+  if (!sent_byte_ && http_response_code_ == 0 &&
       no_network_retry_count_ < no_network_max_retries_) {
     no_network_retry_count_++;
     retry_task_id_ = MessageLoop::current()->PostDelayedTask(
@@ -487,8 +482,8 @@
   } else if ((transfer_size_ >= 0) && (bytes_downloaded_ < transfer_size_)) {
     if (!ignore_failure_)
       retry_count_++;
-    LOG(INFO) << "Transfer interrupted after downloading "
-              << bytes_downloaded_ << " of " << transfer_size_ << " bytes. "
+    LOG(INFO) << "Transfer interrupted after downloading " << bytes_downloaded_
+              << " of " << transfer_size_ << " bytes. "
               << transfer_size_ - bytes_downloaded_ << " bytes remaining "
               << "after " << retry_count_ << " attempt(s)";
 
@@ -506,8 +501,8 @@
                    base::Unretained(this)),
         TimeDelta::FromSeconds(retry_seconds_));
   } else {
-    LOG(INFO) << "Transfer completed (" << http_response_code_
-              << "), " << bytes_downloaded_ << " bytes downloaded";
+    LOG(INFO) << "Transfer completed (" << http_response_code_ << "), "
+              << bytes_downloaded_ << " bytes downloaded";
     if (delegate_) {
       bool success = IsHttpResponseSuccess();
       delegate_->TransferComplete(this, success);
@@ -520,7 +515,7 @@
   ignore_failure_ = false;
 }
 
-size_t LibcurlHttpFetcher::LibcurlWrite(void *ptr, size_t size, size_t nmemb) {
+size_t LibcurlHttpFetcher::LibcurlWrite(void* ptr, size_t size, size_t nmemb) {
   // Update HTTP response first.
   GetHttpResponseCode();
   const size_t payload_size = size * nmemb;
@@ -537,17 +532,26 @@
     double transfer_size_double;
     CHECK_EQ(curl_easy_getinfo(curl_handle_,
                                CURLINFO_CONTENT_LENGTH_DOWNLOAD,
-                               &transfer_size_double), CURLE_OK);
+                               &transfer_size_double),
+             CURLE_OK);
     off_t new_transfer_size = static_cast<off_t>(transfer_size_double);
     if (new_transfer_size > 0) {
       transfer_size_ = resume_offset_ + new_transfer_size;
     }
   }
   bytes_downloaded_ += payload_size;
-  in_write_callback_ = true;
-  if (delegate_)
-    delegate_->ReceivedBytes(this, ptr, payload_size);
-  in_write_callback_ = false;
+  if (delegate_) {
+    in_write_callback_ = true;
+    auto should_terminate = !delegate_->ReceivedBytes(this, ptr, payload_size);
+    in_write_callback_ = false;
+    if (should_terminate) {
+      LOG(INFO) << "Requesting libcurl to terminate transfer.";
+      // Returning an amount that differs from the received size signals an
+      // error condition to libcurl, which will cause the transfer to be
+      // aborted.
+      return 0;
+    }
+  }
   return payload_size;
 }
 
@@ -605,8 +609,9 @@
 
   // Ask libcurl for the set of file descriptors we should track on its
   // behalf.
-  CHECK_EQ(curl_multi_fdset(curl_multi_handle_, &fd_read, &fd_write,
-                            &fd_exc, &fd_max), CURLM_OK);
+  CHECK_EQ(curl_multi_fdset(
+               curl_multi_handle_, &fd_read, &fd_write, &fd_exc, &fd_max),
+           CURLM_OK);
 
   // We should iterate through all file descriptors up to libcurl's fd_max or
   // the highest one we're tracking, whichever is larger.
@@ -624,12 +629,12 @@
     // should always be false.
     bool is_exc = FD_ISSET(fd, &fd_exc) != 0;
     bool must_track[2] = {
-      is_exc || (FD_ISSET(fd, &fd_read) != 0),  // track 0 -- read
-      is_exc || (FD_ISSET(fd, &fd_write) != 0)  // track 1 -- write
+        is_exc || (FD_ISSET(fd, &fd_read) != 0),  // track 0 -- read
+        is_exc || (FD_ISSET(fd, &fd_write) != 0)  // track 1 -- write
     };
     MessageLoop::WatchMode watch_modes[2] = {
-      MessageLoop::WatchMode::kWatchRead,
-      MessageLoop::WatchMode::kWatchWrite,
+        MessageLoop::WatchMode::kWatchRead,
+        MessageLoop::WatchMode::kWatchWrite,
     };
 
     for (size_t t = 0; t < arraysize(fd_task_maps_); ++t) {
@@ -712,9 +717,8 @@
   for (size_t t = 0; t < arraysize(fd_task_maps_); ++t) {
     for (const auto& fd_taks_pair : fd_task_maps_[t]) {
       if (!MessageLoop::current()->CancelTask(fd_taks_pair.second)) {
-        LOG(WARNING) << "Error canceling the watch task "
-                     << fd_taks_pair.second << " for "
-                     << (t ? "writing" : "reading") << " the fd "
+        LOG(WARNING) << "Error canceling the watch task " << fd_taks_pair.second
+                     << " for " << (t ? "writing" : "reading") << " the fd "
                      << fd_taks_pair.first;
       }
     }
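
The LibcurlWrite() change above relies on documented libcurl behavior: a CURLOPT_WRITEFUNCTION callback that returns a count different from the number of bytes delivered makes libcurl abort the transfer with CURLE_WRITE_ERROR. A self-contained sketch of that mechanism; the should_stop flag and URL are placeholders:

    #include <cstddef>

    #include <curl/curl.h>

    static bool should_stop = false;

    static size_t WriteCallback(void* ptr, size_t size, size_t nmemb, void*) {
      const size_t payload_size = size * nmemb;
      if (should_stop)
        return 0;  // Differs from payload_size, so libcurl aborts the transfer.
      // ... consume |payload_size| bytes from |ptr| here ...
      (void)ptr;
      return payload_size;
    }

    int main() {
      CURL* handle = curl_easy_init();
      if (!handle)
        return 1;
      curl_easy_setopt(handle, CURLOPT_URL, "http://example.com/");
      curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, WriteCallback);
      // Returns CURLE_WRITE_ERROR if the callback requested termination.
      CURLcode res = curl_easy_perform(handle);
      curl_easy_cleanup(handle);
      return res == CURLE_OK ? 0 : 1;
    }
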
diff --git a/libcurl_http_fetcher.h b/libcurl_http_fetcher.h
index 61871c9..25a2df3 100644
--- a/libcurl_http_fetcher.h
+++ b/libcurl_http_fetcher.h
@@ -151,11 +151,13 @@
   void SetupMessageLoopSources();
 
   // Callback called by libcurl when new data has arrived on the transfer
-  size_t LibcurlWrite(void *ptr, size_t size, size_t nmemb);
-  static size_t StaticLibcurlWrite(void *ptr, size_t size,
-                                   size_t nmemb, void *stream) {
-    return reinterpret_cast<LibcurlHttpFetcher*>(stream)->
-        LibcurlWrite(ptr, size, nmemb);
+  size_t LibcurlWrite(void* ptr, size_t size, size_t nmemb);
+  static size_t StaticLibcurlWrite(void* ptr,
+                                   size_t size,
+                                   size_t nmemb,
+                                   void* stream) {
+    return reinterpret_cast<LibcurlHttpFetcher*>(stream)->LibcurlWrite(
+        ptr, size, nmemb);
   }
 
   // Cleans up the following if they are non-null:
diff --git a/main.cc b/main.cc
index 0612c54..26f9efb 100644
--- a/main.cc
+++ b/main.cc
@@ -108,9 +108,8 @@
   // we stop caring about the old-style logs.
   if (utils::FileExists(symlink_path.c_str()) &&
       !utils::IsSymlink(symlink_path.c_str())) {
-    base::ReplaceFile(base::FilePath(symlink_path),
-                      base::FilePath(log_path),
-                      nullptr);
+    base::ReplaceFile(
+        base::FilePath(symlink_path), base::FilePath(log_path), nullptr);
   }
   base::DeleteFile(base::FilePath(symlink_path), true);
   if (symlink(log_path.c_str(), symlink_path.c_str()) == -1) {
@@ -160,13 +159,13 @@
 
 int main(int argc, char** argv) {
   DEFINE_bool(logtofile, false, "Write logs to a file in log_dir.");
-  DEFINE_bool(logtostderr, false,
+  DEFINE_bool(logtostderr,
+              false,
               "Write logs to stderr instead of to a file in log_dir.");
-  DEFINE_bool(foreground, false,
-              "Don't daemon()ize; run in foreground.");
+  DEFINE_bool(foreground, false, "Don't daemon()ize; run in foreground.");
 
   chromeos_update_engine::Terminator::Init();
-  brillo::FlagHelper::Init(argc, argv, "Chromium OS Update Engine");
+  brillo::FlagHelper::Init(argc, argv, "A/B Update Engine");
 
   // We have two logging flags "--logtostderr" and "--logtofile"; and the logic
   // to choose the logging destination is:
@@ -179,7 +178,7 @@
   if (!FLAGS_foreground)
     PLOG_IF(FATAL, daemon(0, 0) == 1) << "daemon() failed";
 
-  LOG(INFO) << "Chrome OS Update Engine starting";
+  LOG(INFO) << "A/B Update Engine starting";
 
   // xz-embedded requires to initialize its CRC-32 table once on startup.
   xz_crc32_init();
@@ -194,7 +193,8 @@
   chromeos_update_engine::UpdateEngineDaemon update_engine_daemon;
   int exit_code = update_engine_daemon.Run();
 
-  LOG(INFO) << "Chrome OS Update Engine terminating with exit code "
-            << exit_code;
+  chromeos_update_engine::Subprocess::Get().FlushBufferedLogsAtExit();
+
+  LOG(INFO) << "A/B Update Engine terminating with exit code " << exit_code;
   return exit_code;
 }
diff --git a/metrics_constants.h b/metrics_constants.h
index abec2ad..eabb8fb 100644
--- a/metrics_constants.h
+++ b/metrics_constants.h
@@ -107,14 +107,15 @@
 //
 // This is used in the UpdateEngine.Attempt.ConnectionType histogram.
 enum class ConnectionType {
-  kUnknown,           // Unknown.
-  kEthernet,          // Ethernet.
-  kWifi,              // Wireless.
-  kWimax,             // WiMax.
-  kBluetooth,         // Bluetooth.
-  kCellular,          // Cellular.
-  kTetheredEthernet,  // Tethered (Ethernet).
-  kTetheredWifi,      // Tethered (Wifi).
+  kUnknown = 0,           // Unknown.
+  kEthernet = 1,          // Ethernet.
+  kWifi = 2,              // Wireless.
+  kWimax = 3,             // WiMax.
+  kBluetooth = 4,         // Bluetooth.
+  kCellular = 5,          // Cellular.
+  kTetheredEthernet = 6,  // Tethered (Ethernet).
+  kTetheredWifi = 7,      // Tethered (Wifi).
+  kDisconnected = 8,      // Disconnected.
 
   kNumConstants,
   kUnset = -1
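
The explicit values added to ConnectionType above matter because UMA records the underlying integer, not the enumerator name, so existing entries must never be renumbered and new ones (like kDisconnected) are appended before kNumConstants. A small sketch of that pattern, assuming the stock MetricsLibraryInterface::SendEnumToUMA() call used elsewhere in this change:

    #include <metrics/metrics_library.h>

    enum class ConnectionType {
      kUnknown = 0,
      kEthernet = 1,
      kWifi = 2,
      // ... values 3 through 7 elided ...
      kDisconnected = 8,
      kNumConstants,  // Always last; used as the histogram's exclusive max.
    };

    void ReportConnectionType(MetricsLibraryInterface* metrics,
                              ConnectionType type) {
      // The recorded sample is the integer value; renumbering existing entries
      // would silently shift historical histogram buckets.
      metrics->SendEnumToUMA("UpdateEngine.Attempt.ConnectionType",
                             static_cast<int>(type),
                             static_cast<int>(ConnectionType::kNumConstants));
    }
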
diff --git a/metrics_reporter_android.cc b/metrics_reporter_android.cc
index 3cb356f..9165f0d 100644
--- a/metrics_reporter_android.cc
+++ b/metrics_reporter_android.cc
@@ -41,7 +41,7 @@
 constexpr char kMetricsUpdateEngineAttemptResult[] =
     "ota_update_engine_attempt_result";
 constexpr char kMetricsUpdateEngineAttemptDurationInMinutes[] =
-    "ota_update_engine_attempt_duration_boottime_in_minutes";
+    "ota_update_engine_attempt_fixed_duration_boottime_in_minutes";
 constexpr char kMetricsUpdateEngineAttemptDurationUptimeInMinutes[] =
     "ota_update_engine_attempt_duration_monotonic_in_minutes";
 constexpr char kMetricsUpdateEngineAttemptErrorCode[] =
@@ -51,12 +51,12 @@
 constexpr char kMetricsUpdateEngineAttemptPayloadType[] =
     "ota_update_engine_attempt_payload_type";
 constexpr char kMetricsUpdateEngineAttemptCurrentBytesDownloadedMiB[] =
-    "ota_update_engine_attempt_current_bytes_downloaded_mib";
+    "ota_update_engine_attempt_fixed_current_bytes_downloaded_mib";
 
 constexpr char kMetricsUpdateEngineSuccessfulUpdateAttemptCount[] =
     "ota_update_engine_successful_update_attempt_count";
 constexpr char kMetricsUpdateEngineSuccessfulUpdateTotalDurationInMinutes[] =
-    "ota_update_engine_successful_update_total_duration_in_minutes";
+    "ota_update_engine_successful_update_fixed_total_duration_in_minutes";
 constexpr char kMetricsUpdateEngineSuccessfulUpdatePayloadSizeMiB[] =
     "ota_update_engine_successful_update_payload_size_mib";
 constexpr char kMetricsUpdateEngineSuccessfulUpdatePayloadType[] =
@@ -109,7 +109,7 @@
     metrics::DownloadErrorCode /* payload_download_error_code */,
     metrics::ConnectionType /* connection_type */) {
   LogHistogram(metrics::kMetricsUpdateEngineAttemptCurrentBytesDownloadedMiB,
-               payload_bytes_downloaded);
+               payload_bytes_downloaded / kNumBytesInOneMiB);
 }
 
 void MetricsReporterAndroid::ReportSuccessfulUpdateMetrics(
@@ -120,6 +120,7 @@
     int64_t num_bytes_downloaded[kNumDownloadSources],
     int download_overhead_percentage,
     base::TimeDelta total_duration,
+    base::TimeDelta /* total_duration_uptime */,
     int reboot_count,
     int /* url_switch_count */) {
   LogHistogram(metrics::kMetricsUpdateEngineSuccessfulUpdateAttemptCount,
diff --git a/metrics_reporter_android.h b/metrics_reporter_android.h
index ee94e43..e320c12 100644
--- a/metrics_reporter_android.h
+++ b/metrics_reporter_android.h
@@ -17,6 +17,8 @@
 #ifndef UPDATE_ENGINE_METRICS_REPORTER_ANDROID_H_
 #define UPDATE_ENGINE_METRICS_REPORTER_ANDROID_H_
 
+#include <string>
+
 #include "update_engine/common/error_code.h"
 #include "update_engine/metrics_constants.h"
 #include "update_engine/metrics_reporter_interface.h"
@@ -33,6 +35,9 @@
 
   void ReportRollbackMetrics(metrics::RollbackResult result) override {}
 
+  void ReportEnterpriseRollbackMetrics(
+      bool success, const std::string& rollback_version) override {}
+
   void ReportDailyMetrics(base::TimeDelta os_age) override {}
 
   void ReportUpdateCheckMetrics(
@@ -67,6 +72,7 @@
       int64_t num_bytes_downloaded[kNumDownloadSources],
       int download_overhead_percentage,
       base::TimeDelta total_duration,
+      base::TimeDelta total_duration_uptime,
       int reboot_count,
       int url_switch_count) override;
 
@@ -79,6 +85,15 @@
 
   void ReportInstallDateProvisioningSource(int source, int max) override {}
 
+  void ReportInternalErrorCode(ErrorCode error_code) override {}
+
+  void ReportKeyVersionMetrics(int kernel_min_version,
+                               int kernel_max_rollforward_version,
+                               bool kernel_max_rollforward_success) override {}
+
+  void ReportEnterpriseUpdateSeenToDownloadDays(
+      bool has_time_restriction_policy, int time_to_update_days) override {}
+
  private:
   DISALLOW_COPY_AND_ASSIGN(MetricsReporterAndroid);
 };
diff --git a/metrics_reporter_interface.h b/metrics_reporter_interface.h
index 2c7ce5b..fce8bfd 100644
--- a/metrics_reporter_interface.h
+++ b/metrics_reporter_interface.h
@@ -18,6 +18,7 @@
 #define UPDATE_ENGINE_METRICS_REPORTER_INTERFACE_H_
 
 #include <memory>
+#include <string>
 
 #include <base/time/time.h>
 
@@ -43,12 +44,20 @@
 
   virtual void Initialize() = 0;
 
-  // Helper function to report metrics related to rollback. The
+  // Helper function to report metrics related to user-initiated rollback. The
   // following metrics are reported:
   //
   //  |kMetricRollbackResult|
   virtual void ReportRollbackMetrics(metrics::RollbackResult result) = 0;
 
+  // Helper function to report metrics related to enterprise (admin-initiated)
+  // rollback:
+  //
+  //  |kMetricEnterpriseRollbackSuccess|
+  //  |kMetricEnterpriseRollbackFailure|
+  virtual void ReportEnterpriseRollbackMetrics(
+      bool success, const std::string& rollback_version) = 0;
+
   // Helper function to report metrics reported once a day. The
   // following metrics are reported:
   //
@@ -64,6 +73,8 @@
   //  |kMetricCheckDownloadErrorCode|
   //  |kMetricCheckTimeSinceLastCheckMinutes|
   //  |kMetricCheckTimeSinceLastCheckUptimeMinutes|
+  //  |kMetricCheckTargetVersion|
+  //  |kMetricCheckRollbackTargetVersion|
   //
   // The |kMetricCheckResult| metric will only be reported if |result|
   // is not |kUnset|.
@@ -78,6 +89,10 @@
   // |kMetricCheckTimeSinceLastCheckUptimeMinutes| metrics are
   // automatically reported and calculated by maintaining persistent
   // and process-local state variables.
+  //
+  // |kMetricCheckTargetVersion| reports the first section of the target version
+  // if it is set; |kMetricCheckRollbackTargetVersion| reports the same value,
+  // but only if rollback is also allowed by enterprise policy.
   virtual void ReportUpdateCheckMetrics(
       SystemState* system_state,
       metrics::CheckResult result,
@@ -150,6 +165,7 @@
   //  |kMetricSuccessfulUpdateDownloadSourcesUsed|
   //  |kMetricSuccessfulUpdateDownloadOverheadPercentage|
   //  |kMetricSuccessfulUpdateTotalDurationMinutes|
+  //  |kMetricSuccessfulUpdateTotalDurationUptimeMinutes|
   //  |kMetricSuccessfulUpdateRebootCount|
   //  |kMetricSuccessfulUpdateUrlSwitchCount|
   //
@@ -164,6 +180,7 @@
       int64_t num_bytes_downloaded[kNumDownloadSources],
       int download_overhead_percentage,
       base::TimeDelta total_duration,
+      base::TimeDelta total_duration_uptime,
       int reboot_count,
       int url_switch_count) = 0;
 
@@ -193,6 +210,38 @@
   //
   // |kMetricInstallDateProvisioningSource|
   virtual void ReportInstallDateProvisioningSource(int source, int max) = 0;
+
+  // Helper function to report an internal error code. The following metrics are
+  // reported:
+  //
+  // |kMetricAttemptInternalErrorCode|
+  virtual void ReportInternalErrorCode(ErrorCode error_code) = 0;
+
+  // Helper function to report metrics related to the verified boot key
+  // versions:
+  //
+  //  |kMetricKernelMinVersion|
+  //  |kMetricKernelMaxRollforwardVersion|
+  //  |kMetricKernelMaxRollforwardSetSuccess|
+  virtual void ReportKeyVersionMetrics(int kernel_min_version,
+                                       int kernel_max_rollforward_version,
+                                       bool kernel_max_rollforward_success) = 0;
+
+  // Helper function to report the duration between the time an update is seen
+  // by the client and the time it is applied. Updates are not always applied
+  // immediately when seen; several enterprise policies can affect when an
+  // update is actually downloaded and applied.
+  //
+  // This metric should only be reported for enterprise enrolled devices.
+  //
+  // The following metrics are reported from this function:
+  //   If |has_time_restriction_policy| is false:
+  //     |kMetricSuccessfulUpdateDurationFromSeenDays|
+  //   If |has_time_restriction_policy| is true:
+  //     |kMetricSuccessfulUpdateDurationFromSeenTimeRestrictedDays|
+  //
+  virtual void ReportEnterpriseUpdateSeenToDownloadDays(
+      bool has_time_restriction_policy, int time_to_update_days) = 0;
 };
 
 }  // namespace chromeos_update_engine
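
A hypothetical usage sketch of the two reporter hooks documented above; the wrapper function and the sample values are assumptions, not calls taken from update_engine:

    #include "update_engine/metrics_reporter_interface.h"

    namespace chromeos_update_engine {

    // Illustrative only.
    void ReportRollforwardAndSeenToDownload(MetricsReporterInterface* reporter) {
      // Verified-boot key versions: the enforced minimum, the max_rollforward
      // value that was requested, and whether setting it succeeded.
      reporter->ReportKeyVersionMetrics(
          /*kernel_min_version=*/0x00010001,
          /*kernel_max_rollforward_version=*/0x00010001,
          /*kernel_max_rollforward_success=*/true);

      // Enterprise-only: days from first seeing an update to applying it,
      // bucketed by whether a time-restriction policy was in effect.
      reporter->ReportEnterpriseUpdateSeenToDownloadDays(
          /*has_time_restriction_policy=*/false, /*time_to_update_days=*/3);
    }

    }  // namespace chromeos_update_engine
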
diff --git a/metrics_reporter_omaha.cc b/metrics_reporter_omaha.cc
index 0397b83..14819d8 100644
--- a/metrics_reporter_omaha.cc
+++ b/metrics_reporter_omaha.cc
@@ -17,9 +17,9 @@
 #include "update_engine/metrics_reporter_omaha.h"
 
 #include <memory>
-#include <string>
 
 #include <base/logging.h>
+#include <base/strings/string_number_conversions.h>
 #include <metrics/metrics_library.h>
 
 #include "update_engine/common/clock_interface.h"
@@ -27,6 +27,7 @@
 #include "update_engine/common/prefs_interface.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/metrics_utils.h"
+#include "update_engine/omaha_request_params.h"
 #include "update_engine/system_state.h"
 
 using std::string;
@@ -43,6 +44,9 @@
     "UpdateEngine.Check.DownloadErrorCode";
 const char kMetricCheckReaction[] = "UpdateEngine.Check.Reaction";
 const char kMetricCheckResult[] = "UpdateEngine.Check.Result";
+const char kMetricCheckTargetVersion[] = "UpdateEngine.Check.TargetVersion";
+const char kMetricCheckRollbackTargetVersion[] =
+    "UpdateEngine.Check.RollbackTargetVersion";
 const char kMetricCheckTimeSinceLastCheckMinutes[] =
     "UpdateEngine.Check.TimeSinceLastCheckMinutes";
 const char kMetricCheckTimeSinceLastCheckUptimeMinutes[] =
@@ -84,6 +88,10 @@
     "UpdateEngine.SuccessfulUpdate.DownloadOverheadPercentage";
 const char kMetricSuccessfulUpdateDownloadSourcesUsed[] =
     "UpdateEngine.SuccessfulUpdate.DownloadSourcesUsed";
+const char kMetricSuccessfulUpdateDurationFromSeenDays[] =
+    "UpdateEngine.SuccessfulUpdate.DurationFromSeenDays.NoTimeRestriction";
+const char kMetricSuccessfulUpdateDurationFromSeenTimeRestrictedDays[] =
+    "UpdateEngine.SuccessfulUpdate.DurationFromSeenDays.TimeRestricted";
 const char kMetricSuccessfulUpdatePayloadType[] =
     "UpdateEngine.SuccessfulUpdate.PayloadType";
 const char kMetricSuccessfulUpdatePayloadSizeMiB[] =
@@ -92,6 +100,8 @@
     "UpdateEngine.SuccessfulUpdate.RebootCount";
 const char kMetricSuccessfulUpdateTotalDurationMinutes[] =
     "UpdateEngine.SuccessfulUpdate.TotalDurationMinutes";
+const char kMetricSuccessfulUpdateTotalDurationUptimeMinutes[] =
+    "UpdateEngine.SuccessfulUpdate.TotalDurationUptimeMinutes";
 const char kMetricSuccessfulUpdateUpdatesAbandonedCount[] =
     "UpdateEngine.SuccessfulUpdate.UpdatesAbandonedCount";
 const char kMetricSuccessfulUpdateUrlSwitchCount[] =
@@ -100,12 +110,25 @@
 // UpdateEngine.Rollback.* metric.
 const char kMetricRollbackResult[] = "UpdateEngine.Rollback.Result";
 
+// UpdateEngine.EnterpriseRollback.* metrics.
+const char kMetricEnterpriseRollbackFailure[] =
+    "UpdateEngine.EnterpriseRollback.Failure";
+const char kMetricEnterpriseRollbackSuccess[] =
+    "UpdateEngine.EnterpriseRollback.Success";
+
 // UpdateEngine.CertificateCheck.* metrics.
 const char kMetricCertificateCheckUpdateCheck[] =
     "UpdateEngine.CertificateCheck.UpdateCheck";
 const char kMetricCertificateCheckDownload[] =
     "UpdateEngine.CertificateCheck.Download";
 
+// UpdateEngine.KernelKey.* metrics.
+const char kMetricKernelMinVersion[] = "UpdateEngine.KernelKey.MinVersion";
+const char kMetricKernelMaxRollforwardVersion[] =
+    "UpdateEngine.KernelKey.MaxRollforwardVersion";
+const char kMetricKernelMaxRollforwardSetSuccess[] =
+    "UpdateEngine.KernelKey.MaxRollforwardSetSuccess";
+
 // UpdateEngine.* metrics.
 const char kMetricFailedUpdateCount[] = "UpdateEngine.FailedUpdateCount";
 const char kMetricInstallDateProvisioningSource[] =
@@ -194,6 +217,25 @@
                             30 * 24 * 60,  // max: 30 days
                             50);           // num_buckets
   }
+
+  // First section of target version specified for the update.
+  if (system_state && system_state->request_params()) {
+    string target_version =
+        system_state->request_params()->target_version_prefix();
+    value = utils::VersionPrefix(target_version);
+    if (value != 0) {
+      metric = metrics::kMetricCheckTargetVersion;
+      LOG(INFO) << "Sending " << value << " for metric " << metric
+                << " (sparse)";
+      metrics_lib_->SendSparseToUMA(metric, value);
+      if (system_state->request_params()->rollback_allowed()) {
+        metric = metrics::kMetricCheckRollbackTargetVersion;
+        LOG(INFO) << "Sending " << value << " for metric " << metric
+                  << " (sparse)";
+        metrics_lib_->SendSparseToUMA(metric, value);
+      }
+    }
+  }
 }
 
 void MetricsReporterOmaha::ReportAbnormallyTerminatedUpdateAttemptMetrics() {
@@ -258,8 +300,6 @@
                           1024,  // max: 1024 MiB = 1 GiB
                           50);   // num_buckets
 
-
-
   metric = metrics::kMetricAttemptResult;
   LOG(INFO) << "Uploading " << static_cast<int>(attempt_result)
             << " for metric " << metric;
@@ -269,12 +309,7 @@
       static_cast<int>(metrics::AttemptResult::kNumConstants));
 
   if (internal_error_code != ErrorCode::kSuccess) {
-    metric = metrics::kMetricAttemptInternalErrorCode;
-    LOG(INFO) << "Uploading " << internal_error_code << " for metric "
-              << metric;
-    metrics_lib_->SendEnumToUMA(metric,
-                                static_cast<int>(internal_error_code),
-                                static_cast<int>(ErrorCode::kUmaReportedMax));
+    ReportInternalErrorCode(internal_error_code);
   }
 
   base::TimeDelta time_since_last;
@@ -363,6 +398,7 @@
     int64_t num_bytes_downloaded[kNumDownloadSources],
     int download_overhead_percentage,
     base::TimeDelta total_duration,
+    base::TimeDelta total_duration_uptime,
     int reboot_count,
     int url_switch_count) {
   string metric = metrics::kMetricSuccessfulUpdatePayloadSizeMiB;
@@ -442,6 +478,15 @@
                           365 * 24 * 60,  // max: 365 days ~= 1 year
                           50);            // num_buckets
 
+  metric = metrics::kMetricSuccessfulUpdateTotalDurationUptimeMinutes;
+  LOG(INFO) << "Uploading " << utils::FormatTimeDelta(total_duration_uptime)
+            << " for metric " << metric;
+  metrics_lib_->SendToUMA(metric,
+                          static_cast<int>(total_duration_uptime.InMinutes()),
+                          0,             // min: 0 min
+                          30 * 24 * 60,  // max: 30 days
+                          50);           // num_buckets
+
   metric = metrics::kMetricSuccessfulUpdateRebootCount;
   LOG(INFO) << "Uploading reboot count of " << reboot_count << " for metric "
             << metric;
@@ -483,6 +528,16 @@
       metric, value, static_cast<int>(metrics::RollbackResult::kNumConstants));
 }
 
+void MetricsReporterOmaha::ReportEnterpriseRollbackMetrics(
+    bool success, const string& rollback_version) {
+  int value = utils::VersionPrefix(rollback_version);
+  string metric = metrics::kMetricEnterpriseRollbackSuccess;
+  if (!success)
+    metric = metrics::kMetricEnterpriseRollbackFailure;
+  LOG(INFO) << "Sending " << value << " for metric " << metric;
+  metrics_lib_->SendSparseToUMA(metric, value);
+}
+
 void MetricsReporterOmaha::ReportCertificateCheckMetrics(
     ServerToCheck server_to_check, CertificateCheckResult result) {
   string metric;
@@ -535,4 +590,48 @@
                               max);
 }
 
+void MetricsReporterOmaha::ReportInternalErrorCode(ErrorCode error_code) {
+  auto metric = metrics::kMetricAttemptInternalErrorCode;
+  LOG(INFO) << "Uploading " << error_code << " for metric " << metric;
+  metrics_lib_->SendEnumToUMA(metric,
+                              static_cast<int>(error_code),
+                              static_cast<int>(ErrorCode::kUmaReportedMax));
+}
+
+void MetricsReporterOmaha::ReportKeyVersionMetrics(
+    int kernel_min_version,
+    int kernel_max_rollforward_version,
+    bool kernel_max_rollforward_success) {
+  int value = kernel_min_version;
+  string metric = metrics::kMetricKernelMinVersion;
+  LOG(INFO) << "Sending " << value << " for metric " << metric;
+  metrics_lib_->SendSparseToUMA(metric, value);
+
+  value = kernel_max_rollforward_version;
+  metric = metrics::kMetricKernelMaxRollforwardVersion;
+  LOG(INFO) << "Sending " << value << " for metric " << metric;
+  metrics_lib_->SendSparseToUMA(metric, value);
+
+  bool bool_value = kernel_max_rollforward_success;
+  metric = metrics::kMetricKernelMaxRollforwardSetSuccess;
+  LOG(INFO) << "Sending " << bool_value << " for metric " << metric
+            << " (bool)";
+  metrics_lib_->SendBoolToUMA(metric, bool_value);
+}
+
+void MetricsReporterOmaha::ReportEnterpriseUpdateSeenToDownloadDays(
+    bool has_time_restriction_policy, int time_to_update_days) {
+  string metric =
+      has_time_restriction_policy
+          ? metrics::kMetricSuccessfulUpdateDurationFromSeenTimeRestrictedDays
+          : metrics::kMetricSuccessfulUpdateDurationFromSeenDays;
+  LOG(INFO) << "Sending " << time_to_update_days << " for metric " << metric;
+
+  metrics_lib_->SendToUMA(metric,
+                          time_to_update_days,
+                          1,       // min: 1 day
+                          6 * 30,  // max: 6 months (approx)
+                          50);     // num_buckets
+}
+
 }  // namespace chromeos_update_engine
diff --git a/metrics_reporter_omaha.h b/metrics_reporter_omaha.h
index c19fe86..5680dec 100644
--- a/metrics_reporter_omaha.h
+++ b/metrics_reporter_omaha.h
@@ -18,6 +18,7 @@
 #define UPDATE_ENGINE_METRICS_REPORTER_OMAHA_H_
 
 #include <memory>
+#include <string>
 
 #include <base/time/time.h>
 #include <metrics/metrics_library.h>
@@ -42,6 +43,8 @@
 extern const char kMetricCheckDownloadErrorCode[];
 extern const char kMetricCheckReaction[];
 extern const char kMetricCheckResult[];
+extern const char kMetricCheckTargetVersion[];
+extern const char kMetricCheckRollbackTargetVersion[];
 extern const char kMetricCheckTimeSinceLastCheckMinutes[];
 extern const char kMetricCheckTimeSinceLastCheckUptimeMinutes[];
 
@@ -66,20 +69,32 @@
 extern const char kMetricSuccessfulUpdateBytesDownloadedMiB[];
 extern const char kMetricSuccessfulUpdateDownloadOverheadPercentage[];
 extern const char kMetricSuccessfulUpdateDownloadSourcesUsed[];
+extern const char kMetricSuccessfulUpdateDurationFromSeenDays[];
+extern const char kMetricSuccessfulUpdateDurationFromSeenTimeRestrictedDays[];
 extern const char kMetricSuccessfulUpdatePayloadType[];
 extern const char kMetricSuccessfulUpdatePayloadSizeMiB[];
 extern const char kMetricSuccessfulUpdateRebootCount[];
 extern const char kMetricSuccessfulUpdateTotalDurationMinutes[];
+extern const char kMetricSuccessfulUpdateTotalDurationUptimeMinutes[];
 extern const char kMetricSuccessfulUpdateUpdatesAbandonedCount[];
 extern const char kMetricSuccessfulUpdateUrlSwitchCount[];
 
 // UpdateEngine.Rollback.* metric.
 extern const char kMetricRollbackResult[];
 
+// UpdateEngine.EnterpriseRollback.* metrics.
+extern const char kMetricEnterpriseRollbackFailure[];
+extern const char kMetricEnterpriseRollbackSuccess[];
+
 // UpdateEngine.CertificateCheck.* metrics.
 extern const char kMetricCertificateCheckUpdateCheck[];
 extern const char kMetricCertificateCheckDownload[];
 
+// UpdateEngine.KernelKey.* metrics.
+extern const char kMetricKernelMinVersion[];
+extern const char kMetricKernelMaxRollforwardVersion[];
+extern const char kMetricKernelMaxRollforwardSetSuccess[];
+
 // UpdateEngine.* metrics.
 extern const char kMetricFailedUpdateCount[];
 extern const char kMetricInstallDateProvisioningSource[];
@@ -97,6 +112,9 @@
 
   void ReportRollbackMetrics(metrics::RollbackResult result) override;
 
+  void ReportEnterpriseRollbackMetrics(
+      bool success, const std::string& rollback_version) override;
+
   void ReportDailyMetrics(base::TimeDelta os_age) override;
 
   void ReportUpdateCheckMetrics(
@@ -131,6 +149,7 @@
       int64_t num_bytes_downloaded[kNumDownloadSources],
       int download_overhead_percentage,
       base::TimeDelta total_duration,
+      base::TimeDelta total_duration_uptime,
       int reboot_count,
       int url_switch_count) override;
 
@@ -143,6 +162,15 @@
 
   void ReportInstallDateProvisioningSource(int source, int max) override;
 
+  void ReportInternalErrorCode(ErrorCode error_code) override;
+
+  void ReportKeyVersionMetrics(int kernel_min_version,
+                               int kernel_max_rollforward_version,
+                               bool kernel_max_rollforward_success) override;
+
+  void ReportEnterpriseUpdateSeenToDownloadDays(
+      bool has_time_restriction_policy, int time_to_update_days) override;
+
  private:
   friend class MetricsReporterOmahaTest;
 
diff --git a/metrics_reporter_omaha_unittest.cc b/metrics_reporter_omaha_unittest.cc
index 76e33c6..545d02f 100644
--- a/metrics_reporter_omaha_unittest.cc
+++ b/metrics_reporter_omaha_unittest.cc
@@ -29,8 +29,9 @@
 #include "update_engine/fake_system_state.h"
 
 using base::TimeDelta;
-using testing::AnyNumber;
 using testing::_;
+using testing::AnyNumber;
+using testing::Return;
 
 namespace chromeos_update_engine {
 class MetricsReporterOmahaTest : public ::testing::Test {
@@ -85,6 +86,14 @@
                               static_cast<int>(error_code)))
       .Times(2);
 
+  // Neither pinned nor rollback.
+  EXPECT_CALL(*mock_metrics_lib_,
+              SendSparseToUMA(metrics::kMetricCheckTargetVersion, _))
+      .Times(0);
+  EXPECT_CALL(*mock_metrics_lib_,
+              SendSparseToUMA(metrics::kMetricCheckRollbackTargetVersion, _))
+      .Times(0);
+
   EXPECT_CALL(
       *mock_metrics_lib_,
       SendToUMA(metrics::kMetricCheckTimeSinceLastCheckMinutes, 1, _, _, _))
@@ -101,6 +110,62 @@
   // Advance the clock by 1 minute and report the same metrics again.
   fake_clock.SetWallclockTime(base::Time::FromInternalValue(61000000));
   fake_clock.SetMonotonicTime(base::Time::FromInternalValue(61000000));
+  // Allow rollback
+  reporter_.ReportUpdateCheckMetrics(
+      &fake_system_state, result, reaction, error_code);
+}
+
+TEST_F(MetricsReporterOmahaTest, ReportUpdateCheckMetricsPinned) {
+  FakeSystemState fake_system_state;
+
+  OmahaRequestParams params(&fake_system_state);
+  params.set_target_version_prefix("10575.");
+  params.set_rollback_allowed(false);
+  fake_system_state.set_request_params(&params);
+
+  metrics::CheckResult result = metrics::CheckResult::kUpdateAvailable;
+  metrics::CheckReaction reaction = metrics::CheckReaction::kIgnored;
+  metrics::DownloadErrorCode error_code =
+      metrics::DownloadErrorCode::kHttpStatus200;
+
+  EXPECT_CALL(*mock_metrics_lib_,
+              SendSparseToUMA(metrics::kMetricCheckDownloadErrorCode, _));
+  // Target version set, but not a rollback.
+  EXPECT_CALL(*mock_metrics_lib_,
+              SendSparseToUMA(metrics::kMetricCheckTargetVersion, 10575))
+      .Times(1);
+  EXPECT_CALL(*mock_metrics_lib_,
+              SendSparseToUMA(metrics::kMetricCheckRollbackTargetVersion, _))
+      .Times(0);
+
+  reporter_.ReportUpdateCheckMetrics(
+      &fake_system_state, result, reaction, error_code);
+}
+
+TEST_F(MetricsReporterOmahaTest, ReportUpdateCheckMetricsRollback) {
+  FakeSystemState fake_system_state;
+
+  OmahaRequestParams params(&fake_system_state);
+  params.set_target_version_prefix("10575.");
+  params.set_rollback_allowed(true);
+  fake_system_state.set_request_params(&params);
+
+  metrics::CheckResult result = metrics::CheckResult::kUpdateAvailable;
+  metrics::CheckReaction reaction = metrics::CheckReaction::kIgnored;
+  metrics::DownloadErrorCode error_code =
+      metrics::DownloadErrorCode::kHttpStatus200;
+
+  EXPECT_CALL(*mock_metrics_lib_,
+              SendSparseToUMA(metrics::kMetricCheckDownloadErrorCode, _));
+  // Rollback.
+  EXPECT_CALL(*mock_metrics_lib_,
+              SendSparseToUMA(metrics::kMetricCheckTargetVersion, 10575))
+      .Times(1);
+  EXPECT_CALL(
+      *mock_metrics_lib_,
+      SendSparseToUMA(metrics::kMetricCheckRollbackTargetVersion, 10575))
+      .Times(1);
+
   reporter_.ReportUpdateCheckMetrics(
       &fake_system_state, result, reaction, error_code);
 }
@@ -161,7 +226,6 @@
                         _))
       .Times(2);
 
-
   // Check the report of attempt result.
   EXPECT_CALL(
       *mock_metrics_lib_,
@@ -258,6 +322,7 @@
   num_bytes_downloaded[0] = 200 * kNumBytesInOneMiB;
   int download_overhead_percentage = 20;
   TimeDelta total_duration = TimeDelta::FromMinutes(30);
+  TimeDelta total_duration_uptime = TimeDelta::FromMinutes(20);
   int reboot_count = 2;
   int url_switch_count = 2;
 
@@ -306,6 +371,14 @@
       .Times(1);
   EXPECT_CALL(
       *mock_metrics_lib_,
+      SendToUMA(metrics::kMetricSuccessfulUpdateTotalDurationUptimeMinutes,
+                20,
+                _,
+                _,
+                _))
+      .Times(1);
+  EXPECT_CALL(
+      *mock_metrics_lib_,
       SendToUMA(
           metrics::kMetricSuccessfulUpdateRebootCount, reboot_count, _, _, _))
       .Times(1);
@@ -333,6 +406,7 @@
                                           num_bytes_downloaded,
                                           download_overhead_percentage,
                                           total_duration,
+                                          total_duration_uptime,
                                           reboot_count,
                                           url_switch_count);
 }
@@ -347,6 +421,18 @@
   reporter_.ReportRollbackMetrics(result);
 }
 
+TEST_F(MetricsReporterOmahaTest, ReportEnterpriseRollbackMetrics) {
+  EXPECT_CALL(*mock_metrics_lib_,
+              SendSparseToUMA(metrics::kMetricEnterpriseRollbackSuccess, 10575))
+      .Times(1);
+  EXPECT_CALL(*mock_metrics_lib_,
+              SendSparseToUMA(metrics::kMetricEnterpriseRollbackFailure, 10323))
+      .Times(1);
+
+  reporter_.ReportEnterpriseRollbackMetrics(/*success=*/true, "10575.39.2");
+  reporter_.ReportEnterpriseRollbackMetrics(/*success=*/false, "10323.67.7");
+}
+
 TEST_F(MetricsReporterOmahaTest, ReportCertificateCheckMetrics) {
   ServerToCheck server_to_check = ServerToCheck::kUpdate;
   CertificateCheckResult result = CertificateCheckResult::kValid;
@@ -391,4 +477,65 @@
   reporter_.ReportInstallDateProvisioningSource(source, max);
 }
 
+TEST_F(MetricsReporterOmahaTest, ReportKeyVersionMetrics) {
+  int kernel_min_version = 0x00040002;
+  int kernel_max_rollforward_version = 0xfffffffe;
+  bool kernel_max_rollforward_success = true;
+  EXPECT_CALL(
+      *mock_metrics_lib_,
+      SendSparseToUMA(metrics::kMetricKernelMinVersion, kernel_min_version))
+      .Times(1);
+  EXPECT_CALL(*mock_metrics_lib_,
+              SendSparseToUMA(metrics::kMetricKernelMaxRollforwardVersion,
+                              kernel_max_rollforward_version))
+      .Times(1);
+  EXPECT_CALL(*mock_metrics_lib_,
+              SendBoolToUMA(metrics::kMetricKernelMaxRollforwardSetSuccess,
+                            kernel_max_rollforward_success))
+      .Times(1);
+
+  reporter_.ReportKeyVersionMetrics(kernel_min_version,
+                                    kernel_max_rollforward_version,
+                                    kernel_max_rollforward_success);
+}
+
+TEST_F(MetricsReporterOmahaTest, ReportEnterpriseUpdateSeenToDownloadDays) {
+  constexpr int kDaysToUpdate = 10;
+  constexpr int kMinBucket = 1;
+  constexpr int kMaxBucket = 6 * 30;  // approximately 6 months
+  constexpr int kNumBuckets = 50;
+
+  EXPECT_CALL(*mock_metrics_lib_,
+              SendToUMA(metrics::kMetricSuccessfulUpdateDurationFromSeenDays,
+                        kDaysToUpdate,
+                        kMinBucket,
+                        kMaxBucket,
+                        kNumBuckets))
+      .Times(1);
+
+  reporter_.ReportEnterpriseUpdateSeenToDownloadDays(
+      false /* has_time_restriction_policy */, kDaysToUpdate);
+}
+
+TEST_F(MetricsReporterOmahaTest,
+       ReportEnterpriseTimeRestrictedUpdateSeenToDownloadTime) {
+  const int kDaysToUpdate = 15;
+  constexpr int kMinBucket = 1;
+  constexpr int kMaxBucket = 6 * 30;  // approximately 6 months
+  constexpr int kNumBuckets = 50;
+
+  EXPECT_CALL(
+      *mock_metrics_lib_,
+      SendToUMA(
+          metrics::kMetricSuccessfulUpdateDurationFromSeenTimeRestrictedDays,
+          kDaysToUpdate,
+          kMinBucket,
+          kMaxBucket,
+          kNumBuckets))
+      .Times(1);
+
+  reporter_.ReportEnterpriseUpdateSeenToDownloadDays(
+      true /* has_time_restriction_policy */, kDaysToUpdate);
+}
+
 }  // namespace chromeos_update_engine
diff --git a/metrics_reporter_stub.h b/metrics_reporter_stub.h
index d0f75ab..25660b5 100644
--- a/metrics_reporter_stub.h
+++ b/metrics_reporter_stub.h
@@ -17,6 +17,8 @@
 #ifndef UPDATE_ENGINE_METRICS_REPORTER_STUB_H_
 #define UPDATE_ENGINE_METRICS_REPORTER_STUB_H_
 
+#include <string>
+
 #include "update_engine/common/error_code.h"
 #include "update_engine/metrics_constants.h"
 #include "update_engine/metrics_reporter_interface.h"
@@ -33,6 +35,9 @@
 
   void ReportRollbackMetrics(metrics::RollbackResult result) override {}
 
+  void ReportEnterpriseRollbackMetrics(
+      bool success, const std::string& rollback_version) override {}
+
   void ReportDailyMetrics(base::TimeDelta os_age) override {}
 
   void ReportUpdateCheckMetrics(
@@ -67,6 +72,7 @@
       int64_t num_bytes_downloaded[kNumDownloadSources],
       int download_overhead_percentage,
       base::TimeDelta total_duration,
+      base::TimeDelta total_duration_uptime,
       int reboot_count,
       int url_switch_count) override {}
 
@@ -79,6 +85,15 @@
 
   void ReportInstallDateProvisioningSource(int source, int max) override {}
 
+  void ReportInternalErrorCode(ErrorCode error_code) override {}
+
+  void ReportKeyVersionMetrics(int kernel_min_version,
+                               int kernel_max_rollforward_version,
+                               bool kernel_max_rollforward_success) override {}
+
+  void ReportEnterpriseUpdateSeenToDownloadDays(
+      bool has_time_restriction_policy, int time_to_update_days) override {}
+
  private:
   DISALLOW_COPY_AND_ASSIGN(MetricsReporterStub);
 };
diff --git a/metrics_utils.cc b/metrics_utils.cc
index 46530f0..070626a 100644
--- a/metrics_utils.cc
+++ b/metrics_utils.cc
@@ -68,6 +68,7 @@
     case ErrorCode::kDownloadWriteError:
     case ErrorCode::kFilesystemCopierError:
     case ErrorCode::kFilesystemVerifierError:
+    case ErrorCode::kVerityCalculationError:
       return metrics::AttemptResult::kOperationExecutionError;
 
     case ErrorCode::kDownloadMetadataSignatureMismatch:
@@ -83,6 +84,7 @@
 
     case ErrorCode::kNewRootfsVerificationError:
     case ErrorCode::kNewKernelVerificationError:
+    case ErrorCode::kRollbackNotPossible:
       return metrics::AttemptResult::kVerificationFailed;
 
     case ErrorCode::kPostinstallRunnerError:
@@ -114,6 +116,9 @@
     case ErrorCode::kPostinstallPowerwashError:
     case ErrorCode::kUpdateCanceledByChannelChange:
     case ErrorCode::kOmahaRequestXMLHasEntityDecl:
+    case ErrorCode::kOmahaUpdateIgnoredOverCellular:
+    case ErrorCode::kNoUpdate:
+    case ErrorCode::kFirstActiveOmahaPingSentPersistenceError:
       return metrics::AttemptResult::kInternalError;
 
     // Special flags. These can't happen (we mask them out above) but
@@ -214,8 +219,13 @@
     case ErrorCode::kOmahaRequestXMLHasEntityDecl:
     case ErrorCode::kFilesystemVerifierError:
     case ErrorCode::kUserCanceled:
+    case ErrorCode::kOmahaUpdateIgnoredOverCellular:
     case ErrorCode::kPayloadTimestampError:
     case ErrorCode::kUpdatedButNotActive:
+    case ErrorCode::kNoUpdate:
+    case ErrorCode::kRollbackNotPossible:
+    case ErrorCode::kFirstActiveOmahaPingSentPersistenceError:
+    case ErrorCode::kVerityCalculationError:
       break;
 
     // Special flags. These can't happen (we mask them out above) but
@@ -241,6 +251,9 @@
     case ConnectionType::kUnknown:
       return metrics::ConnectionType::kUnknown;
 
+    case ConnectionType::kDisconnected:
+      return metrics::ConnectionType::kDisconnected;
+
     case ConnectionType::kEthernet:
       if (tethering == ConnectionTethering::kConfirmed)
         return metrics::ConnectionType::kTetheredEthernet;
@@ -358,10 +371,19 @@
   CHECK(prefs);
   prefs->SetInt64(kPrefsUpdateTimestampStart,
                   update_start_time.ToInternalValue());
-  LOG(INFO) << "Update Timestamp Start = "
+  LOG(INFO) << "Update Monotonic Timestamp Start = "
             << utils::ToString(update_start_time);
 }
 
+void SetUpdateBootTimestampStart(const base::Time& update_start_boot_time,
+                                 PrefsInterface* prefs) {
+  CHECK(prefs);
+  prefs->SetInt64(kPrefsUpdateBootTimestampStart,
+                  update_start_boot_time.ToInternalValue());
+  LOG(INFO) << "Update Boot Timestamp Start = "
+            << utils::ToString(update_start_boot_time);
+}
+
 bool LoadAndReportTimeToReboot(MetricsReporterInterface* metrics_reporter,
                                PrefsInterface* prefs,
                                ClockInterface* clock) {
diff --git a/metrics_utils.h b/metrics_utils.h
index d08cc4a..8f1aad1 100644
--- a/metrics_utils.h
+++ b/metrics_utils.h
@@ -87,10 +87,16 @@
 // Persists the finished time of an update to the |kPrefsSystemUpdatedMarker|.
 void SetSystemUpdatedMarker(ClockInterface* clock, PrefsInterface* prefs);
 
-// Persists the start time of an update to |kPrefsUpdateTimestampStart|.
+// Persists the start monotonic time of an update to
+// |kPrefsUpdateTimestampStart|.
 void SetUpdateTimestampStart(const base::Time& update_start_time,
                              PrefsInterface* prefs);
 
+// Persists the start boot time of an update to
+// |kPrefsUpdateBootTimestampStart|.
+void SetUpdateBootTimestampStart(const base::Time& update_start_boot_time,
+                                 PrefsInterface* prefs);
+
 // Called at program startup if the device booted into a new update.
 // The |time_to_reboot| parameter contains the (monotonic-clock) duration
 // from when the update successfully completed (the value in
diff --git a/metrics_utils_unittest.cc b/metrics_utils_unittest.cc
index edf6bc3..e7c4c26 100644
--- a/metrics_utils_unittest.cc
+++ b/metrics_utils_unittest.cc
@@ -32,15 +32,18 @@
   EXPECT_EQ(metrics::ConnectionType::kUnknown,
             GetConnectionType(ConnectionType::kUnknown,
                               ConnectionTethering::kUnknown));
+  EXPECT_EQ(metrics::ConnectionType::kDisconnected,
+            GetConnectionType(ConnectionType::kDisconnected,
+                              ConnectionTethering::kUnknown));
   EXPECT_EQ(metrics::ConnectionType::kEthernet,
             GetConnectionType(ConnectionType::kEthernet,
                               ConnectionTethering::kUnknown));
-  EXPECT_EQ(metrics::ConnectionType::kWifi,
-            GetConnectionType(ConnectionType::kWifi,
-                              ConnectionTethering::kUnknown));
-  EXPECT_EQ(metrics::ConnectionType::kWimax,
-            GetConnectionType(ConnectionType::kWimax,
-                              ConnectionTethering::kUnknown));
+  EXPECT_EQ(
+      metrics::ConnectionType::kWifi,
+      GetConnectionType(ConnectionType::kWifi, ConnectionTethering::kUnknown));
+  EXPECT_EQ(
+      metrics::ConnectionType::kWimax,
+      GetConnectionType(ConnectionType::kWimax, ConnectionTethering::kUnknown));
   EXPECT_EQ(metrics::ConnectionType::kBluetooth,
             GetConnectionType(ConnectionType::kBluetooth,
                               ConnectionTethering::kUnknown));
@@ -72,9 +75,9 @@
   EXPECT_EQ(metrics::ConnectionType::kWifi,
             GetConnectionType(ConnectionType::kWifi,
                               ConnectionTethering::kSuspected));
-  EXPECT_EQ(metrics::ConnectionType::kWifi,
-            GetConnectionType(ConnectionType::kWifi,
-                              ConnectionTethering::kUnknown));
+  EXPECT_EQ(
+      metrics::ConnectionType::kWifi,
+      GetConnectionType(ConnectionType::kWifi, ConnectionTethering::kUnknown));
 }
 
 TEST(MetricsUtilsTest, WallclockDurationHelper) {
@@ -91,61 +94,51 @@
   fake_clock.SetWallclockTime(base::Time::FromInternalValue(1000000));
 
   // First time called so no previous measurement available.
-  EXPECT_FALSE(metrics_utils::WallclockDurationHelper(&fake_system_state,
-                                                      state_variable_key,
-                                                      &duration));
+  EXPECT_FALSE(metrics_utils::WallclockDurationHelper(
+      &fake_system_state, state_variable_key, &duration));
 
   // Next time, we should get zero since the clock didn't advance.
-  EXPECT_TRUE(metrics_utils::WallclockDurationHelper(&fake_system_state,
-                                                     state_variable_key,
-                                                     &duration));
+  EXPECT_TRUE(metrics_utils::WallclockDurationHelper(
+      &fake_system_state, state_variable_key, &duration));
   EXPECT_EQ(duration.InSeconds(), 0);
 
   // We can also call it as many times as we want without it being
   // considered a failure.
-  EXPECT_TRUE(metrics_utils::WallclockDurationHelper(&fake_system_state,
-                                                     state_variable_key,
-                                                     &duration));
+  EXPECT_TRUE(metrics_utils::WallclockDurationHelper(
+      &fake_system_state, state_variable_key, &duration));
   EXPECT_EQ(duration.InSeconds(), 0);
-  EXPECT_TRUE(metrics_utils::WallclockDurationHelper(&fake_system_state,
-                                                     state_variable_key,
-                                                     &duration));
+  EXPECT_TRUE(metrics_utils::WallclockDurationHelper(
+      &fake_system_state, state_variable_key, &duration));
   EXPECT_EQ(duration.InSeconds(), 0);
 
   // Advance the clock one second, then we should get 1 sec on the
   // next call and 0 sec on the subsequent call.
   fake_clock.SetWallclockTime(base::Time::FromInternalValue(2000000));
-  EXPECT_TRUE(metrics_utils::WallclockDurationHelper(&fake_system_state,
-                                                     state_variable_key,
-                                                     &duration));
+  EXPECT_TRUE(metrics_utils::WallclockDurationHelper(
+      &fake_system_state, state_variable_key, &duration));
   EXPECT_EQ(duration.InSeconds(), 1);
-  EXPECT_TRUE(metrics_utils::WallclockDurationHelper(&fake_system_state,
-                                                     state_variable_key,
-                                                     &duration));
+  EXPECT_TRUE(metrics_utils::WallclockDurationHelper(
+      &fake_system_state, state_variable_key, &duration));
   EXPECT_EQ(duration.InSeconds(), 0);
 
   // Advance clock two seconds and we should get 2 sec and then 0 sec.
   fake_clock.SetWallclockTime(base::Time::FromInternalValue(4000000));
-  EXPECT_TRUE(metrics_utils::WallclockDurationHelper(&fake_system_state,
-                                                     state_variable_key,
-                                                     &duration));
+  EXPECT_TRUE(metrics_utils::WallclockDurationHelper(
+      &fake_system_state, state_variable_key, &duration));
   EXPECT_EQ(duration.InSeconds(), 2);
-  EXPECT_TRUE(metrics_utils::WallclockDurationHelper(&fake_system_state,
-                                                     state_variable_key,
-                                                     &duration));
+  EXPECT_TRUE(metrics_utils::WallclockDurationHelper(
+      &fake_system_state, state_variable_key, &duration));
   EXPECT_EQ(duration.InSeconds(), 0);
 
   // There's a possibility that the wallclock can go backwards (NTP
   // adjustments, for example) so check that we properly handle this
   // case.
   fake_clock.SetWallclockTime(base::Time::FromInternalValue(3000000));
-  EXPECT_FALSE(metrics_utils::WallclockDurationHelper(&fake_system_state,
-                                                      state_variable_key,
-                                                      &duration));
+  EXPECT_FALSE(metrics_utils::WallclockDurationHelper(
+      &fake_system_state, state_variable_key, &duration));
   fake_clock.SetWallclockTime(base::Time::FromInternalValue(4000000));
-  EXPECT_TRUE(metrics_utils::WallclockDurationHelper(&fake_system_state,
-                                                     state_variable_key,
-                                                     &duration));
+  EXPECT_TRUE(metrics_utils::WallclockDurationHelper(
+      &fake_system_state, state_variable_key, &duration));
   EXPECT_EQ(duration.InSeconds(), 1);
 }
 
@@ -161,48 +154,40 @@
   fake_clock.SetMonotonicTime(base::Time::FromInternalValue(1000000));
 
   // First time called so no previous measurement available.
-  EXPECT_FALSE(metrics_utils::MonotonicDurationHelper(&fake_system_state,
-                                                      &storage,
-                                                      &duration));
+  EXPECT_FALSE(metrics_utils::MonotonicDurationHelper(
+      &fake_system_state, &storage, &duration));
 
   // Next time, we should get zero since the clock didn't advance.
-  EXPECT_TRUE(metrics_utils::MonotonicDurationHelper(&fake_system_state,
-                                                     &storage,
-                                                     &duration));
+  EXPECT_TRUE(metrics_utils::MonotonicDurationHelper(
+      &fake_system_state, &storage, &duration));
   EXPECT_EQ(duration.InSeconds(), 0);
 
   // We can also call it as many times as we want without it being
   // considered a failure.
-  EXPECT_TRUE(metrics_utils::MonotonicDurationHelper(&fake_system_state,
-                                                     &storage,
-                                                     &duration));
+  EXPECT_TRUE(metrics_utils::MonotonicDurationHelper(
+      &fake_system_state, &storage, &duration));
   EXPECT_EQ(duration.InSeconds(), 0);
-  EXPECT_TRUE(metrics_utils::MonotonicDurationHelper(&fake_system_state,
-                                                     &storage,
-                                                     &duration));
+  EXPECT_TRUE(metrics_utils::MonotonicDurationHelper(
+      &fake_system_state, &storage, &duration));
   EXPECT_EQ(duration.InSeconds(), 0);
 
   // Advance the clock one second, then we should get 1 sec on the
   // next call and 0 sec on the subsequent call.
   fake_clock.SetMonotonicTime(base::Time::FromInternalValue(2000000));
-  EXPECT_TRUE(metrics_utils::MonotonicDurationHelper(&fake_system_state,
-                                                     &storage,
-                                                     &duration));
+  EXPECT_TRUE(metrics_utils::MonotonicDurationHelper(
+      &fake_system_state, &storage, &duration));
   EXPECT_EQ(duration.InSeconds(), 1);
-  EXPECT_TRUE(metrics_utils::MonotonicDurationHelper(&fake_system_state,
-                                                     &storage,
-                                                     &duration));
+  EXPECT_TRUE(metrics_utils::MonotonicDurationHelper(
+      &fake_system_state, &storage, &duration));
   EXPECT_EQ(duration.InSeconds(), 0);
 
   // Advance clock two seconds and we should get 2 sec and then 0 sec.
   fake_clock.SetMonotonicTime(base::Time::FromInternalValue(4000000));
-  EXPECT_TRUE(metrics_utils::MonotonicDurationHelper(&fake_system_state,
-                                                     &storage,
-                                                     &duration));
+  EXPECT_TRUE(metrics_utils::MonotonicDurationHelper(
+      &fake_system_state, &storage, &duration));
   EXPECT_EQ(duration.InSeconds(), 2);
-  EXPECT_TRUE(metrics_utils::MonotonicDurationHelper(&fake_system_state,
-                                                     &storage,
-                                                     &duration));
+  EXPECT_TRUE(metrics_utils::MonotonicDurationHelper(
+      &fake_system_state, &storage, &duration));
   EXPECT_EQ(duration.InSeconds(), 0);
 }
 
diff --git a/mock_boot_control_hal.h b/mock_boot_control_hal.h
new file mode 100644
index 0000000..4e9cb50
--- /dev/null
+++ b/mock_boot_control_hal.h
@@ -0,0 +1,49 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <android/hardware/boot/1.0/IBootControl.h>
+#include <stdint.h>
+
+#include <gmock/gmock.h>
+
+namespace chromeos_update_engine {
+
+class MockBootControlHal
+    : public ::android::hardware::boot::V1_0::IBootControl {
+ public:
+  MOCK_METHOD0(getNumberSlots, ::android::hardware::Return<uint32_t>());
+  MOCK_METHOD0(getCurrentSlot, ::android::hardware::Return<uint32_t>());
+  MOCK_METHOD1(markBootSuccessful,
+               ::android::hardware::Return<void>(markBootSuccessful_cb));
+  MOCK_METHOD2(setActiveBootSlot,
+               ::android::hardware::Return<void>(uint32_t,
+                                                 setActiveBootSlot_cb));
+  MOCK_METHOD2(setSlotAsUnbootable,
+               ::android::hardware::Return<void>(uint32_t,
+                                                 setSlotAsUnbootable_cb));
+  MOCK_METHOD1(
+      isSlotBootable,
+      ::android::hardware::Return<::android::hardware::boot::V1_0::BoolResult>(
+          uint32_t));
+  MOCK_METHOD1(
+      isSlotMarkedSuccessful,
+      ::android::hardware::Return<::android::hardware::boot::V1_0::BoolResult>(
+          uint32_t));
+  MOCK_METHOD2(getSuffix,
+               ::android::hardware::Return<void>(uint32_t, getSuffix_cb));
+};
+
+}  // namespace chromeos_update_engine
diff --git a/mock_connection_manager.h b/mock_connection_manager.h
index e37460b..2fff68c 100644
--- a/mock_connection_manager.h
+++ b/mock_connection_manager.h
@@ -36,6 +36,7 @@
 
   MOCK_CONST_METHOD2(IsUpdateAllowedOver,
                      bool(ConnectionType type, ConnectionTethering tethering));
+  MOCK_CONST_METHOD0(IsAllowedConnectionTypesForUpdateSet, bool());
 };
 
 }  // namespace chromeos_update_engine
diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h
new file mode 100644
index 0000000..24aca06
--- /dev/null
+++ b/mock_dynamic_partition_control.h
@@ -0,0 +1,53 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+
+#include <gmock/gmock.h>
+
+#include "update_engine/dynamic_partition_control_interface.h"
+
+namespace chromeos_update_engine {
+
+class MockDynamicPartitionControl : public DynamicPartitionControlInterface {
+ public:
+  MOCK_METHOD5(MapPartitionOnDeviceMapper,
+               bool(const std::string&,
+                    const std::string&,
+                    uint32_t,
+                    bool,
+                    std::string*));
+  MOCK_METHOD2(UnmapPartitionOnDeviceMapper, bool(const std::string&, bool));
+  MOCK_METHOD0(Cleanup, void());
+  MOCK_METHOD1(DeviceExists, bool(const std::string&));
+  MOCK_METHOD1(GetState, ::android::dm::DmDeviceState(const std::string&));
+  MOCK_METHOD2(GetDmDevicePathByName, bool(const std::string&, std::string*));
+  MOCK_METHOD3(LoadMetadataBuilder,
+               std::unique_ptr<::android::fs_mgr::MetadataBuilder>(
+                   const std::string&, uint32_t, uint32_t));
+  MOCK_METHOD3(StoreMetadata,
+               bool(const std::string&,
+                    android::fs_mgr::MetadataBuilder*,
+                    uint32_t));
+  MOCK_METHOD1(GetDeviceDir, bool(std::string*));
+  MOCK_METHOD0(IsDynamicPartitionsEnabled, bool());
+  MOCK_METHOD0(IsDynamicPartitionsRetrofit, bool());
+};
+
+}  // namespace chromeos_update_engine
diff --git a/mock_metrics_reporter.h b/mock_metrics_reporter.h
index a0f164b..baf3a78 100644
--- a/mock_metrics_reporter.h
+++ b/mock_metrics_reporter.h
@@ -14,8 +14,10 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_MOCK_METRICS_REPORTER_H
-#define UPDATE_ENGINE_MOCK_METRICS_REPORTER_H
+#ifndef UPDATE_ENGINE_MOCK_METRICS_REPORTER_H_
+#define UPDATE_ENGINE_MOCK_METRICS_REPORTER_H_
+
+#include <string>
 
 #include <gmock/gmock.h>
 
@@ -29,6 +31,9 @@
 
   MOCK_METHOD1(ReportRollbackMetrics, void(metrics::RollbackResult result));
 
+  MOCK_METHOD2(ReportEnterpriseRollbackMetrics,
+               void(bool success, const std::string& rollback_version));
+
   MOCK_METHOD1(ReportDailyMetrics, void(base::TimeDelta os_age));
 
   MOCK_METHOD4(ReportUpdateCheckMetrics,
@@ -56,16 +61,17 @@
 
   MOCK_METHOD0(ReportAbnormallyTerminatedUpdateAttemptMetrics, void());
 
-  MOCK_METHOD9(ReportSuccessfulUpdateMetrics,
-               void(int attempt_count,
-                    int updates_abandoned_count,
-                    PayloadType payload_type,
-                    int64_t payload_size,
-                    int64_t num_bytes_downloaded[kNumDownloadSources],
-                    int download_overhead_percentage,
-                    base::TimeDelta total_duration,
-                    int reboot_count,
-                    int url_switch_count));
+  MOCK_METHOD10(ReportSuccessfulUpdateMetrics,
+                void(int attempt_count,
+                     int updates_abandoned_count,
+                     PayloadType payload_type,
+                     int64_t payload_size,
+                     int64_t num_bytes_downloaded[kNumDownloadSources],
+                     int download_overhead_percentage,
+                     base::TimeDelta total_duration,
+                     base::TimeDelta total_duration_uptime,
+                     int reboot_count,
+                     int url_switch_count));
 
   MOCK_METHOD2(ReportCertificateCheckMetrics,
                void(ServerToCheck server_to_check,
@@ -76,8 +82,18 @@
   MOCK_METHOD1(ReportTimeToReboot, void(int time_to_reboot_minutes));
 
   MOCK_METHOD2(ReportInstallDateProvisioningSource, void(int source, int max));
+
+  MOCK_METHOD1(ReportInternalErrorCode, void(ErrorCode error_code));
+
+  MOCK_METHOD3(ReportKeyVersionMetrics,
+               void(int kernel_min_version,
+                    int kernel_max_rollforward_version,
+                    bool kernel_max_rollforward_success));
+
+  MOCK_METHOD2(ReportEnterpriseUpdateSeenToDownloadDays,
+               void(bool has_time_restriction_policy, int time_to_update_days));
 };
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_MOCK_METRICS_REPORTER_H
+#endif  // UPDATE_ENGINE_MOCK_METRICS_REPORTER_H_
diff --git a/mock_omaha_request_params.h b/mock_omaha_request_params.h
index 6d8d3d8..41bdc19 100644
--- a/mock_omaha_request_params.h
+++ b/mock_omaha_request_params.h
@@ -33,8 +33,8 @@
     // migration from tests using the real RequestParams when they should have
     // used a fake or mock.
     ON_CALL(*this, GetAppId())
-        .WillByDefault(testing::Invoke(
-            this, &MockOmahaRequestParams::FakeGetAppId));
+        .WillByDefault(
+            testing::Invoke(this, &MockOmahaRequestParams::FakeGetAppId));
     ON_CALL(*this, SetTargetChannel(testing::_, testing::_, testing::_))
         .WillByDefault(testing::Invoke(
             this, &MockOmahaRequestParams::FakeSetTargetChannel));
@@ -47,9 +47,11 @@
   }
 
   MOCK_CONST_METHOD0(GetAppId, std::string(void));
-  MOCK_METHOD3(SetTargetChannel, bool(const std::string& channel,
-                                      bool is_powerwash_allowed,
-                                      std::string* error));
+  MOCK_METHOD3(SetTargetChannel,
+               bool(const std::string& channel,
+                    bool is_powerwash_allowed,
+                    std::string* error));
+  MOCK_CONST_METHOD0(target_version_prefix, std::string(void));
   MOCK_METHOD0(UpdateDownloadChannel, void(void));
   MOCK_CONST_METHOD0(IsUpdateUrlOfficial, bool(void));
   MOCK_CONST_METHOD0(ShouldPowerwash, bool(void));
@@ -57,16 +59,13 @@
  private:
   // Wrappers to call the parent class and behave like the real object by
   // default. See "Delegating Calls to a Parent Class" in gmock's documentation.
-  std::string FakeGetAppId() const {
-    return OmahaRequestParams::GetAppId();
-  }
+  std::string FakeGetAppId() const { return OmahaRequestParams::GetAppId(); }
 
   bool FakeSetTargetChannel(const std::string& channel,
                             bool is_powerwash_allowed,
                             std::string* error) {
-    return OmahaRequestParams::SetTargetChannel(channel,
-                                                is_powerwash_allowed,
-                                                error);
+    return OmahaRequestParams::SetTargetChannel(
+        channel, is_powerwash_allowed, error);
   }
 
   void FakeUpdateDownloadChannel() {
diff --git a/mock_p2p_manager.h b/mock_p2p_manager.h
index 5f4418e..fd67034 100644
--- a/mock_p2p_manager.h
+++ b/mock_p2p_manager.h
@@ -31,45 +31,41 @@
   MockP2PManager() {
     // Delegate all calls to the fake instance
     ON_CALL(*this, SetDevicePolicy(testing::_))
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeP2PManager::SetDevicePolicy));
+        .WillByDefault(
+            testing::Invoke(&fake_, &FakeP2PManager::SetDevicePolicy));
     ON_CALL(*this, IsP2PEnabled())
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeP2PManager::IsP2PEnabled));
+        .WillByDefault(testing::Invoke(&fake_, &FakeP2PManager::IsP2PEnabled));
     ON_CALL(*this, EnsureP2PRunning())
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeP2PManager::EnsureP2PRunning));
+        .WillByDefault(
+            testing::Invoke(&fake_, &FakeP2PManager::EnsureP2PRunning));
     ON_CALL(*this, EnsureP2PNotRunning())
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeP2PManager::EnsureP2PNotRunning));
+        .WillByDefault(
+            testing::Invoke(&fake_, &FakeP2PManager::EnsureP2PNotRunning));
     ON_CALL(*this, PerformHousekeeping())
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeP2PManager::PerformHousekeeping));
-    ON_CALL(*this, LookupUrlForFile(testing::_, testing::_, testing::_,
-                                    testing::_))
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeP2PManager::LookupUrlForFile));
+        .WillByDefault(
+            testing::Invoke(&fake_, &FakeP2PManager::PerformHousekeeping));
+    ON_CALL(*this,
+            LookupUrlForFile(testing::_, testing::_, testing::_, testing::_))
+        .WillByDefault(
+            testing::Invoke(&fake_, &FakeP2PManager::LookupUrlForFile));
     ON_CALL(*this, FileShare(testing::_, testing::_))
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeP2PManager::FileShare));
+        .WillByDefault(testing::Invoke(&fake_, &FakeP2PManager::FileShare));
     ON_CALL(*this, FileGetPath(testing::_))
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeP2PManager::FileGetPath));
+        .WillByDefault(testing::Invoke(&fake_, &FakeP2PManager::FileGetPath));
     ON_CALL(*this, FileGetSize(testing::_))
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeP2PManager::FileGetSize));
+        .WillByDefault(testing::Invoke(&fake_, &FakeP2PManager::FileGetSize));
     ON_CALL(*this, FileGetExpectedSize(testing::_))
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeP2PManager::FileGetExpectedSize));
+        .WillByDefault(
+            testing::Invoke(&fake_, &FakeP2PManager::FileGetExpectedSize));
     ON_CALL(*this, FileGetVisible(testing::_, testing::_))
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeP2PManager::FileGetVisible));
+        .WillByDefault(
+            testing::Invoke(&fake_, &FakeP2PManager::FileGetVisible));
     ON_CALL(*this, FileMakeVisible(testing::_))
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeP2PManager::FileMakeVisible));
+        .WillByDefault(
+            testing::Invoke(&fake_, &FakeP2PManager::FileMakeVisible));
     ON_CALL(*this, CountSharedFiles())
-      .WillByDefault(testing::Invoke(&fake_,
-            &FakeP2PManager::CountSharedFiles));
+        .WillByDefault(
+            testing::Invoke(&fake_, &FakeP2PManager::CountSharedFiles));
   }
 
   ~MockP2PManager() override {}
@@ -80,10 +76,9 @@
   MOCK_METHOD0(EnsureP2PRunning, bool());
   MOCK_METHOD0(EnsureP2PNotRunning, bool());
   MOCK_METHOD0(PerformHousekeeping, bool());
-  MOCK_METHOD4(LookupUrlForFile, void(const std::string&,
-                                      size_t,
-                                      base::TimeDelta,
-                                      LookupCallback));
+  MOCK_METHOD4(
+      LookupUrlForFile,
+      void(const std::string&, size_t, base::TimeDelta, LookupCallback));
   MOCK_METHOD2(FileShare, bool(const std::string&, size_t));
   MOCK_METHOD1(FileGetPath, base::FilePath(const std::string&));
   MOCK_METHOD1(FileGetSize, ssize_t(const std::string&));
@@ -93,9 +88,7 @@
   MOCK_METHOD0(CountSharedFiles, int());
 
   // Returns a reference to the underlying FakeP2PManager.
-  FakeP2PManager& fake() {
-    return fake_;
-  }
+  FakeP2PManager& fake() { return fake_; }
 
  private:
   // The underlying FakeP2PManager.
diff --git a/mock_payload_state.h b/mock_payload_state.h
index 6dccc64..ad22de5 100644
--- a/mock_payload_state.h
+++ b/mock_payload_state.h
@@ -26,11 +26,9 @@
 
 namespace chromeos_update_engine {
 
-class MockPayloadState: public PayloadStateInterface {
+class MockPayloadState : public PayloadStateInterface {
  public:
-  bool Initialize(SystemState* system_state) {
-    return true;
-  }
+  bool Initialize(SystemState* system_state) { return true; }
 
   // Significant methods.
   MOCK_METHOD1(SetResponse, void(const OmahaResponse& response));
@@ -53,6 +51,7 @@
   MOCK_METHOD1(SetScatteringWaitPeriod, void(base::TimeDelta));
   MOCK_METHOD1(SetP2PUrl, void(const std::string&));
   MOCK_METHOD0(NextPayload, bool());
+  MOCK_METHOD1(SetStagingWaitPeriod, void(base::TimeDelta));
 
   // Getters.
   MOCK_METHOD0(GetResponseSignature, std::string());
@@ -68,6 +67,8 @@
   MOCK_METHOD1(GetCurrentBytesDownloaded, uint64_t(DownloadSource source));
   MOCK_METHOD1(GetTotalBytesDownloaded, uint64_t(DownloadSource source));
   MOCK_METHOD0(GetNumReboots, uint32_t());
+  MOCK_METHOD0(GetRollbackHappened, bool());
+  MOCK_METHOD1(SetRollbackHappened, void(bool));
   MOCK_METHOD0(GetRollbackVersion, std::string());
   MOCK_METHOD0(GetP2PNumAttempts, int());
   MOCK_METHOD0(GetP2PFirstAttemptTimestamp, base::Time());
@@ -75,7 +76,7 @@
   MOCK_CONST_METHOD0(GetUsingP2PForSharing, bool());
   MOCK_METHOD0(GetScatteringWaitPeriod, base::TimeDelta());
   MOCK_CONST_METHOD0(GetP2PUrl, std::string());
-  MOCK_CONST_METHOD0(GetAttemptErrorCode, ErrorCode());
+  MOCK_METHOD0(GetStagingWaitPeriod, base::TimeDelta());
 };
 
 }  // namespace chromeos_update_engine
diff --git a/mock_update_attempter.h b/mock_update_attempter.h
index d88b840..5df5a6b 100644
--- a/mock_update_attempter.h
+++ b/mock_update_attempter.h
@@ -18,6 +18,7 @@
 #define UPDATE_ENGINE_MOCK_UPDATE_ATTEMPTER_H_
 
 #include <string>
+#include <vector>
 
 #include "update_engine/update_attempter.h"
 
@@ -29,12 +30,14 @@
  public:
   using UpdateAttempter::UpdateAttempter;
 
-  MOCK_METHOD6(Update, void(const std::string& app_version,
-                            const std::string& omaha_url,
-                            const std::string& target_channel,
-                            const std::string& target_version_prefix,
-                            bool obey_proxies,
-                            bool interactive));
+  MOCK_METHOD7(Update,
+               void(const std::string& app_version,
+                    const std::string& omaha_url,
+                    const std::string& target_channel,
+                    const std::string& target_version_prefix,
+                    bool rollback_allowed,
+                    bool obey_proxies,
+                    bool interactive));
 
   MOCK_METHOD1(GetStatus, bool(update_engine::UpdateEngineStatus* out_status));
 
@@ -42,20 +45,22 @@
 
   MOCK_METHOD0(ResetStatus, bool(void));
 
-  MOCK_METHOD0(GetCurrentUpdateAttemptFlags, UpdateAttemptFlags(void));
+  MOCK_CONST_METHOD0(GetCurrentUpdateAttemptFlags, UpdateAttemptFlags(void));
 
   MOCK_METHOD3(CheckForUpdate,
                bool(const std::string& app_version,
                     const std::string& omaha_url,
                     UpdateAttemptFlags flags));
 
+  MOCK_METHOD2(CheckForInstall,
+               bool(const std::vector<std::string>& dlc_module_ids,
+                    const std::string& omaha_url));
+
   MOCK_METHOD0(RefreshDevicePolicy, void(void));
 
   MOCK_CONST_METHOD0(consecutive_failed_update_checks, unsigned int(void));
 
   MOCK_CONST_METHOD0(server_dictated_poll_interval, unsigned int(void));
-
-  MOCK_METHOD0(IsAnyUpdateSourceAllowed, bool(void));
 };
 
 }  // namespace chromeos_update_engine
diff --git a/network_selector_interface.h b/network_selector_interface.h
index 6c17b2c..bd0948f 100644
--- a/network_selector_interface.h
+++ b/network_selector_interface.h
@@ -32,7 +32,6 @@
 
 class NetworkSelectorInterface {
  public:
-
   virtual ~NetworkSelectorInterface() = default;
 
   // Set the current process network. All sockets created in the future will be
diff --git a/omaha_request_action.cc b/omaha_request_action.cc
index d58612c..fae9471 100644
--- a/omaha_request_action.cc
+++ b/omaha_request_action.cc
@@ -18,6 +18,7 @@
 
 #include <inttypes.h>
 
+#include <limits>
 #include <map>
 #include <sstream>
 #include <string>
@@ -35,6 +36,7 @@
 #include <brillo/key_value_store.h>
 #include <expat.h>
 #include <metrics/metrics_library.h>
+#include <policy/libpolicy.h>
 
 #include "update_engine/common/action_pipe.h"
 #include "update_engine/common/constants.h"
@@ -52,40 +54,70 @@
 
 using base::Time;
 using base::TimeDelta;
+using chromeos_update_manager::kRollforwardInfinity;
 using std::map;
+using std::numeric_limits;
 using std::string;
 using std::vector;
 
 namespace chromeos_update_engine {
 
-// List of custom pair tags that we interpret in the Omaha Response:
-static const char* kTagDeadline = "deadline";
-static const char* kTagDisablePayloadBackoff = "DisablePayloadBackoff";
-static const char* kTagVersion = "version";
+// List of custom attributes that we interpret in the Omaha response:
+constexpr char kAttrDeadline[] = "deadline";
+constexpr char kAttrDisableP2PForDownloading[] = "DisableP2PForDownloading";
+constexpr char kAttrDisableP2PForSharing[] = "DisableP2PForSharing";
+constexpr char kAttrDisablePayloadBackoff[] = "DisablePayloadBackoff";
+constexpr char kAttrVersion[] = "version";
 // Deprecated: "IsDelta"
-static const char* kTagIsDeltaPayload = "IsDeltaPayload";
-static const char* kTagMaxFailureCountPerUrl = "MaxFailureCountPerUrl";
-static const char* kTagMaxDaysToScatter = "MaxDaysToScatter";
+constexpr char kAttrIsDeltaPayload[] = "IsDeltaPayload";
+constexpr char kAttrMaxFailureCountPerUrl[] = "MaxFailureCountPerUrl";
+constexpr char kAttrMaxDaysToScatter[] = "MaxDaysToScatter";
 // Deprecated: "ManifestSignatureRsa"
 // Deprecated: "ManifestSize"
-static const char* kTagMetadataSignatureRsa = "MetadataSignatureRsa";
-static const char* kTagMetadataSize = "MetadataSize";
-static const char* kTagMoreInfo = "MoreInfo";
+constexpr char kAttrMetadataSignatureRsa[] = "MetadataSignatureRsa";
+constexpr char kAttrMetadataSize[] = "MetadataSize";
+constexpr char kAttrMoreInfo[] = "MoreInfo";
+constexpr char kAttrNoUpdate[] = "noupdate";
 // Deprecated: "NeedsAdmin"
-static const char* kTagPrompt = "Prompt";
-static const char* kTagDisableP2PForDownloading = "DisableP2PForDownloading";
-static const char* kTagDisableP2PForSharing = "DisableP2PForSharing";
-static const char* kTagPublicKeyRsa = "PublicKeyRsa";
+constexpr char kAttrPollInterval[] = "PollInterval";
+constexpr char kAttrPowerwash[] = "Powerwash";
+constexpr char kAttrPrompt[] = "Prompt";
+constexpr char kAttrPublicKeyRsa[] = "PublicKeyRsa";
 
-static const char* kOmahaUpdaterVersion = "0.1.0.0";
+// List of attributes that we interpret in the Omaha response:
+constexpr char kAttrAppId[] = "appid";
+constexpr char kAttrCodeBase[] = "codebase";
+constexpr char kAttrCohort[] = "cohort";
+constexpr char kAttrCohortHint[] = "cohorthint";
+constexpr char kAttrCohortName[] = "cohortname";
+constexpr char kAttrElapsedDays[] = "elapsed_days";
+constexpr char kAttrElapsedSeconds[] = "elapsed_seconds";
+constexpr char kAttrEvent[] = "event";
+constexpr char kAttrHashSha256[] = "hash_sha256";
+// Deprecated: "hash"; Although we still need to pass it from the server for
+// backward compatibility.
+constexpr char kAttrName[] = "name";
+// Deprecated: "sha256"; Although we still need to pass it from the server for
+// backward compatibility.
+constexpr char kAttrSize[] = "size";
+constexpr char kAttrStatus[] = "status";
 
-// X-GoogleUpdate headers.
-static const char* kXGoogleUpdateInteractivity = "X-GoogleUpdate-Interactivity";
-static const char* kXGoogleUpdateAppId = "X-GoogleUpdate-AppId";
-static const char* kXGoogleUpdateUpdater = "X-GoogleUpdate-Updater";
+// List of values that we interpret in the Omaha response:
+constexpr char kValPostInstall[] = "postinstall";
+constexpr char kValNoUpdate[] = "noupdate";
+
+constexpr char kOmahaUpdaterVersion[] = "0.1.0.0";
+
+// X-Goog-Update headers.
+constexpr char kXGoogleUpdateInteractivity[] = "X-Goog-Update-Interactivity";
+constexpr char kXGoogleUpdateAppId[] = "X-Goog-Update-AppId";
+constexpr char kXGoogleUpdateUpdater[] = "X-Goog-Update-Updater";
 
 // updatecheck attributes (without the underscore prefix).
-static const char* kEolAttr = "eol";
+constexpr char kAttrEol[] = "eol";
+constexpr char kAttrRollback[] = "rollback";
+constexpr char kAttrFirmwareVersion[] = "firmware_version";
+constexpr char kAttrKernelVersion[] = "kernel_version";
 
 namespace {
 
@@ -117,18 +149,29 @@
                   OmahaRequestParams* params,
                   bool ping_only,
                   bool include_ping,
+                  bool skip_updatecheck,
                   int ping_active_days,
                   int ping_roll_call_days,
                   PrefsInterface* prefs) {
   string app_body;
   if (event == nullptr) {
     if (include_ping)
-        app_body = GetPingXml(ping_active_days, ping_roll_call_days);
+      app_body = GetPingXml(ping_active_days, ping_roll_call_days);
     if (!ping_only) {
-      app_body += base::StringPrintf(
-          "        <updatecheck targetversionprefix=\"%s\""
-          "></updatecheck>\n",
-          XmlEncodeWithDefault(params->target_version_prefix(), "").c_str());
+      if (!skip_updatecheck) {
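+        // Builds the <updatecheck> element, e.g.:
+        //   <updatecheck targetversionprefix="10575."
+        //                rollback_allowed="true"></updatecheck>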
+        app_body += "        <updatecheck";
+        if (!params->target_version_prefix().empty()) {
+          app_body += base::StringPrintf(
+              " targetversionprefix=\"%s\"",
+              XmlEncodeWithDefault(params->target_version_prefix(), "")
+                  .c_str());
+          // Rollback requires target_version_prefix set.
+          if (params->rollback_allowed()) {
+            app_body += " rollback_allowed=\"true\"";
+          }
+        }
+        app_body += "></updatecheck>\n";
+      }
 
       // If this is the first update check after a reboot following a previous
       // update, generate an event containing the previous version number. If
@@ -166,7 +209,9 @@
     }
     app_body = base::StringPrintf(
         "        <event eventtype=\"%d\" eventresult=\"%d\"%s></event>\n",
-        event->type, event->result, error_code.c_str());
+        event->type,
+        event->result,
+        error_code.c_str());
   }
 
   return app_body;
@@ -179,7 +224,7 @@
                        const string arg_name,
                        const string prefs_key) {
   // There's nothing wrong with not having a given cohort setting, so we check
-  // existance first to avoid the warning log message.
+  // existence first to avoid the warning log message.
   if (!prefs->Exists(prefs_key))
     return "";
   string cohort_value;
@@ -202,8 +247,8 @@
     return "";
   }
 
-  return base::StringPrintf("%s=\"%s\" ",
-                            arg_name.c_str(), escaped_xml_value.c_str());
+  return base::StringPrintf(
+      "%s=\"%s\" ", arg_name.c_str(), escaped_xml_value.c_str());
 }
 
 struct OmahaAppData {
@@ -227,12 +272,18 @@
                  const OmahaAppData& app_data,
                  bool ping_only,
                  bool include_ping,
+                 bool skip_updatecheck,
                  int ping_active_days,
                  int ping_roll_call_days,
                  int install_date_in_days,
                  SystemState* system_state) {
-  string app_body = GetAppBody(event, params, ping_only, include_ping,
-                               ping_active_days, ping_roll_call_days,
+  string app_body = GetAppBody(event,
+                               params,
+                               ping_only,
+                               include_ping,
+                               skip_updatecheck,
+                               ping_active_days,
+                               ping_roll_call_days,
                                system_state->prefs());
   string app_versions;
 
@@ -253,8 +304,8 @@
   string app_channels =
       "track=\"" + XmlEncodeWithDefault(download_channel, "") + "\" ";
   if (params->current_channel() != download_channel) {
-    app_channels += "from_track=\"" + XmlEncodeWithDefault(
-        params->current_channel(), "") + "\" ";
+    app_channels += "from_track=\"" +
+                    XmlEncodeWithDefault(params->current_channel(), "") + "\" ";
   }
 
   string delta_okay_str = params->delta_okay() ? "true" : "false";
@@ -263,17 +314,17 @@
   // include the attribute.
   string install_date_in_days_str = "";
   if (install_date_in_days >= 0) {
-    install_date_in_days_str = base::StringPrintf("installdate=\"%d\" ",
-                                                  install_date_in_days);
+    install_date_in_days_str =
+        base::StringPrintf("installdate=\"%d\" ", install_date_in_days);
   }
 
   string app_cohort_args;
-  app_cohort_args += GetCohortArgXml(system_state->prefs(),
-                                     "cohort", kPrefsOmahaCohort);
-  app_cohort_args += GetCohortArgXml(system_state->prefs(),
-                                     "cohorthint", kPrefsOmahaCohortHint);
-  app_cohort_args += GetCohortArgXml(system_state->prefs(),
-                                     "cohortname", kPrefsOmahaCohortName);
+  app_cohort_args +=
+      GetCohortArgXml(system_state->prefs(), "cohort", kPrefsOmahaCohort);
+  app_cohort_args += GetCohortArgXml(
+      system_state->prefs(), "cohorthint", kPrefsOmahaCohortHint);
+  app_cohort_args += GetCohortArgXml(
+      system_state->prefs(), "cohortname", kPrefsOmahaCohortName);
 
   string fingerprint_arg;
   if (!params->os_build_fingerprint().empty()) {
@@ -340,10 +391,13 @@
 // Returns an XML that corresponds to the entire <os> node of the Omaha
 // request based on the given parameters.
 string GetOsXml(OmahaRequestParams* params) {
-  string os_xml ="    <os "
-      "version=\"" + XmlEncodeWithDefault(params->os_version(), "") + "\" " +
-      "platform=\"" + XmlEncodeWithDefault(params->os_platform(), "") + "\" " +
-      "sp=\"" + XmlEncodeWithDefault(params->os_sp(), "") + "\">"
+  string os_xml =
+      "    <os "
+      "version=\"" +
+      XmlEncodeWithDefault(params->os_version(), "") + "\" " + "platform=\"" +
+      XmlEncodeWithDefault(params->os_platform(), "") + "\" " + "sp=\"" +
+      XmlEncodeWithDefault(params->os_sp(), "") +
+      "\">"
       "</os>\n";
   return os_xml;
 }
@@ -363,11 +417,13 @@
       .id = params->GetAppId(),
       .version = params->app_version(),
       .product_components = params->product_components()};
+  // Skips the updatecheck for the platform app during an install operation.
   string app_xml = GetAppXml(event,
                              params,
                              product_app,
                              ping_only,
                              include_ping,
+                             params->is_install(), /* skip_updatecheck */
                              ping_active_days,
                              ping_roll_call_days,
                              install_date_in_days,
@@ -380,29 +436,46 @@
                          system_app,
                          ping_only,
                          include_ping,
+                         false, /* skip_updatecheck */
+                         ping_active_days,
+                         ping_roll_call_days,
+                         install_date_in_days,
+                         system_state);
+  }
+  // Create the APP ID according to |dlc_module_id| (appending the DLC module
+  // ID to the current AppID with an underscore).
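+  // The resulting ID has the form "<current_app_id>_<dlc_module_id>".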
+  for (const auto& dlc_module_id : params->dlc_module_ids()) {
+    OmahaAppData dlc_module_app = {
+        .id = params->GetAppId() + "_" + dlc_module_id,
+        .version = params->app_version()};
+    app_xml += GetAppXml(event,
+                         params,
+                         dlc_module_app,
+                         ping_only,
+                         include_ping,
+                         false, /* skip_updatecheck */
                          ping_active_days,
                          ping_roll_call_days,
                          install_date_in_days,
                          system_state);
   }
 
-  string install_source = base::StringPrintf("installsource=\"%s\" ",
+  string install_source = base::StringPrintf(
+      "installsource=\"%s\" ",
       (params->interactive() ? "ondemandupdate" : "scheduler"));
 
   string updater_version = XmlEncodeWithDefault(
-      base::StringPrintf("%s-%s",
-                         constants::kOmahaUpdaterID,
-                         kOmahaUpdaterVersion), "");
+      base::StringPrintf(
+          "%s-%s", constants::kOmahaUpdaterID, kOmahaUpdaterVersion),
+      "");
   string request_xml =
       "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
-      "<request protocol=\"3.0\" " + (
-          "version=\"" + updater_version + "\" "
-          "updaterversion=\"" + updater_version + "\" " +
-          install_source +
-          "ismachine=\"1\">\n") +
-      os_xml +
-      app_xml +
-      "</request>\n";
+      "<request protocol=\"3.0\" " +
+      ("version=\"" + updater_version +
+       "\" "
+       "updaterversion=\"" +
+       updater_version + "\" " + install_source + "ismachine=\"1\">\n") +
+      os_xml + app_xml + "</request>\n";
 
   return request_xml;
 }
@@ -453,7 +526,8 @@
 namespace {
 
 // Callback function invoked by expat.
-void ParserHandlerStart(void* user_data, const XML_Char* element,
+void ParserHandlerStart(void* user_data,
+                        const XML_Char* element,
                         const XML_Char** attr) {
   OmahaParserData* data = reinterpret_cast<OmahaParserData*>(user_data);
 
@@ -464,7 +538,7 @@
 
   map<string, string> attrs;
   if (attr != nullptr) {
-    for (int n = 0; attr[n] != nullptr && attr[n+1] != nullptr; n += 2) {
+    for (int n = 0; attr[n] != nullptr && attr[n + 1] != nullptr; n += 2) {
       string key = attr[n];
       string value = attr[n + 1];
       attrs[key] = value;
@@ -473,27 +547,27 @@
 
   if (data->current_path == "/response/app") {
     OmahaParserData::App app;
-    if (attrs.find("appid") != attrs.end()) {
-      app.id = attrs["appid"];
+    if (attrs.find(kAttrAppId) != attrs.end()) {
+      app.id = attrs[kAttrAppId];
     }
-    if (attrs.find("cohort") != attrs.end()) {
+    if (attrs.find(kAttrCohort) != attrs.end()) {
       app.cohort_set = true;
-      app.cohort = attrs["cohort"];
+      app.cohort = attrs[kAttrCohort];
     }
-    if (attrs.find("cohorthint") != attrs.end()) {
+    if (attrs.find(kAttrCohortHint) != attrs.end()) {
       app.cohorthint_set = true;
-      app.cohorthint = attrs["cohorthint"];
+      app.cohorthint = attrs[kAttrCohortHint];
     }
-    if (attrs.find("cohortname") != attrs.end()) {
+    if (attrs.find(kAttrCohortName) != attrs.end()) {
       app.cohortname_set = true;
-      app.cohortname = attrs["cohortname"];
+      app.cohortname = attrs[kAttrCohortName];
     }
     data->apps.push_back(std::move(app));
   } else if (data->current_path == "/response/app/updatecheck") {
     if (!data->apps.empty())
-      data->apps.back().updatecheck_status = attrs["status"];
+      data->apps.back().updatecheck_status = attrs[kAttrStatus];
     if (data->updatecheck_poll_interval.empty())
-      data->updatecheck_poll_interval = attrs["PollInterval"];
+      data->updatecheck_poll_interval = attrs[kAttrPollInterval];
     // Omaha sends arbitrary key-value pairs as extra attributes starting with
     // an underscore.
     for (const auto& attr : attrs) {
@@ -502,27 +576,27 @@
     }
   } else if (data->current_path == "/response/daystart") {
     // Get the install-date.
-    data->daystart_elapsed_days = attrs["elapsed_days"];
-    data->daystart_elapsed_seconds = attrs["elapsed_seconds"];
+    data->daystart_elapsed_days = attrs[kAttrElapsedDays];
+    data->daystart_elapsed_seconds = attrs[kAttrElapsedSeconds];
   } else if (data->current_path == "/response/app/updatecheck/urls/url") {
     // Look at all <url> elements.
     if (!data->apps.empty())
-      data->apps.back().url_codebase.push_back(attrs["codebase"]);
+      data->apps.back().url_codebase.push_back(attrs[kAttrCodeBase]);
   } else if (data->current_path ==
              "/response/app/updatecheck/manifest/packages/package") {
     // Look at all <package> elements.
     if (!data->apps.empty())
-      data->apps.back().packages.push_back({.name = attrs["name"],
-                                            .size = attrs["size"],
-                                            .hash = attrs["hash_sha256"]});
+      data->apps.back().packages.push_back({.name = attrs[kAttrName],
+                                            .size = attrs[kAttrSize],
+                                            .hash = attrs[kAttrHashSha256]});
   } else if (data->current_path == "/response/app/updatecheck/manifest") {
     // Get the version.
     if (!data->apps.empty())
-      data->apps.back().manifest_version = attrs[kTagVersion];
+      data->apps.back().manifest_version = attrs[kAttrVersion];
   } else if (data->current_path ==
              "/response/app/updatecheck/manifest/actions/action") {
     // We only care about the postinstall action.
-    if (attrs["event"] == "postinstall" && !data->apps.empty()) {
+    if (attrs[kAttrEvent] == kValPostInstall && !data->apps.empty()) {
       data->apps.back().action_postinstall_attrs = std::move(attrs);
     }
   }
@@ -536,8 +610,8 @@
 
   const string path_suffix = string("/") + element;
 
-  if (!base::EndsWith(data->current_path, path_suffix,
-                      base::CompareCase::SENSITIVE)) {
+  if (!base::EndsWith(
+          data->current_path, path_suffix, base::CompareCase::SENSITIVE)) {
     LOG(ERROR) << "Unexpected end element '" << element
                << "' with current_path='" << data->current_path << "'";
     data->failed = true;
@@ -552,15 +626,15 @@
 // to never return any XML with entities, our course of action is to
 // just stop parsing. This avoids potential resource exhaustion
 // problems AKA the "billion laughs". CVE-2013-0340.
-void ParserHandlerEntityDecl(void *user_data,
-                             const XML_Char *entity_name,
+void ParserHandlerEntityDecl(void* user_data,
+                             const XML_Char* entity_name,
                              int is_parameter_entity,
-                             const XML_Char *value,
+                             const XML_Char* value,
                              int value_length,
-                             const XML_Char *base,
-                             const XML_Char *system_id,
-                             const XML_Char *public_id,
-                             const XML_Char *notation_name) {
+                             const XML_Char* base,
+                             const XML_Char* system_id,
+                             const XML_Char* public_id,
+                             const XML_Char* notation_name) {
   OmahaParserData* data = reinterpret_cast<OmahaParserData*>(user_data);
 
   LOG(ERROR) << "XML entities are not supported. Aborting parsing.";
@@ -572,8 +646,9 @@
 }  // namespace
 
 bool XmlEncode(const string& input, string* output) {
-  if (std::find_if(input.begin(), input.end(),
-                   [](const char c){return c & 0x80;}) != input.end()) {
+  if (std::find_if(input.begin(), input.end(), [](const char c) {
+        return c & 0x80;
+      }) != input.end()) {
     LOG(WARNING) << "Invalid ASCII-7 string passed to the XML encoder:";
     utils::HexDumpString(input);
     return false;
@@ -619,12 +694,14 @@
     std::unique_ptr<HttpFetcher> http_fetcher,
     bool ping_only)
     : system_state_(system_state),
+      params_(system_state->request_params()),
       event_(event),
       http_fetcher_(std::move(http_fetcher)),
+      policy_provider_(std::make_unique<policy::PolicyProvider>()),
       ping_only_(ping_only),
       ping_active_days_(0),
       ping_roll_call_days_(0) {
-  params_ = system_state->request_params();
+  policy_provider_->Reload();
 }
 
 OmahaRequestAction::~OmahaRequestAction() {}
@@ -642,8 +719,8 @@
       // last ping daystart preference. This way the next ping time
       // will be correct, hopefully.
       days = kPingTimeJump;
-      LOG(WARNING) <<
-          "System clock jumped back in time. Resetting ping daystarts.";
+      LOG(WARNING)
+          << "System clock jumped back in time. Resetting ping daystarts.";
     }
   }
   return days;
@@ -734,13 +811,12 @@
   }
 
   // Persist this to disk, for future use.
-  if (!OmahaRequestAction::PersistInstallDate(system_state,
-                                              num_days,
-                                              kProvisionedFromOOBEMarker))
+  if (!OmahaRequestAction::PersistInstallDate(
+          system_state, num_days, kProvisionedFromOOBEMarker))
     return -1;
 
-  LOG(INFO) << "Set the Omaha InstallDate from OOBE time-stamp to "
-            << num_days << " days";
+  LOG(INFO) << "Set the Omaha InstallDate from OOBE time-stamp to " << num_days
+            << " days";
 
   return num_days;
 }
@@ -762,7 +838,7 @@
                                     GetInstallDate(system_state_),
                                     system_state_));
 
-  // Set X-GoogleUpdate headers.
+  // Set X-Goog-Update headers.
   http_fetcher_->SetHeader(kXGoogleUpdateInteractivity,
                            params_->interactive() ? "fg" : "bg");
   http_fetcher_->SetHeader(kXGoogleUpdateAppId, params_->GetAppId());
@@ -771,8 +847,8 @@
       base::StringPrintf(
           "%s-%s", constants::kOmahaUpdaterID, kOmahaUpdaterVersion));
 
-  http_fetcher_->SetPostData(request_post.data(), request_post.size(),
-                             kHttpContentTypeTextXml);
+  http_fetcher_->SetPostData(
+      request_post.data(), request_post.size(), kHttpContentTypeTextXml);
   LOG(INFO) << "Posting an Omaha request to " << params_->update_url();
   LOG(INFO) << "Request: " << request_post;
   http_fetcher_->BeginTransfer(params_->update_url());
@@ -784,11 +860,12 @@
 
 // We just store the response in the buffer. Once we've received all bytes,
 // we'll look in the buffer and decide what to do.
-void OmahaRequestAction::ReceivedBytes(HttpFetcher *fetcher,
+bool OmahaRequestAction::ReceivedBytes(HttpFetcher* fetcher,
                                        const void* bytes,
                                        size_t length) {
   const uint8_t* byte_ptr = reinterpret_cast<const uint8_t*>(bytes);
   response_buffer_.insert(response_buffer_.end(), byte_ptr, byte_ptr + length);
+  return true;
 }
 
 namespace {
@@ -813,11 +890,10 @@
 
 // Update the last ping day preferences based on the server daystart
 // response. Returns true on success, false otherwise.
-bool UpdateLastPingDays(OmahaParserData *parser_data, PrefsInterface* prefs) {
+bool UpdateLastPingDays(OmahaParserData* parser_data, PrefsInterface* prefs) {
   int64_t elapsed_seconds = 0;
-  TEST_AND_RETURN_FALSE(
-      base::StringToInt64(parser_data->daystart_elapsed_seconds,
-                          &elapsed_seconds));
+  TEST_AND_RETURN_FALSE(base::StringToInt64(
+      parser_data->daystart_elapsed_seconds, &elapsed_seconds));
   TEST_AND_RETURN_FALSE(elapsed_seconds >= 0);
 
   // Remember the local time that matches the server's last midnight
@@ -834,7 +910,8 @@
 bool ParsePackage(OmahaParserData::App* app,
                   OmahaResponse* output_object,
                   ScopedActionCompleter* completer) {
-  if (app->updatecheck_status == "noupdate") {
+  if (app->updatecheck_status.empty() ||
+      app->updatecheck_status == kValNoUpdate) {
     if (!app->packages.empty()) {
       LOG(ERROR) << "No update in this <app> but <package> is not empty.";
       completer->set_code(ErrorCode::kOmahaResponseInvalid);
@@ -854,17 +931,17 @@
   }
   LOG(INFO) << "Found " << app->url_codebase.size() << " url(s)";
   vector<string> metadata_sizes =
-      base::SplitString(app->action_postinstall_attrs[kTagMetadataSize],
+      base::SplitString(app->action_postinstall_attrs[kAttrMetadataSize],
                         ":",
                         base::TRIM_WHITESPACE,
                         base::SPLIT_WANT_ALL);
-  vector<string> metadata_signatures =
-      base::SplitString(app->action_postinstall_attrs[kTagMetadataSignatureRsa],
-                        ":",
-                        base::TRIM_WHITESPACE,
-                        base::SPLIT_WANT_ALL);
+  vector<string> metadata_signatures = base::SplitString(
+      app->action_postinstall_attrs[kAttrMetadataSignatureRsa],
+      ":",
+      base::TRIM_WHITESPACE,
+      base::SPLIT_WANT_ALL);
   vector<string> is_delta_payloads =
-      base::SplitString(app->action_postinstall_attrs[kTagIsDeltaPayload],
+      base::SplitString(app->action_postinstall_attrs[kAttrIsDeltaPayload],
                         ":",
                         base::TRIM_WHITESPACE,
                         base::SPLIT_WANT_ALL);
@@ -923,6 +1000,20 @@
   return true;
 }
 
+// Parses the two key version strings kernel_version and firmware_version. If
+// a field is not present or cannot be parsed, the values default to 0xffff.
+void ParseRollbackVersions(OmahaParserData* parser_data,
+                           OmahaResponse* output_object) {
+  utils::ParseRollbackKeyVersion(
+      parser_data->updatecheck_attrs[kAttrFirmwareVersion],
+      &output_object->rollback_key_version.firmware_key,
+      &output_object->rollback_key_version.firmware);
+  utils::ParseRollbackKeyVersion(
+      parser_data->updatecheck_attrs[kAttrKernelVersion],
+      &output_object->rollback_key_version.kernel_key,
+      &output_object->rollback_key_version.kernel);
+}
+
 }  // namespace
 
 bool OmahaRequestAction::ParseResponse(OmahaParserData* parser_data,
@@ -985,6 +1076,14 @@
 
   // Parse the updatecheck attributes.
   PersistEolStatus(parser_data->updatecheck_attrs);
+  // Rollback-related updatecheck attributes.
+  // Defaults to false if the attribute is not present.
+  output_object->is_rollback =
+      ParseBool(parser_data->updatecheck_attrs[kAttrRollback]);
+
+  // Parses the rollback versions of the current image. If the fields do not
+  // exist, they default to 0xffff for the four key versions.
+  ParseRollbackVersions(parser_data, output_object);
 
   if (!ParseStatus(parser_data, output_object, completer))
     return false;
@@ -1005,22 +1104,29 @@
                                      OmahaResponse* output_object,
                                      ScopedActionCompleter* completer) {
   output_object->update_exists = false;
-  for (size_t i = 0; i < parser_data->apps.size(); i++) {
-    const string& status = parser_data->apps[i].updatecheck_status;
-    if (status == "noupdate") {
+  for (const auto& app : parser_data->apps) {
+    const string& status = app.updatecheck_status;
+    if (status == kValNoUpdate) {
       // Don't update if any app has status="noupdate".
-      LOG(INFO) << "No update for <app> " << i;
+      LOG(INFO) << "No update for <app> " << app.id;
       output_object->update_exists = false;
       break;
     } else if (status == "ok") {
-      if (parser_data->apps[i].action_postinstall_attrs["noupdate"] == "true") {
+      auto const& attr_no_update =
+          app.action_postinstall_attrs.find(kAttrNoUpdate);
+      if (attr_no_update != app.action_postinstall_attrs.end() &&
+          attr_no_update->second == "true") {
         // noupdate="true" in postinstall attributes means it's an update to
         // self; only update if at least one app really has an update.
-        LOG(INFO) << "Update to self for <app> " << i;
+        LOG(INFO) << "Update to self for <app> " << app.id;
       } else {
-        LOG(INFO) << "Update for <app> " << i;
+        LOG(INFO) << "Update for <app> " << app.id;
         output_object->update_exists = true;
       }
+    } else if (status.empty() && params_->is_install() &&
+               params_->GetAppId() == app.id) {
+      // Skip the platform app during an install operation.
+      LOG(INFO) << "No payload for <app> " << app.id << "; ignoring it.";
     } else {
       LOG(ERROR) << "Unknown Omaha response status: " << status;
       completer->set_code(ErrorCode::kOmahaResponseInvalid);
@@ -1048,11 +1154,20 @@
       // this is the system app (this check is intentionally skipped if there is
       // no system_app_id set)
       output_object->system_version = app.manifest_version;
+    } else if (params_->is_install() &&
+               app.manifest_version != params_->app_version()) {
+      LOG(WARNING) << "An app has a version (" << app.manifest_version
+                   << ") that is different from the platform app version ("
+                   << params_->app_version() << ")";
     }
     if (!app.action_postinstall_attrs.empty() && attrs.empty()) {
       attrs = app.action_postinstall_attrs;
     }
   }
+  if (params_->is_install()) {
+    LOG(INFO) << "Using the request's app version for the install operation.";
+    output_object->version = params_->app_version();
+  }
   if (output_object->version.empty()) {
     LOG(ERROR) << "Omaha Response does not have version in manifest!";
     completer->set_code(ErrorCode::kOmahaResponseInvalid);
@@ -1069,22 +1184,23 @@
   }
 
   // Get the optional properties one by one.
-  output_object->more_info_url = attrs[kTagMoreInfo];
-  output_object->prompt = ParseBool(attrs[kTagPrompt]);
-  output_object->deadline = attrs[kTagDeadline];
-  output_object->max_days_to_scatter = ParseInt(attrs[kTagMaxDaysToScatter]);
+  output_object->more_info_url = attrs[kAttrMoreInfo];
+  output_object->prompt = ParseBool(attrs[kAttrPrompt]);
+  output_object->deadline = attrs[kAttrDeadline];
+  output_object->max_days_to_scatter = ParseInt(attrs[kAttrMaxDaysToScatter]);
   output_object->disable_p2p_for_downloading =
-      ParseBool(attrs[kTagDisableP2PForDownloading]);
+      ParseBool(attrs[kAttrDisableP2PForDownloading]);
   output_object->disable_p2p_for_sharing =
-      ParseBool(attrs[kTagDisableP2PForSharing]);
-  output_object->public_key_rsa = attrs[kTagPublicKeyRsa];
+      ParseBool(attrs[kAttrDisableP2PForSharing]);
+  output_object->public_key_rsa = attrs[kAttrPublicKeyRsa];
 
-  string max = attrs[kTagMaxFailureCountPerUrl];
+  string max = attrs[kAttrMaxFailureCountPerUrl];
   if (!base::StringToUint(max, &output_object->max_failure_count_per_url))
     output_object->max_failure_count_per_url = kDefaultMaxFailureCountPerUrl;
 
   output_object->disable_payload_backoff =
-      ParseBool(attrs[kTagDisablePayloadBackoff]);
+      ParseBool(attrs[kAttrDisablePayloadBackoff]);
+  output_object->powerwash_required = ParseBool(attrs[kAttrPowerwash]);
 
   return true;
 }
@@ -1092,7 +1208,7 @@
 // If the transfer was successful, this uses expat to parse the response
 // and fill in the appropriate fields of the output object. Also, notifies
 // the processor that we're done.
-void OmahaRequestAction::TransferComplete(HttpFetcher *fetcher,
+void OmahaRequestAction::TransferComplete(HttpFetcher* fetcher,
                                           bool successful) {
   ScopedActionCompleter completer(processor_, this);
   string current_response(response_buffer_.begin(), response_buffer_.end());
@@ -1100,6 +1216,9 @@
 
   PayloadStateInterface* const payload_state = system_state_->payload_state();
 
+  // Set the max kernel key version based on whether rollback is allowed.
+  SetMaxKernelKeyVersionForRollback();
+
   // Events are best effort transactions -- assume they always succeed.
   if (IsEvent()) {
     CHECK(!HasOutputPipe()) << "No output pipe allowed for event requests.";
@@ -1124,18 +1243,18 @@
   XML_SetUserData(parser, &parser_data);
   XML_SetElementHandler(parser, ParserHandlerStart, ParserHandlerEnd);
   XML_SetEntityDeclHandler(parser, ParserHandlerEntityDecl);
-  XML_Status res = XML_Parse(
-      parser,
-      reinterpret_cast<const char*>(response_buffer_.data()),
-      response_buffer_.size(),
-      XML_TRUE);
-  XML_ParserFree(parser);
+  XML_Status res =
+      XML_Parse(parser,
+                reinterpret_cast<const char*>(response_buffer_.data()),
+                response_buffer_.size(),
+                XML_TRUE);
 
   if (res != XML_STATUS_OK || parser_data.failed) {
     LOG(ERROR) << "Omaha response not valid XML: "
-               << XML_ErrorString(XML_GetErrorCode(parser))
-               << " at line " << XML_GetCurrentLineNumber(parser)
-               << " col " << XML_GetCurrentColumnNumber(parser);
+               << XML_ErrorString(XML_GetErrorCode(parser)) << " at line "
+               << XML_GetCurrentLineNumber(parser) << " col "
+               << XML_GetCurrentColumnNumber(parser);
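+    // Free the parser only after its error details have been logged above.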
+    XML_ParserFree(parser);
     ErrorCode error_code = ErrorCode::kOmahaRequestXMLParseError;
     if (response_buffer_.empty()) {
       error_code = ErrorCode::kOmahaRequestEmptyResponseError;
@@ -1145,6 +1264,7 @@
     completer.set_code(error_code);
     return;
   }
+  XML_ParserFree(parser);
 
   // Update the last ping day preferences based on the server daystart response
   // even if we didn't send a ping. Omaha always includes the daystart in the
@@ -1159,7 +1279,10 @@
   // their a=-1 in the past and we have to set first_active_omaha_ping_sent for
   // future checks.
   if (!system_state_->hardware()->GetFirstActiveOmahaPingSent()) {
-    system_state_->hardware()->SetFirstActiveOmahaPingSent();
+    if (!system_state_->hardware()->SetFirstActiveOmahaPingSent()) {
+      system_state_->metrics_reporter()->ReportInternalErrorCode(
+          ErrorCode::kFirstActiveOmahaPingSentPersistenceError);
+    }
   }
 
   if (!HasOutputPipe()) {
@@ -1175,9 +1298,13 @@
   output_object.update_exists = true;
   SetOutputObject(output_object);
 
-  if (ShouldIgnoreUpdate(output_object)) {
-    output_object.update_exists = false;
-    completer.set_code(ErrorCode::kOmahaUpdateIgnoredPerPolicy);
+  LoadOrPersistUpdateFirstSeenAtPref();
+
+  ErrorCode error = ErrorCode::kSuccess;
+  if (ShouldIgnoreUpdate(output_object, &error)) {
+    // No need to change output_object.update_exists here, since the value
+    // has been output to the pipe.
+    completer.set_code(error);
     return;
   }
 
@@ -1233,16 +1360,6 @@
   OmahaResponse& output_object = const_cast<OmahaResponse&>(GetOutputObject());
   PayloadStateInterface* payload_state = system_state_->payload_state();
 
-  if (system_state_->hardware()->IsOOBEEnabled() &&
-      !system_state_->hardware()->IsOOBEComplete(nullptr) &&
-      output_object.deadline.empty() &&
-      params_->app_version() != "ForcedUpdate") {
-    output_object.update_exists = false;
-    LOG(INFO) << "Ignoring non-critical Omaha updates until OOBE is done.";
-    completer.set_code(ErrorCode::kNonCriticalUpdateInOOBE);
-    return;
-  }
-
   if (ShouldDeferDownload(&output_object)) {
     output_object.update_exists = false;
     LOG(INFO) << "Ignoring Omaha updates as updates are deferred by policy.";
@@ -1384,60 +1501,29 @@
 OmahaRequestAction::WallClockWaitResult
 OmahaRequestAction::IsWallClockBasedWaitingSatisfied(
     OmahaResponse* output_object) {
-  Time update_first_seen_at;
-  int64_t update_first_seen_at_int;
-
-  if (system_state_->prefs()->Exists(kPrefsUpdateFirstSeenAt)) {
-    if (system_state_->prefs()->GetInt64(kPrefsUpdateFirstSeenAt,
-                                         &update_first_seen_at_int)) {
-      // Note: This timestamp could be that of ANY update we saw in the past
-      // (not necessarily this particular update we're considering to apply)
-      // but never got to apply because of some reason (e.g. stop AU policy,
-      // updates being pulled out from Omaha, changes in target version prefix,
-      // new update being rolled out, etc.). But for the purposes of scattering
-      // it doesn't matter which update the timestamp corresponds to. i.e.
-      // the clock starts ticking the first time we see an update and we're
-      // ready to apply when the random wait period is satisfied relative to
-      // that first seen timestamp.
-      update_first_seen_at = Time::FromInternalValue(update_first_seen_at_int);
-      LOG(INFO) << "Using persisted value of UpdateFirstSeenAt: "
-                << utils::ToString(update_first_seen_at);
-    } else {
-      // This seems like an unexpected error where the persisted value exists
-      // but it's not readable for some reason. Just skip scattering in this
-      // case to be safe.
-     LOG(INFO) << "Not scattering as UpdateFirstSeenAt value cannot be read";
-     return kWallClockWaitDoneAndUpdateCheckWaitNotRequired;
-    }
-  } else {
-    update_first_seen_at = system_state_->clock()->GetWallclockTime();
-    update_first_seen_at_int = update_first_seen_at.ToInternalValue();
-    if (system_state_->prefs()->SetInt64(kPrefsUpdateFirstSeenAt,
-                                         update_first_seen_at_int)) {
-      LOG(INFO) << "Persisted the new value for UpdateFirstSeenAt: "
-                << utils::ToString(update_first_seen_at);
-    } else {
-      // This seems like an unexpected error where the value cannot be
-      // persisted for some reason. Just skip scattering in this
-      // case to be safe.
-      LOG(INFO) << "Not scattering as UpdateFirstSeenAt value "
-                << utils::ToString(update_first_seen_at)
-                << " cannot be persisted";
-     return kWallClockWaitDoneAndUpdateCheckWaitNotRequired;
-    }
+  Time update_first_seen_at = LoadOrPersistUpdateFirstSeenAtPref();
+  if (update_first_seen_at == base::Time()) {
+    LOG(INFO) << "Not scattering as UpdateFirstSeenAt value cannot be read or "
+                 "persisted";
+    return kWallClockWaitDoneAndUpdateCheckWaitNotRequired;
   }
 
   TimeDelta elapsed_time =
       system_state_->clock()->GetWallclockTime() - update_first_seen_at;
   TimeDelta max_scatter_period =
       TimeDelta::FromDays(output_object->max_days_to_scatter);
+  int64_t staging_wait_time_in_days = 0;
+  // Use staging and its default max value if staging is on.
+  if (system_state_->prefs()->GetInt64(kPrefsWallClockStagingWaitPeriod,
+                                       &staging_wait_time_in_days) &&
+      staging_wait_time_in_days > 0)
+    max_scatter_period = TimeDelta::FromDays(kMaxWaitTimeStagingInDays);
 
   LOG(INFO) << "Waiting Period = "
             << utils::FormatSecs(params_->waiting_period().InSeconds())
             << ", Time Elapsed = "
             << utils::FormatSecs(elapsed_time.InSeconds())
-            << ", MaxDaysToScatter = "
-            << max_scatter_period.InDays();
+            << ", MaxDaysToScatter = " << max_scatter_period.InDays();
 
   if (!output_object->deadline.empty()) {
     // The deadline is set for all rules which serve a delta update from a
@@ -1472,9 +1558,9 @@
 
     // But we can't download until the update-check-count-based wait is also
     // satisfied, so mark it as required now if update checks are enabled.
-    return params_->update_check_count_wait_enabled() ?
-              kWallClockWaitDoneButUpdateCheckWaitRequired :
-              kWallClockWaitDoneAndUpdateCheckWaitNotRequired;
+    return params_->update_check_count_wait_enabled()
+               ? kWallClockWaitDoneButUpdateCheckWaitRequired
+               : kWallClockWaitDoneAndUpdateCheckWaitNotRequired;
   }
 
   // Not our turn yet, so we have to wait until our turn to
@@ -1500,9 +1586,9 @@
   } else {
     // This file does not exist. This means we haven't started our update
     // check count down yet, so this is the right time to start the count down.
-    update_check_count_value = base::RandInt(
-      params_->min_update_checks_needed(),
-      params_->max_update_checks_allowed());
+    update_check_count_value =
+        base::RandInt(params_->min_update_checks_needed(),
+                      params_->max_update_checks_allowed());
 
     LOG(INFO) << "Randomly picked update check count value = "
               << update_check_count_value;
@@ -1535,8 +1621,7 @@
   // Legal value, we need to wait for more update checks to happen
   // until this becomes 0.
   LOG(INFO) << "Deferring Omaha updates for another "
-            << update_check_count_value
-            << " update checks per policy";
+            << update_check_count_value << " update checks per policy";
   return false;
 }
 
@@ -1544,8 +1629,7 @@
 bool OmahaRequestAction::ParseInstallDate(OmahaParserData* parser_data,
                                           OmahaResponse* output_object) {
   int64_t elapsed_days = 0;
-  if (!base::StringToInt64(parser_data->daystart_elapsed_days,
-                           &elapsed_days))
+  if (!base::StringToInt64(parser_data->daystart_elapsed_days, &elapsed_days))
     return false;
 
   if (elapsed_days < 0)
@@ -1556,7 +1640,7 @@
 }
 
 // static
-bool OmahaRequestAction::HasInstallDate(SystemState *system_state) {
+bool OmahaRequestAction::HasInstallDate(SystemState* system_state) {
   PrefsInterface* prefs = system_state->prefs();
   if (prefs == nullptr)
     return false;
@@ -1566,7 +1650,7 @@
 
 // static
 bool OmahaRequestAction::PersistInstallDate(
-    SystemState *system_state,
+    SystemState* system_state,
     int install_date_days,
     InstallDateProvisioningSource source) {
   TEST_AND_RETURN_FALSE(install_date_days >= 0);
@@ -1584,9 +1668,8 @@
   return true;
 }
 
-bool OmahaRequestAction::PersistCohortData(
-    const string& prefs_key,
-    const string& new_value) {
+bool OmahaRequestAction::PersistCohortData(const string& prefs_key,
+                                           const string& new_value) {
   if (new_value.empty() && system_state_->prefs()->Exists(prefs_key)) {
     LOG(INFO) << "Removing stored " << prefs_key << " value.";
     return system_state_->prefs()->Delete(prefs_key);
@@ -1598,7 +1681,7 @@
 }
 
 bool OmahaRequestAction::PersistEolStatus(const map<string, string>& attrs) {
-  auto eol_attr = attrs.find(kEolAttr);
+  auto eol_attr = attrs.find(kAttrEol);
   if (eol_attr != attrs.end()) {
     return system_state_->prefs()->SetString(kPrefsOmahaEolStatus,
                                              eol_attr->second);
@@ -1620,58 +1703,59 @@
 
   // Regular update attempt.
   switch (code) {
-  case ErrorCode::kSuccess:
-    // OK, we parsed the response successfully but that does
-    // necessarily mean that an update is available.
-    if (HasOutputPipe()) {
-      const OmahaResponse& response = GetOutputObject();
-      if (response.update_exists) {
-        result = metrics::CheckResult::kUpdateAvailable;
-        reaction = metrics::CheckReaction::kUpdating;
+    case ErrorCode::kSuccess:
+      // OK, we parsed the response successfully but that does not
+      // necessarily mean that an update is available.
+      if (HasOutputPipe()) {
+        const OmahaResponse& response = GetOutputObject();
+        if (response.update_exists) {
+          result = metrics::CheckResult::kUpdateAvailable;
+          reaction = metrics::CheckReaction::kUpdating;
+        } else {
+          result = metrics::CheckResult::kNoUpdateAvailable;
+        }
       } else {
         result = metrics::CheckResult::kNoUpdateAvailable;
       }
-    } else {
-      result = metrics::CheckResult::kNoUpdateAvailable;
-    }
-    break;
+      break;
 
-  case ErrorCode::kOmahaUpdateIgnoredPerPolicy:
-    result = metrics::CheckResult::kUpdateAvailable;
-    reaction = metrics::CheckReaction::kIgnored;
-    break;
+    case ErrorCode::kOmahaUpdateIgnoredPerPolicy:
+    case ErrorCode::kOmahaUpdateIgnoredOverCellular:
+      result = metrics::CheckResult::kUpdateAvailable;
+      reaction = metrics::CheckReaction::kIgnored;
+      break;
 
-  case ErrorCode::kOmahaUpdateDeferredPerPolicy:
-    result = metrics::CheckResult::kUpdateAvailable;
-    reaction = metrics::CheckReaction::kDeferring;
-    break;
+    case ErrorCode::kOmahaUpdateDeferredPerPolicy:
+      result = metrics::CheckResult::kUpdateAvailable;
+      reaction = metrics::CheckReaction::kDeferring;
+      break;
 
-  case ErrorCode::kOmahaUpdateDeferredForBackoff:
-    result = metrics::CheckResult::kUpdateAvailable;
-    reaction = metrics::CheckReaction::kBackingOff;
-    break;
+    case ErrorCode::kOmahaUpdateDeferredForBackoff:
+      result = metrics::CheckResult::kUpdateAvailable;
+      reaction = metrics::CheckReaction::kBackingOff;
+      break;
 
-  default:
-    // We report two flavors of errors, "Download errors" and "Parsing
-    // error". Try to convert to the former and if that doesn't work
-    // we know it's the latter.
-    metrics::DownloadErrorCode tmp_error =
-        metrics_utils::GetDownloadErrorCode(code);
-    if (tmp_error != metrics::DownloadErrorCode::kInputMalformed) {
-      result = metrics::CheckResult::kDownloadError;
-      download_error_code = tmp_error;
-    } else {
-      result = metrics::CheckResult::kParsingError;
-    }
-    break;
+    default:
+      // We report two flavors of errors, "Download errors" and "Parsing
+      // error". Try to convert to the former and if that doesn't work
+      // we know it's the latter.
+      metrics::DownloadErrorCode tmp_error =
+          metrics_utils::GetDownloadErrorCode(code);
+      if (tmp_error != metrics::DownloadErrorCode::kInputMalformed) {
+        result = metrics::CheckResult::kDownloadError;
+        download_error_code = tmp_error;
+      } else {
+        result = metrics::CheckResult::kParsingError;
+      }
+      break;
   }
 
   system_state_->metrics_reporter()->ReportUpdateCheckMetrics(
       system_state_, result, reaction, download_error_code);
 }
 
-bool OmahaRequestAction::ShouldIgnoreUpdate(
-    const OmahaResponse& response) const {
+bool OmahaRequestAction::ShouldIgnoreUpdate(const OmahaResponse& response,
+                                            ErrorCode* error) const {
   // Note: policy decision to not update to a version we rolled back from.
   string rollback_version =
       system_state_->payload_state()->GetRollbackVersion();
@@ -1679,11 +1763,22 @@
     LOG(INFO) << "Detected previous rollback from version " << rollback_version;
     if (rollback_version == response.version) {
       LOG(INFO) << "Received version that we rolled back from. Ignoring.";
+      *error = ErrorCode::kOmahaUpdateIgnoredPerPolicy;
       return true;
     }
   }
 
-  if (!IsUpdateAllowedOverCurrentConnection()) {
+  if (system_state_->hardware()->IsOOBEEnabled() &&
+      !system_state_->hardware()->IsOOBEComplete(nullptr) &&
+      (response.deadline.empty() ||
+       system_state_->payload_state()->GetRollbackHappened()) &&
+      params_->app_version() != "ForcedUpdate") {
+    LOG(INFO) << "Ignoring a non-critical Omaha update before OOBE completion.";
+    *error = ErrorCode::kNonCriticalUpdateInOOBE;
+    return true;
+  }
+
+  if (!IsUpdateAllowedOverCurrentConnection(error, response)) {
     LOG(INFO) << "Update is not allowed over current connection.";
     return true;
   }
@@ -1698,7 +1793,62 @@
   return false;
 }
 
-bool OmahaRequestAction::IsUpdateAllowedOverCurrentConnection() const {
+bool OmahaRequestAction::IsUpdateAllowedOverCellularByPrefs(
+    const OmahaResponse& response) const {
+  PrefsInterface* prefs = system_state_->prefs();
+
+  if (!prefs) {
+    LOG(INFO) << "Disabling updates over cellular as the preferences are "
+                 "not available.";
+    return false;
+  }
+
+  bool is_allowed;
+
+  if (prefs->Exists(kPrefsUpdateOverCellularPermission) &&
+      prefs->GetBoolean(kPrefsUpdateOverCellularPermission, &is_allowed) &&
+      is_allowed) {
+    LOG(INFO) << "Allowing updates over cellular as permission preference is "
+                 "set to true.";
+    return true;
+  }
+
+  if (!prefs->Exists(kPrefsUpdateOverCellularTargetVersion) ||
+      !prefs->Exists(kPrefsUpdateOverCellularTargetSize)) {
+    LOG(INFO) << "Disabling updates over cellular as permission preference is "
+                 "set to false or does not exist, and the target is not set.";
+    return false;
+  }
+
+  std::string target_version;
+  int64_t target_size;
+
+  if (!prefs->GetString(kPrefsUpdateOverCellularTargetVersion,
+                        &target_version) ||
+      !prefs->GetInt64(kPrefsUpdateOverCellularTargetSize, &target_size)) {
+    LOG(INFO) << "Disabling updates over cellular as the target version or "
+                 "size is not accessible.";
+    return false;
+  }
+
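+  // The allowed target must match both the response version and the total
+  // size of all packages in the response.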
+  uint64_t total_packages_size = 0;
+  for (const auto& package : response.packages) {
+    total_packages_size += package.size;
+  }
+  if (target_version == response.version &&
+      static_cast<uint64_t>(target_size) == total_packages_size) {
+    LOG(INFO) << "Allowing updates over cellular as the target matches the "
+                 "Omaha response.";
+    return true;
+  } else {
+    LOG(INFO) << "Disabling updates over cellular as the target does not "
+                 "match the Omaha response.";
+    return false;
+  }
+}
+
+bool OmahaRequestAction::IsUpdateAllowedOverCurrentConnection(
+    ErrorCode* error, const OmahaResponse& response) const {
   ConnectionType type;
   ConnectionTethering tethering;
   ConnectionManagerInterface* connection_manager =
@@ -1708,11 +1858,140 @@
               << "Defaulting to allow updates.";
     return true;
   }
+
   bool is_allowed = connection_manager->IsUpdateAllowedOver(type, tethering);
+  bool is_device_policy_set =
+      connection_manager->IsAllowedConnectionTypesForUpdateSet();
+  // Treat a tethered connection as if it were a cellular connection.
+  bool is_over_cellular = type == ConnectionType::kCellular ||
+                          tethering == ConnectionTethering::kConfirmed;
+
+  if (!is_over_cellular) {
+    // There's no need to further check user preferences as we are not on a
+    // cellular connection.
+    if (!is_allowed)
+      *error = ErrorCode::kOmahaUpdateIgnoredPerPolicy;
+  } else if (is_device_policy_set) {
+    // There's no need to further check user preferences as the device policy
+    // is set regarding updates over cellular.
+    if (!is_allowed)
+      *error = ErrorCode::kOmahaUpdateIgnoredPerPolicy;
+  } else {
+    // Device policy is not set, so user preferences determine whether to
+    // allow updates over cellular.
+    is_allowed = IsUpdateAllowedOverCellularByPrefs(response);
+    if (!is_allowed)
+      *error = ErrorCode::kOmahaUpdateIgnoredOverCellular;
+  }
+
   LOG(INFO) << "We are connected via "
             << connection_utils::StringForConnectionType(type)
             << ", Updates allowed: " << (is_allowed ? "Yes" : "No");
   return is_allowed;
 }
 
+bool OmahaRequestAction::IsRollbackEnabled() const {
+  if (policy_provider_->IsConsumerDevice()) {
+    LOG(INFO) << "Rollback is not enabled for consumer devices.";
+    return false;
+  }
+
+  if (!policy_provider_->device_policy_is_loaded()) {
+    LOG(INFO) << "No device policy is loaded. Assuming rollback enabled.";
+    return true;
+  }
+
+  int allowed_milestones;
+  if (!policy_provider_->GetDevicePolicy().GetRollbackAllowedMilestones(
+          &allowed_milestones)) {
+    LOG(INFO) << "RollbackAllowedMilestones policy can't be read. "
+                 "Defaulting to rollback enabled.";
+    return true;
+  }
+
+  LOG(INFO) << "Rollback allows " << allowed_milestones << " milestones.";
+  return allowed_milestones > 0;
+}
+
+void OmahaRequestAction::SetMaxKernelKeyVersionForRollback() const {
+  int max_kernel_rollforward;
+  int min_kernel_version = system_state_->hardware()->GetMinKernelKeyVersion();
+  if (IsRollbackEnabled()) {
+    // If rollback is enabled, set the max kernel key version to the current
+    // kernel key version. This has the effect of freezing kernel key roll
+    // forwards.
+    //
+    // TODO(zentaro): This behavior is temporary, and ensures that no kernel
+    // key roll forward happens until the server side components of rollback
+    // are implemented. Future changes will allow the Omaha server to return
+    // the kernel key version from max_rollback_versions in the past. At that
+    // point the max kernel key version will be set to that value, creating a
+    // sliding window of versions that can be rolled back to.
+    LOG(INFO) << "Rollback is enabled. Setting kernel_max_rollforward to "
+              << min_kernel_version;
+    max_kernel_rollforward = min_kernel_version;
+  } else {
+    // For devices that are not rollback enabled (i.e. consumer devices), the
+    // max kernel key version is set to 0xfffffffe, which is logically
+    // infinity. This maintains the previous behavior that kernel key
+    // versions roll forward each time they are incremented.
+    LOG(INFO) << "Rollback is disabled. Setting kernel_max_rollforward to "
+              << kRollforwardInfinity;
+    max_kernel_rollforward = kRollforwardInfinity;
+  }
+
+  bool max_rollforward_set =
+      system_state_->hardware()->SetMaxKernelKeyRollforward(
+          max_kernel_rollforward);
+  if (!max_rollforward_set) {
+    LOG(ERROR) << "Failed to set kernel_max_rollforward";
+  }
+  // Report metrics
+  system_state_->metrics_reporter()->ReportKeyVersionMetrics(
+      min_kernel_version, max_kernel_rollforward, max_rollforward_set);
+}
+
+base::Time OmahaRequestAction::LoadOrPersistUpdateFirstSeenAtPref() const {
+  Time update_first_seen_at;
+  int64_t update_first_seen_at_int;
+  if (system_state_->prefs()->Exists(kPrefsUpdateFirstSeenAt)) {
+    if (system_state_->prefs()->GetInt64(kPrefsUpdateFirstSeenAt,
+                                         &update_first_seen_at_int)) {
+      // Note: This timestamp could be that of ANY update we saw in the past
+      // (not necessarily this particular update we're considering to apply)
+      // but never got to apply because of some reason (e.g. stop AU policy,
+      // updates being pulled out from Omaha, changes in target version prefix,
+      // new update being rolled out, etc.). But for the purposes of scattering
+      // it doesn't matter which update the timestamp corresponds to. i.e.
+      // the clock starts ticking the first time we see an update and we're
+      // ready to apply when the random wait period is satisfied relative to
+      // that first seen timestamp.
+      update_first_seen_at = Time::FromInternalValue(update_first_seen_at_int);
+      LOG(INFO) << "Using persisted value of UpdateFirstSeenAt: "
+                << utils::ToString(update_first_seen_at);
+    } else {
+      // This seems like an unexpected error where the persisted value exists
+      // but it's not readable for some reason.
+      LOG(INFO) << "UpdateFirstSeenAt value cannot be read";
+      return base::Time();
+    }
+  } else {
+    update_first_seen_at = system_state_->clock()->GetWallclockTime();
+    update_first_seen_at_int = update_first_seen_at.ToInternalValue();
+    if (system_state_->prefs()->SetInt64(kPrefsUpdateFirstSeenAt,
+                                         update_first_seen_at_int)) {
+      LOG(INFO) << "Persisted the new value for UpdateFirstSeenAt: "
+                << utils::ToString(update_first_seen_at);
+    } else {
+      // This seems like an unexpected error where the value cannot be
+      // persisted for some reason.
+      LOG(INFO) << "UpdateFirstSeenAt value "
+                << utils::ToString(update_first_seen_at)
+                << " cannot be persisted";
+      return base::Time();
+    }
+  }
+  return update_first_seen_at;
+}
+
 }  // namespace chromeos_update_engine
diff --git a/omaha_request_action.h b/omaha_request_action.h
index 924da40..8db5fb9 100644
--- a/omaha_request_action.h
+++ b/omaha_request_action.h
@@ -39,6 +39,10 @@
 // The Omaha Request action makes a request to Omaha and can output
 // the response on the output ActionPipe.
 
+namespace policy {
+class PolicyProvider;
+}
+
 namespace chromeos_update_engine {
 
 // Encodes XML entities in a given string. Input must be ASCII-7 valid. If
@@ -84,9 +88,7 @@
         result(kResultSuccess),
         error_code(ErrorCode::kSuccess) {}
   OmahaEvent(Type in_type, Result in_result, ErrorCode in_error_code)
-      : type(in_type),
-        result(in_result),
-        error_code(in_error_code) {}
+      : type(in_type), result(in_result), error_code(in_error_code) {}
 
   Type type;
   Result result;
@@ -101,7 +103,7 @@
 // This struct is declared in the .cc file.
 struct OmahaParserData;
 
-template<>
+template <>
 class ActionTraits<OmahaRequestAction> {
  public:
   // Takes parameters on the input pipe.
@@ -123,6 +125,10 @@
   // fallback ones.
   static const int kDefaultMaxFailureCountPerUrl = 10;
 
+  // If staging is enabled, set the maximum wait time to 28 days, since that is
+  // the predetermined wait time for staging.
+  static const int kMaxWaitTimeStagingInDays = 28;
+
   // These are the possible outcome upon checking whether we satisfied
   // the wall-clock-based-wait.
   enum WallClockWaitResult {
@@ -163,15 +169,18 @@
   std::string Type() const override { return StaticType(); }
 
   // Delegate methods (see http_fetcher.h)
-  void ReceivedBytes(HttpFetcher *fetcher,
-                     const void* bytes, size_t length) override;
+  bool ReceivedBytes(HttpFetcher* fetcher,
+                     const void* bytes,
+                     size_t length) override;
 
-  void TransferComplete(HttpFetcher *fetcher, bool successful) override;
+  void TransferComplete(HttpFetcher* fetcher, bool successful) override;
 
   // Returns true if this is an Event request, false if it's an UpdateCheck.
   bool IsEvent() const { return event_.get() != nullptr; }
 
  private:
+  friend class OmahaRequestActionTest;
+  friend class OmahaRequestActionTestProcessorDelegate;
   FRIEND_TEST(OmahaRequestActionTest, GetInstallDateWhenNoPrefsNorOOBE);
   FRIEND_TEST(OmahaRequestActionTest,
               GetInstallDateWhenOOBECompletedWithInvalidDate);
@@ -204,12 +213,12 @@
 
   // Returns True if the kPrefsInstallDateDays state variable is set,
   // False otherwise.
-  static bool HasInstallDate(SystemState *system_state);
+  static bool HasInstallDate(SystemState* system_state);
 
   // Writes |install_date_days| into the kPrefsInstallDateDays state
   // variable and emits an UMA stat for the |source| used. Returns
   // True if the value was written, False if an error occurred.
-  static bool PersistInstallDate(SystemState *system_state,
+  static bool PersistInstallDate(SystemState* system_state,
                                  int install_date_days,
                                  InstallDateProvisioningSource source);
 
@@ -292,11 +301,29 @@
   void OnLookupPayloadViaP2PCompleted(const std::string& url);
 
   // Returns true if the current update should be ignored.
-  bool ShouldIgnoreUpdate(const OmahaResponse& response) const;
+  bool ShouldIgnoreUpdate(const OmahaResponse& response,
+                          ErrorCode* error) const;
+
+  // Returns true if updates over cellular are allowed by user preferences.
+  bool IsUpdateAllowedOverCellularByPrefs(const OmahaResponse& response) const;
 
   // Returns true if updates are allowed over the current type of connection.
   // False otherwise.
-  bool IsUpdateAllowedOverCurrentConnection() const;
+  bool IsUpdateAllowedOverCurrentConnection(
+      ErrorCode* error, const OmahaResponse& response) const;
+
+  // Returns true if rollback is enabled. Always returns false for consumer
+  // devices.
+  bool IsRollbackEnabled() const;
+
+  // Sets the appropriate max kernel key version based on whether rollback is
+  // enabled.
+  void SetMaxKernelKeyVersionForRollback() const;
+
+  // Reads and returns the kPrefsUpdateFirstSeenAt pref if the pref currently
+  // exists. Otherwise saves the current wallclock time to the
+  // kPrefsUpdateFirstSeenAt pref and returns it as a base::Time object.
+  base::Time LoadOrPersistUpdateFirstSeenAtPref() const;
 
   // Global system context.
   SystemState* system_state_;
@@ -310,6 +337,9 @@
   // pointer to the HttpFetcher that does the http work
   std::unique_ptr<HttpFetcher> http_fetcher_;
 
+  // Used for fetching information about the device policy.
+  std::unique_ptr<policy::PolicyProvider> policy_provider_;
+
   // If true, only include the <ping> element in the request.
   bool ping_only_;
 
diff --git a/omaha_request_action_fuzzer.cc b/omaha_request_action_fuzzer.cc
new file mode 100644
index 0000000..6c2f7ca
--- /dev/null
+++ b/omaha_request_action_fuzzer.cc
@@ -0,0 +1,53 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <brillo/message_loops/fake_message_loop.h>
+
+#include "update_engine/common/mock_http_fetcher.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/fake_system_state.h"
+#include "update_engine/omaha_request_action.h"
+
+class Environment {
+ public:
+  Environment() { logging::SetMinLogLevel(logging::LOG_FATAL); }
+};
+
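+// Fuzzer entry point: treats the input bytes as the raw Omaha HTTP response
+// and runs the full OmahaRequestAction -> ObjectCollectorAction pipeline.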
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+  static Environment env;
+  brillo::FakeMessageLoop loop(nullptr);
+  loop.SetAsCurrent();
+
+  chromeos_update_engine::FakeSystemState fake_system_state;
+  auto omaha_request_action =
+      std::make_unique<chromeos_update_engine::OmahaRequestAction>(
+          &fake_system_state,
+          nullptr,
+          std::make_unique<chromeos_update_engine::MockHttpFetcher>(
+              data, size, nullptr),
+          false);
+  auto collector_action =
+      std::make_unique<chromeos_update_engine::ObjectCollectorAction<
+          chromeos_update_engine::OmahaResponse>>();
+  BondActions(omaha_request_action.get(), collector_action.get());
+  chromeos_update_engine::ActionProcessor action_processor;
+  action_processor.EnqueueAction(std::move(omaha_request_action));
+  action_processor.EnqueueAction(std::move(collector_action));
+  action_processor.StartProcessing();
+
+  loop.Run();
+  return 0;
+}
diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc
index 2f466dd..1786bcc 100644
--- a/omaha_request_action_unittest.cc
+++ b/omaha_request_action_unittest.cc
@@ -20,6 +20,7 @@
 
 #include <memory>
 #include <string>
+#include <utility>
 #include <vector>
 
 #include <base/bind.h>
@@ -30,11 +31,12 @@
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
 #include <base/time/time.h>
-#include <brillo/bind_lambda.h>
 #include <brillo/message_loops/fake_message_loop.h>
 #include <brillo/message_loops/message_loop.h>
 #include <brillo/message_loops/message_loop_utils.h>
 #include <gtest/gtest.h>
+#include <policy/libpolicy.h>
+#include <policy/mock_libpolicy.h>
 
 #include "update_engine/common/action_pipe.h"
 #include "update_engine/common/constants.h"
@@ -49,11 +51,14 @@
 #include "update_engine/mock_connection_manager.h"
 #include "update_engine/mock_payload_state.h"
 #include "update_engine/omaha_request_params.h"
+#include "update_engine/update_manager/rollback_prefs.h"
 
 using base::Time;
 using base::TimeDelta;
+using chromeos_update_manager::kRollforwardInfinity;
 using std::string;
 using std::vector;
+using testing::_;
 using testing::AllOf;
 using testing::AnyNumber;
 using testing::DoAll;
@@ -62,18 +67,34 @@
 using testing::NiceMock;
 using testing::Return;
 using testing::ReturnPointee;
+using testing::ReturnRef;
 using testing::SaveArg;
 using testing::SetArgPointee;
-using testing::_;
 
 namespace {
 
+static_assert(kRollforwardInfinity == 0xfffffffe,
+              "Don't change the value of kRollforwardInfinity unless its "
+              "size has been changed in firmware.");
+
+const char kCurrentVersion[] = "0.1.0.0";
 const char kTestAppId[] = "test-app-id";
 const char kTestAppId2[] = "test-app2-id";
+const char kTestAppIdSkipUpdatecheck[] = "test-app-id-skip-updatecheck";
 
 // This is a helper struct to allow unit tests to build an update response
 // with the values they care about.
 struct FakeUpdateResponse {
+  string GetRollbackVersionAttributes() const {
+    return (rollback ? " _rollback=\"true\"" : "") +
+           (!rollback_firmware_version.empty()
+                ? " _firmware_version=\"" + rollback_firmware_version + "\""
+                : "") +
+           (!rollback_kernel_version.empty()
+                ? " _kernel_version=\"" + rollback_kernel_version + "\""
+                : "");
+  }
+
   string GetNoUpdateResponse() const {
     string entity_str;
     if (include_entity)
@@ -111,8 +132,8 @@
                       "\" cohortname=\"" + cohortname + "\" "
                 : "") +
            " status=\"ok\">"
-           "<ping status=\"ok\"/><updatecheck status=\"ok\">"
-           "<urls><url codebase=\"" +
+           "<ping status=\"ok\"/><updatecheck status=\"ok\"" +
+           GetRollbackVersionAttributes() + ">" + "<urls><url codebase=\"" +
            codebase +
            "\"/></urls>"
            "<manifest version=\"" +
@@ -126,23 +147,21 @@
                           : "") +
            "</packages>"
            "<actions><action event=\"postinstall\" MetadataSize=\"11" +
-           (multi_package ? ":22" : "") + "\" ChromeOSVersion=\"" + version +
-           "\" MoreInfo=\"" + more_info_url + "\" Prompt=\"" + prompt +
+           (multi_package ? ":22" : "") + "\" MoreInfo=\"" + more_info_url +
+           "\" Prompt=\"" + prompt +
            "\" "
-           "IsDelta=\"true\" "
            "IsDeltaPayload=\"true" +
            (multi_package ? ":false" : "") +
            "\" "
            "MaxDaysToScatter=\"" +
            max_days_to_scatter +
            "\" "
-           "sha256=\"not-used\" "
-           "needsadmin=\"" +
-           needsadmin + "\" " +
+           "sha256=\"not-used\" " +
            (deadline.empty() ? "" : ("deadline=\"" + deadline + "\" ")) +
            (disable_p2p_for_downloading ? "DisableP2PForDownloading=\"true\" "
                                         : "") +
            (disable_p2p_for_sharing ? "DisableP2PForSharing=\"true\" " : "") +
+           (powerwash ? "Powerwash=\"true\" " : "") +
            "/></actions></manifest></updatecheck></app>" +
            (multi_app
                 ? "<app appid=\"" + app_id2 + "\"" +
@@ -162,16 +181,19 @@
            (multi_app_no_update
                 ? "<app><updatecheck status=\"noupdate\"/></app>"
                 : "") +
+           (multi_app_skip_updatecheck
+                ? "<app appid=\"" + app_id_skip_updatecheck + "\"></app>"
+                : "") +
            "</response>";
   }
 
   // Return the payload URL, which is split in two fields in the XML response.
-  string GetPayloadUrl() {
-    return codebase + filename;
-  }
+  string GetPayloadUrl() { return codebase + filename; }
 
   string app_id = kTestAppId;
   string app_id2 = kTestAppId2;
+  string app_id_skip_updatecheck = kTestAppIdSkipUpdatecheck;
+  string current_version = kCurrentVersion;
   string version = "1.2.3.4";
   string version2 = "2.3.4.5";
   string more_info_url = "http://more/info";
@@ -180,7 +202,6 @@
   string codebase2 = "http://code/base/2/";
   string filename = "file.signed";
   string hash = "4841534831323334";
-  string needsadmin = "false";
   uint64_t size = 123;
   string deadline = "";
   string max_days_to_scatter = "7";
@@ -190,6 +211,8 @@
   bool disable_p2p_for_downloading = false;
   bool disable_p2p_for_sharing = false;
 
+  bool powerwash = false;
+
   // Omaha cohorts settings.
   bool include_cohorts = false;
   string cohort = "";
@@ -205,17 +228,94 @@
   bool multi_app_self_update = false;
   // Whether to include an additional app with status="noupdate".
   bool multi_app_no_update = false;
+  // Whether to include an additional app with no updatecheck tag.
+  bool multi_app_skip_updatecheck = false;
   // Whether to include more than one package in an app.
   bool multi_package = false;
+
+  // Whether the payload is a rollback.
+  bool rollback = false;
+  // The verified boot firmware key version for the rollback image.
+  string rollback_firmware_version = "";
+  // The verified boot kernel key version for the rollback image.
+  string rollback_kernel_version = "";
 };
 
 }  // namespace
 
 namespace chromeos_update_engine {
 
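+// Test delegate that records the outcome of the OmahaRequestAction: the
+// completion code, the posted request data and the parsed OmahaResponse.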
+class OmahaRequestActionTestProcessorDelegate : public ActionProcessorDelegate {
+ public:
+  OmahaRequestActionTestProcessorDelegate()
+      : expected_code_(ErrorCode::kSuccess),
+        interactive_(false),
+        test_http_fetcher_headers_(false) {}
+  ~OmahaRequestActionTestProcessorDelegate() override = default;
+
+  void ProcessingDone(const ActionProcessor* processor,
+                      ErrorCode code) override {
+    brillo::MessageLoop::current()->BreakLoop();
+  }
+
+  void ActionCompleted(ActionProcessor* processor,
+                       AbstractAction* action,
+                       ErrorCode code) override {
+    // Check that each action completed with the expected code.
+    if (action->Type() == OmahaRequestAction::StaticType()) {
+      EXPECT_EQ(expected_code_, code);
+      // Check that the headers were set in the fetcher during the action. The
+      // interactivity header reflects whether the request was interactive.
+      auto fetcher = static_cast<const MockHttpFetcher*>(
+          static_cast<OmahaRequestAction*>(action)->http_fetcher_.get());
+
+      if (test_http_fetcher_headers_) {
+        EXPECT_EQ(interactive_ ? "fg" : "bg",
+                  fetcher->GetHeader("X-Goog-Update-Interactivity"));
+        EXPECT_EQ(kTestAppId, fetcher->GetHeader("X-Goog-Update-AppId"));
+        EXPECT_NE("", fetcher->GetHeader("X-Goog-Update-Updater"));
+      }
+      post_data_ = fetcher->post_data();
+    } else if (action->Type() ==
+               ObjectCollectorAction<OmahaResponse>::StaticType()) {
+      EXPECT_EQ(ErrorCode::kSuccess, code);
+      auto collector_action =
+          static_cast<ObjectCollectorAction<OmahaResponse>*>(action);
+      omaha_response_.reset(new OmahaResponse(collector_action->object()));
+      EXPECT_TRUE(omaha_response_);
+    } else {
+      EXPECT_EQ(ErrorCode::kSuccess, code);
+    }
+  }
+  ErrorCode expected_code_;
+  brillo::Blob post_data_;
+  bool interactive_;
+  bool test_http_fetcher_headers_;
+  std::unique_ptr<OmahaResponse> omaha_response_;
+};
+
 class OmahaRequestActionTest : public ::testing::Test {
  protected:
   void SetUp() override {
+    request_params_.set_os_sp("service_pack");
+    request_params_.set_os_board("x86-generic");
+    request_params_.set_app_id(kTestAppId);
+    request_params_.set_app_version(kCurrentVersion);
+    request_params_.set_app_lang("en-US");
+    request_params_.set_current_channel("unittest");
+    request_params_.set_target_channel("unittest");
+    request_params_.set_hwid("OEM MODEL 09235 7471");
+    request_params_.set_fw_version("ChromeOSFirmware.1.0");
+    request_params_.set_ec_version("0X0A1");
+    request_params_.set_delta_okay(true);
+    request_params_.set_interactive(false);
+    request_params_.set_update_url("http://url");
+    request_params_.set_target_version_prefix("");
+    request_params_.set_rollback_allowed(false);
+    request_params_.set_is_powerwash_allowed(false);
+    request_params_.set_is_install(false);
+    request_params_.set_dlc_module_ids({});
+
     fake_system_state_.set_request_params(&request_params_);
     fake_system_state_.set_prefs(&fake_prefs_);
   }
@@ -235,8 +335,22 @@
   // about reporting UpdateEngine.Check.{Result,Reaction,DownloadError}
   // UMA statistics. Use the appropriate ::kUnset value to specify that
   // the given metric should not be reported.
-  bool TestUpdateCheck(OmahaRequestParams* request_params,
-                       const string& http_response,
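+  // |is_consumer_device|, |rollback_allowed_milestones| and |is_policy_loaded|
+  // configure the mocked policy provider installed into the action.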
+  bool TestUpdateCheck(const string& http_response,
+                       int fail_http_response_code,
+                       bool ping_only,
+                       bool is_consumer_device,
+                       int rollback_allowed_milestones,
+                       bool is_policy_loaded,
+                       ErrorCode expected_code,
+                       metrics::CheckResult expected_check_result,
+                       metrics::CheckReaction expected_check_reaction,
+                       metrics::DownloadErrorCode expected_download_error_code,
+                       OmahaResponse* out_response,
+                       brillo::Blob* out_post_data);
+
+  // Overload of TestUpdateCheck that does not supply |is_consumer_device| or
+  // |rollback_allowed_milestones| which are only required for rollback tests.
+  bool TestUpdateCheck(const string& http_response,
                        int fail_http_response_code,
                        bool ping_only,
                        ErrorCode expected_code,
@@ -246,121 +360,54 @@
                        OmahaResponse* out_response,
                        brillo::Blob* out_post_data);
 
+  void TestRollbackCheck(bool is_consumer_device,
+                         int rollback_allowed_milestones,
+                         bool is_policy_loaded,
+                         OmahaResponse* out_response);
+
+  void TestEvent(OmahaEvent* event,
+                 const string& http_response,
+                 brillo::Blob* out_post_data);
+
   // Runs and checks a ping test. |ping_only| indicates whether it should send
   // only a ping or also an updatecheck.
   void PingTest(bool ping_only);
 
   // InstallDate test helper function.
-  bool InstallDateParseHelper(const string &elapsed_days,
-                              OmahaResponse *response);
+  bool InstallDateParseHelper(const string& elapsed_days,
+                              OmahaResponse* response);
 
   // P2P test helper function.
-  void P2PTest(
-      bool initial_allow_p2p_for_downloading,
-      bool initial_allow_p2p_for_sharing,
-      bool omaha_disable_p2p_for_downloading,
-      bool omaha_disable_p2p_for_sharing,
-      bool payload_state_allow_p2p_attempt,
-      bool expect_p2p_client_lookup,
-      const string& p2p_client_result_url,
-      bool expected_allow_p2p_for_downloading,
-      bool expected_allow_p2p_for_sharing,
-      const string& expected_p2p_url);
+  void P2PTest(bool initial_allow_p2p_for_downloading,
+               bool initial_allow_p2p_for_sharing,
+               bool omaha_disable_p2p_for_downloading,
+               bool omaha_disable_p2p_for_sharing,
+               bool payload_state_allow_p2p_attempt,
+               bool expect_p2p_client_lookup,
+               const string& p2p_client_result_url,
+               bool expected_allow_p2p_for_downloading,
+               bool expected_allow_p2p_for_sharing,
+               const string& expected_p2p_url);
 
   FakeSystemState fake_system_state_;
   FakeUpdateResponse fake_update_response_;
-
-  // By default, all tests use these objects unless they replace them in the
-  // fake_system_state_.
-  OmahaRequestParams request_params_ = OmahaRequestParams{
-      &fake_system_state_,
-      constants::kOmahaPlatformName,
-      OmahaRequestParams::kOsVersion,
-      "service_pack",
-      "x86-generic",
-      kTestAppId,
-      "0.1.0.0",
-      "en-US",
-      "unittest",
-      "OEM MODEL 09235 7471",
-      "ChromeOSFirmware.1.0",
-      "0X0A1",
-      false,   // delta okay
-      false,   // interactive
-      "http://url",
-      ""};     // target_version_prefix
+  // Used by all tests.
+  OmahaRequestParams request_params_{&fake_system_state_};
 
   FakePrefs fake_prefs_;
-};
 
-namespace {
-class OmahaRequestActionTestProcessorDelegate : public ActionProcessorDelegate {
- public:
-  OmahaRequestActionTestProcessorDelegate()
-      : expected_code_(ErrorCode::kSuccess) {}
-  ~OmahaRequestActionTestProcessorDelegate() override {
-  }
-  void ProcessingDone(const ActionProcessor* processor,
-                      ErrorCode code) override {
-    brillo::MessageLoop::current()->BreakLoop();
-  }
+  OmahaRequestActionTestProcessorDelegate delegate_;
 
-  void ActionCompleted(ActionProcessor* processor,
-                       AbstractAction* action,
-                       ErrorCode code) override {
-    // make sure actions always succeed
-    if (action->Type() == OmahaRequestAction::StaticType())
-      EXPECT_EQ(expected_code_, code);
-    else
-      EXPECT_EQ(ErrorCode::kSuccess, code);
-  }
-  ErrorCode expected_code_;
-};
-}  // namespace
-
-class OutputObjectCollectorAction;
-
-template<>
-class ActionTraits<OutputObjectCollectorAction> {
- public:
-  // Does not take an object for input
-  typedef OmahaResponse InputObjectType;
-  // On success, puts the output path on output
-  typedef NoneType OutputObjectType;
-};
-
-class OutputObjectCollectorAction : public Action<OutputObjectCollectorAction> {
- public:
-  OutputObjectCollectorAction() : has_input_object_(false) {}
-  void PerformAction() {
-    // copy input object
-    has_input_object_ = HasInputObject();
-    if (has_input_object_)
-      omaha_response_ = GetInputObject();
-    processor_->ActionComplete(this, ErrorCode::kSuccess);
-  }
-  // Should never be called
-  void TerminateProcessing() {
-    CHECK(false);
-  }
-  // Debugging/logging
-  static string StaticType() {
-    return "OutputObjectCollectorAction";
-  }
-  string Type() const { return StaticType(); }
-  using InputObjectType =
-      ActionTraits<OutputObjectCollectorAction>::InputObjectType;
-  using OutputObjectType =
-      ActionTraits<OutputObjectCollectorAction>::OutputObjectType;
-  bool has_input_object_;
-  OmahaResponse omaha_response_;
+  bool test_http_fetcher_headers_{false};
 };
 
 bool OmahaRequestActionTest::TestUpdateCheck(
-    OmahaRequestParams* request_params,
     const string& http_response,
     int fail_http_response_code,
     bool ping_only,
+    bool is_consumer_device,
+    int rollback_allowed_milestones,
+    bool is_policy_loaded,
     ErrorCode expected_code,
     metrics::CheckResult expected_check_result,
     metrics::CheckReaction expected_check_reaction,
@@ -369,28 +416,47 @@
     brillo::Blob* out_post_data) {
   brillo::FakeMessageLoop loop(nullptr);
   loop.SetAsCurrent();
-  MockHttpFetcher* fetcher = new MockHttpFetcher(http_response.data(),
-                                                 http_response.size(),
-                                                 nullptr);
+  auto fetcher = std::make_unique<MockHttpFetcher>(
+      http_response.data(), http_response.size(), nullptr);
   if (fail_http_response_code >= 0) {
     fetcher->FailTransfer(fail_http_response_code);
   }
-  if (request_params)
-    fake_system_state_.set_request_params(request_params);
-  OmahaRequestAction action(&fake_system_state_,
-                            nullptr,
-                            base::WrapUnique(fetcher),
-                            ping_only);
-  OmahaRequestActionTestProcessorDelegate delegate;
-  delegate.expected_code_ = expected_code;
+  // Ensure that tests which swap out the default request_params_ have also
+  // updated fake_system_state_ to point at the replacement.
+  EXPECT_EQ(&request_params_, fake_system_state_.request_params());
 
+  auto omaha_request_action = std::make_unique<OmahaRequestAction>(
+      &fake_system_state_, nullptr, std::move(fetcher), ping_only);
+
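+  // Inject a mock policy provider so each test controls whether the device
+  // looks like a consumer device, whether device policy is loaded, and how
+  // many rollback milestones the policy allows.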
+  auto mock_policy_provider =
+      std::make_unique<NiceMock<policy::MockPolicyProvider>>();
+  EXPECT_CALL(*mock_policy_provider, IsConsumerDevice())
+      .WillRepeatedly(Return(is_consumer_device));
+
+  EXPECT_CALL(*mock_policy_provider, device_policy_is_loaded())
+      .WillRepeatedly(Return(is_policy_loaded));
+
+  const policy::MockDevicePolicy device_policy;
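+  // A negative |rollback_allowed_milestones| makes the mocked policy read
+  // report failure.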
+  const bool get_allowed_milestone_succeeds = rollback_allowed_milestones >= 0;
+  EXPECT_CALL(device_policy, GetRollbackAllowedMilestones(_))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(rollback_allowed_milestones),
+                            Return(get_allowed_milestone_succeeds)));
+
+  EXPECT_CALL(*mock_policy_provider, GetDevicePolicy())
+      .WillRepeatedly(ReturnRef(device_policy));
+  omaha_request_action->policy_provider_ = std::move(mock_policy_provider);
+
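+  // Hand the expectations to the shared processor delegate, which performs
+  // the per-action assertions once processing completes.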
+  delegate_.expected_code_ = expected_code;
+  delegate_.interactive_ = request_params_.interactive();
+  delegate_.test_http_fetcher_headers_ = test_http_fetcher_headers_;
   ActionProcessor processor;
-  processor.set_delegate(&delegate);
-  processor.EnqueueAction(&action);
+  processor.set_delegate(&delegate_);
 
-  OutputObjectCollectorAction collector_action;
-  BondActions(&action, &collector_action);
-  processor.EnqueueAction(&collector_action);
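+  // Bond a collector action to the request action to capture its
+  // OmahaResponse output.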
+  auto collector_action =
+      std::make_unique<ObjectCollectorAction<OmahaResponse>>();
+  BondActions(omaha_request_action.get(), collector_action.get());
+  processor.EnqueueAction(std::move(omaha_request_action));
+  processor.EnqueueAction(std::move(collector_action));
 
   EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
               ReportUpdateCheckMetrics(_, _, _, _))
@@ -408,35 +474,75 @@
       base::Unretained(&processor)));
   loop.Run();
   EXPECT_FALSE(loop.PendingTasks());
-  if (collector_action.has_input_object_ && out_response)
-    *out_response = collector_action.omaha_response_;
+  if (delegate_.omaha_response_ && out_response)
+    *out_response = *delegate_.omaha_response_;
   if (out_post_data)
-    *out_post_data = fetcher->post_data();
-  return collector_action.has_input_object_;
+    *out_post_data = delegate_.post_data_;
+  return delegate_.omaha_response_ != nullptr;
 }
 
-// Tests Event requests -- they should always succeed. |out_post_data|
-// may be null; if non-null, the post-data received by the mock
-// HttpFetcher is returned.
-void TestEvent(OmahaRequestParams params,
-               OmahaEvent* event,
-               const string& http_response,
-               brillo::Blob* out_post_data) {
+bool OmahaRequestActionTest::TestUpdateCheck(
+    const string& http_response,
+    int fail_http_response_code,
+    bool ping_only,
+    ErrorCode expected_code,
+    metrics::CheckResult expected_check_result,
+    metrics::CheckReaction expected_check_reaction,
+    metrics::DownloadErrorCode expected_download_error_code,
+    OmahaResponse* out_response,
+    brillo::Blob* out_post_data) {
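+  // Defaults for the rollback-related arguments: a consumer device with no
+  // device policy loaded and no rollback milestones allowed.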
+  return TestUpdateCheck(http_response,
+                         fail_http_response_code,
+                         ping_only,
+                         true,   // is_consumer_device
+                         0,      // rollback_allowed_milestones
+                         false,  // is_policy_loaded
+                         expected_code,
+                         expected_check_result,
+                         expected_check_reaction,
+                         expected_download_error_code,
+                         out_response,
+                         out_post_data);
+}
+
+void OmahaRequestActionTest::TestRollbackCheck(bool is_consumer_device,
+                                               int rollback_allowed_milestones,
+                                               bool is_policy_loaded,
+                                               OmahaResponse* out_response) {
+  fake_update_response_.deadline = "20101020";
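+  // A non-empty deadline marks the update as critical.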
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              is_consumer_device,
+                              rollback_allowed_milestones,
+                              is_policy_loaded,
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              out_response,
+                              nullptr));
+  ASSERT_TRUE(out_response->update_exists);
+}
+
+// Tests Event requests -- they should always succeed. |out_post_data| may be
+// null; if non-null, the post-data received by the mock HttpFetcher is
+// returned.
+void OmahaRequestActionTest::TestEvent(OmahaEvent* event,
+                                       const string& http_response,
+                                       brillo::Blob* out_post_data) {
   brillo::FakeMessageLoop loop(nullptr);
   loop.SetAsCurrent();
-  MockHttpFetcher* fetcher = new MockHttpFetcher(http_response.data(),
-                                                 http_response.size(),
-                                                 nullptr);
-  FakeSystemState fake_system_state;
-  fake_system_state.set_request_params(&params);
-  OmahaRequestAction action(&fake_system_state,
-                            event,
-                            base::WrapUnique(fetcher),
-                            false);
-  OmahaRequestActionTestProcessorDelegate delegate;
+
+  auto action = std::make_unique<OmahaRequestAction>(
+      &fake_system_state_,
+      event,
+      std::make_unique<MockHttpFetcher>(
+          http_response.data(), http_response.size(), nullptr),
+      false);
   ActionProcessor processor;
-  processor.set_delegate(&delegate);
-  processor.EnqueueAction(&action);
+  processor.set_delegate(&delegate_);
+  processor.EnqueueAction(std::move(action));
 
   loop.PostTask(base::Bind(
       [](ActionProcessor* processor) { processor->StartProcessing(); },
@@ -445,47 +551,42 @@
   EXPECT_FALSE(loop.PendingTasks());
 
   if (out_post_data)
-    *out_post_data = fetcher->post_data();
+    *out_post_data = delegate_.post_data_;
 }
 
 TEST_F(OmahaRequestActionTest, RejectEntities) {
   OmahaResponse response;
   fake_update_response_.include_entity = true;
-  ASSERT_FALSE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetNoUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kOmahaRequestXMLHasEntityDecl,
-                      metrics::CheckResult::kParsingError,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaRequestXMLHasEntityDecl,
+                               metrics::CheckResult::kParsingError,
+                               metrics::CheckReaction::kUnset,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
   EXPECT_FALSE(response.update_exists);
 }
 
 TEST_F(OmahaRequestActionTest, NoUpdateTest) {
   OmahaResponse response;
-  ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetNoUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kNoUpdateAvailable,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kNoUpdateAvailable,
+                              metrics::CheckReaction::kUnset,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
   EXPECT_FALSE(response.update_exists);
 }
 
 TEST_F(OmahaRequestActionTest, MultiAppNoUpdateTest) {
   OmahaResponse response;
   fake_update_response_.multi_app_no_update = true;
-  ASSERT_TRUE(TestUpdateCheck(nullptr,  // request_params
-                              fake_update_response_.GetNoUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -500,8 +601,7 @@
 TEST_F(OmahaRequestActionTest, MultiAppNoPartialUpdateTest) {
   OmahaResponse response;
   fake_update_response_.multi_app_no_update = true;
-  ASSERT_TRUE(TestUpdateCheck(nullptr,  // request_params
-                              fake_update_response_.GetUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -516,7 +616,6 @@
 TEST_F(OmahaRequestActionTest, NoSelfUpdateTest) {
   OmahaResponse response;
   ASSERT_TRUE(TestUpdateCheck(
-      nullptr,  // request_params
       "<response><app><updatecheck status=\"ok\"><manifest><actions><action "
       "event=\"postinstall\" noupdate=\"true\"/></actions>"
       "</manifest></updatecheck></app></response>",
@@ -536,17 +635,15 @@
 TEST_F(OmahaRequestActionTest, ValidUpdateTest) {
   OmahaResponse response;
   fake_update_response_.deadline = "20101020";
-  ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kUpdating,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
   EXPECT_TRUE(response.update_exists);
   EXPECT_EQ(fake_update_response_.version, response.version);
   EXPECT_EQ("", response.system_version);
@@ -558,6 +655,7 @@
   EXPECT_EQ(true, response.packages[0].is_delta);
   EXPECT_EQ(fake_update_response_.prompt == "true", response.prompt);
   EXPECT_EQ(fake_update_response_.deadline, response.deadline);
+  EXPECT_FALSE(response.powerwash_required);
   // Omaha cohort attributes are not set in the response, so they should not be
   // persisted.
   EXPECT_FALSE(fake_prefs_.Exists(kPrefsOmahaCohort));
@@ -568,8 +666,7 @@
 TEST_F(OmahaRequestActionTest, MultiPackageUpdateTest) {
   OmahaResponse response;
   fake_update_response_.multi_package = true;
-  ASSERT_TRUE(TestUpdateCheck(nullptr,  // request_params
-                              fake_update_response_.GetUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -598,8 +695,7 @@
 TEST_F(OmahaRequestActionTest, MultiAppUpdateTest) {
   OmahaResponse response;
   fake_update_response_.multi_app = true;
-  ASSERT_TRUE(TestUpdateCheck(nullptr,  // request_params
-                              fake_update_response_.GetUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -631,8 +727,7 @@
   // trigger the lining up of the app and system versions
   request_params_.set_system_app_id(fake_update_response_.app_id2);
 
-  ASSERT_TRUE(TestUpdateCheck(nullptr,  // request_params
-                              fake_update_response_.GetUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -663,8 +758,7 @@
   OmahaResponse response;
   fake_update_response_.multi_app = true;
   fake_update_response_.multi_app_self_update = true;
-  ASSERT_TRUE(TestUpdateCheck(nullptr,  // request_params
-                              fake_update_response_.GetUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -692,8 +786,7 @@
   OmahaResponse response;
   fake_update_response_.multi_app = true;
   fake_update_response_.multi_package = true;
-  ASSERT_TRUE(TestUpdateCheck(nullptr,  // request_params
-                              fake_update_response_.GetUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -726,31 +819,52 @@
   EXPECT_EQ(false, response.packages[2].is_delta);
 }
 
-TEST_F(OmahaRequestActionTest, ExtraHeadersSentTest) {
-  const string http_response = "<?xml invalid response";
+TEST_F(OmahaRequestActionTest, PowerwashTest) {
+  OmahaResponse response;
+  fake_update_response_.powerwash = true;
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
+  EXPECT_TRUE(response.update_exists);
+  EXPECT_TRUE(response.powerwash_required);
+}
+
+TEST_F(OmahaRequestActionTest, ExtraHeadersSentInteractiveTest) {
+  OmahaResponse response;
   request_params_.set_interactive(true);
+  test_http_fetcher_headers_ = true;
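+  // With test_http_fetcher_headers_ set, the shared delegate is expected to
+  // verify the X-GoogleUpdate-* request headers (previously checked inline in
+  // this test); the malformed XML simply ends the request quickly.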
+  ASSERT_FALSE(TestUpdateCheck("invalid xml>",
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaRequestXMLParseError,
+                               metrics::CheckResult::kParsingError,
+                               metrics::CheckReaction::kUnset,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
+  EXPECT_FALSE(response.update_exists);
+}
 
-  brillo::FakeMessageLoop loop(nullptr);
-  loop.SetAsCurrent();
-
-  MockHttpFetcher* fetcher =
-      new MockHttpFetcher(http_response.data(), http_response.size(), nullptr);
-  OmahaRequestAction action(&fake_system_state_, nullptr,
-                            base::WrapUnique(fetcher), false);
-  ActionProcessor processor;
-  processor.EnqueueAction(&action);
-
-  loop.PostTask(base::Bind(
-      [](ActionProcessor* processor) { processor->StartProcessing(); },
-      base::Unretained(&processor)));
-  loop.Run();
-  EXPECT_FALSE(loop.PendingTasks());
-
-  // Check that the headers were set in the fetcher during the action. Note that
-  // we set this request as "interactive".
-  EXPECT_EQ("fg", fetcher->GetHeader("X-GoogleUpdate-Interactivity"));
-  EXPECT_EQ(kTestAppId, fetcher->GetHeader("X-GoogleUpdate-AppId"));
-  EXPECT_NE("", fetcher->GetHeader("X-GoogleUpdate-Updater"));
+TEST_F(OmahaRequestActionTest, ExtraHeadersSentNoInteractiveTest) {
+  OmahaResponse response;
+  request_params_.set_interactive(false);
+  test_http_fetcher_headers_ = true;
+  ASSERT_FALSE(TestUpdateCheck("invalid xml>",
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaRequestXMLParseError,
+                               metrics::CheckResult::kParsingError,
+                               metrics::CheckReaction::kUnset,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
+  EXPECT_FALSE(response.update_exists);
 }
 
 TEST_F(OmahaRequestActionTest, ValidUpdateBlockedByConnection) {
@@ -761,27 +875,188 @@
   fake_system_state_.set_connection_manager(&mock_cm);
 
   EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
-      .WillRepeatedly(
-          DoAll(SetArgPointee<0>(ConnectionType::kEthernet),
-                SetArgPointee<1>(ConnectionTethering::kUnknown),
-                Return(true)));
+      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kEthernet),
+                            SetArgPointee<1>(ConnectionTethering::kUnknown),
+                            Return(true)));
   EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kEthernet, _))
       .WillRepeatedly(Return(false));
 
-  ASSERT_FALSE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kOmahaUpdateIgnoredPerPolicy,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kIgnored,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaUpdateIgnoredPerPolicy,
+                               metrics::CheckResult::kUpdateAvailable,
+                               metrics::CheckReaction::kIgnored,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
   EXPECT_FALSE(response.update_exists);
 }
 
+TEST_F(OmahaRequestActionTest, ValidUpdateOverCellularAllowedByDevicePolicy) {
+  // Verifies that an update over cellular is allowed when the device policy
+  // permits it.
+  OmahaResponse response;
+  MockConnectionManager mock_cm;
+
+  fake_system_state_.set_connection_manager(&mock_cm);
+
+  EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular),
+                            SetArgPointee<1>(ConnectionTethering::kUnknown),
+                            Return(true)));
+  EXPECT_CALL(mock_cm, IsAllowedConnectionTypesForUpdateSet())
+      .WillRepeatedly(Return(true));
+  EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _))
+      .WillRepeatedly(Return(true));
+
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
+  EXPECT_TRUE(response.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, ValidUpdateOverCellularBlockedByDevicePolicy) {
+  // Verifies that an update over cellular is blocked when the device policy
+  // forbids it.
+  OmahaResponse response;
+  MockConnectionManager mock_cm;
+
+  fake_system_state_.set_connection_manager(&mock_cm);
+
+  EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular),
+                            SetArgPointee<1>(ConnectionTethering::kUnknown),
+                            Return(true)));
+  EXPECT_CALL(mock_cm, IsAllowedConnectionTypesForUpdateSet())
+      .WillRepeatedly(Return(true));
+  EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _))
+      .WillRepeatedly(Return(false));
+
+  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaUpdateIgnoredPerPolicy,
+                               metrics::CheckResult::kUpdateAvailable,
+                               metrics::CheckReaction::kIgnored,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
+  EXPECT_FALSE(response.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest,
+       ValidUpdateOverCellularAllowedByUserPermissionTrue) {
+  // Verifies that, when device policy is not set, an update over cellular is
+  // allowed because the user permission pref for cellular updates is true.
+  OmahaResponse response;
+  MockConnectionManager mock_cm;
+
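+  // The user has granted permission for updates over cellular via the pref.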
+  fake_prefs_.SetBoolean(kPrefsUpdateOverCellularPermission, true);
+  fake_system_state_.set_connection_manager(&mock_cm);
+
+  EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular),
+                            SetArgPointee<1>(ConnectionTethering::kUnknown),
+                            Return(true)));
+  EXPECT_CALL(mock_cm, IsAllowedConnectionTypesForUpdateSet())
+      .WillRepeatedly(Return(false));
+  EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _))
+      .WillRepeatedly(Return(true));
+
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
+  EXPECT_TRUE(response.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest,
+       ValidUpdateOverCellularBlockedByUpdateTargetNotMatch) {
+  // Verifies that, when device policy is not set and the cellular permission
+  // pref is false or missing, an update over cellular is blocked because the
+  // stored update target does not match the Omaha response.
+  OmahaResponse response;
+  MockConnectionManager mock_cm;
+  // A version different from the one in the Omaha response.
+  string diff_version = "99.99.99";
+  // A size different from the one in the Omaha response.
+  int64_t diff_size = 999;
+
+  fake_prefs_.SetString(kPrefsUpdateOverCellularTargetVersion, diff_version);
+  fake_prefs_.SetInt64(kPrefsUpdateOverCellularTargetSize, diff_size);
+  // The device is on cellular with no policy restricting connection types.
+  fake_system_state_.set_connection_manager(&mock_cm);
+
+  EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular),
+                            SetArgPointee<1>(ConnectionTethering::kUnknown),
+                            Return(true)));
+  EXPECT_CALL(mock_cm, IsAllowedConnectionTypesForUpdateSet())
+      .WillRepeatedly(Return(false));
+  EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _))
+      .WillRepeatedly(Return(true));
+
+  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaUpdateIgnoredOverCellular,
+                               metrics::CheckResult::kUpdateAvailable,
+                               metrics::CheckReaction::kIgnored,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
+  EXPECT_FALSE(response.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest,
+       ValidUpdateOverCellularAllowedByUpdateTargetMatch) {
+  // Verifies that, when device policy is not set and the cellular permission
+  // pref is false or missing, an update over cellular is allowed because the
+  // stored update target matches the Omaha response.
+  OmahaResponse response;
+  MockConnectionManager mock_cm;
+  // The same version as in the Omaha response.
+  string new_version = fake_update_response_.version;
+  // The same size as in the Omaha response.
+  int64_t new_size = fake_update_response_.size;
+
+  fake_prefs_.SetString(kPrefsUpdateOverCellularTargetVersion, new_version);
+  fake_prefs_.SetInt64(kPrefsUpdateOverCellularTargetSize, new_size);
+  fake_system_state_.set_connection_manager(&mock_cm);
+
+  EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular),
+                            SetArgPointee<1>(ConnectionTethering::kUnknown),
+                            Return(true)));
+  EXPECT_CALL(mock_cm, IsAllowedConnectionTypesForUpdateSet())
+      .WillRepeatedly(Return(false));
+  EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _))
+      .WillRepeatedly(Return(true));
+
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
+  EXPECT_TRUE(response.update_exists);
+}
+
 TEST_F(OmahaRequestActionTest, ValidUpdateBlockedByRollback) {
   string rollback_version = "1234.0.0";
   OmahaResponse response;
@@ -790,33 +1065,30 @@
   fake_system_state_.set_payload_state(&mock_payload_state);
 
   EXPECT_CALL(mock_payload_state, GetRollbackVersion())
-    .WillRepeatedly(Return(rollback_version));
+      .WillRepeatedly(Return(rollback_version));
 
   fake_update_response_.version = rollback_version;
-  ASSERT_FALSE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kOmahaUpdateIgnoredPerPolicy,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kIgnored,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaUpdateIgnoredPerPolicy,
+                               metrics::CheckResult::kUpdateAvailable,
+                               metrics::CheckReaction::kIgnored,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
   EXPECT_FALSE(response.update_exists);
 }
 
-// Verify that update checks called during OOBE will only try to download
-// an update if the response includes a non-empty deadline field.
+// Verify that update checks called during OOBE will not try to download an
+// update if the response doesn't include the deadline field.
 TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBE) {
   OmahaResponse response;
+  fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
 
   // TODO(senj): set better default value for metrics::checkresult in
   // OmahaRequestAction::ActionCompleted.
-  fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
-  ASSERT_FALSE(TestUpdateCheck(nullptr,  // request_params
-                               fake_update_response_.GetUpdateResponse(),
+  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                                -1,
                                false,  // ping_only
                                ErrorCode::kNonCriticalUpdateInOOBE,
@@ -826,11 +1098,16 @@
                                &response,
                                nullptr));
   EXPECT_FALSE(response.update_exists);
+}
 
-  // The IsOOBEComplete() value is ignored when the OOBE flow is not enabled.
+// Verify that the IsOOBEComplete() value is ignored when the OOBE flow is not
+// enabled.
+TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBEDisabled) {
+  OmahaResponse response;
+  fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
   fake_system_state_.fake_hardware()->SetIsOOBEEnabled(false);
-  ASSERT_TRUE(TestUpdateCheck(nullptr,  // request_params
-                              fake_update_response_.GetUpdateResponse(),
+
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -840,131 +1117,173 @@
                               &response,
                               nullptr));
   EXPECT_TRUE(response.update_exists);
-  fake_system_state_.fake_hardware()->SetIsOOBEEnabled(true);
+}
 
-  // The payload is applied when a deadline was set in the response.
+// Verify that update checks called during OOBE will still try to download an
+// update if the response includes the deadline field.
+TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBEDeadlineSet) {
+  OmahaResponse response;
+  fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
   fake_update_response_.deadline = "20101020";
-  ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kUpdating,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
   EXPECT_TRUE(response.update_exists);
 }
 
+// Verify that update checks called during OOBE will not try to download an
+// update if a rollback happened, even when the response includes the deadline
+// field.
+TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBERollback) {
+  OmahaResponse response;
+  fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
+  fake_update_response_.deadline = "20101020";
+  EXPECT_CALL(*(fake_system_state_.mock_payload_state()), GetRollbackHappened())
+      .WillOnce(Return(true));
+
+  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kNonCriticalUpdateInOOBE,
+                               metrics::CheckResult::kParsingError,
+                               metrics::CheckReaction::kUnset,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
+  EXPECT_FALSE(response.update_exists);
+}
+
+// Verify that non-critical updates attempted over a cellular network, i.e.
+// when the update would need user permission, are skipped by reporting the
+// kNonCriticalUpdateInOOBE error code. Reporting the
+// kOmahaUpdateIgnoredOverCellular error in this case might cause undesired UX
+// in OOBE (warning the user about an update that will be skipped).
+TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesInOOBEOverCellular) {
+  OmahaResponse response;
+  fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
+
+  MockConnectionManager mock_cm;
+  fake_system_state_.set_connection_manager(&mock_cm);
+
+  EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular),
+                            SetArgPointee<1>(ConnectionTethering::kUnknown),
+                            Return(true)));
+  EXPECT_CALL(mock_cm, IsAllowedConnectionTypesForUpdateSet())
+      .WillRepeatedly(Return(false));
+
+  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kNonCriticalUpdateInOOBE,
+                               metrics::CheckResult::kParsingError,
+                               metrics::CheckReaction::kUnset,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
+  EXPECT_FALSE(response.update_exists);
+}
+
 TEST_F(OmahaRequestActionTest, WallClockBasedWaitAloneCausesScattering) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
-  params.set_wall_clock_based_wait_enabled(true);
-  params.set_update_check_count_wait_enabled(false);
-  params.set_waiting_period(TimeDelta::FromDays(2));
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_update_check_count_wait_enabled(false);
+  request_params_.set_waiting_period(TimeDelta::FromDays(2));
 
-  ASSERT_FALSE(
-      TestUpdateCheck(&params,
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kOmahaUpdateDeferredPerPolicy,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kDeferring,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  fake_system_state_.fake_clock()->SetWallclockTime(Time::Now());
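+  // Scattering computes its wait window from the wall clock set above.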
+
+  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaUpdateDeferredPerPolicy,
+                               metrics::CheckResult::kUpdateAvailable,
+                               metrics::CheckReaction::kDeferring,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
   EXPECT_FALSE(response.update_exists);
 
   // Verify if we are interactive check we don't defer.
-  params.set_interactive(true);
-  ASSERT_TRUE(
-      TestUpdateCheck(&params,
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kUpdating,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  request_params_.set_interactive(true);
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
   EXPECT_TRUE(response.update_exists);
 }
 
 TEST_F(OmahaRequestActionTest, NoWallClockBasedWaitCausesNoScattering) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
-  params.set_wall_clock_based_wait_enabled(false);
-  params.set_waiting_period(TimeDelta::FromDays(2));
+  request_params_.set_wall_clock_based_wait_enabled(false);
+  request_params_.set_waiting_period(TimeDelta::FromDays(2));
+  request_params_.set_update_check_count_wait_enabled(true);
+  request_params_.set_min_update_checks_needed(1);
+  request_params_.set_max_update_checks_allowed(8);
 
-  params.set_update_check_count_wait_enabled(true);
-  params.set_min_update_checks_needed(1);
-  params.set_max_update_checks_allowed(8);
-
-  ASSERT_TRUE(
-      TestUpdateCheck(&params,
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kUpdating,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
   EXPECT_TRUE(response.update_exists);
 }
 
 TEST_F(OmahaRequestActionTest, ZeroMaxDaysToScatterCausesNoScattering) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
-  params.set_wall_clock_based_wait_enabled(true);
-  params.set_waiting_period(TimeDelta::FromDays(2));
-
-  params.set_update_check_count_wait_enabled(true);
-  params.set_min_update_checks_needed(1);
-  params.set_max_update_checks_allowed(8);
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_waiting_period(TimeDelta::FromDays(2));
+  request_params_.set_update_check_count_wait_enabled(true);
+  request_params_.set_min_update_checks_needed(1);
+  request_params_.set_max_update_checks_allowed(8);
 
   fake_update_response_.max_days_to_scatter = "0";
-  ASSERT_TRUE(
-      TestUpdateCheck(&params,
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kUpdating,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
   EXPECT_TRUE(response.update_exists);
 }
 
-
 TEST_F(OmahaRequestActionTest, ZeroUpdateCheckCountCausesNoScattering) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
-  params.set_wall_clock_based_wait_enabled(true);
-  params.set_waiting_period(TimeDelta());
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_waiting_period(TimeDelta());
+  request_params_.set_update_check_count_wait_enabled(true);
+  request_params_.set_min_update_checks_needed(0);
+  request_params_.set_max_update_checks_allowed(0);
 
-  params.set_update_check_count_wait_enabled(true);
-  params.set_min_update_checks_needed(0);
-  params.set_max_update_checks_allowed(0);
+  fake_system_state_.fake_clock()->SetWallclockTime(Time::Now());
 
-  ASSERT_TRUE(TestUpdateCheck(
-                      &params,
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kUpdating,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
 
   int64_t count;
   ASSERT_TRUE(fake_prefs_.GetInt64(kPrefsUpdateCheckCount, &count));
@@ -974,25 +1293,23 @@
 
 TEST_F(OmahaRequestActionTest, NonZeroUpdateCheckCountCausesScattering) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
-  params.set_wall_clock_based_wait_enabled(true);
-  params.set_waiting_period(TimeDelta());
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_waiting_period(TimeDelta());
+  request_params_.set_update_check_count_wait_enabled(true);
+  request_params_.set_min_update_checks_needed(1);
+  request_params_.set_max_update_checks_allowed(8);
 
-  params.set_update_check_count_wait_enabled(true);
-  params.set_min_update_checks_needed(1);
-  params.set_max_update_checks_allowed(8);
+  fake_system_state_.fake_clock()->SetWallclockTime(Time::Now());
 
-  ASSERT_FALSE(TestUpdateCheck(
-                      &params,
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kOmahaUpdateDeferredPerPolicy,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kDeferring,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaUpdateDeferredPerPolicy,
+                               metrics::CheckResult::kUpdateAvailable,
+                               metrics::CheckReaction::kDeferring,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
 
   int64_t count;
   ASSERT_TRUE(fake_prefs_.GetInt64(kPrefsUpdateCheckCount, &count));
@@ -1000,44 +1317,40 @@
   EXPECT_FALSE(response.update_exists);
 
   // Verify if we are interactive check we don't defer.
-  params.set_interactive(true);
-  ASSERT_TRUE(
-      TestUpdateCheck(&params,
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kUpdating,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  request_params_.set_interactive(true);
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
   EXPECT_TRUE(response.update_exists);
 }
 
 TEST_F(OmahaRequestActionTest, ExistingUpdateCheckCountCausesScattering) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
-  params.set_wall_clock_based_wait_enabled(true);
-  params.set_waiting_period(TimeDelta());
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_waiting_period(TimeDelta());
+  request_params_.set_update_check_count_wait_enabled(true);
+  request_params_.set_min_update_checks_needed(1);
+  request_params_.set_max_update_checks_allowed(8);
 
-  params.set_update_check_count_wait_enabled(true);
-  params.set_min_update_checks_needed(1);
-  params.set_max_update_checks_allowed(8);
+  fake_system_state_.fake_clock()->SetWallclockTime(Time::Now());
 
   ASSERT_TRUE(fake_prefs_.SetInt64(kPrefsUpdateCheckCount, 5));
 
-  ASSERT_FALSE(TestUpdateCheck(
-                      &params,
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kOmahaUpdateDeferredPerPolicy,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kDeferring,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaUpdateDeferredPerPolicy,
+                               metrics::CheckResult::kUpdateAvailable,
+                               metrics::CheckReaction::kDeferring,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
 
   int64_t count;
   ASSERT_TRUE(fake_prefs_.GetInt64(kPrefsUpdateCheckCount, &count));
@@ -1047,31 +1360,65 @@
   EXPECT_FALSE(response.update_exists);
 
   // Verify if we are interactive check we don't defer.
-  params.set_interactive(true);
-  ASSERT_TRUE(
-      TestUpdateCheck(&params,
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kUpdating,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  request_params_.set_interactive(true);
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
+  EXPECT_TRUE(response.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, StagingTurnedOnCausesScattering) {
+  // If staging is on, the value for max days to scatter should be ignored, and
+  // staging's scatter value should be used.
+  OmahaResponse response;
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_waiting_period(TimeDelta::FromDays(6));
+  request_params_.set_update_check_count_wait_enabled(false);
+
+  fake_system_state_.fake_clock()->SetWallclockTime(Time::Now());
+
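+  // Simulate staging having already chosen a wait period for this device.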
+  ASSERT_TRUE(fake_prefs_.SetInt64(kPrefsWallClockStagingWaitPeriod, 6));
+  // This should not prevent scattering due to staging.
+  fake_update_response_.max_days_to_scatter = "0";
+  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaUpdateDeferredPerPolicy,
+                               metrics::CheckResult::kUpdateAvailable,
+                               metrics::CheckReaction::kDeferring,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
+  EXPECT_FALSE(response.update_exists);
+
+  // Interactive updates should not be affected.
+  request_params_.set_interactive(true);
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
   EXPECT_TRUE(response.update_exists);
 }
 
 TEST_F(OmahaRequestActionTest, CohortsArePersisted) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
   fake_update_response_.include_cohorts = true;
   fake_update_response_.cohort = "s/154454/8479665";
   fake_update_response_.cohorthint = "please-put-me-on-beta";
   fake_update_response_.cohortname = "stable";
 
-  ASSERT_TRUE(TestUpdateCheck(&params,
-                              fake_update_response_.GetUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -1094,7 +1441,6 @@
 
 TEST_F(OmahaRequestActionTest, CohortsAreUpdated) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
   EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohort, "old_value"));
   EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohortHint, "old_hint"));
   EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohortName, "old_name"));
@@ -1103,8 +1449,7 @@
   fake_update_response_.cohorthint = "please-put-me-on-beta";
   fake_update_response_.cohortname = "";
 
-  ASSERT_TRUE(TestUpdateCheck(&params,
-                              fake_update_response_.GetUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -1126,11 +1471,9 @@
 
 TEST_F(OmahaRequestActionTest, CohortsAreNotModifiedWhenMissing) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
   EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohort, "old_value"));
 
-  ASSERT_TRUE(TestUpdateCheck(&params,
-                              fake_update_response_.GetUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -1150,14 +1493,12 @@
 
 TEST_F(OmahaRequestActionTest, CohortsArePersistedWhenNoUpdate) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
   fake_update_response_.include_cohorts = true;
   fake_update_response_.cohort = "s/154454/8479665";
   fake_update_response_.cohorthint = "please-put-me-on-beta";
   fake_update_response_.cohortname = "stable";
 
-  ASSERT_TRUE(TestUpdateCheck(&params,
-                              fake_update_response_.GetNoUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -1180,15 +1521,13 @@
 
 TEST_F(OmahaRequestActionTest, MultiAppCohortTest) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
   fake_update_response_.multi_app = true;
   fake_update_response_.include_cohorts = true;
   fake_update_response_.cohort = "s/154454/8479665";
   fake_update_response_.cohorthint = "please-put-me-on-beta";
   fake_update_response_.cohortname = "stable";
 
-  ASSERT_TRUE(TestUpdateCheck(&params,
-                              fake_update_response_.GetUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -1215,19 +1554,15 @@
   brillo::FakeMessageLoop loop(nullptr);
   loop.SetAsCurrent();
 
-  OmahaRequestParams params = request_params_;
-  fake_system_state_.set_request_params(&params);
-  OmahaRequestAction action(
+  auto action = std::make_unique<OmahaRequestAction>(
       &fake_system_state_,
       nullptr,
-      std::make_unique<MockHttpFetcher>(http_response.data(),
-                                        http_response.size(),
-                                        nullptr),
+      std::make_unique<MockHttpFetcher>(
+          http_response.data(), http_response.size(), nullptr),
       false);
-  OmahaRequestActionTestProcessorDelegate delegate;
   ActionProcessor processor;
-  processor.set_delegate(&delegate);
-  processor.EnqueueAction(&action);
+  processor.set_delegate(&delegate_);
+  processor.EnqueueAction(std::move(action));
 
   loop.PostTask(base::Bind(
       [](ActionProcessor* processor) { processor->StartProcessing(); },
@@ -1239,40 +1574,35 @@
 
 TEST_F(OmahaRequestActionTest, InvalidXmlTest) {
   OmahaResponse response;
-  ASSERT_FALSE(
-      TestUpdateCheck(nullptr,  // request_params
-                      "invalid xml>",
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kOmahaRequestXMLParseError,
-                      metrics::CheckResult::kParsingError,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_FALSE(TestUpdateCheck("invalid xml>",
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaRequestXMLParseError,
+                               metrics::CheckResult::kParsingError,
+                               metrics::CheckReaction::kUnset,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
   EXPECT_FALSE(response.update_exists);
 }
 
 TEST_F(OmahaRequestActionTest, EmptyResponseTest) {
   OmahaResponse response;
-  ASSERT_FALSE(
-      TestUpdateCheck(nullptr,  // request_params
-                      "",
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kOmahaRequestEmptyResponseError,
-                      metrics::CheckResult::kParsingError,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_FALSE(TestUpdateCheck("",
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaRequestEmptyResponseError,
+                               metrics::CheckResult::kParsingError,
+                               metrics::CheckReaction::kUnset,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
   EXPECT_FALSE(response.update_exists);
 }
 
 TEST_F(OmahaRequestActionTest, MissingStatusTest) {
   OmahaResponse response;
   ASSERT_FALSE(TestUpdateCheck(
-      nullptr,  // request_params
       "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response protocol=\"3.0\">"
       "<daystart elapsed_seconds=\"100\"/>"
       "<app appid=\"foo\" status=\"ok\">"
@@ -1292,7 +1622,6 @@
 TEST_F(OmahaRequestActionTest, InvalidStatusTest) {
   OmahaResponse response;
   ASSERT_FALSE(TestUpdateCheck(
-      nullptr,  // request_params
       "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response protocol=\"3.0\">"
       "<daystart elapsed_seconds=\"100\"/>"
       "<app appid=\"foo\" status=\"ok\">"
@@ -1312,7 +1641,6 @@
 TEST_F(OmahaRequestActionTest, MissingNodesetTest) {
   OmahaResponse response;
   ASSERT_FALSE(TestUpdateCheck(
-      nullptr,  // request_params
       "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response protocol=\"3.0\">"
       "<daystart elapsed_seconds=\"100\"/>"
       "<app appid=\"foo\" status=\"ok\">"
@@ -1343,18 +1671,14 @@
       "<packages><package hash=\"not-used\" name=\"f\" "
       "size=\"587\" hash_sha256=\"lkq34j5345\"/></packages>"
       "<actions><action event=\"postinstall\" "
-      "ChromeOSVersion=\"10.2.3.4\" "
       "Prompt=\"false\" "
-      "IsDelta=\"true\" "
       "IsDeltaPayload=\"false\" "
       "sha256=\"not-used\" "
-      "needsadmin=\"true\" "
       "/></actions></manifest></updatecheck></app></response>";
   LOG(INFO) << "Input Response = " << input_response;
 
   OmahaResponse response;
-  ASSERT_TRUE(TestUpdateCheck(nullptr,  // request_params
-                              input_response,
+  ASSERT_TRUE(TestUpdateCheck(input_response,
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -1394,17 +1718,16 @@
   loop.SetAsCurrent();
 
   string http_response("doesn't matter");
-  OmahaRequestAction action(
+  auto action = std::make_unique<OmahaRequestAction>(
       &fake_system_state_,
       nullptr,
-      std::make_unique<MockHttpFetcher>(http_response.data(),
-                                        http_response.size(),
-                                        nullptr),
+      std::make_unique<MockHttpFetcher>(
+          http_response.data(), http_response.size(), nullptr),
       false);
   TerminateEarlyTestProcessorDelegate delegate;
   ActionProcessor processor;
   processor.set_delegate(&delegate);
-  processor.EnqueueAction(&action);
+  processor.EnqueueAction(std::move(action));
 
   loop.PostTask(base::Bind(&TerminateTransferTestStarter, &processor));
   loop.Run();
@@ -1436,39 +1759,26 @@
   brillo::Blob post_data;
 
   // Make sure XML Encode is being called on the params
-  OmahaRequestParams params(&fake_system_state_,
-                            constants::kOmahaPlatformName,
-                            OmahaRequestParams::kOsVersion,
-                            "testtheservice_pack>",
-                            "x86 generic<id",
-                            kTestAppId,
-                            "0.1.0.0",
-                            "en-US",
-                            "unittest_track&lt;",
-                            "<OEM MODEL>",
-                            "ChromeOSFirmware.1.0",
-                            "EC100",
-                            false,   // delta okay
-                            false,   // interactive
-                            "http://url",
-                            "");     // target_version_prefix
+  request_params_.set_os_sp("testtheservice_pack>");
+  request_params_.set_os_board("x86 generic<id");
+  request_params_.set_current_channel("unittest_track&lt;");
+  request_params_.set_target_channel("unittest_track&lt;");
+  request_params_.set_hwid("<OEM MODEL>");
   fake_prefs_.SetString(kPrefsOmahaCohort, "evil\nstring");
   fake_prefs_.SetString(kPrefsOmahaCohortHint, "evil&string\\");
-  fake_prefs_.SetString(kPrefsOmahaCohortName,
-                        base::JoinString(
-                            vector<string>(100, "My spoon is too big."), " "));
+  fake_prefs_.SetString(
+      kPrefsOmahaCohortName,
+      base::JoinString(vector<string>(100, "My spoon is too big."), " "));
   OmahaResponse response;
-  ASSERT_FALSE(
-      TestUpdateCheck(&params,
-                      "invalid xml>",
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kOmahaRequestXMLParseError,
-                      metrics::CheckResult::kParsingError,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      &post_data));
+  ASSERT_FALSE(TestUpdateCheck("invalid xml>",
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaRequestXMLParseError,
+                               metrics::CheckResult::kParsingError,
+                               metrics::CheckReaction::kUnset,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               &post_data));
   // convert post_data to string
   string post_str(post_data.begin(), post_data.end());
   EXPECT_NE(string::npos, post_str.find("testtheservice_pack&gt;"));
@@ -1492,17 +1802,15 @@
   fake_update_response_.deadline = "&lt;20110101";
   fake_update_response_.more_info_url = "testthe&lt;url";
   fake_update_response_.codebase = "testthe&amp;codebase/";
-  ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kUpdating,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
 
   EXPECT_EQ("testthe<url", response.more_info_url);
   EXPECT_EQ("testthe&codebase/file.signed",
@@ -1514,17 +1822,15 @@
   OmahaResponse response;
   // overflows int32_t:
   fake_update_response_.size = 123123123123123ull;
-  ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kUpdating,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
 
   EXPECT_EQ(fake_update_response_.size, response.packages[0].size);
 }
@@ -1539,8 +1845,7 @@
   // An existing but empty previous version means that we didn't reboot to a new
   // update, therefore, no need to update the previous version.
   EXPECT_CALL(prefs, SetString(kPrefsPreviousVersion, _)).Times(0);
-  ASSERT_FALSE(TestUpdateCheck(nullptr,  // request_params
-                               "invalid xml>",
+  ASSERT_FALSE(TestUpdateCheck("invalid xml>",
                                -1,
                                false,  // ping_only
                                ErrorCode::kOmahaRequestXMLParseError,
@@ -1551,25 +1856,21 @@
                                &post_data));
   // convert post_data to string
   string post_str(post_data.begin(), post_data.end());
-  EXPECT_NE(post_str.find(
-      "        <ping active=\"1\" a=\"-1\" r=\"-1\"></ping>\n"
-      "        <updatecheck targetversionprefix=\"\"></updatecheck>\n"),
+  EXPECT_NE(
+      post_str.find("        <ping active=\"1\" a=\"-1\" r=\"-1\"></ping>\n"
+                    "        <updatecheck></updatecheck>\n"),
       string::npos);
   EXPECT_NE(post_str.find("hardware_class=\"OEM MODEL 09235 7471\""),
             string::npos);
-  EXPECT_NE(post_str.find("fw_version=\"ChromeOSFirmware.1.0\""),
-            string::npos);
-  EXPECT_NE(post_str.find("ec_version=\"0X0A1\""),
-            string::npos);
+  EXPECT_NE(post_str.find("fw_version=\"ChromeOSFirmware.1.0\""), string::npos);
+  EXPECT_NE(post_str.find("ec_version=\"0X0A1\""), string::npos);
   // No <event> tag should be sent if we didn't reboot to an update.
   EXPECT_EQ(post_str.find("<event"), string::npos);
 }
 
-
 TEST_F(OmahaRequestActionTest, FormatSuccessEventOutputTest) {
   brillo::Blob post_data;
-  TestEvent(request_params_,
-            new OmahaEvent(OmahaEvent::kTypeUpdateDownloadStarted),
+  TestEvent(new OmahaEvent(OmahaEvent::kTypeUpdateDownloadStarted),
             "invalid xml>",
             &post_data);
   // convert post_data to string
@@ -1585,8 +1886,7 @@
 
 TEST_F(OmahaRequestActionTest, FormatErrorEventOutputTest) {
   brillo::Blob post_data;
-  TestEvent(request_params_,
-            new OmahaEvent(OmahaEvent::kTypeDownloadComplete,
+  TestEvent(new OmahaEvent(OmahaEvent::kTypeDownloadComplete,
                            OmahaEvent::kResultError,
                            ErrorCode::kError),
             "invalid xml>",
@@ -1605,26 +1905,19 @@
 
 TEST_F(OmahaRequestActionTest, IsEventTest) {
   string http_response("doesn't matter");
-  // Create a copy of the OmahaRequestParams to reuse it later.
-  OmahaRequestParams params = request_params_;
-  fake_system_state_.set_request_params(&params);
   OmahaRequestAction update_check_action(
       &fake_system_state_,
       nullptr,
-      std::make_unique<MockHttpFetcher>(http_response.data(),
-                                        http_response.size(),
-                                        nullptr),
+      std::make_unique<MockHttpFetcher>(
+          http_response.data(), http_response.size(), nullptr),
       false);
   EXPECT_FALSE(update_check_action.IsEvent());
 
-  params = request_params_;
-  fake_system_state_.set_request_params(&params);
   OmahaRequestAction event_action(
       &fake_system_state_,
       new OmahaEvent(OmahaEvent::kTypeUpdateComplete),
-      std::make_unique<MockHttpFetcher>(http_response.data(),
-                                        http_response.size(),
-                                        nullptr),
+      std::make_unique<MockHttpFetcher>(
+          http_response.data(), http_response.size(), nullptr),
       false);
   EXPECT_TRUE(event_action.IsEvent());
 }
@@ -1634,24 +1927,10 @@
     bool delta_okay = i == 1;
     const char* delta_okay_str = delta_okay ? "true" : "false";
     brillo::Blob post_data;
-    OmahaRequestParams params(&fake_system_state_,
-                              constants::kOmahaPlatformName,
-                              OmahaRequestParams::kOsVersion,
-                              "service_pack",
-                              "x86-generic",
-                              kTestAppId,
-                              "0.1.0.0",
-                              "en-US",
-                              "unittest_track",
-                              "OEM MODEL REV 1234",
-                              "ChromeOSFirmware.1.0",
-                              "EC100",
-                              delta_okay,
-                              false,  // interactive
-                              "http://url",
-                              "");    // target_version_prefix
-    ASSERT_FALSE(TestUpdateCheck(&params,
-                                 "invalid xml>",
+
+    request_params_.set_delta_okay(delta_okay);
+
+    ASSERT_FALSE(TestUpdateCheck("invalid xml>",
                                  -1,
                                  false,  // ping_only
                                  ErrorCode::kOmahaRequestXMLParseError,
@@ -1662,9 +1941,9 @@
                                  &post_data));
     // convert post_data to string
     string post_str(post_data.begin(), post_data.end());
-    EXPECT_NE(post_str.find(base::StringPrintf(" delta_okay=\"%s\"",
-                                               delta_okay_str)),
-              string::npos)
+    EXPECT_NE(
+        post_str.find(base::StringPrintf(" delta_okay=\"%s\"", delta_okay_str)),
+        string::npos)
         << "i = " << i;
   }
 }
@@ -1675,24 +1954,10 @@
     const char* interactive_str = interactive ? "ondemandupdate" : "scheduler";
     brillo::Blob post_data;
     FakeSystemState fake_system_state;
-    OmahaRequestParams params(&fake_system_state_,
-                              constants::kOmahaPlatformName,
-                              OmahaRequestParams::kOsVersion,
-                              "service_pack",
-                              "x86-generic",
-                              kTestAppId,
-                              "0.1.0.0",
-                              "en-US",
-                              "unittest_track",
-                              "OEM MODEL REV 1234",
-                              "ChromeOSFirmware.1.0",
-                              "EC100",
-                              true,   // delta_okay
-                              interactive,
-                              "http://url",
-                              "");    // target_version_prefix
-    ASSERT_FALSE(TestUpdateCheck(&params,
-                                 "invalid xml>",
+
+    request_params_.set_interactive(interactive);
+
+    ASSERT_FALSE(TestUpdateCheck("invalid xml>",
                                  -1,
                                  false,  // ping_only
                                  ErrorCode::kOmahaRequestXMLParseError,
@@ -1703,13 +1968,75 @@
                                  &post_data));
     // convert post_data to string
     string post_str(post_data.begin(), post_data.end());
-    EXPECT_NE(post_str.find(base::StringPrintf("installsource=\"%s\"",
-                                               interactive_str)),
+    EXPECT_NE(post_str.find(
+                  base::StringPrintf("installsource=\"%s\"", interactive_str)),
               string::npos)
         << "i = " << i;
   }
 }
 
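+// Verifies that targetversionprefix is only included in the <updatecheck>
+// element when a target version prefix has been configured.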
+TEST_F(OmahaRequestActionTest, FormatTargetVersionPrefixOutputTest) {
+  for (int i = 0; i < 2; i++) {
+    bool target_version_set = i == 1;
+    const char* target_version_prefix = target_version_set ? "10032." : "";
+    brillo::Blob post_data;
+    FakeSystemState fake_system_state;
+
+    request_params_.set_target_version_prefix(target_version_prefix);
+
+    ASSERT_FALSE(TestUpdateCheck("invalid xml>",
+                                 -1,
+                                 false,  // ping_only
+                                 ErrorCode::kOmahaRequestXMLParseError,
+                                 metrics::CheckResult::kParsingError,
+                                 metrics::CheckReaction::kUnset,
+                                 metrics::DownloadErrorCode::kUnset,
+                                 nullptr,
+                                 &post_data));
+    // convert post_data to string
+    string post_str(post_data.begin(), post_data.end());
+    if (target_version_set) {
+      EXPECT_NE(post_str.find("<updatecheck targetversionprefix=\"10032.\">"),
+                string::npos)
+          << "i = " << i;
+    } else {
+      EXPECT_EQ(post_str.find("targetversionprefix"), string::npos)
+          << "i = " << i;
+    }
+  }
+}
+
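+// Verifies that rollback_allowed is only included in the request when
+// rollback is allowed and a target version prefix is set.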
+TEST_F(OmahaRequestActionTest, FormatRollbackAllowedOutputTest) {
+  for (int i = 0; i < 4; i++) {
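+    // i enumerates all four combinations: rollback_allowed is true for
+    // i < 2, and the target version prefix is set for even i.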
+    bool rollback_allowed = i / 2 == 0;
+    bool target_version_set = i % 2 == 0;
+    brillo::Blob post_data;
+    FakeSystemState fake_system_state;
+
+    request_params_.set_target_version_prefix(target_version_set ? "10032."
+                                                                 : "");
+    request_params_.set_rollback_allowed(rollback_allowed);
+
+    ASSERT_FALSE(TestUpdateCheck("invalid xml>",
+                                 -1,
+                                 false,  // ping_only
+                                 ErrorCode::kOmahaRequestXMLParseError,
+                                 metrics::CheckResult::kParsingError,
+                                 metrics::CheckReaction::kUnset,
+                                 metrics::DownloadErrorCode::kUnset,
+                                 nullptr,
+                                 &post_data));
+    // convert post_data to string
+    string post_str(post_data.begin(), post_data.end());
+    if (rollback_allowed && target_version_set) {
+      EXPECT_NE(post_str.find("rollback_allowed=\"true\""), string::npos)
+          << "i = " << i;
+    } else {
+      EXPECT_EQ(post_str.find("rollback_allowed"), string::npos) << "i = " << i;
+    }
+  }
+}
+
 TEST_F(OmahaRequestActionTest, OmahaEventTest) {
   OmahaEvent default_event;
   EXPECT_EQ(OmahaEvent::kTypeUnknown, default_event.type);
@@ -1733,7 +2060,7 @@
   NiceMock<MockPrefs> prefs;
   fake_system_state_.set_prefs(&prefs);
   EXPECT_CALL(prefs, GetInt64(kPrefsMetricsCheckLastReportingTime, _))
-    .Times(AnyNumber());
+      .Times(AnyNumber());
   EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber());
   // Add a few hours to the day difference to test no rounding, etc.
   int64_t five_days_ago =
@@ -1747,8 +2074,7 @@
   EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
       .WillOnce(DoAll(SetArgPointee<1>(five_days_ago), Return(true)));
   brillo::Blob post_data;
-  ASSERT_TRUE(TestUpdateCheck(nullptr,  // request_params
-                              fake_update_response_.GetNoUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
                               -1,
                               ping_only,
                               ErrorCode::kSuccess,
@@ -1770,18 +2096,18 @@
 }
 
 TEST_F(OmahaRequestActionTest, PingTestSendOnlyAPing) {
-  PingTest(true  /* ping_only */);
+  PingTest(true /* ping_only */);
 }
 
 TEST_F(OmahaRequestActionTest, PingTestSendAlsoAnUpdateCheck) {
-  PingTest(false  /* ping_only */);
+  PingTest(false /* ping_only */);
 }
 
 TEST_F(OmahaRequestActionTest, ActivePingTest) {
   NiceMock<MockPrefs> prefs;
   fake_system_state_.set_prefs(&prefs);
   EXPECT_CALL(prefs, GetInt64(kPrefsMetricsCheckLastReportingTime, _))
-    .Times(AnyNumber());
+      .Times(AnyNumber());
   EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber());
   int64_t three_days_ago =
       (Time::Now() - TimeDelta::FromHours(3 * 24 + 12)).ToInternalValue();
@@ -1793,27 +2119,24 @@
   EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
       .WillOnce(DoAll(SetArgPointee<1>(now), Return(true)));
   brillo::Blob post_data;
-  ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetNoUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kNoUpdateAvailable,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      nullptr,
-                      &post_data));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kNoUpdateAvailable,
+                              metrics::CheckReaction::kUnset,
+                              metrics::DownloadErrorCode::kUnset,
+                              nullptr,
+                              &post_data));
   string post_str(post_data.begin(), post_data.end());
-  EXPECT_NE(post_str.find("<ping active=\"1\" a=\"3\"></ping>"),
-            string::npos);
+  EXPECT_NE(post_str.find("<ping active=\"1\" a=\"3\"></ping>"), string::npos);
 }
 
 TEST_F(OmahaRequestActionTest, RollCallPingTest) {
   NiceMock<MockPrefs> prefs;
   fake_system_state_.set_prefs(&prefs);
   EXPECT_CALL(prefs, GetInt64(kPrefsMetricsCheckLastReportingTime, _))
-    .Times(AnyNumber());
+      .Times(AnyNumber());
   EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber());
   int64_t four_days_ago =
       (Time::Now() - TimeDelta::FromHours(4 * 24)).ToInternalValue();
@@ -1825,17 +2148,15 @@
   EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
       .WillOnce(DoAll(SetArgPointee<1>(four_days_ago), Return(true)));
   brillo::Blob post_data;
-  ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetNoUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kNoUpdateAvailable,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      nullptr,
-                      &post_data));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kNoUpdateAvailable,
+                              metrics::CheckReaction::kUnset,
+                              metrics::DownloadErrorCode::kUnset,
+                              nullptr,
+                              &post_data));
   string post_str(post_data.begin(), post_data.end());
   EXPECT_NE(post_str.find("<ping active=\"1\" r=\"4\"></ping>\n"),
             string::npos);
@@ -1845,7 +2166,7 @@
   NiceMock<MockPrefs> prefs;
   fake_system_state_.set_prefs(&prefs);
   EXPECT_CALL(prefs, GetInt64(kPrefsMetricsCheckLastReportingTime, _))
-    .Times(AnyNumber());
+      .Times(AnyNumber());
   EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber());
   int64_t one_hour_ago =
       (Time::Now() - TimeDelta::FromHours(1)).ToInternalValue();
@@ -1862,17 +2183,15 @@
   EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _))
       .WillOnce(Return(true));
   brillo::Blob post_data;
-  ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetNoUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kNoUpdateAvailable,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      nullptr,
-                      &post_data));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kNoUpdateAvailable,
+                              metrics::CheckReaction::kUnset,
+                              metrics::DownloadErrorCode::kUnset,
+                              nullptr,
+                              &post_data));
   string post_str(post_data.begin(), post_data.end());
   EXPECT_EQ(post_str.find("ping"), string::npos);
 }
@@ -1889,17 +2208,15 @@
   EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _)).Times(0);
   EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _)).Times(0);
   brillo::Blob post_data;
-  EXPECT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetNoUpdateResponse(),
-                      -1,
-                      true,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUnset,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      nullptr,
-                      &post_data));
+  EXPECT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
+                              -1,
+                              true,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUnset,
+                              metrics::CheckReaction::kUnset,
+                              metrics::DownloadErrorCode::kUnset,
+                              nullptr,
+                              &post_data));
   EXPECT_EQ(0U, post_data.size());
 }
 
@@ -1907,7 +2224,7 @@
   NiceMock<MockPrefs> prefs;
   fake_system_state_.set_prefs(&prefs);
   EXPECT_CALL(prefs, GetInt64(kPrefsMetricsCheckLastReportingTime, _))
-    .Times(AnyNumber());
+      .Times(AnyNumber());
   EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber());
   int64_t future =
       (Time::Now() + TimeDelta::FromHours(3 * 24 + 4)).ToInternalValue();
@@ -1923,8 +2240,7 @@
       .WillOnce(Return(true));
   brillo::Blob post_data;
   ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+      TestUpdateCheck("<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
                       "protocol=\"3.0\"><daystart elapsed_seconds=\"100\"/>"
                       "<app appid=\"foo\" status=\"ok\"><ping status=\"ok\"/>"
                       "<updatecheck status=\"noupdate\"/></app></response>",
@@ -1953,15 +2269,16 @@
   fake_system_state_.set_prefs(&prefs);
   EXPECT_CALL(prefs, GetInt64(_, _)).Times(AnyNumber());
   EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber());
-  EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay,
-                              AllOf(Ge(midnight), Le(midnight_slack))))
+  EXPECT_CALL(prefs,
+              SetInt64(kPrefsLastActivePingDay,
+                       AllOf(Ge(midnight), Le(midnight_slack))))
       .WillOnce(Return(true));
-  EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay,
-                              AllOf(Ge(midnight), Le(midnight_slack))))
+  EXPECT_CALL(prefs,
+              SetInt64(kPrefsLastRollCallPingDay,
+                       AllOf(Ge(midnight), Le(midnight_slack))))
       .WillOnce(Return(true));
   ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+      TestUpdateCheck("<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
                       "protocol=\"3.0\"><daystart elapsed_seconds=\"200\"/>"
                       "<app appid=\"foo\" status=\"ok\"><ping status=\"ok\"/>"
                       "<updatecheck status=\"noupdate\"/></app></response>",
@@ -1983,8 +2300,7 @@
   EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _)).Times(0);
   EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _)).Times(0);
   ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+      TestUpdateCheck("<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
                       "protocol=\"3.0\"><daystart blah=\"200\"/>"
                       "<app appid=\"foo\" status=\"ok\"><ping status=\"ok\"/>"
                       "<updatecheck status=\"noupdate\"/></app></response>",
@@ -2006,8 +2322,7 @@
   EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _)).Times(0);
   EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _)).Times(0);
   ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+      TestUpdateCheck("<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
                       "protocol=\"3.0\"><daystart elapsed_seconds=\"x\"/>"
                       "<app appid=\"foo\" status=\"ok\"><ping status=\"ok\"/>"
                       "<updatecheck status=\"noupdate\"/></app></response>",
@@ -2025,8 +2340,7 @@
   // Test that the "eol" flags is only parsed from the "_eol" attribute and not
   // the "eol" attribute.
   ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+      TestUpdateCheck("<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
                       "protocol=\"3.0\"><app appid=\"foo\" status=\"ok\">"
                       "<ping status=\"ok\"/><updatecheck status=\"noupdate\" "
                       "_eol=\"security-only\" eol=\"eol\" _foo=\"bar\"/>"
@@ -2049,8 +2363,7 @@
 
 TEST_F(OmahaRequestActionTest, NoUniqueIDTest) {
   brillo::Blob post_data;
-  ASSERT_FALSE(TestUpdateCheck(nullptr,  // request_params
-                               "invalid xml>",
+  ASSERT_FALSE(TestUpdateCheck("invalid xml>",
                                -1,
                                false,  // ping_only
                                ErrorCode::kOmahaRequestXMLParseError,
@@ -2069,17 +2382,15 @@
   OmahaResponse response;
   const int http_error_code =
       static_cast<int>(ErrorCode::kOmahaRequestHTTPResponseBase) + 501;
-  ASSERT_FALSE(
-      TestUpdateCheck(nullptr,  // request_params
-                      "",
-                      501,
-                      false,  // ping_only
-                      static_cast<ErrorCode>(http_error_code),
-                      metrics::CheckResult::kDownloadError,
-                      metrics::CheckReaction::kUnset,
-                      static_cast<metrics::DownloadErrorCode>(501),
-                      &response,
-                      nullptr));
+  ASSERT_FALSE(TestUpdateCheck("",
+                               501,
+                               false,  // ping_only
+                               static_cast<ErrorCode>(http_error_code),
+                               metrics::CheckResult::kDownloadError,
+                               metrics::CheckReaction::kUnset,
+                               static_cast<metrics::DownloadErrorCode>(501),
+                               &response,
+                               nullptr));
   EXPECT_FALSE(response.update_exists);
 }
 
@@ -2087,32 +2398,28 @@
   OmahaResponse response;
   const int http_error_code =
       static_cast<int>(ErrorCode::kOmahaRequestHTTPResponseBase) + 999;
-  ASSERT_FALSE(
-      TestUpdateCheck(nullptr,  // request_params
-                      "",
-                      1500,
-                      false,  // ping_only
-                      static_cast<ErrorCode>(http_error_code),
-                      metrics::CheckResult::kDownloadError,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kHttpStatusOther,
-                      &response,
-                      nullptr));
+  ASSERT_FALSE(TestUpdateCheck("",
+                               1500,
+                               false,  // ping_only
+                               static_cast<ErrorCode>(http_error_code),
+                               metrics::CheckResult::kDownloadError,
+                               metrics::CheckReaction::kUnset,
+                               metrics::DownloadErrorCode::kHttpStatusOther,
+                               &response,
+                               nullptr));
   EXPECT_FALSE(response.update_exists);
 }
 
 TEST_F(OmahaRequestActionTest, TestUpdateFirstSeenAtGetsPersistedFirstTime) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
-  params.set_wall_clock_based_wait_enabled(true);
-  params.set_waiting_period(TimeDelta().FromDays(1));
-  params.set_update_check_count_wait_enabled(false);
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_waiting_period(TimeDelta().FromDays(1));
+  request_params_.set_update_check_count_wait_enabled(false);
 
   Time arbitrary_date;
   ASSERT_TRUE(Time::FromString("6/4/1989", &arbitrary_date));
   fake_system_state_.fake_clock()->SetWallclockTime(arbitrary_date);
-  ASSERT_FALSE(TestUpdateCheck(&params,
-                               fake_update_response_.GetUpdateResponse(),
+  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                                -1,
                                false,  // ping_only
                                ErrorCode::kOmahaUpdateDeferredPerPolicy,
@@ -2128,9 +2435,8 @@
   EXPECT_FALSE(response.update_exists);
 
   // Verify if we are interactive check we don't defer.
-  params.set_interactive(true);
-  ASSERT_TRUE(TestUpdateCheck(&params,
-                              fake_update_response_.GetUpdateResponse(),
+  request_params_.set_interactive(true);
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -2144,10 +2450,9 @@
 
 TEST_F(OmahaRequestActionTest, TestUpdateFirstSeenAtGetsUsedIfAlreadyPresent) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
-  params.set_wall_clock_based_wait_enabled(true);
-  params.set_waiting_period(TimeDelta().FromDays(1));
-  params.set_update_check_count_wait_enabled(false);
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_waiting_period(TimeDelta().FromDays(1));
+  request_params_.set_update_check_count_wait_enabled(false);
 
   Time t1, t2;
   ASSERT_TRUE(Time::FromString("1/1/2012", &t1));
@@ -2155,8 +2460,7 @@
   ASSERT_TRUE(
       fake_prefs_.SetInt64(kPrefsUpdateFirstSeenAt, t1.ToInternalValue()));
   fake_system_state_.fake_clock()->SetWallclockTime(t2);
-  ASSERT_TRUE(TestUpdateCheck(&params,
-                              fake_update_response_.GetUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -2180,17 +2484,16 @@
   ASSERT_TRUE(tempdir.CreateUniqueTempDir());
 
   brillo::Blob post_data;
-  OmahaRequestParams params(&fake_system_state_);
-  params.set_root(tempdir.GetPath().value());
-  params.set_app_id("{22222222-2222-2222-2222-222222222222}");
-  params.set_app_version("1.2.3.4");
-  params.set_product_components("o.bundle=1");
-  params.set_current_channel("canary-channel");
-  EXPECT_TRUE(params.SetTargetChannel("stable-channel", true, nullptr));
-  params.UpdateDownloadChannel();
-  EXPECT_TRUE(params.ShouldPowerwash());
-  ASSERT_FALSE(TestUpdateCheck(&params,
-                               "invalid xml>",
+  request_params_.set_root(tempdir.GetPath().value());
+  request_params_.set_app_id("{22222222-2222-2222-2222-222222222222}");
+  request_params_.set_app_version("1.2.3.4");
+  request_params_.set_product_components("o.bundle=1");
+  request_params_.set_current_channel("canary-channel");
+  EXPECT_TRUE(
+      request_params_.SetTargetChannel("stable-channel", true, nullptr));
+  request_params_.UpdateDownloadChannel();
+  EXPECT_TRUE(request_params_.ShouldPowerwash());
+  ASSERT_FALSE(TestUpdateCheck("invalid xml>",
                                -1,
                                false,  // ping_only
                                ErrorCode::kOmahaRequestXMLParseError,
@@ -2201,10 +2504,11 @@
                                &post_data));
   // convert post_data to string
   string post_str(post_data.begin(), post_data.end());
-  EXPECT_NE(string::npos, post_str.find(
-      "appid=\"{22222222-2222-2222-2222-222222222222}\" "
-      "version=\"0.0.0.0\" from_version=\"1.2.3.4\" "
-      "track=\"stable-channel\" from_track=\"canary-channel\" "));
+  EXPECT_NE(
+      string::npos,
+      post_str.find("appid=\"{22222222-2222-2222-2222-222222222222}\" "
+                    "version=\"0.0.0.0\" from_version=\"1.2.3.4\" "
+                    "track=\"stable-channel\" from_track=\"canary-channel\" "));
   EXPECT_EQ(string::npos, post_str.find("o.bundle"));
 }
 
@@ -2214,17 +2518,16 @@
   ASSERT_TRUE(tempdir.CreateUniqueTempDir());
 
   brillo::Blob post_data;
-  OmahaRequestParams params(&fake_system_state_);
-  params.set_root(tempdir.GetPath().value());
-  params.set_app_id("{11111111-1111-1111-1111-111111111111}");
-  params.set_app_version("5.6.7.8");
-  params.set_product_components("o.bundle=1");
-  params.set_current_channel("stable-channel");
-  EXPECT_TRUE(params.SetTargetChannel("canary-channel", false, nullptr));
-  params.UpdateDownloadChannel();
-  EXPECT_FALSE(params.ShouldPowerwash());
-  ASSERT_FALSE(TestUpdateCheck(&params,
-                               "invalid xml>",
+  request_params_.set_root(tempdir.GetPath().value());
+  request_params_.set_app_id("{11111111-1111-1111-1111-111111111111}");
+  request_params_.set_app_version("5.6.7.8");
+  request_params_.set_product_components("o.bundle=1");
+  request_params_.set_current_channel("stable-channel");
+  EXPECT_TRUE(
+      request_params_.SetTargetChannel("canary-channel", false, nullptr));
+  request_params_.UpdateDownloadChannel();
+  EXPECT_FALSE(request_params_.ShouldPowerwash());
+  ASSERT_FALSE(TestUpdateCheck("invalid xml>",
                                -1,
                                false,  // ping_only
                                ErrorCode::kOmahaRequestXMLParseError,
@@ -2233,12 +2536,13 @@
                                metrics::DownloadErrorCode::kUnset,
                                nullptr,  // response
                                &post_data));
-  // convert post_data to string
+  // Convert post_data to string.
   string post_str(post_data.begin(), post_data.end());
-  EXPECT_NE(string::npos, post_str.find(
-      "appid=\"{11111111-1111-1111-1111-111111111111}\" "
-      "version=\"5.6.7.8\" "
-      "track=\"canary-channel\" from_track=\"stable-channel\""));
+  EXPECT_NE(
+      string::npos,
+      post_str.find("appid=\"{11111111-1111-1111-1111-111111111111}\" "
+                    "version=\"5.6.7.8\" "
+                    "track=\"canary-channel\" from_track=\"stable-channel\""));
   EXPECT_EQ(string::npos, post_str.find("from_version"));
   EXPECT_NE(string::npos, post_str.find("o.bundle.version=\"1\""));
 }
@@ -2252,17 +2556,15 @@
   fake_system_state_.fake_hardware()->SetPowerwashCount(1);
 
   brillo::Blob post_data;
-  ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetNoUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kNoUpdateAvailable,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      nullptr,
-                      &post_data));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kNoUpdateAvailable,
+                              metrics::CheckReaction::kUnset,
+                              metrics::DownloadErrorCode::kUnset,
+                              nullptr,
+                              &post_data));
   // We shouldn't send a ping in this case since powerwash > 0.
   string post_str(post_data.begin(), post_data.end());
   EXPECT_EQ(string::npos, post_str.find("<ping"));
@@ -2280,17 +2582,15 @@
   fake_system_state_.fake_hardware()->SetFirstActiveOmahaPingSent();
 
   brillo::Blob post_data;
-  ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetNoUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kNoUpdateAvailable,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      nullptr,
-                      &post_data));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kNoUpdateAvailable,
+                              metrics::CheckReaction::kUnset,
+                              metrics::DownloadErrorCode::kUnset,
+                              nullptr,
+                              &post_data));
   // We shouldn't send a ping in this case since
   // first_active_omaha_ping_sent=true
   string post_str(post_data.begin(), post_data.end());
@@ -2303,24 +2603,22 @@
   fake_prefs_.SetString(kPrefsPreviousVersion, "1.2.3.4");
 
   brillo::Blob post_data;
-  ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetNoUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kNoUpdateAvailable,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      nullptr,
-                      &post_data));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kNoUpdateAvailable,
+                              metrics::CheckReaction::kUnset,
+                              metrics::DownloadErrorCode::kUnset,
+                              nullptr,
+                              &post_data));
   string post_str(post_data.begin(), post_data.end());
 
   // An event 54 is included and has the right version.
-  EXPECT_NE(string::npos,
-            post_str.find(base::StringPrintf(
-                              "<event eventtype=\"%d\"",
-                              OmahaEvent::kTypeRebootedAfterUpdate)));
+  EXPECT_NE(
+      string::npos,
+      post_str.find(base::StringPrintf("<event eventtype=\"%d\"",
+                                       OmahaEvent::kTypeRebootedAfterUpdate)));
   EXPECT_NE(string::npos,
             post_str.find("previousversion=\"1.2.3.4\"></event>"));
 
@@ -2331,19 +2629,17 @@
   EXPECT_TRUE(prev_version.empty());
 }
 
-void OmahaRequestActionTest::P2PTest(
-    bool initial_allow_p2p_for_downloading,
-    bool initial_allow_p2p_for_sharing,
-    bool omaha_disable_p2p_for_downloading,
-    bool omaha_disable_p2p_for_sharing,
-    bool payload_state_allow_p2p_attempt,
-    bool expect_p2p_client_lookup,
-    const string& p2p_client_result_url,
-    bool expected_allow_p2p_for_downloading,
-    bool expected_allow_p2p_for_sharing,
-    const string& expected_p2p_url) {
+void OmahaRequestActionTest::P2PTest(bool initial_allow_p2p_for_downloading,
+                                     bool initial_allow_p2p_for_sharing,
+                                     bool omaha_disable_p2p_for_downloading,
+                                     bool omaha_disable_p2p_for_sharing,
+                                     bool payload_state_allow_p2p_attempt,
+                                     bool expect_p2p_client_lookup,
+                                     const string& p2p_client_result_url,
+                                     bool expected_allow_p2p_for_downloading,
+                                     bool expected_allow_p2p_for_sharing,
+                                     const string& expected_p2p_url) {
   OmahaResponse response;
-  OmahaRequestParams request_params = request_params_;
   bool actual_allow_p2p_for_downloading = initial_allow_p2p_for_downloading;
   bool actual_allow_p2p_for_sharing = initial_allow_p2p_for_sharing;
   string actual_p2p_url;
@@ -2374,23 +2670,20 @@
   fake_update_response_.disable_p2p_for_downloading =
       omaha_disable_p2p_for_downloading;
   fake_update_response_.disable_p2p_for_sharing = omaha_disable_p2p_for_sharing;
-  ASSERT_TRUE(
-      TestUpdateCheck(&request_params,
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kUpdating,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
   EXPECT_TRUE(response.update_exists);
 
   EXPECT_EQ(omaha_disable_p2p_for_downloading,
             response.disable_p2p_for_downloading);
-  EXPECT_EQ(omaha_disable_p2p_for_sharing,
-            response.disable_p2p_for_sharing);
+  EXPECT_EQ(omaha_disable_p2p_for_sharing, response.disable_p2p_for_sharing);
 
   EXPECT_EQ(expected_allow_p2p_for_downloading,
             actual_allow_p2p_for_downloading);
@@ -2412,42 +2705,42 @@
 }
 
 TEST_F(OmahaRequestActionTest, P2PWithoutPeer) {
-  P2PTest(true,                   // initial_allow_p2p_for_downloading
-          true,                   // initial_allow_p2p_for_sharing
-          false,                  // omaha_disable_p2p_for_downloading
-          false,                  // omaha_disable_p2p_for_sharing
-          true,                   // payload_state_allow_p2p_attempt
-          true,                   // expect_p2p_client_lookup
-          "",                     // p2p_client_result_url
-          false,                  // expected_allow_p2p_for_downloading
-          true,                   // expected_allow_p2p_for_sharing
-          "");                    // expected_p2p_url
+  P2PTest(true,   // initial_allow_p2p_for_downloading
+          true,   // initial_allow_p2p_for_sharing
+          false,  // omaha_disable_p2p_for_downloading
+          false,  // omaha_disable_p2p_for_sharing
+          true,   // payload_state_allow_p2p_attempt
+          true,   // expect_p2p_client_lookup
+          "",     // p2p_client_result_url
+          false,  // expected_allow_p2p_for_downloading
+          true,   // expected_allow_p2p_for_sharing
+          "");    // expected_p2p_url
 }
 
 TEST_F(OmahaRequestActionTest, P2PDownloadNotAllowed) {
-  P2PTest(false,                  // initial_allow_p2p_for_downloading
-          true,                   // initial_allow_p2p_for_sharing
-          false,                  // omaha_disable_p2p_for_downloading
-          false,                  // omaha_disable_p2p_for_sharing
-          true,                   // payload_state_allow_p2p_attempt
-          false,                  // expect_p2p_client_lookup
-          "unset",                // p2p_client_result_url
-          false,                  // expected_allow_p2p_for_downloading
-          true,                   // expected_allow_p2p_for_sharing
-          "");                    // expected_p2p_url
+  P2PTest(false,    // initial_allow_p2p_for_downloading
+          true,     // initial_allow_p2p_for_sharing
+          false,    // omaha_disable_p2p_for_downloading
+          false,    // omaha_disable_p2p_for_sharing
+          true,     // payload_state_allow_p2p_attempt
+          false,    // expect_p2p_client_lookup
+          "unset",  // p2p_client_result_url
+          false,    // expected_allow_p2p_for_downloading
+          true,     // expected_allow_p2p_for_sharing
+          "");      // expected_p2p_url
 }
 
 TEST_F(OmahaRequestActionTest, P2PWithPeerDownloadDisabledByOmaha) {
-  P2PTest(true,                   // initial_allow_p2p_for_downloading
-          true,                   // initial_allow_p2p_for_sharing
-          true,                   // omaha_disable_p2p_for_downloading
-          false,                  // omaha_disable_p2p_for_sharing
-          true,                   // payload_state_allow_p2p_attempt
-          false,                  // expect_p2p_client_lookup
-          "unset",                // p2p_client_result_url
-          false,                  // expected_allow_p2p_for_downloading
-          true,                   // expected_allow_p2p_for_sharing
-          "");                    // expected_p2p_url
+  P2PTest(true,     // initial_allow_p2p_for_downloading
+          true,     // initial_allow_p2p_for_sharing
+          true,     // omaha_disable_p2p_for_downloading
+          false,    // omaha_disable_p2p_for_sharing
+          true,     // payload_state_allow_p2p_attempt
+          false,    // expect_p2p_client_lookup
+          "unset",  // p2p_client_result_url
+          false,    // expected_allow_p2p_for_downloading
+          true,     // expected_allow_p2p_for_sharing
+          "");      // expected_p2p_url
 }
 
 TEST_F(OmahaRequestActionTest, P2PWithPeerSharingDisabledByOmaha) {
@@ -2464,32 +2757,30 @@
 }
 
 TEST_F(OmahaRequestActionTest, P2PWithPeerBothDisabledByOmaha) {
-  P2PTest(true,                   // initial_allow_p2p_for_downloading
-          true,                   // initial_allow_p2p_for_sharing
-          true,                   // omaha_disable_p2p_for_downloading
-          true,                   // omaha_disable_p2p_for_sharing
-          true,                   // payload_state_allow_p2p_attempt
-          false,                  // expect_p2p_client_lookup
-          "unset",                // p2p_client_result_url
-          false,                  // expected_allow_p2p_for_downloading
-          false,                  // expected_allow_p2p_for_sharing
-          "");                    // expected_p2p_url
+  P2PTest(true,     // initial_allow_p2p_for_downloading
+          true,     // initial_allow_p2p_for_sharing
+          true,     // omaha_disable_p2p_for_downloading
+          true,     // omaha_disable_p2p_for_sharing
+          true,     // payload_state_allow_p2p_attempt
+          false,    // expect_p2p_client_lookup
+          "unset",  // p2p_client_result_url
+          false,    // expected_allow_p2p_for_downloading
+          false,    // expected_allow_p2p_for_sharing
+          "");      // expected_p2p_url
 }
 
-bool OmahaRequestActionTest::InstallDateParseHelper(const string &elapsed_days,
-                                                    OmahaResponse *response) {
+bool OmahaRequestActionTest::InstallDateParseHelper(const string& elapsed_days,
+                                                    OmahaResponse* response) {
   fake_update_response_.elapsed_days = elapsed_days;
-  return
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kUpdating,
-                      metrics::DownloadErrorCode::kUnset,
-                      response,
-                      nullptr);
+  return TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                         -1,
+                         false,  // ping_only
+                         ErrorCode::kSuccess,
+                         metrics::CheckResult::kUpdateAvailable,
+                         metrics::CheckReaction::kUpdating,
+                         metrics::DownloadErrorCode::kUnset,
+                         response,
+                         nullptr);
 }
 
 TEST_F(OmahaRequestActionTest, ParseInstallDateFromResponse) {
@@ -2591,4 +2882,266 @@
   EXPECT_EQ(prefs_days, 28);
 }
 
+// Verifies that a device that has no device policy and is not a consumer
+// device sets the max kernel key version to the current version, i.e., the
+// same behavior as if rollback were enabled.
+TEST_F(OmahaRequestActionTest, NoPolicyEnterpriseDevicesSetMaxRollback) {
+  FakeHardware* fake_hw = fake_system_state_.fake_hardware();
+
+  // Set up and verify some initial default values for the kernel TPM
+  // values that control verified boot and rollback.
+  const int min_kernel_version = 4;
+  fake_hw->SetMinKernelKeyVersion(min_kernel_version);
+  fake_hw->SetMaxKernelKeyRollforward(kRollforwardInfinity);
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
+  EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward());
+
+  EXPECT_CALL(
+      *fake_system_state_.mock_metrics_reporter(),
+      ReportKeyVersionMetrics(min_kernel_version, min_kernel_version, true))
+      .Times(1);
+
+  OmahaResponse response;
+  TestRollbackCheck(false /* is_consumer_device */,
+                    3 /* rollback_allowed_milestones */,
+                    false /* is_policy_loaded */,
+                    &response);
+
+  // Verify kernel_max_rollforward was set to the current minimum
+  // kernel key version. This has the effect of freezing roll
+  // forwards indefinitely. This will hold the rollback window
+  // open until a future change is able to move this forward
+  // relative to the configured window.
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMaxKernelKeyRollforward());
+}
+
+// Verifies that a consumer device with no device policy leaves the max
+// kernel key rollforward at infinity, i.e., the same behavior as if
+// rollback were disabled.
+TEST_F(OmahaRequestActionTest, NoPolicyConsumerDevicesSetMaxRollback) {
+  FakeHardware* fake_hw = fake_system_state_.fake_hardware();
+
+  // Set up and verify some initial default values for the kernel TPM
+  // values that control verified boot and rollback.
+  const int min_kernel_version = 3;
+  fake_hw->SetMinKernelKeyVersion(min_kernel_version);
+  fake_hw->SetMaxKernelKeyRollforward(kRollforwardInfinity);
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
+  EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward());
+
+  EXPECT_CALL(
+      *fake_system_state_.mock_metrics_reporter(),
+      ReportKeyVersionMetrics(min_kernel_version, kRollforwardInfinity, true))
+      .Times(1);
+
+  OmahaResponse response;
+  TestRollbackCheck(true /* is_consumer_device */,
+                    3 /* rollback_allowed_milestones */,
+                    false /* is_policy_loaded */,
+                    &response);
+
+  // Verify that with rollback disabled, kernel_max_rollforward was set to
+  // logical infinity. This is the expected behavior for consumer devices and
+  // matches the existing behavior prior to the rollback features.
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
+  EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward());
+}
+
+// Verifies that a device with rollback enabled sets kernel_max_rollforward
+// in the TPM to prevent roll forward.
+TEST_F(OmahaRequestActionTest, RollbackEnabledDevicesSetMaxRollback) {
+  FakeHardware* fake_hw = fake_system_state_.fake_hardware();
+
+  // Setup and verify some initial default values for the kernel TPM
+  // values that control verified boot and rollback.
+  const int allowed_milestones = 4;
+  const int min_kernel_version = 3;
+  fake_hw->SetMinKernelKeyVersion(min_kernel_version);
+  fake_hw->SetMaxKernelKeyRollforward(kRollforwardInfinity);
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
+  EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward());
+
+  EXPECT_CALL(
+      *fake_system_state_.mock_metrics_reporter(),
+      ReportKeyVersionMetrics(min_kernel_version, min_kernel_version, true))
+      .Times(1);
+
+  OmahaResponse response;
+  TestRollbackCheck(false /* is_consumer_device */,
+                    allowed_milestones,
+                    true /* is_policy_loaded */,
+                    &response);
+
+  // Verify that with rollback enabled, kernel_max_rollforward was set to the
+  // current minimum kernel key version. This has the effect of freezing roll
+  // forwards indefinitely. This will hold the rollback window open until a
+  // future change is able to move it forward relative to the configured
+  // window.
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMaxKernelKeyRollforward());
+}
+
+// Verifies that a device with rollback disabled sets kernel_max_rollforward
+// in the TPM to logical infinity, to allow roll forward.
+TEST_F(OmahaRequestActionTest, RollbackDisabledDevicesSetMaxRollback) {
+  FakeHardware* fake_hw = fake_system_state_.fake_hardware();
+
+  // Setup and verify some initial default values for the kernel TPM
+  // values that control verified boot and rollback.
+  const int allowed_milestones = 0;
+  const int min_kernel_version = 3;
+  fake_hw->SetMinKernelKeyVersion(min_kernel_version);
+  fake_hw->SetMaxKernelKeyRollforward(kRollforwardInfinity);
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
+  EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward());
+
+  EXPECT_CALL(
+      *fake_system_state_.mock_metrics_reporter(),
+      ReportKeyVersionMetrics(min_kernel_version, kRollforwardInfinity, true))
+      .Times(1);
+
+  OmahaResponse response;
+  TestRollbackCheck(false /* is_consumer_device */,
+                    allowed_milestones,
+                    true /* is_policy_loaded */,
+                    &response);
+
+  // Verify that with rollback disabled, kernel_max_rollforward was set to
+  // logical infinity.
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
+  EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward());
+}
+
+TEST_F(OmahaRequestActionTest, RollbackResponseParsedNoEntries) {
+  OmahaResponse response;
+  fake_update_response_.rollback = true;
+  TestRollbackCheck(false /* is_consumer_device */,
+                    4 /* rollback_allowed_milestones */,
+                    true /* is_policy_loaded */,
+                    &response);
+  EXPECT_TRUE(response.is_rollback);
+}
+
+TEST_F(OmahaRequestActionTest, RollbackResponseValidVersionsParsed) {
+  OmahaResponse response;
+  fake_update_response_.rollback_firmware_version = "1.2";
+  fake_update_response_.rollback_kernel_version = "3.4";
+  fake_update_response_.rollback = true;
+  TestRollbackCheck(false /* is_consumer_device */,
+                    4 /* rollback_allowed_milestones */,
+                    true /* is_policy_loaded */,
+                    &response);
+  EXPECT_TRUE(response.is_rollback);
+  EXPECT_EQ(1, response.rollback_key_version.firmware_key);
+  EXPECT_EQ(2, response.rollback_key_version.firmware);
+  EXPECT_EQ(3, response.rollback_key_version.kernel_key);
+  EXPECT_EQ(4, response.rollback_key_version.kernel);
+}
+
+TEST_F(OmahaRequestActionTest,
+       TestUpdateFirstSeenAtPrefPersistedIfUpdateExists) {
+  FakeClock fake_clock;
+  Time now = Time::Now();
+  fake_clock.SetWallclockTime(now);
+  fake_system_state_.set_clock(&fake_clock);
+
+  OmahaResponse response;
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
+  EXPECT_TRUE(response.update_exists);
+  EXPECT_TRUE(fake_prefs_.Exists(kPrefsUpdateFirstSeenAt));
+
+  int64_t stored_first_seen_at_time;
+  EXPECT_TRUE(fake_prefs_.GetInt64(kPrefsUpdateFirstSeenAt,
+                                   &stored_first_seen_at_time));
+  EXPECT_EQ(now.ToInternalValue(), stored_first_seen_at_time);
+}
+
+TEST_F(OmahaRequestActionTest,
+       TestUpdateFirstSeenAtPrefNotPersistedIfUpdateFails) {
+  FakeClock fake_clock;
+  Time now = Time::Now();
+  fake_clock.SetWallclockTime(now);
+  fake_system_state_.set_clock(&fake_clock);
+
+  OmahaResponse response;
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kNoUpdateAvailable,
+                              metrics::CheckReaction::kUnset,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
+  EXPECT_FALSE(response.update_exists);
+  EXPECT_FALSE(fake_prefs_.Exists(kPrefsUpdateFirstSeenAt));
+}
+
+TEST_F(OmahaRequestActionTest, InstallTest) {
+  OmahaResponse response;
+  request_params_.set_is_install(true);
+  request_params_.set_dlc_module_ids({"dlc_no_0", "dlc_no_1"});
+  brillo::Blob post_data;
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              true,   // is_consumer_device
+                              0,      // rollback_allowed_milestones
+                              false,  // is_policy_loaded
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              &post_data));
+  // Convert post_data to string.
+  string post_str(post_data.begin(), post_data.end());
+  for (const auto& dlc_module_id : request_params_.dlc_module_ids()) {
+    EXPECT_NE(string::npos,
+              post_str.find("appid=\"" + fake_update_response_.app_id + "_" +
+                            dlc_module_id + "\""));
+  }
+  EXPECT_NE(string::npos,
+            post_str.find("appid=\"" + fake_update_response_.app_id + "\""));
+
+  // Count the number of <updatecheck> tags in the request.
+  size_t updatecheck_count = 0;
+  size_t pos = 0;
+  while ((pos = post_str.find("<updatecheck", pos)) != string::npos) {
+    updatecheck_count++;
+    pos++;
+  }
+  EXPECT_EQ(request_params_.dlc_module_ids().size(), updatecheck_count);
+}
+
+TEST_F(OmahaRequestActionTest, InstallMissingPlatformVersionTest) {
+  fake_update_response_.multi_app_skip_updatecheck = true;
+  fake_update_response_.multi_app_no_update = false;
+  request_params_.set_is_install(true);
+  request_params_.set_dlc_module_ids({"dlc_no_0", "dlc_no_1"});
+  request_params_.set_app_id(fake_update_response_.app_id_skip_updatecheck);
+  OmahaResponse response;
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
+  EXPECT_TRUE(response.update_exists);
+  EXPECT_EQ(fake_update_response_.current_version, response.version);
+}
+
 }  // namespace chromeos_update_engine
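The four SetMaxRollback tests above all exercise one decision: kernel_max_rollforward is pinned to the current minimum kernel key version (freezing roll forwards and holding the rollback window open) whenever rollback is in play, and is set to logical infinity otherwise. A minimal standalone sketch of that decision follows; ComputeKernelMaxRollforward and the kRollforwardInfinity value below are assumptions for illustration, not the update_engine implementation.

// Illustrative-only sketch of the kernel_max_rollforward policy exercised by
// the tests above; the function name and sentinel value are hypothetical.
#include <cassert>
#include <cstdint>

constexpr uint32_t kRollforwardInfinity = 0xfffffffe;  // Assumed sentinel.

// Freeze roll forwards (pin to the current minimum kernel key version) when
// rollback is in play; otherwise allow roll forwards by using infinity.
uint32_t ComputeKernelMaxRollforward(bool is_consumer_device,
                                     bool is_policy_loaded,
                                     int rollback_allowed_milestones,
                                     uint32_t min_kernel_key_version) {
  const bool rollback_in_play =
      is_policy_loaded ? rollback_allowed_milestones > 0 : !is_consumer_device;
  return rollback_in_play ? min_kernel_key_version : kRollforwardInfinity;
}

int main() {
  // Mirrors NoPolicyEnterpriseDevicesSetMaxRollback.
  assert(ComputeKernelMaxRollforward(false, false, 3, 4) == 4);
  // Mirrors NoPolicyConsumerDevicesSetMaxRollback.
  assert(ComputeKernelMaxRollforward(true, false, 3, 3) == kRollforwardInfinity);
  // Mirrors RollbackEnabledDevicesSetMaxRollback.
  assert(ComputeKernelMaxRollforward(false, true, 4, 3) == 3);
  // Mirrors RollbackDisabledDevicesSetMaxRollback.
  assert(ComputeKernelMaxRollforward(false, true, 0, 3) == kRollforwardInfinity);
  return 0;
}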
diff --git a/omaha_request_params.cc b/omaha_request_params.cc
index 9e78a93..8c410f1 100644
--- a/omaha_request_params.cc
+++ b/omaha_request_params.cc
@@ -77,10 +77,14 @@
   LOG(INFO) << "Running from channel " << image_props_.current_channel;
 
   os_platform_ = constants::kOmahaPlatformName;
-  if (!image_props_.system_version.empty())
+  if (!image_props_.system_version.empty()) {
+    if (in_app_version == "ForcedUpdate") {
+      image_props_.system_version = in_app_version;
+    }
     os_version_ = image_props_.system_version;
-  else
+  } else {
     os_version_ = OmahaRequestParams::kOsVersion;
+  }
   if (!in_app_version.empty())
     image_props_.version = in_app_version;
 
@@ -97,8 +101,8 @@
     // know (i.e. stat() returns some unexpected error), then err on the side of
     // caution and say deltas are not okay.
     struct stat stbuf;
-    delta_okay_ = (stat((root_ + "/.nodelta").c_str(), &stbuf) < 0) &&
-                  (errno == ENOENT);
+    delta_okay_ =
+        (stat((root_ + "/.nodelta").c_str(), &stbuf) < 0) && (errno == ENOENT);
   } else {
     LOG(INFO) << "Disabling deltas as a channel change to "
               << mutable_image_props_.target_channel
@@ -118,6 +122,10 @@
 
   // Set the interactive flag accordingly.
   interactive_ = in_interactive;
+
+  dlc_module_ids_.clear();
+  // Default to false so the request performs an update, not an install.
+  is_install_ = false;
   return true;
 }
 
@@ -127,16 +135,10 @@
 }
 
 bool OmahaRequestParams::CollectECFWVersions() const {
-  return base::StartsWith(hwid_, string("SAMS ALEX"),
-                          base::CompareCase::SENSITIVE) ||
-         base::StartsWith(hwid_, string("BUTTERFLY"),
-                          base::CompareCase::SENSITIVE) ||
-         base::StartsWith(hwid_, string("LUMPY"),
-                          base::CompareCase::SENSITIVE) ||
-         base::StartsWith(hwid_, string("PARROT"),
-                          base::CompareCase::SENSITIVE) ||
-         base::StartsWith(hwid_, string("SPRING"),
-                          base::CompareCase::SENSITIVE) ||
+  return base::StartsWith(
+             hwid_, string("PARROT"), base::CompareCase::SENSITIVE) ||
+         base::StartsWith(
+             hwid_, string("SPRING"), base::CompareCase::SENSITIVE) ||
          base::StartsWith(hwid_, string("SNOW"), base::CompareCase::SENSITIVE);
 }
 
diff --git a/omaha_request_params.h b/omaha_request_params.h
index 60619f9..18235c0 100644
--- a/omaha_request_params.h
+++ b/omaha_request_params.h
@@ -20,6 +20,7 @@
 #include <stdint.h>
 
 #include <string>
+#include <vector>
 
 #include <base/macros.h>
 #include <base/time/time.h>
@@ -49,51 +50,12 @@
         os_version_(kOsVersion),
         delta_okay_(true),
         interactive_(false),
+        rollback_allowed_(false),
         wall_clock_based_wait_enabled_(false),
         update_check_count_wait_enabled_(false),
         min_update_checks_needed_(kDefaultMinUpdateChecks),
-        max_update_checks_allowed_(kDefaultMaxUpdateChecks) {}
-
-  OmahaRequestParams(SystemState* system_state,
-                     const std::string& in_os_platform,
-                     const std::string& in_os_version,
-                     const std::string& in_os_sp,
-                     const std::string& in_os_board,
-                     const std::string& in_app_id,
-                     const std::string& in_app_version,
-                     const std::string& in_app_lang,
-                     const std::string& in_target_channel,
-                     const std::string& in_hwid,
-                     const std::string& in_fw_version,
-                     const std::string& in_ec_version,
-                     bool in_delta_okay,
-                     bool in_interactive,
-                     const std::string& in_update_url,
-                     const std::string& in_target_version_prefix)
-      : system_state_(system_state),
-        os_platform_(in_os_platform),
-        os_version_(in_os_version),
-        os_sp_(in_os_sp),
-        app_lang_(in_app_lang),
-        hwid_(in_hwid),
-        fw_version_(in_fw_version),
-        ec_version_(in_ec_version),
-        delta_okay_(in_delta_okay),
-        interactive_(in_interactive),
-        update_url_(in_update_url),
-        target_version_prefix_(in_target_version_prefix),
-        wall_clock_based_wait_enabled_(false),
-        update_check_count_wait_enabled_(false),
-        min_update_checks_needed_(kDefaultMinUpdateChecks),
-        max_update_checks_allowed_(kDefaultMaxUpdateChecks) {
-    image_props_.board = in_os_board;
-    image_props_.product_id = in_app_id;
-    image_props_.canary_product_id = in_app_id;
-    image_props_.version = in_app_version;
-    image_props_.current_channel = in_target_channel;
-    mutable_image_props_.target_channel = in_target_channel;
-    mutable_image_props_.is_powerwash_allowed = false;
-  }
+        max_update_checks_allowed_(kDefaultMaxUpdateChecks),
+        is_install_(false) {}
 
   virtual ~OmahaRequestParams();
 
@@ -164,6 +126,12 @@
     return target_version_prefix_;
   }
 
+  inline void set_rollback_allowed(bool rollback_allowed) {
+    rollback_allowed_ = rollback_allowed;
+  }
+
+  inline bool rollback_allowed() const { return rollback_allowed_; }
+
   inline void set_wall_clock_based_wait_enabled(bool enabled) {
     wall_clock_based_wait_enabled_ = enabled;
   }
@@ -197,6 +165,15 @@
   inline int64_t max_update_checks_allowed() const {
     return max_update_checks_allowed_;
   }
+  inline void set_dlc_module_ids(
+      const std::vector<std::string>& dlc_module_ids) {
+    dlc_module_ids_ = dlc_module_ids;
+  }
+  inline std::vector<std::string> dlc_module_ids() const {
+    return dlc_module_ids_;
+  }
+  inline void set_is_install(bool is_install) { is_install_ = is_install; }
+  inline bool is_install() const { return is_install_; }
 
   // Returns the app id corresponding to the current value of the
   // download channel.
@@ -204,7 +181,6 @@
 
   // Suggested defaults
   static const char kOsVersion[];
-  static const char kIsPowerwashAllowedKey[];
   static const int64_t kDefaultMinUpdateChecks = 0;
   static const int64_t kDefaultMaxUpdateChecks = 8;
 
@@ -249,6 +225,21 @@
   void set_target_channel(const std::string& channel) {
     mutable_image_props_.target_channel = channel;
   }
+  void set_os_sp(const std::string& os_sp) { os_sp_ = os_sp; }
+  void set_os_board(const std::string& os_board) {
+    image_props_.board = os_board;
+  }
+  void set_app_lang(const std::string& app_lang) { app_lang_ = app_lang; }
+  void set_hwid(const std::string& hwid) { hwid_ = hwid; }
+  void set_fw_version(const std::string& fw_version) {
+    fw_version_ = fw_version;
+  }
+  void set_ec_version(const std::string& ec_version) {
+    ec_version_ = ec_version;
+  }
+  void set_is_powerwash_allowed(bool powerwash_allowed) {
+    mutable_image_props_.is_powerwash_allowed = powerwash_allowed;
+  }
 
  private:
   FRIEND_TEST(OmahaRequestParamsTest, ChannelIndexTest);
@@ -279,15 +270,6 @@
   // Compares hwid to a set of whitelisted prefixes.
   bool CollectECFWVersions() const;
 
-  // These are individual helper methods to initialize the said properties from
-  // the LSB value.
-  void SetTargetChannelFromLsbValue();
-  void SetCurrentChannelFromLsbValue();
-  void SetIsPowerwashAllowedFromLsbValue();
-
-  // Initializes the required properties from the LSB value.
-  void InitFromLsbValue();
-
   // Gets the machine type (e.g. "i686").
   std::string GetMachineType() const;
 
@@ -324,11 +306,11 @@
   //   changed and cancel the current download attempt.
   std::string download_channel_;
 
-  std::string hwid_;  // Hardware Qualification ID of the client
+  std::string hwid_;        // Hardware Qualification ID of the client
   std::string fw_version_;  // Chrome OS Firmware Version.
   std::string ec_version_;  // Chrome OS EC Version.
-  bool delta_okay_;  // If this client can accept a delta
-  bool interactive_;   // Whether this is a user-initiated update check
+  bool delta_okay_;         // If this client can accept a delta
+  bool interactive_;        // Whether this is a user-initiated update check
 
   // The URL to send the Omaha request to.
   std::string update_url_;
@@ -337,14 +319,17 @@
   // to be pinned to. It's empty otherwise.
   std::string target_version_prefix_;
 
-  // True if scattering is enabled, in which case waiting_period_ specifies the
-  // amount of absolute time that we've to wait for before sending a request to
-  // Omaha.
+  // Whether the client is accepting rollback images too.
+  bool rollback_allowed_;
+
+  // True if scattering or staging is enabled, in which case waiting_period_
+  // specifies the absolute amount of time we have to wait before sending a
+  // request to Omaha.
   bool wall_clock_based_wait_enabled_;
   base::TimeDelta waiting_period_;
 
-  // True if scattering is enabled to denote the number of update checks
-  // we've to skip before we can send a request to Omaha. The min and max
+  // True if scattering or staging is enabled, in which case this denotes the
+  // number of update checks we have to skip before sending a request to
+  // Omaha. The min and max
   // values establish the bounds for a random number to be chosen within that
   // range to enable such a wait.
   bool update_check_count_wait_enabled_;
@@ -354,9 +339,15 @@
   // When reading files, prepend root_ to the paths. Useful for testing.
   std::string root_;
 
-  // TODO(jaysri): Uncomment this after fixing unit tests, as part of
-  // chromium-os:39752
-  // DISALLOW_COPY_AND_ASSIGN(OmahaRequestParams);
+  // A list of DLC module IDs to install.
+  std::vector<std::string> dlc_module_ids_;
+
+  // This variable defines whether the payload is being installed in the current
+  // partition. At the moment, this is used for installing DLC modules on the
+  // current active partition instead of the inactive partition.
+  bool is_install_;
+
+  DISALLOW_COPY_AND_ASSIGN(OmahaRequestParams);
 };
 
 }  // namespace chromeos_update_engine
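The new dlc_module_ids_ and is_install_ members above back the DLC install flow: InstallTest earlier expects one <updatecheck> element per DLC, each under an app id formed by appending "_<dlc_module_id>" to the platform app id. A standalone sketch of that composition and of the same substring counting used by the test; ComposeDlcAppId, CountOccurrences, and the placeholder app id are illustrative, not update_engine API.

// Standalone sketch: compose per-DLC Omaha app ids and count <updatecheck>
// occurrences the way InstallTest does. Helper names are hypothetical.
#include <cassert>
#include <string>
#include <vector>

std::string ComposeDlcAppId(const std::string& platform_app_id,
                            const std::string& dlc_module_id) {
  return platform_app_id + "_" + dlc_module_id;
}

size_t CountOccurrences(const std::string& haystack, const std::string& needle) {
  size_t count = 0, pos = 0;
  while ((pos = haystack.find(needle, pos)) != std::string::npos) {
    ++count;
    ++pos;
  }
  return count;
}

int main() {
  const std::string app_id = "platform-app-id";  // Placeholder value.
  const std::vector<std::string> dlc_ids = {"dlc_no_0", "dlc_no_1"};
  std::string request;
  for (const auto& dlc_id : dlc_ids)
    request += "<app appid=\"" + ComposeDlcAppId(app_id, dlc_id) +
               "\"><updatecheck/></app>";
  assert(CountOccurrences(request, "<updatecheck") == dlc_ids.size());
  return 0;
}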
diff --git a/omaha_request_params_unittest.cc b/omaha_request_params_unittest.cc
index ce77f31..7332431 100644
--- a/omaha_request_params_unittest.cc
+++ b/omaha_request_params_unittest.cc
@@ -44,9 +44,6 @@
   void SetUp() override {
     // Create a uniquely named test directory.
     ASSERT_TRUE(tempdir_.CreateUniqueTempDir());
-    // Create a fresh copy of the params for each test, so there's no
-    // unintended reuse of state across tests.
-    params_ = OmahaRequestParams(&fake_system_state_);
     params_.set_root(tempdir_.GetPath().value());
     SetLockDown(false);
     fake_system_state_.set_prefs(&fake_prefs_);
@@ -57,8 +54,8 @@
     fake_system_state_.fake_hardware()->SetIsNormalBootMode(locked_down);
   }
 
-  OmahaRequestParams params_;
   FakeSystemState fake_system_state_;
+  OmahaRequestParams params_{&fake_system_state_};
   FakePrefs fake_prefs_;
 
   base::ScopedTempDir tempdir_;
@@ -259,9 +256,6 @@
 
   params_.hwid_ = string("SNOW 12345");
   EXPECT_TRUE(params_.CollectECFWVersions());
-
-  params_.hwid_ = string("SAMS ALEX 12345");
-  EXPECT_TRUE(params_.CollectECFWVersions());
 }
 
 }  // namespace chromeos_update_engine
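The unittest change above replaces a SetUp() reassignment of params_ with an in-class brace initializer, so the member is constructed once with the fixture's FakeSystemState and no copy assignment is needed (which DISALLOW_COPY_AND_ASSIGN now forbids); it also moves fake_system_state_ above params_ so the pointer it hands to the initializer is already constructed. A gtest-free sketch of the same pattern, with placeholder types rather than the real classes:

// Sketch of the fixture pattern used above: a member that needs a pointer to
// another member is brace-initialized in-class instead of being reassigned in
// SetUp(). Types here are placeholders, not the update_engine classes.
#include <cassert>

struct FakeSystemState {};

class RequestParams {
 public:
  explicit RequestParams(FakeSystemState* system_state)
      : system_state_(system_state) {}
  RequestParams(const RequestParams&) = delete;
  RequestParams& operator=(const RequestParams&) = delete;
  FakeSystemState* system_state() const { return system_state_; }

 private:
  FakeSystemState* system_state_;
};

class Fixture {
 public:
  // Declaration order matters: fake_system_state_ must precede params_ so the
  // pointer passed to the brace initializer points at a constructed object.
  FakeSystemState fake_system_state_;
  RequestParams params_{&fake_system_state_};
};

int main() {
  Fixture fixture;
  assert(fixture.params_.system_state() == &fixture.fake_system_state_);
  return 0;
}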
diff --git a/omaha_response.h b/omaha_response.h
index b973eb5..0ac09df 100644
--- a/omaha_response.h
+++ b/omaha_response.h
@@ -21,6 +21,7 @@
 #include <sys/stat.h>
 #include <sys/types.h>
 
+#include <limits>
 #include <string>
 #include <vector>
 
@@ -72,6 +73,9 @@
   // True if the Omaha rule instructs us to disable p2p for sharing.
   bool disable_p2p_for_sharing = false;
 
+  // True if the Omaha rule instructs us to powerwash.
+  bool powerwash_required = false;
+
   // If not blank, a base-64 encoded representation of the PEM-encoded
   // public key in the response.
   std::string public_key_rsa;
@@ -80,6 +84,24 @@
   // PST, according to the Omaha Server's clock and timezone (PST8PDT,
   // aka "Pacific Time".)
   int install_date_days = -1;
+
+  // True if the returned image is a rollback for the device.
+  bool is_rollback = false;
+
+  struct RollbackKeyVersion {
+    // Kernel key version. 0xffff if the value is unknown.
+    uint16_t kernel_key = std::numeric_limits<uint16_t>::max();
+    // Kernel version. 0xffff if the value is unknown.
+    uint16_t kernel = std::numeric_limits<uint16_t>::max();
+    // Firmware key version. 0xffff if the value is unknown.
+    uint16_t firmware_key = std::numeric_limits<uint16_t>::max();
+    // Firmware version. 0xffff if the value is unknown.
+    uint16_t firmware = std::numeric_limits<uint16_t>::max();
+  };
+
+  // Key versions of the returned rollback image. Values are 0xffff if the
+  // image is not a rollback, or if the fields were not present.
+  RollbackKeyVersion rollback_key_version;
 };
 static_assert(sizeof(off_t) == 8, "off_t not 64 bit");
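The RollbackKeyVersion defaults above use std::numeric_limits<uint16_t>::max() (0xffff) as an "unknown" sentinel; when both halves of a version are unknown, the combined 32-bit value becomes 0xffffffff, which the response handler later rejects. A small sketch of that convention (the Combine helper and the trimmed struct are illustrative only):

// Sketch of the 0xffff "unknown" convention used by RollbackKeyVersion.
#include <cassert>
#include <cstdint>
#include <limits>

struct RollbackKeyVersion {
  uint16_t kernel_key = std::numeric_limits<uint16_t>::max();
  uint16_t kernel = std::numeric_limits<uint16_t>::max();
};

// Illustrative helper: pack key version and version into <key:16><version:16>.
uint32_t Combine(uint16_t key, uint16_t version) {
  return static_cast<uint32_t>(key) << 16 | static_cast<uint32_t>(version);
}

int main() {
  RollbackKeyVersion unknown;  // Defaults mean "not specified in the response".
  assert(Combine(unknown.kernel_key, unknown.kernel) ==
         std::numeric_limits<uint32_t>::max());
  assert(Combine(1, 2) == 0x00010002u);  // Matches the minimums used in tests.
  return 0;
}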
 
diff --git a/omaha_response_handler_action.cc b/omaha_response_handler_action.cc
index 2d6105a..ab41b84 100644
--- a/omaha_response_handler_action.cc
+++ b/omaha_response_handler_action.cc
@@ -16,11 +16,11 @@
 
 #include "update_engine/omaha_response_handler_action.h"
 
+#include <limits>
 #include <string>
 
 #include <base/logging.h>
 #include <base/strings/string_number_conversions.h>
-#include <base/strings/string_util.h>
 #include <policy/device_policy.h>
 
 #include "update_engine/common/constants.h"
@@ -36,29 +36,23 @@
 
 using chromeos_update_manager::Policy;
 using chromeos_update_manager::UpdateManager;
+using std::numeric_limits;
 using std::string;
 
 namespace chromeos_update_engine {
 
 OmahaResponseHandlerAction::OmahaResponseHandlerAction(
     SystemState* system_state)
-    : OmahaResponseHandlerAction(system_state,
-                                 constants::kOmahaResponseDeadlineFile) {}
-
-OmahaResponseHandlerAction::OmahaResponseHandlerAction(
-    SystemState* system_state, const string& deadline_file)
     : system_state_(system_state),
-      got_no_update_response_(false),
-      key_path_(constants::kUpdatePayloadPublicKeyPath),
-      deadline_file_(deadline_file) {}
+      deadline_file_(constants::kOmahaResponseDeadlineFile) {}
 
 void OmahaResponseHandlerAction::PerformAction() {
   CHECK(HasInputObject());
   ScopedActionCompleter completer(processor_, this);
   const OmahaResponse& response = GetInputObject();
   if (!response.update_exists) {
-    got_no_update_response_ = true;
     LOG(INFO) << "There are no updates. Aborting.";
+    completer.set_code(ErrorCode::kNoUpdate);
     return;
   }
 
@@ -127,8 +121,13 @@
         << "Unable to save the update check response hash.";
   }
 
-  install_plan_.source_slot = system_state_->boot_control()->GetCurrentSlot();
-  install_plan_.target_slot = install_plan_.source_slot == 0 ? 1 : 0;
+  if (params->is_install()) {
+    install_plan_.target_slot = system_state_->boot_control()->GetCurrentSlot();
+    install_plan_.source_slot = BootControlInterface::kInvalidSlot;
+  } else {
+    install_plan_.source_slot = system_state_->boot_control()->GetCurrentSlot();
+    install_plan_.target_slot = install_plan_.source_slot == 0 ? 1 : 0;
+  }
 
   // The Omaha response doesn't include the channel name for this image, so we
   // use the download_channel we used during the request to tag the target slot.
@@ -139,7 +138,39 @@
   system_state_->prefs()->SetString(current_channel_key,
                                     params->download_channel());
 
-  if (params->ShouldPowerwash())
+  // Check whether the device is able to boot the returned rollback image.
+  if (response.is_rollback) {
+    if (!params->rollback_allowed()) {
+      LOG(ERROR) << "Received rollback image but rollback is not allowed.";
+      completer.set_code(ErrorCode::kOmahaResponseInvalid);
+      return;
+    }
+    auto min_kernel_key_version = static_cast<uint32_t>(
+        system_state_->hardware()->GetMinKernelKeyVersion());
+    auto min_firmware_key_version = static_cast<uint32_t>(
+        system_state_->hardware()->GetMinFirmwareKeyVersion());
+    uint32_t kernel_key_version =
+        static_cast<uint32_t>(response.rollback_key_version.kernel_key) << 16 |
+        static_cast<uint32_t>(response.rollback_key_version.kernel);
+    uint32_t firmware_key_version =
+        static_cast<uint32_t>(response.rollback_key_version.firmware_key)
+            << 16 |
+        static_cast<uint32_t>(response.rollback_key_version.firmware);
+
+    // Don't attempt a rollback if the versions are incompatible or the
+    // target image does not specify the version information.
+    if (kernel_key_version == numeric_limits<uint32_t>::max() ||
+        firmware_key_version == numeric_limits<uint32_t>::max() ||
+        kernel_key_version < min_kernel_key_version ||
+        firmware_key_version < min_firmware_key_version) {
+      LOG(ERROR) << "Device won't be able to boot up the rollback image.";
+      completer.set_code(ErrorCode::kRollbackNotPossible);
+      return;
+    }
+    install_plan_.is_rollback = true;
+  }
+
+  if (response.powerwash_required || params->ShouldPowerwash())
     install_plan_.powerwash_required = true;
 
   TEST_AND_RETURN(HasOutputPipe());
@@ -156,9 +187,16 @@
   // method and UpdateStatus signal. A potential issue is that update_engine may
   // be unresponsive during an update download.
   if (!deadline_file_.empty()) {
-    utils::WriteFile(deadline_file_.c_str(),
-                     response.deadline.data(),
-                     response.deadline.size());
+    if (payload_state->GetRollbackHappened()) {
+      // Don't do forced update if rollback has happened since the last update
+      // check where policy was present.
+      LOG(INFO) << "Not forcing update because a rollback happened.";
+      utils::WriteFile(deadline_file_.c_str(), nullptr, 0);
+    } else {
+      utils::WriteFile(deadline_file_.c_str(),
+                       response.deadline.data(),
+                       response.deadline.size());
+    }
     chmod(deadline_file_.c_str(), S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
   }
 
@@ -202,37 +240,8 @@
     }
   }
 
-  // If we're using p2p, |install_plan_.download_url| may contain a
-  // HTTP URL even if |response.payload_urls| contain only HTTPS URLs.
-  if (!base::StartsWith(install_plan_.download_url, "https://",
-                        base::CompareCase::INSENSITIVE_ASCII)) {
-    LOG(INFO) << "Mandating hash checks since download_url is not HTTPS.";
-    return true;
-  }
-
-  // TODO(jaysri): VALIDATION: For official builds, we currently waive hash
-  // checks for HTTPS until we have rolled out at least once and are confident
-  // nothing breaks. chromium-os:37082 tracks turning this on for HTTPS
-  // eventually.
-
-  // Even if there's a single non-HTTPS URL, make the hash checks as
-  // mandatory because we could be downloading the payload from any URL later
-  // on. It's really hard to do book-keeping based on each byte being
-  // downloaded to see whether we only used HTTPS throughout.
-  for (const auto& package : response.packages) {
-    for (const string& payload_url : package.payload_urls) {
-      if (!base::StartsWith(
-              payload_url, "https://", base::CompareCase::INSENSITIVE_ASCII)) {
-        LOG(INFO) << "Mandating payload hash checks since Omaha response "
-                  << "contains non-HTTPS URL(s)";
-        return true;
-      }
-    }
-  }
-
-  LOG(INFO) << "Waiving payload hash checks since Omaha response "
-            << "only has HTTPS URL(s)";
-  return false;
+  LOG(INFO) << "Mandating hash checks for official URL on official build.";
+  return true;
 }
 
 }  // namespace chromeos_update_engine
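The rollback branch added to PerformAction() packs each key/version pair into a 32-bit value (<key:16><version:16>) and refuses the image if either packed value is entirely unknown (0xffffffff) or falls below the corresponding TPM minimum. A self-contained sketch of that check; the function and parameter names below are illustrative, not the actual implementation.

// Illustrative sketch of the rollback boot-compatibility check performed in
// OmahaResponseHandlerAction::PerformAction().
#include <cassert>
#include <cstdint>
#include <limits>

uint32_t Pack(uint16_t key, uint16_t version) {
  return static_cast<uint32_t>(key) << 16 | static_cast<uint32_t>(version);
}

// Returns true when the device can still boot the rollback image, i.e. both
// packed versions are known and not below the hardware minimums.
bool CanBootRollback(uint32_t min_kernel_key_version,
                     uint32_t min_firmware_key_version,
                     uint16_t kernel_key, uint16_t kernel,
                     uint16_t firmware_key, uint16_t firmware) {
  const uint32_t kernel_key_version = Pack(kernel_key, kernel);
  const uint32_t firmware_key_version = Pack(firmware_key, firmware);
  const uint32_t kUnknown = std::numeric_limits<uint32_t>::max();
  return kernel_key_version != kUnknown && firmware_key_version != kUnknown &&
         kernel_key_version >= min_kernel_key_version &&
         firmware_key_version >= min_firmware_key_version;
}

int main() {
  // Mirrors RollbackTest: response versions exactly match the TPM minimums.
  assert(CanBootRollback(0x00010002, 0x00030004, 1, 2, 3, 4));
  // Mirrors RollbackKernelVersionErrorTest: kernel version below the minimum.
  assert(!CanBootRollback(0x00010002, 0x00030004, 1, 1, 3, 4));
  // Entirely unknown (0xffff) versions are rejected.
  assert(!CanBootRollback(0x00010002, 0x00030004, 0xffff, 0xffff, 0xffff, 0xffff));
  return 0;
}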
diff --git a/omaha_response_handler_action.h b/omaha_response_handler_action.h
index 2974841..d2e6db8 100644
--- a/omaha_response_handler_action.h
+++ b/omaha_response_handler_action.h
@@ -33,7 +33,7 @@
 
 class OmahaResponseHandlerAction;
 
-template<>
+template <>
 class ActionTraits<OmahaResponseHandlerAction> {
  public:
   typedef OmahaResponse InputObjectType;
@@ -54,13 +54,11 @@
   // never be called
   void TerminateProcessing() override { CHECK(false); }
 
-  bool GotNoUpdateResponse() const { return got_no_update_response_; }
   const InstallPlan& install_plan() const { return install_plan_; }
 
   // Debugging/logging
   static std::string StaticType() { return "OmahaResponseHandlerAction"; }
   std::string Type() const override { return StaticType(); }
-  void set_key_path(const std::string& path) { key_path_ = path; }
 
  private:
   // Returns true if payload hash checks are mandatory based on the state
@@ -73,22 +71,18 @@
   // The install plan, if we have an update.
   InstallPlan install_plan_;
 
-  // True only if we got a response and the response said no updates
-  bool got_no_update_response_;
-
-  // Public key path to use for payload verification.
-  std::string key_path_;
-
   // File used for communication deadline to Chrome.
-  const std::string deadline_file_;
-
-  // Special ctor + friend declarations for testing purposes.
-  OmahaResponseHandlerAction(SystemState* system_state,
-                             const std::string& deadline_file);
+  std::string deadline_file_;
 
   friend class OmahaResponseHandlerActionTest;
-
+  friend class OmahaResponseHandlerActionProcessorDelegate;
   FRIEND_TEST(UpdateAttempterTest, CreatePendingErrorEventResumedTest);
+  FRIEND_TEST(UpdateAttempterTest, RollbackMetricsNotRollbackFailure);
+  FRIEND_TEST(UpdateAttempterTest, RollbackMetricsNotRollbackSuccess);
+  FRIEND_TEST(UpdateAttempterTest, RollbackMetricsRollbackFailure);
+  FRIEND_TEST(UpdateAttempterTest, RollbackMetricsRollbackSuccess);
+  FRIEND_TEST(UpdateAttempterTest, SetRollbackHappenedNotRollback);
+  FRIEND_TEST(UpdateAttempterTest, SetRollbackHappenedRollback);
   FRIEND_TEST(UpdateAttempterTest, UpdateDeferredByPolicyTest);
 
   DISALLOW_COPY_AND_ASSIGN(OmahaResponseHandlerAction);
diff --git a/omaha_response_handler_action_unittest.cc b/omaha_response_handler_action_unittest.cc
index 9e2cdd1..b47040b 100644
--- a/omaha_response_handler_action_unittest.cc
+++ b/omaha_response_handler_action_unittest.cc
@@ -18,6 +18,7 @@
 
 #include <memory>
 #include <string>
+#include <utility>
 
 #include <base/files/file_util.h>
 #include <base/files/scoped_temp_dir.h>
@@ -46,38 +47,6 @@
 
 namespace chromeos_update_engine {
 
-class OmahaResponseHandlerActionTest : public ::testing::Test {
- protected:
-  void SetUp() override {
-    FakeBootControl* fake_boot_control = fake_system_state_.fake_boot_control();
-    fake_boot_control->SetPartitionDevice(
-        kLegacyPartitionNameKernel, 0, "/dev/sdz2");
-    fake_boot_control->SetPartitionDevice(
-        kLegacyPartitionNameRoot, 0, "/dev/sdz3");
-    fake_boot_control->SetPartitionDevice(
-        kLegacyPartitionNameKernel, 1, "/dev/sdz4");
-    fake_boot_control->SetPartitionDevice(
-        kLegacyPartitionNameRoot, 1, "/dev/sdz5");
-  }
-
-  // Return true iff the OmahaResponseHandlerAction succeeded.
-  // If out is non-null, it's set w/ the response from the action.
-  bool DoTest(const OmahaResponse& in,
-              const string& deadline_file,
-              InstallPlan* out);
-
-  // Pointer to the Action, valid after |DoTest|, released when the test is
-  // finished.
-  std::unique_ptr<OmahaResponseHandlerAction> action_;
-  // Captures the action's result code, for tests that need to directly verify
-  // it in non-success cases.
-  ErrorCode action_result_code_;
-
-  FakeSystemState fake_system_state_;
-  // "Hash+"
-  const brillo::Blob expected_hash_ = {0x48, 0x61, 0x73, 0x68, 0x2b};
-};
-
 class OmahaResponseHandlerActionProcessorDelegate
     : public ActionProcessorDelegate {
  public:
@@ -87,12 +56,52 @@
                        AbstractAction* action,
                        ErrorCode code) {
     if (action->Type() == OmahaResponseHandlerAction::StaticType()) {
+      auto response_handler_action =
+          static_cast<OmahaResponseHandlerAction*>(action);
       code_ = code;
       code_set_ = true;
+      response_handler_action_install_plan_.reset(
+          new InstallPlan(response_handler_action->install_plan_));
+    } else if (action->Type() ==
+               ObjectCollectorAction<InstallPlan>::StaticType()) {
+      auto collector_action =
+          static_cast<ObjectCollectorAction<InstallPlan>*>(action);
+      collector_action_install_plan_.reset(
+          new InstallPlan(collector_action->object()));
     }
   }
   ErrorCode code_;
   bool code_set_;
+  std::unique_ptr<InstallPlan> collector_action_install_plan_;
+  std::unique_ptr<InstallPlan> response_handler_action_install_plan_;
+};
+
+class OmahaResponseHandlerActionTest : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    FakeBootControl* fake_boot_control = fake_system_state_.fake_boot_control();
+    fake_boot_control->SetPartitionDevice(kPartitionNameKernel, 0, "/dev/sdz2");
+    fake_boot_control->SetPartitionDevice(kPartitionNameRoot, 0, "/dev/sdz3");
+    fake_boot_control->SetPartitionDevice(kPartitionNameKernel, 1, "/dev/sdz4");
+    fake_boot_control->SetPartitionDevice(kPartitionNameRoot, 1, "/dev/sdz5");
+  }
+
+  // Return true iff the OmahaResponseHandlerAction succeeded.
+  // If out is non-null, it's set w/ the response from the action.
+  bool DoTest(const OmahaResponse& in,
+              const string& deadline_file,
+              InstallPlan* out);
+
+  // Delegate passed to the ActionProcessor.
+  OmahaResponseHandlerActionProcessorDelegate delegate_;
+
+  // Captures the action's result code, for tests that need to directly verify
+  // it in non-success cases.
+  ErrorCode action_result_code_;
+
+  FakeSystemState fake_system_state_;
+  // "Hash+"
+  const brillo::Blob expected_hash_ = {0x48, 0x61, 0x73, 0x68, 0x2b};
 };
 
 namespace {
@@ -115,11 +124,10 @@
   brillo::FakeMessageLoop loop(nullptr);
   loop.SetAsCurrent();
   ActionProcessor processor;
-  OmahaResponseHandlerActionProcessorDelegate delegate;
-  processor.set_delegate(&delegate);
+  processor.set_delegate(&delegate_);
 
-  ObjectFeederAction<OmahaResponse> feeder_action;
-  feeder_action.set_obj(in);
+  auto feeder_action = std::make_unique<ObjectFeederAction<OmahaResponse>>();
+  feeder_action->set_obj(in);
   if (in.update_exists && in.version != kBadVersion) {
     string expected_hash;
     for (const auto& package : in.packages)
@@ -128,7 +136,10 @@
                 SetString(kPrefsUpdateCheckResponseHash, expected_hash))
         .WillOnce(Return(true));
 
-    int slot = 1 - fake_system_state_.fake_boot_control()->GetCurrentSlot();
+    int slot =
+        fake_system_state_.request_params()->is_install()
+            ? fake_system_state_.fake_boot_control()->GetCurrentSlot()
+            : 1 - fake_system_state_.fake_boot_control()->GetCurrentSlot();
     string key = kPrefsChannelOnSlotPrefix + std::to_string(slot);
     EXPECT_CALL(*(fake_system_state_.mock_prefs()), SetString(key, testing::_))
         .WillOnce(Return(true));
@@ -138,32 +149,34 @@
   EXPECT_CALL(*(fake_system_state_.mock_payload_state()), GetCurrentUrl())
       .WillRepeatedly(Return(current_url));
 
-  action_.reset(new OmahaResponseHandlerAction(
-      &fake_system_state_,
-      (test_deadline_file.empty() ? constants::kOmahaResponseDeadlineFile
-                                  : test_deadline_file)));
-  BondActions(&feeder_action, action_.get());
-  ObjectCollectorAction<InstallPlan> collector_action;
-  BondActions(action_.get(), &collector_action);
-  processor.EnqueueAction(&feeder_action);
-  processor.EnqueueAction(action_.get());
-  processor.EnqueueAction(&collector_action);
+  auto response_handler_action =
+      std::make_unique<OmahaResponseHandlerAction>(&fake_system_state_);
+  if (!test_deadline_file.empty())
+    response_handler_action->deadline_file_ = test_deadline_file;
+
+  auto collector_action =
+      std::make_unique<ObjectCollectorAction<InstallPlan>>();
+
+  BondActions(feeder_action.get(), response_handler_action.get());
+  BondActions(response_handler_action.get(), collector_action.get());
+  processor.EnqueueAction(std::move(feeder_action));
+  processor.EnqueueAction(std::move(response_handler_action));
+  processor.EnqueueAction(std::move(collector_action));
   processor.StartProcessing();
   EXPECT_TRUE(!processor.IsRunning())
       << "Update test to handle non-async actions";
-  if (out)
-    *out = collector_action.object();
-  EXPECT_TRUE(delegate.code_set_);
-  action_result_code_ = delegate.code_;
-  return delegate.code_ == ErrorCode::kSuccess;
+
+  if (out && delegate_.collector_action_install_plan_)
+    *out = *delegate_.collector_action_install_plan_;
+
+  EXPECT_TRUE(delegate_.code_set_);
+  action_result_code_ = delegate_.code_;
+  return delegate_.code_ == ErrorCode::kSuccess;
 }
 
 TEST_F(OmahaResponseHandlerActionTest, SimpleTest) {
-  string test_deadline_file;
-  CHECK(utils::MakeTempFile("omaha_response_handler_action_unittest-XXXXXX",
-                            &test_deadline_file,
-                            nullptr));
-  ScopedPathUnlinker deadline_unlinker(test_deadline_file);
+  test_utils::ScopedTempFile test_deadline_file(
+      "omaha_response_handler_action_unittest-XXXXXX");
   {
     OmahaResponse in;
     in.update_exists = true;
@@ -176,15 +189,15 @@
     in.prompt = false;
     in.deadline = "20101020";
     InstallPlan install_plan;
-    EXPECT_TRUE(DoTest(in, test_deadline_file, &install_plan));
+    EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan));
     EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
     EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
     EXPECT_EQ(1U, install_plan.target_slot);
     string deadline;
-    EXPECT_TRUE(utils::ReadFile(test_deadline_file, &deadline));
+    EXPECT_TRUE(utils::ReadFile(test_deadline_file.path(), &deadline));
     EXPECT_EQ("20101020", deadline);
     struct stat deadline_stat;
-    EXPECT_EQ(0, stat(test_deadline_file.c_str(), &deadline_stat));
+    EXPECT_EQ(0, stat(test_deadline_file.path().c_str(), &deadline_stat));
     EXPECT_EQ(
         static_cast<mode_t>(S_IFREG | S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH),
         deadline_stat.st_mode);
@@ -203,12 +216,12 @@
     InstallPlan install_plan;
     // Set the other slot as current.
     fake_system_state_.fake_boot_control()->SetCurrentSlot(1);
-    EXPECT_TRUE(DoTest(in, test_deadline_file, &install_plan));
+    EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan));
     EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
     EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
     EXPECT_EQ(0U, install_plan.target_slot);
     string deadline;
-    EXPECT_TRUE(utils::ReadFile(test_deadline_file, &deadline) &&
+    EXPECT_TRUE(utils::ReadFile(test_deadline_file.path(), &deadline) &&
                 deadline.empty());
     EXPECT_EQ(in.version, install_plan.version);
   }
@@ -223,12 +236,40 @@
     in.deadline = "some-deadline";
     InstallPlan install_plan;
     fake_system_state_.fake_boot_control()->SetCurrentSlot(0);
-    EXPECT_TRUE(DoTest(in, test_deadline_file, &install_plan));
+    // Because rollback happened, the deadline shouldn't be written into the
+    // file.
+    EXPECT_CALL(*(fake_system_state_.mock_payload_state()),
+                GetRollbackHappened())
+        .WillOnce(Return(true));
+    EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan));
     EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
     EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
     EXPECT_EQ(1U, install_plan.target_slot);
     string deadline;
-    EXPECT_TRUE(utils::ReadFile(test_deadline_file, &deadline));
+    EXPECT_TRUE(utils::ReadFile(test_deadline_file.path(), &deadline));
+    EXPECT_TRUE(deadline.empty());
+    EXPECT_EQ(in.version, install_plan.version);
+  }
+  {
+    OmahaResponse in;
+    in.update_exists = true;
+    in.version = "a.b.c.d";
+    in.packages.push_back(
+        {.payload_urls = {kLongName}, .size = 12, .hash = kPayloadHashHex});
+    in.more_info_url = "http://more/info";
+    in.prompt = true;
+    in.deadline = "some-deadline";
+    InstallPlan install_plan;
+    fake_system_state_.fake_boot_control()->SetCurrentSlot(0);
+    EXPECT_CALL(*(fake_system_state_.mock_payload_state()),
+                GetRollbackHappened())
+        .WillOnce(Return(false));
+    EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan));
+    EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+    EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
+    EXPECT_EQ(1U, install_plan.target_slot);
+    string deadline;
+    EXPECT_TRUE(utils::ReadFile(test_deadline_file.path(), &deadline));
     EXPECT_EQ("some-deadline", deadline);
     EXPECT_EQ(in.version, install_plan.version);
   }
@@ -242,6 +283,25 @@
   EXPECT_TRUE(install_plan.partitions.empty());
 }
 
+TEST_F(OmahaResponseHandlerActionTest, InstallTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.version = "a.b.c.d";
+  in.packages.push_back(
+      {.payload_urls = {kLongName}, .size = 1, .hash = kPayloadHashHex});
+  in.packages.push_back(
+      {.payload_urls = {kLongName}, .size = 2, .hash = kPayloadHashHex});
+  in.more_info_url = "http://more/info";
+
+  OmahaRequestParams params(&fake_system_state_);
+  params.set_is_install(true);
+
+  fake_system_state_.set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_TRUE(DoTest(in, "", &install_plan));
+  EXPECT_EQ(install_plan.source_slot, UINT_MAX);
+}
+
 TEST_F(OmahaResponseHandlerActionTest, MultiPackageTest) {
   OmahaResponse in;
   in.update_exists = true;
@@ -344,7 +404,7 @@
   EXPECT_TRUE(DoTest(in, "", &install_plan));
   EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
   EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
-  EXPECT_FALSE(install_plan.hash_checks_mandatory);
+  EXPECT_TRUE(install_plan.hash_checks_mandatory);
   EXPECT_EQ(in.version, install_plan.version);
 }
 
@@ -467,6 +527,109 @@
   EXPECT_TRUE(install_plan.hash_checks_mandatory);
 }
 
+TEST_F(OmahaResponseHandlerActionTest, RollbackTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.packages.push_back({.payload_urls = {"https://RollbackTest"},
+                         .size = 1,
+                         .hash = kPayloadHashHex});
+  in.is_rollback = true;
+  in.rollback_key_version.kernel_key = 1;
+  in.rollback_key_version.kernel = 2;
+  in.rollback_key_version.firmware_key = 3;
+  in.rollback_key_version.firmware = 4;
+
+  fake_system_state_.fake_hardware()->SetMinKernelKeyVersion(0x00010002);
+  fake_system_state_.fake_hardware()->SetMinFirmwareKeyVersion(0x00030004);
+
+  OmahaRequestParams params(&fake_system_state_);
+  params.set_rollback_allowed(true);
+
+  fake_system_state_.set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_TRUE(DoTest(in, "", &install_plan));
+  EXPECT_TRUE(install_plan.is_rollback);
+}
+
+TEST_F(OmahaResponseHandlerActionTest, RollbackKernelVersionErrorTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.packages.push_back({.payload_urls = {"https://RollbackTest"},
+                         .size = 1,
+                         .hash = kPayloadHashHex});
+  in.is_rollback = true;
+  in.rollback_key_version.kernel_key = 1;
+  in.rollback_key_version.kernel = 1;  // This is lower than the minimum.
+  in.rollback_key_version.firmware_key = 3;
+  in.rollback_key_version.firmware = 4;
+
+  fake_system_state_.fake_hardware()->SetMinKernelKeyVersion(0x00010002);
+  fake_system_state_.fake_hardware()->SetMinFirmwareKeyVersion(0x00030004);
+
+  OmahaRequestParams params(&fake_system_state_);
+  params.set_rollback_allowed(true);
+
+  fake_system_state_.set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_FALSE(DoTest(in, "", &install_plan));
+}
+
+TEST_F(OmahaResponseHandlerActionTest, RollbackFirmwareVersionErrorTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.packages.push_back({.payload_urls = {"https://RollbackTest"},
+                         .size = 1,
+                         .hash = kPayloadHashHex});
+  in.is_rollback = true;
+  in.rollback_key_version.kernel_key = 1;
+  in.rollback_key_version.kernel = 2;
+  in.rollback_key_version.firmware_key = 3;
+  in.rollback_key_version.firmware = 3;  // This is lower than the minimum.
+
+  fake_system_state_.fake_hardware()->SetMinKernelKeyVersion(0x00010002);
+  fake_system_state_.fake_hardware()->SetMinFirmwareKeyVersion(0x00030004);
+
+  OmahaRequestParams params(&fake_system_state_);
+  params.set_rollback_allowed(true);
+
+  fake_system_state_.set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_FALSE(DoTest(in, "", &install_plan));
+}
+
+TEST_F(OmahaResponseHandlerActionTest, RollbackNotRollbackTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.packages.push_back({.payload_urls = {"https://RollbackTest"},
+                         .size = 1,
+                         .hash = kPayloadHashHex});
+  in.is_rollback = false;
+
+  OmahaRequestParams params(&fake_system_state_);
+  params.set_rollback_allowed(true);
+
+  fake_system_state_.set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_TRUE(DoTest(in, "", &install_plan));
+  EXPECT_FALSE(install_plan.is_rollback);
+}
+
+TEST_F(OmahaResponseHandlerActionTest, RollbackNotAllowedTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.packages.push_back({.payload_urls = {"https://RollbackTest"},
+                         .size = 1,
+                         .hash = kPayloadHashHex});
+  in.is_rollback = true;
+
+  OmahaRequestParams params(&fake_system_state_);
+  params.set_rollback_allowed(false);
+
+  fake_system_state_.set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_FALSE(DoTest(in, "", &install_plan));
+}
+
 TEST_F(OmahaResponseHandlerActionTest, SystemVersionTest) {
   OmahaResponse in;
   in.update_exists = true;
@@ -514,9 +677,8 @@
   EXPECT_EQ(ErrorCode::kOmahaUpdateDeferredPerPolicy, action_result_code_);
   // Verify that DoTest() didn't set the output install plan.
   EXPECT_EQ("", install_plan.version);
-  // Copy the underlying InstallPlan from the Action (like a real Delegate).
-  install_plan = action_->install_plan();
   // Now verify the InstallPlan that was generated.
+  install_plan = *delegate_.response_handler_action_install_plan_;
   EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
   EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
   EXPECT_EQ(1U, install_plan.target_slot);
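The reworked unittest above no longer keeps the action alive to inspect it afterwards; instead, the processor delegate snapshots the InstallPlan into a unique_ptr when ActionCompleted fires, and the tests read that copy. A stripped-down sketch of the same capture pattern, with placeholder types rather than the real ActionProcessor interfaces:

// Sketch of capturing an action's output in a delegate callback so the test
// can inspect it after the action itself has been destroyed. All types here
// are placeholders, not the update_engine ActionProcessor API.
#include <cassert>
#include <memory>
#include <string>

struct InstallPlan {
  std::string download_url;
};

class Delegate {
 public:
  // Called by a hypothetical processor when an action finishes; copies the
  // produced plan so it outlives the action.
  void ActionCompleted(const InstallPlan& plan, bool ok) {
    code_set_ = true;
    ok_ = ok;
    captured_plan_ = std::make_unique<InstallPlan>(plan);
  }

  bool code_set_ = false;
  bool ok_ = false;
  std::unique_ptr<InstallPlan> captured_plan_;
};

int main() {
  Delegate delegate;
  {
    InstallPlan plan{"https://example.invalid/payload"};  // Placeholder URL.
    delegate.ActionCompleted(plan, true);
  }  // The original plan is gone; the delegate's copy remains usable.
  assert(delegate.code_set_ && delegate.ok_);
  assert(delegate.captured_plan_->download_url ==
         "https://example.invalid/payload");
  return 0;
}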
diff --git a/p2p_manager.cc b/p2p_manager.cc
index 1ee124d..6720908 100644
--- a/p2p_manager.cc
+++ b/p2p_manager.cc
@@ -89,9 +89,7 @@
  public:
   ConfigurationImpl() {}
 
-  FilePath GetP2PDir() override {
-    return FilePath(kDefaultP2PDir);
-  }
+  FilePath GetP2PDir() override { return FilePath(kDefaultP2PDir); }
 
   vector<string> GetInitctlArgs(bool is_start) override {
     vector<string> args;
@@ -101,7 +99,7 @@
     return args;
   }
 
-  vector<string> GetP2PClientArgs(const string &file_id,
+  vector<string> GetP2PClientArgs(const string& file_id,
                                   size_t minimum_size) override {
     vector<string> args;
     args.push_back("p2p-client");
@@ -117,8 +115,8 @@
 // The default P2PManager implementation.
 class P2PManagerImpl : public P2PManager {
  public:
-  P2PManagerImpl(Configuration *configuration,
-                 ClockInterface *clock,
+  P2PManagerImpl(Configuration* configuration,
+                 ClockInterface* clock,
                  UpdateManager* update_manager,
                  const string& file_extension,
                  const int num_files_to_keep,
@@ -134,22 +132,17 @@
                         size_t minimum_size,
                         TimeDelta max_time_to_wait,
                         LookupCallback callback) override;
-  bool FileShare(const string& file_id,
-                 size_t expected_size) override;
+  bool FileShare(const string& file_id, size_t expected_size) override;
   FilePath FileGetPath(const string& file_id) override;
   ssize_t FileGetSize(const string& file_id) override;
   ssize_t FileGetExpectedSize(const string& file_id) override;
-  bool FileGetVisible(const string& file_id,
-                      bool *out_result) override;
+  bool FileGetVisible(const string& file_id, bool* out_result) override;
   bool FileMakeVisible(const string& file_id) override;
   int CountSharedFiles() override;
 
  private:
   // Enumeration for specifying visibility.
-  enum Visibility {
-    kVisible,
-    kNonVisible
-  };
+  enum Visibility { kVisible, kNonVisible };
 
   // Returns "." + |file_extension_| + ".p2p" if |visibility| is
   // |kVisible|. Returns the same concatenated with ".tmp" otherwise.
@@ -218,19 +211,19 @@
 
 const char P2PManagerImpl::kTmpExtension[] = ".tmp";
 
-P2PManagerImpl::P2PManagerImpl(Configuration *configuration,
-                               ClockInterface *clock,
+P2PManagerImpl::P2PManagerImpl(Configuration* configuration,
+                               ClockInterface* clock,
                                UpdateManager* update_manager,
                                const string& file_extension,
                                const int num_files_to_keep,
                                const TimeDelta& max_file_age)
-  : clock_(clock),
-    update_manager_(update_manager),
-    file_extension_(file_extension),
-    num_files_to_keep_(num_files_to_keep),
-    max_file_age_(max_file_age) {
-  configuration_.reset(configuration != nullptr ? configuration :
-                       new ConfigurationImpl());
+    : clock_(clock),
+      update_manager_(update_manager),
+      file_extension_(file_extension),
+      num_files_to_keep_(num_files_to_keep),
+      max_file_age_(max_file_age) {
+  configuration_.reset(configuration != nullptr ? configuration
+                                                : new ConfigurationImpl());
 }
 
 void P2PManagerImpl::SetDevicePolicy(
@@ -272,9 +265,9 @@
   // running" or "stop if running".
   // TODO(zeuthen,chromium:277051): Avoid doing this.
   if (return_code != 0) {
-    const char *expected_error_message = should_be_running ?
-      "initctl: Job is already running: p2p\n" :
-      "initctl: Unknown instance \n";
+    const char* expected_error_message =
+        should_be_running ? "initctl: Job is already running: p2p\n"
+                          : "initctl: Unknown instance \n";
     if (output != expected_error_message)
       return false;
   }
@@ -303,13 +296,13 @@
 string P2PManagerImpl::GetExt(Visibility visibility) {
   string ext = string(".") + file_extension_ + kP2PExtension;
   switch (visibility) {
-  case kVisible:
-    break;
-  case kNonVisible:
-    ext += kTmpExtension;
-    break;
-  // Don't add a default case to let the compiler warn about newly
-  // added enum values.
+    case kVisible:
+      break;
+    case kNonVisible:
+      ext += kTmpExtension;
+      break;
+      // Don't add a default case to let the compiler warn about newly
+      // added enum values.
   }
   return ext;
 }
@@ -318,10 +311,9 @@
   return configuration_->GetP2PDir().Append(file_id + GetExt(visibility));
 }
 
-bool P2PManagerImpl::DeleteP2PFile(const FilePath& path,
-                                   const string& reason) {
-  LOG(INFO) << "Deleting p2p file " << path.value()
-            << " (reason: " << reason << ")";
+bool P2PManagerImpl::DeleteP2PFile(const FilePath& path, const string& reason) {
+  LOG(INFO) << "Deleting p2p file " << path.value() << " (reason: " << reason
+            << ")";
   if (unlink(path.value().c_str()) != 0) {
     PLOG(ERROR) << "Error deleting p2p file " << path.value();
     return false;
@@ -329,7 +321,6 @@
   return true;
 }
 
-
 bool P2PManagerImpl::PerformHousekeeping() {
   // Open p2p dir.
   FilePath p2p_dir = configuration_->GetP2PDir();
@@ -342,10 +333,10 @@
   base::FileEnumerator dir(p2p_dir, false, base::FileEnumerator::FILES);
   // Go through all files and collect their mtime.
   for (FilePath name = dir.Next(); !name.empty(); name = dir.Next()) {
-    if (!(base::EndsWith(name.value(), ext_visible,
-                         base::CompareCase::SENSITIVE) ||
-          base::EndsWith(name.value(), ext_non_visible,
-                         base::CompareCase::SENSITIVE))) {
+    if (!(base::EndsWith(
+              name.value(), ext_visible, base::CompareCase::SENSITIVE) ||
+          base::EndsWith(
+              name.value(), ext_non_visible, base::CompareCase::SENSITIVE))) {
       continue;
     }
 
@@ -382,7 +373,7 @@
 class LookupData {
  public:
   explicit LookupData(P2PManager::LookupCallback callback)
-    : callback_(callback) {}
+      : callback_(callback) {}
 
   ~LookupData() {
     if (timeout_task_ != MessageLoop::kTaskIdNull)
@@ -399,7 +390,9 @@
 
     // We expect to run just "p2p-client" and find it in the path.
     child_pid_ = Subprocess::Get().ExecFlags(
-        cmd, Subprocess::kSearchPath, {},
+        cmd,
+        Subprocess::kSearchPath,
+        {},
         Bind(&LookupData::OnLookupDone, base::Unretained(this)));
 
     if (!child_pid_) {
@@ -418,9 +411,10 @@
 
  private:
   void ReportErrorAndDeleteInIdle() {
-    MessageLoop::current()->PostTask(FROM_HERE, Bind(
-        &LookupData::OnIdleForReportErrorAndDelete,
-        base::Unretained(this)));
+    MessageLoop::current()->PostTask(
+        FROM_HERE,
+        Bind(&LookupData::OnIdleForReportErrorAndDelete,
+             base::Unretained(this)));
   }
 
   void OnIdleForReportErrorAndDelete() {
@@ -463,8 +457,7 @@
   void OnLookupDone(int return_code, const string& output) {
     child_pid_ = 0;
     if (return_code != 0) {
-      LOG(INFO) << "Child exited with non-zero exit code "
-                << return_code;
+      LOG(INFO) << "Child exited with non-zero exit code " << return_code;
       ReportError();
     } else {
       ReportSuccess(output);
@@ -494,15 +487,14 @@
                                       size_t minimum_size,
                                       TimeDelta max_time_to_wait,
                                       LookupCallback callback) {
-  LookupData *lookup_data = new LookupData(callback);
+  LookupData* lookup_data = new LookupData(callback);
   string file_id_with_ext = file_id + "." + file_extension_;
-  vector<string> args = configuration_->GetP2PClientArgs(file_id_with_ext,
-                                                         minimum_size);
+  vector<string> args =
+      configuration_->GetP2PClientArgs(file_id_with_ext, minimum_size);
   lookup_data->InitiateLookup(args, max_time_to_wait);
 }
 
-bool P2PManagerImpl::FileShare(const string& file_id,
-                               size_t expected_size) {
+bool P2PManagerImpl::FileShare(const string& file_id, size_t expected_size) {
   // Check if the file already exists.
   FilePath path = FileGetPath(file_id);
   if (!path.empty()) {
@@ -563,8 +555,8 @@
       } else {
         // ENOSPC can happen (funky race though, cf. the statvfs() check
         // above), handle it gracefully, e.g. use logging level INFO.
-        PLOG(INFO) << "Error allocating " << expected_size
-                   << " bytes for file " << path.value();
+        PLOG(INFO) << "Error allocating " << expected_size << " bytes for file "
+                   << path.value();
         if (unlink(path.value().c_str()) != 0) {
           PLOG(ERROR) << "Error deleting file with path " << path.value();
         }
@@ -573,8 +565,11 @@
     }
 
     string decimal_size = std::to_string(expected_size);
-    if (fsetxattr(fd, kCrosP2PFileSizeXAttrName,
-                  decimal_size.c_str(), decimal_size.size(), 0) != 0) {
+    if (fsetxattr(fd,
+                  kCrosP2PFileSizeXAttrName,
+                  decimal_size.c_str(),
+                  decimal_size.size(),
+                  0) != 0) {
       PLOG(ERROR) << "Error setting xattr " << path.value();
       return false;
     }
@@ -601,8 +596,7 @@
   return path;
 }
 
-bool P2PManagerImpl::FileGetVisible(const string& file_id,
-                                    bool *out_result) {
+bool P2PManagerImpl::FileGetVisible(const string& file_id, bool* out_result) {
   FilePath path = FileGetPath(file_id);
   if (path.empty()) {
     LOG(ERROR) << "No file for id " << file_id;
@@ -628,8 +622,8 @@
   FilePath new_path = path.RemoveExtension();
   LOG_ASSERT(new_path.MatchesExtension(kP2PExtension));
   if (rename(path.value().c_str(), new_path.value().c_str()) != 0) {
-    PLOG(ERROR) << "Error renaming " << path.value()
-                << " to " << new_path.value();
+    PLOG(ERROR) << "Error renaming " << path.value() << " to "
+                << new_path.value();
     return false;
   }
 
@@ -649,10 +643,12 @@
   if (path.empty())
     return -1;
 
-  char ea_value[64] = { 0 };
+  char ea_value[64] = {0};
   ssize_t ea_size;
-  ea_size = getxattr(path.value().c_str(), kCrosP2PFileSizeXAttrName,
-                     &ea_value, sizeof(ea_value) - 1);
+  ea_size = getxattr(path.value().c_str(),
+                     kCrosP2PFileSizeXAttrName,
+                     &ea_value,
+                     sizeof(ea_value) - 1);
   if (ea_size == -1) {
     PLOG(ERROR) << "Error calling getxattr() on file " << path.value();
     return -1;
@@ -661,9 +657,8 @@
   char* endp = nullptr;
   long long int val = strtoll(ea_value, &endp, 0);  // NOLINT(runtime/int)
   if (*endp != '\0') {
-    LOG(ERROR) << "Error parsing the value '" << ea_value
-               << "' of the xattr " << kCrosP2PFileSizeXAttrName
-               << " as an integer";
+    LOG(ERROR) << "Error parsing the value '" << ea_value << "' of the xattr "
+               << kCrosP2PFileSizeXAttrName << " as an integer";
     return -1;
   }
 
@@ -679,10 +674,10 @@
 
   base::FileEnumerator dir(p2p_dir, false, base::FileEnumerator::FILES);
   for (FilePath name = dir.Next(); !name.empty(); name = dir.Next()) {
-    if (base::EndsWith(name.value(), ext_visible,
-                       base::CompareCase::SENSITIVE) ||
-        base::EndsWith(name.value(), ext_non_visible,
-                       base::CompareCase::SENSITIVE)) {
+    if (base::EndsWith(
+            name.value(), ext_visible, base::CompareCase::SENSITIVE) ||
+        base::EndsWith(
+            name.value(), ext_non_visible, base::CompareCase::SENSITIVE)) {
       num_files += 1;
     }
   }
@@ -694,10 +689,10 @@
   if (waiting_for_enabled_status_change_)
     return;
 
-  Callback<void(EvalStatus, const bool&)> callback = Bind(
-      &P2PManagerImpl::OnEnabledStatusChange, base::Unretained(this));
-  update_manager_->AsyncPolicyRequest(callback, &Policy::P2PEnabledChanged,
-                                      is_enabled_);
+  Callback<void(EvalStatus, const bool&)> callback =
+      Bind(&P2PManagerImpl::OnEnabledStatusChange, base::Unretained(this));
+  update_manager_->AsyncPolicyRequest(
+      callback, &Policy::P2PEnabledChanged, is_enabled_);
   waiting_for_enabled_status_change_ = true;
 }
 
@@ -727,13 +722,12 @@
   ScheduleEnabledStatusChange();
 }
 
-P2PManager* P2PManager::Construct(
-    Configuration *configuration,
-    ClockInterface *clock,
-    UpdateManager* update_manager,
-    const string& file_extension,
-    const int num_files_to_keep,
-    const TimeDelta& max_file_age) {
+P2PManager* P2PManager::Construct(Configuration* configuration,
+                                  ClockInterface* clock,
+                                  UpdateManager* update_manager,
+                                  const string& file_extension,
+                                  const int num_files_to_keep,
+                                  const TimeDelta& max_file_age) {
   return new P2PManagerImpl(configuration,
                             clock,
                             update_manager,
diff --git a/p2p_manager.h b/p2p_manager.h
index 4ffab9a..ef62f0d 100644
--- a/p2p_manager.h
+++ b/p2p_manager.h
@@ -119,8 +119,7 @@
   //
   // If the file already exists, true is returned. Any on-disk xattr
   // is not updated.
-  virtual bool FileShare(const std::string& file_id,
-                         size_t expected_size) = 0;
+  virtual bool FileShare(const std::string& file_id, size_t expected_size) = 0;
 
   // Gets a fully qualified path for the file identified by |file_id|.
   // If the file has not been shared already using the FileShare()
@@ -148,8 +147,7 @@
   // Gets whether the file identified by |file_id| is publicly
   // visible. If |out_result| is not null, the result is returned
   // there. Returns false if an error occurs.
-  virtual bool FileGetVisible(const std::string& file_id,
-                              bool *out_result) = 0;
+  virtual bool FileGetVisible(const std::string& file_id, bool* out_result) = 0;
 
   // Makes the file identified by |file_id| publicly visible
   // (e.g. removes the .tmp extension). If the file is already
@@ -175,8 +173,8 @@
   // |max_file_age| parameter specifies the maximum file age after
   // performing housekeeping (pass zero to allow files of any age).
   static P2PManager* Construct(
-      Configuration *configuration,
-      ClockInterface *clock,
+      Configuration* configuration,
+      ClockInterface* clock,
       chromeos_update_manager::UpdateManager* update_manager,
       const std::string& file_extension,
       const int num_files_to_keep,
diff --git a/p2p_manager_unittest.cc b/p2p_manager_unittest.cc
index 5ffb358..5771ec1 100644
--- a/p2p_manager_unittest.cc
+++ b/p2p_manager_unittest.cc
@@ -43,6 +43,7 @@
 
 #include "update_engine/common/fake_clock.h"
 #include "update_engine/common/prefs.h"
+#include "update_engine/common/subprocess.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/fake_p2p_manager_configuration.h"
@@ -54,10 +55,10 @@
 using std::string;
 using std::unique_ptr;
 using std::vector;
+using testing::_;
 using testing::DoAll;
 using testing::Return;
 using testing::SetArgPointee;
-using testing::_;
 
 namespace chromeos_update_engine {
 
@@ -83,8 +84,11 @@
     fake_um_.set_policy(mock_policy_);
 
     // Construct the P2P manager under test.
-    manager_.reset(P2PManager::Construct(test_conf_, &fake_clock_, &fake_um_,
-                                         "cros_au", 3,
+    manager_.reset(P2PManager::Construct(test_conf_,
+                                         &fake_clock_,
+                                         &fake_um_,
+                                         "cros_au",
+                                         3,
                                          TimeDelta::FromDays(5)));
   }
 
@@ -94,16 +98,15 @@
   Subprocess subprocess_;
 
   // The P2PManager::Configuration instance used for testing.
-  FakeP2PManagerConfiguration *test_conf_;
+  FakeP2PManagerConfiguration* test_conf_;
 
   FakeClock fake_clock_;
-  chromeos_update_manager::MockPolicy *mock_policy_ = nullptr;
+  chromeos_update_manager::MockPolicy* mock_policy_ = nullptr;
   chromeos_update_manager::FakeUpdateManager fake_um_;
 
   unique_ptr<P2PManager> manager_;
 };
 
-
 // Check that IsP2PEnabled() polls the policy correctly, with the value not
 // changing between calls.
 TEST_F(P2PManagerTest, P2PEnabledInitAndNotChanged) {
@@ -119,9 +122,8 @@
 // between calls.
 TEST_F(P2PManagerTest, P2PEnabledInitAndChanged) {
   EXPECT_CALL(*mock_policy_, P2PEnabled(_, _, _, _))
-      .WillOnce(DoAll(
-              SetArgPointee<3>(true),
-              Return(chromeos_update_manager::EvalStatus::kSucceeded)));
+      .WillOnce(DoAll(SetArgPointee<3>(true),
+                      Return(chromeos_update_manager::EvalStatus::kSucceeded)));
   EXPECT_CALL(*mock_policy_, P2PEnabledChanged(_, _, _, _, true));
   EXPECT_CALL(*mock_policy_, P2PEnabledChanged(_, _, _, _, false));
 
@@ -136,23 +138,26 @@
   // we need to reallocate the test_conf_ member, whose currently aliased object
   // will be freed.
   test_conf_ = new FakeP2PManagerConfiguration();
-  manager_.reset(P2PManager::Construct(
-      test_conf_, &fake_clock_, &fake_um_, "cros_au", 3,
-      TimeDelta() /* max_file_age */));
+  manager_.reset(P2PManager::Construct(test_conf_,
+                                       &fake_clock_,
+                                       &fake_um_,
+                                       "cros_au",
+                                       3,
+                                       TimeDelta() /* max_file_age */));
   EXPECT_EQ(manager_->CountSharedFiles(), 0);
 
   base::Time start_time = base::Time::FromDoubleT(1246996800.);
   // Generate files with different timestamps matching our pattern and generate
   // other files not matching the pattern.
   for (int n = 0; n < 5; n++) {
-    base::FilePath path = test_conf_->GetP2PDir().Append(base::StringPrintf(
-        "file_%d.cros_au.p2p", n));
+    base::FilePath path = test_conf_->GetP2PDir().Append(
+        base::StringPrintf("file_%d.cros_au.p2p", n));
     base::Time file_time = start_time + TimeDelta::FromMinutes(n);
     EXPECT_EQ(0, base::WriteFile(path, nullptr, 0));
     EXPECT_TRUE(base::TouchFile(path, file_time, file_time));
 
-    path = test_conf_->GetP2PDir().Append(base::StringPrintf(
-        "file_%d.OTHER.p2p", n));
+    path = test_conf_->GetP2PDir().Append(
+        base::StringPrintf("file_%d.OTHER.p2p", n));
     EXPECT_EQ(0, base::WriteFile(path, nullptr, 0));
     EXPECT_TRUE(base::TouchFile(path, file_time, file_time));
   }
@@ -169,13 +174,11 @@
 
     expect = (n >= 2);
     file_name = base::StringPrintf(
-        "%s/file_%d.cros_au.p2p",
-         test_conf_->GetP2PDir().value().c_str(), n);
+        "%s/file_%d.cros_au.p2p", test_conf_->GetP2PDir().value().c_str(), n);
     EXPECT_EQ(expect, utils::FileExists(file_name.c_str()));
 
     file_name = base::StringPrintf(
-        "%s/file_%d.OTHER.p2p",
-        test_conf_->GetP2PDir().value().c_str(), n);
+        "%s/file_%d.OTHER.p2p", test_conf_->GetP2PDir().value().c_str(), n);
     EXPECT_TRUE(utils::FileExists(file_name.c_str()));
   }
   // CountSharedFiles() only counts 'cros_au' files.
@@ -183,7 +186,7 @@
 }
 
 // Check that we keep files with the .$EXT.p2p extension not older
-// than some specificed age (5 days, in this test).
+// than some specific age (5 days, in this test).
 TEST_F(P2PManagerTest, HousekeepingAgeLimit) {
   // We set the cutoff time to be 1 billion seconds (01:46:40 UTC on 9
   // September 2001 - arbitrary number, but constant to avoid test
@@ -200,16 +203,19 @@
   // Note that we need to reallocate the test_conf_ member, whose currently
   // aliased object will be freed.
   test_conf_ = new FakeP2PManagerConfiguration();
-  manager_.reset(P2PManager::Construct(
-      test_conf_, &fake_clock_, &fake_um_, "cros_au",
-      0 /* num_files_to_keep */, age_limit));
+  manager_.reset(P2PManager::Construct(test_conf_,
+                                       &fake_clock_,
+                                       &fake_um_,
+                                       "cros_au",
+                                       0 /* num_files_to_keep */,
+                                       age_limit));
   EXPECT_EQ(manager_->CountSharedFiles(), 0);
 
   // Generate files with different timestamps matching our pattern and generate
   // other files not matching the pattern.
   for (int n = 0; n < 5; n++) {
-    base::FilePath path = test_conf_->GetP2PDir().Append(base::StringPrintf(
-        "file_%d.cros_au.p2p", n));
+    base::FilePath path = test_conf_->GetP2PDir().Append(
+        base::StringPrintf("file_%d.cros_au.p2p", n));
 
     // With five files and aiming for two of them to be before
     // |cutoff_time|, we distribute it like this:
@@ -218,14 +224,14 @@
     //                            |
     //                       cutoff_time
     //
-    base::Time file_date = cutoff_time + (n - 2) * TimeDelta::FromDays(1)
-        + TimeDelta::FromHours(12);
+    base::Time file_date = cutoff_time + (n - 2) * TimeDelta::FromDays(1) +
+                           TimeDelta::FromHours(12);
 
     EXPECT_EQ(0, base::WriteFile(path, nullptr, 0));
     EXPECT_TRUE(base::TouchFile(path, file_date, file_date));
 
-    path = test_conf_->GetP2PDir().Append(base::StringPrintf(
-        "file_%d.OTHER.p2p", n));
+    path = test_conf_->GetP2PDir().Append(
+        base::StringPrintf("file_%d.OTHER.p2p", n));
     EXPECT_EQ(0, base::WriteFile(path, nullptr, 0));
     EXPECT_TRUE(base::TouchFile(path, file_date, file_date));
   }
@@ -242,23 +248,23 @@
 
     expect = (n >= 2);
     file_name = base::StringPrintf(
-        "%s/file_%d.cros_au.p2p",
-         test_conf_->GetP2PDir().value().c_str(), n);
+        "%s/file_%d.cros_au.p2p", test_conf_->GetP2PDir().value().c_str(), n);
     EXPECT_EQ(expect, utils::FileExists(file_name.c_str()));
 
     file_name = base::StringPrintf(
-        "%s/file_%d.OTHER.p2p",
-        test_conf_->GetP2PDir().value().c_str(), n);
+        "%s/file_%d.OTHER.p2p", test_conf_->GetP2PDir().value().c_str(), n);
     EXPECT_TRUE(utils::FileExists(file_name.c_str()));
   }
   // CountSharedFiles() only counts 'cros_au' files.
   EXPECT_EQ(manager_->CountSharedFiles(), 3);
 }
 
-static bool CheckP2PFile(const string& p2p_dir, const string& file_name,
-                         ssize_t expected_size, ssize_t expected_size_xattr) {
+static bool CheckP2PFile(const string& p2p_dir,
+                         const string& file_name,
+                         ssize_t expected_size,
+                         ssize_t expected_size_xattr) {
   string path = p2p_dir + "/" + file_name;
-  char ea_value[64] = { 0 };
+  char ea_value[64] = {0};
   ssize_t ea_size;
 
   off_t p2p_size = utils::FileSize(path);
@@ -269,15 +275,15 @@
 
   if (expected_size != 0) {
     if (p2p_size != expected_size) {
-      LOG(ERROR) << "Expected size " << expected_size
-                 << " but size was " << p2p_size;
+      LOG(ERROR) << "Expected size " << expected_size << " but size was "
+                 << p2p_size;
       return false;
     }
   }
 
   if (expected_size_xattr == 0) {
-    ea_size = getxattr(path.c_str(), "user.cros-p2p-filesize",
-                       &ea_value, sizeof ea_value - 1);
+    ea_size = getxattr(
+        path.c_str(), "user.cros-p2p-filesize", &ea_value, sizeof ea_value - 1);
     if (ea_size == -1 && errno == ENODATA) {
       // This is valid behavior as we support files without the xattr set.
     } else {
@@ -286,8 +292,8 @@
       return false;
     }
   } else {
-    ea_size = getxattr(path.c_str(), "user.cros-p2p-filesize",
-                       &ea_value, sizeof ea_value - 1);
+    ea_size = getxattr(
+        path.c_str(), "user.cros-p2p-filesize", &ea_value, sizeof ea_value - 1);
     if (ea_size < 0) {
       LOG(ERROR) << "Error getting xattr attribute";
       return false;
@@ -295,8 +301,7 @@
     char* endp = nullptr;
     long long int val = strtoll(ea_value, &endp, 0);  // NOLINT(runtime/int)
     if (endp == nullptr || *endp != '\0') {
-      LOG(ERROR) << "Error parsing xattr '" << ea_value
-                 << "' as an integer";
+      LOG(ERROR) << "Error parsing xattr '" << ea_value << "' as an integer";
       return false;
     }
     if (val != expected_size_xattr) {
@@ -309,11 +314,13 @@
   return true;
 }
 
-static bool CreateP2PFile(string p2p_dir, string file_name,
-                          size_t size, size_t size_xattr) {
+static bool CreateP2PFile(string p2p_dir,
+                          string file_name,
+                          size_t size,
+                          size_t size_xattr) {
   string path = p2p_dir + "/" + file_name;
 
-  int fd = open(path.c_str(), O_CREAT|O_RDWR, 0644);
+  int fd = open(path.c_str(), O_CREAT | O_RDWR, 0644);
   if (fd == -1) {
     PLOG(ERROR) << "Error creating file with path " << path;
     return false;
@@ -326,8 +333,11 @@
 
   if (size_xattr != 0) {
     string decimal_size = std::to_string(size_xattr);
-    if (fsetxattr(fd, "user.cros-p2p-filesize",
-                  decimal_size.c_str(), decimal_size.size(), 0) != 0) {
+    if (fsetxattr(fd,
+                  "user.cros-p2p-filesize",
+                  decimal_size.c_str(),
+                  decimal_size.size(),
+                  0) != 0) {
       PLOG(ERROR) << "Error setting xattr on " << path;
       close(fd);
       return false;
@@ -340,18 +350,15 @@
 
 // Check that sharing a *new* file works.
 TEST_F(P2PManagerTest, ShareFile) {
-  if (!test_utils::IsXAttrSupported(base::FilePath("/tmp"))) {
-    LOG(WARNING) << "Skipping test because /tmp does not support xattr. "
-                 << "Please update your system to support this feature.";
-    return;
-  }
   const int kP2PTestFileSize = 1000 * 1000;  // 1 MB
 
   EXPECT_TRUE(manager_->FileShare("foo", kP2PTestFileSize));
   EXPECT_EQ(manager_->FileGetPath("foo"),
             test_conf_->GetP2PDir().Append("foo.cros_au.p2p.tmp"));
   EXPECT_TRUE(CheckP2PFile(test_conf_->GetP2PDir().value(),
-                           "foo.cros_au.p2p.tmp", 0, kP2PTestFileSize));
+                           "foo.cros_au.p2p.tmp",
+                           0,
+                           kP2PTestFileSize));
 
   // Sharing it again - with the same expected size - should return true
   EXPECT_TRUE(manager_->FileShare("foo", kP2PTestFileSize));
@@ -362,11 +369,6 @@
 
 // Check that making a shared file visible does what is expected.
 TEST_F(P2PManagerTest, MakeFileVisible) {
-  if (!test_utils::IsXAttrSupported(base::FilePath("/tmp"))) {
-    LOG(WARNING) << "Skipping test because /tmp does not support xattr. "
-                 << "Please update your system to support this feature.";
-    return;
-  }
   const int kP2PTestFileSize = 1000 * 1000;  // 1 MB
 
   // First, check that it's not visible.
@@ -374,7 +376,9 @@
   EXPECT_EQ(manager_->FileGetPath("foo"),
             test_conf_->GetP2PDir().Append("foo.cros_au.p2p.tmp"));
   EXPECT_TRUE(CheckP2PFile(test_conf_->GetP2PDir().value(),
-                           "foo.cros_au.p2p.tmp", 0, kP2PTestFileSize));
+                           "foo.cros_au.p2p.tmp",
+                           0,
+                           kP2PTestFileSize));
   // Make the file visible and check that it changed its name. Do it
   // twice to check that FileMakeVisible() is idempotent.
   for (int n = 0; n < 2; n++) {
@@ -382,18 +386,14 @@
     EXPECT_EQ(manager_->FileGetPath("foo"),
               test_conf_->GetP2PDir().Append("foo.cros_au.p2p"));
     EXPECT_TRUE(CheckP2PFile(test_conf_->GetP2PDir().value(),
-                             "foo.cros_au.p2p", 0, kP2PTestFileSize));
+                             "foo.cros_au.p2p",
+                             0,
+                             kP2PTestFileSize));
   }
 }
 
 // Check that we return the right values for existing files in P2P_DIR.
 TEST_F(P2PManagerTest, ExistingFiles) {
-  if (!test_utils::IsXAttrSupported(base::FilePath("/tmp"))) {
-    LOG(WARNING) << "Skipping test because /tmp does not support xattr. "
-                 << "Please update your system to support this feature.";
-    return;
-  }
-
   bool visible;
 
   // Check that errors are returned if the file does not exist
@@ -402,8 +402,8 @@
   EXPECT_EQ(manager_->FileGetExpectedSize("foo"), -1);
   EXPECT_FALSE(manager_->FileGetVisible("foo", nullptr));
   // ... then create the file ...
-  EXPECT_TRUE(CreateP2PFile(test_conf_->GetP2PDir().value(),
-                            "foo.cros_au.p2p", 42, 43));
+  EXPECT_TRUE(CreateP2PFile(
+      test_conf_->GetP2PDir().value(), "foo.cros_au.p2p", 42, 43));
   // ... and then check that the expected values are returned
   EXPECT_EQ(manager_->FileGetPath("foo"),
             test_conf_->GetP2PDir().Append("foo.cros_au.p2p"));
@@ -418,8 +418,8 @@
   EXPECT_EQ(manager_->FileGetExpectedSize("bar"), -1);
   EXPECT_FALSE(manager_->FileGetVisible("bar", nullptr));
   // ... then create the file ...
-  EXPECT_TRUE(CreateP2PFile(test_conf_->GetP2PDir().value(),
-                            "bar.cros_au.p2p.tmp", 44, 45));
+  EXPECT_TRUE(CreateP2PFile(
+      test_conf_->GetP2PDir().value(), "bar.cros_au.p2p.tmp", 44, 45));
   // ... and then check that the expected values are returned
   EXPECT_EQ(manager_->FileGetPath("bar"),
             test_conf_->GetP2PDir().Append("bar.cros_au.p2p.tmp"));
@@ -438,11 +438,11 @@
   EXPECT_TRUE(manager_->EnsureP2PRunning());
   test_conf_->SetInitctlStartCommand({"false"});
   EXPECT_FALSE(manager_->EnsureP2PRunning());
-  test_conf_->SetInitctlStartCommand({
-      "sh", "-c", "echo \"initctl: Job is already running: p2p\" >&2; false"});
+  test_conf_->SetInitctlStartCommand(
+      {"sh", "-c", "echo \"initctl: Job is already running: p2p\" >&2; false"});
   EXPECT_TRUE(manager_->EnsureP2PRunning());
-  test_conf_->SetInitctlStartCommand({
-      "sh", "-c", "echo something else >&2; false"});
+  test_conf_->SetInitctlStartCommand(
+      {"sh", "-c", "echo something else >&2; false"});
   EXPECT_FALSE(manager_->EnsureP2PRunning());
 }
 
@@ -453,16 +453,15 @@
   EXPECT_TRUE(manager_->EnsureP2PNotRunning());
   test_conf_->SetInitctlStopCommand({"false"});
   EXPECT_FALSE(manager_->EnsureP2PNotRunning());
-  test_conf_->SetInitctlStopCommand({
-      "sh", "-c", "echo \"initctl: Unknown instance \" >&2; false"});
+  test_conf_->SetInitctlStopCommand(
+      {"sh", "-c", "echo \"initctl: Unknown instance \" >&2; false"});
   EXPECT_TRUE(manager_->EnsureP2PNotRunning());
-  test_conf_->SetInitctlStopCommand({
-      "sh", "-c", "echo something else >&2; false"});
+  test_conf_->SetInitctlStopCommand(
+      {"sh", "-c", "echo something else >&2; false"});
   EXPECT_FALSE(manager_->EnsureP2PNotRunning());
 }
 
-static void ExpectUrl(const string& expected_url,
-                      const string& url) {
+static void ExpectUrl(const string& expected_url, const string& url) {
   EXPECT_EQ(url, expected_url);
   MessageLoop::current()->BreakLoop();
 }
@@ -472,53 +471,56 @@
 TEST_F(P2PManagerTest, LookupURL) {
   // Emulate p2p-client returning valid URL with "fooX", 42 and "cros_au"
   // being propagated in the right places.
-  test_conf_->SetP2PClientCommand({
-      "echo", "http://1.2.3.4/{file_id}_{minsize}"});
-  manager_->LookupUrlForFile("fooX", 42, TimeDelta(),
-                             base::Bind(ExpectUrl,
-                                        "http://1.2.3.4/fooX.cros_au_42"));
+  test_conf_->SetP2PClientCommand(
+      {"echo", "http://1.2.3.4/{file_id}_{minsize}"});
+  manager_->LookupUrlForFile(
+      "fooX",
+      42,
+      TimeDelta(),
+      base::Bind(ExpectUrl, "http://1.2.3.4/fooX.cros_au_42"));
   loop_.Run();
 
   // Emulate p2p-client returning invalid URL.
   test_conf_->SetP2PClientCommand({"echo", "not_a_valid_url"});
-  manager_->LookupUrlForFile("foobar", 42, TimeDelta(),
-                             base::Bind(ExpectUrl, ""));
+  manager_->LookupUrlForFile(
+      "foobar", 42, TimeDelta(), base::Bind(ExpectUrl, ""));
   loop_.Run();
 
   // Emulate p2p-client conveying failure.
   test_conf_->SetP2PClientCommand({"false"});
-  manager_->LookupUrlForFile("foobar", 42, TimeDelta(),
-                             base::Bind(ExpectUrl, ""));
+  manager_->LookupUrlForFile(
+      "foobar", 42, TimeDelta(), base::Bind(ExpectUrl, ""));
   loop_.Run();
 
   // Emulate p2p-client not existing.
   test_conf_->SetP2PClientCommand({"/path/to/non/existent/helper/program"});
-  manager_->LookupUrlForFile("foobar", 42,
-                             TimeDelta(),
-                             base::Bind(ExpectUrl, ""));
+  manager_->LookupUrlForFile(
+      "foobar", 42, TimeDelta(), base::Bind(ExpectUrl, ""));
   loop_.Run();
 
   // Emulate p2p-client crashing.
   test_conf_->SetP2PClientCommand({"sh", "-c", "kill -SEGV $$"});
-  manager_->LookupUrlForFile("foobar", 42, TimeDelta(),
-                             base::Bind(ExpectUrl, ""));
+  manager_->LookupUrlForFile(
+      "foobar", 42, TimeDelta(), base::Bind(ExpectUrl, ""));
   loop_.Run();
 
   // Emulate p2p-client exceeding its timeout.
-  test_conf_->SetP2PClientCommand({
-      "sh", "-c",
-      // The 'sleep' launched below could be left behind as an orphaned
-      // process when the 'sh' process is terminated by SIGTERM. As a
-      // remedy, trap SIGTERM and kill the 'sleep' process, which requires
-      // launching 'sleep' in background and then waiting for it.
-      "cleanup() { kill \"${sleep_pid}\"; exit 0; }; "
-      "trap cleanup TERM; "
-      "sleep 5 & "
-      "sleep_pid=$!; "
-      "echo http://1.2.3.4/; "
-      "wait"
-  });
-  manager_->LookupUrlForFile("foobar", 42, TimeDelta::FromMilliseconds(500),
+  test_conf_->SetP2PClientCommand(
+      {"sh",
+       "-c",
+       // The 'sleep' launched below could be left behind as an orphaned
+       // process when the 'sh' process is terminated by SIGTERM. As a
+       // remedy, trap SIGTERM and kill the 'sleep' process, which requires
+       // launching 'sleep' in background and then waiting for it.
+       "cleanup() { kill \"${sleep_pid}\"; exit 0; }; "
+       "trap cleanup TERM; "
+       "sleep 5 & "
+       "sleep_pid=$!; "
+       "echo http://1.2.3.4/; "
+       "wait"});
+  manager_->LookupUrlForFile("foobar",
+                             42,
+                             TimeDelta::FromMilliseconds(500),
                              base::Bind(ExpectUrl, ""));
   loop_.Run();
 }
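
The CheckP2PFile()/CreateP2PFile() helpers above round-trip the expected payload size through the user.cros-p2p-filesize extended attribute, the same tag p2p_manager.cc sets with fsetxattr() in FileShare() and reads back in FileGetExpectedSize(). A minimal standalone sketch of that round trip, assuming a Linux filesystem with user xattrs enabled and using an illustrative /tmp path rather than the real P2P directory:

#include <fcntl.h>
#include <sys/xattr.h>
#include <unistd.h>

#include <cstdio>
#include <cstdlib>
#include <string>

int main() {
  const char* path = "/tmp/example.cros_au.p2p.tmp";  // illustrative path only
  int fd = open(path, O_CREAT | O_RDWR, 0644);
  if (fd < 0)
    return 1;

  // Tag the (possibly still growing) file with its expected final size,
  // stored as a decimal string, the way the FileShare() path above does.
  std::string decimal_size = std::to_string(1000 * 1000);
  if (fsetxattr(fd, "user.cros-p2p-filesize", decimal_size.c_str(),
                decimal_size.size(), 0) != 0) {
    perror("fsetxattr");  // e.g. a filesystem without user xattr support
  }
  close(fd);

  // Read the tag back, leaving room for a trailing NUL, the way
  // FileGetExpectedSize() and CheckP2PFile() do.
  char value[64] = {0};
  ssize_t n =
      getxattr(path, "user.cros-p2p-filesize", value, sizeof(value) - 1);
  if (n >= 0)
    printf("expected size: %lld\n", strtoll(value, nullptr, 0));
  unlink(path);
  return 0;
}
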
diff --git a/parcelable_update_engine_status.h b/parcelable_update_engine_status.h
index 82006e4..3feac76 100644
--- a/parcelable_update_engine_status.h
+++ b/parcelable_update_engine_status.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_UPDATE_ENGINE_STATUS_H_
-#define UPDATE_ENGINE_UPDATE_ENGINE_STATUS_H_
+#ifndef UPDATE_ENGINE_PARCELABLE_UPDATE_ENGINE_STATUS_H_
+#define UPDATE_ENGINE_PARCELABLE_UPDATE_ENGINE_STATUS_H_
 
 #include <binder/Parcelable.h>
 #include <utils/String16.h>
@@ -60,4 +60,4 @@
 }  // namespace brillo
 }  // namespace android
 
-#endif  // UPDATE_ENGINE_UPDATE_ENGINE_STATUS_H_
+#endif  // UPDATE_ENGINE_PARCELABLE_UPDATE_ENGINE_STATUS_H_
diff --git a/parcelable_update_engine_status_unittest.cc b/parcelable_update_engine_status_unittest.cc
index f4bd518..20decb6 100644
--- a/parcelable_update_engine_status_unittest.cc
+++ b/parcelable_update_engine_status_unittest.cc
@@ -21,9 +21,9 @@
 #include <gtest/gtest.h>
 
 using android::Parcel;
+using android::status_t;
 using android::String16;
 using android::brillo::ParcelableUpdateEngineStatus;
-using android::status_t;
 using update_engine::UpdateEngineStatus;
 using update_engine::UpdateStatus;
 
diff --git a/payload_consumer/bzip_extent_writer.cc b/payload_consumer/bzip_extent_writer.cc
index 39d9d67..0c25c71 100644
--- a/payload_consumer/bzip_extent_writer.cc
+++ b/payload_consumer/bzip_extent_writer.cc
@@ -24,6 +24,11 @@
 const brillo::Blob::size_type kOutputBufferLength = 16 * 1024;
 }
 
+BzipExtentWriter::~BzipExtentWriter() {
+  TEST_AND_RETURN(BZ2_bzDecompressEnd(&stream_) == BZ_OK);
+  TEST_AND_RETURN(input_buffer_.empty());
+}
+
 bool BzipExtentWriter::Init(FileDescriptorPtr fd,
                             const RepeatedPtrField<Extent>& extents,
                             uint32_t block_size) {
@@ -63,9 +68,8 @@
     if (stream_.avail_out == output_buffer.size())
       break;  // got no new bytes
 
-    TEST_AND_RETURN_FALSE(
-        next_->Write(output_buffer.data(),
-                     output_buffer.size() - stream_.avail_out));
+    TEST_AND_RETURN_FALSE(next_->Write(
+        output_buffer.data(), output_buffer.size() - stream_.avail_out));
 
     if (rc == BZ_STREAM_END)
       CHECK_EQ(stream_.avail_in, 0u);
@@ -82,10 +86,4 @@
   return true;
 }
 
-bool BzipExtentWriter::EndImpl() {
-  TEST_AND_RETURN_FALSE(input_buffer_.empty());
-  TEST_AND_RETURN_FALSE(BZ2_bzDecompressEnd(&stream_) == BZ_OK);
-  return next_->End();
-}
-
 }  // namespace chromeos_update_engine
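
The bzip_extent_writer.cc change above retires the explicit EndImpl() teardown and instead releases the libbz2 stream in the destructor, so cleanup also happens on early error returns. A minimal sketch of that RAII shape against the same libbz2 calls; ScopedBzDecompressor is an illustrative name, not an update_engine class:

#include <bzlib.h>

#include <cstring>

class ScopedBzDecompressor {
 public:
  ScopedBzDecompressor() {
    std::memset(&stream_, 0, sizeof(stream_));
    ok_ = (BZ2_bzDecompressInit(&stream_, 0 /* verbosity */, 0 /* small */) ==
           BZ_OK);
  }
  // Teardown happens here on every exit path, so callers need no End() call.
  ~ScopedBzDecompressor() {
    if (ok_)
      BZ2_bzDecompressEnd(&stream_);
  }
  ScopedBzDecompressor(const ScopedBzDecompressor&) = delete;
  ScopedBzDecompressor& operator=(const ScopedBzDecompressor&) = delete;

  // Returns the stream to feed BZ2_bzDecompress(), or nullptr if init failed.
  bz_stream* get() { return ok_ ? &stream_ : nullptr; }

 private:
  bz_stream stream_;
  bool ok_{false};
};
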
diff --git a/payload_consumer/bzip_extent_writer.h b/payload_consumer/bzip_extent_writer.h
index 86b346a..ec181a7 100644
--- a/payload_consumer/bzip_extent_writer.h
+++ b/payload_consumer/bzip_extent_writer.h
@@ -38,17 +38,16 @@
       : next_(std::move(next)) {
     memset(&stream_, 0, sizeof(stream_));
   }
-  ~BzipExtentWriter() override = default;
+  ~BzipExtentWriter() override;
 
   bool Init(FileDescriptorPtr fd,
             const google::protobuf::RepeatedPtrField<Extent>& extents,
             uint32_t block_size) override;
   bool Write(const void* bytes, size_t count) override;
-  bool EndImpl() override;
 
  private:
   std::unique_ptr<ExtentWriter> next_;  // The underlying ExtentWriter.
-  bz_stream stream_;  // the libbz2 stream
+  bz_stream stream_;                    // the libbz2 stream
   brillo::Blob input_buffer_;
 };
 
diff --git a/payload_consumer/bzip_extent_writer_unittest.cc b/payload_consumer/bzip_extent_writer_unittest.cc
index bf050ef..125e1e5 100644
--- a/payload_consumer/bzip_extent_writer_unittest.cc
+++ b/payload_consumer/bzip_extent_writer_unittest.cc
@@ -46,11 +46,7 @@
     fd_.reset(new EintrSafeFileDescriptor);
     ASSERT_TRUE(fd_->Open(temp_file_.path().c_str(), O_RDWR, 0600));
   }
-  void TearDown() override {
-    fd_->Close();
-  }
-  void WriteAlignedExtents(size_t chunk_size, size_t first_chunk_size);
-  void TestZeroPad(bool aligned_size);
+  void TearDown() override { fd_->Close(); }
 
   FileDescriptorPtr fd_;
   test_utils::ScopedTempFile temp_file_{"BzipExtentWriterTest-file.XXXXXX"};
@@ -62,17 +58,16 @@
   // 'echo test | bzip2 | hexdump' yields:
   static const char test_uncompressed[] = "test\n";
   static const uint8_t test[] = {
-    0x42, 0x5a, 0x68, 0x39, 0x31, 0x41, 0x59, 0x26, 0x53, 0x59, 0xcc, 0xc3,
-    0x71, 0xd4, 0x00, 0x00, 0x02, 0x41, 0x80, 0x00, 0x10, 0x02, 0x00, 0x0c,
-    0x00, 0x20, 0x00, 0x21, 0x9a, 0x68, 0x33, 0x4d, 0x19, 0x97, 0x8b, 0xb9,
-    0x22, 0x9c, 0x28, 0x48, 0x66, 0x61, 0xb8, 0xea, 0x00,
+      0x42, 0x5a, 0x68, 0x39, 0x31, 0x41, 0x59, 0x26, 0x53, 0x59, 0xcc, 0xc3,
+      0x71, 0xd4, 0x00, 0x00, 0x02, 0x41, 0x80, 0x00, 0x10, 0x02, 0x00, 0x0c,
+      0x00, 0x20, 0x00, 0x21, 0x9a, 0x68, 0x33, 0x4d, 0x19, 0x97, 0x8b, 0xb9,
+      0x22, 0x9c, 0x28, 0x48, 0x66, 0x61, 0xb8, 0xea, 0x00,
   };
 
   BzipExtentWriter bzip_writer(std::make_unique<DirectExtentWriter>());
   EXPECT_TRUE(
       bzip_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
   EXPECT_TRUE(bzip_writer.Write(test, sizeof(test)));
-  EXPECT_TRUE(bzip_writer.End());
 
   brillo::Blob buf;
   EXPECT_TRUE(utils::ReadFile(temp_file_.path(), &buf));
@@ -100,8 +95,7 @@
   for (size_t i = 0; i < decompressed_data.size(); ++i)
     decompressed_data[i] = static_cast<uint8_t>("ABC\n"[i % 4]);
 
-  vector<Extent> extents = {
-      ExtentForRange(0, (kDecompressedLength + kBlockSize - 1) / kBlockSize)};
+  vector<Extent> extents = {ExtentForBytes(kBlockSize, 0, kDecompressedLength)};
 
   BzipExtentWriter bzip_writer(std::make_unique<DirectExtentWriter>());
   EXPECT_TRUE(
@@ -113,7 +107,6 @@
     size_t this_chunk_size = min(kChunkSize, compressed_data.size() - i);
     EXPECT_TRUE(bzip_writer.Write(&compressed_data[i], this_chunk_size));
   }
-  EXPECT_TRUE(bzip_writer.End());
 
   // Check that the const input has not been clobbered.
   test_utils::ExpectVectorsEq(original_compressed_data, compressed_data);
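
The unit-test change above replaces the hand-rolled (kDecompressedLength + kBlockSize - 1) / kBlockSize rounding with ExtentForBytes(). A small sketch of that bytes-to-blocks conversion for the block-aligned case used in the test; SimpleExtent and the Sketch names are illustrative stand-ins, not the real helpers:

#include <cstdint>

// Stand-in for the protobuf Extent used by the extent writer interfaces.
struct SimpleExtent {
  uint64_t start_block;
  uint64_t num_blocks;
};

// Sketch of the rounding the test relies on: a byte length is rounded up to
// whole blocks before building the extent, matching the old
// (length + block_size - 1) / block_size expression.
SimpleExtent ExtentForBytesSketch(uint64_t block_size,
                                  uint64_t start_bytes,
                                  uint64_t size_bytes) {
  uint64_t num_blocks = (size_bytes + block_size - 1) / block_size;
  return SimpleExtent{start_bytes / block_size, num_blocks};
}
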
diff --git a/payload_consumer/cached_file_descriptor_unittest.cc b/payload_consumer/cached_file_descriptor_unittest.cc
index 6a6302a..d2965fc 100644
--- a/payload_consumer/cached_file_descriptor_unittest.cc
+++ b/payload_consumer/cached_file_descriptor_unittest.cc
@@ -159,7 +159,7 @@
   off64_t seek = 10;
   brillo::Blob blob_in(kFileSize, 0);
   std::fill_n(&blob_in[seek], kCacheSize, value_);
-  // We are writing exactly one cache size; Then it should be commited.
+  // We are writing exactly one cache size; then it should be committed.
   EXPECT_EQ(cfd_->Seek(seek, SEEK_SET), seek);
   Write(&blob_in[seek], kCacheSize);
 
@@ -174,7 +174,7 @@
   EXPECT_EQ(cfd_->Seek(seek, SEEK_SET), seek);
   brillo::Blob blob_in(kFileSize, 0);
   std::fill_n(&blob_in[seek], less_than_cache_size, value_);
-  // We are writing less than one cache size; then it should not be commited.
+  // We are writing less than one cache size; then it should not be committed.
   Write(&blob_in[seek], less_than_cache_size);
 
   // Revert the changes in |blob_in|.
@@ -190,7 +190,7 @@
   EXPECT_EQ(cfd_->Seek(seek, SEEK_SET), seek);
   brillo::Blob blob_in(kFileSize, 0);
   std::fill_n(&blob_in[seek], less_than_cache_size, value_);
-  // We are writing less than  one cache size; then it should not be commited.
+  // We are writing less than one cache size; then it should not be committed.
   Write(&blob_in[seek], less_than_cache_size);
 
   // Then we seek, it should've written the cache after seek.
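
The comments in these cached_file_descriptor tests state the commit rule under test: writes accumulate in a fixed-size cache and only reach the underlying descriptor once a full cache's worth is buffered, or when the caller seeks. A minimal sketch of that rule over a plain POSIX fd; CachedWriterSketch is an illustrative stand-in, not the real CachedFileDescriptor:

#include <sys/types.h>
#include <unistd.h>

#include <cstddef>
#include <cstdint>
#include <vector>

class CachedWriterSketch {
 public:
  CachedWriterSketch(int fd, size_t cache_size)
      : fd_(fd), cache_size_(cache_size) {
    cache_.reserve(cache_size_);
  }

  // Buffers bytes and commits to the fd each time a full cache is accumulated;
  // a trailing partial cache stays buffered.
  bool Write(const uint8_t* data, size_t count) {
    for (size_t i = 0; i < count; ++i) {
      cache_.push_back(data[i]);
      if (cache_.size() == cache_size_ && !Flush())
        return false;
    }
    return true;
  }

  // Seeking first commits any partial cache, then moves the underlying offset.
  off_t Seek(off_t offset, int whence) {
    if (!Flush())
      return -1;
    return lseek(fd_, offset, whence);
  }

  // Writes out whatever is buffered (treating a short write as failure).
  bool Flush() {
    if (cache_.empty())
      return true;
    const size_t to_write = cache_.size();
    ssize_t written = write(fd_, cache_.data(), to_write);
    cache_.clear();
    return written >= 0 && static_cast<size_t>(written) == to_write;
  }

 private:
  int fd_;
  size_t cache_size_;
  std::vector<uint8_t> cache_;
};
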
diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index a619d1d..ae73d03 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -21,6 +21,7 @@
 
 #include <algorithm>
 #include <cstring>
+#include <map>
 #include <memory>
 #include <string>
 #include <utility>
@@ -48,11 +49,14 @@
 #include "update_engine/payload_consumer/download_action.h"
 #include "update_engine/payload_consumer/extent_reader.h"
 #include "update_engine/payload_consumer/extent_writer.h"
+#if USE_FEC
+#include "update_engine/payload_consumer/fec_file_descriptor.h"
+#endif  // USE_FEC
 #include "update_engine/payload_consumer/file_descriptor_utils.h"
 #include "update_engine/payload_consumer/mount_history.h"
 #if USE_MTD
 #include "update_engine/payload_consumer/mtd_file_descriptor.h"
-#endif
+#endif  // USE_MTD
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_consumer/payload_verifier.h"
 #include "update_engine/payload_consumer/xz_extent_writer.h"
@@ -63,14 +67,11 @@
 using std::vector;
 
 namespace chromeos_update_engine {
-
-const uint64_t DeltaPerformer::kSupportedMajorPayloadVersion = 2;
-const uint32_t DeltaPerformer::kSupportedMinorPayloadVersion = 5;
-
 const unsigned DeltaPerformer::kProgressLogMaxChunks = 10;
 const unsigned DeltaPerformer::kProgressLogTimeoutSeconds = 30;
 const unsigned DeltaPerformer::kProgressDownloadWeight = 50;
 const unsigned DeltaPerformer::kProgressOperationsWeight = 50;
+const uint64_t DeltaPerformer::kCheckpointFrequencySeconds = 1;
 
 namespace {
 const int kUpdateStateOperationInvalid = -1;
@@ -175,7 +176,6 @@
 
 }  // namespace
 
-
 // Computes the ratio of |part| and |total|, scaled to |norm|, using integer
 // arithmetic.
 static uint64_t IntRatio(uint64_t part, uint64_t total, uint64_t norm) {
@@ -189,10 +189,9 @@
   if (num_total_operations_) {
     total_operations_str = std::to_string(num_total_operations_);
     // Upcasting to 64-bit to avoid overflow, back to size_t for formatting.
-    completed_percentage_str =
-        base::StringPrintf(" (%" PRIu64 "%%)",
-                           IntRatio(next_operation_num_, num_total_operations_,
-                                    100));
+    completed_percentage_str = base::StringPrintf(
+        " (%" PRIu64 "%%)",
+        IntRatio(next_operation_num_, num_total_operations_, 100));
   }
 
   // Format download total count and percentage.
@@ -202,15 +201,14 @@
   if (payload_size) {
     payload_size_str = std::to_string(payload_size);
     // Upcasting to 64-bit to avoid overflow, back to size_t for formatting.
-    downloaded_percentage_str =
-        base::StringPrintf(" (%" PRIu64 "%%)",
-                           IntRatio(total_bytes_received_, payload_size, 100));
+    downloaded_percentage_str = base::StringPrintf(
+        " (%" PRIu64 "%%)", IntRatio(total_bytes_received_, payload_size, 100));
   }
 
   LOG(INFO) << (message_prefix ? message_prefix : "") << next_operation_num_
             << "/" << total_operations_str << " operations"
-            << completed_percentage_str << ", " << total_bytes_received_
-            << "/" << payload_size_str << " bytes downloaded"
+            << completed_percentage_str << ", " << total_bytes_received_ << "/"
+            << payload_size_str << " bytes downloaded"
             << downloaded_percentage_str << ", overall progress "
             << overall_progress_ << "%";
 }
@@ -234,10 +232,10 @@
   size_t payload_size = payload_->size;
   unsigned actual_operations_weight = kProgressOperationsWeight;
   if (payload_size)
-    new_overall_progress += min(
-        static_cast<unsigned>(IntRatio(total_bytes_received_, payload_size,
-                                       kProgressDownloadWeight)),
-        kProgressDownloadWeight);
+    new_overall_progress +=
+        min(static_cast<unsigned>(IntRatio(
+                total_bytes_received_, payload_size, kProgressDownloadWeight)),
+            kProgressDownloadWeight);
   else
     actual_operations_weight += kProgressDownloadWeight;
 
@@ -245,8 +243,8 @@
   // expect an update to have at least one operation, so the expectation is that
   // this will eventually reach |actual_operations_weight|.
   if (num_total_operations_)
-    new_overall_progress += IntRatio(next_operation_num_, num_total_operations_,
-                                     actual_operations_weight);
+    new_overall_progress += IntRatio(
+        next_operation_num_, num_total_operations_, actual_operations_weight);
 
   // Progress ratio cannot recede, unless our assumptions about the total
   // payload size, total number of operations, or the monotonicity of progress
@@ -260,7 +258,7 @@
 
   // Update chunk index, log as needed: if forced by caller, or we completed a
   // progress chunk, or a timeout has expired.
-  base::Time curr_time = base::Time::Now();
+  base::TimeTicks curr_time = base::TimeTicks::Now();
   unsigned curr_progress_chunk =
       overall_progress_ * kProgressLogMaxChunks / 100;
   if (force_log || curr_progress_chunk > last_progress_chunk_ ||
@@ -271,8 +269,8 @@
   last_progress_chunk_ = curr_progress_chunk;
 }
 
-
-size_t DeltaPerformer::CopyDataToBuffer(const char** bytes_p, size_t* count_p,
+size_t DeltaPerformer::CopyDataToBuffer(const char** bytes_p,
+                                        size_t* count_p,
                                         size_t max) {
   const size_t count = *count_p;
   if (!count)
@@ -287,8 +285,8 @@
   return read_len;
 }
 
-
-bool DeltaPerformer::HandleOpResult(bool op_result, const char* op_type_name,
+bool DeltaPerformer::HandleOpResult(bool op_result,
+                                    const char* op_type_name,
                                     ErrorCode* error) {
   if (op_result)
     return true;
@@ -307,8 +305,9 @@
 
 int DeltaPerformer::Close() {
   int err = -CloseCurrentPartition();
-  LOG_IF(ERROR, !payload_hash_calculator_.Finalize() ||
-                !signed_hash_calculator_.Finalize())
+  LOG_IF(ERROR,
+         !payload_hash_calculator_.Finalize() ||
+             !signed_hash_calculator_.Finalize())
       << "Unable to finalize the hash.";
   if (!buffer_.empty()) {
     LOG(INFO) << "Discarding " << buffer_.size() << " unused downloaded bytes";
@@ -327,6 +326,14 @@
       err = 1;
   }
   source_fd_.reset();
+  if (source_ecc_fd_ && !source_ecc_fd_->Close()) {
+    err = errno;
+    PLOG(ERROR) << "Error closing ECC source partition";
+    if (!err)
+      err = 1;
+  }
+  source_ecc_fd_.reset();
+  source_ecc_open_failure_ = false;
   source_path_.clear();
 
   if (target_fd_ && !target_fd_->Close()) {
@@ -351,7 +358,10 @@
       install_plan_->partitions[num_previous_partitions + current_partition_];
   // Open source fds if we have a delta payload with minor version >= 2.
   if (payload_->type == InstallPayloadType::kDelta &&
-      GetMinorVersion() != kInPlaceMinorPayloadVersion) {
+      GetMinorVersion() != kInPlaceMinorPayloadVersion &&
+      // With dynamic partitions we could create a new partition in a
+      // delta payload, and we shouldn't open source partition in that case.
+      install_part.source_size > 0) {
     source_path_ = install_part.source_path;
     int err;
     source_fd_ = OpenFile(source_path_.c_str(), O_RDONLY, false, &err);
@@ -368,11 +378,11 @@
   int err;
 
   int flags = O_RDWR;
-  if (!is_interactive_)
+  if (!interactive_)
     flags |= O_DSYNC;
 
   LOG(INFO) << "Opening " << target_path_ << " partition with"
-            << (is_interactive_ ? "out" : "") << " O_DSYNC";
+            << (interactive_ ? "out" : "") << " O_DSYNC";
 
   target_fd_ = OpenFile(target_path_.c_str(), flags, true, &err);
   if (!target_fd_) {
@@ -393,6 +403,46 @@
   return true;
 }
 
+bool DeltaPerformer::OpenCurrentECCPartition() {
+  if (source_ecc_fd_)
+    return true;
+
+  if (source_ecc_open_failure_)
+    return false;
+
+  if (current_partition_ >= partitions_.size())
+    return false;
+
+  // No support for ECC in minor version 1 or full payloads.
+  if (payload_->type == InstallPayloadType::kFull ||
+      GetMinorVersion() == kInPlaceMinorPayloadVersion)
+    return false;
+
+#if USE_FEC
+  const PartitionUpdate& partition = partitions_[current_partition_];
+  size_t num_previous_partitions =
+      install_plan_->partitions.size() - partitions_.size();
+  const InstallPlan::Partition& install_part =
+      install_plan_->partitions[num_previous_partitions + current_partition_];
+  string path = install_part.source_path;
+  FileDescriptorPtr fd(new FecFileDescriptor());
+  if (!fd->Open(path.c_str(), O_RDONLY, 0)) {
+    PLOG(ERROR) << "Unable to open ECC source partition "
+                << partition.partition_name() << " on slot "
+                << BootControlInterface::SlotName(install_plan_->source_slot)
+                << ", file " << path;
+    source_ecc_open_failure_ = true;
+    return false;
+  }
+  source_ecc_fd_ = fd;
+#else
+  // No support for ECC compiled.
+  source_ecc_open_failure_ = true;
+#endif  // USE_FEC
+
+  return !source_ecc_open_failure_;
+}
+
 namespace {
 
 void LogPartitionInfoHash(const PartitionInfo& info, const string& tag) {
@@ -417,11 +467,10 @@
 uint32_t DeltaPerformer::GetMinorVersion() const {
   if (manifest_.has_minor_version()) {
     return manifest_.minor_version();
-  } else {
-    return payload_->type == InstallPayloadType::kDelta
-               ? kSupportedMinorPayloadVersion
-               : kFullPayloadMinorVersion;
   }
+  return payload_->type == InstallPayloadType::kDelta
+             ? kMaxSupportedMinorPayloadVersion
+             : kFullPayloadMinorVersion;
 }
 
 bool DeltaPerformer::IsHeaderParsed() const {
@@ -433,8 +482,8 @@
   *error = ErrorCode::kSuccess;
 
   if (!IsHeaderParsed()) {
-    MetadataParseResult result = payload_metadata_.ParsePayloadHeader(
-        payload, supported_major_version_, error);
+    MetadataParseResult result =
+        payload_metadata_.ParsePayloadHeader(payload, error);
     if (result != MetadataParseResult::kSuccess)
       return result;
 
@@ -477,19 +526,17 @@
                  << "Trusting metadata size in payload = " << metadata_size_;
   }
 
-  // See if we should use the public RSA key in the Omaha response.
-  base::FilePath path_to_public_key(public_key_path_);
-  base::FilePath tmp_key;
-  if (GetPublicKeyFromResponse(&tmp_key))
-    path_to_public_key = tmp_key;
-  ScopedPathUnlinker tmp_key_remover(tmp_key.value());
-  if (tmp_key.empty())
-    tmp_key_remover.set_should_remove(false);
+  string public_key;
+  if (!GetPublicKey(&public_key)) {
+    LOG(ERROR) << "Failed to get public key.";
+    *error = ErrorCode::kDownloadMetadataSignatureVerificationError;
+    return MetadataParseResult::kError;
+  }
 
   // We have the full metadata in |payload|. Verify its integrity
   // and authenticity based on the information we have in Omaha response.
   *error = payload_metadata_.ValidateMetadataSignature(
-      payload, payload_->metadata_signature, path_to_public_key);
+      payload, payload_->metadata_signature, public_key);
   if (*error != ErrorCode::kSuccess) {
     if (install_plan_->hash_checks_mandatory) {
       // The autoupdate_CatchBadSignatures test checks for this string
@@ -514,19 +561,18 @@
   return MetadataParseResult::kSuccess;
 }
 
-#define OP_DURATION_HISTOGRAM(_op_name, _start_time)      \
-    LOCAL_HISTOGRAM_CUSTOM_TIMES(                         \
-        "UpdateEngine.DownloadAction.InstallOperation::"  \
-        _op_name ".Duration",                             \
-        base::TimeTicks::Now() - _start_time,             \
-        base::TimeDelta::FromMilliseconds(10),            \
-        base::TimeDelta::FromMinutes(5),                  \
-        20);
+#define OP_DURATION_HISTOGRAM(_op_name, _start_time)                         \
+  LOCAL_HISTOGRAM_CUSTOM_TIMES(                                              \
+      "UpdateEngine.DownloadAction.InstallOperation::" _op_name ".Duration", \
+      base::TimeTicks::Now() - _start_time,                                  \
+      base::TimeDelta::FromMilliseconds(10),                                 \
+      base::TimeDelta::FromMinutes(5),                                       \
+      20);
 
 // Wrapper around write. Returns true if all requested bytes
 // were written, or false on any error, regardless of progress
 // and stores an action exit code in |error|.
-bool DeltaPerformer::Write(const void* bytes, size_t count, ErrorCode *error) {
+bool DeltaPerformer::Write(const void* bytes, size_t count, ErrorCode* error) {
   *error = ErrorCode::kSuccess;
   const char* c_bytes = reinterpret_cast<const char*>(bytes);
 
@@ -538,9 +584,11 @@
     // Read data up to the needed limit; this is either maximum payload header
     // size, or the full metadata size (once it becomes known).
     const bool do_read_header = !IsHeaderParsed();
-    CopyDataToBuffer(&c_bytes, &count,
-                     (do_read_header ? kMaxPayloadHeaderSize :
-                      metadata_size_ + metadata_signature_size_));
+    CopyDataToBuffer(
+        &c_bytes,
+        &count,
+        (do_read_header ? kMaxPayloadHeaderSize
+                        : metadata_size_ + metadata_signature_size_));
 
     MetadataParseResult result = ParsePayloadMetadata(buffer_, error);
     if (result == MetadataParseResult::kError)
@@ -561,6 +609,8 @@
     // Clear the download buffer.
     DiscardBuffer(false, metadata_size_);
 
+    block_size_ = manifest_.block_size();
+
     // This populates |partitions_| and the |install_plan.partitions| with the
     // list of partitions from the manifest.
     if (!ParseManifestPartitions(error))
@@ -578,11 +628,12 @@
       acc_num_operations_.push_back(num_total_operations_);
     }
 
-    LOG_IF(WARNING, !prefs_->SetInt64(kPrefsManifestMetadataSize,
-                                      metadata_size_))
+    LOG_IF(WARNING,
+           !prefs_->SetInt64(kPrefsManifestMetadataSize, metadata_size_))
         << "Unable to save the manifest metadata size.";
-    LOG_IF(WARNING, !prefs_->SetInt64(kPrefsManifestSignatureSize,
-                                      metadata_signature_size_))
+    LOG_IF(WARNING,
+           !prefs_->SetInt64(kPrefsManifestSignatureSize,
+                             metadata_signature_size_))
         << "Unable to save the manifest signature size.";
 
     if (!PrimeUpdateState()) {
@@ -591,9 +642,11 @@
       return false;
     }
 
-    if (!OpenCurrentPartition()) {
-      *error = ErrorCode::kInstallDeviceOpenError;
-      return false;
+    if (next_operation_num_ < acc_num_operations_[current_partition_]) {
+      if (!OpenCurrentPartition()) {
+        *error = ErrorCode::kInstallDeviceOpenError;
+        return false;
+      }
     }
 
     if (next_operation_num_ > 0)
@@ -610,16 +663,20 @@
 
     // We know there are more operations to perform because we didn't reach the
     // |num_total_operations_| limit yet.
-    while (next_operation_num_ >= acc_num_operations_[current_partition_]) {
+    if (next_operation_num_ >= acc_num_operations_[current_partition_]) {
       CloseCurrentPartition();
-      current_partition_++;
+      // Skip until there are operations for current_partition_.
+      while (next_operation_num_ >= acc_num_operations_[current_partition_]) {
+        current_partition_++;
+      }
       if (!OpenCurrentPartition()) {
         *error = ErrorCode::kInstallDeviceOpenError;
         return false;
       }
     }
-    const size_t partition_operation_num = next_operation_num_ - (
-        current_partition_ ? acc_num_operations_[current_partition_ - 1] : 0);
+    const size_t partition_operation_num =
+        next_operation_num_ -
+        (current_partition_ ? acc_num_operations_[current_partition_ - 1] : 0);
 
     const InstallOperation& op =
         partitions_[current_partition_].operations(partition_operation_num);
@@ -705,7 +762,7 @@
 
     next_operation_num_++;
     UpdateOverallProgress(false, "Completed ");
-    CheckpointUpdateProgress();
+    CheckpointUpdateProgress(false);
   }
 
   // In major version 2, we don't add dummy operation to the payload.
@@ -716,8 +773,7 @@
     if (manifest_.signatures_offset() != buffer_offset_) {
       LOG(ERROR) << "Payload signatures offset points to blob offset "
                  << manifest_.signatures_offset()
-                 << " but signatures are expected at offset "
-                 << buffer_offset_;
+                 << " but signatures are expected at offset " << buffer_offset_;
       *error = ErrorCode::kDownloadPayloadVerificationError;
       return false;
     }
@@ -734,7 +790,9 @@
     // Since we extracted the SignatureMessage we need to advance the
     // checkpoint, otherwise we would reload the signature and try to extract
     // it again.
-    CheckpointUpdateProgress();
+    // This is the last checkpoint for an update, so force this checkpoint
+    // to be saved.
+    CheckpointUpdateProgress(true);
   }
 
   return true;
@@ -754,7 +812,7 @@
   } else if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
     LOG(INFO) << "Converting update information from old format.";
     PartitionUpdate root_part;
-    root_part.set_partition_name(kLegacyPartitionNameRoot);
+    root_part.set_partition_name(kPartitionNameRoot);
 #ifdef __ANDROID__
     LOG(WARNING) << "Legacy payload major version provided to an Android "
                     "build. Assuming no post-install. Please use major version "
@@ -776,7 +834,7 @@
     partitions_.push_back(std::move(root_part));
 
     PartitionUpdate kern_part;
-    kern_part.set_partition_name(kLegacyPartitionNameKernel);
+    kern_part.set_partition_name(kPartitionNameKernel);
     kern_part.set_run_postinstall(false);
     if (manifest_.has_old_kernel_info()) {
       *kern_part.mutable_old_partition_info() = manifest_.old_kernel_info();
@@ -822,9 +880,55 @@
     install_part.target_size = info.size();
     install_part.target_hash.assign(info.hash().begin(), info.hash().end());
 
+    install_part.block_size = block_size_;
+    if (partition.has_hash_tree_extent()) {
+      Extent extent = partition.hash_tree_data_extent();
+      install_part.hash_tree_data_offset = extent.start_block() * block_size_;
+      install_part.hash_tree_data_size = extent.num_blocks() * block_size_;
+      extent = partition.hash_tree_extent();
+      install_part.hash_tree_offset = extent.start_block() * block_size_;
+      install_part.hash_tree_size = extent.num_blocks() * block_size_;
+      uint64_t hash_tree_data_end =
+          install_part.hash_tree_data_offset + install_part.hash_tree_data_size;
+      if (install_part.hash_tree_offset < hash_tree_data_end) {
+        LOG(ERROR) << "Invalid hash tree extents, hash tree data ends at "
+                   << hash_tree_data_end << ", but hash tree starts at "
+                   << install_part.hash_tree_offset;
+        *error = ErrorCode::kDownloadNewPartitionInfoError;
+        return false;
+      }
+      install_part.hash_tree_algorithm = partition.hash_tree_algorithm();
+      install_part.hash_tree_salt.assign(partition.hash_tree_salt().begin(),
+                                         partition.hash_tree_salt().end());
+    }
+    if (partition.has_fec_extent()) {
+      Extent extent = partition.fec_data_extent();
+      install_part.fec_data_offset = extent.start_block() * block_size_;
+      install_part.fec_data_size = extent.num_blocks() * block_size_;
+      extent = partition.fec_extent();
+      install_part.fec_offset = extent.start_block() * block_size_;
+      install_part.fec_size = extent.num_blocks() * block_size_;
+      uint64_t fec_data_end =
+          install_part.fec_data_offset + install_part.fec_data_size;
+      if (install_part.fec_offset < fec_data_end) {
+        LOG(ERROR) << "Invalid fec extents, fec data ends at " << fec_data_end
+                   << ", but fec starts at " << install_part.fec_offset;
+        *error = ErrorCode::kDownloadNewPartitionInfoError;
+        return false;
+      }
+      install_part.fec_roots = partition.fec_roots();
+    }
+
     install_plan_->partitions.push_back(install_part);
   }
 
+  if (install_plan_->target_slot != BootControlInterface::kInvalidSlot) {
+    if (!InitPartitionMetadata()) {
+      *error = ErrorCode::kInstallDeviceOpenError;
+      return false;
+    }
+  }
+
   if (!install_plan_->LoadPartitionsFromSlots(boot_control_)) {
     LOG(ERROR) << "Unable to determine all the partition devices.";
     *error = ErrorCode::kInstallDeviceOpenError;
@@ -834,6 +938,49 @@
   return true;
 }
 
+bool DeltaPerformer::InitPartitionMetadata() {
+  BootControlInterface::PartitionMetadata partition_metadata;
+  if (manifest_.has_dynamic_partition_metadata()) {
+    std::map<string, uint64_t> partition_sizes;
+    for (const auto& partition : install_plan_->partitions) {
+      partition_sizes.emplace(partition.name, partition.target_size);
+    }
+    for (const auto& group : manifest_.dynamic_partition_metadata().groups()) {
+      BootControlInterface::PartitionMetadata::Group e;
+      e.name = group.name();
+      e.size = group.size();
+      for (const auto& partition_name : group.partition_names()) {
+        auto it = partition_sizes.find(partition_name);
+        if (it == partition_sizes.end()) {
+          // TODO(tbao): Support auto-filling partition info for framework-only
+          // OTA.
+          LOG(ERROR) << "dynamic_partition_metadata contains partition "
+                     << partition_name
+                     << " but it is not part of the manifest. "
+                     << "This is not supported.";
+          return false;
+        }
+        e.partitions.push_back({partition_name, it->second});
+      }
+      partition_metadata.groups.push_back(std::move(e));
+    }
+  }
+
+  bool metadata_updated = false;
+  prefs_->GetBoolean(kPrefsDynamicPartitionMetadataUpdated, &metadata_updated);
+  if (!boot_control_->InitPartitionMetadata(
+          install_plan_->target_slot, partition_metadata, !metadata_updated)) {
+    LOG(ERROR) << "Unable to initialize partition metadata for slot "
+               << BootControlInterface::SlotName(install_plan_->target_slot);
+    return false;
+  }
+  TEST_AND_RETURN_FALSE(
+      prefs_->SetBoolean(kPrefsDynamicPartitionMetadataUpdated, true));
+  LOG(INFO) << "InitPartitionMetadata done.";
+
+  return true;
+}
+
 bool DeltaPerformer::CanPerformInstallOperation(
     const chromeos_update_engine::InstallOperation& operation) {
   // If we don't have a data blob we can apply it right away.
@@ -870,8 +1017,7 @@
   }
 
   // Setup the ExtentWriter stack based on the operation type.
-  std::unique_ptr<ExtentWriter> writer = std::make_unique<ZeroPadExtentWriter>(
-      std::make_unique<DirectExtentWriter>());
+  std::unique_ptr<ExtentWriter> writer = std::make_unique<DirectExtentWriter>();
 
   if (operation.type() == InstallOperation::REPLACE_BZ) {
     writer.reset(new BzipExtentWriter(std::move(writer)));
@@ -882,7 +1028,6 @@
   TEST_AND_RETURN_FALSE(
       writer->Init(target_fd_, operation.dst_extents(), block_size_));
   TEST_AND_RETURN_FALSE(writer->Write(buffer_.data(), operation.data_length()));
-  TEST_AND_RETURN_FALSE(writer->End());
 
   // Update buffer
   DiscardBuffer(true, buffer_.size());
@@ -902,7 +1047,7 @@
   bool attempt_ioctl = true;
   int request =
       (operation.type() == InstallOperation::ZERO ? BLKZEROOUT : BLKDISCARD);
-#else  // !defined(BLKZEROOUT)
+#else   // !defined(BLKZEROOUT)
   bool attempt_ioctl = false;
   int request = 0;
 #endif  // !defined(BLKZEROOUT)
@@ -920,8 +1065,8 @@
     // In case of failure, we fall back to writing 0 to the selected region.
     zeros.resize(16 * block_size_);
     for (uint64_t offset = 0; offset < length; offset += zeros.size()) {
-      uint64_t chunk_length = min(length - offset,
-                                  static_cast<uint64_t>(zeros.size()));
+      uint64_t chunk_length =
+          min(length - offset, static_cast<uint64_t>(zeros.size()));
       TEST_AND_RETURN_FALSE(utils::PWriteAll(
           target_fd_, zeros.data(), chunk_length, start + offset));
     }
@@ -958,8 +1103,8 @@
                                           bytes,
                                           extent.start_block() * block_size_,
                                           &bytes_read_this_iteration));
-    TEST_AND_RETURN_FALSE(
-        bytes_read_this_iteration == static_cast<ssize_t>(bytes));
+    TEST_AND_RETURN_FALSE(bytes_read_this_iteration ==
+                          static_cast<ssize_t>(bytes));
     bytes_read += bytes_read_this_iteration;
   }
 
@@ -1025,20 +1170,128 @@
   if (operation.has_dst_length())
     TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
 
-  brillo::Blob source_hash;
-  TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_fd_,
-                                                     operation.src_extents(),
-                                                     target_fd_,
-                                                     operation.dst_extents(),
-                                                     block_size_,
-                                                     &source_hash));
+  TEST_AND_RETURN_FALSE(source_fd_ != nullptr);
 
   if (operation.has_src_sha256_hash()) {
+    brillo::Blob source_hash;
+    brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
+                                      operation.src_sha256_hash().end());
+
+    // We fall back to using the error corrected device if the hash of the raw
+    // device doesn't match or there was an error reading the source partition.
+    // Note that this code will also fall back if writing the target partition
+    // fails.
+    bool read_ok = fd_utils::CopyAndHashExtents(source_fd_,
+                                                operation.src_extents(),
+                                                target_fd_,
+                                                operation.dst_extents(),
+                                                block_size_,
+                                                &source_hash);
+    if (read_ok && expected_source_hash == source_hash)
+      return true;
+
+    if (!OpenCurrentECCPartition()) {
+      // The following function call will return false since the source hash
+      // mismatches, but we still want to call it so it prints the appropriate
+      // log message.
+      return ValidateSourceHash(source_hash, operation, source_fd_, error);
+    }
+
+    LOG(WARNING) << "Source hash from RAW device mismatched: found "
+                 << base::HexEncode(source_hash.data(), source_hash.size())
+                 << ", expected "
+                 << base::HexEncode(expected_source_hash.data(),
+                                    expected_source_hash.size());
+
+    TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_ecc_fd_,
+                                                       operation.src_extents(),
+                                                       target_fd_,
+                                                       operation.dst_extents(),
+                                                       block_size_,
+                                                       &source_hash));
     TEST_AND_RETURN_FALSE(
-        ValidateSourceHash(source_hash, operation, source_fd_, error));
+        ValidateSourceHash(source_hash, operation, source_ecc_fd_, error));
+    // At this point reading from the error corrected device worked, but
+    // reading from the raw device failed, so this is considered a recovered
+    // failure.
+    source_ecc_recovered_failures_++;
+  } else {
+    // When the operation doesn't include a source hash, we attempt the error
+    // corrected device first since we can't verify the block in the raw device
+    // at this point, but we fall back to the raw device since the error
+    // corrected device can be shorter or not available.
+    if (OpenCurrentECCPartition() &&
+        fd_utils::CopyAndHashExtents(source_ecc_fd_,
+                                     operation.src_extents(),
+                                     target_fd_,
+                                     operation.dst_extents(),
+                                     block_size_,
+                                     nullptr)) {
+      return true;
+    }
+    TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_fd_,
+                                                       operation.src_extents(),
+                                                       target_fd_,
+                                                       operation.dst_extents(),
+                                                       block_size_,
+                                                       nullptr));
+  }
+  return true;
+}
+
+FileDescriptorPtr DeltaPerformer::ChooseSourceFD(
+    const InstallOperation& operation, ErrorCode* error) {
+  if (source_fd_ == nullptr) {
+    LOG(ERROR) << "ChooseSourceFD fail: source_fd_ == nullptr";
+    return nullptr;
   }
 
-  return true;
+  if (!operation.has_src_sha256_hash()) {
+    // When the operation doesn't include a source hash, we attempt the error
+    // corrected device first since we can't verify the block in the raw device
+    // at this point, but we first need to make sure all extents are readable
+    // since the error corrected device can be shorter or not available.
+    if (OpenCurrentECCPartition() &&
+        fd_utils::ReadAndHashExtents(
+            source_ecc_fd_, operation.src_extents(), block_size_, nullptr)) {
+      return source_ecc_fd_;
+    }
+    return source_fd_;
+  }
+
+  brillo::Blob source_hash;
+  brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
+                                    operation.src_sha256_hash().end());
+  if (fd_utils::ReadAndHashExtents(
+          source_fd_, operation.src_extents(), block_size_, &source_hash) &&
+      source_hash == expected_source_hash) {
+    return source_fd_;
+  }
+  // We fall back to using the error corrected device if the hash of the raw
+  // device doesn't match or there was an error reading the source partition.
+  if (!OpenCurrentECCPartition()) {
+    // The following function call will return false since the source hash
+    // mismatches, but we still want to call it so it prints the appropriate
+    // log message.
+    ValidateSourceHash(source_hash, operation, source_fd_, error);
+    return nullptr;
+  }
+  LOG(WARNING) << "Source hash from RAW device mismatched: found "
+               << base::HexEncode(source_hash.data(), source_hash.size())
+               << ", expected "
+               << base::HexEncode(expected_source_hash.data(),
+                                  expected_source_hash.size());
+
+  if (fd_utils::ReadAndHashExtents(
+          source_ecc_fd_, operation.src_extents(), block_size_, &source_hash) &&
+      ValidateSourceHash(source_hash, operation, source_ecc_fd_, error)) {
+    // At this point reading from the error corrected device worked, but
+    // reading from the raw device failed, so this is considered a recovered
+    // failure.
+    source_ecc_recovered_failures_++;
+    return source_ecc_fd_;
+  }
+  return nullptr;
 }
 
 bool DeltaPerformer::ExtentsToBsdiffPositionsString(
@@ -1141,12 +1394,7 @@
     return true;
   }
 
-  bool Close() override {
-    if (writer_ != nullptr) {
-      TEST_AND_RETURN_FALSE(writer_->End());
-    }
-    return true;
-  }
+  bool Close() override { return true; }
 
   bool GetSize(uint64_t* size) override {
     *size = size_;
@@ -1183,17 +1431,12 @@
   if (operation.has_dst_length())
     TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
 
-  if (operation.has_src_sha256_hash()) {
-    brillo::Blob source_hash;
-    TEST_AND_RETURN_FALSE(fd_utils::ReadAndHashExtents(
-        source_fd_, operation.src_extents(), block_size_, &source_hash));
-    TEST_AND_RETURN_FALSE(
-        ValidateSourceHash(source_hash, operation, source_fd_, error));
-  }
+  FileDescriptorPtr source_fd = ChooseSourceFD(operation, error);
+  TEST_AND_RETURN_FALSE(source_fd != nullptr);
 
   auto reader = std::make_unique<DirectExtentReader>();
   TEST_AND_RETURN_FALSE(
-      reader->Init(source_fd_, operation.src_extents(), block_size_));
+      reader->Init(source_fd, operation.src_extents(), block_size_));
   auto src_file = std::make_unique<BsdiffExtentFile>(
       std::move(reader),
       utils::BlocksInExtents(operation.src_extents()) * block_size_);
@@ -1265,12 +1508,7 @@
     return true;
   }
 
-  bool Close() override {
-    if (!is_read_) {
-      TEST_AND_RETURN_FALSE(writer_->End());
-    }
-    return true;
-  }
+  bool Close() override { return true; }
 
  private:
   PuffinExtentStream(std::unique_ptr<ExtentReader> reader,
@@ -1300,17 +1538,12 @@
   TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
   TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
 
-  if (operation.has_src_sha256_hash()) {
-    brillo::Blob source_hash;
-    TEST_AND_RETURN_FALSE(fd_utils::ReadAndHashExtents(
-        source_fd_, operation.src_extents(), block_size_, &source_hash));
-    TEST_AND_RETURN_FALSE(
-        ValidateSourceHash(source_hash, operation, source_fd_, error));
-  }
+  FileDescriptorPtr source_fd = ChooseSourceFD(operation, error);
+  TEST_AND_RETURN_FALSE(source_fd != nullptr);
 
   auto reader = std::make_unique<DirectExtentReader>();
   TEST_AND_RETURN_FALSE(
-      reader->Init(source_fd_, operation.src_extents(), block_size_));
+      reader->Init(source_fd, operation.src_extents(), block_size_));
   puffin::UniqueStreamPtr src_stream(new PuffinExtentStream(
       std::move(reader),
       utils::BlocksInExtents(operation.src_extents()) * block_size_));
@@ -1350,8 +1583,7 @@
   TEST_AND_RETURN_FALSE(buffer_offset_ == manifest_.signatures_offset());
   TEST_AND_RETURN_FALSE(buffer_.size() >= manifest_.signatures_size());
   signatures_message_data_.assign(
-      buffer_.begin(),
-      buffer_.begin() + manifest_.signatures_size());
+      buffer_.begin(), buffer_.begin() + manifest_.signatures_size());
 
   // Save the signature blob because if the update is interrupted after the
   // download phase we don't go through this path anymore. Some alternatives to
@@ -1362,9 +1594,9 @@
   //
   // 2. Verify the signature as soon as it's received and don't checkpoint the
   // blob and the signed sha-256 context.
-  LOG_IF(WARNING, !prefs_->SetString(kPrefsUpdateStateSignatureBlob,
-                                     string(signatures_message_data_.begin(),
-                                            signatures_message_data_.end())))
+  LOG_IF(WARNING,
+         !prefs_->SetString(kPrefsUpdateStateSignatureBlob,
+                            signatures_message_data_))
       << "Unable to store the signature blob.";
 
   LOG(INFO) << "Extracted signature data of size "
@@ -1373,15 +1605,21 @@
   return true;
 }
 
-bool DeltaPerformer::GetPublicKeyFromResponse(base::FilePath *out_tmp_key) {
-  if (hardware_->IsOfficialBuild() ||
-      utils::FileExists(public_key_path_.c_str()) ||
-      install_plan_->public_key_rsa.empty())
-    return false;
+bool DeltaPerformer::GetPublicKey(string* out_public_key) {
+  out_public_key->clear();
 
-  if (!utils::DecodeAndStoreBase64String(install_plan_->public_key_rsa,
-                                         out_tmp_key))
-    return false;
+  if (utils::FileExists(public_key_path_.c_str())) {
+    LOG(INFO) << "Verifying using public key: " << public_key_path_;
+    return utils::ReadFile(public_key_path_, out_public_key);
+  }
+
+  // If this is an official build, then we are not allowed to use the public
+  // key from the Omaha response.
+  if (!hardware_->IsOfficialBuild() && !install_plan_->public_key_rsa.empty()) {
+    LOG(INFO) << "Verifying using public key from Omaha response.";
+    return brillo::data_encoding::Base64Decode(install_plan_->public_key_rsa,
+                                               out_public_key);
+  }
 
   return true;
 }
@@ -1425,20 +1663,20 @@
       return ErrorCode::kUnsupportedMinorPayloadVersion;
     }
   } else {
-    if (manifest_.minor_version() != supported_minor_version_) {
+    if (manifest_.minor_version() < kMinSupportedMinorPayloadVersion ||
+        manifest_.minor_version() > kMaxSupportedMinorPayloadVersion) {
       LOG(ERROR) << "Manifest contains minor version "
                  << manifest_.minor_version()
-                 << " not the supported "
-                 << supported_minor_version_;
+                 << " not in the range of supported minor versions ["
+                 << kMinSupportedMinorPayloadVersion << ", "
+                 << kMaxSupportedMinorPayloadVersion << "].";
       return ErrorCode::kUnsupportedMinorPayloadVersion;
     }
   }
 
   if (major_payload_version_ != kChromeOSMajorPayloadVersion) {
-    if (manifest_.has_old_rootfs_info() ||
-        manifest_.has_new_rootfs_info() ||
-        manifest_.has_old_kernel_info() ||
-        manifest_.has_new_kernel_info() ||
+    if (manifest_.has_old_rootfs_info() || manifest_.has_new_rootfs_info() ||
+        manifest_.has_old_kernel_info() || manifest_.has_new_kernel_info() ||
         manifest_.install_operations_size() != 0 ||
         manifest_.kernel_install_operations_size() != 0) {
       LOG(ERROR) << "Manifest contains deprecated field only supported in "
@@ -1456,6 +1694,16 @@
     return ErrorCode::kPayloadTimestampError;
   }
 
+  if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
+    if (manifest_.has_dynamic_partition_metadata()) {
+      LOG(ERROR)
+          << "Should not contain dynamic_partition_metadata for major version "
+          << kChromeOSMajorPayloadVersion
+          << ". Please use major version 2 or above.";
+      return ErrorCode::kPayloadMismatchedType;
+    }
+  }
+
   // TODO(garnold) we should be adding more and more manifest checks, such as
   // partition boundaries etc (see chromium-os:37661).
 
@@ -1525,35 +1773,32 @@
   return ErrorCode::kSuccess;
 }
 
-#define TEST_AND_RETURN_VAL(_retval, _condition)                \
-  do {                                                          \
-    if (!(_condition)) {                                        \
-      LOG(ERROR) << "VerifyPayload failure: " << #_condition;   \
-      return _retval;                                           \
-    }                                                           \
+#define TEST_AND_RETURN_VAL(_retval, _condition)              \
+  do {                                                        \
+    if (!(_condition)) {                                      \
+      LOG(ERROR) << "VerifyPayload failure: " << #_condition; \
+      return _retval;                                         \
+    }                                                         \
   } while (0);
 
 ErrorCode DeltaPerformer::VerifyPayload(
     const brillo::Blob& update_check_response_hash,
     const uint64_t update_check_response_size) {
-
-  // See if we should use the public RSA key in the Omaha response.
-  base::FilePath path_to_public_key(public_key_path_);
-  base::FilePath tmp_key;
-  if (GetPublicKeyFromResponse(&tmp_key))
-    path_to_public_key = tmp_key;
-  ScopedPathUnlinker tmp_key_remover(tmp_key.value());
-  if (tmp_key.empty())
-    tmp_key_remover.set_should_remove(false);
-
-  LOG(INFO) << "Verifying payload using public key: "
-            << path_to_public_key.value();
+  string public_key;
+  if (!GetPublicKey(&public_key)) {
+    LOG(ERROR) << "Failed to get public key.";
+    return ErrorCode::kDownloadPayloadPubKeyVerificationError;
+  }
 
   // Verifies the download size.
-  TEST_AND_RETURN_VAL(ErrorCode::kPayloadSizeMismatchError,
-                      update_check_response_size ==
-                      metadata_size_ + metadata_signature_size_ +
-                      buffer_offset_);
+  if (update_check_response_size !=
+      metadata_size_ + metadata_signature_size_ + buffer_offset_) {
+    LOG(ERROR) << "update_check_response_size (" << update_check_response_size
+               << ") doesn't match metadata_size (" << metadata_size_
+               << ") + metadata_signature_size (" << metadata_signature_size_
+               << ") + buffer_offset (" << buffer_offset_ << ").";
+    return ErrorCode::kPayloadSizeMismatchError;
+  }
 
   // Verifies the payload hash.
   TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadVerificationError,
@@ -1563,7 +1808,7 @@
       payload_hash_calculator_.raw_hash() == update_check_response_hash);
 
   // Verifies the signed payload hash.
-  if (!utils::FileExists(path_to_public_key.value().c_str())) {
+  if (public_key.empty()) {
     LOG(WARNING) << "Not verifying signed delta payload -- missing public key.";
     return ErrorCode::kSuccess;
   }
@@ -1576,7 +1821,7 @@
                       !hash_data.empty());
 
   if (!PayloadVerifier::VerifySignature(
-      signatures_message_data_, path_to_public_key.value(), hash_data)) {
+          signatures_message_data_, public_key, hash_data)) {
     // The autoupdate_CatchBadSignatures test checks for this string
     // in log-files. Keep in sync.
     LOG(ERROR) << "Public key verification failed, thus update failed.";
@@ -1584,16 +1829,6 @@
   }
 
   LOG(INFO) << "Payload hash matches value in payload.";
-
-  // At this point, we are guaranteed to have downloaded a full payload, i.e
-  // the one whose size matches the size mentioned in Omaha response. If any
-  // errors happen after this, it's likely a problem with the payload itself or
-  // the state of the system and not a problem with the URL or network.  So,
-  // indicate that to the download delegate so that AU can backoff
-  // appropriately.
-  if (download_delegate_)
-    download_delegate_->DownloadComplete();
-
   return ErrorCode::kSuccess;
 }
 
@@ -1615,8 +1850,7 @@
                                      const string& update_check_response_hash) {
   int64_t next_operation = kUpdateStateOperationInvalid;
   if (!(prefs->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) &&
-        next_operation != kUpdateStateOperationInvalid &&
-        next_operation > 0))
+        next_operation != kUpdateStateOperationInvalid && next_operation > 0))
     return false;
 
   string interrupted_hash;
@@ -1670,53 +1904,60 @@
     prefs->SetInt64(kPrefsManifestSignatureSize, -1);
     prefs->SetInt64(kPrefsResumedUpdateFailures, 0);
     prefs->Delete(kPrefsPostInstallSucceeded);
+    prefs->Delete(kPrefsVerityWritten);
+    prefs->Delete(kPrefsDynamicPartitionMetadataUpdated);
   }
   return true;
 }
 
-bool DeltaPerformer::CheckpointUpdateProgress() {
+bool DeltaPerformer::CheckpointUpdateProgress(bool force) {
+  base::TimeTicks curr_time = base::TimeTicks::Now();
+  if (force || curr_time > update_checkpoint_time_) {
+    update_checkpoint_time_ = curr_time + update_checkpoint_wait_;
+  } else {
+    return false;
+  }
+
   Terminator::set_exit_blocked(true);
   if (last_updated_buffer_offset_ != buffer_offset_) {
     // Resets the progress in case we die in the middle of the state update.
     ResetUpdateProgress(prefs_, true);
-    TEST_AND_RETURN_FALSE(
-        prefs_->SetString(kPrefsUpdateStateSHA256Context,
-                          payload_hash_calculator_.GetContext()));
+    TEST_AND_RETURN_FALSE(prefs_->SetString(
+        kPrefsUpdateStateSHA256Context, payload_hash_calculator_.GetContext()));
     TEST_AND_RETURN_FALSE(
         prefs_->SetString(kPrefsUpdateStateSignedSHA256Context,
                           signed_hash_calculator_.GetContext()));
-    TEST_AND_RETURN_FALSE(prefs_->SetInt64(kPrefsUpdateStateNextDataOffset,
-                                           buffer_offset_));
+    TEST_AND_RETURN_FALSE(
+        prefs_->SetInt64(kPrefsUpdateStateNextDataOffset, buffer_offset_));
     last_updated_buffer_offset_ = buffer_offset_;
 
     if (next_operation_num_ < num_total_operations_) {
       size_t partition_index = current_partition_;
       while (next_operation_num_ >= acc_num_operations_[partition_index])
         partition_index++;
-      const size_t partition_operation_num = next_operation_num_ - (
-          partition_index ? acc_num_operations_[partition_index - 1] : 0);
+      const size_t partition_operation_num =
+          next_operation_num_ -
+          (partition_index ? acc_num_operations_[partition_index - 1] : 0);
       const InstallOperation& op =
           partitions_[partition_index].operations(partition_operation_num);
-      TEST_AND_RETURN_FALSE(prefs_->SetInt64(kPrefsUpdateStateNextDataLength,
-                                             op.data_length()));
+      TEST_AND_RETURN_FALSE(
+          prefs_->SetInt64(kPrefsUpdateStateNextDataLength, op.data_length()));
     } else {
-      TEST_AND_RETURN_FALSE(prefs_->SetInt64(kPrefsUpdateStateNextDataLength,
-                                             0));
+      TEST_AND_RETURN_FALSE(
+          prefs_->SetInt64(kPrefsUpdateStateNextDataLength, 0));
     }
   }
-  TEST_AND_RETURN_FALSE(prefs_->SetInt64(kPrefsUpdateStateNextOperation,
-                                         next_operation_num_));
+  TEST_AND_RETURN_FALSE(
+      prefs_->SetInt64(kPrefsUpdateStateNextOperation, next_operation_num_));
   return true;
 }
 
 bool DeltaPerformer::PrimeUpdateState() {
   CHECK(manifest_valid_);
-  block_size_ = manifest_.block_size();
 
   int64_t next_operation = kUpdateStateOperationInvalid;
   if (!prefs_->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) ||
-      next_operation == kUpdateStateOperationInvalid ||
-      next_operation <= 0) {
+      next_operation == kUpdateStateOperationInvalid || next_operation <= 0) {
     // Initiating a new update, no more state needs to be initialized.
     return true;
   }
@@ -1724,9 +1965,9 @@
 
   // Resuming an update -- load the rest of the update state.
   int64_t next_data_offset = -1;
-  TEST_AND_RETURN_FALSE(prefs_->GetInt64(kPrefsUpdateStateNextDataOffset,
-                                         &next_data_offset) &&
-                        next_data_offset >= 0);
+  TEST_AND_RETURN_FALSE(
+      prefs_->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset) &&
+      next_data_offset >= 0);
   buffer_offset_ = next_data_offset;
 
   // The signed hash context and the signature blob may be empty if the
@@ -1738,21 +1979,17 @@
         signed_hash_calculator_.SetContext(signed_hash_context));
   }
 
-  string signature_blob;
-  if (prefs_->GetString(kPrefsUpdateStateSignatureBlob, &signature_blob)) {
-    signatures_message_data_.assign(signature_blob.begin(),
-                                    signature_blob.end());
-  }
+  prefs_->GetString(kPrefsUpdateStateSignatureBlob, &signatures_message_data_);
 
   string hash_context;
-  TEST_AND_RETURN_FALSE(prefs_->GetString(kPrefsUpdateStateSHA256Context,
-                                          &hash_context) &&
-                        payload_hash_calculator_.SetContext(hash_context));
+  TEST_AND_RETURN_FALSE(
+      prefs_->GetString(kPrefsUpdateStateSHA256Context, &hash_context) &&
+      payload_hash_calculator_.SetContext(hash_context));
 
   int64_t manifest_metadata_size = 0;
-  TEST_AND_RETURN_FALSE(prefs_->GetInt64(kPrefsManifestMetadataSize,
-                                         &manifest_metadata_size) &&
-                        manifest_metadata_size > 0);
+  TEST_AND_RETURN_FALSE(
+      prefs_->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size) &&
+      manifest_metadata_size > 0);
   metadata_size_ = manifest_metadata_size;
 
   int64_t manifest_signature_size = 0;
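
The BootControlInterface::PartitionMetadata type consumed by InitPartitionMetadata() above is declared outside this diff. Below is a minimal, self-contained sketch of the assumed layout, mirroring how the loop over dynamic_partition_metadata().groups() fills it; the struct definition, the group name, and the group size budget are illustrative stand-ins, not the real declaration.

#include <cstdint>
#include <map>
#include <string>
#include <utility>
#include <vector>

// Assumed shape of the metadata handed to the boot control HAL.
struct PartitionMetadata {
  struct Partition {
    std::string name;
    uint64_t size;  // Target size of the partition, in bytes.
  };
  struct Group {
    std::string name;
    uint64_t size;  // Maximum total size of the partitions in the group.
    std::vector<Partition> partitions;
  };
  std::vector<Group> groups;
};

// Builds the metadata from per-partition target sizes, the same way the loop
// over dynamic_partition_metadata().groups() does above, but with a single
// assumed group.
PartitionMetadata BuildMetadata(
    const std::map<std::string, uint64_t>& partition_sizes) {
  PartitionMetadata metadata;
  PartitionMetadata::Group group;
  group.name = "example_group";            // Assumed group name.
  group.size = 4ULL * 1024 * 1024 * 1024;  // Assumed 4 GiB group budget.
  for (const auto& entry : partition_sizes)
    group.partitions.push_back({entry.first, entry.second});
  metadata.groups.push_back(std::move(group));
  return metadata;
}

int main() {
  PartitionMetadata metadata =
      BuildMetadata({{"system", 1ULL << 30}, {"vendor", 1ULL << 28}});
  return metadata.groups.size() == 1 ? 0 : 1;
}

In the real code one Group is created per manifest group and only the partitions named by that group contribute their target_size; the sketch collapses this into a single assumed group for brevity.
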
diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h
index ac9ca80..17cb599 100644
--- a/payload_consumer/delta_performer.h
+++ b/payload_consumer/delta_performer.h
@@ -48,9 +48,6 @@
 
 class DeltaPerformer : public FileWriter {
  public:
-  static const uint64_t kSupportedMajorPayloadVersion;
-  static const uint32_t kSupportedMinorPayloadVersion;
-
   // Defines the granularity of progress logging in terms of how many "completed
   // chunks" we want to report at the most.
   static const unsigned kProgressLogMaxChunks;
@@ -63,6 +60,7 @@
   // operations. They must add up to one hundred (100).
   static const unsigned kProgressDownloadWeight;
   static const unsigned kProgressOperationsWeight;
+  static const uint64_t kCheckpointFrequencySeconds;
 
   DeltaPerformer(PrefsInterface* prefs,
                  BootControlInterface* boot_control,
@@ -70,14 +68,14 @@
                  DownloadActionDelegate* download_delegate,
                  InstallPlan* install_plan,
                  InstallPlan::Payload* payload,
-                 bool is_interactive)
+                 bool interactive)
       : prefs_(prefs),
         boot_control_(boot_control),
         hardware_(hardware),
         download_delegate_(download_delegate),
         install_plan_(install_plan),
         payload_(payload),
-        is_interactive_(is_interactive) {}
+        interactive_(interactive) {}
 
   // FileWriter's Write implementation where caller doesn't care about
   // error codes.
@@ -88,7 +86,7 @@
 
   // FileWriter's Write implementation that returns a more specific |error| code
   // in case of failures in Write operation.
-  bool Write(const void* bytes, size_t count, ErrorCode *error) override;
+  bool Write(const void* bytes, size_t count, ErrorCode* error) override;
 
   // Wrapper around close. Returns 0 on success or -errno on error.
   // Closes both 'path' given to Open() and the kernel path.
@@ -99,6 +97,10 @@
   // work. Returns whether the required file descriptors were successfully open.
   bool OpenCurrentPartition();
 
+  // Attempt to open the error-corrected device for the current partition.
+  // Returns whether the operation succeeded.
+  bool OpenCurrentECCPartition();
+
   // Closes the current partition file descriptors if open. Returns 0 on success
   // or -errno on error.
   int CloseCurrentPartition();
@@ -174,6 +176,7 @@
   friend class DeltaPerformerIntegrationTest;
   FRIEND_TEST(DeltaPerformerTest, BrilloMetadataSignatureSizeTest);
   FRIEND_TEST(DeltaPerformerTest, BrilloParsePayloadMetadataTest);
+  FRIEND_TEST(DeltaPerformerTest, ChooseSourceFDTest);
   FRIEND_TEST(DeltaPerformerTest, UsePublicKeyFromResponse);
 
   // Parse and move the update instructions of all partitions into our local
@@ -189,7 +192,8 @@
 
   // If |op_result| is false, emits an error message using |op_type_name| and
   // sets |*error| accordingly. Otherwise does nothing. Returns |op_result|.
-  bool HandleOpResult(bool op_result, const char* op_type_name,
+  bool HandleOpResult(bool op_result,
+                      const char* op_type_name,
                       ErrorCode* error);
 
   // Logs the progress of downloading/applying an update.
@@ -228,6 +232,13 @@
   bool PerformPuffDiffOperation(const InstallOperation& operation,
                                 ErrorCode* error);
 
+  // For a given operation, choose the source fd to be used (raw device or error
+  // correction device) based on the source operation hash.
+  // Returns nullptr if the source hash mismatch cannot be corrected, and sets
+  // |error| accordingly.
+  FileDescriptorPtr ChooseSourceFD(const InstallOperation& operation,
+                                   ErrorCode* error);
+
   // Extracts the payload signature message from the blob on the |operation| if
   // the offset matches the one specified by the manifest. Returns whether the
   // signature was extracted.
@@ -247,20 +258,24 @@
 
   // Checkpoints the update progress into persistent storage to allow this
   // update attempt to be resumed after reboot.
-  bool CheckpointUpdateProgress();
+  // If |force| is false, the checkpoint may be throttled.
+  bool CheckpointUpdateProgress(bool force);
 
   // Primes the required update state. Returns true if the update state was
   // successfully initialized to a saved resume state or if the update is a new
   // update. Returns false otherwise.
   bool PrimeUpdateState();
 
-  // If the Omaha response contains a public RSA key and we're allowed
-  // to use it (e.g. if we're in developer mode), extract the key from
-  // the response and store it in a temporary file and return true. In
-  // the affirmative the path to the temporary file is stored in
-  // |out_tmp_key| and it is the responsibility of the caller to clean
-  // it up.
-  bool GetPublicKeyFromResponse(base::FilePath *out_tmp_key);
+  // Get the public key to be used to verify metadata signature or payload
+  // signature. Always use |public_key_path_| if it exists; otherwise, if the
+  // Omaha response contains a public RSA key and we're allowed to use it
+  // (e.g. if we're in developer mode), decode the key from the response and
+  // store it in |out_public_key|. Returns false on failure.
+  bool GetPublicKey(std::string* out_public_key);
+
+  // After install_plan_ is filled with partition names and sizes, initialize
+  // the partition metadata and map the necessary devices before opening them.
+  bool InitPartitionMetadata();
 
   // Update Engine preference store.
   PrefsInterface* prefs_;
@@ -283,6 +298,22 @@
   // partition when using a delta payload.
   FileDescriptorPtr source_fd_{nullptr};
 
+  // File descriptor of the error corrected source partition. Only set while
+  // updating partition using a delta payload for a partition where error
+  // correction is available. The size of the error corrected device is smaller
+  // than the underlying raw device, since it doesn't include the error
+  // correction blocks.
+  FileDescriptorPtr source_ecc_fd_{nullptr};
+
+  // The total number of operations that failed source hash verification but
+  // passed after falling back to the error-corrected |source_ecc_fd_| device.
+  uint64_t source_ecc_recovered_failures_{0};
+
+  // Whether opening the current partition as an error-corrected device failed.
+  // Used to avoid re-opening the same source partition if it is not actually
+  // error corrected.
+  bool source_ecc_open_failure_{false};
+
   // File descriptor of the target partition. Only set while performing the
   // operations of a given partition.
   FileDescriptorPtr target_fd_{nullptr};
@@ -346,7 +377,7 @@
   HashCalculator signed_hash_calculator_;
 
   // Signatures message blob extracted directly from the payload.
-  brillo::Blob signatures_message_data_;
+  std::string signatures_message_data_;
 
   // The public key to be used. Provided as a member so that tests can
   // override with test keys.
@@ -363,19 +394,19 @@
   unsigned last_progress_chunk_{0};
 
   // If |true|, the update is user initiated (vs. periodic update checks).
-  bool is_interactive_{false};
+  bool interactive_{false};
 
   // The timeout after which we should force emitting a progress log (constant),
   // and the actual point in time for the next forced log to be emitted.
   const base::TimeDelta forced_progress_log_wait_{
       base::TimeDelta::FromSeconds(kProgressLogTimeoutSeconds)};
-  base::Time forced_progress_log_time_;
+  base::TimeTicks forced_progress_log_time_;
 
-  // The payload major payload version supported by DeltaPerformer.
-  uint64_t supported_major_version_{kSupportedMajorPayloadVersion};
-
-  // The delta minor payload version supported by DeltaPerformer.
-  uint32_t supported_minor_version_{kSupportedMinorPayloadVersion};
+  // The interval at which we should write an update checkpoint (constant), and
+  // the point in time at which the next checkpoint should be written.
+  const base::TimeDelta update_checkpoint_wait_{
+      base::TimeDelta::FromSeconds(kCheckpointFrequencySeconds)};
+  base::TimeTicks update_checkpoint_time_;
 
   DISALLOW_COPY_AND_ASSIGN(DeltaPerformer);
 };
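
OpenCurrentECCPartition() is declared above, but its body is not part of this diff. Given the source_ecc_fd_ and source_ecc_open_failure_ members, a plausible shape -- an assumption, not the actual implementation -- is to open the error-corrected device lazily and to cache a failed attempt so it is not retried for the same partition. The class name, the stand-in FileDescriptorPtr alias, and the device path below are hypothetical.

#include <memory>
#include <string>
#include <utility>

using FileDescriptorPtr = std::shared_ptr<int>;  // Stand-in for the real type.

class EccPartitionOpener {
 public:
  explicit EccPartitionOpener(std::string ecc_device)
      : ecc_device_(std::move(ecc_device)) {}

  // Mirrors the documented behavior: open the error-corrected device lazily,
  // and remember a failed attempt so it is not retried for this partition.
  bool OpenCurrentECCPartition() {
    if (source_ecc_fd_)
      return true;   // Already open for this partition.
    if (source_ecc_open_failure_)
      return false;  // A previous attempt failed; don't retry.
    source_ecc_fd_ = Open(ecc_device_);
    source_ecc_open_failure_ = (source_ecc_fd_ == nullptr);
    return !source_ecc_open_failure_;
  }

 private:
  // Stub standing in for the real file-descriptor open helper; always fails.
  static FileDescriptorPtr Open(const std::string& /*path*/) { return nullptr; }

  std::string ecc_device_;
  FileDescriptorPtr source_ecc_fd_;
  bool source_ecc_open_failure_{false};
};

int main() {
  EccPartitionOpener opener("/dev/block/fec-0");  // Hypothetical device path.
  opener.OpenCurrentECCPartition();               // Attempts the open, fails.
  // The second call returns immediately via the cached failure flag.
  return opener.OpenCurrentECCPartition() ? 1 : 0;
}

Caching the failure matters because ChooseSourceFD() may probe the error-corrected device on every source operation; without the flag, a partition with no error-correction data would pay the failed-open cost repeatedly.
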
diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc
index 3572a6d..6b4771d 100644
--- a/payload_consumer/delta_performer_integration_test.cc
+++ b/payload_consumer/delta_performer_integration_test.cc
@@ -39,6 +39,7 @@
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/mock_download_action.h"
 #include "update_engine/payload_consumer/payload_constants.h"
+#include "update_engine/payload_consumer/payload_metadata.h"
 #include "update_engine/payload_consumer/payload_verifier.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
 #include "update_engine/payload_generator/payload_signer.h"
@@ -49,11 +50,11 @@
 using std::string;
 using std::vector;
 using test_utils::GetBuildArtifactsPath;
+using test_utils::kRandomString;
 using test_utils::ScopedLoopMounter;
 using test_utils::System;
-using test_utils::kRandomString;
-using testing::Return;
 using testing::_;
+using testing::Return;
 
 extern const char* kUnittestPrivateKeyPath;
 extern const char* kUnittestPublicKeyPath;
@@ -61,8 +62,10 @@
 extern const char* kUnittestPublicKey2Path;
 
 static const uint32_t kDefaultKernelSize = 4096;  // Something small for a test
+// clang-format off
 static const uint8_t kNewData[] = {'T', 'h', 'i', 's', ' ', 'i', 's', ' ',
                                    'n', 'e', 'w', ' ', 'd', 'a', 't', 'a', '.'};
+// clang-format on
 
 namespace {
 struct DeltaState {
@@ -98,13 +101,13 @@
 };
 
 enum SignatureTest {
-  kSignatureNone,  // No payload signing.
-  kSignatureGenerator,  // Sign the payload at generation time.
-  kSignatureGenerated,  // Sign the payload after it's generated.
+  kSignatureNone,                  // No payload signing.
+  kSignatureGenerator,             // Sign the payload at generation time.
+  kSignatureGenerated,             // Sign the payload after it's generated.
   kSignatureGeneratedPlaceholder,  // Insert placeholder signatures, then real.
   kSignatureGeneratedPlaceholderMismatch,  // Insert a wrong sized placeholder.
   kSignatureGeneratedShell,  // Sign the generated payload through shell cmds.
-  kSignatureGeneratedShellBadKey,  // Sign with a bad key through shell cmds.
+  kSignatureGeneratedShellBadKey,     // Sign with a bad key through shell cmds.
   kSignatureGeneratedShellRotateCl1,  // Rotate key, test client v1
   kSignatureGeneratedShellRotateCl2,  // Rotate key, test client v2
 };
@@ -116,15 +119,10 @@
 
 }  // namespace
 
-class DeltaPerformerIntegrationTest : public ::testing::Test {
- public:
-  static void SetSupportedVersion(DeltaPerformer* performer,
-                                  uint64_t minor_version) {
-    performer->supported_minor_version_ = minor_version;
-  }
-};
+class DeltaPerformerIntegrationTest : public ::testing::Test {};
 
-static void CompareFilesByBlock(const string& a_file, const string& b_file,
+static void CompareFilesByBlock(const string& a_file,
+                                const string& b_file,
                                 size_t image_size) {
   EXPECT_EQ(0U, image_size % kBlockSize);
 
@@ -138,7 +136,7 @@
     EXPECT_EQ(0U, i % kBlockSize);
     brillo::Blob a_sub(&a_data[i], &a_data[i + kBlockSize]);
     brillo::Blob b_sub(&b_data[i], &b_data[i + kBlockSize]);
-    EXPECT_TRUE(a_sub == b_sub) << "Block " << (i/kBlockSize) << " differs";
+    EXPECT_TRUE(a_sub == b_sub) << "Block " << (i / kBlockSize) << " differs";
   }
   if (::testing::Test::HasNonfatalFailure()) {
     LOG(INFO) << "Compared filesystems with size " << image_size
@@ -171,9 +169,7 @@
   brillo::Blob hash;
   EXPECT_TRUE(HashCalculator::RawHashOfData(data, &hash));
   brillo::Blob signature;
-  EXPECT_TRUE(PayloadSigner::SignHash(hash,
-                                      private_key_path,
-                                      &signature));
+  EXPECT_TRUE(PayloadSigner::SignHash(hash, private_key_path, &signature));
   return signature.size();
 }
 
@@ -184,11 +180,7 @@
   signatures.push_back(brillo::Blob(signature_size, 0));
 
   return PayloadSigner::AddSignatureToPayload(
-      payload_path,
-      signatures,
-      {},
-      payload_path,
-      out_metadata_size);
+      payload_path, signatures, {}, payload_path, out_metadata_size);
 }
 
 static void SignGeneratedPayload(const string& payload_path,
@@ -210,9 +202,7 @@
                                       const string& payload_path) {
   string private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyPath);
   if (signature_test == kSignatureGeneratedShellBadKey) {
-    ASSERT_TRUE(utils::MakeTempFile("key.XXXXXX",
-                                    &private_key_path,
-                                    nullptr));
+    ASSERT_TRUE(utils::MakeTempFile("key.XXXXXX", &private_key_path, nullptr));
   } else {
     ASSERT_TRUE(signature_test == kSignatureGeneratedShell ||
                 signature_test == kSignatureGeneratedShellRotateCl1 ||
@@ -240,14 +230,12 @@
     RSA_free(rsa);
   }
   int signature_size = GetSignatureSize(private_key_path);
-  string hash_file;
-  ASSERT_TRUE(utils::MakeTempFile("hash.XXXXXX", &hash_file, nullptr));
-  ScopedPathUnlinker hash_unlinker(hash_file);
+  test_utils::ScopedTempFile hash_file("hash.XXXXXX");
   string signature_size_string;
   if (signature_test == kSignatureGeneratedShellRotateCl1 ||
       signature_test == kSignatureGeneratedShellRotateCl2)
-    signature_size_string = base::StringPrintf("%d:%d",
-                                               signature_size, signature_size);
+    signature_size_string =
+        base::StringPrintf("%d:%d", signature_size, signature_size);
   else
     signature_size_string = base::StringPrintf("%d", signature_size);
   string delta_generator_path = GetBuildArtifactsPath("delta_generator");
@@ -257,36 +245,33 @@
                 delta_generator_path.c_str(),
                 payload_path.c_str(),
                 signature_size_string.c_str(),
-                hash_file.c_str())));
+                hash_file.path().c_str())));
 
   // Sign the hash
   brillo::Blob hash, signature;
-  ASSERT_TRUE(utils::ReadFile(hash_file, &hash));
+  ASSERT_TRUE(utils::ReadFile(hash_file.path(), &hash));
   ASSERT_TRUE(PayloadSigner::SignHash(hash, private_key_path, &signature));
 
-  string sig_file;
-  ASSERT_TRUE(utils::MakeTempFile("signature.XXXXXX", &sig_file, nullptr));
-  ScopedPathUnlinker sig_unlinker(sig_file);
-  ASSERT_TRUE(test_utils::WriteFileVector(sig_file, signature));
+  test_utils::ScopedTempFile sig_file("signature.XXXXXX");
+  ASSERT_TRUE(test_utils::WriteFileVector(sig_file.path(), signature));
+  string sig_files = sig_file.path();
 
-  string sig_file2;
-  ASSERT_TRUE(utils::MakeTempFile("signature.XXXXXX", &sig_file2, nullptr));
-  ScopedPathUnlinker sig2_unlinker(sig_file2);
+  test_utils::ScopedTempFile sig_file2("signature.XXXXXX");
   if (signature_test == kSignatureGeneratedShellRotateCl1 ||
       signature_test == kSignatureGeneratedShellRotateCl2) {
     ASSERT_TRUE(PayloadSigner::SignHash(
         hash, GetBuildArtifactsPath(kUnittestPrivateKey2Path), &signature));
-    ASSERT_TRUE(test_utils::WriteFileVector(sig_file2, signature));
+    ASSERT_TRUE(test_utils::WriteFileVector(sig_file2.path(), signature));
     // Append second sig file to first path
-    sig_file += ":" + sig_file2;
+    sig_files += ":" + sig_file2.path();
   }
 
   ASSERT_EQ(0,
             System(base::StringPrintf(
-                "%s -in_file=%s -signature_file=%s -out_file=%s",
+                "%s -in_file=%s -payload_signature_file=%s -out_file=%s",
                 delta_generator_path.c_str(),
                 payload_path.c_str(),
-                sig_file.c_str(),
+                sig_files.c_str(),
                 payload_path.c_str())));
   int verify_result = System(base::StringPrintf(
       "%s -in_file=%s -public_key=%s -public_key_version=%d",
@@ -309,7 +294,7 @@
                               bool noop,
                               ssize_t chunk_size,
                               SignatureTest signature_test,
-                              DeltaState *state,
+                              DeltaState* state,
                               uint32_t minor_version) {
   EXPECT_TRUE(utils::MakeTempFile("a_img.XXXXXX", &state->a_img, nullptr));
   EXPECT_TRUE(utils::MakeTempFile("b_img.XXXXXX", &state->b_img, nullptr));
@@ -353,12 +338,13 @@
     brillo::Blob hardtocompress;
     while (hardtocompress.size() < 3 * kBlockSize) {
       hardtocompress.insert(hardtocompress.end(),
-                            std::begin(kRandomString), std::end(kRandomString));
+                            std::begin(kRandomString),
+                            std::end(kRandomString));
     }
-    EXPECT_TRUE(utils::WriteFile(base::StringPrintf("%s/hardtocompress",
-                                                    a_mnt.c_str()).c_str(),
-                                 hardtocompress.data(),
-                                 hardtocompress.size()));
+    EXPECT_TRUE(utils::WriteFile(
+        base::StringPrintf("%s/hardtocompress", a_mnt.c_str()).c_str(),
+        hardtocompress.data(),
+        hardtocompress.size()));
 
     brillo::Blob zeros(16 * 1024, 0);
     EXPECT_EQ(static_cast<int>(zeros.size()),
@@ -367,9 +353,8 @@
                               reinterpret_cast<const char*>(zeros.data()),
                               zeros.size()));
 
-    EXPECT_TRUE(
-        WriteSparseFile(base::StringPrintf("%s/move-from-sparse",
-                                           a_mnt.c_str()), 16 * 1024));
+    EXPECT_TRUE(WriteSparseFile(
+        base::StringPrintf("%s/move-from-sparse", a_mnt.c_str()), 16 * 1024));
 
     EXPECT_TRUE(WriteByteAtOffset(
         base::StringPrintf("%s/move-semi-sparse", a_mnt.c_str()), 4096));
@@ -377,10 +362,10 @@
     // Write 1 MiB of 0xff to try to catch the case where writing a bsdiff
     // patch fails to zero out the final block.
     brillo::Blob ones(1024 * 1024, 0xff);
-    EXPECT_TRUE(utils::WriteFile(base::StringPrintf("%s/ones",
-                                                    a_mnt.c_str()).c_str(),
-                                 ones.data(),
-                                 ones.size()));
+    EXPECT_TRUE(
+        utils::WriteFile(base::StringPrintf("%s/ones", a_mnt.c_str()).c_str(),
+                         ones.data(),
+                         ones.size()));
   }
 
   if (noop) {
@@ -391,9 +376,8 @@
     if (minor_version == kSourceMinorPayloadVersion) {
       // Create a result image with image_size bytes of garbage.
       brillo::Blob ones(state->image_size, 0xff);
-      EXPECT_TRUE(utils::WriteFile(state->result_img.c_str(),
-                                   ones.data(),
-                                   ones.size()));
+      EXPECT_TRUE(utils::WriteFile(
+          state->result_img.c_str(), ones.data(), ones.size()));
       EXPECT_EQ(utils::FileSize(state->a_img),
                 utils::FileSize(state->result_img));
     }
@@ -443,28 +427,26 @@
     brillo::Blob hardtocompress;
     while (hardtocompress.size() < 3 * kBlockSize) {
       hardtocompress.insert(hardtocompress.end(),
-                            std::begin(kRandomString), std::end(kRandomString));
+                            std::begin(kRandomString),
+                            std::end(kRandomString));
     }
-    EXPECT_TRUE(utils::WriteFile(base::StringPrintf("%s/hardtocompress",
-                                              b_mnt.c_str()).c_str(),
-                                 hardtocompress.data(),
-                                 hardtocompress.size()));
+    EXPECT_TRUE(utils::WriteFile(
+        base::StringPrintf("%s/hardtocompress", b_mnt.c_str()).c_str(),
+        hardtocompress.data(),
+        hardtocompress.size()));
   }
 
   string old_kernel;
-  EXPECT_TRUE(utils::MakeTempFile("old_kernel.XXXXXX",
-                                  &state->old_kernel,
-                                  nullptr));
+  EXPECT_TRUE(
+      utils::MakeTempFile("old_kernel.XXXXXX", &state->old_kernel, nullptr));
 
   string new_kernel;
-  EXPECT_TRUE(utils::MakeTempFile("new_kernel.XXXXXX",
-                                  &state->new_kernel,
-                                  nullptr));
+  EXPECT_TRUE(
+      utils::MakeTempFile("new_kernel.XXXXXX", &state->new_kernel, nullptr));
 
   string result_kernel;
-  EXPECT_TRUE(utils::MakeTempFile("result_kernel.XXXXXX",
-                                  &state->result_kernel,
-                                  nullptr));
+  EXPECT_TRUE(utils::MakeTempFile(
+      "result_kernel.XXXXXX", &state->result_kernel, nullptr));
 
   state->kernel_size = kDefaultKernelSize;
   state->old_kernel_data.resize(kDefaultKernelSize);
@@ -475,8 +457,8 @@
   test_utils::FillWithData(&state->result_kernel_data);
 
   // change the new kernel data
-  std::copy(std::begin(kNewData), std::end(kNewData),
-            state->new_kernel_data.begin());
+  std::copy(
+      std::begin(kNewData), std::end(kNewData), state->new_kernel_data.begin());
 
   if (noop) {
     state->old_kernel_data = state->new_kernel_data;
@@ -493,9 +475,7 @@
                                state->result_kernel_data.data(),
                                state->result_kernel_data.size()));
 
-  EXPECT_TRUE(utils::MakeTempFile("delta.XXXXXX",
-                                  &state->delta_path,
-                                  nullptr));
+  EXPECT_TRUE(utils::MakeTempFile("delta.XXXXXX", &state->delta_path, nullptr));
   LOG(INFO) << "delta path: " << state->delta_path;
   {
     const string private_key =
@@ -510,8 +490,8 @@
     payload_config.version.major = kChromeOSMajorPayloadVersion;
     payload_config.version.minor = minor_version;
     if (!full_rootfs) {
-      payload_config.source.partitions.emplace_back(kLegacyPartitionNameRoot);
-      payload_config.source.partitions.emplace_back(kLegacyPartitionNameKernel);
+      payload_config.source.partitions.emplace_back(kPartitionNameRoot);
+      payload_config.source.partitions.emplace_back(kPartitionNameKernel);
       payload_config.source.partitions.front().path = state->a_img;
       if (!full_kernel)
         payload_config.source.partitions.back().path = state->old_kernel;
@@ -524,9 +504,9 @@
         // Use 1 MiB chunk size for the full unittests.
         payload_config.hard_chunk_size = 1024 * 1024;
     }
-    payload_config.target.partitions.emplace_back(kLegacyPartitionNameRoot);
+    payload_config.target.partitions.emplace_back(kPartitionNameRoot);
     payload_config.target.partitions.back().path = state->b_img;
-    payload_config.target.partitions.emplace_back(kLegacyPartitionNameKernel);
+    payload_config.target.partitions.emplace_back(kPartitionNameKernel);
     payload_config.target.partitions.back().path = state->new_kernel;
     payload_config.target.image_info = new_image_info;
     EXPECT_TRUE(payload_config.target.LoadImageSize());
@@ -534,20 +514,18 @@
       EXPECT_TRUE(part.OpenFilesystem());
 
     EXPECT_TRUE(payload_config.Validate());
-    EXPECT_TRUE(
-        GenerateUpdatePayloadFile(
-            payload_config,
-            state->delta_path,
-            private_key,
-            &state->metadata_size));
+    EXPECT_TRUE(GenerateUpdatePayloadFile(
+        payload_config, state->delta_path, private_key, &state->metadata_size));
   }
   // Extend the "partitions" holding the file system a bit.
-  EXPECT_EQ(0, HANDLE_EINTR(truncate(state->a_img.c_str(),
-                                     state->image_size + 1024 * 1024)));
+  EXPECT_EQ(0,
+            HANDLE_EINTR(truncate(state->a_img.c_str(),
+                                  state->image_size + 1024 * 1024)));
   EXPECT_EQ(static_cast<off_t>(state->image_size + 1024 * 1024),
             utils::FileSize(state->a_img));
-  EXPECT_EQ(0, HANDLE_EINTR(truncate(state->b_img.c_str(),
-                                     state->image_size + 1024 * 1024)));
+  EXPECT_EQ(0,
+            HANDLE_EINTR(truncate(state->b_img.c_str(),
+                                  state->image_size + 1024 * 1024)));
   EXPECT_EQ(static_cast<off_t>(state->image_size + 1024 * 1024),
             utils::FileSize(state->b_img));
 
@@ -556,14 +534,14 @@
     int signature_size =
         GetSignatureSize(GetBuildArtifactsPath(kUnittestPrivateKeyPath));
     LOG(INFO) << "Inserting placeholder signature.";
-    ASSERT_TRUE(InsertSignaturePlaceholder(signature_size, state->delta_path,
-                                           &state->metadata_size));
+    ASSERT_TRUE(InsertSignaturePlaceholder(
+        signature_size, state->delta_path, &state->metadata_size));
 
     if (signature_test == kSignatureGeneratedPlaceholderMismatch) {
       signature_size -= 1;
       LOG(INFO) << "Inserting mismatched placeholder signature.";
-      ASSERT_FALSE(InsertSignaturePlaceholder(signature_size, state->delta_path,
-                                              &state->metadata_size));
+      ASSERT_FALSE(InsertSignaturePlaceholder(
+          signature_size, state->delta_path, &state->metadata_size));
       return;
     }
   }
@@ -584,24 +562,25 @@
   }
 }
 
-static void ApplyDeltaFile(bool full_kernel, bool full_rootfs, bool noop,
-                           SignatureTest signature_test, DeltaState* state,
+static void ApplyDeltaFile(bool full_kernel,
+                           bool full_rootfs,
+                           bool noop,
+                           SignatureTest signature_test,
+                           DeltaState* state,
                            bool hash_checks_mandatory,
                            OperationHashTest op_hash_test,
                            DeltaPerformer** performer,
                            uint32_t minor_version) {
   // Check the metadata.
   {
-    DeltaArchiveManifest manifest;
-    EXPECT_TRUE(PayloadSigner::LoadPayloadMetadata(state->delta_path,
-                                                   nullptr,
-                                                   &manifest,
-                                                   nullptr,
-                                                   &state->metadata_size,
-                                                   nullptr));
-    LOG(INFO) << "Metadata size: " << state->metadata_size;
     EXPECT_TRUE(utils::ReadFile(state->delta_path, &state->delta));
+    PayloadMetadata payload_metadata;
+    EXPECT_TRUE(payload_metadata.ParsePayloadHeader(state->delta));
+    state->metadata_size = payload_metadata.GetMetadataSize();
+    LOG(INFO) << "Metadata size: " << state->metadata_size;
 
+    DeltaArchiveManifest manifest;
+    EXPECT_TRUE(payload_metadata.GetManifest(state->delta, &manifest));
     if (signature_test == kSignatureNone) {
       EXPECT_FALSE(manifest.has_signatures_offset());
       EXPECT_FALSE(manifest.has_signatures_size());
@@ -617,7 +596,7 @@
         EXPECT_EQ(2, sigs_message.signatures_size());
       else
         EXPECT_EQ(1, sigs_message.signatures_size());
-      const Signatures_Signature& signature = sigs_message.signatures(0);
+      const Signatures::Signature& signature = sigs_message.signatures(0);
       EXPECT_EQ(1U, signature.version());
 
       uint64_t expected_sig_data_length = 0;
@@ -627,8 +606,7 @@
         key_paths.push_back(GetBuildArtifactsPath(kUnittestPrivateKey2Path));
       }
       EXPECT_TRUE(PayloadSigner::SignatureBlobLength(
-          key_paths,
-          &expected_sig_data_length));
+          key_paths, &expected_sig_data_length));
       EXPECT_EQ(expected_sig_data_length, manifest.signatures_size());
       EXPECT_FALSE(signature.data().empty());
     }
@@ -675,7 +653,6 @@
       }
     }
 
-
     if (full_rootfs) {
       EXPECT_FALSE(manifest.has_old_rootfs_info());
       EXPECT_FALSE(manifest.has_old_image_info());
@@ -693,8 +670,8 @@
   }
 
   MockPrefs prefs;
-  EXPECT_CALL(prefs, SetInt64(kPrefsManifestMetadataSize,
-                              state->metadata_size)).WillOnce(Return(true));
+  EXPECT_CALL(prefs, SetInt64(kPrefsManifestMetadataSize, state->metadata_size))
+      .WillOnce(Return(true));
   EXPECT_CALL(prefs, SetInt64(kPrefsManifestSignatureSize, 0))
       .WillOnce(Return(true));
   EXPECT_CALL(prefs, SetInt64(kPrefsUpdateStateNextOperation, _))
@@ -709,6 +686,8 @@
       .WillRepeatedly(Return(true));
   EXPECT_CALL(prefs, SetString(kPrefsUpdateStateSignedSHA256Context, _))
       .WillRepeatedly(Return(true));
+  EXPECT_CALL(prefs, SetBoolean(kPrefsDynamicPartitionMetadataUpdated, _))
+      .WillRepeatedly(Return(true));
   if (op_hash_test == kValidOperationData && signature_test != kSignatureNone) {
     EXPECT_CALL(prefs, SetString(kPrefsUpdateStateSignatureBlob, _))
         .WillOnce(Return(true));
@@ -728,10 +707,10 @@
   install_plan->target_slot = 1;
 
   InstallPlan::Partition root_part;
-  root_part.name = kLegacyPartitionNameRoot;
+  root_part.name = kPartitionNameRoot;
 
   InstallPlan::Partition kernel_part;
-  kernel_part.name = kLegacyPartitionNameKernel;
+  kernel_part.name = kPartitionNameKernel;
 
   LOG(INFO) << "Setting payload metadata size in Omaha  = "
             << state->metadata_size;
@@ -748,20 +727,16 @@
                                   &state->mock_delegate_,
                                   install_plan,
                                   &install_plan->payloads[0],
-                                  false /* is_interactive */);
+                                  false /* interactive */);
   string public_key_path = GetBuildArtifactsPath(kUnittestPublicKeyPath);
   EXPECT_TRUE(utils::FileExists(public_key_path.c_str()));
   (*performer)->set_public_key_path(public_key_path);
-  DeltaPerformerIntegrationTest::SetSupportedVersion(*performer, minor_version);
 
   EXPECT_EQ(static_cast<off_t>(state->image_size),
             HashCalculator::RawHashOfFile(
-                state->a_img,
-                state->image_size,
-                &root_part.source_hash));
-  EXPECT_TRUE(HashCalculator::RawHashOfData(
-                  state->old_kernel_data,
-                  &kernel_part.source_hash));
+                state->a_img, state->image_size, &root_part.source_hash));
+  EXPECT_TRUE(HashCalculator::RawHashOfData(state->old_kernel_data,
+                                            &kernel_part.source_hash));
 
   // The partitions should be empty before DeltaPerformer.
   install_plan->partitions.clear();
@@ -778,13 +753,13 @@
   }
 
   state->fake_boot_control_.SetPartitionDevice(
-      kLegacyPartitionNameRoot, install_plan->source_slot, state->a_img);
+      kPartitionNameRoot, install_plan->source_slot, state->a_img);
   state->fake_boot_control_.SetPartitionDevice(
-      kLegacyPartitionNameKernel, install_plan->source_slot, state->old_kernel);
+      kPartitionNameKernel, install_plan->source_slot, state->old_kernel);
   state->fake_boot_control_.SetPartitionDevice(
-      kLegacyPartitionNameRoot, install_plan->target_slot, target_root);
+      kPartitionNameRoot, install_plan->target_slot, target_root);
   state->fake_boot_control_.SetPartitionDevice(
-      kLegacyPartitionNameKernel, install_plan->target_slot, target_kernel);
+      kPartitionNameKernel, install_plan->target_slot, target_kernel);
 
   ErrorCode expected_error, actual_error;
   bool continue_writing;
@@ -812,9 +787,8 @@
   const size_t kBytesPerWrite = 5;
   for (size_t i = 0; i < state->delta.size(); i += kBytesPerWrite) {
     size_t count = std::min(state->delta.size() - i, kBytesPerWrite);
-    bool write_succeeded = ((*performer)->Write(&state->delta[i],
-                                                count,
-                                                &actual_error));
+    bool write_succeeded =
+        ((*performer)->Write(&state->delta[i], count, &actual_error));
     // Normally write_succeeded should be true every time and
     // actual_error should be ErrorCode::kSuccess. If so, continue the loop.
     // But if we seeded an operation hash error above, then write_succeeded
@@ -852,9 +826,6 @@
     return;
   }
 
-  int expected_times = (expected_result == ErrorCode::kSuccess) ? 1 : 0;
-  EXPECT_CALL(state->mock_delegate_, DownloadComplete()).Times(expected_times);
-
   LOG(INFO) << "Verifying payload for expected result " << expected_result;
   brillo::Blob expected_hash;
   HashCalculator::RawHashOfData(state->delta, &expected_hash);
@@ -869,28 +840,27 @@
 
   brillo::Blob updated_kernel_partition;
   if (minor_version == kSourceMinorPayloadVersion) {
-    CompareFilesByBlock(state->result_kernel, state->new_kernel,
-                        state->kernel_size);
-    CompareFilesByBlock(state->result_img, state->b_img,
-                        state->image_size);
-    EXPECT_TRUE(utils::ReadFile(state->result_kernel,
-                                &updated_kernel_partition));
+    CompareFilesByBlock(
+        state->result_kernel, state->new_kernel, state->kernel_size);
+    CompareFilesByBlock(state->result_img, state->b_img, state->image_size);
+    EXPECT_TRUE(
+        utils::ReadFile(state->result_kernel, &updated_kernel_partition));
   } else {
-    CompareFilesByBlock(state->old_kernel, state->new_kernel,
-                        state->kernel_size);
-    CompareFilesByBlock(state->a_img, state->b_img,
-                        state->image_size);
+    CompareFilesByBlock(
+        state->old_kernel, state->new_kernel, state->kernel_size);
+    CompareFilesByBlock(state->a_img, state->b_img, state->image_size);
     EXPECT_TRUE(utils::ReadFile(state->old_kernel, &updated_kernel_partition));
   }
 
   ASSERT_GE(updated_kernel_partition.size(), arraysize(kNewData));
-  EXPECT_TRUE(std::equal(std::begin(kNewData), std::end(kNewData),
+  EXPECT_TRUE(std::equal(std::begin(kNewData),
+                         std::end(kNewData),
                          updated_kernel_partition.begin()));
 
   const auto& partitions = state->install_plan.partitions;
   EXPECT_EQ(2U, partitions.size());
-  EXPECT_EQ(kLegacyPartitionNameRoot, partitions[0].name);
-  EXPECT_EQ(kLegacyPartitionNameKernel, partitions[1].name);
+  EXPECT_EQ(kPartitionNameRoot, partitions[0].name);
+  EXPECT_EQ(kPartitionNameKernel, partitions[1].name);
 
   EXPECT_EQ(kDefaultKernelSize, partitions[1].target_size);
   brillo::Blob expected_new_kernel_hash;
@@ -901,9 +871,8 @@
   EXPECT_EQ(state->image_size, partitions[0].target_size);
   brillo::Blob expected_new_rootfs_hash;
   EXPECT_EQ(static_cast<off_t>(state->image_size),
-            HashCalculator::RawHashOfFile(state->b_img,
-                                          state->image_size,
-                                          &expected_new_rootfs_hash));
+            HashCalculator::RawHashOfFile(
+                state->b_img, state->image_size, &expected_new_rootfs_hash));
   EXPECT_EQ(expected_new_rootfs_hash, partitions[0].target_hash);
 }
 
@@ -919,20 +888,29 @@
     case kSignatureGeneratedShellBadKey:
       expected_result = ErrorCode::kDownloadPayloadPubKeyVerificationError;
       break;
-    default: break;  // appease gcc
+    default:
+      break;  // appease gcc
   }
 
   VerifyPayloadResult(performer, state, expected_result, minor_version);
 }
 
-void DoSmallImageTest(bool full_kernel, bool full_rootfs, bool noop,
+void DoSmallImageTest(bool full_kernel,
+                      bool full_rootfs,
+                      bool noop,
                       ssize_t chunk_size,
                       SignatureTest signature_test,
-                      bool hash_checks_mandatory, uint32_t minor_version) {
+                      bool hash_checks_mandatory,
+                      uint32_t minor_version) {
   DeltaState state;
-  DeltaPerformer *performer = nullptr;
-  GenerateDeltaFile(full_kernel, full_rootfs, noop, chunk_size,
-                    signature_test, &state, minor_version);
+  DeltaPerformer* performer = nullptr;
+  GenerateDeltaFile(full_kernel,
+                    full_rootfs,
+                    noop,
+                    chunk_size,
+                    signature_test,
+                    &state,
+                    minor_version);
 
   ScopedPathUnlinker a_img_unlinker(state.a_img);
   ScopedPathUnlinker b_img_unlinker(state.b_img);
@@ -941,9 +919,15 @@
   ScopedPathUnlinker old_kernel_unlinker(state.old_kernel);
   ScopedPathUnlinker new_kernel_unlinker(state.new_kernel);
   ScopedPathUnlinker result_kernel_unlinker(state.result_kernel);
-  ApplyDeltaFile(full_kernel, full_rootfs, noop, signature_test,
-                 &state, hash_checks_mandatory, kValidOperationData,
-                 &performer, minor_version);
+  ApplyDeltaFile(full_kernel,
+                 full_rootfs,
+                 noop,
+                 signature_test,
+                 &state,
+                 hash_checks_mandatory,
+                 kValidOperationData,
+                 &performer,
+                 minor_version);
   VerifyPayload(performer, &state, signature_test, minor_version);
   delete performer;
 }
@@ -952,94 +936,174 @@
                                  bool hash_checks_mandatory) {
   DeltaState state;
   uint64_t minor_version = kFullPayloadMinorVersion;
-  GenerateDeltaFile(true, true, false, -1, kSignatureGenerated, &state,
-                    minor_version);
+  GenerateDeltaFile(
+      true, true, false, -1, kSignatureGenerated, &state, minor_version);
   ScopedPathUnlinker a_img_unlinker(state.a_img);
   ScopedPathUnlinker b_img_unlinker(state.b_img);
   ScopedPathUnlinker delta_unlinker(state.delta_path);
   ScopedPathUnlinker old_kernel_unlinker(state.old_kernel);
   ScopedPathUnlinker new_kernel_unlinker(state.new_kernel);
-  DeltaPerformer *performer = nullptr;
-  ApplyDeltaFile(true, true, false, kSignatureGenerated, &state,
-                 hash_checks_mandatory, op_hash_test, &performer,
+  DeltaPerformer* performer = nullptr;
+  ApplyDeltaFile(true,
+                 true,
+                 false,
+                 kSignatureGenerated,
+                 &state,
+                 hash_checks_mandatory,
+                 op_hash_test,
+                 &performer,
                  minor_version);
   delete performer;
 }
 
-
 TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageTest) {
-  DoSmallImageTest(false, false, false, -1, kSignatureGenerator,
-                   false, kInPlaceMinorPayloadVersion);
+  DoSmallImageTest(false,
+                   false,
+                   false,
+                   -1,
+                   kSignatureGenerator,
+                   false,
+                   kInPlaceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignaturePlaceholderTest) {
-  DoSmallImageTest(false, false, false, -1, kSignatureGeneratedPlaceholder,
-                   false, kInPlaceMinorPayloadVersion);
+TEST(DeltaPerformerIntegrationTest,
+     RunAsRootSmallImageSignaturePlaceholderTest) {
+  DoSmallImageTest(false,
+                   false,
+                   false,
+                   -1,
+                   kSignatureGeneratedPlaceholder,
+                   false,
+                   kInPlaceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignaturePlaceholderMismatchTest) {
+TEST(DeltaPerformerIntegrationTest,
+     RunAsRootSmallImageSignaturePlaceholderMismatchTest) {
   DeltaState state;
-  GenerateDeltaFile(false, false, false, -1,
-                    kSignatureGeneratedPlaceholderMismatch, &state,
+  GenerateDeltaFile(false,
+                    false,
+                    false,
+                    -1,
+                    kSignatureGeneratedPlaceholderMismatch,
+                    &state,
                     kInPlaceMinorPayloadVersion);
 }
 
 TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageChunksTest) {
-  DoSmallImageTest(false, false, false, kBlockSize, kSignatureGenerator,
-                   false, kInPlaceMinorPayloadVersion);
+  DoSmallImageTest(false,
+                   false,
+                   false,
+                   kBlockSize,
+                   kSignatureGenerator,
+                   false,
+                   kInPlaceMinorPayloadVersion);
 }
 
 TEST(DeltaPerformerIntegrationTest, RunAsRootFullKernelSmallImageTest) {
-  DoSmallImageTest(true, false, false, -1, kSignatureGenerator,
-                   false, kInPlaceMinorPayloadVersion);
+  DoSmallImageTest(true,
+                   false,
+                   false,
+                   -1,
+                   kSignatureGenerator,
+                   false,
+                   kInPlaceMinorPayloadVersion);
 }
 
 TEST(DeltaPerformerIntegrationTest, RunAsRootFullSmallImageTest) {
-  DoSmallImageTest(true, true, false, -1, kSignatureGenerator,
-                   true, kFullPayloadMinorVersion);
+  DoSmallImageTest(true,
+                   true,
+                   false,
+                   -1,
+                   kSignatureGenerator,
+                   true,
+                   kFullPayloadMinorVersion);
 }
 
 TEST(DeltaPerformerIntegrationTest, RunAsRootNoopSmallImageTest) {
-  DoSmallImageTest(false, false, true, -1, kSignatureGenerator,
-                   false, kInPlaceMinorPayloadVersion);
+  DoSmallImageTest(false,
+                   false,
+                   true,
+                   -1,
+                   kSignatureGenerator,
+                   false,
+                   kInPlaceMinorPayloadVersion);
 }
 
 TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignNoneTest) {
-  DoSmallImageTest(false, false, false, -1, kSignatureNone,
-                   false, kInPlaceMinorPayloadVersion);
+  DoSmallImageTest(false,
+                   false,
+                   false,
+                   -1,
+                   kSignatureNone,
+                   false,
+                   kInPlaceMinorPayloadVersion);
 }
 
 TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedTest) {
-  DoSmallImageTest(false, false, false, -1, kSignatureGenerated,
-                   true, kInPlaceMinorPayloadVersion);
+  DoSmallImageTest(false,
+                   false,
+                   false,
+                   -1,
+                   kSignatureGenerated,
+                   true,
+                   kInPlaceMinorPayloadVersion);
 }
 
 TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedShellTest) {
-  DoSmallImageTest(false, false, false, -1, kSignatureGeneratedShell,
-                   false, kInPlaceMinorPayloadVersion);
+  DoSmallImageTest(false,
+                   false,
+                   false,
+                   -1,
+                   kSignatureGeneratedShell,
+                   false,
+                   kInPlaceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedShellBadKeyTest) {
-  DoSmallImageTest(false, false, false, -1, kSignatureGeneratedShellBadKey,
-                   false, kInPlaceMinorPayloadVersion);
+TEST(DeltaPerformerIntegrationTest,
+     RunAsRootSmallImageSignGeneratedShellBadKeyTest) {
+  DoSmallImageTest(false,
+                   false,
+                   false,
+                   -1,
+                   kSignatureGeneratedShellBadKey,
+                   false,
+                   kInPlaceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedShellRotateCl1Test) {
-  DoSmallImageTest(false, false, false, -1, kSignatureGeneratedShellRotateCl1,
-                   false, kInPlaceMinorPayloadVersion);
+TEST(DeltaPerformerIntegrationTest,
+     RunAsRootSmallImageSignGeneratedShellRotateCl1Test) {
+  DoSmallImageTest(false,
+                   false,
+                   false,
+                   -1,
+                   kSignatureGeneratedShellRotateCl1,
+                   false,
+                   kInPlaceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedShellRotateCl2Test) {
-  DoSmallImageTest(false, false, false, -1, kSignatureGeneratedShellRotateCl2,
-                   false, kInPlaceMinorPayloadVersion);
+TEST(DeltaPerformerIntegrationTest,
+     RunAsRootSmallImageSignGeneratedShellRotateCl2Test) {
+  DoSmallImageTest(false,
+                   false,
+                   false,
+                   -1,
+                   kSignatureGeneratedShellRotateCl2,
+                   false,
+                   kInPlaceMinorPayloadVersion);
 }
 
 TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSourceOpsTest) {
-  DoSmallImageTest(false, false, false, -1, kSignatureGenerator,
-                   false, kSourceMinorPayloadVersion);
+  DoSmallImageTest(false,
+                   false,
+                   false,
+                   -1,
+                   kSignatureGenerator,
+                   false,
+                   kSourceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootMandatoryOperationHashMismatchTest) {
+TEST(DeltaPerformerIntegrationTest,
+     RunAsRootMandatoryOperationHashMismatchTest) {
   DoOperationHashMismatchTest(kInvalidOperationData, true);
 }
 
diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc
index 88df98a..b7a38cc 100644
--- a/payload_consumer/delta_performer_unittest.cc
+++ b/payload_consumer/delta_performer_unittest.cc
@@ -18,7 +18,9 @@
 
 #include <endian.h>
 #include <inttypes.h>
+#include <time.h>
 
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -38,6 +40,7 @@
 #include "update_engine/common/fake_prefs.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/fake_file_descriptor.h"
 #include "update_engine/payload_consumer/mock_download_action.h"
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_generator/bzip.h"
@@ -51,8 +54,8 @@
 using std::string;
 using std::vector;
 using test_utils::GetBuildArtifactsPath;
-using test_utils::System;
 using test_utils::kRandomString;
+using test_utils::System;
 using testing::_;
 
 extern const char* kUnittestPrivateKeyPath;
@@ -78,15 +81,19 @@
 };
 
 // Compressed data without checksum, generated with:
-// echo -n a | xz -9 --check=none | hexdump -v -e '"    " 12/1 "0x%02x, " "\n"'
+// echo -n "a$(head -c 4095 /dev/zero)" | xz -9 --check=none |
+//     hexdump -v -e '"    " 12/1 "0x%02x, " "\n"'
 const uint8_t kXzCompressedData[] = {
     0xfd, 0x37, 0x7a, 0x58, 0x5a, 0x00, 0x00, 0x00, 0xff, 0x12, 0xd9, 0x41,
     0x02, 0x00, 0x21, 0x01, 0x1c, 0x00, 0x00, 0x00, 0x10, 0xcf, 0x58, 0xcc,
-    0x01, 0x00, 0x00, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x11, 0x01,
-    0xad, 0xa6, 0x58, 0x04, 0x06, 0x72, 0x9e, 0x7a, 0x01, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x59, 0x5a,
+    0xe0, 0x0f, 0xff, 0x00, 0x1b, 0x5d, 0x00, 0x30, 0x80, 0x33, 0xff, 0xdf,
+    0xff, 0x51, 0xd6, 0xaf, 0x90, 0x1c, 0x1b, 0x4c, 0xaa, 0x3d, 0x7b, 0x28,
+    0xe4, 0x7a, 0x74, 0xbc, 0xe5, 0xa7, 0x33, 0x4e, 0xcf, 0x00, 0x00, 0x00,
+    0x00, 0x01, 0x2f, 0x80, 0x20, 0x00, 0x00, 0x00, 0x92, 0x7c, 0x7b, 0x24,
+    0xa8, 0x00, 0x0a, 0xfc, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59, 0x5a,
 };
 
+// clang-format off
 const uint8_t src_deflates[] = {
   /* raw      0  */ 0x11, 0x22,
   /* deflate  2  */ 0x63, 0x64, 0x62, 0x66, 0x61, 0x05, 0x00,
@@ -103,6 +110,7 @@
   /* deflate  9  */ 0x01, 0x05, 0x00, 0xFA, 0xFF, 0x01, 0x02, 0x03, 0x04, 0x05,
   /* deflate  19 */ 0x63, 0x04, 0x00
 };
+// clang-format on
 
 // To generate this patch either:
 // - Use puffin/src/patching_unittest.cc:TestPatching
@@ -118,29 +126,29 @@
 //   --patch_file=patch.bin
 // * hexdump -ve '"  " 12/1 "0x%02x, " "\n"' patch.bin
 const uint8_t puffdiff_patch[] = {
-  0x50, 0x55, 0x46, 0x31, 0x00, 0x00, 0x00, 0x51, 0x08, 0x01, 0x12, 0x27,
-  0x0A, 0x04, 0x08, 0x10, 0x10, 0x32, 0x0A, 0x04, 0x08, 0x50, 0x10, 0x0A,
-  0x0A, 0x04, 0x08, 0x60, 0x10, 0x12, 0x12, 0x04, 0x08, 0x10, 0x10, 0x58,
-  0x12, 0x04, 0x08, 0x78, 0x10, 0x28, 0x12, 0x05, 0x08, 0xA8, 0x01, 0x10,
-  0x38, 0x18, 0x1F, 0x1A, 0x24, 0x0A, 0x02, 0x10, 0x32, 0x0A, 0x04, 0x08,
-  0x48, 0x10, 0x50, 0x0A, 0x05, 0x08, 0x98, 0x01, 0x10, 0x12, 0x12, 0x02,
-  0x10, 0x58, 0x12, 0x04, 0x08, 0x70, 0x10, 0x58, 0x12, 0x05, 0x08, 0xC8,
-  0x01, 0x10, 0x38, 0x18, 0x21, 0x42, 0x53, 0x44, 0x49, 0x46, 0x46, 0x34,
-  0x30, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00,
-  0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-  0x00, 0x42, 0x5A, 0x68, 0x39, 0x31, 0x41, 0x59, 0x26, 0x53, 0x59, 0x65,
-  0x29, 0x8C, 0x9B, 0x00, 0x00, 0x03, 0x60, 0x40, 0x7A, 0x0E, 0x08, 0x00,
-  0x40, 0x00, 0x20, 0x00, 0x21, 0x22, 0x9A, 0x3D, 0x4F, 0x50, 0x40, 0x0C,
-  0x3B, 0xC7, 0x9B, 0xB2, 0x21, 0x0E, 0xE9, 0x15, 0x98, 0x7A, 0x7C, 0x5D,
-  0xC9, 0x14, 0xE1, 0x42, 0x41, 0x94, 0xA6, 0x32, 0x6C, 0x42, 0x5A, 0x68,
-  0x39, 0x31, 0x41, 0x59, 0x26, 0x53, 0x59, 0xF1, 0x20, 0x5F, 0x0D, 0x00,
-  0x00, 0x02, 0x41, 0x15, 0x42, 0x08, 0x20, 0x00, 0x40, 0x00, 0x00, 0x02,
-  0x40, 0x00, 0x20, 0x00, 0x22, 0x3D, 0x23, 0x10, 0x86, 0x03, 0x96, 0x54,
-  0x11, 0x16, 0x5F, 0x17, 0x72, 0x45, 0x38, 0x50, 0x90, 0xF1, 0x20, 0x5F,
-  0x0D, 0x42, 0x5A, 0x68, 0x39, 0x31, 0x41, 0x59, 0x26, 0x53, 0x59, 0x07,
-  0xD4, 0xCB, 0x6E, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x20, 0x00,
-  0x21, 0x18, 0x46, 0x82, 0xEE, 0x48, 0xA7, 0x0A, 0x12, 0x00, 0xFA, 0x99,
-  0x6D, 0xC0};
+    0x50, 0x55, 0x46, 0x31, 0x00, 0x00, 0x00, 0x51, 0x08, 0x01, 0x12, 0x27,
+    0x0A, 0x04, 0x08, 0x10, 0x10, 0x32, 0x0A, 0x04, 0x08, 0x50, 0x10, 0x0A,
+    0x0A, 0x04, 0x08, 0x60, 0x10, 0x12, 0x12, 0x04, 0x08, 0x10, 0x10, 0x58,
+    0x12, 0x04, 0x08, 0x78, 0x10, 0x28, 0x12, 0x05, 0x08, 0xA8, 0x01, 0x10,
+    0x38, 0x18, 0x1F, 0x1A, 0x24, 0x0A, 0x02, 0x10, 0x32, 0x0A, 0x04, 0x08,
+    0x48, 0x10, 0x50, 0x0A, 0x05, 0x08, 0x98, 0x01, 0x10, 0x12, 0x12, 0x02,
+    0x10, 0x58, 0x12, 0x04, 0x08, 0x70, 0x10, 0x58, 0x12, 0x05, 0x08, 0xC8,
+    0x01, 0x10, 0x38, 0x18, 0x21, 0x42, 0x53, 0x44, 0x49, 0x46, 0x46, 0x34,
+    0x30, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x42, 0x5A, 0x68, 0x39, 0x31, 0x41, 0x59, 0x26, 0x53, 0x59, 0x65,
+    0x29, 0x8C, 0x9B, 0x00, 0x00, 0x03, 0x60, 0x40, 0x7A, 0x0E, 0x08, 0x00,
+    0x40, 0x00, 0x20, 0x00, 0x21, 0x22, 0x9A, 0x3D, 0x4F, 0x50, 0x40, 0x0C,
+    0x3B, 0xC7, 0x9B, 0xB2, 0x21, 0x0E, 0xE9, 0x15, 0x98, 0x7A, 0x7C, 0x5D,
+    0xC9, 0x14, 0xE1, 0x42, 0x41, 0x94, 0xA6, 0x32, 0x6C, 0x42, 0x5A, 0x68,
+    0x39, 0x31, 0x41, 0x59, 0x26, 0x53, 0x59, 0xF1, 0x20, 0x5F, 0x0D, 0x00,
+    0x00, 0x02, 0x41, 0x15, 0x42, 0x08, 0x20, 0x00, 0x40, 0x00, 0x00, 0x02,
+    0x40, 0x00, 0x20, 0x00, 0x22, 0x3D, 0x23, 0x10, 0x86, 0x03, 0x96, 0x54,
+    0x11, 0x16, 0x5F, 0x17, 0x72, 0x45, 0x38, 0x50, 0x90, 0xF1, 0x20, 0x5F,
+    0x0D, 0x42, 0x5A, 0x68, 0x39, 0x31, 0x41, 0x59, 0x26, 0x53, 0x59, 0x07,
+    0xD4, 0xCB, 0x6E, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x20, 0x00,
+    0x21, 0x18, 0x46, 0x82, 0xEE, 0x48, 0xA7, 0x0A, 0x12, 0x00, 0xFA, 0x99,
+    0x6D, 0xC0};
 
 }  // namespace
 
@@ -169,23 +177,24 @@
 
   brillo::Blob GeneratePayload(const brillo::Blob& blob_data,
                                const vector<AnnotatedOperation>& aops,
-                               bool sign_payload) {
-    return GeneratePayload(blob_data, aops, sign_payload,
-                           DeltaPerformer::kSupportedMajorPayloadVersion,
-                           DeltaPerformer::kSupportedMinorPayloadVersion);
+                               bool sign_payload,
+                               PartitionConfig* old_part = nullptr) {
+    return GeneratePayload(blob_data,
+                           aops,
+                           sign_payload,
+                           kMaxSupportedMajorPayloadVersion,
+                           kMaxSupportedMinorPayloadVersion,
+                           old_part);
   }
 
   brillo::Blob GeneratePayload(const brillo::Blob& blob_data,
                                const vector<AnnotatedOperation>& aops,
                                bool sign_payload,
                                uint64_t major_version,
-                               uint32_t minor_version) {
-    string blob_path;
-    EXPECT_TRUE(utils::MakeTempFile("Blob-XXXXXX", &blob_path, nullptr));
-    ScopedPathUnlinker blob_unlinker(blob_path);
-    EXPECT_TRUE(utils::WriteFile(blob_path.c_str(),
-                                 blob_data.data(),
-                                 blob_data.size()));
+                               uint32_t minor_version,
+                               PartitionConfig* old_part = nullptr) {
+    test_utils::ScopedTempFile blob_file("Blob-XXXXXX");
+    EXPECT_TRUE(test_utils::WriteFileVector(blob_file.path(), blob_data));
 
     PayloadGenerationConfig config;
     config.version.major = major_version;
@@ -194,46 +203,70 @@
     PayloadFile payload;
     EXPECT_TRUE(payload.Init(config));
 
-    PartitionConfig old_part(kLegacyPartitionNameRoot);
+    std::unique_ptr<PartitionConfig> old_part_uptr;
+    if (!old_part) {
+      old_part_uptr = std::make_unique<PartitionConfig>(kPartitionNameRoot);
+      old_part = old_part_uptr.get();
+    }
     if (minor_version != kFullPayloadMinorVersion) {
       // When generating a delta payload we need to include the old partition
       // information to mark it as a delta payload.
-      old_part.path = "/dev/null";
-      old_part.size = 0;
+      if (old_part->path.empty()) {
+        old_part->path = "/dev/null";
+      }
     }
-    PartitionConfig new_part(kLegacyPartitionNameRoot);
+    PartitionConfig new_part(kPartitionNameRoot);
     new_part.path = "/dev/zero";
     new_part.size = 1234;
 
-    payload.AddPartition(old_part, new_part, aops);
+    payload.AddPartition(*old_part, new_part, aops);
 
     // We include a kernel partition without operations.
-    old_part.name = kLegacyPartitionNameKernel;
-    new_part.name = kLegacyPartitionNameKernel;
+    old_part->name = kPartitionNameKernel;
+    new_part.name = kPartitionNameKernel;
     new_part.size = 0;
-    payload.AddPartition(old_part, new_part, {});
+    payload.AddPartition(*old_part, new_part, {});
 
-    string payload_path;
-    EXPECT_TRUE(utils::MakeTempFile("Payload-XXXXXX", &payload_path, nullptr));
-    ScopedPathUnlinker payload_unlinker(payload_path);
+    test_utils::ScopedTempFile payload_file("Payload-XXXXXX");
     string private_key =
         sign_payload ? GetBuildArtifactsPath(kUnittestPrivateKeyPath) : "";
-    EXPECT_TRUE(payload.WritePayload(
-        payload_path, blob_path, private_key, &payload_.metadata_size));
+    EXPECT_TRUE(payload.WritePayload(payload_file.path(),
+                                     blob_file.path(),
+                                     private_key,
+                                     &payload_.metadata_size));
 
     brillo::Blob payload_data;
-    EXPECT_TRUE(utils::ReadFile(payload_path, &payload_data));
+    EXPECT_TRUE(utils::ReadFile(payload_file.path(), &payload_data));
     return payload_data;
   }
 
+  brillo::Blob GenerateSourceCopyPayload(const brillo::Blob& copied_data,
+                                         bool add_hash,
+                                         PartitionConfig* old_part = nullptr) {
+    PayloadGenerationConfig config;
+    const uint64_t kDefaultBlockSize = config.block_size;
+    EXPECT_EQ(0U, copied_data.size() % kDefaultBlockSize);
+    uint64_t num_blocks = copied_data.size() / kDefaultBlockSize;
+    AnnotatedOperation aop;
+    *(aop.op.add_src_extents()) = ExtentForRange(0, num_blocks);
+    *(aop.op.add_dst_extents()) = ExtentForRange(0, num_blocks);
+    aop.op.set_type(InstallOperation::SOURCE_COPY);
+    brillo::Blob src_hash;
+    EXPECT_TRUE(HashCalculator::RawHashOfData(copied_data, &src_hash));
+    if (add_hash)
+      aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size());
+
+    return GeneratePayload(brillo::Blob(), {aop}, false, old_part);
+  }
+
   // Apply |payload_data| on partition specified in |source_path|.
   // Expect result of performer_.Write() to be |expect_success|.
   // Returns the result of the payload application.
   brillo::Blob ApplyPayload(const brillo::Blob& payload_data,
                             const string& source_path,
                             bool expect_success) {
-    return ApplyPayloadToData(payload_data, source_path, brillo::Blob(),
-                              expect_success);
+    return ApplyPayloadToData(
+        payload_data, source_path, brillo::Blob(), expect_success);
   }
 
   // Apply the payload provided in |payload_data| reading from the |source_path|
@@ -245,29 +278,26 @@
                                   const string& source_path,
                                   const brillo::Blob& target_data,
                                   bool expect_success) {
-    string new_part;
-    EXPECT_TRUE(utils::MakeTempFile("Partition-XXXXXX", &new_part, nullptr));
-    ScopedPathUnlinker partition_unlinker(new_part);
-    EXPECT_TRUE(utils::WriteFile(new_part.c_str(), target_data.data(),
-                                 target_data.size()));
+    test_utils::ScopedTempFile new_part("Partition-XXXXXX");
+    EXPECT_TRUE(test_utils::WriteFileVector(new_part.path(), target_data));
 
     // We installed the operations only in the rootfs partition, but the
     // delta performer needs to access all the partitions.
     fake_boot_control_.SetPartitionDevice(
-        kLegacyPartitionNameRoot, install_plan_.target_slot, new_part);
+        kPartitionNameRoot, install_plan_.target_slot, new_part.path());
     fake_boot_control_.SetPartitionDevice(
-        kLegacyPartitionNameRoot, install_plan_.source_slot, source_path);
+        kPartitionNameRoot, install_plan_.source_slot, source_path);
     fake_boot_control_.SetPartitionDevice(
-        kLegacyPartitionNameKernel, install_plan_.target_slot, "/dev/null");
+        kPartitionNameKernel, install_plan_.target_slot, "/dev/null");
     fake_boot_control_.SetPartitionDevice(
-        kLegacyPartitionNameKernel, install_plan_.source_slot, "/dev/null");
+        kPartitionNameKernel, install_plan_.source_slot, "/dev/null");
 
     EXPECT_EQ(expect_success,
               performer_.Write(payload_data.data(), payload_data.size()));
     EXPECT_EQ(0, performer_.Close());
 
     brillo::Blob partition_data;
-    EXPECT_TRUE(utils::ReadFile(new_part, &partition_data));
+    EXPECT_TRUE(utils::ReadFile(new_part.path(), &partition_data));
     return partition_data;
   }
 
@@ -310,8 +340,10 @@
                                bool hash_checks_mandatory) {
     // Loads the payload and parses the manifest.
     brillo::Blob payload = GeneratePayload(brillo::Blob(),
-        vector<AnnotatedOperation>(), sign_payload,
-        kChromeOSMajorPayloadVersion, kFullPayloadMinorVersion);
+                                           vector<AnnotatedOperation>(),
+                                           sign_payload,
+                                           kChromeOSMajorPayloadVersion,
+                                           kFullPayloadMinorVersion);
 
     LOG(INFO) << "Payload size: " << payload.size();
 
@@ -375,28 +407,43 @@
     EXPECT_EQ(payload_.metadata_size, performer_.metadata_size_);
   }
 
-  void SetSupportedMajorVersion(uint64_t major_version) {
-    performer_.supported_major_version_ = major_version;
+  // Helper function to pretend that the ECC file descriptor was already opened.
+  // Returns a pointer to the created file descriptor.
+  FakeFileDescriptor* SetFakeECCFile(size_t size) {
+    EXPECT_FALSE(performer_.source_ecc_fd_) << "source_ecc_fd_ already open.";
+    FakeFileDescriptor* ret = new FakeFileDescriptor();
+    fake_ecc_fd_.reset(ret);
+    // Call Open() to simulate that the descriptor was already opened.
+    ret->Open("", 0);
+    ret->SetFileSize(size);
+    performer_.source_ecc_fd_ = fake_ecc_fd_;
+    return ret;
   }
+
+  uint64_t GetSourceEccRecoveredFailures() const {
+    return performer_.source_ecc_recovered_failures_;
+  }
+
   FakePrefs prefs_;
   InstallPlan install_plan_;
   InstallPlan::Payload payload_;
   FakeBootControl fake_boot_control_;
   FakeHardware fake_hardware_;
   MockDownloadActionDelegate mock_delegate_;
+  FileDescriptorPtr fake_ecc_fd_;
   DeltaPerformer performer_{&prefs_,
                             &fake_boot_control_,
                             &fake_hardware_,
                             &mock_delegate_,
                             &install_plan_,
                             &payload_,
-                            false /* is_interactive*/};
+                            false /* interactive*/};
 };
 
 TEST_F(DeltaPerformerTest, FullPayloadWriteTest) {
   payload_.type = InstallPayloadType::kFull;
-  brillo::Blob expected_data = brillo::Blob(std::begin(kRandomString),
-                                            std::end(kRandomString));
+  brillo::Blob expected_data =
+      brillo::Blob(std::begin(kRandomString), std::end(kRandomString));
   expected_data.resize(4096);  // block size
   vector<AnnotatedOperation> aops;
   AnnotatedOperation aop;
@@ -406,16 +453,19 @@
   aop.op.set_type(InstallOperation::REPLACE);
   aops.push_back(aop);
 
-  brillo::Blob payload_data = GeneratePayload(expected_data, aops, false,
-      kChromeOSMajorPayloadVersion, kFullPayloadMinorVersion);
+  brillo::Blob payload_data = GeneratePayload(expected_data,
+                                              aops,
+                                              false,
+                                              kChromeOSMajorPayloadVersion,
+                                              kFullPayloadMinorVersion);
 
   EXPECT_EQ(expected_data, ApplyPayload(payload_data, "/dev/null", true));
 }
 
 TEST_F(DeltaPerformerTest, ShouldCancelTest) {
   payload_.type = InstallPayloadType::kFull;
-  brillo::Blob expected_data = brillo::Blob(std::begin(kRandomString),
-                                            std::end(kRandomString));
+  brillo::Blob expected_data =
+      brillo::Blob(std::begin(kRandomString), std::end(kRandomString));
   expected_data.resize(4096);  // block size
   vector<AnnotatedOperation> aops;
   AnnotatedOperation aop;
@@ -425,21 +475,23 @@
   aop.op.set_type(InstallOperation::REPLACE);
   aops.push_back(aop);
 
-  brillo::Blob payload_data = GeneratePayload(expected_data, aops, false,
-      kChromeOSMajorPayloadVersion, kFullPayloadMinorVersion);
+  brillo::Blob payload_data = GeneratePayload(expected_data,
+                                              aops,
+                                              false,
+                                              kChromeOSMajorPayloadVersion,
+                                              kFullPayloadMinorVersion);
 
   testing::Mock::VerifyAndClearExpectations(&mock_delegate_);
   EXPECT_CALL(mock_delegate_, ShouldCancel(_))
-      .WillOnce(
-          testing::DoAll(testing::SetArgPointee<0>(ErrorCode::kError),
-                         testing::Return(true)));
+      .WillOnce(testing::DoAll(testing::SetArgPointee<0>(ErrorCode::kError),
+                               testing::Return(true)));
 
   ApplyPayload(payload_data, "/dev/null", false);
 }
 
 TEST_F(DeltaPerformerTest, ReplaceOperationTest) {
-  brillo::Blob expected_data = brillo::Blob(std::begin(kRandomString),
-                                            std::end(kRandomString));
+  brillo::Blob expected_data =
+      brillo::Blob(std::begin(kRandomString), std::end(kRandomString));
   expected_data.resize(4096);  // block size
   vector<AnnotatedOperation> aops;
   AnnotatedOperation aop;
@@ -455,8 +507,8 @@
 }
 
 TEST_F(DeltaPerformerTest, ReplaceBzOperationTest) {
-  brillo::Blob expected_data = brillo::Blob(std::begin(kRandomString),
-                                            std::end(kRandomString));
+  brillo::Blob expected_data =
+      brillo::Blob(std::begin(kRandomString), std::end(kRandomString));
   expected_data.resize(4096);  // block size
   brillo::Blob bz_data;
   EXPECT_TRUE(BzipCompress(expected_data, &bz_data));
@@ -476,9 +528,9 @@
 
 TEST_F(DeltaPerformerTest, ReplaceXzOperationTest) {
   brillo::Blob xz_data(std::begin(kXzCompressedData),
-                         std::end(kXzCompressedData));
-  // The compressed xz data contains only a single "a", but the operation should
-  // pad the rest of the two blocks with zeros.
+                       std::end(kXzCompressedData));
+  // The compressed xz data contains a single "a" and is padded with zeros for
+  // the rest of the block.
   brillo::Blob expected_data = brillo::Blob(4096, 0);
   expected_data[0] = 'a';
 
@@ -499,10 +551,10 @@
   brillo::Blob expected_data = existing_data;
   // Blocks 4, 5 and 7 should have zeros instead of 'a' after the operation is
   // applied.
-  std::fill(expected_data.data() + 4096 * 4, expected_data.data() + 4096 * 6,
-            0);
-  std::fill(expected_data.data() + 4096 * 7, expected_data.data() + 4096 * 8,
-            0);
+  std::fill(
+      expected_data.data() + 4096 * 4, expected_data.data() + 4096 * 6, 0);
+  std::fill(
+      expected_data.data() + 4096 * 7, expected_data.data() + 4096 * 8, 0);
 
   AnnotatedOperation aop;
   *(aop.op.add_dst_extents()) = ExtentForRange(4, 2);
@@ -528,17 +580,17 @@
   EXPECT_TRUE(HashCalculator::RawHashOfData(expected_data, &src_hash));
   aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size());
 
-  brillo::Blob payload_data = GeneratePayload(brillo::Blob(), {aop}, false);
+  test_utils::ScopedTempFile source("Source-XXXXXX");
+  EXPECT_TRUE(test_utils::WriteFileVector(source.path(), expected_data));
 
-  string source_path;
-  EXPECT_TRUE(utils::MakeTempFile("Source-XXXXXX",
-                                  &source_path, nullptr));
-  ScopedPathUnlinker path_unlinker(source_path);
-  EXPECT_TRUE(utils::WriteFile(source_path.c_str(),
-                               expected_data.data(),
-                               expected_data.size()));
+  PartitionConfig old_part(kPartitionNameRoot);
+  old_part.path = source.path();
+  old_part.size = expected_data.size();
 
-  EXPECT_EQ(expected_data, ApplyPayload(payload_data, source_path, true));
+  brillo::Blob payload_data =
+      GeneratePayload(brillo::Blob(), {aop}, false, &old_part);
+
+  EXPECT_EQ(expected_data, ApplyPayload(payload_data, source.path(), true));
 }
 
 TEST_F(DeltaPerformerTest, PuffdiffOperationTest) {
@@ -556,15 +608,18 @@
   EXPECT_TRUE(HashCalculator::RawHashOfData(src, &src_hash));
   aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size());
 
-  brillo::Blob payload_data = GeneratePayload(puffdiff_payload, {aop}, false);
+  test_utils::ScopedTempFile source("Source-XXXXXX");
+  EXPECT_TRUE(test_utils::WriteFileVector(source.path(), src));
 
-  string source_path;
-  EXPECT_TRUE(utils::MakeTempFile("Source-XXXXXX", &source_path, nullptr));
-  ScopedPathUnlinker path_unlinker(source_path);
-  EXPECT_TRUE(utils::WriteFile(source_path.c_str(), src.data(), src.size()));
+  PartitionConfig old_part(kPartitionNameRoot);
+  old_part.path = source.path();
+  old_part.size = src.size();
+
+  brillo::Blob payload_data =
+      GeneratePayload(puffdiff_payload, {aop}, false, &old_part);
 
   brillo::Blob dst(std::begin(dst_deflates), std::end(dst_deflates));
-  EXPECT_EQ(dst, ApplyPayload(payload_data, source_path, true));
+  EXPECT_EQ(dst, ApplyPayload(payload_data, source.path(), true));
 }
 
 TEST_F(DeltaPerformerTest, SourceHashMismatchTest) {
@@ -581,15 +636,106 @@
   EXPECT_TRUE(HashCalculator::RawHashOfData(expected_data, &src_hash));
   aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size());
 
-  brillo::Blob payload_data = GeneratePayload(brillo::Blob(), {aop}, false);
+  test_utils::ScopedTempFile source("Source-XXXXXX");
+  EXPECT_TRUE(test_utils::WriteFileVector(source.path(), actual_data));
 
-  string source_path;
-  EXPECT_TRUE(utils::MakeTempFile("Source-XXXXXX", &source_path, nullptr));
-  ScopedPathUnlinker path_unlinker(source_path);
-  EXPECT_TRUE(utils::WriteFile(source_path.c_str(), actual_data.data(),
-                               actual_data.size()));
+  PartitionConfig old_part(kPartitionNameRoot);
+  old_part.path = source.path();
+  old_part.size = actual_data.size();
 
-  EXPECT_EQ(actual_data, ApplyPayload(payload_data, source_path, false));
+  brillo::Blob payload_data =
+      GeneratePayload(brillo::Blob(), {aop}, false, &old_part);
+
+  EXPECT_EQ(actual_data, ApplyPayload(payload_data, source.path(), false));
+}
+
+// Test that the error-corrected file descriptor is used to read the partition
+// since the source partition doesn't match the operation hash.
+TEST_F(DeltaPerformerTest, ErrorCorrectionSourceCopyFallbackTest) {
+  constexpr size_t kCopyOperationSize = 4 * 4096;
+  test_utils::ScopedTempFile source("Source-XXXXXX");
+  // Write invalid data to the source image, which doesn't match the expected
+  // hash.
+  brillo::Blob invalid_data(kCopyOperationSize, 0x55);
+  EXPECT_TRUE(test_utils::WriteFileVector(source.path(), invalid_data));
+
+  // Setup the fec file descriptor as the fake stream, which matches
+  // |expected_data|.
+  FakeFileDescriptor* fake_fec = SetFakeECCFile(kCopyOperationSize);
+  brillo::Blob expected_data = FakeFileDescriptorData(kCopyOperationSize);
+
+  PartitionConfig old_part(kPartitionNameRoot);
+  old_part.path = source.path();
+  old_part.size = invalid_data.size();
+
+  brillo::Blob payload_data =
+      GenerateSourceCopyPayload(expected_data, true, &old_part);
+  EXPECT_EQ(expected_data, ApplyPayload(payload_data, source.path(), true));
+  // Verify that the fake_fec was actually used.
+  EXPECT_EQ(1U, fake_fec->GetReadOps().size());
+  EXPECT_EQ(1U, GetSourceEccRecoveredFailures());
+}
+
+// Test that the error-corrected file descriptor is used for a SOURCE_COPY
+// operation when no hash is available, but that the performer falls back to
+// the normal file descriptor when the error-corrected one is too small.
+TEST_F(DeltaPerformerTest, ErrorCorrectionSourceCopyWhenNoHashFallbackTest) {
+  constexpr size_t kCopyOperationSize = 4 * 4096;
+  test_utils::ScopedTempFile source("Source-XXXXXX");
+  // Setup the source path with the right expected data.
+  brillo::Blob expected_data = FakeFileDescriptorData(kCopyOperationSize);
+  EXPECT_TRUE(test_utils::WriteFileVector(source.path(), expected_data));
+
+  // Setup the fec file descriptor as the fake stream, with less data than
+  // expected.
+  FakeFileDescriptor* fake_fec = SetFakeECCFile(kCopyOperationSize / 2);
+
+  PartitionConfig old_part(kPartitionNameRoot);
+  old_part.path = source.path();
+  old_part.size = expected_data.size();
+
+  // The payload operation doesn't include an operation hash.
+  brillo::Blob payload_data =
+      GenerateSourceCopyPayload(expected_data, false, &old_part);
+  EXPECT_EQ(expected_data, ApplyPayload(payload_data, source.path(), true));
+  // Verify that an attempt was made to use fake_fec. Since this file
+  // descriptor is shorter than the operation, it may take more than one read
+  // to detect the EOF.
+  EXPECT_LE(1U, fake_fec->GetReadOps().size());
+  // This fallback doesn't count as an error-corrected operation since the
+  // operation hash was not available.
+  EXPECT_EQ(0U, GetSourceEccRecoveredFailures());
+}
+
+TEST_F(DeltaPerformerTest, ChooseSourceFDTest) {
+  constexpr size_t kSourceSize = 4 * 4096;
+  test_utils::ScopedTempFile source("Source-XXXXXX");
+  // Write invalid data to the source image, which doesn't match the expected
+  // hash.
+  brillo::Blob invalid_data(kSourceSize, 0x55);
+  EXPECT_TRUE(test_utils::WriteFileVector(source.path(), invalid_data));
+
+  performer_.source_fd_ = std::make_shared<EintrSafeFileDescriptor>();
+  performer_.source_fd_->Open(source.path().c_str(), O_RDONLY);
+  performer_.block_size_ = 4096;
+
+  // Setup the fec file descriptor as the fake stream, which matches
+  // |expected_data|.
+  FakeFileDescriptor* fake_fec = SetFakeECCFile(kSourceSize);
+  brillo::Blob expected_data = FakeFileDescriptorData(kSourceSize);
+
+  InstallOperation op;
+  *(op.add_src_extents()) = ExtentForRange(0, kSourceSize / 4096);
+  brillo::Blob src_hash;
+  EXPECT_TRUE(HashCalculator::RawHashOfData(expected_data, &src_hash));
+  op.set_src_sha256_hash(src_hash.data(), src_hash.size());
+
+  ErrorCode error = ErrorCode::kSuccess;
+  EXPECT_EQ(performer_.source_ecc_fd_, performer_.ChooseSourceFD(op, &error));
+  EXPECT_EQ(ErrorCode::kSuccess, error);
+  // Verify that the fake_fec was actually used.
+  EXPECT_EQ(1U, fake_fec->GetReadOps().size());
+  EXPECT_EQ(1U, GetSourceEccRecoveredFailures());
 }
 
 TEST_F(DeltaPerformerTest, ExtentsToByteStringTest) {
@@ -605,10 +751,8 @@
 
   string expected_output = "4096:4096,16384:8192,0:4083";
   string actual_output;
-  EXPECT_TRUE(DeltaPerformer::ExtentsToBsdiffPositionsString(extents,
-                                                             block_size,
-                                                             file_length,
-                                                             &actual_output));
+  EXPECT_TRUE(DeltaPerformer::ExtentsToBsdiffPositionsString(
+      extents, block_size, file_length, &actual_output));
   EXPECT_EQ(expected_output, actual_output);
 }
 
@@ -632,7 +776,22 @@
   manifest.mutable_old_rootfs_info();
   manifest.mutable_new_kernel_info();
   manifest.mutable_new_rootfs_info();
-  manifest.set_minor_version(DeltaPerformer::kSupportedMinorPayloadVersion);
+  manifest.set_minor_version(kMaxSupportedMinorPayloadVersion);
+
+  RunManifestValidation(manifest,
+                        kChromeOSMajorPayloadVersion,
+                        InstallPayloadType::kDelta,
+                        ErrorCode::kSuccess);
+}
+
+TEST_F(DeltaPerformerTest, ValidateManifestDeltaMinGoodTest) {
+  // The Manifest we are validating.
+  DeltaArchiveManifest manifest;
+  manifest.mutable_old_kernel_info();
+  manifest.mutable_old_rootfs_info();
+  manifest.mutable_new_kernel_info();
+  manifest.mutable_new_rootfs_info();
+  manifest.set_minor_version(kMinSupportedMinorPayloadVersion);
 
   RunManifestValidation(manifest,
                         kChromeOSMajorPayloadVersion,
@@ -645,7 +804,7 @@
   DeltaArchiveManifest manifest;
 
   RunManifestValidation(manifest,
-                        DeltaPerformer::kSupportedMajorPayloadVersion,
+                        kMaxSupportedMajorPayloadVersion,
                         InstallPayloadType::kFull,
                         ErrorCode::kSuccess);
 }
@@ -658,7 +817,7 @@
   manifest.mutable_old_rootfs_info();
 
   RunManifestValidation(manifest,
-                        DeltaPerformer::kSupportedMajorPayloadVersion,
+                        kMaxSupportedMajorPayloadVersion,
                         InstallPayloadType::kDelta,
                         ErrorCode::kUnsupportedMinorPayloadVersion);
 }
@@ -669,7 +828,7 @@
   manifest.mutable_old_kernel_info();
   manifest.mutable_new_kernel_info();
   manifest.mutable_new_rootfs_info();
-  manifest.set_minor_version(DeltaPerformer::kSupportedMinorPayloadVersion);
+  manifest.set_minor_version(kMaxSupportedMinorPayloadVersion);
 
   RunManifestValidation(manifest,
                         kChromeOSMajorPayloadVersion,
@@ -683,7 +842,7 @@
   manifest.mutable_old_rootfs_info();
   manifest.mutable_new_kernel_info();
   manifest.mutable_new_rootfs_info();
-  manifest.set_minor_version(DeltaPerformer::kSupportedMinorPayloadVersion);
+  manifest.set_minor_version(kMaxSupportedMinorPayloadVersion);
 
   RunManifestValidation(manifest,
                         kChromeOSMajorPayloadVersion,
@@ -697,7 +856,7 @@
   PartitionUpdate* partition = manifest.add_partitions();
   partition->mutable_old_partition_info();
   partition->mutable_new_partition_info();
-  manifest.set_minor_version(DeltaPerformer::kSupportedMinorPayloadVersion);
+  manifest.set_minor_version(kMaxSupportedMinorPayloadVersion);
 
   RunManifestValidation(manifest,
                         kBrilloMajorPayloadVersion,
@@ -710,13 +869,12 @@
   DeltaArchiveManifest manifest;
 
   // Generate a bad version number.
-  manifest.set_minor_version(DeltaPerformer::kSupportedMinorPayloadVersion +
-                             10000);
+  manifest.set_minor_version(kMaxSupportedMinorPayloadVersion + 10000);
   // Mark the manifest as a delta payload by setting old_rootfs_info.
   manifest.mutable_old_rootfs_info();
 
   RunManifestValidation(manifest,
-                        DeltaPerformer::kSupportedMajorPayloadVersion,
+                        kMaxSupportedMajorPayloadVersion,
                         InstallPayloadType::kDelta,
                         ErrorCode::kUnsupportedMinorPayloadVersion);
 }
@@ -730,22 +888,23 @@
   fake_hardware_.SetBuildTimestamp(2);
 
   RunManifestValidation(manifest,
-                        DeltaPerformer::kSupportedMajorPayloadVersion,
+                        kMaxSupportedMajorPayloadVersion,
                         InstallPayloadType::kFull,
                         ErrorCode::kPayloadTimestampError);
 }
 
 TEST_F(DeltaPerformerTest, BrilloMetadataSignatureSizeTest) {
+  unsigned int seed = time(nullptr);
   EXPECT_TRUE(performer_.Write(kDeltaMagic, sizeof(kDeltaMagic)));
 
   uint64_t major_version = htobe64(kBrilloMajorPayloadVersion);
   EXPECT_TRUE(performer_.Write(&major_version, 8));
 
-  uint64_t manifest_size = 222;
+  uint64_t manifest_size = rand_r(&seed) % 256;
   uint64_t manifest_size_be = htobe64(manifest_size);
   EXPECT_TRUE(performer_.Write(&manifest_size_be, 8));
 
-  uint32_t metadata_signature_size = 111;
+  uint32_t metadata_signature_size = rand_r(&seed) % 256;
   uint32_t metadata_signature_size_be = htobe32(metadata_signature_size);
   EXPECT_TRUE(performer_.Write(&metadata_signature_size_be, 4));
 
@@ -758,9 +917,8 @@
 }
 
 TEST_F(DeltaPerformerTest, BrilloParsePayloadMetadataTest) {
-  brillo::Blob payload_data = GeneratePayload({}, {}, true,
-                                              kBrilloMajorPayloadVersion,
-                                              kSourceMinorPayloadVersion);
+  brillo::Blob payload_data = GeneratePayload(
+      {}, {}, true, kBrilloMajorPayloadVersion, kSourceMinorPayloadVersion);
   install_plan_.hash_checks_mandatory = true;
   performer_.set_public_key_path(GetBuildArtifactsPath(kUnittestPublicKeyPath));
   ErrorCode error;
@@ -824,8 +982,6 @@
 }
 
 TEST_F(DeltaPerformerTest, UsePublicKeyFromResponse) {
-  base::FilePath key_path;
-
   // The result of the GetPublicKeyResponse() method is based on three things
   //
   //  1. Whether it's an official build; and
@@ -842,70 +998,81 @@
   ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
   string non_existing_file = temp_dir.GetPath().Append("non-existing").value();
   string existing_file = temp_dir.GetPath().Append("existing").value();
-  EXPECT_EQ(0, System(base::StringPrintf("touch %s", existing_file.c_str())));
+  constexpr char kExistingKey[] = "Existing";
+  ASSERT_TRUE(test_utils::WriteFileString(existing_file, kExistingKey));
 
-  // Non-official build, non-existing public-key, key in response -> true
+  // Non-official build, non-existing public-key, key in response ->
+  // kResponseKey
   fake_hardware_.SetIsOfficialBuild(false);
   performer_.public_key_path_ = non_existing_file;
-  // result of 'echo "Test" | base64'
-  install_plan_.public_key_rsa = "VGVzdAo=";
-  EXPECT_TRUE(performer_.GetPublicKeyFromResponse(&key_path));
-  EXPECT_FALSE(key_path.empty());
-  EXPECT_EQ(unlink(key_path.value().c_str()), 0);
-  // Same with official build -> false
+  // This is the result of 'echo -n "Response" | base64' and is not meant to be
+  // a valid public key, but it is valid base-64.
+  constexpr char kResponseKey[] = "Response";
+  constexpr char kBase64ResponseKey[] = "UmVzcG9uc2U=";
+  install_plan_.public_key_rsa = kBase64ResponseKey;
+  string public_key;
+  EXPECT_TRUE(performer_.GetPublicKey(&public_key));
+  EXPECT_EQ(public_key, kResponseKey);
+  // Same with official build -> no key
   fake_hardware_.SetIsOfficialBuild(true);
-  EXPECT_FALSE(performer_.GetPublicKeyFromResponse(&key_path));
+  EXPECT_TRUE(performer_.GetPublicKey(&public_key));
+  EXPECT_TRUE(public_key.empty());
 
-  // Non-official build, existing public-key, key in response -> false
+  // Non-official build, existing public-key, key in response -> kExistingKey
   fake_hardware_.SetIsOfficialBuild(false);
   performer_.public_key_path_ = existing_file;
-  // result of 'echo "Test" | base64'
-  install_plan_.public_key_rsa = "VGVzdAo=";
-  EXPECT_FALSE(performer_.GetPublicKeyFromResponse(&key_path));
-  // Same with official build -> false
+  install_plan_.public_key_rsa = kBase64ResponseKey;
+  EXPECT_TRUE(performer_.GetPublicKey(&public_key));
+  EXPECT_EQ(public_key, kExistingKey);
+  // Same with official build -> kExistingKey
   fake_hardware_.SetIsOfficialBuild(true);
-  EXPECT_FALSE(performer_.GetPublicKeyFromResponse(&key_path));
+  EXPECT_TRUE(performer_.GetPublicKey(&public_key));
+  EXPECT_EQ(public_key, kExistingKey);
 
-  // Non-official build, non-existing public-key, no key in response -> false
+  // Non-official build, non-existing public-key, no key in response -> no key
   fake_hardware_.SetIsOfficialBuild(false);
   performer_.public_key_path_ = non_existing_file;
   install_plan_.public_key_rsa = "";
-  EXPECT_FALSE(performer_.GetPublicKeyFromResponse(&key_path));
-  // Same with official build -> false
+  EXPECT_TRUE(performer_.GetPublicKey(&public_key));
+  EXPECT_TRUE(public_key.empty());
+  // Same with official build -> no key
   fake_hardware_.SetIsOfficialBuild(true);
-  EXPECT_FALSE(performer_.GetPublicKeyFromResponse(&key_path));
+  EXPECT_TRUE(performer_.GetPublicKey(&public_key));
+  EXPECT_TRUE(public_key.empty());
 
-  // Non-official build, existing public-key, no key in response -> false
+  // Non-official build, existing public-key, no key in response -> kExistingKey
   fake_hardware_.SetIsOfficialBuild(false);
   performer_.public_key_path_ = existing_file;
   install_plan_.public_key_rsa = "";
-  EXPECT_FALSE(performer_.GetPublicKeyFromResponse(&key_path));
-  // Same with official build -> false
+  EXPECT_TRUE(performer_.GetPublicKey(&public_key));
+  EXPECT_EQ(public_key, kExistingKey);
+  // Same with official build -> kExistingKey
   fake_hardware_.SetIsOfficialBuild(true);
-  EXPECT_FALSE(performer_.GetPublicKeyFromResponse(&key_path));
+  EXPECT_TRUE(performer_.GetPublicKey(&public_key));
+  EXPECT_EQ(public_key, kExistingKey);
 
   // Non-official build, non-existing public-key, key in response
   // but invalid base64 -> false
   fake_hardware_.SetIsOfficialBuild(false);
   performer_.public_key_path_ = non_existing_file;
   install_plan_.public_key_rsa = "not-valid-base64";
-  EXPECT_FALSE(performer_.GetPublicKeyFromResponse(&key_path));
+  EXPECT_FALSE(performer_.GetPublicKey(&public_key));
 }
 
 TEST_F(DeltaPerformerTest, ConfVersionsMatch) {
   // Test that the versions in update_engine.conf that is installed to the
-  // image match the supported delta versions in the update engine.
+  // image match the maximum supported delta versions in the update engine.
   uint32_t minor_version;
   brillo::KeyValueStore store;
   EXPECT_TRUE(store.Load(GetBuildArtifactsPath().Append("update_engine.conf")));
   EXPECT_TRUE(utils::GetMinorVersion(store, &minor_version));
-  EXPECT_EQ(DeltaPerformer::kSupportedMinorPayloadVersion, minor_version);
+  EXPECT_EQ(kMaxSupportedMinorPayloadVersion, minor_version);
 
   string major_version_str;
   uint64_t major_version;
   EXPECT_TRUE(store.GetString("PAYLOAD_MAJOR_VERSION", &major_version_str));
   EXPECT_TRUE(base::StringToUint64(major_version_str, &major_version));
-  EXPECT_EQ(DeltaPerformer::kSupportedMajorPayloadVersion, major_version);
+  EXPECT_EQ(kMaxSupportedMajorPayloadVersion, major_version);
 }
 
 }  // namespace chromeos_update_engine
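The rewritten tests above replace the MakeTempFile() + ScopedPathUnlinker pattern with test_utils::ScopedTempFile. As a minimal sketch of the RAII shape those call sites assume (the real helper lives in common/test_utils.h; the class name, /tmp location, and error handling below are illustrative only, not the authoritative implementation):

    // Illustrative sketch only: an RAII temporary file in the spirit of
    // test_utils::ScopedTempFile as used by the tests above.
    #include <stdlib.h>   // mkstemp
    #include <unistd.h>   // close, unlink
    #include <string>

    class ScopedTempFileSketch {
     public:
      // |pattern| must end in "XXXXXX", e.g. "Blob-XXXXXX".
      explicit ScopedTempFileSketch(const std::string& pattern) {
        std::string tmpl = "/tmp/" + pattern;
        int fd = mkstemp(&tmpl[0]);  // Creates the file; fills in the name.
        if (fd >= 0) {
          close(fd);
          path_ = tmpl;
        }
      }
      // Removes the file when the object goes out of scope, so no separate
      // ScopedPathUnlinker is needed.
      ~ScopedTempFileSketch() {
        if (!path_.empty())
          unlink(path_.c_str());
      }
      const std::string& path() const { return path_; }

     private:
      std::string path_;
    };

With this shape, paths such as blob_file.path() and payload_file.path() stay valid for the duration of a test and the backing files are cleaned up automatically.
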
diff --git a/payload_consumer/download_action.cc b/payload_consumer/download_action.cc
index f1b6e33..09afc42 100644
--- a/payload_consumer/download_action.cc
+++ b/payload_consumer/download_action.cc
@@ -44,19 +44,21 @@
                                HardwareInterface* hardware,
                                SystemState* system_state,
                                HttpFetcher* http_fetcher,
-                               bool is_interactive)
+                               bool interactive)
     : prefs_(prefs),
       boot_control_(boot_control),
       hardware_(hardware),
       system_state_(system_state),
       http_fetcher_(new MultiRangeHttpFetcher(http_fetcher)),
-      is_interactive_(is_interactive),
+      interactive_(interactive),
       writer_(nullptr),
       code_(ErrorCode::kSuccess),
       delegate_(nullptr),
       p2p_sharing_fd_(-1),
       p2p_visible_(true) {
+#if BASE_VER < 576279
   base::StatisticsRecorder::Initialize();
+#endif
 }
 
 DownloadAction::~DownloadAction() {}
@@ -70,8 +72,7 @@
   }
 
   if (delete_p2p_file) {
-    FilePath path =
-      system_state_->p2p_manager()->FileGetPath(p2p_file_id_);
+    FilePath path = system_state_->p2p_manager()->FileGetPath(p2p_file_id_);
     if (unlink(path.value().c_str()) != 0) {
       PLOG(ERROR) << "Error deleting p2p file " << path.value();
     } else {
@@ -84,7 +85,7 @@
 }
 
 bool DownloadAction::SetupP2PSharingFd() {
-  P2PManager *p2p_manager = system_state_->p2p_manager();
+  P2PManager* p2p_manager = system_state_->p2p_manager();
 
   if (!p2p_manager->FileShare(p2p_file_id_, payload_->size)) {
     LOG(ERROR) << "Unable to share file via p2p";
@@ -145,23 +146,21 @@
   }
   if (p2p_size < file_offset) {
     LOG(ERROR) << "Wanting to write to file offset " << file_offset
-               << " but existing p2p file is only " << p2p_size
-               << " bytes.";
+               << " but existing p2p file is only " << p2p_size << " bytes.";
     CloseP2PSharingFd(true);  // Delete p2p file.
     return;
   }
 
   off_t cur_file_offset = lseek(p2p_sharing_fd_, file_offset, SEEK_SET);
   if (cur_file_offset != static_cast<off_t>(file_offset)) {
-    PLOG(ERROR) << "Error seeking to position "
-                << file_offset << " in p2p file";
+    PLOG(ERROR) << "Error seeking to position " << file_offset
+                << " in p2p file";
     CloseP2PSharingFd(true);  // Delete p2p file.
   } else {
     // OK, seeking worked, now write the data
     ssize_t bytes_written = write(p2p_sharing_fd_, data, length);
     if (bytes_written != static_cast<ssize_t>(length)) {
-      PLOG(ERROR) << "Error writing "
-                  << length << " bytes at file offset "
+      PLOG(ERROR) << "Error writing " << length << " bytes at file offset "
                   << file_offset << " in p2p file";
       CloseP2PSharingFd(true);  // Delete p2p file.
     }
@@ -251,7 +250,7 @@
                                               delegate_,
                                               &install_plan_,
                                               payload_,
-                                              is_interactive_));
+                                              interactive_));
     writer_ = delta_performer_.get();
   }
   if (system_state_ != nullptr) {
@@ -318,7 +317,7 @@
   bytes_received_ = offset;
 }
 
-void DownloadAction::ReceivedBytes(HttpFetcher* fetcher,
+bool DownloadAction::ReceivedBytes(HttpFetcher* fetcher,
                                    const void* bytes,
                                    size_t length) {
   // Note that bytes_received_ is the current offset.
@@ -345,7 +344,7 @@
     // the TransferTerminated callback. Otherwise, this and the HTTP fetcher
     // objects may get destroyed before all callbacks are complete.
     TerminateProcessing();
-    return;
+    return false;
   }
 
   // Call p2p_manager_->FileMakeVisible() when we've successfully
@@ -356,6 +355,7 @@
     system_state_->p2p_manager()->FileMakeVisible(p2p_file_id_);
     p2p_visible_ = true;
   }
+  return true;
 }
 
 void DownloadAction::TransferComplete(HttpFetcher* fetcher, bool successful) {
@@ -374,7 +374,7 @@
       code = delta_performer_->VerifyPayload(payload_->hash, payload_->size);
     if (code == ErrorCode::kSuccess) {
       if (payload_ < &install_plan_.payloads.back() &&
-                 system_state_->payload_state()->NextPayload()) {
+          system_state_->payload_state()->NextPayload()) {
         LOG(INFO) << "Incrementing to next payload";
         // No need to reset if this payload was already applied.
         if (delta_performer_ && !payload_->already_applied)
@@ -387,11 +387,16 @@
         StartDownloading();
         return;
       }
+
+      // All payloads have been applied and verified.
+      if (delegate_)
+        delegate_->DownloadComplete();
+
       // Log UpdateEngine.DownloadAction.* histograms to help diagnose
-      // long-blocking oeprations.
+      // long-blocking operations.
       std::string histogram_output;
-      base::StatisticsRecorder::WriteGraph(
-          "UpdateEngine.DownloadAction.", &histogram_output);
+      base::StatisticsRecorder::WriteGraph("UpdateEngine.DownloadAction.",
+                                           &histogram_output);
       LOG(INFO) << histogram_output;
     } else {
       LOG(ERROR) << "Download of " << install_plan_.download_url
diff --git a/payload_consumer/download_action.h b/payload_consumer/download_action.h
index 81d7333..1777e22 100644
--- a/payload_consumer/download_action.h
+++ b/payload_consumer/download_action.h
@@ -64,8 +64,7 @@
 
 class PrefsInterface;
 
-class DownloadAction : public InstallPlanAction,
-                       public HttpFetcherDelegate {
+class DownloadAction : public InstallPlanAction, public HttpFetcherDelegate {
  public:
   // Debugging/logging
   static std::string StaticType() { return "DownloadAction"; }
@@ -79,7 +78,7 @@
                  HardwareInterface* hardware,
                  SystemState* system_state,
                  HttpFetcher* http_fetcher,
-                 bool is_interactive);
+                 bool interactive);
   ~DownloadAction() override;
 
   // InstallPlanAction overrides.
@@ -90,23 +89,20 @@
   std::string Type() const override { return StaticType(); }
 
   // Testing
-  void SetTestFileWriter(FileWriter* writer) {
-    writer_ = writer;
-  }
+  void SetTestFileWriter(FileWriter* writer) { writer_ = writer; }
 
   int GetHTTPResponseCode() { return http_fetcher_->http_response_code(); }
 
   // HttpFetcherDelegate methods (see http_fetcher.h)
-  void ReceivedBytes(HttpFetcher* fetcher,
-                     const void* bytes, size_t length) override;
+  bool ReceivedBytes(HttpFetcher* fetcher,
+                     const void* bytes,
+                     size_t length) override;
   void SeekToOffset(off_t offset) override;
   void TransferComplete(HttpFetcher* fetcher, bool successful) override;
   void TransferTerminated(HttpFetcher* fetcher) override;
 
   DownloadActionDelegate* delegate() const { return delegate_; }
-  void set_delegate(DownloadActionDelegate* delegate) {
-    delegate_ = delegate;
-  }
+  void set_delegate(DownloadActionDelegate* delegate) { delegate_ = delegate; }
 
   void set_base_offset(int64_t base_offset) { base_offset_ = base_offset; }
 
@@ -158,7 +154,7 @@
   // If |true|, the update is user-initiated (as opposed to a periodic update
   // check). Hence the |delta_performer_| can decide not to use the O_DSYNC
   // flag for a faster update.
-  bool is_interactive_;
+  bool interactive_;
 
   // The FileWriter that downloaded data should be written to. It will
   // either point to *decompressing_file_writer_ or *delta_performer_.
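
Since HttpFetcherDelegate::ReceivedBytes() now returns bool, the fetcher can stop delivering data as soon as the delegate no longer wants it (DownloadAction returns false after calling TerminateProcessing()). A self-contained sketch of that contract follows; ToyFetcher, ToyDelegate and CountingDelegate are illustrative stand-ins, not update_engine types:

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

class ToyFetcher;

// Stand-in for HttpFetcherDelegate; only the new bool-returning callback is
// modelled here.
class ToyDelegate {
 public:
  virtual ~ToyDelegate() = default;
  // Return false to tell the fetcher to stop delivering data, e.g. after the
  // action has called TerminateProcessing().
  virtual bool ReceivedBytes(ToyFetcher* fetcher,
                             const void* bytes,
                             size_t length) = 0;
};

class ToyFetcher {
 public:
  explicit ToyFetcher(ToyDelegate* delegate) : delegate_(delegate) {}

  void Fetch(const std::vector<char>& payload) {
    const size_t kChunkSize = 128;
    for (size_t off = 0; off < payload.size(); off += kChunkSize) {
      size_t len = std::min(kChunkSize, payload.size() - off);
      // Honour the delegate's return value and stop streaming when asked.
      if (!delegate_->ReceivedBytes(this, payload.data() + off, len))
        break;
    }
  }

 private:
  ToyDelegate* delegate_;
};

class CountingDelegate : public ToyDelegate {
 public:
  bool ReceivedBytes(ToyFetcher*, const void*, size_t length) override {
    received_ += length;
    return received_ < 256;  // Pretend processing terminated after 256 bytes.
  }
  size_t received_{0};
};

int main() {
  CountingDelegate delegate;
  ToyFetcher fetcher(&delegate);
  fetcher.Fetch(std::vector<char>(1024, 'x'));
  std::cout << "delegate consumed " << delegate.received_ << " bytes\n";
  return 0;
}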
diff --git a/payload_consumer/download_action_unittest.cc b/payload_consumer/download_action_unittest.cc
index 7ec7e0e..e6ca219 100644
--- a/payload_consumer/download_action_unittest.cc
+++ b/payload_consumer/download_action_unittest.cc
@@ -52,21 +52,20 @@
 using std::string;
 using std::unique_ptr;
 using test_utils::ScopedTempFile;
+using testing::_;
 using testing::AtLeast;
 using testing::InSequence;
 using testing::Return;
 using testing::SetArgPointee;
-using testing::_;
 
-class DownloadActionTest : public ::testing::Test { };
+class DownloadActionTest : public ::testing::Test {};
 
 namespace {
 
 class DownloadActionTestProcessorDelegate : public ActionProcessorDelegate {
  public:
-  explicit DownloadActionTestProcessorDelegate(ErrorCode expected_code)
-      : processing_done_called_(false),
-        expected_code_(expected_code) {}
+  DownloadActionTestProcessorDelegate()
+      : processing_done_called_(false), expected_code_(ErrorCode::kSuccess) {}
   ~DownloadActionTestProcessorDelegate() override {
     EXPECT_TRUE(processing_done_called_);
   }
@@ -90,6 +89,7 @@
     const string type = action->Type();
     if (type == DownloadAction::StaticType()) {
       EXPECT_EQ(expected_code_, code);
+      p2p_file_id_ = static_cast<DownloadAction*>(action)->p2p_file_id();
     } else {
       EXPECT_EQ(ErrorCode::kSuccess, code);
     }
@@ -99,6 +99,7 @@
   brillo::Blob expected_data_;
   bool processing_done_called_;
   ErrorCode expected_code_;
+  string p2p_file_id_;
 };
 
 class TestDirectFileWriter : public DirectFileWriter {
@@ -154,40 +155,41 @@
       install_plan.source_slot, true);
   fake_system_state.fake_boot_control()->SetSlotBootable(
       install_plan.target_slot, true);
-  ObjectFeederAction<InstallPlan> feeder_action;
-  feeder_action.set_obj(install_plan);
+  auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
+  feeder_action->set_obj(install_plan);
   MockPrefs prefs;
-  MockHttpFetcher* http_fetcher = new MockHttpFetcher(data.data(),
-                                                      data.size(),
-                                                      nullptr);
+  MockHttpFetcher* http_fetcher =
+      new MockHttpFetcher(data.data(), data.size(), nullptr);
   // takes ownership of passed in HttpFetcher
-  DownloadAction download_action(&prefs,
-                                 fake_system_state.boot_control(),
-                                 fake_system_state.hardware(),
-                                 &fake_system_state,
-                                 http_fetcher,
-                                 false /* is_interactive */);
-  download_action.SetTestFileWriter(&writer);
-  BondActions(&feeder_action, &download_action);
+  auto download_action =
+      std::make_unique<DownloadAction>(&prefs,
+                                       fake_system_state.boot_control(),
+                                       fake_system_state.hardware(),
+                                       &fake_system_state,
+                                       http_fetcher,
+                                       false /* interactive */);
+  download_action->SetTestFileWriter(&writer);
+  BondActions(feeder_action.get(), download_action.get());
   MockDownloadActionDelegate download_delegate;
   if (use_download_delegate) {
     InSequence s;
-    download_action.set_delegate(&download_delegate);
+    download_action->set_delegate(&download_delegate);
     if (data.size() > kMockHttpFetcherChunkSize)
       EXPECT_CALL(download_delegate,
                   BytesReceived(_, kMockHttpFetcherChunkSize, _));
     EXPECT_CALL(download_delegate, BytesReceived(_, _, _)).Times(AtLeast(1));
+    EXPECT_CALL(download_delegate, DownloadComplete())
+        .Times(fail_write == 0 ? 1 : 0);
   }
-  ErrorCode expected_code = ErrorCode::kSuccess;
-  if (fail_write > 0)
-    expected_code = ErrorCode::kDownloadWriteError;
-  DownloadActionTestProcessorDelegate delegate(expected_code);
+  DownloadActionTestProcessorDelegate delegate;
+  delegate.expected_code_ =
+      (fail_write > 0) ? ErrorCode::kDownloadWriteError : ErrorCode::kSuccess;
   delegate.expected_data_ = brillo::Blob(data.begin() + 1, data.end());
   delegate.path_ = output_temp_file.path();
   ActionProcessor processor;
   processor.set_delegate(&delegate);
-  processor.EnqueueAction(&feeder_action);
-  processor.EnqueueAction(&download_action);
+  processor.EnqueueAction(std::move(feeder_action));
+  processor.EnqueueAction(std::move(download_action));
 
   loop.PostTask(FROM_HERE,
                 base::Bind(&StartProcessorInRunLoop, &processor, http_fetcher));
@@ -206,7 +208,7 @@
   const char* foo = "foo";
   small.insert(small.end(), foo, foo + strlen(foo));
   TestWithData(small,
-               0,  // fail_write
+               0,      // fail_write
                true);  // use_download_delegate
 }
 
@@ -218,7 +220,7 @@
     c = ('9' == c) ? '0' : c + 1;
   }
   TestWithData(big,
-               0,  // fail_write
+               0,      // fail_write
                true);  // use_download_delegate
 }
 
@@ -230,7 +232,7 @@
     c = ('9' == c) ? '0' : c + 1;
   }
   TestWithData(big,
-               2,  // fail_write
+               2,      // fail_write
                true);  // use_download_delegate
 }
 
@@ -239,7 +241,7 @@
   const char* foo = "foofoo";
   small.insert(small.end(), foo, foo + strlen(foo));
   TestWithData(small,
-               0,  // fail_write
+               0,       // fail_write
                false);  // use_download_delegate
 }
 
@@ -269,24 +271,25 @@
         {.size = size, .type = InstallPayloadType::kFull});
     total_expected_download_size += size;
   }
-  ObjectFeederAction<InstallPlan> feeder_action;
-  feeder_action.set_obj(install_plan);
+  auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
+  feeder_action->set_obj(install_plan);
   MockPrefs prefs;
   MockHttpFetcher* http_fetcher = new MockHttpFetcher(
       payload_datas[0].data(), payload_datas[0].size(), nullptr);
   // takes ownership of passed in HttpFetcher
-  DownloadAction download_action(&prefs,
-                                 fake_system_state.boot_control(),
-                                 fake_system_state.hardware(),
-                                 &fake_system_state,
-                                 http_fetcher,
-                                 false /* is_interactive */);
-  download_action.SetTestFileWriter(&mock_file_writer);
-  BondActions(&feeder_action, &download_action);
+  auto download_action =
+      std::make_unique<DownloadAction>(&prefs,
+                                       fake_system_state.boot_control(),
+                                       fake_system_state.hardware(),
+                                       &fake_system_state,
+                                       http_fetcher,
+                                       false /* interactive */);
+  download_action->SetTestFileWriter(&mock_file_writer);
+  BondActions(feeder_action.get(), download_action.get());
   MockDownloadActionDelegate download_delegate;
   {
     InSequence s;
-    download_action.set_delegate(&download_delegate);
+    download_action->set_delegate(&download_delegate);
     // these are hand-computed based on the payloads specified above
     EXPECT_CALL(download_delegate,
                 BytesReceived(kMockHttpFetcherChunkSize,
@@ -318,8 +321,8 @@
                               total_expected_download_size));
   }
   ActionProcessor processor;
-  processor.EnqueueAction(&feeder_action);
-  processor.EnqueueAction(&download_action);
+  processor.EnqueueAction(std::move(feeder_action));
+  processor.EnqueueAction(std::move(download_action));
 
   loop.PostTask(
       FROM_HERE,
@@ -348,8 +351,7 @@
   brillo::FakeMessageLoop loop(nullptr);
   loop.SetAsCurrent();
 
-  brillo::Blob data(kMockHttpFetcherChunkSize +
-                      kMockHttpFetcherChunkSize / 2);
+  brillo::Blob data(kMockHttpFetcherChunkSize + kMockHttpFetcherChunkSize / 2);
   memset(data.data(), 0, data.size());
 
   ScopedTempFile temp_file;
@@ -358,31 +360,31 @@
     EXPECT_EQ(0, writer.Open(temp_file.path().c_str(), O_WRONLY | O_CREAT, 0));
 
     // takes ownership of passed in HttpFetcher
-    ObjectFeederAction<InstallPlan> feeder_action;
+    auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
     InstallPlan install_plan;
     install_plan.payloads.resize(1);
-    feeder_action.set_obj(install_plan);
+    feeder_action->set_obj(install_plan);
     FakeSystemState fake_system_state_;
     MockPrefs prefs;
-    DownloadAction download_action(
+    auto download_action = std::make_unique<DownloadAction>(
         &prefs,
         fake_system_state_.boot_control(),
         fake_system_state_.hardware(),
         &fake_system_state_,
         new MockHttpFetcher(data.data(), data.size(), nullptr),
-        false /* is_interactive */);
-    download_action.SetTestFileWriter(&writer);
+        false /* interactive */);
+    download_action->SetTestFileWriter(&writer);
     MockDownloadActionDelegate download_delegate;
     if (use_download_delegate) {
-      download_action.set_delegate(&download_delegate);
+      download_action->set_delegate(&download_delegate);
       EXPECT_CALL(download_delegate, BytesReceived(_, _, _)).Times(0);
     }
     TerminateEarlyTestProcessorDelegate delegate;
     ActionProcessor processor;
     processor.set_delegate(&delegate);
-    processor.EnqueueAction(&feeder_action);
-    processor.EnqueueAction(&download_action);
-    BondActions(&feeder_action, &download_action);
+    BondActions(feeder_action.get(), download_action.get());
+    processor.EnqueueAction(std::move(feeder_action));
+    processor.EnqueueAction(std::move(download_action));
 
     loop.PostTask(FROM_HERE,
                   base::Bind(&TerminateEarlyTestStarter, &processor));
@@ -410,7 +412,7 @@
 
 class DownloadActionTestAction;
 
-template<>
+template <>
 class ActionTraits<DownloadActionTestAction> {
  public:
   typedef InstallPlan OutputObjectType;
@@ -420,22 +422,21 @@
 // This is a simple Action class for testing.
 class DownloadActionTestAction : public Action<DownloadActionTestAction> {
  public:
-  DownloadActionTestAction() : did_run_(false) {}
+  DownloadActionTestAction() = default;
   typedef InstallPlan InputObjectType;
   typedef InstallPlan OutputObjectType;
   ActionPipe<InstallPlan>* in_pipe() { return in_pipe_.get(); }
   ActionPipe<InstallPlan>* out_pipe() { return out_pipe_.get(); }
   ActionProcessor* processor() { return processor_; }
   void PerformAction() {
-    did_run_ = true;
     ASSERT_TRUE(HasInputObject());
     EXPECT_TRUE(expected_input_object_ == GetInputObject());
     ASSERT_TRUE(processor());
     processor()->ActionComplete(this, ErrorCode::kSuccess);
   }
-  string Type() const { return "DownloadActionTestAction"; }
+  static std::string StaticType() { return "DownloadActionTestAction"; }
+  string Type() const { return StaticType(); }
   InstallPlan expected_input_object_;
-  bool did_run_;
 };
 
 namespace {
@@ -444,9 +445,19 @@
 // only by the test PassObjectOutTest.
 class PassObjectOutTestProcessorDelegate : public ActionProcessorDelegate {
  public:
-  void ProcessingDone(const ActionProcessor* processor, ErrorCode code) {
+  void ProcessingDone(const ActionProcessor* processor,
+                      ErrorCode code) override {
     brillo::MessageLoop::current()->BreakLoop();
   }
+  void ActionCompleted(ActionProcessor* processor,
+                       AbstractAction* action,
+                       ErrorCode code) override {
+    if (action->Type() == DownloadActionTestAction::StaticType()) {
+      did_test_action_run_ = true;
+    }
+  }
+
+  bool did_test_action_run_ = false;
 };
 
 }  // namespace
@@ -463,29 +474,30 @@
   install_plan.payloads.push_back({.size = 1});
   EXPECT_TRUE(
       HashCalculator::RawHashOfData({'x'}, &install_plan.payloads[0].hash));
-  ObjectFeederAction<InstallPlan> feeder_action;
-  feeder_action.set_obj(install_plan);
+  auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
+  feeder_action->set_obj(install_plan);
   MockPrefs prefs;
   FakeSystemState fake_system_state_;
-  DownloadAction download_action(&prefs,
-                                 fake_system_state_.boot_control(),
-                                 fake_system_state_.hardware(),
-                                 &fake_system_state_,
-                                 new MockHttpFetcher("x", 1, nullptr),
-                                 false /* is_interactive */);
-  download_action.SetTestFileWriter(&writer);
+  auto download_action =
+      std::make_unique<DownloadAction>(&prefs,
+                                       fake_system_state_.boot_control(),
+                                       fake_system_state_.hardware(),
+                                       &fake_system_state_,
+                                       new MockHttpFetcher("x", 1, nullptr),
+                                       false /* interactive */);
+  download_action->SetTestFileWriter(&writer);
 
-  DownloadActionTestAction test_action;
-  test_action.expected_input_object_ = install_plan;
-  BondActions(&feeder_action, &download_action);
-  BondActions(&download_action, &test_action);
+  auto test_action = std::make_unique<DownloadActionTestAction>();
+  test_action->expected_input_object_ = install_plan;
+  BondActions(feeder_action.get(), download_action.get());
+  BondActions(download_action.get(), test_action.get());
 
   ActionProcessor processor;
   PassObjectOutTestProcessorDelegate delegate;
   processor.set_delegate(&delegate);
-  processor.EnqueueAction(&feeder_action);
-  processor.EnqueueAction(&download_action);
-  processor.EnqueueAction(&test_action);
+  processor.EnqueueAction(std::move(feeder_action));
+  processor.EnqueueAction(std::move(download_action));
+  processor.EnqueueAction(std::move(test_action));
 
   loop.PostTask(
       FROM_HERE,
@@ -495,27 +507,22 @@
   loop.Run();
   EXPECT_FALSE(loop.PendingTasks());
 
-  EXPECT_EQ(true, test_action.did_run_);
+  EXPECT_EQ(true, delegate.did_test_action_run_);
 }
 
 // Test fixture for P2P tests.
 class P2PDownloadActionTest : public testing::Test {
  protected:
   P2PDownloadActionTest()
-    : start_at_offset_(0),
-      fake_um_(fake_system_state_.fake_clock()) {}
+      : start_at_offset_(0), fake_um_(fake_system_state_.fake_clock()) {}
 
   ~P2PDownloadActionTest() override {}
 
   // Derived from testing::Test.
-  void SetUp() override {
-    loop_.SetAsCurrent();
-  }
+  void SetUp() override { loop_.SetAsCurrent(); }
 
   // Derived from testing::Test.
-  void TearDown() override {
-    EXPECT_FALSE(loop_.PendingTasks());
-  }
+  void TearDown() override { EXPECT_FALSE(loop_.PendingTasks()); }
 
   // To be called by tests to set up the download. The |starting_offset|
   // parameter indicates where to resume.
@@ -527,10 +534,13 @@
       data_ += 'a' + (i % 25);
 
     // Setup p2p.
-    FakeP2PManagerConfiguration *test_conf = new FakeP2PManagerConfiguration();
-    p2p_manager_.reset(P2PManager::Construct(
-        test_conf, nullptr, &fake_um_, "cros_au", 3,
-        base::TimeDelta::FromDays(5)));
+    FakeP2PManagerConfiguration* test_conf = new FakeP2PManagerConfiguration();
+    p2p_manager_.reset(P2PManager::Construct(test_conf,
+                                             nullptr,
+                                             &fake_um_,
+                                             "cros_au",
+                                             3,
+                                             base::TimeDelta::FromDays(5)));
     fake_system_state_.set_p2p_manager(p2p_manager_.get());
   }
 
@@ -550,43 +560,44 @@
     install_plan.payloads.push_back(
         {.size = data_.length(),
          .hash = {'1', '2', '3', '4', 'h', 'a', 's', 'h'}});
-    ObjectFeederAction<InstallPlan> feeder_action;
-    feeder_action.set_obj(install_plan);
+    auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
+    feeder_action->set_obj(install_plan);
     MockPrefs prefs;
-    http_fetcher_ = new MockHttpFetcher(data_.c_str(),
-                                        data_.length(),
-                                        nullptr);
     // Note that DownloadAction takes ownership of the passed in HttpFetcher.
-    download_action_.reset(new DownloadAction(&prefs,
-                                              fake_system_state_.boot_control(),
-                                              fake_system_state_.hardware(),
-                                              &fake_system_state_,
-                                              http_fetcher_,
-                                              false /* is_interactive */));
-    download_action_->SetTestFileWriter(&writer);
-    BondActions(&feeder_action, download_action_.get());
-    DownloadActionTestProcessorDelegate delegate(ErrorCode::kSuccess);
-    delegate.expected_data_ = brillo::Blob(data_.begin() + start_at_offset_,
-                                           data_.end());
-    delegate.path_ = output_temp_file.path();
-    processor_.set_delegate(&delegate);
-    processor_.EnqueueAction(&feeder_action);
-    processor_.EnqueueAction(download_action_.get());
+    auto download_action = std::make_unique<DownloadAction>(
+        &prefs,
+        fake_system_state_.boot_control(),
+        fake_system_state_.hardware(),
+        &fake_system_state_,
+        new MockHttpFetcher(data_.c_str(), data_.length(), nullptr),
+        false /* interactive */);
+    auto http_fetcher = download_action->http_fetcher();
+    download_action->SetTestFileWriter(&writer);
+    BondActions(feeder_action.get(), download_action.get());
+    delegate_.expected_data_ =
+        brillo::Blob(data_.begin() + start_at_offset_, data_.end());
+    delegate_.path_ = output_temp_file.path();
+    processor_.set_delegate(&delegate_);
+    processor_.EnqueueAction(std::move(feeder_action));
+    processor_.EnqueueAction(std::move(download_action));
 
-    loop_.PostTask(FROM_HERE, base::Bind(
-        &P2PDownloadActionTest::StartProcessorInRunLoopForP2P,
-        base::Unretained(this)));
+    loop_.PostTask(
+        FROM_HERE,
+        base::Bind(
+            [](P2PDownloadActionTest* action_test, HttpFetcher* http_fetcher) {
+              action_test->processor_.StartProcessing();
+              http_fetcher->SetOffset(action_test->start_at_offset_);
+            },
+            base::Unretained(this),
+            base::Unretained(http_fetcher)));
     loop_.Run();
   }
 
   // Mainloop used to make StartDownload() synchronous.
   brillo::FakeMessageLoop loop_{nullptr};
 
-  // The DownloadAction instance under test.
-  unique_ptr<DownloadAction> download_action_;
-
-  // The HttpFetcher used in the test.
-  MockHttpFetcher* http_fetcher_;
+  // Delegate that is passed to the ActionProcessor.
+  DownloadActionTestProcessorDelegate delegate_;
 
   // The P2PManager used in the test.
   unique_ptr<P2PManager> p2p_manager_;
@@ -601,12 +612,6 @@
   string data_;
 
  private:
-  // Callback used in StartDownload() method.
-  void StartProcessorInRunLoopForP2P() {
-    processor_.StartProcessing();
-    download_action_->http_fetcher()->SetOffset(start_at_offset_);
-  }
-
   // The requested starting offset passed to SetupDownload().
   off_t start_at_offset_;
 
@@ -614,51 +619,33 @@
 };
 
 TEST_F(P2PDownloadActionTest, IsWrittenTo) {
-  if (!test_utils::IsXAttrSupported(FilePath("/tmp"))) {
-    LOG(WARNING) << "Skipping test because /tmp does not support xattr. "
-                 << "Please update your system to support this feature.";
-    return;
-  }
-
   SetupDownload(0);     // starting_offset
   StartDownload(true);  // use_p2p_to_share
 
   // Check the p2p file and its content matches what was sent.
-  string file_id = download_action_->p2p_file_id();
+  string file_id = delegate_.p2p_file_id_;
   EXPECT_NE("", file_id);
   EXPECT_EQ(static_cast<int>(data_.length()),
             p2p_manager_->FileGetSize(file_id));
   EXPECT_EQ(static_cast<int>(data_.length()),
             p2p_manager_->FileGetExpectedSize(file_id));
   string p2p_file_contents;
-  EXPECT_TRUE(ReadFileToString(p2p_manager_->FileGetPath(file_id),
-                               &p2p_file_contents));
+  EXPECT_TRUE(
+      ReadFileToString(p2p_manager_->FileGetPath(file_id), &p2p_file_contents));
   EXPECT_EQ(data_, p2p_file_contents);
 }
 
 TEST_F(P2PDownloadActionTest, DeleteIfHoleExists) {
-  if (!test_utils::IsXAttrSupported(FilePath("/tmp"))) {
-    LOG(WARNING) << "Skipping test because /tmp does not support xattr. "
-                 << "Please update your system to support this feature.";
-    return;
-  }
-
   SetupDownload(1000);  // starting_offset
   StartDownload(true);  // use_p2p_to_share
 
   // DownloadAction should convey that the file is not being shared
   // and that we don't have any p2p files.
-  EXPECT_EQ(download_action_->p2p_file_id(), "");
+  EXPECT_EQ(delegate_.p2p_file_id_, "");
   EXPECT_EQ(p2p_manager_->CountSharedFiles(), 0);
 }
 
 TEST_F(P2PDownloadActionTest, CanAppend) {
-  if (!test_utils::IsXAttrSupported(FilePath("/tmp"))) {
-    LOG(WARNING) << "Skipping test because /tmp does not support xattr. "
-                 << "Please update your system to support this feature.";
-    return;
-  }
-
   SetupDownload(1000);  // starting_offset
 
   // Prepare the file with existing data before starting to write to
@@ -669,14 +656,16 @@
   string existing_data;
   for (unsigned int i = 0; i < 1000; i++)
     existing_data += '0' + (i % 10);
-  ASSERT_EQ(WriteFile(p2p_manager_->FileGetPath(file_id), existing_data.c_str(),
-                      1000), 1000);
+  ASSERT_EQ(
+      WriteFile(
+          p2p_manager_->FileGetPath(file_id), existing_data.c_str(), 1000),
+      1000);
 
   StartDownload(true);  // use_p2p_to_share
 
   // DownloadAction should convey the same file_id and the file should
   // have the expected size.
-  EXPECT_EQ(download_action_->p2p_file_id(), file_id);
+  EXPECT_EQ(delegate_.p2p_file_id_, file_id);
   EXPECT_EQ(static_cast<ssize_t>(data_.length()),
             p2p_manager_->FileGetSize(file_id));
   EXPECT_EQ(static_cast<ssize_t>(data_.length()),
@@ -684,19 +673,13 @@
   string p2p_file_contents;
   // Check that the first 1000 bytes weren't touched and that we
   // appended the remainder as appropriate.
-  EXPECT_TRUE(ReadFileToString(p2p_manager_->FileGetPath(file_id),
-                               &p2p_file_contents));
+  EXPECT_TRUE(
+      ReadFileToString(p2p_manager_->FileGetPath(file_id), &p2p_file_contents));
   EXPECT_EQ(existing_data, p2p_file_contents.substr(0, 1000));
   EXPECT_EQ(data_.substr(1000), p2p_file_contents.substr(1000));
 }
 
 TEST_F(P2PDownloadActionTest, DeletePartialP2PFileIfResumingWithoutP2P) {
-  if (!test_utils::IsXAttrSupported(FilePath("/tmp"))) {
-    LOG(WARNING) << "Skipping test because /tmp does not support xattr. "
-                 << "Please update your system to support this feature.";
-    return;
-  }
-
   SetupDownload(1000);  // starting_offset
 
   // Prepare the file with all existing data before starting to write
@@ -707,8 +690,10 @@
   string existing_data;
   for (unsigned int i = 0; i < 1000; i++)
     existing_data += '0' + (i % 10);
-  ASSERT_EQ(WriteFile(p2p_manager_->FileGetPath(file_id), existing_data.c_str(),
-                      1000), 1000);
+  ASSERT_EQ(
+      WriteFile(
+          p2p_manager_->FileGetPath(file_id), existing_data.c_str(), 1000),
+      1000);
 
   // Check that the file is there.
   EXPECT_EQ(1000, p2p_manager_->FileGetSize(file_id));
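
The test rewrite above follows ActionProcessor's ownership model: actions are created with std::make_unique and handed over via EnqueueAction(std::move(...)), so any raw pointer still needed afterwards (for BondActions(), set_delegate(), etc.) has to be taken before the move. A minimal standalone sketch of that idiom; Processor, Action and MyAction are toy stand-ins:

#include <memory>
#include <utility>
#include <vector>

// Toy stand-ins for ActionProcessor and an Action; illustrative only.
struct Action {
  virtual ~Action() = default;
};
struct MyAction : Action {
  int value = 0;
};

class Processor {
 public:
  // Takes ownership, as ActionProcessor::EnqueueAction() does after this change.
  void EnqueueAction(std::unique_ptr<Action> action) {
    actions_.push_back(std::move(action));
  }

 private:
  std::vector<std::unique_ptr<Action>> actions_;
};

int main() {
  auto action = std::make_unique<MyAction>();
  // Keep a raw, non-owning pointer before handing over ownership; this is what
  // the tests do before calling BondActions() and set_delegate().
  MyAction* raw = action.get();
  Processor processor;
  processor.EnqueueAction(std::move(action));
  raw->value = 42;  // Still valid: the processor keeps the action alive.
  return 0;
}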
diff --git a/payload_consumer/extent_reader.cc b/payload_consumer/extent_reader.cc
index 96ea918..ad983ae 100644
--- a/payload_consumer/extent_reader.cc
+++ b/payload_consumer/extent_reader.cc
@@ -16,6 +16,8 @@
 
 #include "update_engine/payload_consumer/extent_reader.h"
 
+#include <algorithm>
+
 #include <sys/types.h>
 #include <unistd.h>
 
@@ -52,8 +54,9 @@
   }
   // The first item is zero and upper_bound never returns it because it always
   // returns an item that is greater than the given value.
-  auto extent_idx = std::upper_bound(
-      extents_upper_bounds_.begin(), extents_upper_bounds_.end(), offset) -
+  auto extent_idx =
+      std::upper_bound(
+          extents_upper_bounds_.begin(), extents_upper_bounds_.end(), offset) -
       extents_upper_bounds_.begin() - 1;
   cur_extent_ = std::next(extents_.begin(), extent_idx);
   offset_ = offset;
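
The Seek() reformatted above keeps the same lookup strategy: a prefix-sum vector of extent sizes plus std::upper_bound maps a byte offset to the extent that contains it. A standalone sketch of that lookup (Extent and kBlockSize below are simplified stand-ins for the protobuf type and the real block size):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct Extent {
  uint64_t start_block;
  uint64_t num_blocks;
};

int main() {
  const uint64_t kBlockSize = 4096;
  std::vector<Extent> extents = {{10, 2}, {50, 1}, {7, 3}};

  // upper_bounds[i] holds the number of bytes covered by extents [0, i); the
  // leading 0 means upper_bound() can never return the first element.
  std::vector<uint64_t> upper_bounds = {0};
  for (const Extent& e : extents)
    upper_bounds.push_back(upper_bounds.back() + e.num_blocks * kBlockSize);

  const uint64_t offset = 2 * kBlockSize + 123;  // Inside the second extent.
  // upper_bound() returns the first bound strictly greater than |offset|;
  // stepping back one slot yields the index of the containing extent.
  auto extent_idx =
      std::upper_bound(upper_bounds.begin(), upper_bounds.end(), offset) -
      upper_bounds.begin() - 1;
  std::cout << "offset " << offset << " falls in extent #" << extent_idx
            << " (start_block " << extents[extent_idx].start_block << ")\n";
  return 0;
}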
diff --git a/payload_consumer/extent_writer.h b/payload_consumer/extent_writer.h
index 2c15861..9e53561 100644
--- a/payload_consumer/extent_writer.h
+++ b/payload_consumer/extent_writer.h
@@ -35,9 +35,7 @@
 class ExtentWriter {
  public:
   ExtentWriter() = default;
-  virtual ~ExtentWriter() {
-    LOG_IF(ERROR, !end_called_) << "End() not called on ExtentWriter.";
-  }
+  virtual ~ExtentWriter() = default;
 
   // Returns true on success.
   virtual bool Init(FileDescriptorPtr fd,
@@ -46,16 +44,6 @@
 
   // Returns true on success.
   virtual bool Write(const void* bytes, size_t count) = 0;
-
-  // Should be called when all writing is complete. Returns true on success.
-  // The fd is not closed. Caller is responsible for closing it.
-  bool End() {
-    end_called_ = true;
-    return EndImpl();
-  }
-  virtual bool EndImpl() = 0;
- private:
-  bool end_called_{false};
 };
 
 // DirectExtentWriter is probably the simplest ExtentWriter implementation.
@@ -76,7 +64,6 @@
     return true;
   }
   bool Write(const void* bytes, size_t count) override;
-  bool EndImpl() override { return true; }
 
  private:
   FileDescriptorPtr fd_{nullptr};
@@ -89,48 +76,6 @@
   google::protobuf::RepeatedPtrField<Extent>::iterator cur_extent_;
 };
 
-// Takes an underlying ExtentWriter to which all operations are delegated.
-// When End() is called, ZeroPadExtentWriter ensures that the total number
-// of bytes written is a multiple of block_size_. If not, it writes zeros
-// to pad as needed.
-
-class ZeroPadExtentWriter : public ExtentWriter {
- public:
-  explicit ZeroPadExtentWriter(
-      std::unique_ptr<ExtentWriter> underlying_extent_writer)
-      : underlying_extent_writer_(std::move(underlying_extent_writer)) {}
-  ~ZeroPadExtentWriter() override = default;
-
-  bool Init(FileDescriptorPtr fd,
-            const google::protobuf::RepeatedPtrField<Extent>& extents,
-            uint32_t block_size) override {
-    block_size_ = block_size;
-    return underlying_extent_writer_->Init(fd, extents, block_size);
-  }
-  bool Write(const void* bytes, size_t count) override {
-    if (underlying_extent_writer_->Write(bytes, count)) {
-      bytes_written_mod_block_size_ += count;
-      bytes_written_mod_block_size_ %= block_size_;
-      return true;
-    }
-    return false;
-  }
-  bool EndImpl() override {
-    if (bytes_written_mod_block_size_) {
-      const size_t write_size = block_size_ - bytes_written_mod_block_size_;
-      brillo::Blob zeros(write_size, 0);
-      TEST_AND_RETURN_FALSE(underlying_extent_writer_->Write(zeros.data(),
-                                                             write_size));
-    }
-    return underlying_extent_writer_->End();
-  }
-
- private:
-  std::unique_ptr<ExtentWriter> underlying_extent_writer_;
-  size_t block_size_{0};
-  size_t bytes_written_mod_block_size_{0};
-};
-
 }  // namespace chromeos_update_engine
 
 #endif  // UPDATE_ENGINE_PAYLOAD_CONSUMER_EXTENT_WRITER_H_
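
With End()/EndImpl() and ZeroPadExtentWriter removed, an ExtentWriter is now just Init() followed by Write() calls, and the caller remains responsible for flushing and closing the descriptor. A hedged usage sketch against the headers above; WriteToExtents() is a hypothetical helper, not an existing function:

#include "update_engine/payload_consumer/extent_writer.h"
#include "update_engine/payload_consumer/file_descriptor.h"

namespace chromeos_update_engine {

// Hypothetical helper: writes |size| bytes from |data| across |extents| of the
// already-open |fd|. No End() call is needed under the new interface.
bool WriteToExtents(FileDescriptorPtr fd,
                    const google::protobuf::RepeatedPtrField<Extent>& extents,
                    uint32_t block_size,
                    const void* data,
                    size_t size) {
  DirectExtentWriter writer;
  if (!writer.Init(fd, extents, block_size))
    return false;
  return writer.Write(data, size);
}

}  // namespace chromeos_update_engine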
diff --git a/payload_consumer/extent_writer_unittest.cc b/payload_consumer/extent_writer_unittest.cc
index 48b27cb..aef856b 100644
--- a/payload_consumer/extent_writer_unittest.cc
+++ b/payload_consumer/extent_writer_unittest.cc
@@ -50,16 +50,13 @@
     fd_.reset(new EintrSafeFileDescriptor);
     ASSERT_TRUE(fd_->Open(temp_file_.path().c_str(), O_RDWR, 0600));
   }
-  void TearDown() override {
-    fd_->Close();
-  }
+  void TearDown() override { fd_->Close(); }
 
   // Writes data to an extent writer in 'chunk_size' chunks with
   // the first chunk of size first_chunk_size. It calculates what the
   // resultant file should look like and ensures that the extent writer
   // wrote the file correctly.
   void WriteAlignedExtents(size_t chunk_size, size_t first_chunk_size);
-  void TestZeroPad(bool aligned_size);
 
   FileDescriptorPtr fd_;
   test_utils::ScopedTempFile temp_file_{"ExtentWriterTest-file.XXXXXX"};
@@ -72,7 +69,6 @@
   EXPECT_TRUE(
       direct_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
   EXPECT_TRUE(direct_writer.Write(bytes.data(), bytes.size()));
-  EXPECT_TRUE(direct_writer.End());
 
   EXPECT_EQ(static_cast<off_t>(kBlockSize + bytes.size()),
             utils::FileSize(temp_file_.path()));
@@ -81,8 +77,8 @@
   EXPECT_TRUE(utils::ReadFile(temp_file_.path(), &result_file));
 
   brillo::Blob expected_file(kBlockSize);
-  expected_file.insert(expected_file.end(),
-                       bytes.data(), bytes.data() + bytes.size());
+  expected_file.insert(
+      expected_file.end(), bytes.data(), bytes.data() + bytes.size());
   ExpectVectorsEq(expected_file, result_file);
 }
 
@@ -92,7 +88,6 @@
   EXPECT_TRUE(
       direct_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
   EXPECT_TRUE(direct_writer.Write(nullptr, 0));
-  EXPECT_TRUE(direct_writer.End());
 }
 
 TEST_F(ExtentWriterTest, OverflowExtentTest) {
@@ -127,7 +122,6 @@
     EXPECT_TRUE(direct_writer.Write(&data[bytes_written], bytes_to_write));
     bytes_written += bytes_to_write;
   }
-  EXPECT_TRUE(direct_writer.End());
 
   EXPECT_EQ(static_cast<off_t>(data.size()),
             utils::FileSize(temp_file_.path()));
@@ -139,54 +133,10 @@
   expected_file.insert(expected_file.end(),
                        data.begin() + kBlockSize,
                        data.begin() + kBlockSize * 2);
-  expected_file.insert(expected_file.end(),
-                       data.begin(), data.begin() + kBlockSize);
-  expected_file.insert(expected_file.end(),
-                       data.begin() + kBlockSize * 2, data.end());
-  ExpectVectorsEq(expected_file, result_file);
-}
-
-TEST_F(ExtentWriterTest, ZeroPadNullTest) {
-  TestZeroPad(true);
-}
-
-TEST_F(ExtentWriterTest, ZeroPadFillTest) {
-  TestZeroPad(false);
-}
-
-void ExtentWriterTest::TestZeroPad(bool aligned_size) {
-  vector<Extent> extents = {ExtentForRange(1, 1), ExtentForRange(0, 1)};
-  brillo::Blob data(kBlockSize * 2);
-  test_utils::FillWithData(&data);
-
-  ZeroPadExtentWriter zero_pad_writer(std::make_unique<DirectExtentWriter>());
-
-  EXPECT_TRUE(
-      zero_pad_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
-  size_t bytes_to_write = data.size();
-  const size_t missing_bytes = (aligned_size ? 0 : 9);
-  bytes_to_write -= missing_bytes;
-  fd_->Seek(kBlockSize - missing_bytes, SEEK_SET);
-  EXPECT_EQ(3, fd_->Write("xxx", 3));
-  ASSERT_TRUE(zero_pad_writer.Write(data.data(), bytes_to_write));
-  EXPECT_TRUE(zero_pad_writer.End());
-
-  EXPECT_EQ(static_cast<off_t>(data.size()),
-            utils::FileSize(temp_file_.path()));
-
-  brillo::Blob result_file;
-  EXPECT_TRUE(utils::ReadFile(temp_file_.path(), &result_file));
-
-  brillo::Blob expected_file;
-  expected_file.insert(expected_file.end(),
-                       data.begin() + kBlockSize,
-                       data.begin() + kBlockSize * 2);
-  expected_file.insert(expected_file.end(),
-                       data.begin(), data.begin() + kBlockSize);
-  if (missing_bytes) {
-    memset(&expected_file[kBlockSize - missing_bytes], 0, missing_bytes);
-  }
-
+  expected_file.insert(
+      expected_file.end(), data.begin(), data.begin() + kBlockSize);
+  expected_file.insert(
+      expected_file.end(), data.begin() + kBlockSize * 2, data.end());
   ExpectVectorsEq(expected_file, result_file);
 }
 
@@ -206,12 +156,11 @@
 
   size_t bytes_written = 0;
   while (bytes_written < (block_count * kBlockSize)) {
-    size_t bytes_to_write = min(block_count * kBlockSize - bytes_written,
-                                data.size());
+    size_t bytes_to_write =
+        min(block_count * kBlockSize - bytes_written, data.size());
     EXPECT_TRUE(direct_writer.Write(data.data(), bytes_to_write));
     bytes_written += bytes_to_write;
   }
-  EXPECT_TRUE(direct_writer.End());
 
   // check file size, then data inside
   ASSERT_EQ(static_cast<off_t>(2 * kBlockSize),
diff --git a/payload_consumer/fake_extent_writer.h b/payload_consumer/fake_extent_writer.h
index 4418a9e..7b2b7ac 100644
--- a/payload_consumer/fake_extent_writer.h
+++ b/payload_consumer/fake_extent_writer.h
@@ -40,26 +40,20 @@
     return true;
   };
   bool Write(const void* bytes, size_t count) override {
-    if (!init_called_ || end_called_)
+    if (!init_called_)
       return false;
     written_data_.insert(written_data_.end(),
                          reinterpret_cast<const uint8_t*>(bytes),
                          reinterpret_cast<const uint8_t*>(bytes) + count);
     return true;
   }
-  bool EndImpl() override {
-    end_called_ = true;
-    return true;
-  }
 
   // Fake methods.
   bool InitCalled() { return init_called_; }
-  bool EndCalled() { return end_called_; }
   brillo::Blob WrittenData() { return written_data_; }
 
  private:
   bool init_called_{false};
-  bool end_called_{false};
   brillo::Blob written_data_;
 
   DISALLOW_COPY_AND_ASSIGN(FakeExtentWriter);
diff --git a/payload_consumer/fake_file_descriptor.cc b/payload_consumer/fake_file_descriptor.cc
index d54856b..63af181 100644
--- a/payload_consumer/fake_file_descriptor.cc
+++ b/payload_consumer/fake_file_descriptor.cc
@@ -73,4 +73,12 @@
   return offset_;
 }
 
+brillo::Blob FakeFileDescriptorData(size_t size) {
+  brillo::Blob ret(size);
+  FakeFileDescriptor fd;
+  fd.SetFileSize(size);
+  fd.Read(ret.data(), size);
+  return ret;
+}
+
 }  // namespace chromeos_update_engine
diff --git a/payload_consumer/fake_file_descriptor.h b/payload_consumer/fake_file_descriptor.h
index f17820b..8dbd10b 100644
--- a/payload_consumer/fake_file_descriptor.h
+++ b/payload_consumer/fake_file_descriptor.h
@@ -22,6 +22,8 @@
 #include <utility>
 #include <vector>
 
+#include <brillo/secure_blob.h>
+
 #include "update_engine/payload_consumer/file_descriptor.h"
 
 namespace chromeos_update_engine {
@@ -67,9 +69,7 @@
     return false;
   }
 
-  bool Flush() override {
-    return open_;
-  }
+  bool Flush() override { return open_; }
 
   bool Close() override {
     if (!open_)
@@ -121,6 +121,9 @@
   DISALLOW_COPY_AND_ASSIGN(FakeFileDescriptor);
 };
 
+// Return a blob with the first |size| bytes of a FakeFileDescriptor stream.
+brillo::Blob FakeFileDescriptorData(size_t size);
+
 }  // namespace chromeos_update_engine
 
 #endif  // UPDATE_ENGINE_PAYLOAD_CONSUMER_FAKE_FILE_DESCRIPTOR_H_
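
FakeFileDescriptorData() regenerates the deterministic stream a FakeFileDescriptor produces, so tests can use it directly as the expected value for data read through such a descriptor. A hedged gtest sketch, assuming only the calls shown in this change:

#include <gtest/gtest.h>

#include "update_engine/payload_consumer/fake_file_descriptor.h"

namespace chromeos_update_engine {

TEST(FakeFileDescriptorExampleTest, ReadMatchesGeneratedData) {
  constexpr size_t kSize = 4096;
  FakeFileDescriptor fd;
  fd.SetFileSize(kSize);

  brillo::Blob read_back(kSize);
  fd.Read(read_back.data(), kSize);
  // The helper regenerates the same deterministic byte stream.
  EXPECT_EQ(FakeFileDescriptorData(kSize), read_back);
}

}  // namespace chromeos_update_engine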
diff --git a/payload_consumer/fec_file_descriptor.cc b/payload_consumer/fec_file_descriptor.cc
new file mode 100644
index 0000000..de22cf3
--- /dev/null
+++ b/payload_consumer/fec_file_descriptor.cc
@@ -0,0 +1,78 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/fec_file_descriptor.h"
+
+namespace chromeos_update_engine {
+
+bool FecFileDescriptor::Open(const char* path, int flags) {
+  return Open(path, flags, 0600);
+}
+
+bool FecFileDescriptor::Open(const char* path, int flags, mode_t mode) {
+  if (!fh_.open(path, flags, mode))
+    return false;
+
+  if (!fh_.has_ecc()) {
+    LOG(ERROR) << "No ECC data in the passed file";
+    fh_.close();
+    return false;
+  }
+
+  fec_status status;
+  if (!fh_.get_status(status)) {
+    LOG(ERROR) << "Couldn't load ECC status";
+    fh_.close();
+    return false;
+  }
+
+  dev_size_ = status.data_size;
+  return true;
+}
+
+ssize_t FecFileDescriptor::Read(void* buf, size_t count) {
+  return fh_.read(buf, count);
+}
+
+ssize_t FecFileDescriptor::Write(const void* buf, size_t count) {
+  errno = EROFS;
+  return -1;
+}
+
+off64_t FecFileDescriptor::Seek(off64_t offset, int whence) {
+  if (fh_.seek(offset, whence)) {
+    return offset;
+  }
+  return -1;
+}
+
+uint64_t FecFileDescriptor::BlockDevSize() {
+  return dev_size_;
+}
+
+bool FecFileDescriptor::BlkIoctl(int request,
+                                 uint64_t start,
+                                 uint64_t length,
+                                 int* result) {
+  // No IOCTL pass-through in this mode.
+  return false;
+}
+
+bool FecFileDescriptor::Close() {
+  return fh_.close();
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/fec_file_descriptor.h b/payload_consumer/fec_file_descriptor.h
new file mode 100644
index 0000000..e7f2e40
--- /dev/null
+++ b/payload_consumer/fec_file_descriptor.h
@@ -0,0 +1,65 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_FEC_FILE_DESCRIPTOR_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_FEC_FILE_DESCRIPTOR_H_
+
+#include <fec/io.h>
+
+#include "update_engine/payload_consumer/file_descriptor.h"
+
+// A FileDescriptor implementation with error correction based on the "libfec"
+// library. libfec, on the running system, allows parsing the error
+// correction blocks stored in partitions that have verity and error correction
+// enabled. This information is present in the raw block device, but is of
+// course not available via the dm-verity block device.
+
+namespace chromeos_update_engine {
+
+// An error corrected file based on FEC.
+class FecFileDescriptor : public FileDescriptor {
+ public:
+  FecFileDescriptor() = default;
+  ~FecFileDescriptor() = default;
+
+  // Interface methods.
+  bool Open(const char* path, int flags, mode_t mode) override;
+  bool Open(const char* path, int flags) override;
+  ssize_t Read(void* buf, size_t count) override;
+  ssize_t Write(const void* buf, size_t count) override;
+  off64_t Seek(off64_t offset, int whence) override;
+  uint64_t BlockDevSize() override;
+  bool BlkIoctl(int request,
+                uint64_t start,
+                uint64_t length,
+                int* result) override;
+  bool Flush() override { return true; }
+  bool Close() override;
+  bool IsSettingErrno() override { return true; }
+  bool IsOpen() override {
+    // The bool operator on the fec::io class tells whether the internal
+    // handle is open.
+    return static_cast<bool>(fh_);
+  }
+
+ protected:
+  fec::io fh_;
+  uint64_t dev_size_{0};
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_PAYLOAD_CONSUMER_FEC_FILE_DESCRIPTOR_H_
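
A hedged sketch of how the new descriptor might be used to read error-corrected data from a raw block device; ReadWithFec() is a hypothetical helper, and only the FecFileDescriptor calls declared above are assumed:

#include <fcntl.h>

#include <cstdio>

#include <brillo/secure_blob.h>

#include "update_engine/payload_consumer/fec_file_descriptor.h"

namespace chromeos_update_engine {

// Hypothetical helper: reads |count| error-corrected bytes starting at
// |offset| from the raw block device at |path|.
bool ReadWithFec(const char* path,
                 off64_t offset,
                 size_t count,
                 brillo::Blob* out) {
  FecFileDescriptor fd;
  // Opening fails if the device carries no ECC data, so callers would need a
  // plain FileDescriptor fallback in that case.
  if (!fd.Open(path, O_RDONLY))
    return false;
  if (fd.Seek(offset, SEEK_SET) != offset)
    return false;
  out->resize(count);
  bool ok = fd.Read(out->data(), count) == static_cast<ssize_t>(count);
  fd.Close();
  return ok;
}

}  // namespace chromeos_update_engine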
diff --git a/payload_consumer/file_descriptor.cc b/payload_consumer/file_descriptor.cc
index 4eabb8f..1de615c 100644
--- a/payload_consumer/file_descriptor.cc
+++ b/payload_consumer/file_descriptor.cc
@@ -89,7 +89,7 @@
   // operations.
 #ifndef BLKZEROOUT
   return false;
-#else  // defined(BLKZEROOUT)
+#else   // defined(BLKZEROOUT)
   DCHECK(request == BLKDISCARD || request == BLKZEROOUT ||
          request == BLKSECDISCARD);
   // On some devices, the BLKDISCARD will actually read back as zeros, instead
diff --git a/payload_consumer/file_descriptor.h b/payload_consumer/file_descriptor.h
index 5e524d9..55f76c6 100644
--- a/payload_consumer/file_descriptor.h
+++ b/payload_consumer/file_descriptor.h
@@ -18,8 +18,8 @@
 #define UPDATE_ENGINE_PAYLOAD_CONSUMER_FILE_DESCRIPTOR_H_
 
 #include <errno.h>
-#include <memory>
 #include <sys/types.h>
+#include <memory>
 
 #include <base/logging.h>
 
@@ -125,12 +125,8 @@
                 int* result) override;
   bool Flush() override;
   bool Close() override;
-  bool IsSettingErrno() override {
-    return true;
-  }
-  bool IsOpen() override {
-    return (fd_ >= 0);
-  }
+  bool IsSettingErrno() override { return true; }
+  bool IsOpen() override { return (fd_ >= 0); }
 
  protected:
   int fd_;
diff --git a/payload_consumer/file_descriptor_utils.cc b/payload_consumer/file_descriptor_utils.cc
index b1902de..846cbd7 100644
--- a/payload_consumer/file_descriptor_utils.cc
+++ b/payload_consumer/file_descriptor_utils.cc
@@ -88,7 +88,6 @@
                         utils::BlocksInExtents(tgt_extents));
   TEST_AND_RETURN_FALSE(
       CommonHashExtents(source, src_extents, &writer, block_size, hash_out));
-  TEST_AND_RETURN_FALSE(writer.End());
   return true;
 }
 
@@ -96,10 +95,7 @@
                         const RepeatedPtrField<Extent>& extents,
                         uint64_t block_size,
                         brillo::Blob* hash_out) {
-  TEST_AND_RETURN_FALSE(hash_out != nullptr);
-  TEST_AND_RETURN_FALSE(
-      CommonHashExtents(source, extents, nullptr, block_size, hash_out));
-  return true;
+  return CommonHashExtents(source, extents, nullptr, block_size, hash_out);
 }
 
 }  // namespace fd_utils
diff --git a/payload_consumer/file_descriptor_utils.h b/payload_consumer/file_descriptor_utils.h
index 397c35e..68fb001 100644
--- a/payload_consumer/file_descriptor_utils.h
+++ b/payload_consumer/file_descriptor_utils.h
@@ -42,7 +42,7 @@
     uint64_t block_size,
     brillo::Blob* hash_out);
 
-// Reads blocks from |source| and caculates the hash. The blocks to read are
+// Reads blocks from |source| and calculates the hash. The blocks to read are
 // specified by |extents|. Stores the hash in |hash_out| if it is not null. The
 // block size is passed as |block_size|. In case of a read error, it returns
 // false and the value pointed to by |hash_out| is undefined.
diff --git a/payload_consumer/file_descriptor_utils_unittest.cc b/payload_consumer/file_descriptor_utils_unittest.cc
index 79d2184..48e610f 100644
--- a/payload_consumer/file_descriptor_utils_unittest.cc
+++ b/payload_consumer/file_descriptor_utils_unittest.cc
@@ -175,10 +175,10 @@
   EXPECT_FALSE(fd_utils::ReadAndHashExtents(source_, extents, 4, &hash_out));
 }
 
-// Test that if hash_out is null, then it should fail.
+// Test that if hash_out is null, the call still succeeds (hashing is skipped).
 TEST_F(FileDescriptorUtilsTest, ReadAndHashExtentsWithoutHashingTest) {
   auto extents = CreateExtentList({{0, 5}});
-  EXPECT_FALSE(fd_utils::ReadAndHashExtents(source_, extents, 4, nullptr));
+  EXPECT_TRUE(fd_utils::ReadAndHashExtents(source_, extents, 4, nullptr));
 }
 
 // Tests that it can calculate the hash properly.
diff --git a/payload_consumer/file_writer.cc b/payload_consumer/file_writer.cc
index d280ddb..84b19ed 100644
--- a/payload_consumer/file_writer.cc
+++ b/payload_consumer/file_writer.cc
@@ -34,8 +34,7 @@
 
   size_t bytes_written = 0;
   while (bytes_written < count) {
-    ssize_t rc = write(fd_, char_bytes + bytes_written,
-                       count - bytes_written);
+    ssize_t rc = write(fd_, char_bytes + bytes_written, count - bytes_written);
     if (rc < 0)
       return false;
     bytes_written += rc;
diff --git a/payload_consumer/file_writer.h b/payload_consumer/file_writer.h
index 96ebde6..cdc9fa0 100644
--- a/payload_consumer/file_writer.h
+++ b/payload_consumer/file_writer.h
@@ -47,11 +47,9 @@
   // in addition if the returned value is false. By default this method
   // returns kActionExitDownloadWriteError as the error code, but subclasses
   // can override if they wish to return more specific error codes.
-  virtual bool Write(const void* bytes,
-                     size_t count,
-                     ErrorCode* error) {
-     *error = ErrorCode::kDownloadWriteError;
-     return Write(bytes, count);
+  virtual bool Write(const void* bytes, size_t count, ErrorCode* error) {
+    *error = ErrorCode::kDownloadWriteError;
+    return Write(bytes, count);
   }
 
   // Wrapper around close. Returns 0 on success or -errno on error.
@@ -92,6 +90,7 @@
       LOG(ERROR) << "FileWriter::Close failed: "
                  << utils::ErrnoNumberAsString(-err);
   }
+
  private:
   FileWriter* writer_;
 
diff --git a/payload_consumer/file_writer_unittest.cc b/payload_consumer/file_writer_unittest.cc
index 92837c8..59cfe2b 100644
--- a/payload_consumer/file_writer_unittest.cc
+++ b/payload_consumer/file_writer_unittest.cc
@@ -32,46 +32,42 @@
 
 namespace chromeos_update_engine {
 
-class FileWriterTest : public ::testing::Test { };
+class FileWriterTest : public ::testing::Test {};
 
 TEST(FileWriterTest, SimpleTest) {
   // Create a uniquely named file for testing.
-  string path;
-  ASSERT_TRUE(utils::MakeTempFile("FileWriterTest-XXXXXX", &path, nullptr));
-  ScopedPathUnlinker path_unlinker(path);
-
+  test_utils::ScopedTempFile file("FileWriterTest-XXXXXX");
   DirectFileWriter file_writer;
-  EXPECT_EQ(0, file_writer.Open(path.c_str(),
-                                O_CREAT | O_LARGEFILE | O_TRUNC | O_WRONLY,
-                                0644));
+  EXPECT_EQ(0,
+            file_writer.Open(file.path().c_str(),
+                             O_CREAT | O_LARGEFILE | O_TRUNC | O_WRONLY,
+                             0644));
   EXPECT_TRUE(file_writer.Write("test", 4));
   brillo::Blob actual_data;
-  EXPECT_TRUE(utils::ReadFile(path, &actual_data));
+  EXPECT_TRUE(utils::ReadFile(file.path(), &actual_data));
 
-  EXPECT_FALSE(memcmp("test", actual_data.data(), actual_data.size()));
+  EXPECT_EQ("test", string(actual_data.begin(), actual_data.end()));
   EXPECT_EQ(0, file_writer.Close());
 }
 
 TEST(FileWriterTest, ErrorTest) {
   DirectFileWriter file_writer;
   const string path("/tmp/ENOENT/FileWriterTest");
-  EXPECT_EQ(-ENOENT, file_writer.Open(path.c_str(),
-                                      O_CREAT | O_LARGEFILE | O_TRUNC, 0644));
+  EXPECT_EQ(
+      -ENOENT,
+      file_writer.Open(path.c_str(), O_CREAT | O_LARGEFILE | O_TRUNC, 0644));
 }
 
 TEST(FileWriterTest, WriteErrorTest) {
   // Create a uniquely named file for testing.
-  string path;
-  ASSERT_TRUE(utils::MakeTempFile("FileWriterTest-XXXXXX", &path, nullptr));
-  ScopedPathUnlinker path_unlinker(path);
-
+  test_utils::ScopedTempFile file("FileWriterTest-XXXXXX");
   DirectFileWriter file_writer;
-  EXPECT_EQ(0, file_writer.Open(path.c_str(),
-                                O_CREAT | O_LARGEFILE | O_TRUNC | O_RDONLY,
-                                0644));
+  EXPECT_EQ(0,
+            file_writer.Open(file.path().c_str(),
+                             O_CREAT | O_LARGEFILE | O_TRUNC | O_RDONLY,
+                             0644));
   EXPECT_FALSE(file_writer.Write("x", 1));
   EXPECT_EQ(0, file_writer.Close());
 }
 
-
 }  // namespace chromeos_update_engine
diff --git a/payload_consumer/filesystem_verifier_action.cc b/payload_consumer/filesystem_verifier_action.cc
index 5edde9e..36e5a35 100644
--- a/payload_consumer/filesystem_verifier_action.cc
+++ b/payload_consumer/filesystem_verifier_action.cc
@@ -29,10 +29,7 @@
 #include <brillo/data_encoding.h>
 #include <brillo/streams/file_stream.h>
 
-#include "update_engine/common/boot_control_interface.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/delta_performer.h"
-#include "update_engine/payload_consumer/payload_constants.h"
 
 using brillo::data_encoding::Base64Encode;
 using std::string;
@@ -70,10 +67,6 @@
   Cleanup(ErrorCode::kSuccess);  // error code is ignored if canceled_ is true.
 }
 
-bool FilesystemVerifierAction::IsCleanupPending() const {
-  return src_stream_ != nullptr;
-}
-
 void FilesystemVerifierAction::Cleanup(ErrorCode code) {
   src_stream_.reset();
   // This memory is not used anymore.
@@ -91,61 +84,97 @@
     Cleanup(ErrorCode::kSuccess);
     return;
   }
-  InstallPlan::Partition& partition =
+  const InstallPlan::Partition& partition =
       install_plan_.partitions[partition_index_];
 
   string part_path;
   switch (verifier_step_) {
     case VerifierStep::kVerifySourceHash:
       part_path = partition.source_path;
-      remaining_size_ = partition.source_size;
+      partition_size_ = partition.source_size;
       break;
     case VerifierStep::kVerifyTargetHash:
       part_path = partition.target_path;
-      remaining_size_ = partition.target_size;
+      partition_size_ = partition.target_size;
       break;
   }
+
+  if (part_path.empty()) {
+    if (partition_size_ == 0) {
+      LOG(INFO) << "Skip hashing partition " << partition_index_ << " ("
+                << partition.name << ") because size is 0.";
+      partition_index_++;
+      StartPartitionHashing();
+      return;
+    }
+    LOG(ERROR) << "Cannot hash partition " << partition_index_ << " ("
+               << partition.name
+               << ") because its device path cannot be determined.";
+    Cleanup(ErrorCode::kFilesystemVerifierError);
+    return;
+  }
+
   LOG(INFO) << "Hashing partition " << partition_index_ << " ("
             << partition.name << ") on device " << part_path;
-  if (part_path.empty())
-    return Cleanup(ErrorCode::kFilesystemVerifierError);
 
   brillo::ErrorPtr error;
-  src_stream_ = brillo::FileStream::Open(
-      base::FilePath(part_path),
-      brillo::Stream::AccessMode::READ,
-      brillo::FileStream::Disposition::OPEN_EXISTING,
-      &error);
+  src_stream_ =
+      brillo::FileStream::Open(base::FilePath(part_path),
+                               brillo::Stream::AccessMode::READ,
+                               brillo::FileStream::Disposition::OPEN_EXISTING,
+                               &error);
 
   if (!src_stream_) {
     LOG(ERROR) << "Unable to open " << part_path << " for reading";
-    return Cleanup(ErrorCode::kFilesystemVerifierError);
+    Cleanup(ErrorCode::kFilesystemVerifierError);
+    return;
   }
 
   buffer_.resize(kReadFileBufferSize);
-  read_done_ = false;
-  hasher_.reset(new HashCalculator());
+  hasher_ = std::make_unique<HashCalculator>();
+
+  offset_ = 0;
+  if (verifier_step_ == VerifierStep::kVerifyTargetHash &&
+      install_plan_.write_verity) {
+    if (!verity_writer_->Init(partition)) {
+      Cleanup(ErrorCode::kVerityCalculationError);
+      return;
+    }
+  }
 
   // Start the first read.
   ScheduleRead();
 }
 
 void FilesystemVerifierAction::ScheduleRead() {
-  size_t bytes_to_read = std::min(static_cast<int64_t>(buffer_.size()),
-                                  remaining_size_);
+  const InstallPlan::Partition& partition =
+      install_plan_.partitions[partition_index_];
+
+  // We can only start reading anything past |hash_tree_offset| after we have
+  // already read all the data blocks that the hash tree covers. The same
+  // applies to FEC.
+  uint64_t read_end = partition_size_;
+  if (partition.hash_tree_size != 0 &&
+      offset_ < partition.hash_tree_data_offset + partition.hash_tree_data_size)
+    read_end = std::min(read_end, partition.hash_tree_offset);
+  if (partition.fec_size != 0 &&
+      offset_ < partition.fec_data_offset + partition.fec_data_size)
+    read_end = std::min(read_end, partition.fec_offset);
+  size_t bytes_to_read =
+      std::min(static_cast<uint64_t>(buffer_.size()), read_end - offset_);
   if (!bytes_to_read) {
-    OnReadDoneCallback(0);
+    FinishPartitionHashing();
     return;
   }
 
   bool read_async_ok = src_stream_->ReadAsync(
-    buffer_.data(),
-    bytes_to_read,
-    base::Bind(&FilesystemVerifierAction::OnReadDoneCallback,
-               base::Unretained(this)),
-    base::Bind(&FilesystemVerifierAction::OnReadErrorCallback,
-               base::Unretained(this)),
-    nullptr);
+      buffer_.data(),
+      bytes_to_read,
+      base::Bind(&FilesystemVerifierAction::OnReadDoneCallback,
+                 base::Unretained(this)),
+      base::Bind(&FilesystemVerifierAction::OnReadErrorCallback,
+                 base::Unretained(this)),
+      nullptr);
 
   if (!read_async_ok) {
     LOG(ERROR) << "Unable to schedule an asynchronous read from the stream.";
@@ -154,36 +183,44 @@
 }
 
 void FilesystemVerifierAction::OnReadDoneCallback(size_t bytes_read) {
+  if (cancelled_) {
+    Cleanup(ErrorCode::kError);
+    return;
+  }
+
   if (bytes_read == 0) {
-    read_done_ = true;
-  } else {
-    remaining_size_ -= bytes_read;
-    CHECK(!read_done_);
-    if (!hasher_->Update(buffer_.data(), bytes_read)) {
-      LOG(ERROR) << "Unable to update the hash.";
-      Cleanup(ErrorCode::kError);
+    LOG(ERROR) << "Failed to read the remaining " << partition_size_ - offset_
+               << " bytes from partition "
+               << install_plan_.partitions[partition_index_].name;
+    Cleanup(ErrorCode::kFilesystemVerifierError);
+    return;
+  }
+
+  if (!hasher_->Update(buffer_.data(), bytes_read)) {
+    LOG(ERROR) << "Unable to update the hash.";
+    Cleanup(ErrorCode::kError);
+    return;
+  }
+
+  if (verifier_step_ == VerifierStep::kVerifyTargetHash &&
+      install_plan_.write_verity) {
+    if (!verity_writer_->Update(offset_, buffer_.data(), bytes_read)) {
+      Cleanup(ErrorCode::kVerityCalculationError);
       return;
     }
   }
 
-  // We either terminate the current partition or have more data to read.
-  if (cancelled_)
-    return Cleanup(ErrorCode::kError);
+  offset_ += bytes_read;
 
-  if (read_done_ || remaining_size_ == 0) {
-    if (remaining_size_ != 0) {
-      LOG(ERROR) << "Failed to read the remaining " << remaining_size_
-                 << " bytes from partition "
-                 << install_plan_.partitions[partition_index_].name;
-      return Cleanup(ErrorCode::kFilesystemVerifierError);
-    }
-    return FinishPartitionHashing();
+  if (offset_ == partition_size_) {
+    FinishPartitionHashing();
+    return;
   }
+
   ScheduleRead();
 }
 
-void FilesystemVerifierAction::OnReadErrorCallback(
-      const brillo::Error* error) {
+void FilesystemVerifierAction::OnReadErrorCallback(const brillo::Error* error) {
   // TODO(deymo): Transform the read-error into a specific ErrorCode.
   LOG(ERROR) << "Asynchronous read failed.";
   Cleanup(ErrorCode::kError);
@@ -192,7 +229,8 @@
 void FilesystemVerifierAction::FinishPartitionHashing() {
   if (!hasher_->Finalize()) {
     LOG(ERROR) << "Unable to finalize the hash.";
-    return Cleanup(ErrorCode::kError);
+    Cleanup(ErrorCode::kError);
+    return;
   }
   InstallPlan::Partition& partition =
       install_plan_.partitions[partition_index_];
@@ -206,7 +244,8 @@
                    << "' partition verification failed.";
         if (partition.source_hash.empty()) {
           // No need to verify source if it is a full payload.
-          return Cleanup(ErrorCode::kNewRootfsVerificationError);
+          Cleanup(ErrorCode::kNewRootfsVerificationError);
+          return;
         }
         // If we have not verified source partition yet, now that the target
         // partition does not match, and it's not a full payload, we need to
@@ -242,7 +281,8 @@
                      "-binary | openssl base64";
         LOG(INFO) << "To get the checksum of partitions in a bin file, "
                   << "run: .../src/scripts/sha256_partitions.sh .../file.bin";
-        return Cleanup(ErrorCode::kDownloadStateInitializationError);
+        Cleanup(ErrorCode::kDownloadStateInitializationError);
+        return;
       }
       // The action will skip kVerifySourceHash step if target partition hash
       // matches, if we are in this step, it means target hash does not match,
@@ -250,7 +290,8 @@
       // code to reflect the error in target partition.
       // We only need to verify the source partition which the target hash does
       // not match, the rest of the partitions don't matter.
-      return Cleanup(ErrorCode::kNewRootfsVerificationError);
+      Cleanup(ErrorCode::kNewRootfsVerificationError);
+      return;
   }
   // Start hashing the next partition, if any.
   hasher_.reset();
diff --git a/payload_consumer/filesystem_verifier_action.h b/payload_consumer/filesystem_verifier_action.h
index 616f7b7..83d6668 100644
--- a/payload_consumer/filesystem_verifier_action.h
+++ b/payload_consumer/filesystem_verifier_action.h
@@ -20,6 +20,7 @@
 #include <sys/stat.h>
 #include <sys/types.h>
 
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -28,6 +29,7 @@
 #include "update_engine/common/action.h"
 #include "update_engine/common/hash_calculator.h"
 #include "update_engine/payload_consumer/install_plan.h"
+#include "update_engine/payload_consumer/verity_writer_interface.h"
 
 // This action will hash all the partitions of the target slot involved in the
 // update. The hashes are then verified against the ones in the InstallPlan.
@@ -49,22 +51,19 @@
 
 class FilesystemVerifierAction : public InstallPlanAction {
  public:
-  FilesystemVerifierAction() = default;
+  FilesystemVerifierAction()
+      : verity_writer_(verity_writer::CreateVerityWriter()) {}
+  ~FilesystemVerifierAction() override = default;
 
   void PerformAction() override;
   void TerminateProcessing() override;
 
-  // Used for testing. Return true if Cleanup() has not yet been called due
-  // to a callback upon the completion or cancellation of the verifier action.
-  // A test should wait until IsCleanupPending() returns false before
-  // terminating the main loop.
-  bool IsCleanupPending() const;
-
   // Debugging/logging
   static std::string StaticType() { return "FilesystemVerifierAction"; }
   std::string Type() const override { return StaticType(); }
 
  private:
+  friend class FilesystemVerifierActionTestDelegate;
   // Starts the hashing of the current partition. If there aren't any partitions
   // remaining to be hashed, it finishes the action.
   void StartPartitionHashing();
@@ -99,7 +98,6 @@
   // Buffer for storing data we read.
   brillo::Blob buffer_;
 
-  bool read_done_{false};  // true if reached EOF on the input stream.
   bool cancelled_{false};  // true if the action has been cancelled.
 
   // The install plan we're passed in via the input pipe.
@@ -108,10 +106,18 @@
   // Calculates the hash of the data.
   std::unique_ptr<HashCalculator> hasher_;
 
-  // Reads and hashes this many bytes from the head of the input stream. This
-  // field is initialized from the corresponding InstallPlan::Partition size,
-  // when the partition starts to be hashed.
-  int64_t remaining_size_{0};
+  // Write verity data of the current partition.
+  std::unique_ptr<VerityWriterInterface> verity_writer_;
+
+  // The total number of bytes to read and hash from the head of the input
+  // stream. When the partition starts to be hashed, this field is initialized
+  // from the corresponding InstallPlan::Partition size, which is the total
+  // size update_engine is expected to write and may be smaller than the size
+  // of the partition in the GPT.
+  uint64_t partition_size_{0};
+
+  // The byte offset that we are reading in the current partition.
+  uint64_t offset_{0};
 
   DISALLOW_COPY_AND_ASSIGN(FilesystemVerifierAction);
 };
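
The switch from remaining_size_ to an explicit offset_ also changes how
end-of-stream is handled: hitting EOF before partition_size_ bytes have been
read is now reported as kFilesystemVerifierError instead of being a normal
termination condition. A standalone sketch of that loop shape, with the read
and hash steps abstracted behind callables (names and signatures here are
illustrative only):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Reads exactly |partition_size| bytes in |buffer_size| chunks, feeding
    // each chunk to |hash_update|. A short read (EOF before the expected size)
    // is treated as an error.
    template <typename ReadFn, typename HashFn>
    bool HashExactly(uint64_t partition_size, size_t buffer_size,
                     ReadFn read_at, HashFn hash_update) {
      // read_at(offset, buf, n)  -> bytes actually read (0 on EOF/error).
      // hash_update(buf, n)      -> feeds n bytes into the running hash.
      std::vector<uint8_t> buffer(buffer_size);
      uint64_t offset = 0;
      while (offset < partition_size) {
        size_t want = static_cast<size_t>(
            std::min<uint64_t>(buffer.size(), partition_size - offset));
        size_t got = read_at(offset, buffer.data(), want);
        if (got == 0)
          return false;  // Premature EOF: partition shorter than expected.
        hash_update(buffer.data(), got);
        offset += got;
      }
      return true;
    }
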
diff --git a/payload_consumer/filesystem_verifier_action_unittest.cc b/payload_consumer/filesystem_verifier_action_unittest.cc
index b4f7f7f..cb33404 100644
--- a/payload_consumer/filesystem_verifier_action_unittest.cc
+++ b/payload_consumer/filesystem_verifier_action_unittest.cc
@@ -16,38 +16,29 @@
 
 #include "update_engine/payload_consumer/filesystem_verifier_action.h"
 
-#include <fcntl.h>
-
-#include <set>
+#include <memory>
 #include <string>
-#include <vector>
+#include <utility>
 
 #include <base/bind.h>
 #include <base/posix/eintr_wrapper.h>
-#include <base/strings/string_util.h>
-#include <base/strings/stringprintf.h>
 #include <brillo/message_loops/fake_message_loop.h>
 #include <brillo/message_loops/message_loop_utils.h>
-#include <gmock/gmock.h>
+#include <brillo/secure_blob.h>
 #include <gtest/gtest.h>
 
 #include "update_engine/common/hash_calculator.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/payload_constants.h"
 
 using brillo::MessageLoop;
-using std::set;
 using std::string;
-using std::vector;
 
 namespace chromeos_update_engine {
 
 class FilesystemVerifierActionTest : public ::testing::Test {
  protected:
-  void SetUp() override {
-    loop_.SetAsCurrent();
-  }
+  void SetUp() override { loop_.SetAsCurrent(); }
 
   void TearDown() override {
     EXPECT_EQ(0, brillo::MessageLoopRunMaxIterations(&loop_, 1));
@@ -56,32 +47,22 @@
   // Returns true iff test has completed successfully.
   bool DoTest(bool terminate_early, bool hash_fail);
 
+  void BuildActions(const InstallPlan& install_plan);
+
   brillo::FakeMessageLoop loop_{nullptr};
+  ActionProcessor processor_;
 };
 
 class FilesystemVerifierActionTestDelegate : public ActionProcessorDelegate {
  public:
-  explicit FilesystemVerifierActionTestDelegate(
-      FilesystemVerifierAction* action)
-      : action_(action), ran_(false), code_(ErrorCode::kError) {}
-  void ExitMainLoop() {
-    // We need to wait for the Action to call Cleanup.
-    if (action_->IsCleanupPending()) {
-      LOG(INFO) << "Waiting for Cleanup() to be called.";
-      MessageLoop::current()->PostDelayedTask(
-          FROM_HERE,
-          base::Bind(&FilesystemVerifierActionTestDelegate::ExitMainLoop,
-                     base::Unretained(this)),
-          base::TimeDelta::FromMilliseconds(100));
-    } else {
-      MessageLoop::current()->BreakLoop();
-    }
-  }
+  FilesystemVerifierActionTestDelegate()
+      : ran_(false), code_(ErrorCode::kError) {}
+
   void ProcessingDone(const ActionProcessor* processor, ErrorCode code) {
-    ExitMainLoop();
+    MessageLoop::current()->BreakLoop();
   }
   void ProcessingStopped(const ActionProcessor* processor) {
-    ExitMainLoop();
+    MessageLoop::current()->BreakLoop();
   }
   void ActionCompleted(ActionProcessor* processor,
                        AbstractAction* action,
@@ -89,36 +70,27 @@
     if (action->Type() == FilesystemVerifierAction::StaticType()) {
       ran_ = true;
       code_ = code;
+      EXPECT_FALSE(static_cast<FilesystemVerifierAction*>(action)->src_stream_);
+    } else if (action->Type() ==
+               ObjectCollectorAction<InstallPlan>::StaticType()) {
+      auto collector_action =
+          static_cast<ObjectCollectorAction<InstallPlan>*>(action);
+      install_plan_.reset(new InstallPlan(collector_action->object()));
     }
   }
   bool ran() const { return ran_; }
   ErrorCode code() const { return code_; }
 
+  std::unique_ptr<InstallPlan> install_plan_;
+
  private:
-  FilesystemVerifierAction* action_;
   bool ran_;
   ErrorCode code_;
 };
 
-void StartProcessorInRunLoop(ActionProcessor* processor,
-                             FilesystemVerifierAction* filesystem_copier_action,
-                             bool terminate_early) {
-  processor->StartProcessing();
-  if (terminate_early) {
-    EXPECT_NE(nullptr, filesystem_copier_action);
-    processor->StopProcessing();
-  }
-}
-
 bool FilesystemVerifierActionTest::DoTest(bool terminate_early,
                                           bool hash_fail) {
-  string a_loop_file;
-
-  if (!(utils::MakeTempFile("a_loop_file.XXXXXX", &a_loop_file, nullptr))) {
-    ADD_FAILURE();
-    return false;
-  }
-  ScopedPathUnlinker a_loop_file_unlinker(a_loop_file);
+  test_utils::ScopedTempFile a_loop_file("a_loop_file.XXXXXX");
 
   // Make random data for a.
   const size_t kLoopFileSize = 10 * 1024 * 1024 + 512;
@@ -126,7 +98,7 @@
   test_utils::FillWithData(&a_loop_data);
 
   // Write data to disk
-  if (!(test_utils::WriteFileVector(a_loop_file, a_loop_data))) {
+  if (!(test_utils::WriteFileVector(a_loop_file.path(), a_loop_data))) {
     ADD_FAILURE();
     return false;
   }
@@ -134,13 +106,13 @@
   // Attach loop devices to the files
   string a_dev;
   test_utils::ScopedLoopbackDeviceBinder a_dev_releaser(
-      a_loop_file, false, &a_dev);
+      a_loop_file.path(), false, &a_dev);
   if (!(a_dev_releaser.is_bound())) {
     ADD_FAILURE();
     return false;
   }
 
-  LOG(INFO) << "verifying: "  << a_loop_file << " (" << a_dev << ")";
+  LOG(INFO) << "verifying: " << a_loop_file.path() << " (" << a_dev << ")";
 
   bool success = true;
 
@@ -164,27 +136,21 @@
   }
   install_plan.partitions = {part};
 
-  ActionProcessor processor;
+  BuildActions(install_plan);
 
-  ObjectFeederAction<InstallPlan> feeder_action;
-  FilesystemVerifierAction copier_action;
-  ObjectCollectorAction<InstallPlan> collector_action;
+  FilesystemVerifierActionTestDelegate delegate;
+  processor_.set_delegate(&delegate);
 
-  BondActions(&feeder_action, &copier_action);
-  BondActions(&copier_action, &collector_action);
-
-  FilesystemVerifierActionTestDelegate delegate(&copier_action);
-  processor.set_delegate(&delegate);
-  processor.EnqueueAction(&feeder_action);
-  processor.EnqueueAction(&copier_action);
-  processor.EnqueueAction(&collector_action);
-
-  feeder_action.set_obj(install_plan);
-
-  loop_.PostTask(FROM_HERE, base::Bind(&StartProcessorInRunLoop,
-                                       &processor,
-                                       &copier_action,
-                                       terminate_early));
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(
+                     [](ActionProcessor* processor, bool terminate_early) {
+                       processor->StartProcessing();
+                       if (terminate_early) {
+                         processor->StopProcessing();
+                       }
+                     },
+                     base::Unretained(&processor_),
+                     terminate_early));
   loop_.Run();
 
   if (!terminate_early) {
@@ -213,12 +179,29 @@
   EXPECT_TRUE(is_a_file_reading_eq);
   success = success && is_a_file_reading_eq;
 
-  bool is_install_plan_eq = (collector_action.object() == install_plan);
+  bool is_install_plan_eq = (*delegate.install_plan_ == install_plan);
   EXPECT_TRUE(is_install_plan_eq);
   success = success && is_install_plan_eq;
   return success;
 }
 
+void FilesystemVerifierActionTest::BuildActions(
+    const InstallPlan& install_plan) {
+  auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
+  auto verifier_action = std::make_unique<FilesystemVerifierAction>();
+  auto collector_action =
+      std::make_unique<ObjectCollectorAction<InstallPlan>>();
+
+  feeder_action->set_obj(install_plan);
+
+  BondActions(feeder_action.get(), verifier_action.get());
+  BondActions(verifier_action.get(), collector_action.get());
+
+  processor_.EnqueueAction(std::move(feeder_action));
+  processor_.EnqueueAction(std::move(verifier_action));
+  processor_.EnqueueAction(std::move(collector_action));
+}
+
 class FilesystemVerifierActionTest2Delegate : public ActionProcessorDelegate {
  public:
   void ActionCompleted(ActionProcessor* processor,
@@ -234,31 +217,25 @@
 };
 
 TEST_F(FilesystemVerifierActionTest, MissingInputObjectTest) {
-  ActionProcessor processor;
+  auto copier_action = std::make_unique<FilesystemVerifierAction>();
+  auto collector_action =
+      std::make_unique<ObjectCollectorAction<InstallPlan>>();
+
+  BondActions(copier_action.get(), collector_action.get());
+
+  processor_.EnqueueAction(std::move(copier_action));
+  processor_.EnqueueAction(std::move(collector_action));
+
   FilesystemVerifierActionTest2Delegate delegate;
+  processor_.set_delegate(&delegate);
 
-  processor.set_delegate(&delegate);
-
-  FilesystemVerifierAction copier_action;
-  ObjectCollectorAction<InstallPlan> collector_action;
-
-  BondActions(&copier_action, &collector_action);
-
-  processor.EnqueueAction(&copier_action);
-  processor.EnqueueAction(&collector_action);
-  processor.StartProcessing();
-  EXPECT_FALSE(processor.IsRunning());
+  processor_.StartProcessing();
+  EXPECT_FALSE(processor_.IsRunning());
   EXPECT_TRUE(delegate.ran_);
   EXPECT_EQ(ErrorCode::kError, delegate.code_);
 }
 
 TEST_F(FilesystemVerifierActionTest, NonExistentDriveTest) {
-  ActionProcessor processor;
-  FilesystemVerifierActionTest2Delegate delegate;
-
-  processor.set_delegate(&delegate);
-
-  ObjectFeederAction<InstallPlan> feeder_action;
   InstallPlan install_plan;
   InstallPlan::Partition part;
   part.name = "nope";
@@ -266,19 +243,15 @@
   part.target_path = "/no/such/file";
   install_plan.partitions = {part};
 
-  feeder_action.set_obj(install_plan);
-  FilesystemVerifierAction verifier_action;
-  ObjectCollectorAction<InstallPlan> collector_action;
+  BuildActions(install_plan);
 
-  BondActions(&verifier_action, &collector_action);
+  FilesystemVerifierActionTest2Delegate delegate;
+  processor_.set_delegate(&delegate);
 
-  processor.EnqueueAction(&feeder_action);
-  processor.EnqueueAction(&verifier_action);
-  processor.EnqueueAction(&collector_action);
-  processor.StartProcessing();
-  EXPECT_FALSE(processor.IsRunning());
+  processor_.StartProcessing();
+  EXPECT_FALSE(processor_.IsRunning());
   EXPECT_TRUE(delegate.ran_);
-  EXPECT_EQ(ErrorCode::kError, delegate.code_);
+  EXPECT_EQ(ErrorCode::kFilesystemVerifierError, delegate.code_);
 }
 
 TEST_F(FilesystemVerifierActionTest, RunAsRootVerifyHashTest) {
@@ -295,7 +268,116 @@
   ASSERT_EQ(0U, getuid());
   EXPECT_TRUE(DoTest(true, false));
   // TerminateEarlyTest may leak some null callbacks from the Stream class.
-  while (loop_.RunOnce(false)) {}
+  while (loop_.RunOnce(false)) {
+  }
 }
 
+#ifdef __ANDROID__
+TEST_F(FilesystemVerifierActionTest, RunAsRootWriteVerityTest) {
+  test_utils::ScopedTempFile part_file("part_file.XXXXXX");
+  constexpr size_t filesystem_size = 200 * 4096;
+  constexpr size_t part_size = 256 * 4096;
+  brillo::Blob part_data(filesystem_size, 0x1);
+  part_data.resize(part_size);
+  ASSERT_TRUE(test_utils::WriteFileVector(part_file.path(), part_data));
+  string target_path;
+  test_utils::ScopedLoopbackDeviceBinder target_device(
+      part_file.path(), true, &target_path);
+
+  InstallPlan install_plan;
+  InstallPlan::Partition part;
+  part.name = "part";
+  part.target_path = target_path;
+  part.target_size = part_size;
+  part.block_size = 4096;
+  part.hash_tree_algorithm = "sha1";
+  part.hash_tree_data_offset = 0;
+  part.hash_tree_data_size = filesystem_size;
+  part.hash_tree_offset = filesystem_size;
+  part.hash_tree_size = 3 * 4096;
+  part.fec_data_offset = 0;
+  part.fec_data_size = filesystem_size + part.hash_tree_size;
+  part.fec_offset = part.fec_data_size;
+  part.fec_size = 2 * 4096;
+  part.fec_roots = 2;
+  // for i in {1..$((200 * 4096))}; do echo -n -e '\x1' >> part; done
+  // avbtool add_hashtree_footer --image part --partition_size $((256 * 4096))
+  //     --partition_name part --do_not_append_vbmeta_image
+  //     --output_vbmeta_image vbmeta
+  // truncate -s $((256 * 4096)) part
+  // sha256sum part | xxd -r -p | hexdump -v -e '/1 "0x%02x, "'
+  part.target_hash = {0x28, 0xd4, 0x96, 0x75, 0x4c, 0xf5, 0x8a, 0x3e,
+                      0x31, 0x85, 0x08, 0x92, 0x85, 0x62, 0xf0, 0x37,
+                      0xbc, 0x8d, 0x7e, 0xa4, 0xcb, 0x24, 0x18, 0x7b,
+                      0xf3, 0xeb, 0xb5, 0x8d, 0x6f, 0xc8, 0xd8, 0x1a};
+  // avbtool info_image --image vbmeta | grep Salt | cut -d':' -f 2 |
+  //     xxd -r -p | hexdump -v -e '/1 "0x%02x, "'
+  part.hash_tree_salt = {0x9e, 0xcb, 0xf8, 0xd5, 0x0b, 0xb4, 0x43,
+                         0x0a, 0x7a, 0x10, 0xad, 0x96, 0xd7, 0x15,
+                         0x70, 0xba, 0xed, 0x27, 0xe2, 0xae};
+  install_plan.partitions = {part};
+
+  BuildActions(install_plan);
+
+  FilesystemVerifierActionTestDelegate delegate;
+  processor_.set_delegate(&delegate);
+
+  loop_.PostTask(
+      FROM_HERE,
+      base::Bind(
+          [](ActionProcessor* processor) { processor->StartProcessing(); },
+          base::Unretained(&processor_)));
+  loop_.Run();
+
+  EXPECT_FALSE(processor_.IsRunning());
+  EXPECT_TRUE(delegate.ran());
+  EXPECT_EQ(ErrorCode::kSuccess, delegate.code());
+}
+#endif  // __ANDROID__
+
+TEST_F(FilesystemVerifierActionTest, RunAsRootSkipWriteVerityTest) {
+  test_utils::ScopedTempFile part_file("part_file.XXXXXX");
+  constexpr size_t filesystem_size = 200 * 4096;
+  constexpr size_t part_size = 256 * 4096;
+  brillo::Blob part_data(part_size);
+  test_utils::FillWithData(&part_data);
+  ASSERT_TRUE(test_utils::WriteFileVector(part_file.path(), part_data));
+  string target_path;
+  test_utils::ScopedLoopbackDeviceBinder target_device(
+      part_file.path(), true, &target_path);
+
+  InstallPlan install_plan;
+  install_plan.write_verity = false;
+  InstallPlan::Partition part;
+  part.name = "part";
+  part.target_path = target_path;
+  part.target_size = part_size;
+  part.block_size = 4096;
+  part.hash_tree_data_offset = 0;
+  part.hash_tree_data_size = filesystem_size;
+  part.hash_tree_offset = filesystem_size;
+  part.hash_tree_size = 3 * 4096;
+  part.fec_data_offset = 0;
+  part.fec_data_size = filesystem_size + part.hash_tree_size;
+  part.fec_offset = part.fec_data_size;
+  part.fec_size = 2 * 4096;
+  EXPECT_TRUE(HashCalculator::RawHashOfData(part_data, &part.target_hash));
+  install_plan.partitions = {part};
+
+  BuildActions(install_plan);
+
+  FilesystemVerifierActionTestDelegate delegate;
+  processor_.set_delegate(&delegate);
+
+  loop_.PostTask(
+      FROM_HERE,
+      base::Bind(
+          [](ActionProcessor* processor) { processor->StartProcessing(); },
+          base::Unretained(&processor_)));
+  loop_.Run();
+
+  EXPECT_FALSE(processor_.IsRunning());
+  EXPECT_TRUE(delegate.ran());
+  EXPECT_EQ(ErrorCode::kSuccess, delegate.code());
+}
 }  // namespace chromeos_update_engine
diff --git a/payload_consumer/install_plan.cc b/payload_consumer/install_plan.cc
index 45112d6..2e7b6d4 100644
--- a/payload_consumer/install_plan.cc
+++ b/payload_consumer/install_plan.cc
@@ -90,22 +90,28 @@
             << ", powerwash_required: " << utils::ToString(powerwash_required)
             << ", switch_slot_on_reboot: "
             << utils::ToString(switch_slot_on_reboot)
-            << ", run_post_install: " << utils::ToString(run_post_install);
+            << ", run_post_install: " << utils::ToString(run_post_install)
+            << ", is_rollback: " << utils::ToString(is_rollback)
+            << ", write_verity: " << utils::ToString(write_verity);
 }
 
 bool InstallPlan::LoadPartitionsFromSlots(BootControlInterface* boot_control) {
   bool result = true;
   for (Partition& partition : partitions) {
-    if (source_slot != BootControlInterface::kInvalidSlot) {
+    if (source_slot != BootControlInterface::kInvalidSlot &&
+        partition.source_size > 0) {
       result = boot_control->GetPartitionDevice(
-          partition.name, source_slot, &partition.source_path) && result;
+                   partition.name, source_slot, &partition.source_path) &&
+               result;
     } else {
       partition.source_path.clear();
     }
 
-    if (target_slot != BootControlInterface::kInvalidSlot) {
+    if (target_slot != BootControlInterface::kInvalidSlot &&
+        partition.target_size > 0) {
       result = boot_control->GetPartitionDevice(
-          partition.name, target_slot, &partition.target_path) && result;
+                   partition.name, target_slot, &partition.target_path) &&
+               result;
     } else {
       partition.target_path.clear();
     }
@@ -115,12 +121,9 @@
 
 bool InstallPlan::Partition::operator==(
     const InstallPlan::Partition& that) const {
-  return (name == that.name &&
-          source_path == that.source_path &&
-          source_size == that.source_size &&
-          source_hash == that.source_hash &&
-          target_path == that.target_path &&
-          target_size == that.target_size &&
+  return (name == that.name && source_path == that.source_path &&
+          source_size == that.source_size && source_hash == that.source_hash &&
+          target_path == that.target_path && target_size == that.target_size &&
           target_hash == that.target_hash &&
           run_postinstall == that.run_postinstall &&
           postinstall_path == that.postinstall_path &&
diff --git a/payload_consumer/install_plan.h b/payload_consumer/install_plan.h
index 5cdfbc1..ede36b3 100644
--- a/payload_consumer/install_plan.h
+++ b/payload_consumer/install_plan.h
@@ -46,7 +46,7 @@
 
   void Dump() const;
 
-  // Load the |source_path| and |target_path| of all |partitions| based on the
+  // Loads the |source_path| and |target_path| of all |partitions| based on the
   // |source_slot| and |target_slot| if available. Returns whether it succeeded
   // in loading all the partitions for the valid slots.
   bool LoadPartitionsFromSlots(BootControlInterface* boot_control);
@@ -101,6 +101,7 @@
     std::string target_path;
     uint64_t target_size{0};
     brillo::Blob target_hash;
+    uint32_t block_size{0};
 
     // Whether we should run the postinstall script from this partition and the
     // postinstall parameters.
@@ -108,6 +109,21 @@
     std::string postinstall_path;
     std::string filesystem_type;
     bool postinstall_optional{false};
+
+    // Verity hash tree and FEC config. See update_metadata.proto for details.
+    // All offsets and sizes are in bytes.
+    uint64_t hash_tree_data_offset{0};
+    uint64_t hash_tree_data_size{0};
+    uint64_t hash_tree_offset{0};
+    uint64_t hash_tree_size{0};
+    std::string hash_tree_algorithm;
+    brillo::Blob hash_tree_salt;
+
+    uint64_t fec_data_offset{0};
+    uint64_t fec_data_size{0};
+    uint64_t fec_offset{0};
+    uint64_t fec_size{0};
+    uint32_t fec_roots{0};
   };
   std::vector<Partition> partitions;
 
@@ -127,6 +143,13 @@
   // False otherwise.
   bool run_post_install{true};
 
+  // True if this update is a rollback.
+  bool is_rollback{false};
+
+  // True if the update should write verity.
+  // False otherwise.
+  bool write_verity{true};
+
   // If not blank, a base-64 encoded representation of the PEM-encoded
   // public key in the response.
   std::string public_key_rsa;
@@ -134,7 +157,7 @@
 
 class InstallPlanAction;
 
-template<>
+template <>
 class ActionTraits<InstallPlanAction> {
  public:
   // Takes the install plan as input
@@ -149,8 +172,8 @@
 class InstallPlanAction : public Action<InstallPlanAction> {
  public:
   InstallPlanAction() {}
-  explicit InstallPlanAction(const InstallPlan& install_plan):
-    install_plan_(install_plan) {}
+  explicit InstallPlanAction(const InstallPlan& install_plan)
+      : install_plan_(install_plan) {}
 
   void PerformAction() override {
     if (HasOutputPipe()) {
diff --git a/payload_consumer/mtd_file_descriptor.cc b/payload_consumer/mtd_file_descriptor.cc
index 5d7758a..5d940cb 100644
--- a/payload_consumer/mtd_file_descriptor.cc
+++ b/payload_consumer/mtd_file_descriptor.cc
@@ -67,9 +67,8 @@
     return ret;
   }
 
-  base::TrimWhitespaceASCII(s_reserved_ebs,
-                            base::TRIM_TRAILING,
-                            &s_reserved_ebs);
+  base::TrimWhitespaceASCII(
+      s_reserved_ebs, base::TRIM_TRAILING, &s_reserved_ebs);
   base::TrimWhitespaceASCII(s_eb_size, base::TRIM_TRAILING, &s_eb_size);
 
   uint64_t reserved_ebs, eb_size;
@@ -142,9 +141,8 @@
 
 ssize_t MtdFileDescriptor::Write(const void* buf, size_t count) {
   CHECK(write_ctx_);
-  ssize_t result = mtd_write_data(write_ctx_.get(),
-                                  static_cast<const char*>(buf),
-                                  count);
+  ssize_t result =
+      mtd_write_data(write_ctx_.get(), static_cast<const char*>(buf), count);
   if (result > 0) {
     nr_written_ += result;
   }
@@ -168,8 +166,8 @@
 bool UbiFileDescriptor::IsUbi(const char* path) {
   base::FilePath device_node(path);
   base::FilePath ubi_name(device_node.BaseName());
-  TEST_AND_RETURN_FALSE(base::StartsWith(ubi_name.MaybeAsASCII(), "ubi",
-                                         base::CompareCase::SENSITIVE));
+  TEST_AND_RETURN_FALSE(base::StartsWith(
+      ubi_name.MaybeAsASCII(), "ubi", base::CompareCase::SENSITIVE));
 
   return static_cast<bool>(GetUbiVolumeInfo(path));
 }
diff --git a/payload_consumer/mtd_file_descriptor.h b/payload_consumer/mtd_file_descriptor.h
index 6c945b2..c0170b7 100644
--- a/payload_consumer/mtd_file_descriptor.h
+++ b/payload_consumer/mtd_file_descriptor.h
@@ -20,6 +20,8 @@
 // This module defines file descriptors that deal with NAND media. We are
 // concerned with raw NAND access (as an MTD device) and access through the UBI layer.
 
+#include <memory>
+
 #include <mtdutils.h>
 
 #include "update_engine/payload_consumer/file_descriptor.h"
@@ -86,10 +88,7 @@
   bool Close() override;
 
  private:
-  enum Mode {
-    kReadOnly,
-    kWriteOnly
-  };
+  enum Mode { kReadOnly, kWriteOnly };
 
   uint64_t usable_eb_blocks_;
   uint64_t eraseblock_size_;
diff --git a/payload_consumer/payload_constants.cc b/payload_consumer/payload_constants.cc
index e679316..a2368a4 100644
--- a/payload_consumer/payload_constants.cc
+++ b/payload_consumer/payload_constants.cc
@@ -21,21 +21,28 @@
 const uint64_t kChromeOSMajorPayloadVersion = 1;
 const uint64_t kBrilloMajorPayloadVersion = 2;
 
+const uint32_t kMinSupportedMinorPayloadVersion = 1;
+const uint32_t kMaxSupportedMinorPayloadVersion = 6;
+
 const uint32_t kFullPayloadMinorVersion = 0;
 const uint32_t kInPlaceMinorPayloadVersion = 1;
 const uint32_t kSourceMinorPayloadVersion = 2;
 const uint32_t kOpSrcHashMinorPayloadVersion = 3;
 const uint32_t kBrotliBsdiffMinorPayloadVersion = 4;
 const uint32_t kPuffdiffMinorPayloadVersion = 5;
+const uint32_t kVerityMinorPayloadVersion = 6;
+
+const uint64_t kMinSupportedMajorPayloadVersion = 1;
+const uint64_t kMaxSupportedMajorPayloadVersion = 2;
 
 const uint64_t kMaxPayloadHeaderSize = 24;
 
-const char kLegacyPartitionNameKernel[] = "boot";
-const char kLegacyPartitionNameRoot[] = "system";
+const char kPartitionNameKernel[] = "kernel";
+const char kPartitionNameRoot[] = "root";
 
 const char kDeltaMagic[4] = {'C', 'r', 'A', 'U'};
 
-const char* InstallOperationTypeName(InstallOperation_Type op_type) {
+const char* InstallOperationTypeName(InstallOperation::Type op_type) {
   switch (op_type) {
     case InstallOperation::BSDIFF:
       return "BSDIFF";
diff --git a/payload_consumer/payload_constants.h b/payload_consumer/payload_constants.h
index ac3e882..1642488 100644
--- a/payload_consumer/payload_constants.h
+++ b/payload_consumer/payload_constants.h
@@ -31,6 +31,10 @@
 // The major version used by Brillo.
 extern const uint64_t kBrilloMajorPayloadVersion;
 
+// The minimum and maximum supported major version.
+extern const uint64_t kMinSupportedMajorPayloadVersion;
+extern const uint64_t kMaxSupportedMajorPayloadVersion;
+
 // The minor version used for all full payloads.
 extern const uint32_t kFullPayloadMinorVersion;
 
@@ -49,14 +53,21 @@
 // The minor version that allows PUFFDIFF operation.
 extern const uint32_t kPuffdiffMinorPayloadVersion;
 
+// The minor version that allows Verity hash tree and FEC generation.
+extern const uint32_t kVerityMinorPayloadVersion;
+
+// The minimum and maximum supported minor version.
+extern const uint32_t kMinSupportedMinorPayloadVersion;
+extern const uint32_t kMaxSupportedMinorPayloadVersion;
+
 // The maximum size of the payload header (anything before the protobuf).
 extern const uint64_t kMaxPayloadHeaderSize;
 
 // The kernel and rootfs partition names used by the BootControlInterface when
 // handling update payloads with a major version 1. The names of the updated
 // partitions are included in the payload itself for major version 2.
-extern const char kLegacyPartitionNameKernel[];
-extern const char kLegacyPartitionNameRoot[];
+extern const char kPartitionNameKernel[];
+extern const char kPartitionNameRoot[];
 
 extern const char kBspatchPath[];
 extern const char kDeltaMagic[4];
@@ -66,7 +77,7 @@
 const uint64_t kSparseHole = std::numeric_limits<uint64_t>::max();
 
 // Return the name of the operation type.
-const char* InstallOperationTypeName(InstallOperation_Type op_type);
+const char* InstallOperationTypeName(InstallOperation::Type op_type);
 
 }  // namespace chromeos_update_engine
 
diff --git a/payload_consumer/payload_metadata.cc b/payload_consumer/payload_metadata.cc
index 6b8d448..8b3eb4e 100644
--- a/payload_consumer/payload_metadata.cc
+++ b/payload_consumer/payload_metadata.cc
@@ -25,6 +25,8 @@
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_consumer/payload_verifier.h"
 
+using std::string;
+
 namespace chromeos_update_engine {
 
 const uint64_t PayloadMetadata::kDeltaVersionOffset = sizeof(kDeltaMagic);
@@ -60,9 +62,7 @@
 }
 
 MetadataParseResult PayloadMetadata::ParsePayloadHeader(
-    const brillo::Blob& payload,
-    uint64_t supported_major_version,
-    ErrorCode* error) {
+    const brillo::Blob& payload, ErrorCode* error) {
   uint64_t manifest_offset;
   // Ensure we have data to cover the major payload version.
   if (payload.size() < kDeltaManifestSizeOffset)
@@ -84,8 +84,8 @@
   // Switch big endian to host.
   major_payload_version_ = be64toh(major_payload_version_);
 
-  if (major_payload_version_ != supported_major_version &&
-      major_payload_version_ != kChromeOSMajorPayloadVersion) {
+  if (major_payload_version_ < kMinSupportedMajorPayloadVersion ||
+      major_payload_version_ > kMaxSupportedMajorPayloadVersion) {
     LOG(ERROR) << "Bad payload format -- unsupported payload version: "
                << major_payload_version_;
     *error = ErrorCode::kUnsupportedMajorPayloadVersion;
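
The major version tested in the range check above is stored big-endian in the
payload header and converted with be64toh (the "Switch big endian to host"
step) after being copied out of the blob. A self-contained sketch of that
decode step (assumes a glibc/bionic <endian.h>; the helper name is
illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    #include <endian.h>  // be64toh() on glibc/bionic.

    // Reads the 64-bit big-endian value stored at |offset| in |buf| and
    // converts it to host byte order, the same decode used for the payload
    // header fields.
    uint64_t ReadBigEndian64(const uint8_t* buf, size_t offset) {
      uint64_t value;
      std::memcpy(&value, buf + offset, sizeof(value));
      return be64toh(value);
    }
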
@@ -140,6 +140,11 @@
   return MetadataParseResult::kSuccess;
 }
 
+bool PayloadMetadata::ParsePayloadHeader(const brillo::Blob& payload) {
+  ErrorCode error;
+  return ParsePayloadHeader(payload, &error) == MetadataParseResult::kSuccess;
+}
+
 bool PayloadMetadata::GetManifest(const brillo::Blob& payload,
                                   DeltaArchiveManifest* out_manifest) const {
   uint64_t manifest_offset;
@@ -152,12 +157,16 @@
 
 ErrorCode PayloadMetadata::ValidateMetadataSignature(
     const brillo::Blob& payload,
-    std::string metadata_signature,
-    base::FilePath path_to_public_key) const {
+    const string& metadata_signature,
+    const string& pem_public_key) const {
   if (payload.size() < metadata_size_ + metadata_signature_size_)
     return ErrorCode::kDownloadMetadataSignatureError;
 
-  brillo::Blob metadata_signature_blob, metadata_signature_protobuf_blob;
+  // A single signature in raw bytes.
+  brillo::Blob metadata_signature_blob;
+  // The serialized Signatures protobuf message stored in a major version >= 2
+  // payload; it may contain multiple signatures.
+  string metadata_signature_protobuf;
   if (!metadata_signature.empty()) {
     // Convert base64-encoded signature to raw bytes.
     if (!brillo::data_encoding::Base64Decode(metadata_signature,
@@ -167,21 +176,17 @@
       return ErrorCode::kDownloadMetadataSignatureError;
     }
   } else if (major_payload_version_ == kBrilloMajorPayloadVersion) {
-    metadata_signature_protobuf_blob.assign(
+    metadata_signature_protobuf.assign(
         payload.begin() + metadata_size_,
         payload.begin() + metadata_size_ + metadata_signature_size_);
   }
 
-  if (metadata_signature_blob.empty() &&
-      metadata_signature_protobuf_blob.empty()) {
+  if (metadata_signature_blob.empty() && metadata_signature_protobuf.empty()) {
     LOG(ERROR) << "Missing mandatory metadata signature in both Omaha "
                << "response and payload.";
     return ErrorCode::kDownloadMetadataSignatureMissingError;
   }
 
-  LOG(INFO) << "Verifying metadata hash signature using public key: "
-            << path_to_public_key.value();
-
   brillo::Blob calculated_metadata_hash;
   if (!HashCalculator::RawHashOfBytes(
           payload.data(), metadata_size_, &calculated_metadata_hash)) {
@@ -197,9 +202,8 @@
 
   if (!metadata_signature_blob.empty()) {
     brillo::Blob expected_metadata_hash;
-    if (!PayloadVerifier::GetRawHashFromSignature(metadata_signature_blob,
-                                                  path_to_public_key.value(),
-                                                  &expected_metadata_hash)) {
+    if (!PayloadVerifier::GetRawHashFromSignature(
+            metadata_signature_blob, pem_public_key, &expected_metadata_hash)) {
       LOG(ERROR) << "Unable to compute expected hash from metadata signature";
       return ErrorCode::kDownloadMetadataSignatureError;
     }
@@ -211,8 +215,8 @@
       return ErrorCode::kDownloadMetadataSignatureMismatch;
     }
   } else {
-    if (!PayloadVerifier::VerifySignature(metadata_signature_protobuf_blob,
-                                          path_to_public_key.value(),
+    if (!PayloadVerifier::VerifySignature(metadata_signature_protobuf,
+                                          pem_public_key,
                                           calculated_metadata_hash)) {
       LOG(ERROR) << "Manifest hash verification failed.";
       return ErrorCode::kDownloadMetadataSignatureMismatch;
diff --git a/payload_consumer/payload_metadata.h b/payload_consumer/payload_metadata.h
index e00b5c1..1b4c5c8 100644
--- a/payload_consumer/payload_metadata.h
+++ b/payload_consumer/payload_metadata.h
@@ -22,7 +22,7 @@
 #include <string>
 #include <vector>
 
-#include <base/files/file_path.h>
+#include <base/macros.h>
 #include <brillo/secure_blob.h>
 
 #include "update_engine/common/error_code.h"
@@ -54,8 +54,9 @@
   // metadata. Returns kMetadataParseError if the metadata can't be parsed given
   // the payload.
   MetadataParseResult ParsePayloadHeader(const brillo::Blob& payload,
-                                         uint64_t supported_major_version,
                                          ErrorCode* error);
+  // A simpler version of the above that returns true on success.
+  bool ParsePayloadHeader(const brillo::Blob& payload);
 
   // Given the |payload|, verifies that the signed hash of its metadata matches
   // |metadata_signature| (if present) or the metadata signature in payload
@@ -65,8 +66,8 @@
   // to the payload server doesn't exploit any vulnerability in the code that
   // parses the protocol buffer.
   ErrorCode ValidateMetadataSignature(const brillo::Blob& payload,
-                                      std::string metadata_signature,
-                                      base::FilePath path_to_public_key) const;
+                                      const std::string& metadata_signature,
+                                      const std::string& pem_public_key) const;
 
   // Returns the major payload version. If the version was not yet parsed,
   // returns zero.
diff --git a/payload_consumer/payload_verifier.cc b/payload_consumer/payload_verifier.cc
index ab5238c..3eb1da8 100644
--- a/payload_consumer/payload_verifier.cc
+++ b/payload_consumer/payload_verifier.cc
@@ -16,6 +16,8 @@
 
 #include "update_engine/payload_consumer/payload_verifier.h"
 
+#include <vector>
+
 #include <base/logging.h>
 #include <openssl/pem.h>
 
@@ -48,51 +50,41 @@
 //   }
 //   OCTET STRING(2+32) <actual signature bytes...>
 //  }
+// clang-format off
 const uint8_t kRSA2048SHA256Padding[] = {
-  // PKCS1-v1_5 padding
-  0x00, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0x00,
-  // ASN.1 header
-  0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
-  0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05,
-  0x00, 0x04, 0x20,
+    // PKCS1-v1_5 padding
+    0x00, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+    0x00,
+    // ASN.1 header
+    0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03,
+    0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20,
 };
+// clang-format on
 
 }  // namespace
 
-bool PayloadVerifier::VerifySignature(const brillo::Blob& signature_blob,
-                                      const string& public_key_path,
+bool PayloadVerifier::VerifySignature(const string& signature_proto,
+                                      const string& pem_public_key,
                                       const brillo::Blob& hash_data) {
-  TEST_AND_RETURN_FALSE(!public_key_path.empty());
-
   Signatures signatures;
-  LOG(INFO) << "signature blob size = " <<  signature_blob.size();
-  TEST_AND_RETURN_FALSE(signatures.ParseFromArray(signature_blob.data(),
-                                                  signature_blob.size()));
+  LOG(INFO) << "signature blob size = " << signature_proto.size();
+  TEST_AND_RETURN_FALSE(signatures.ParseFromString(signature_proto));
 
   if (!signatures.signatures_size()) {
     LOG(ERROR) << "No signatures stored in the blob.";
@@ -102,10 +94,10 @@
   std::vector<brillo::Blob> tested_hashes;
   // Tries every signature in the signature blob.
   for (int i = 0; i < signatures.signatures_size(); i++) {
-    const Signatures_Signature& signature = signatures.signatures(i);
+    const Signatures::Signature& signature = signatures.signatures(i);
     brillo::Blob sig_data(signature.data().begin(), signature.data().end());
     brillo::Blob sig_hash_data;
-    if (!GetRawHashFromSignature(sig_data, public_key_path, &sig_hash_data))
+    if (!GetRawHashFromSignature(sig_data, pem_public_key, &sig_hash_data))
       continue;
 
     if (hash_data == sig_hash_data) {
@@ -125,28 +117,19 @@
   return false;
 }
 
-
-bool PayloadVerifier::GetRawHashFromSignature(
-    const brillo::Blob& sig_data,
-    const string& public_key_path,
-    brillo::Blob* out_hash_data) {
-  TEST_AND_RETURN_FALSE(!public_key_path.empty());
-
+bool PayloadVerifier::GetRawHashFromSignature(const brillo::Blob& sig_data,
+                                              const string& pem_public_key,
+                                              brillo::Blob* out_hash_data) {
   // The code below executes the equivalent of:
   //
-  // openssl rsautl -verify -pubin -inkey |public_key_path|
+  // openssl rsautl -verify -pubin -inkey <(echo |pem_public_key|)
   //   -in |sig_data| -out |out_hash_data|
 
-  // Loads the public key.
-  FILE* fpubkey = fopen(public_key_path.c_str(), "rb");
-  if (!fpubkey) {
-    LOG(ERROR) << "Unable to open public key file: " << public_key_path;
-    return false;
-  }
+  BIO* bp = BIO_new_mem_buf(pem_public_key.data(), pem_public_key.size());
+  char dummy_password[] = {' ', 0};  // Ensure no password is read from stdin.
+  RSA* rsa = PEM_read_bio_RSA_PUBKEY(bp, nullptr, nullptr, dummy_password);
+  BIO_free(bp);
 
-  char dummy_password[] = { ' ', 0 };  // Ensure no password is read from stdin.
-  RSA* rsa = PEM_read_RSA_PUBKEY(fpubkey, nullptr, nullptr, dummy_password);
-  fclose(fpubkey);
   TEST_AND_RETURN_FALSE(rsa != nullptr);
   unsigned int keysize = RSA_size(rsa);
   if (sig_data.size() > 2 * keysize) {
@@ -157,11 +140,8 @@
 
   // Decrypts the signature.
   brillo::Blob hash_data(keysize);
-  int decrypt_size = RSA_public_decrypt(sig_data.size(),
-                                        sig_data.data(),
-                                        hash_data.data(),
-                                        rsa,
-                                        RSA_NO_PADDING);
+  int decrypt_size = RSA_public_decrypt(
+      sig_data.size(), sig_data.data(), hash_data.data(), rsa, RSA_NO_PADDING);
   RSA_free(rsa);
   TEST_AND_RETURN_FALSE(decrypt_size > 0 &&
                         decrypt_size <= static_cast<int>(hash_data.size()));
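
PayloadVerifier now receives the RSA public key as an in-memory PEM string
rather than a file path, so the key is loaded through a memory BIO instead of
fopen() plus PEM_read_RSA_PUBKEY(). A standalone sketch of that loading step
using the same OpenSSL calls as above (the helper name is illustrative; the
caller owns the returned key):

    #include <string>

    #include <openssl/bio.h>
    #include <openssl/pem.h>
    #include <openssl/rsa.h>

    // Loads an RSA public key from a PEM string held in memory. Returns
    // nullptr on failure; release the result with RSA_free().
    RSA* LoadRsaPublicKeyFromPem(const std::string& pem_public_key) {
      BIO* bp = BIO_new_mem_buf(pem_public_key.data(), pem_public_key.size());
      if (bp == nullptr)
        return nullptr;
      // A dummy password avoids any prompt in case an encrypted key is passed,
      // matching the approach used in GetRawHashFromSignature() above.
      char dummy_password[] = {' ', 0};
      RSA* rsa = PEM_read_bio_RSA_PUBKEY(bp, nullptr, nullptr, dummy_password);
      BIO_free(bp);
      return rsa;
    }
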
diff --git a/payload_consumer/payload_verifier.h b/payload_consumer/payload_verifier.h
index 8caef35..09bdbf9 100644
--- a/payload_consumer/payload_verifier.h
+++ b/payload_consumer/payload_verifier.h
@@ -31,20 +31,21 @@
 
 class PayloadVerifier {
  public:
-  // Interprets |signature_blob| as a protocol buffer containing the Signatures
-  // message and decrypts each signature data using the |public_key_path|.
+  // Interprets |signature_proto| as a protocol buffer containing the Signatures
+  // message and decrypts the data of each signature using |pem_public_key|.
+  // |pem_public_key| should be an RSA public key in PEM format.
   // Returns whether *any* of the decrypted hashes matches the |hash_data|.
   // In case of any error parsing the signatures or the public key, returns
   // false.
-  static bool VerifySignature(const brillo::Blob& signature_blob,
-                              const std::string& public_key_path,
+  static bool VerifySignature(const std::string& signature_proto,
+                              const std::string& pem_public_key,
                               const brillo::Blob& hash_data);
 
-  // Decrypts sig_data with the given public_key_path and populates
-  // out_hash_data with the decoded raw hash. Returns true if successful,
-  // false otherwise.
+  // Decrypts |sig_data| with the given |pem_public_key| and populates
+  // |out_hash_data| with the decoded raw hash. |pem_public_key| should be an
+  // RSA public key in PEM format. Returns true if successful, false otherwise.
   static bool GetRawHashFromSignature(const brillo::Blob& sig_data,
-                                      const std::string& public_key_path,
+                                      const std::string& pem_public_key,
                                       brillo::Blob* out_hash_data);
 
   // Pads a SHA256 hash so that it may be encrypted/signed with RSA2048
diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc
index cedecda..a782b8f 100644
--- a/payload_consumer/postinstall_runner_action.cc
+++ b/payload_consumer/postinstall_runner_action.cc
@@ -57,8 +57,9 @@
   CHECK(HasInputObject());
   install_plan_ = GetInputObject();
 
-  if (install_plan_.powerwash_required) {
-    if (hardware_->SchedulePowerwash()) {
+  // Currently we're always powerwashing when rolling back.
+  if (install_plan_.powerwash_required || install_plan_.is_rollback) {
+    if (hardware_->SchedulePowerwash(install_plan_.is_rollback)) {
       powerwash_scheduled_ = true;
     } else {
       return CompletePostinstall(ErrorCode::kPostinstallPowerwashError);
@@ -264,7 +265,7 @@
 void PostinstallRunnerAction::ReportProgress(double frac) {
   if (!delegate_)
     return;
-  if (current_partition_ >= partition_weight_.size()) {
+  if (current_partition_ >= partition_weight_.size() || total_weight_ == 0) {
     delegate_->ProgressUpdate(1.);
     return;
   }
diff --git a/payload_consumer/postinstall_runner_action.h b/payload_consumer/postinstall_runner_action.h
index 2e48e11..b9b7069 100644
--- a/payload_consumer/postinstall_runner_action.h
+++ b/payload_consumer/postinstall_runner_action.h
@@ -89,8 +89,7 @@
   void Cleanup();
 
   // Subprocess::Exec callback.
-  void CompletePartitionPostinstall(int return_code,
-                                    const std::string& output);
+  void CompletePartitionPostinstall(int return_code, const std::string& output);
 
   // Complete the Action with the passed |error_code| and mark the new slot as
   // ready. Called when the post-install script was run for all the partitions.
diff --git a/payload_consumer/postinstall_runner_action_unittest.cc b/payload_consumer/postinstall_runner_action_unittest.cc
index f15171b..caee5e2 100644
--- a/payload_consumer/postinstall_runner_action_unittest.cc
+++ b/payload_consumer/postinstall_runner_action_unittest.cc
@@ -22,6 +22,7 @@
 
 #include <memory>
 #include <string>
+#include <utility>
 
 #include <base/bind.h>
 #include <base/files/file_util.h>
@@ -36,8 +37,10 @@
 #include "update_engine/common/constants.h"
 #include "update_engine/common/fake_boot_control.h"
 #include "update_engine/common/fake_hardware.h"
+#include "update_engine/common/subprocess.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
+#include "update_engine/mock_payload_state.h"
 
 using brillo::MessageLoop;
 using chromeos_update_engine::test_utils::ScopedLoopbackDeviceBinder;
@@ -94,9 +97,10 @@
   // Setup an action processor and run the PostinstallRunnerAction with a single
   // partition |device_path|, running the |postinstall_program| command from
   // there.
-  void RunPosinstallAction(const string& device_path,
-                           const string& postinstall_program,
-                           bool powerwash_required);
+  void RunPostinstallAction(const string& device_path,
+                            const string& postinstall_program,
+                            bool powerwash_required,
+                            bool is_rollback);
 
  public:
   void ResumeRunningAction() {
@@ -162,13 +166,14 @@
   ActionProcessor* processor_{nullptr};
 };
 
-void PostinstallRunnerActionTest::RunPosinstallAction(
+void PostinstallRunnerActionTest::RunPostinstallAction(
     const string& device_path,
     const string& postinstall_program,
-    bool powerwash_required) {
+    bool powerwash_required,
+    bool is_rollback) {
   ActionProcessor processor;
   processor_ = &processor;
-  ObjectFeederAction<InstallPlan> feeder_action;
+  auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
   InstallPlan::Partition part;
   part.name = "part";
   part.target_path = device_path;
@@ -178,16 +183,19 @@
   install_plan.partitions = {part};
   install_plan.download_url = "http://127.0.0.1:8080/update";
   install_plan.powerwash_required = powerwash_required;
-  feeder_action.set_obj(install_plan);
-  PostinstallRunnerAction runner_action(&fake_boot_control_, &fake_hardware_);
-  postinstall_action_ = &runner_action;
-  runner_action.set_delegate(setup_action_delegate_);
-  BondActions(&feeder_action, &runner_action);
-  ObjectCollectorAction<InstallPlan> collector_action;
-  BondActions(&runner_action, &collector_action);
-  processor.EnqueueAction(&feeder_action);
-  processor.EnqueueAction(&runner_action);
-  processor.EnqueueAction(&collector_action);
+  install_plan.is_rollback = is_rollback;
+  feeder_action->set_obj(install_plan);
+  auto runner_action = std::make_unique<PostinstallRunnerAction>(
+      &fake_boot_control_, &fake_hardware_);
+  postinstall_action_ = runner_action.get();
+  runner_action->set_delegate(setup_action_delegate_);
+  BondActions(feeder_action.get(), runner_action.get());
+  auto collector_action =
+      std::make_unique<ObjectCollectorAction<InstallPlan>>();
+  BondActions(runner_action.get(), collector_action.get());
+  processor.EnqueueAction(std::move(feeder_action));
+  processor.EnqueueAction(std::move(runner_action));
+  processor.EnqueueAction(std::move(collector_action));
   processor.set_delegate(&processor_delegate_);
 
   loop_.PostTask(
@@ -240,46 +248,68 @@
 // /postinst command which only exits 0.
 TEST_F(PostinstallRunnerActionTest, RunAsRootSimpleTest) {
   ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
-  RunPosinstallAction(loop.dev(), kPostinstallDefaultScript, false);
+
+  RunPostinstallAction(loop.dev(), kPostinstallDefaultScript, false, false);
   EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
   EXPECT_TRUE(processor_delegate_.processing_done_called_);
 
   // Since powerwash_required was false, this should not trigger a powerwash.
   EXPECT_FALSE(fake_hardware_.IsPowerwashScheduled());
+  EXPECT_FALSE(fake_hardware_.GetIsRollbackPowerwashScheduled());
 }
 
 TEST_F(PostinstallRunnerActionTest, RunAsRootRunSymlinkFileTest) {
   ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
-  RunPosinstallAction(loop.dev(), "bin/postinst_link", false);
+  RunPostinstallAction(loop.dev(), "bin/postinst_link", false, false);
   EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
 }
 
 TEST_F(PostinstallRunnerActionTest, RunAsRootPowerwashRequiredTest) {
   ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
   // Run a simple postinstall program but requiring a powerwash.
-  RunPosinstallAction(loop.dev(), "bin/postinst_example", true);
+  RunPostinstallAction(loop.dev(),
+                       "bin/postinst_example",
+                       /*powerwash_required=*/true,
+                       false);
   EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
 
   // Check that powerwash was scheduled.
   EXPECT_TRUE(fake_hardware_.IsPowerwashScheduled());
+  EXPECT_FALSE(fake_hardware_.GetIsRollbackPowerwashScheduled());
+}
+
+TEST_F(PostinstallRunnerActionTest, RunAsRootRollbackTest) {
+  ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
+
+  // Run a simple postinstall program for an update that is a rollback.
+  RunPostinstallAction(loop.dev(),
+                       "bin/postinst_example",
+                       false,
+                       /*is_rollback=*/true);
+  EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
+
+  // Check that powerwash was scheduled and that it's a rollback powerwash.
+  EXPECT_TRUE(fake_hardware_.IsPowerwashScheduled());
+  EXPECT_TRUE(fake_hardware_.GetIsRollbackPowerwashScheduled());
 }
 
 // Runs postinstall from a partition file that doesn't mount, so it should
 // fail.
 TEST_F(PostinstallRunnerActionTest, RunAsRootCantMountTest) {
-  RunPosinstallAction("/dev/null", kPostinstallDefaultScript, false);
+  RunPostinstallAction("/dev/null", kPostinstallDefaultScript, false, false);
   EXPECT_EQ(ErrorCode::kPostinstallRunnerError, processor_delegate_.code_);
 
   // In case of failure, Postinstall should not signal a powerwash even if it
   // was requested.
   EXPECT_FALSE(fake_hardware_.IsPowerwashScheduled());
+  EXPECT_FALSE(fake_hardware_.GetIsRollbackPowerwashScheduled());
 }
 
 // Check that the failures from the postinstall script cause the action to
 // fail.
 TEST_F(PostinstallRunnerActionTest, RunAsRootErrScriptTest) {
   ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
-  RunPosinstallAction(loop.dev(), "bin/postinst_fail1", false);
+  RunPostinstallAction(loop.dev(), "bin/postinst_fail1", false, false);
   EXPECT_EQ(ErrorCode::kPostinstallRunnerError, processor_delegate_.code_);
 }
 
@@ -287,7 +317,7 @@
 // UMA with a different error code. Test those cases are properly detected.
 TEST_F(PostinstallRunnerActionTest, RunAsRootFirmwareBErrScriptTest) {
   ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
-  RunPosinstallAction(loop.dev(), "bin/postinst_fail3", false);
+  RunPostinstallAction(loop.dev(), "bin/postinst_fail3", false, false);
   EXPECT_EQ(ErrorCode::kPostinstallBootedFromFirmwareB,
             processor_delegate_.code_);
 }
@@ -295,7 +325,7 @@
 // Check that you can't specify an absolute path.
 TEST_F(PostinstallRunnerActionTest, RunAsRootAbsolutePathNotAllowedTest) {
   ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
-  RunPosinstallAction(loop.dev(), "/etc/../bin/sh", false);
+  RunPostinstallAction(loop.dev(), "/etc/../bin/sh", false, false);
   EXPECT_EQ(ErrorCode::kPostinstallRunnerError, processor_delegate_.code_);
 }
 
@@ -304,7 +334,7 @@
 // SElinux labels are only set on Android.
 TEST_F(PostinstallRunnerActionTest, RunAsRootCheckFileContextsTest) {
   ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
-  RunPosinstallAction(loop.dev(), "bin/self_check_context", false);
+  RunPostinstallAction(loop.dev(), "bin/self_check_context", false, false);
   EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
 }
 #endif  // __ANDROID__
@@ -317,7 +347,7 @@
   loop_.PostTask(FROM_HERE,
                  base::Bind(&PostinstallRunnerActionTest::SuspendRunningAction,
                             base::Unretained(this)));
-  RunPosinstallAction(loop.dev(), "bin/postinst_suspend", false);
+  RunPostinstallAction(loop.dev(), "bin/postinst_suspend", false, false);
   // postinst_suspend returns 0 only if it was suspended at some point.
   EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
   EXPECT_TRUE(processor_delegate_.processing_done_called_);
@@ -329,7 +359,7 @@
 
   // Wait for the action to start and then cancel it.
   CancelWhenStarted();
-  RunPosinstallAction(loop.dev(), "bin/postinst_suspend", false);
+  RunPostinstallAction(loop.dev(), "bin/postinst_suspend", false, false);
   // When canceling the action, the action never finished and therefore we had
   // a ProcessingStopped call instead.
   EXPECT_FALSE(processor_delegate_.code_set_);
@@ -352,7 +382,7 @@
 
   ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
   setup_action_delegate_ = &mock_delegate_;
-  RunPosinstallAction(loop.dev(), "bin/postinst_progress", false);
+  RunPostinstallAction(loop.dev(), "bin/postinst_progress", false, false);
   EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
 }
 
diff --git a/payload_consumer/verity_writer_android.cc b/payload_consumer/verity_writer_android.cc
new file mode 100644
index 0000000..06d1489
--- /dev/null
+++ b/payload_consumer/verity_writer_android.cc
@@ -0,0 +1,192 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/verity_writer_android.h"
+
+#include <fcntl.h>
+
+#include <algorithm>
+#include <memory>
+
+#include <base/logging.h>
+#include <base/posix/eintr_wrapper.h>
+#include <fec/ecc.h>
+extern "C" {
+#include <fec.h>
+}
+
+#include "update_engine/common/utils.h"
+
+namespace chromeos_update_engine {
+
+namespace verity_writer {
+std::unique_ptr<VerityWriterInterface> CreateVerityWriter() {
+  return std::make_unique<VerityWriterAndroid>();
+}
+}  // namespace verity_writer
+
+bool VerityWriterAndroid::Init(const InstallPlan::Partition& partition) {
+  partition_ = &partition;
+
+  if (partition_->hash_tree_size != 0) {
+    auto hash_function =
+        HashTreeBuilder::HashFunction(partition_->hash_tree_algorithm);
+    if (hash_function == nullptr) {
+      LOG(ERROR) << "Verity hash algorithm not supported: "
+                 << partition_->hash_tree_algorithm;
+      return false;
+    }
+    hash_tree_builder_ = std::make_unique<HashTreeBuilder>(
+        partition_->block_size, hash_function);
+    TEST_AND_RETURN_FALSE(hash_tree_builder_->Initialize(
+        partition_->hash_tree_data_size, partition_->hash_tree_salt));
+    if (hash_tree_builder_->CalculateSize(partition_->hash_tree_data_size) !=
+        partition_->hash_tree_size) {
+      LOG(ERROR) << "Verity hash tree size does not match, stored: "
+                 << partition_->hash_tree_size << ", calculated: "
+                 << hash_tree_builder_->CalculateSize(
+                        partition_->hash_tree_data_size);
+      return false;
+    }
+  }
+  return true;
+}
+
+bool VerityWriterAndroid::Update(uint64_t offset,
+                                 const uint8_t* buffer,
+                                 size_t size) {
+  if (partition_->hash_tree_size != 0) {
+    uint64_t hash_tree_data_end =
+        partition_->hash_tree_data_offset + partition_->hash_tree_data_size;
+    uint64_t start_offset = std::max(offset, partition_->hash_tree_data_offset);
+    uint64_t end_offset = std::min(offset + size, hash_tree_data_end);
+    if (start_offset < end_offset) {
+      TEST_AND_RETURN_FALSE(hash_tree_builder_->Update(
+          buffer + start_offset - offset, end_offset - start_offset));
+
+      if (end_offset == hash_tree_data_end) {
+        // All hash tree data blocks have been hashed; write hash tree to disk.
+        int fd = HANDLE_EINTR(open(partition_->target_path.c_str(), O_WRONLY));
+        if (fd < 0) {
+          PLOG(ERROR) << "Failed to open " << partition_->target_path
+                      << " to write hash tree.";
+          return false;
+        }
+        ScopedFdCloser fd_closer(&fd);
+
+        LOG(INFO) << "Writing verity hash tree to " << partition_->target_path;
+        TEST_AND_RETURN_FALSE(hash_tree_builder_->BuildHashTree());
+        TEST_AND_RETURN_FALSE(hash_tree_builder_->WriteHashTreeToFd(
+            fd, partition_->hash_tree_offset));
+        hash_tree_builder_.reset();
+      }
+    }
+  }
+  if (partition_->fec_size != 0) {
+    uint64_t fec_data_end =
+        partition_->fec_data_offset + partition_->fec_data_size;
+    if (offset < fec_data_end && offset + size >= fec_data_end) {
+      LOG(INFO) << "Writing verity FEC to " << partition_->target_path;
+      TEST_AND_RETURN_FALSE(EncodeFEC(partition_->target_path,
+                                      partition_->fec_data_offset,
+                                      partition_->fec_data_size,
+                                      partition_->fec_offset,
+                                      partition_->fec_size,
+                                      partition_->fec_roots,
+                                      partition_->block_size,
+                                      false /* verify_mode */));
+    }
+  }
+  return true;
+}
+
+bool VerityWriterAndroid::EncodeFEC(const std::string& path,
+                                    uint64_t data_offset,
+                                    uint64_t data_size,
+                                    uint64_t fec_offset,
+                                    uint64_t fec_size,
+                                    uint32_t fec_roots,
+                                    uint32_t block_size,
+                                    bool verify_mode) {
+  TEST_AND_RETURN_FALSE(data_size % block_size == 0);
+  TEST_AND_RETURN_FALSE(fec_roots >= 0 && fec_roots < FEC_RSM);
+  // This is the N in RS(M, N), which is the number of bytes for each rs block.
+  size_t rs_n = FEC_RSM - fec_roots;
+  uint64_t rounds = utils::DivRoundUp(data_size / block_size, rs_n);
+  TEST_AND_RETURN_FALSE(rounds * fec_roots * block_size == fec_size);
+
+  std::unique_ptr<void, decltype(&free_rs_char)> rs_char(
+      init_rs_char(FEC_PARAMS(fec_roots)), &free_rs_char);
+  TEST_AND_RETURN_FALSE(rs_char != nullptr);
+
+  int fd = HANDLE_EINTR(open(path.c_str(), verify_mode ? O_RDONLY : O_RDWR));
+  if (fd < 0) {
+    PLOG(ERROR) << "Failed to open " << path << " to write FEC.";
+    return false;
+  }
+  ScopedFdCloser fd_closer(&fd);
+
+  for (size_t i = 0; i < rounds; i++) {
+    // Encode |block_size| rs blocks in each round so that we can read one
+    // whole block at a time instead of one byte, which improves random read
+    // performance. This uses about 1 MiB of memory for a 4K block size.
+    brillo::Blob rs_blocks(block_size * rs_n);
+    for (size_t j = 0; j < rs_n; j++) {
+      brillo::Blob buffer(block_size, 0);
+      uint64_t offset =
+          fec_ecc_interleave(i * rs_n * block_size + j, rs_n, rounds);
+      // Don't read past |data_size|; treat the remaining bytes as 0.
+      if (offset < data_size) {
+        ssize_t bytes_read = 0;
+        TEST_AND_RETURN_FALSE(utils::PReadAll(fd,
+                                              buffer.data(),
+                                              buffer.size(),
+                                              data_offset + offset,
+                                              &bytes_read));
+        TEST_AND_RETURN_FALSE(bytes_read ==
+                              static_cast<ssize_t>(buffer.size()));
+      }
+      for (size_t k = 0; k < buffer.size(); k++) {
+        rs_blocks[k * rs_n + j] = buffer[k];
+      }
+    }
+    brillo::Blob fec(block_size * fec_roots);
+    for (size_t j = 0; j < block_size; j++) {
+      // Encode [j * rs_n : (j + 1) * rs_n) in |rs_blocks| and write
+      // |fec_roots| parity bytes at offset |j * fec_roots| in |fec|.
+      encode_rs_char(rs_char.get(),
+                     rs_blocks.data() + j * rs_n,
+                     fec.data() + j * fec_roots);
+    }
+
+    if (verify_mode) {
+      brillo::Blob fec_read(fec.size());
+      ssize_t bytes_read = 0;
+      TEST_AND_RETURN_FALSE(utils::PReadAll(
+          fd, fec_read.data(), fec_read.size(), fec_offset, &bytes_read));
+      TEST_AND_RETURN_FALSE(bytes_read ==
+                            static_cast<ssize_t>(fec_read.size()));
+      TEST_AND_RETURN_FALSE(fec == fec_read);
+    } else {
+      TEST_AND_RETURN_FALSE(
+          utils::PWriteAll(fd, fec.data(), fec.size(), fec_offset));
+    }
+    fec_offset += fec.size();
+  }
+
+  return true;
+}
+}  // namespace chromeos_update_engine
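Editor's note: the size check at the top of EncodeFEC() encodes the RS(255, N) geometry used above: N = FEC_RSM - fec_roots data bytes feed each codeword, the data blocks are grouped into ceil(data_blocks / N) rounds, and every round emits fec_roots parity bytes per byte position of a block. A minimal standalone arithmetic sketch, assuming FEC_RSM is 255 (the name ExpectedFecSize is invented for illustration):

#include <cstdint>
#include <cstdio>

constexpr uint32_t kFecRsm = 255;  // assumed value of FEC_RSM

uint64_t ExpectedFecSize(uint64_t data_size,
                         uint32_t block_size,
                         uint32_t fec_roots) {
  const uint64_t data_blocks = data_size / block_size;
  const uint64_t rs_n = kFecRsm - fec_roots;                // data bytes per codeword
  const uint64_t rounds = (data_blocks + rs_n - 1) / rs_n;  // DivRoundUp
  return rounds * fec_roots * block_size;
}

int main() {
  // 4 MiB of data, 4K blocks, 2 parity roots -> 5 rounds * 2 * 4096 = 40960.
  printf("%llu\n",
         static_cast<unsigned long long>(ExpectedFecSize(4 << 20, 4096, 2)));
  return 0;
}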
diff --git a/payload_consumer/verity_writer_android.h b/payload_consumer/verity_writer_android.h
new file mode 100644
index 0000000..05a5856
--- /dev/null
+++ b/payload_consumer/verity_writer_android.h
@@ -0,0 +1,62 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_ANDROID_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_ANDROID_H_
+
+#include <memory>
+#include <string>
+
+#include <verity/hash_tree_builder.h>
+
+#include "update_engine/payload_consumer/verity_writer_interface.h"
+
+namespace chromeos_update_engine {
+
+class VerityWriterAndroid : public VerityWriterInterface {
+ public:
+  VerityWriterAndroid() = default;
+  ~VerityWriterAndroid() override = default;
+
+  bool Init(const InstallPlan::Partition& partition) override;
+  bool Update(uint64_t offset, const uint8_t* buffer, size_t size) override;
+
+  // Reads [data_offset : data_offset + data_size) from |path| and encodes
+  // FEC data. If |verify_mode| is set, compares the encoded FEC with the one
+  // already in |path|; otherwise writes the encoded FEC to |path|. We can't
+  // encode as we go in each Update() the way we do for the hash tree, because
+  // the data of every rs block is spread across the entire |data_size|;
+  // unless we cache all the data in memory, we have to re-read it from disk.
+  static bool EncodeFEC(const std::string& path,
+                        uint64_t data_offset,
+                        uint64_t data_size,
+                        uint64_t fec_offset,
+                        uint64_t fec_size,
+                        uint32_t fec_roots,
+                        uint32_t block_size,
+                        bool verify_mode);
+
+ private:
+  const InstallPlan::Partition* partition_ = nullptr;
+
+  std::unique_ptr<HashTreeBuilder> hash_tree_builder_;
+
+  DISALLOW_COPY_AND_ASSIGN(VerityWriterAndroid);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_ANDROID_H_
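Editor's note: a minimal, hedged sketch of how a caller drives the new writer. The geometry mirrors the SimpleTest case in the unit test added below; the target path and function name are made up, and the partition data is assumed to already be written to the target before the hashed range completes. Init() validates the hash tree geometry, and streaming the partition through Update() in order triggers the hash tree write once the hashed range has been fully seen.

#include <algorithm>
#include <cstdint>
#include <vector>

#include "update_engine/payload_consumer/verity_writer_android.h"

namespace chromeos_update_engine {

bool WriteVerityExample(const std::vector<uint8_t>& part_data) {
  InstallPlan::Partition part;
  part.target_path = "/tmp/verity_example_part";  // hypothetical path
  part.block_size = 4096;
  part.hash_tree_data_offset = 0;
  part.hash_tree_data_size = 4096;
  part.hash_tree_offset = 4096;
  part.hash_tree_size = 4096;
  part.hash_tree_algorithm = "sha1";

  VerityWriterAndroid writer;
  if (!writer.Init(part))
    return false;
  // Stream the partition contents in order; the hash tree is written to
  // |target_path| as soon as the hashed range [0, 4096) has been seen.
  for (uint64_t off = 0; off < part_data.size(); off += part.block_size) {
    uint64_t len = std::min<uint64_t>(part.block_size, part_data.size() - off);
    if (!writer.Update(off, part_data.data() + off, len))
      return false;
  }
  return true;
}

}  // namespace chromeos_update_engine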
diff --git a/payload_consumer/verity_writer_android_unittest.cc b/payload_consumer/verity_writer_android_unittest.cc
new file mode 100644
index 0000000..f943ce8
--- /dev/null
+++ b/payload_consumer/verity_writer_android_unittest.cc
@@ -0,0 +1,120 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/verity_writer_android.h"
+
+#include <brillo/secure_blob.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+
+namespace chromeos_update_engine {
+
+class VerityWriterAndroidTest : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    partition_.target_path = temp_file_.path();
+    partition_.block_size = 4096;
+    partition_.hash_tree_data_offset = 0;
+    partition_.hash_tree_data_size = 4096;
+    partition_.hash_tree_offset = 4096;
+    partition_.hash_tree_size = 4096;
+    partition_.hash_tree_algorithm = "sha1";
+    partition_.fec_roots = 2;
+  }
+
+  VerityWriterAndroid verity_writer_;
+  InstallPlan::Partition partition_;
+  test_utils::ScopedTempFile temp_file_;
+};
+
+TEST_F(VerityWriterAndroidTest, SimpleTest) {
+  brillo::Blob part_data(8192);
+  test_utils::WriteFileVector(partition_.target_path, part_data);
+  ASSERT_TRUE(verity_writer_.Init(partition_));
+  EXPECT_TRUE(verity_writer_.Update(0, part_data.data(), 4096));
+  EXPECT_TRUE(verity_writer_.Update(4096, part_data.data() + 4096, 4096));
+  brillo::Blob actual_part;
+  utils::ReadFile(partition_.target_path, &actual_part);
+  // dd if=/dev/zero bs=4096 count=1 2>/dev/null | sha1sum | xxd -r -p |
+  //     hexdump -v -e '/1 "0x%02x, "'
+  brillo::Blob hash = {0x1c, 0xea, 0xf7, 0x3d, 0xf4, 0x0e, 0x53,
+                       0x1d, 0xf3, 0xbf, 0xb2, 0x6b, 0x4f, 0xb7,
+                       0xcd, 0x95, 0xfb, 0x7b, 0xff, 0x1d};
+  memcpy(part_data.data() + 4096, hash.data(), hash.size());
+  EXPECT_EQ(part_data, actual_part);
+}
+
+TEST_F(VerityWriterAndroidTest, NoOpTest) {
+  partition_.hash_tree_data_size = 0;
+  partition_.hash_tree_size = 0;
+  brillo::Blob part_data(4096);
+  ASSERT_TRUE(verity_writer_.Init(partition_));
+  EXPECT_TRUE(verity_writer_.Update(0, part_data.data(), part_data.size()));
+  EXPECT_TRUE(verity_writer_.Update(4096, part_data.data(), part_data.size()));
+  EXPECT_TRUE(verity_writer_.Update(8192, part_data.data(), part_data.size()));
+}
+
+TEST_F(VerityWriterAndroidTest, InvalidHashAlgorithmTest) {
+  partition_.hash_tree_algorithm = "sha123";
+  EXPECT_FALSE(verity_writer_.Init(partition_));
+}
+
+TEST_F(VerityWriterAndroidTest, WrongHashTreeSizeTest) {
+  partition_.hash_tree_size = 8192;
+  EXPECT_FALSE(verity_writer_.Init(partition_));
+}
+
+TEST_F(VerityWriterAndroidTest, SHA256Test) {
+  partition_.hash_tree_algorithm = "sha256";
+  brillo::Blob part_data(8192);
+  test_utils::WriteFileVector(partition_.target_path, part_data);
+  ASSERT_TRUE(verity_writer_.Init(partition_));
+  EXPECT_TRUE(verity_writer_.Update(0, part_data.data(), 4096));
+  EXPECT_TRUE(verity_writer_.Update(4096, part_data.data() + 4096, 4096));
+  brillo::Blob actual_part;
+  utils::ReadFile(partition_.target_path, &actual_part);
+  // dd if=/dev/zero bs=4096 count=1 2>/dev/null | sha256sum | xxd -r -p |
+  //     hexdump -v -e '/1 "0x%02x, "'
+  brillo::Blob hash = {0xad, 0x7f, 0xac, 0xb2, 0x58, 0x6f, 0xc6, 0xe9,
+                       0x66, 0xc0, 0x04, 0xd7, 0xd1, 0xd1, 0x6b, 0x02,
+                       0x4f, 0x58, 0x05, 0xff, 0x7c, 0xb4, 0x7c, 0x7a,
+                       0x85, 0xda, 0xbd, 0x8b, 0x48, 0x89, 0x2c, 0xa7};
+  memcpy(part_data.data() + 4096, hash.data(), hash.size());
+  EXPECT_EQ(part_data, actual_part);
+}
+
+TEST_F(VerityWriterAndroidTest, FECTest) {
+  partition_.fec_data_offset = 0;
+  partition_.fec_data_size = 4096;
+  partition_.fec_offset = 4096;
+  partition_.fec_size = 2 * 4096;
+  brillo::Blob part_data(3 * 4096, 0x1);
+  test_utils::WriteFileVector(partition_.target_path, part_data);
+  ASSERT_TRUE(verity_writer_.Init(partition_));
+  EXPECT_TRUE(verity_writer_.Update(0, part_data.data(), part_data.size()));
+  brillo::Blob actual_part;
+  utils::ReadFile(partition_.target_path, &actual_part);
+  // Write FEC data.
+  for (size_t i = 4096; i < part_data.size(); i += 2) {
+    part_data[i] = 0x8e;
+    part_data[i + 1] = 0x8f;
+  }
+  EXPECT_EQ(part_data, actual_part);
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/verity_writer_interface.h b/payload_consumer/verity_writer_interface.h
new file mode 100644
index 0000000..a3ecef3
--- /dev/null
+++ b/payload_consumer/verity_writer_interface.h
@@ -0,0 +1,53 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_INTERFACE_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_INTERFACE_H_
+
+#include <cstdint>
+#include <memory>
+
+#include <base/macros.h>
+
+#include "update_engine/payload_consumer/install_plan.h"
+
+namespace chromeos_update_engine {
+
+class VerityWriterInterface {
+ public:
+  virtual ~VerityWriterInterface() = default;
+
+  virtual bool Init(const InstallPlan::Partition& partition) = 0;
+  // Update partition data at [offset : offset + size) stored in |buffer|.
+  // Data not in |hash_tree_data_extent| or |fec_data_extent| is ignored.
+  // Writes the verity data to the target partition once all the necessary
+  // blocks have been passed in.
+  virtual bool Update(uint64_t offset, const uint8_t* buffer, size_t size) = 0;
+
+ protected:
+  VerityWriterInterface() = default;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(VerityWriterInterface);
+};
+
+namespace verity_writer {
+std::unique_ptr<VerityWriterInterface> CreateVerityWriter();
+}
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_INTERFACE_H_
diff --git a/payload_consumer/verity_writer_stub.cc b/payload_consumer/verity_writer_stub.cc
new file mode 100644
index 0000000..a0e2467
--- /dev/null
+++ b/payload_consumer/verity_writer_stub.cc
@@ -0,0 +1,39 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/verity_writer_stub.h"
+
+#include <memory>
+
+namespace chromeos_update_engine {
+
+namespace verity_writer {
+std::unique_ptr<VerityWriterInterface> CreateVerityWriter() {
+  return std::make_unique<VerityWriterStub>();
+}
+}  // namespace verity_writer
+
+bool VerityWriterStub::Init(const InstallPlan::Partition& partition) {
+  return partition.hash_tree_size == 0 && partition.fec_size == 0;
+}
+
+bool VerityWriterStub::Update(uint64_t offset,
+                              const uint8_t* buffer,
+                              size_t size) {
+  return true;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/verity_writer_stub.h b/payload_consumer/verity_writer_stub.h
new file mode 100644
index 0000000..ea5e574
--- /dev/null
+++ b/payload_consumer/verity_writer_stub.h
@@ -0,0 +1,38 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_STUB_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_STUB_H_
+
+#include "update_engine/payload_consumer/verity_writer_interface.h"
+
+namespace chromeos_update_engine {
+
+class VerityWriterStub : public VerityWriterInterface {
+ public:
+  VerityWriterStub() = default;
+  ~VerityWriterStub() override = default;
+
+  bool Init(const InstallPlan::Partition& partition) override;
+  bool Update(uint64_t offset, const uint8_t* buffer, size_t size) override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(VerityWriterStub);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_STUB_H_
diff --git a/payload_consumer/xz_extent_writer.cc b/payload_consumer/xz_extent_writer.cc
index 343ed80..a5b939d 100644
--- a/payload_consumer/xz_extent_writer.cc
+++ b/payload_consumer/xz_extent_writer.cc
@@ -32,7 +32,9 @@
 const uint32_t kXzMaxDictSize = 64 * 1024 * 1024;
 
 const char* XzErrorString(enum xz_ret error) {
-  #define __XZ_ERROR_STRING_CASE(code) case code: return #code;
+#define __XZ_ERROR_STRING_CASE(code) \
+  case code:                         \
+    return #code;
   switch (error) {
     __XZ_ERROR_STRING_CASE(XZ_OK)
     __XZ_ERROR_STRING_CASE(XZ_STREAM_END)
@@ -46,12 +48,13 @@
     default:
       return "<unknown xz error>";
   }
-  #undef __XZ_ERROR_STRING_CASE
+#undef __XZ_ERROR_STRING_CASE
 }
 }  // namespace
 
 XzExtentWriter::~XzExtentWriter() {
   xz_dec_end(stream_);
+  TEST_AND_RETURN(input_buffer_.empty());
 }
 
 bool XzExtentWriter::Init(FileDescriptorPtr fd,
@@ -110,9 +113,4 @@
   return true;
 }
 
-bool XzExtentWriter::EndImpl() {
-  TEST_AND_RETURN_FALSE(input_buffer_.empty());
-  return underlying_writer_->End();
-}
-
 }  // namespace chromeos_update_engine
diff --git a/payload_consumer/xz_extent_writer.h b/payload_consumer/xz_extent_writer.h
index 5e50256..e022274 100644
--- a/payload_consumer/xz_extent_writer.h
+++ b/payload_consumer/xz_extent_writer.h
@@ -43,7 +43,6 @@
             const google::protobuf::RepeatedPtrField<Extent>& extents,
             uint32_t block_size) override;
   bool Write(const void* bytes, size_t count) override;
-  bool EndImpl() override;
 
  private:
   // The underlying ExtentWriter.
diff --git a/payload_consumer/xz_extent_writer_unittest.cc b/payload_consumer/xz_extent_writer_unittest.cc
index c8bcdf9..34980a9 100644
--- a/payload_consumer/xz_extent_writer_unittest.cc
+++ b/payload_consumer/xz_extent_writer_unittest.cc
@@ -89,10 +89,8 @@
   void WriteAll(const brillo::Blob& compressed) {
     EXPECT_TRUE(xz_writer_->Init(fd_, {}, 1024));
     EXPECT_TRUE(xz_writer_->Write(compressed.data(), compressed.size()));
-    EXPECT_TRUE(xz_writer_->End());
 
     EXPECT_TRUE(fake_extent_writer_->InitCalled());
-    EXPECT_TRUE(fake_extent_writer_->EndCalled());
   }
 
   // Owned by |xz_writer_|. This object is invalidated after |xz_writer_| is
@@ -101,26 +99,24 @@
   std::unique_ptr<XzExtentWriter> xz_writer_;
 
   const brillo::Blob sample_data_{
-      std::begin(kSampleData),
-      std::begin(kSampleData) + strlen(kSampleData)};
+      std::begin(kSampleData), std::begin(kSampleData) + strlen(kSampleData)};
   FileDescriptorPtr fd_;
 };
 
 TEST_F(XzExtentWriterTest, CreateAndDestroy) {
   // Test that no Init() or End() called doesn't crash the program.
   EXPECT_FALSE(fake_extent_writer_->InitCalled());
-  EXPECT_FALSE(fake_extent_writer_->EndCalled());
 }
 
 TEST_F(XzExtentWriterTest, CompressedSampleData) {
   WriteAll(brillo::Blob(std::begin(kCompressedDataNoCheck),
-                          std::end(kCompressedDataNoCheck)));
+                        std::end(kCompressedDataNoCheck)));
   EXPECT_EQ(sample_data_, fake_extent_writer_->WrittenData());
 }
 
 TEST_F(XzExtentWriterTest, CompressedSampleDataWithCrc) {
   WriteAll(brillo::Blob(std::begin(kCompressedDataCRC32),
-                          std::end(kCompressedDataCRC32)));
+                        std::end(kCompressedDataCRC32)));
   EXPECT_EQ(sample_data_, fake_extent_writer_->WrittenData());
 }
 
@@ -128,7 +124,7 @@
   // Test that even if the output data is bigger than the internal buffer, all
   // the data is written.
   WriteAll(brillo::Blob(std::begin(kCompressed30KiBofA),
-                          std::end(kCompressed30KiBofA)));
+                        std::end(kCompressed30KiBofA)));
   brillo::Blob expected_data(30 * 1024, 'a');
   EXPECT_EQ(expected_data, fake_extent_writer_->WrittenData());
 }
@@ -137,19 +133,15 @@
   EXPECT_TRUE(xz_writer_->Init(fd_, {}, 1024));
   // The sample_data_ is an uncompressed string.
   EXPECT_FALSE(xz_writer_->Write(sample_data_.data(), sample_data_.size()));
-  EXPECT_TRUE(xz_writer_->End());
-
-  EXPECT_TRUE(fake_extent_writer_->EndCalled());
 }
 
 TEST_F(XzExtentWriterTest, PartialDataIsKept) {
   brillo::Blob compressed(std::begin(kCompressed30KiBofA),
-                            std::end(kCompressed30KiBofA));
+                          std::end(kCompressed30KiBofA));
   EXPECT_TRUE(xz_writer_->Init(fd_, {}, 1024));
   for (uint8_t byte : compressed) {
     EXPECT_TRUE(xz_writer_->Write(&byte, 1));
   }
-  EXPECT_TRUE(xz_writer_->End());
 
   // The sample_data_ is an uncompressed string.
   brillo::Blob expected_data(30 * 1024, 'a');
diff --git a/payload_generator/ab_generator.cc b/payload_generator/ab_generator.cc
index 089dfd9..d9b9d88 100644
--- a/payload_generator/ab_generator.cc
+++ b/payload_generator/ab_generator.cc
@@ -35,16 +35,17 @@
 
 namespace chromeos_update_engine {
 
-bool ABGenerator::GenerateOperations(
-    const PayloadGenerationConfig& config,
-    const PartitionConfig& old_part,
-    const PartitionConfig& new_part,
-    BlobFileWriter* blob_file,
-    vector<AnnotatedOperation>* aops) {
+bool ABGenerator::GenerateOperations(const PayloadGenerationConfig& config,
+                                     const PartitionConfig& old_part,
+                                     const PartitionConfig& new_part,
+                                     BlobFileWriter* blob_file,
+                                     vector<AnnotatedOperation>* aops) {
   TEST_AND_RETURN_FALSE(old_part.name == new_part.name);
 
-  ssize_t hard_chunk_blocks = (config.hard_chunk_size == -1 ? -1 :
-                               config.hard_chunk_size / config.block_size);
+  ssize_t hard_chunk_blocks =
+      (config.hard_chunk_size == -1
+           ? -1
+           : config.hard_chunk_size / config.block_size);
   size_t soft_chunk_blocks = config.soft_chunk_size / config.block_size;
 
   aops->clear();
@@ -57,8 +58,6 @@
                                                        blob_file));
   LOG(INFO) << "done reading " << new_part.name;
 
-  TEST_AND_RETURN_FALSE(
-      FragmentOperations(config.version, aops, new_part.path, blob_file));
   SortOperationsByDestination(aops);
 
   // Use the soft_chunk_size when merging operations to prevent merging all
@@ -69,8 +68,10 @@
     merge_chunk_blocks = hard_chunk_blocks;
   }
 
+  LOG(INFO) << "Merging " << aops->size() << " operations.";
   TEST_AND_RETURN_FALSE(MergeOperations(
       aops, config.version, merge_chunk_blocks, new_part.path, blob_file));
+  LOG(INFO) << aops->size() << " operations after merge.";
 
   if (config.version.minor >= kOpSrcHashMinorPayloadVersion)
     TEST_AND_RETURN_FALSE(AddSourceHash(aops, old_part.path));
@@ -107,9 +108,8 @@
   return true;
 }
 
-bool ABGenerator::SplitSourceCopy(
-    const AnnotatedOperation& original_aop,
-    vector<AnnotatedOperation>* result_aops) {
+bool ABGenerator::SplitSourceCopy(const AnnotatedOperation& original_aop,
+                                  vector<AnnotatedOperation>* result_aops) {
   InstallOperation original_op = original_aop.op;
   TEST_AND_RETURN_FALSE(original_op.type() == InstallOperation::SOURCE_COPY);
   // Keeps track of the index of curr_src_ext.
@@ -167,13 +167,13 @@
   TEST_AND_RETURN_FALSE(IsAReplaceOperation(original_op.type()));
   const bool is_replace = original_op.type() == InstallOperation::REPLACE;
 
-  uint32_t data_offset = original_op.data_offset();
+  uint64_t data_offset = original_op.data_offset();
   for (int i = 0; i < original_op.dst_extents_size(); i++) {
     const Extent& dst_ext = original_op.dst_extents(i);
     // Make a new operation with only one dst extent.
     InstallOperation new_op;
     *(new_op.add_dst_extents()) = dst_ext;
-    uint32_t data_size = dst_ext.num_blocks() * kBlockSize;
+    uint64_t data_size = dst_ext.num_blocks() * kBlockSize;
     // If this is a REPLACE, attempt to reuse portions of the existing blob.
     if (is_replace) {
       new_op.set_type(InstallOperation::REPLACE);
@@ -231,9 +231,8 @@
       // merge), are contiguous, are fragmented to have one destination extent,
       // and their combined block count would be less than chunk size, merge
       // them.
-      last_aop.name = base::StringPrintf("%s,%s",
-                                         last_aop.name.c_str(),
-                                         curr_aop.name.c_str());
+      last_aop.name = base::StringPrintf(
+          "%s,%s", last_aop.name.c_str(), curr_aop.name.c_str());
 
       if (is_delta_op) {
         ExtendExtents(last_aop.op.mutable_src_extents(),
@@ -273,14 +272,11 @@
   vector<Extent> dst_extents;
   ExtentsToVector(aop->op.dst_extents(), &dst_extents);
   brillo::Blob data(utils::BlocksInExtents(dst_extents) * kBlockSize);
-  TEST_AND_RETURN_FALSE(utils::ReadExtents(target_part_path,
-                                           dst_extents,
-                                           &data,
-                                           data.size(),
-                                           kBlockSize));
+  TEST_AND_RETURN_FALSE(utils::ReadExtents(
+      target_part_path, dst_extents, &data, data.size(), kBlockSize));
 
   brillo::Blob blob;
-  InstallOperation_Type op_type;
+  InstallOperation::Type op_type;
   TEST_AND_RETURN_FALSE(
       diff_utils::GenerateBestFullOperation(data, version, &blob, &op_type));
 
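Editor's note on the merge step above: the chunk-size selection is plain integer arithmetic; a hard chunk size of -1 means "unlimited", otherwise operations are merged up to the smaller of the soft and hard chunk sizes expressed in blocks. A hedged standalone sketch (MergeChunkBlocks is an invented name, not part of the change):

#include <sys/types.h>

#include <cstddef>

// Mirrors the chunk-size selection in ABGenerator::GenerateOperations().
ssize_t MergeChunkBlocks(ssize_t hard_chunk_size,
                         size_t soft_chunk_size,
                         size_t block_size) {
  ssize_t hard_chunk_blocks =
      hard_chunk_size == -1
          ? -1
          : static_cast<ssize_t>(static_cast<size_t>(hard_chunk_size) /
                                 block_size);
  ssize_t merge_chunk_blocks =
      static_cast<ssize_t>(soft_chunk_size / block_size);
  if (hard_chunk_blocks != -1 && hard_chunk_blocks < merge_chunk_blocks)
    merge_chunk_blocks = hard_chunk_blocks;
  return merge_chunk_blocks;
}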
diff --git a/payload_generator/ab_generator.h b/payload_generator/ab_generator.h
index 343b546..2accf1e 100644
--- a/payload_generator/ab_generator.h
+++ b/payload_generator/ab_generator.h
@@ -48,12 +48,11 @@
   // order. The operations are stored in |aops| and should be executed in that
   // order. All the offsets in the operations reference the data written to
   // |blob_file|.
-  bool GenerateOperations(
-      const PayloadGenerationConfig& config,
-      const PartitionConfig& old_part,
-      const PartitionConfig& new_part,
-      BlobFileWriter* blob_file,
-      std::vector<AnnotatedOperation>* aops) override;
+  bool GenerateOperations(const PayloadGenerationConfig& config,
+                          const PartitionConfig& old_part,
+                          const PartitionConfig& new_part,
+                          BlobFileWriter* blob_file,
+                          std::vector<AnnotatedOperation>* aops) override;
 
   // Split the operations in the vector of AnnotatedOperations |aops| such that
   // for every operation there is only one dst extent and updates |aops| with
diff --git a/payload_generator/ab_generator_unittest.cc b/payload_generator/ab_generator_unittest.cc
index 25609c7..270657a 100644
--- a/payload_generator/ab_generator_unittest.cc
+++ b/payload_generator/ab_generator_unittest.cc
@@ -49,7 +49,7 @@
 }
 
 // Tests splitting of a REPLACE/REPLACE_BZ operation.
-void TestSplitReplaceOrReplaceBzOperation(InstallOperation_Type orig_type,
+void TestSplitReplaceOrReplaceBzOperation(InstallOperation::Type orig_type,
                                           bool compressible) {
   const size_t op_ex1_start_block = 2;
   const size_t op_ex1_num_blocks = 2;
@@ -58,10 +58,6 @@
   const size_t part_num_blocks = 7;
 
   // Create the target partition data.
-  string part_path;
-  EXPECT_TRUE(utils::MakeTempFile(
-      "SplitReplaceOrReplaceBzTest_part.XXXXXX", &part_path, nullptr));
-  ScopedPathUnlinker part_path_unlinker(part_path);
   const size_t part_size = part_num_blocks * kBlockSize;
   brillo::Blob part_data;
   if (compressible) {
@@ -74,7 +70,9 @@
       part_data.push_back(dis(gen));
   }
   ASSERT_EQ(part_size, part_data.size());
-  ASSERT_TRUE(utils::WriteFile(part_path.c_str(), part_data.data(), part_size));
+  test_utils::ScopedTempFile part_file(
+      "SplitReplaceOrReplaceBzTest_part.XXXXXX");
+  ASSERT_TRUE(test_utils::WriteFileVector(part_file.path(), part_data));
 
   // Create original operation and blob data.
   const size_t op_ex1_offset = op_ex1_start_block * kBlockSize;
@@ -83,10 +81,10 @@
   const size_t op_ex2_size = op_ex2_num_blocks * kBlockSize;
   InstallOperation op;
   op.set_type(orig_type);
-  *(op.add_dst_extents()) = ExtentForRange(op_ex1_start_block,
-                                           op_ex1_num_blocks);
-  *(op.add_dst_extents()) = ExtentForRange(op_ex2_start_block,
-                                           op_ex2_num_blocks);
+  *(op.add_dst_extents()) =
+      ExtentForRange(op_ex1_start_block, op_ex1_num_blocks);
+  *(op.add_dst_extents()) =
+      ExtentForRange(op_ex2_start_block, op_ex2_num_blocks);
 
   brillo::Blob op_data;
   op_data.insert(op_data.end(),
@@ -109,15 +107,12 @@
   aop.name = "SplitTestOp";
 
   // Create the data file.
-  string data_path;
-  EXPECT_TRUE(utils::MakeTempFile(
-      "SplitReplaceOrReplaceBzTest_data.XXXXXX", &data_path, nullptr));
-  ScopedPathUnlinker data_path_unlinker(data_path);
-  int data_fd = open(data_path.c_str(), O_RDWR, 000);
+  test_utils::ScopedTempFile data_file(
+      "SplitReplaceOrReplaceBzTest_data.XXXXXX");
+  EXPECT_TRUE(test_utils::WriteFileVector(data_file.path(), op_blob));
+  int data_fd = open(data_file.path().c_str(), O_RDWR, 000);
   EXPECT_GE(data_fd, 0);
   ScopedFdCloser data_fd_closer(&data_fd);
-  EXPECT_TRUE(utils::WriteFile(data_path.c_str(), op_blob.data(),
-                               op_blob.size()));
   off_t data_file_size = op_blob.size();
   BlobFileWriter blob_file(data_fd, &data_file_size);
 
@@ -126,10 +121,10 @@
   PayloadVersion version(kChromeOSMajorPayloadVersion,
                          kSourceMinorPayloadVersion);
   ASSERT_TRUE(ABGenerator::SplitAReplaceOp(
-      version, aop, part_path, &result_ops, &blob_file));
+      version, aop, part_file.path(), &result_ops, &blob_file));
 
   // Check the result.
-  InstallOperation_Type expected_type =
+  InstallOperation::Type expected_type =
       compressible ? InstallOperation::REPLACE_BZ : InstallOperation::REPLACE;
 
   ASSERT_EQ(2U, result_ops.size());
@@ -140,8 +135,8 @@
   EXPECT_FALSE(first_op.has_src_length());
   EXPECT_FALSE(first_op.has_dst_length());
   EXPECT_EQ(1, first_op.dst_extents().size());
-  EXPECT_TRUE(ExtentEquals(first_op.dst_extents(0), op_ex1_start_block,
-                           op_ex1_num_blocks));
+  EXPECT_TRUE(ExtentEquals(
+      first_op.dst_extents(0), op_ex1_start_block, op_ex1_num_blocks));
   // Obtain the expected blob.
   brillo::Blob first_expected_data(
       part_data.begin() + op_ex1_offset,
@@ -170,8 +165,8 @@
   EXPECT_FALSE(second_op.has_src_length());
   EXPECT_FALSE(second_op.has_dst_length());
   EXPECT_EQ(1, second_op.dst_extents().size());
-  EXPECT_TRUE(ExtentEquals(second_op.dst_extents(0), op_ex2_start_block,
-                           op_ex2_num_blocks));
+  EXPECT_TRUE(ExtentEquals(
+      second_op.dst_extents(0), op_ex2_start_block, op_ex2_num_blocks));
   // Obtain the expected blob.
   brillo::Blob second_expected_data(
       part_data.begin() + op_ex2_offset,
@@ -196,7 +191,8 @@
   // Check relative layout of data blobs.
   EXPECT_EQ(first_op.data_offset() + first_op.data_length(),
             second_op.data_offset());
-  EXPECT_EQ(second_op.data_offset() + second_op.data_length(), data_file_size);
+  EXPECT_EQ(second_op.data_offset() + second_op.data_length(),
+            static_cast<uint64_t>(data_file_size));
   // If we split a REPLACE into multiple ones, ensure reuse of preexisting blob.
   if (!compressible && orig_type == InstallOperation::REPLACE) {
     EXPECT_EQ(0U, first_op.data_offset());
@@ -204,7 +200,7 @@
 }
 
 // Tests merging of REPLACE/REPLACE_BZ operations.
-void TestMergeReplaceOrReplaceBzOperations(InstallOperation_Type orig_type,
+void TestMergeReplaceOrReplaceBzOperations(InstallOperation::Type orig_type,
                                            bool compressible) {
   const size_t first_op_num_blocks = 1;
   const size_t second_op_num_blocks = 2;
@@ -212,10 +208,6 @@
   const size_t part_num_blocks = total_op_num_blocks + 2;
 
   // Create the target partition data.
-  string part_path;
-  EXPECT_TRUE(utils::MakeTempFile(
-      "MergeReplaceOrReplaceBzTest_part.XXXXXX", &part_path, nullptr));
-  ScopedPathUnlinker part_path_unlinker(part_path);
   const size_t part_size = part_num_blocks * kBlockSize;
   brillo::Blob part_data;
   if (compressible) {
@@ -228,7 +220,9 @@
       part_data.push_back(dis(gen));
   }
   ASSERT_EQ(part_size, part_data.size());
-  ASSERT_TRUE(utils::WriteFile(part_path.c_str(), part_data.data(), part_size));
+  test_utils::ScopedTempFile part_file(
+      "MergeReplaceOrReplaceBzTest_part.XXXXXX");
+  ASSERT_TRUE(test_utils::WriteFileVector(part_file.path(), part_data));
 
   // Create original operations and blob data.
   vector<AnnotatedOperation> aops;
@@ -240,7 +234,7 @@
   const size_t first_op_size = first_op_num_blocks * kBlockSize;
   *(first_op.add_dst_extents()) = ExtentForRange(0, first_op_num_blocks);
   brillo::Blob first_op_data(part_data.begin(),
-                               part_data.begin() + first_op_size);
+                             part_data.begin() + first_op_size);
   brillo::Blob first_op_blob;
   if (orig_type == InstallOperation::REPLACE) {
     first_op_blob = first_op_data;
@@ -257,10 +251,10 @@
 
   InstallOperation second_op;
   second_op.set_type(orig_type);
-  *(second_op.add_dst_extents()) = ExtentForRange(first_op_num_blocks,
-                                                  second_op_num_blocks);
+  *(second_op.add_dst_extents()) =
+      ExtentForRange(first_op_num_blocks, second_op_num_blocks);
   brillo::Blob second_op_data(part_data.begin() + first_op_size,
-                                part_data.begin() + total_op_size);
+                              part_data.begin() + total_op_size);
   brillo::Blob second_op_blob;
   if (orig_type == InstallOperation::REPLACE) {
     second_op_blob = second_op_data;
@@ -269,34 +263,31 @@
   }
   second_op.set_data_offset(first_op_blob.size());
   second_op.set_data_length(second_op_blob.size());
-  blob_data.insert(blob_data.end(), second_op_blob.begin(),
-                   second_op_blob.end());
+  blob_data.insert(
+      blob_data.end(), second_op_blob.begin(), second_op_blob.end());
   AnnotatedOperation second_aop;
   second_aop.op = second_op;
   second_aop.name = "second";
   aops.push_back(second_aop);
 
   // Create the data file.
-  string data_path;
-  EXPECT_TRUE(utils::MakeTempFile(
-      "MergeReplaceOrReplaceBzTest_data.XXXXXX", &data_path, nullptr));
-  ScopedPathUnlinker data_path_unlinker(data_path);
-  int data_fd = open(data_path.c_str(), O_RDWR, 000);
+  test_utils::ScopedTempFile data_file(
+      "MergeReplaceOrReplaceBzTest_data.XXXXXX");
+  EXPECT_TRUE(test_utils::WriteFileVector(data_file.path(), blob_data));
+  int data_fd = open(data_file.path().c_str(), O_RDWR, 000);
   EXPECT_GE(data_fd, 0);
   ScopedFdCloser data_fd_closer(&data_fd);
-  EXPECT_TRUE(utils::WriteFile(data_path.c_str(), blob_data.data(),
-                               blob_data.size()));
   off_t data_file_size = blob_data.size();
   BlobFileWriter blob_file(data_fd, &data_file_size);
 
   // Merge the operations.
   PayloadVersion version(kChromeOSMajorPayloadVersion,
                          kSourceMinorPayloadVersion);
-  EXPECT_TRUE(
-      ABGenerator::MergeOperations(&aops, version, 5, part_path, &blob_file));
+  EXPECT_TRUE(ABGenerator::MergeOperations(
+      &aops, version, 5, part_file.path(), &blob_file));
 
   // Check the result.
-  InstallOperation_Type expected_op_type =
+  InstallOperation::Type expected_op_type =
       compressible ? InstallOperation::REPLACE_BZ : InstallOperation::REPLACE;
   EXPECT_EQ(1U, aops.size());
   InstallOperation new_op = aops[0].op;
@@ -309,7 +300,7 @@
 
   // Check to see if the blob pointed to in the new extent has what we expect.
   brillo::Blob expected_data(part_data.begin(),
-                               part_data.begin() + total_op_size);
+                             part_data.begin() + total_op_size);
   brillo::Blob expected_blob;
   if (compressible) {
     ASSERT_TRUE(BzipCompress(expected_data, &expected_blob));
@@ -570,16 +561,12 @@
   second_aop.op = second_op;
   aops.push_back(second_aop);
 
-  string src_part_path;
-  EXPECT_TRUE(utils::MakeTempFile("AddSourceHashTest_src_part.XXXXXX",
-                                  &src_part_path, nullptr));
-  ScopedPathUnlinker src_part_path_unlinker(src_part_path);
+  test_utils::ScopedTempFile src_part_file("AddSourceHashTest_src_part.XXXXXX");
   brillo::Blob src_data(kBlockSize);
   test_utils::FillWithData(&src_data);
-  ASSERT_TRUE(utils::WriteFile(src_part_path.c_str(), src_data.data(),
-                               src_data.size()));
+  ASSERT_TRUE(test_utils::WriteFileVector(src_part_file.path(), src_data));
 
-  EXPECT_TRUE(ABGenerator::AddSourceHash(&aops, src_part_path));
+  EXPECT_TRUE(ABGenerator::AddSourceHash(&aops, src_part_file.path()));
 
   EXPECT_TRUE(aops[0].op.has_src_sha256_hash());
   EXPECT_FALSE(aops[1].op.has_src_sha256_hash());
diff --git a/payload_generator/annotated_operation.cc b/payload_generator/annotated_operation.cc
index e28fe85..5637cb1 100644
--- a/payload_generator/annotated_operation.cc
+++ b/payload_generator/annotated_operation.cc
@@ -55,7 +55,7 @@
   // REPLACE_BZ 500 @3000
   //   name: /foo/bar
   //    dst: (123, 3) (127, 2)
-  os << InstallOperationTypeName(aop.op.type()) << " "  << aop.op.data_length();
+  os << InstallOperationTypeName(aop.op.type()) << " " << aop.op.data_length();
   if (aop.op.data_length() > 0)
     os << " @" << aop.op.data_offset();
   if (!aop.name.empty()) {
diff --git a/payload_generator/blob_file_writer.cc b/payload_generator/blob_file_writer.cc
index 8225df4..7cdeb35 100644
--- a/payload_generator/blob_file_writer.cc
+++ b/payload_generator/blob_file_writer.cc
@@ -29,11 +29,10 @@
   *blob_file_size_ += blob.size();
 
   stored_blobs_++;
-  if (total_blobs_ > 0 &&
-      (10 * (stored_blobs_ - 1) / total_blobs_) !=
-      (10 * stored_blobs_ / total_blobs_)) {
-    LOG(INFO) << (100 * stored_blobs_ / total_blobs_)
-              << "% complete " << stored_blobs_ << "/" << total_blobs_
+  if (total_blobs_ > 0 && (10 * (stored_blobs_ - 1) / total_blobs_) !=
+                              (10 * stored_blobs_ / total_blobs_)) {
+    LOG(INFO) << (100 * stored_blobs_ / total_blobs_) << "% complete "
+              << stored_blobs_ << "/" << total_blobs_
               << " ops (output size: " << *blob_file_size_ << ")";
   }
   return result;
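Editor's note: the reformatted condition above fires exactly when stored_blobs_ / total_blobs_ crosses a 10% boundary, so at most ten progress lines are logged per payload. A small standalone illustration (the blob count is arbitrary):

#include <cstdio>

int main() {
  const int total_blobs = 37;  // arbitrary example
  for (int stored = 1; stored <= total_blobs; ++stored) {
    // Same decile check as BlobFileWriter::StoreBlob(); prints ~10 lines.
    if ((10 * (stored - 1) / total_blobs) != (10 * stored / total_blobs)) {
      printf("%d%% complete %d/%d ops\n",
             100 * stored / total_blobs, stored, total_blobs);
    }
  }
  return 0;
}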
diff --git a/payload_generator/blob_file_writer.h b/payload_generator/blob_file_writer.h
index cbc13ae..48553be 100644
--- a/payload_generator/blob_file_writer.h
+++ b/payload_generator/blob_file_writer.h
@@ -29,8 +29,7 @@
   // Create the BlobFileWriter object that will manage the blobs stored to
   // |blob_fd| in a thread safe way.
   BlobFileWriter(int blob_fd, off_t* blob_file_size)
-    : blob_fd_(blob_fd),
-      blob_file_size_(blob_file_size) {}
+      : blob_fd_(blob_fd), blob_file_size_(blob_file_size) {}
 
   // Store the passed |blob| in the blob file. Returns the offset at which it
   // was stored, or -1 in case of failure.
diff --git a/payload_generator/blob_file_writer_unittest.cc b/payload_generator/blob_file_writer_unittest.cc
index 5f94ef3..487bc73 100644
--- a/payload_generator/blob_file_writer_unittest.cc
+++ b/payload_generator/blob_file_writer_unittest.cc
@@ -33,9 +33,8 @@
 TEST(BlobFileWriterTest, SimpleTest) {
   string blob_path;
   int blob_fd;
-  EXPECT_TRUE(utils::MakeTempFile("BlobFileWriterTest.XXXXXX",
-                                  &blob_path,
-                                  &blob_fd));
+  EXPECT_TRUE(
+      utils::MakeTempFile("BlobFileWriterTest.XXXXXX", &blob_path, &blob_fd));
   off_t blob_file_size = 0;
   BlobFileWriter blob_file(blob_fd, &blob_file_size);
 
@@ -47,11 +46,8 @@
 
   brillo::Blob stored_blob(blob_size);
   ssize_t bytes_read;
-  ASSERT_TRUE(utils::PReadAll(blob_fd,
-                              stored_blob.data(),
-                              blob_size,
-                              0,
-                              &bytes_read));
+  ASSERT_TRUE(
+      utils::PReadAll(blob_fd, stored_blob.data(), blob_size, 0, &bytes_read));
   EXPECT_EQ(bytes_read, blob_size);
   EXPECT_EQ(blob, stored_blob);
 }
diff --git a/payload_generator/block_mapping.cc b/payload_generator/block_mapping.cc
index ff10f0b..d0f0178 100644
--- a/payload_generator/block_mapping.cc
+++ b/payload_generator/block_mapping.cc
@@ -22,6 +22,7 @@
 
 #include <functional>
 #include <string>
+#include <utility>
 #include <vector>
 
 #include "update_engine/common/utils.h"
@@ -61,8 +62,8 @@
   bool ret = true;
   block_ids->resize(num_blocks);
   for (size_t block = 0; block < num_blocks; block++) {
-    (*block_ids)[block] = AddDiskBlock(
-        fd, initial_byte_offset + block * block_size_);
+    (*block_ids)[block] =
+        AddDiskBlock(fd, initial_byte_offset + block * block_size_);
     ret = ret && (*block_ids)[block] != -1;
   }
   return ret;
@@ -78,7 +79,7 @@
   // We either reuse a UniqueBlock or create a new one. If we need a new
   // UniqueBlock it could also be part of a new or existing bucket (if there is
   // a hash collision).
-  vector<UniqueBlock> *bucket = nullptr;
+  vector<UniqueBlock>* bucket = nullptr;
 
   auto mapping_it = mapping_.find(h);
   if (mapping_it == mapping_.end()) {
@@ -97,7 +98,7 @@
   // No existing block was found at this point, so we create and fill in a new
   // one.
   bucket->emplace_back();
-  UniqueBlock *new_ublock = &bucket->back();
+  UniqueBlock* new_ublock = &bucket->back();
 
   new_ublock->times_read = 1;
   new_ublock->fd = fd;
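Editor's note: the bucket handling above deduplicates blocks by a cheap hash and resolves hash collisions with a full comparison inside the bucket before assigning a new unique id. A toy sketch of the same pattern (ToyBlockMapping and CheapHash are invented for illustration; this is not the real BlockMapping API):

#include <cstdint>
#include <map>
#include <string>
#include <utility>
#include <vector>

class ToyBlockMapping {
 public:
  size_t AddBlock(const std::string& block) {
    const uint32_t h = CheapHash(block);
    std::vector<std::pair<std::string, size_t>>& bucket = mapping_[h];
    for (const auto& entry : bucket) {
      if (entry.first == block)  // existing identical block, reuse its id
        return entry.second;
    }
    bucket.emplace_back(block, next_id_);  // new unique block (or a collision)
    return next_id_++;
  }

 private:
  static uint32_t CheapHash(const std::string& s) {
    uint32_t h = 0;
    for (char c : s)
      h = h * 31 + static_cast<uint8_t>(c);
    return h;
  }

  // hash -> all distinct blocks seen with that hash.
  std::map<uint32_t, std::vector<std::pair<std::string, size_t>>> mapping_;
  size_t next_id_ = 0;
};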
diff --git a/payload_generator/block_mapping.h b/payload_generator/block_mapping.h
index 3fe94ab..3738f6f 100644
--- a/payload_generator/block_mapping.h
+++ b/payload_generator/block_mapping.h
@@ -53,7 +53,9 @@
   // from the file descriptor |fd| starting at offset |initial_byte_offset|.
   // Returns whether it succeeded to add all the disk blocks and stores in
   // |block_ids| the block id for each one of the added blocks.
-  bool AddManyDiskBlocks(int fd, off_t initial_byte_offset, size_t num_blocks,
+  bool AddManyDiskBlocks(int fd,
+                         off_t initial_byte_offset,
+                         size_t num_blocks,
                          std::vector<BlockId>* block_ids);
 
  private:
diff --git a/payload_generator/block_mapping_unittest.cc b/payload_generator/block_mapping_unittest.cc
index 4d09710..9b9b4f1 100644
--- a/payload_generator/block_mapping_unittest.cc
+++ b/payload_generator/block_mapping_unittest.cc
@@ -33,29 +33,11 @@
 
 namespace chromeos_update_engine {
 
-namespace {
-
-}  // namespace
-
 class BlockMappingTest : public ::testing::Test {
  protected:
-  void SetUp() override {
-    EXPECT_TRUE(utils::MakeTempFile("BlockMappingTest_old.XXXXXX",
-                                    &old_part_path_,
-                                    nullptr));
-    EXPECT_TRUE(utils::MakeTempFile("BlockMappingTest_new.XXXXXX",
-                                    &new_part_path_,
-                                    nullptr));
-
-    old_part_unlinker_.reset(new ScopedPathUnlinker(old_part_path_));
-    new_part_unlinker_.reset(new ScopedPathUnlinker(new_part_path_));
-  }
-
   // Old new partition files used in testing.
-  string old_part_path_;
-  string new_part_path_;
-  std::unique_ptr<ScopedPathUnlinker> old_part_unlinker_;
-  std::unique_ptr<ScopedPathUnlinker> new_part_unlinker_;
+  test_utils::ScopedTempFile old_part_{"BlockMappingTest_old.XXXXXX"};
+  test_utils::ScopedTempFile new_part_{"BlockMappingTest_new.XXXXXX"};
 
   size_t block_size_{1024};
   BlockMapping bm_{block_size_};  // BlockMapping under test.
@@ -72,8 +54,8 @@
 }
 
 TEST_F(BlockMappingTest, BlocksAreNotKeptInMemory) {
-  test_utils::WriteFileString(old_part_path_, string(block_size_, 'a'));
-  int old_fd = HANDLE_EINTR(open(old_part_path_.c_str(), O_RDONLY));
+  test_utils::WriteFileString(old_part_.path(), string(block_size_, 'a'));
+  int old_fd = HANDLE_EINTR(open(old_part_.path().c_str(), O_RDONLY));
   ScopedFdCloser old_fd_closer(&old_fd);
 
   EXPECT_EQ(0, bm_.AddDiskBlock(old_fd, 0));
@@ -107,18 +89,18 @@
   string old_contents(10 * block_size_, '\0');
   for (size_t i = 0; i < old_contents.size(); ++i)
     old_contents[i] = 4 + i / block_size_;
-  test_utils::WriteFileString(old_part_path_, old_contents);
+  test_utils::WriteFileString(old_part_.path(), old_contents);
 
   // A string including the block with all zeros and overlapping some of the
   // other blocks in old_contents.
   string new_contents(6 * block_size_, '\0');
   for (size_t i = 0; i < new_contents.size(); ++i)
     new_contents[i] = i / block_size_;
-  test_utils::WriteFileString(new_part_path_, new_contents);
+  test_utils::WriteFileString(new_part_.path(), new_contents);
 
   vector<BlockMapping::BlockId> old_ids, new_ids;
-  EXPECT_TRUE(MapPartitionBlocks(old_part_path_,
-                                 new_part_path_,
+  EXPECT_TRUE(MapPartitionBlocks(old_part_.path(),
+                                 new_part_.path(),
                                  old_contents.size(),
                                  new_contents.size(),
                                  block_size_,
@@ -127,8 +109,7 @@
 
   EXPECT_EQ((vector<BlockMapping::BlockId>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}),
             old_ids);
-  EXPECT_EQ((vector<BlockMapping::BlockId>{0, 11, 12, 13, 1, 2}),
-            new_ids);
+  EXPECT_EQ((vector<BlockMapping::BlockId>{0, 11, 12, 13, 1, 2}), new_ids);
 }
 
 }  // namespace chromeos_update_engine
diff --git a/payload_generator/boot_img_filesystem.cc b/payload_generator/boot_img_filesystem.cc
new file mode 100644
index 0000000..19de410
--- /dev/null
+++ b/payload_generator/boot_img_filesystem.cc
@@ -0,0 +1,105 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/boot_img_filesystem.h"
+
+#include <base/logging.h>
+#include <brillo/secure_blob.h>
+#include <puffin/utils.h>
+
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_generator/delta_diff_generator.h"
+#include "update_engine/payload_generator/extent_ranges.h"
+
+using std::string;
+using std::unique_ptr;
+using std::vector;
+
+namespace chromeos_update_engine {
+
+unique_ptr<BootImgFilesystem> BootImgFilesystem::CreateFromFile(
+    const string& filename) {
+  if (filename.empty())
+    return nullptr;
+
+  brillo::Blob header;
+  if (!utils::ReadFileChunk(filename, 0, sizeof(boot_img_hdr), &header) ||
+      header.size() != sizeof(boot_img_hdr) ||
+      memcmp(header.data(), BOOT_MAGIC, BOOT_MAGIC_SIZE) != 0) {
+    return nullptr;
+  }
+
+  unique_ptr<BootImgFilesystem> result(new BootImgFilesystem());
+  result->filename_ = filename;
+  memcpy(&result->hdr_, header.data(), header.size());
+  return result;
+}
+
+size_t BootImgFilesystem::GetBlockSize() const {
+  // Page size may not be 4K, but we currently only support a 4K block size.
+  return kBlockSize;
+}
+
+size_t BootImgFilesystem::GetBlockCount() const {
+  return utils::DivRoundUp(utils::FileSize(filename_), kBlockSize);
+}
+
+FilesystemInterface::File BootImgFilesystem::GetFile(const string& name,
+                                                     uint64_t offset,
+                                                     uint64_t size) const {
+  File file;
+  file.name = name;
+  file.extents = {ExtentForBytes(kBlockSize, offset, size)};
+
+  brillo::Blob data;
+  if (utils::ReadFileChunk(filename_, offset, size, &data)) {
+    constexpr size_t kGZipHeaderSize = 10;
+    // Check GZip header magic.
+    if (data.size() > kGZipHeaderSize && data[0] == 0x1F && data[1] == 0x8B) {
+      if (!puffin::LocateDeflatesInGzip(data, &file.deflates)) {
+        LOG(ERROR) << "Error occurred parsing gzip " << name << " at offset "
+                   << offset << " of " << filename_ << ", found "
+                   << file.deflates.size() << " deflates.";
+        return file;
+      }
+      for (auto& deflate : file.deflates) {
+        deflate.offset += offset * 8;
+      }
+    }
+  }
+  return file;
+}
+
+bool BootImgFilesystem::GetFiles(vector<File>* files) const {
+  files->clear();
+  const uint64_t file_size = utils::FileSize(filename_);
+  // The first page is the header.
+  uint64_t offset = hdr_.page_size;
+  if (hdr_.kernel_size > 0 && offset + hdr_.kernel_size <= file_size) {
+    files->emplace_back(GetFile("<kernel>", offset, hdr_.kernel_size));
+  }
+  offset += utils::RoundUp(hdr_.kernel_size, hdr_.page_size);
+  if (hdr_.ramdisk_size > 0 && offset + hdr_.ramdisk_size <= file_size) {
+    files->emplace_back(GetFile("<ramdisk>", offset, hdr_.ramdisk_size));
+  }
+  return true;
+}
+
+bool BootImgFilesystem::LoadSettings(brillo::KeyValueStore* store) const {
+  return false;
+}
+
+}  // namespace chromeos_update_engine
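Editor's note: the offsets used by GetFiles() follow the boot image layout: the header occupies page 0, the kernel starts at page_size, and the ramdisk starts at the next page boundary after the kernel. A small arithmetic sketch with made-up sizes (chosen to match the extents checked in the SimpleTest unit test below):

#include <cstdint>
#include <cstdio>

uint64_t RoundUpToPage(uint64_t size, uint64_t page_size) {
  return ((size + page_size - 1) / page_size) * page_size;
}

int main() {
  const uint64_t page_size = 4096;
  const uint64_t kernel_size = 1234;   // hypothetical sizes
  const uint64_t ramdisk_size = 5678;

  const uint64_t kernel_offset = page_size;
  const uint64_t ramdisk_offset =
      kernel_offset + RoundUpToPage(kernel_size, page_size);
  // Prints: kernel at 4096 (block 1), ramdisk at 8192 (block 2).
  printf("kernel  at %llu (%llu bytes)\n",
         static_cast<unsigned long long>(kernel_offset),
         static_cast<unsigned long long>(kernel_size));
  printf("ramdisk at %llu (%llu bytes)\n",
         static_cast<unsigned long long>(ramdisk_offset),
         static_cast<unsigned long long>(ramdisk_size));
  return 0;
}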
diff --git a/payload_generator/boot_img_filesystem.h b/payload_generator/boot_img_filesystem.h
new file mode 100644
index 0000000..87725d4
--- /dev/null
+++ b/payload_generator/boot_img_filesystem.h
@@ -0,0 +1,78 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_BOOT_IMG_FILESYSTEM_H_
+#define UPDATE_ENGINE_PAYLOAD_GENERATOR_BOOT_IMG_FILESYSTEM_H_
+
+#include "update_engine/payload_generator/filesystem_interface.h"
+
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace chromeos_update_engine {
+
+class BootImgFilesystem : public FilesystemInterface {
+ public:
+  // Creates a BootImgFilesystem from an Android boot.img file.
+  static std::unique_ptr<BootImgFilesystem> CreateFromFile(
+      const std::string& filename);
+  ~BootImgFilesystem() override = default;
+
+  // FilesystemInterface overrides.
+  size_t GetBlockSize() const override;
+  size_t GetBlockCount() const override;
+
+  // GetFiles() returns one FilesystemInterface::File for the kernel and one
+  // for the ramdisk.
+  bool GetFiles(std::vector<File>* files) const override;
+
+  bool LoadSettings(brillo::KeyValueStore* store) const override;
+
+ private:
+  friend class BootImgFilesystemTest;
+
+  BootImgFilesystem() = default;
+
+  File GetFile(const std::string& name, uint64_t offset, uint64_t size) const;
+
+  // The boot.img file path.
+  std::string filename_;
+
+// https://android.googlesource.com/platform/system/core/+/master/mkbootimg/include/bootimg/bootimg.h
+#define BOOT_MAGIC "ANDROID!"
+#define BOOT_MAGIC_SIZE 8
+  struct boot_img_hdr {
+    // Must be BOOT_MAGIC.
+    uint8_t magic[BOOT_MAGIC_SIZE];
+    uint32_t kernel_size;  /* size in bytes */
+    uint32_t kernel_addr;  /* physical load addr */
+    uint32_t ramdisk_size; /* size in bytes */
+    uint32_t ramdisk_addr; /* physical load addr */
+    uint32_t second_size;  /* size in bytes */
+    uint32_t second_addr;  /* physical load addr */
+    uint32_t tags_addr;    /* physical addr for kernel tags */
+    uint32_t page_size;    /* flash page size we assume */
+  } __attribute__((packed));
+  // The boot image header.
+  boot_img_hdr hdr_;
+
+  DISALLOW_COPY_AND_ASSIGN(BootImgFilesystem);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_PAYLOAD_GENERATOR_BOOT_IMG_FILESYSTEM_H_
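
The boot_img_hdr above is only a prefix of the full AOSP bootimg.h header, but it is enough to locate the kernel and ramdisk. A small sanity-check sketch (it would have to live inside the class or the friend test, since the struct is private):

// Sketch: with __attribute__((packed)) the prefix is exactly
// 8 magic bytes + 8 * sizeof(uint32_t) = 40 bytes, comfortably smaller than
// any page size, so one header's worth of bytes can always be read once the
// file is at least page_size long.
static_assert(sizeof(boot_img_hdr) == BOOT_MAGIC_SIZE + 8 * sizeof(uint32_t),
              "boot_img_hdr prefix is expected to be 40 bytes");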
diff --git a/payload_generator/boot_img_filesystem_unittest.cc b/payload_generator/boot_img_filesystem_unittest.cc
new file mode 100644
index 0000000..b1e0d99
--- /dev/null
+++ b/payload_generator/boot_img_filesystem_unittest.cc
@@ -0,0 +1,117 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/boot_img_filesystem.h"
+
+#include <vector>
+
+#include <brillo/secure_blob.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+
+namespace chromeos_update_engine {
+
+using std::unique_ptr;
+using std::vector;
+
+class BootImgFilesystemTest : public ::testing::Test {
+ protected:
+  brillo::Blob GetBootImg(const brillo::Blob& kernel,
+                          const brillo::Blob& ramdisk) {
+    brillo::Blob boot_img(16 * 1024);
+    BootImgFilesystem::boot_img_hdr hdr{};  // Zero-init the unused fields.
+    memcpy(hdr.magic, BOOT_MAGIC, BOOT_MAGIC_SIZE);
+    hdr.kernel_size = kernel.size();
+    hdr.ramdisk_size = ramdisk.size();
+    hdr.page_size = 4096;
+    size_t offset = 0;
+    memcpy(boot_img.data() + offset, &hdr, sizeof(hdr));
+    offset += utils::RoundUp(sizeof(hdr), hdr.page_size);
+    memcpy(boot_img.data() + offset, kernel.data(), kernel.size());
+    offset += utils::RoundUp(kernel.size(), hdr.page_size);
+    memcpy(boot_img.data() + offset, ramdisk.data(), ramdisk.size());
+    return boot_img;
+  }
+
+  test_utils::ScopedTempFile boot_file_;
+};
+
+TEST_F(BootImgFilesystemTest, SimpleTest) {
+  test_utils::WriteFileVector(
+      boot_file_.path(),
+      GetBootImg(brillo::Blob(1234, 'k'), brillo::Blob(5678, 'r')));
+  unique_ptr<BootImgFilesystem> fs =
+      BootImgFilesystem::CreateFromFile(boot_file_.path());
+  EXPECT_NE(nullptr, fs);
+
+  vector<FilesystemInterface::File> files;
+  EXPECT_TRUE(fs->GetFiles(&files));
+  ASSERT_EQ(2u, files.size());
+
+  EXPECT_EQ("<kernel>", files[0].name);
+  EXPECT_EQ(1u, files[0].extents.size());
+  EXPECT_EQ(1u, files[0].extents[0].start_block());
+  EXPECT_EQ(1u, files[0].extents[0].num_blocks());
+  EXPECT_TRUE(files[0].deflates.empty());
+
+  EXPECT_EQ("<ramdisk>", files[1].name);
+  EXPECT_EQ(1u, files[1].extents.size());
+  EXPECT_EQ(2u, files[1].extents[0].start_block());
+  EXPECT_EQ(2u, files[1].extents[0].num_blocks());
+  EXPECT_TRUE(files[1].deflates.empty());
+}
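
The extent expectations in SimpleTest follow directly from the 4096-byte page size used by GetBootImg(); a sketch of the arithmetic (illustrative only, not part of the test):

#include <cstdint>

// Sketch: block layout implied by GetBootImg() with page_size == 4096.
constexpr uint64_t kPage = 4096;
constexpr uint64_t kKernelOffset = kPage;  // the header fills block 0
constexpr uint64_t kRamdiskOffset =
    kPage + ((1234 + kPage - 1) / kPage) * kPage;  // RoundUp(1234, 4096)
static_assert(kKernelOffset / kPage == 1, "kernel starts at block 1");
static_assert(kRamdiskOffset / kPage == 2, "ramdisk starts at block 2");
static_assert((5678 + kPage - 1) / kPage == 2, "5678 bytes span two blocks");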
+
+TEST_F(BootImgFilesystemTest, BadImageTest) {
+  brillo::Blob boot_img = GetBootImg({}, {});
+  boot_img[7] = '?';
+  test_utils::WriteFileVector(boot_file_.path(), boot_img);
+  unique_ptr<BootImgFilesystem> fs =
+      BootImgFilesystem::CreateFromFile(boot_file_.path());
+  EXPECT_EQ(nullptr, fs);
+}
+
+TEST_F(BootImgFilesystemTest, GZipRamdiskTest) {
+  // echo ramdisk | gzip | hexdump -v -e '/1 "0x%02x, "'
+  const brillo::Blob ramdisk = {0x1f, 0x8b, 0x08, 0x00, 0x3a, 0x83, 0x35,
+                                0x5b, 0x00, 0x03, 0x2b, 0x4a, 0xcc, 0x4d,
+                                0xc9, 0x2c, 0xce, 0xe6, 0x02, 0x00, 0x2e,
+                                0xf6, 0x0b, 0x08, 0x08, 0x00, 0x00, 0x00};
+  test_utils::WriteFileVector(boot_file_.path(),
+                              GetBootImg(brillo::Blob(5678, 'k'), ramdisk));
+  unique_ptr<BootImgFilesystem> fs =
+      BootImgFilesystem::CreateFromFile(boot_file_.path());
+  EXPECT_NE(nullptr, fs);
+
+  vector<FilesystemInterface::File> files;
+  EXPECT_TRUE(fs->GetFiles(&files));
+  ASSERT_EQ(2u, files.size());
+
+  EXPECT_EQ("<kernel>", files[0].name);
+  EXPECT_EQ(1u, files[0].extents.size());
+  EXPECT_EQ(1u, files[0].extents[0].start_block());
+  EXPECT_EQ(2u, files[0].extents[0].num_blocks());
+  EXPECT_TRUE(files[0].deflates.empty());
+
+  EXPECT_EQ("<ramdisk>", files[1].name);
+  EXPECT_EQ(1u, files[1].extents.size());
+  EXPECT_EQ(3u, files[1].extents[0].start_block());
+  EXPECT_EQ(1u, files[1].extents[0].num_blocks());
+  EXPECT_EQ(1u, files[1].deflates.size());
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_generator/cycle_breaker.cc b/payload_generator/cycle_breaker.cc
index 52a6f60..d6eeed2 100644
--- a/payload_generator/cycle_breaker.cc
+++ b/payload_generator/cycle_breaker.cc
@@ -18,14 +18,15 @@
 
 #include <inttypes.h>
 
+#include <limits>
 #include <set>
 #include <string>
 #include <utility>
 
+#include <base/stl_util.h>
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
 
-#include "update_engine/common/utils.h"
 #include "update_engine/payload_generator/graph_utils.h"
 #include "update_engine/payload_generator/tarjan.h"
 
@@ -55,7 +56,7 @@
   skipped_ops_ = 0;
 
   for (Graph::size_type i = 0; i < subgraph_.size(); i++) {
-    InstallOperation_Type op_type = graph[i].aop.op.type();
+    InstallOperation::Type op_type = graph[i].aop.op.type();
     if (op_type == InstallOperation::REPLACE ||
         op_type == InstallOperation::REPLACE_BZ) {
       skipped_ops_++;
@@ -77,13 +78,15 @@
 
     // Set subgraph edges for the components in the SCC.
     for (vector<Vertex::Index>::iterator it = component_indexes.begin();
-         it != component_indexes.end(); ++it) {
+         it != component_indexes.end();
+         ++it) {
       subgraph_[*it].subgraph_edges.clear();
       for (vector<Vertex::Index>::iterator jt = component_indexes.begin();
-           jt != component_indexes.end(); ++jt) {
+           jt != component_indexes.end();
+           ++jt) {
         // If there's a link from *it -> *jt in the graph,
         // add a subgraph_ edge
-        if (utils::MapContainsKey(subgraph_[*it].out_edges, *jt))
+        if (base::ContainsKey(subgraph_[*it].out_edges, *jt))
           subgraph_[*it].subgraph_edges.insert(*jt);
       }
     }
@@ -105,13 +108,13 @@
 
 void CycleBreaker::HandleCircuit() {
   stack_.push_back(current_vertex_);
-  CHECK_GE(stack_.size(),
-           static_cast<vector<Vertex::Index>::size_type>(2));
+  CHECK_GE(stack_.size(), static_cast<vector<Vertex::Index>::size_type>(2));
   Edge min_edge = make_pair(stack_[0], stack_[1]);
   uint64_t min_edge_weight = std::numeric_limits<uint64_t>::max();
   size_t edges_considered = 0;
   for (vector<Vertex::Index>::const_iterator it = stack_.begin();
-       it != (stack_.end() - 1); ++it) {
+       it != (stack_.end() - 1);
+       ++it) {
     Edge edge = make_pair(*it, *(it + 1));
     if (cut_edges_.find(edge) != cut_edges_.end()) {
       stack_.pop_back();
@@ -134,7 +137,7 @@
   blocked_[u] = false;
 
   for (Vertex::EdgeMap::iterator it = blocked_graph_[u].out_edges.begin();
-       it != blocked_graph_[u].out_edges.end(); ) {
+       it != blocked_graph_[u].out_edges.end();) {
     Vertex::Index w = it->first;
     blocked_graph_[u].out_edges.erase(it++);
     if (blocked_[w])
@@ -144,9 +147,11 @@
 
 bool CycleBreaker::StackContainsCutEdge() const {
   for (vector<Vertex::Index>::const_iterator it = ++stack_.begin(),
-           e = stack_.end(); it != e; ++it) {
+                                             e = stack_.end();
+       it != e;
+       ++it) {
     Edge edge = make_pair(*(it - 1), *it);
-    if (utils::SetContainsKey(cut_edges_, edge)) {
+    if (base::ContainsKey(cut_edges_, edge)) {
       return true;
     }
   }
@@ -174,7 +179,8 @@
 
   for (Vertex::SubgraphEdgeMap::iterator w =
            subgraph_[vertex].subgraph_edges.begin();
-       w != subgraph_[vertex].subgraph_edges.end(); ++w) {
+       w != subgraph_[vertex].subgraph_edges.end();
+       ++w) {
     if (*w == current_vertex_) {
       // The original paper called for printing stack_ followed by
       // current_vertex_ here, which is a cycle. Instead, we call
@@ -195,11 +201,12 @@
   } else {
     for (Vertex::SubgraphEdgeMap::iterator w =
              subgraph_[vertex].subgraph_edges.begin();
-         w != subgraph_[vertex].subgraph_edges.end(); ++w) {
+         w != subgraph_[vertex].subgraph_edges.end();
+         ++w) {
       if (blocked_graph_[*w].out_edges.find(vertex) ==
           blocked_graph_[*w].out_edges.end()) {
-        blocked_graph_[*w].out_edges.insert(make_pair(vertex,
-                                                      EdgeProperties()));
+        blocked_graph_[*w].out_edges.insert(
+            make_pair(vertex, EdgeProperties()));
       }
     }
   }
diff --git a/payload_generator/cycle_breaker.h b/payload_generator/cycle_breaker.h
index 231d63a..01518fe 100644
--- a/payload_generator/cycle_breaker.h
+++ b/payload_generator/cycle_breaker.h
@@ -53,11 +53,11 @@
   bool Circuit(Vertex::Index vertex, Vertex::Index depth);
   bool StackContainsCutEdge() const;
 
-  std::vector<bool> blocked_;  // "blocked" in the paper
-  Vertex::Index current_vertex_;  // "s" in the paper
+  std::vector<bool> blocked_;         // "blocked" in the paper
+  Vertex::Index current_vertex_;      // "s" in the paper
   std::vector<Vertex::Index> stack_;  // the stack variable in the paper
-  Graph subgraph_;  // "A_K" in the paper
-  Graph blocked_graph_;  // "B" in the paper
+  Graph subgraph_;                    // "A_K" in the paper
+  Graph blocked_graph_;               // "B" in the paper
 
   std::set<Edge> cut_edges_;
 
diff --git a/payload_generator/cycle_breaker_unittest.cc b/payload_generator/cycle_breaker_unittest.cc
index e92bc30..fdcf49b 100644
--- a/payload_generator/cycle_breaker_unittest.cc
+++ b/payload_generator/cycle_breaker_unittest.cc
@@ -22,9 +22,9 @@
 #include <vector>
 
 #include <base/logging.h>
+#include <base/stl_util.h>
 #include <gtest/gtest.h>
 
-#include "update_engine/common/utils.h"
 #include "update_engine/payload_generator/graph_types.h"
 
 using std::make_pair;
@@ -83,20 +83,20 @@
   // C->D->E
   // G->H
 
-  EXPECT_TRUE(utils::SetContainsKey(broken_edges, make_pair(n_a, n_e)) ||
-              utils::SetContainsKey(broken_edges, make_pair(n_e, n_b)) ||
-              utils::SetContainsKey(broken_edges, make_pair(n_b, n_a)));
-  EXPECT_TRUE(utils::SetContainsKey(broken_edges, make_pair(n_c, n_d)) ||
-              utils::SetContainsKey(broken_edges, make_pair(n_d, n_e)) ||
-              utils::SetContainsKey(broken_edges, make_pair(n_e, n_c)));
-  EXPECT_TRUE(utils::SetContainsKey(broken_edges, make_pair(n_g, n_h)) ||
-              utils::SetContainsKey(broken_edges, make_pair(n_h, n_g)));
+  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_a, n_e)) ||
+              base::ContainsKey(broken_edges, make_pair(n_e, n_b)) ||
+              base::ContainsKey(broken_edges, make_pair(n_b, n_a)));
+  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_c, n_d)) ||
+              base::ContainsKey(broken_edges, make_pair(n_d, n_e)) ||
+              base::ContainsKey(broken_edges, make_pair(n_e, n_c)));
+  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_g, n_h)) ||
+              base::ContainsKey(broken_edges, make_pair(n_h, n_g)));
   EXPECT_EQ(3U, broken_edges.size());
 }
 
 namespace {
 pair<Vertex::Index, EdgeProperties> EdgeWithWeight(Vertex::Index dest,
-uint64_t weight) {
+                                                   uint64_t weight) {
   EdgeProperties props;
   props.extents.resize(1);
   props.extents[0].set_num_blocks(weight);
@@ -104,7 +104,6 @@
 }
 }  // namespace
 
-
 // This creates a bunch of cycles like this:
 //
 //               root <------.
@@ -168,7 +167,9 @@
   set<Edge> expected_cuts;
 
   for (Vertex::EdgeMap::const_iterator it = graph[n_root].out_edges.begin(),
-       e = graph[n_root].out_edges.end(); it != e; ++it) {
+                                       e = graph[n_root].out_edges.end();
+       it != e;
+       ++it) {
     expected_cuts.insert(make_pair(n_root, it->first));
   }
 
@@ -217,11 +218,11 @@
   breaker.BreakCycles(graph, &broken_edges);
 
   // These are required to be broken:
-  EXPECT_TRUE(utils::SetContainsKey(broken_edges, make_pair(n_b, n_a)));
-  EXPECT_TRUE(utils::SetContainsKey(broken_edges, make_pair(n_b, n_c)));
-  EXPECT_TRUE(utils::SetContainsKey(broken_edges, make_pair(n_d, n_e)));
-  EXPECT_TRUE(utils::SetContainsKey(broken_edges, make_pair(n_f, n_g)));
-  EXPECT_TRUE(utils::SetContainsKey(broken_edges, make_pair(n_h, n_i)));
+  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_b, n_a)));
+  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_b, n_c)));
+  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_d, n_e)));
+  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_f, n_g)));
+  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_h, n_i)));
 }
 
 TEST(CycleBreakerTest, UnblockGraphTest) {
@@ -248,8 +249,8 @@
   breaker.BreakCycles(graph, &broken_edges);
 
   // These are required to be broken:
-  EXPECT_TRUE(utils::SetContainsKey(broken_edges, make_pair(n_a, n_b)));
-  EXPECT_TRUE(utils::SetContainsKey(broken_edges, make_pair(n_a, n_c)));
+  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_a, n_b)));
+  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_a, n_c)));
 }
 
 TEST(CycleBreakerTest, SkipOpsTest) {
diff --git a/payload_generator/deflate_utils.cc b/payload_generator/deflate_utils.cc
index e331142..a7a0503 100644
--- a/payload_generator/deflate_utils.cc
+++ b/payload_generator/deflate_utils.cc
@@ -31,10 +31,10 @@
 #include "update_engine/payload_generator/squashfs_filesystem.h"
 #include "update_engine/update_metadata.pb.h"
 
-using std::string;
-using std::vector;
 using puffin::BitExtent;
 using puffin::ByteExtent;
+using std::string;
+using std::vector;
 
 namespace chromeos_update_engine {
 namespace deflate_utils {
@@ -102,6 +102,15 @@
              ((extent.start_block() + extent.num_blocks()) * kBlockSize);
 }
 
+// Returns whether the given file |name| has an extension listed in
+// |extensions|.
+bool IsFileExtensions(const string& name,
+                      const std::initializer_list<string>& extensions) {
+  return any_of(extensions.begin(), extensions.end(), [&name](const auto& ext) {
+    return base::EndsWith(name, ext, base::CompareCase::INSENSITIVE_ASCII);
+  });
+}
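
The case-insensitive matching is what lets files such as Foo.APK or archive.ZIP be picked up. A self-contained sketch of an equivalent check (IsFileExtensions itself relies on base::EndsWith with CompareCase::INSENSITIVE_ASCII):

#include <algorithm>
#include <cctype>
#include <string>

// Sketch only: a libchrome-free equivalent of the suffix test above.
bool EndsWithInsensitive(const std::string& name, const std::string& ext) {
  if (ext.size() > name.size())
    return false;
  return std::equal(ext.rbegin(), ext.rend(), name.rbegin(),
                    [](char a, char b) {
                      return std::tolower(static_cast<unsigned char>(a)) ==
                             std::tolower(static_cast<unsigned char>(b));
                    });
}

// EndsWithInsensitive("priv-app/Foo/Foo.APK", ".apk")  -> true
// EndsWithInsensitive("lib64/libfoo.so", ".apk")       -> false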
+
 }  // namespace
 
 ByteExtent ExpandToByteExtent(const BitExtent& extent) {
@@ -247,9 +256,9 @@
   return true;
 }
 
-bool PreprocessParitionFiles(const PartitionConfig& part,
-                             vector<FilesystemInterface::File>* result_files,
-                             bool extract_deflates) {
+bool PreprocessPartitionFiles(const PartitionConfig& part,
+                              vector<FilesystemInterface::File>* result_files,
+                              bool extract_deflates) {
   // Get the file system files.
   vector<FilesystemInterface::File> tmp_files;
   part.fs_interface->GetFiles(&tmp_files);
@@ -286,31 +295,35 @@
       }
     }
 
-    // Search for deflates if the file is in zip format.
-    bool is_zip =
-        base::EndsWith(
-            file.name, ".apk", base::CompareCase::INSENSITIVE_ASCII) ||
-        base::EndsWith(
-            file.name, ".zip", base::CompareCase::INSENSITIVE_ASCII) ||
-        base::EndsWith(file.name, ".jar", base::CompareCase::INSENSITIVE_ASCII);
-
-    if (is_zip && extract_deflates) {
-      brillo::Blob data;
-      TEST_AND_RETURN_FALSE(
-          utils::ReadExtents(part.path,
-                             file.extents,
-                             &data,
-                             kBlockSize * utils::BlocksInExtents(file.extents),
-                             kBlockSize));
-      std::vector<puffin::BitExtent> deflates_sub_blocks;
-      TEST_AND_RETURN_FALSE(puffin::LocateDeflateSubBlocksInZipArchive(
-          data, &deflates_sub_blocks));
-      // Shift the deflate's extent to the offset starting from the beginning
-      // of the current partition; and the delta processor will align the
-      // extents in a continuous buffer later.
-      TEST_AND_RETURN_FALSE(
-          ShiftBitExtentsOverExtents(file.extents, &deflates_sub_blocks));
-      file.deflates = std::move(deflates_sub_blocks);
+    if (extract_deflates) {
+      // Search for deflates if the file is in zip or gzip format.
+      // .zvoice files may eventually move out of rootfs. If that happens,
+      // remove ".zvoice" (crbug.com/782918).
+      bool is_zip = IsFileExtensions(
+          file.name, {".apk", ".zip", ".jar", ".zvoice", ".apex"});
+      bool is_gzip = IsFileExtensions(file.name, {".gz", ".gzip", ".tgz"});
+      if (is_zip || is_gzip) {
+        brillo::Blob data;
+        TEST_AND_RETURN_FALSE(utils::ReadExtents(
+            part.path,
+            file.extents,
+            &data,
+            kBlockSize * utils::BlocksInExtents(file.extents),
+            kBlockSize));
+        vector<puffin::BitExtent> deflates;
+        if (is_zip) {
+          TEST_AND_RETURN_FALSE(
+              puffin::LocateDeflatesInZipArchive(data, &deflates));
+        } else if (is_gzip) {
+          TEST_AND_RETURN_FALSE(puffin::LocateDeflatesInGzip(data, &deflates));
+        }
+        // Shift the deflates' extents to offsets relative to the beginning of
+        // the current partition; the delta processor will align the extents
+        // in a contiguous buffer later.
+        TEST_AND_RETURN_FALSE(
+            ShiftBitExtentsOverExtents(file.extents, &deflates));
+        file.deflates = std::move(deflates);
+      }
     }
 
     result_files->push_back(file);
diff --git a/payload_generator/deflate_utils.h b/payload_generator/deflate_utils.h
index 798ce25..752bd9f 100644
--- a/payload_generator/deflate_utils.h
+++ b/payload_generator/deflate_utils.h
@@ -29,9 +29,10 @@
 // Gets the files from the partition and processes all its files. Processing
 // includes:
 //  - splitting large Squashfs containers into its smaller files.
-bool PreprocessParitionFiles(const PartitionConfig& part,
-                             std::vector<FilesystemInterface::File>* result,
-                             bool extract_deflates);
+//  - extracting deflates in zip and gzip files.
+bool PreprocessPartitionFiles(const PartitionConfig& part,
+                              std::vector<FilesystemInterface::File>* result,
+                              bool extract_deflates);
 
 // Spreads all extents in |over_extents| over |base_extents|. Here we assume the
 // |over_extents| are non-overlapping and sorted by their offset.
diff --git a/payload_generator/deflate_utils_unittest.cc b/payload_generator/deflate_utils_unittest.cc
index cb9476a..f2c4dba 100644
--- a/payload_generator/deflate_utils_unittest.cc
+++ b/payload_generator/deflate_utils_unittest.cc
@@ -29,9 +29,9 @@
 #include "update_engine/payload_generator/extent_ranges.h"
 #include "update_engine/payload_generator/extent_utils.h"
 
-using std::vector;
 using puffin::BitExtent;
 using puffin::ByteExtent;
+using std::vector;
 
 namespace chromeos_update_engine {
 namespace deflate_utils {
diff --git a/payload_generator/delta_diff_generator.cc b/payload_generator/delta_diff_generator.cc
index 1db2144..d484d32 100644
--- a/payload_generator/delta_diff_generator.cc
+++ b/payload_generator/delta_diff_generator.cc
@@ -50,11 +50,10 @@
 const size_t kRootFSPartitionSize = static_cast<size_t>(2) * 1024 * 1024 * 1024;
 const size_t kBlockSize = 4096;  // bytes
 
-bool GenerateUpdatePayloadFile(
-    const PayloadGenerationConfig& config,
-    const string& output_path,
-    const string& private_key_path,
-    uint64_t* metadata_size) {
+bool GenerateUpdatePayloadFile(const PayloadGenerationConfig& config,
+                               const string& output_path,
+                               const string& private_key_path,
+                               uint64_t* metadata_size) {
   if (!config.version.Validate()) {
     LOG(ERROR) << "Unsupported major.minor version: " << config.version.major
                << "." << config.version.minor;
@@ -108,11 +107,8 @@
 
       vector<AnnotatedOperation> aops;
       // Generate the operations using the strategy we selected above.
-      TEST_AND_RETURN_FALSE(strategy->GenerateOperations(config,
-                                                         old_part,
-                                                         new_part,
-                                                         &blob_file,
-                                                         &aops));
+      TEST_AND_RETURN_FALSE(strategy->GenerateOperations(
+          config, old_part, new_part, &blob_file, &aops));
 
       // Filter the no-operations. OperationsGenerators should not output this
       // kind of operations normally, but this is an extra step to fix that if
@@ -125,8 +121,8 @@
 
   LOG(INFO) << "Writing payload file...";
   // Write payload file to disk.
-  TEST_AND_RETURN_FALSE(payload.WritePayload(output_path, temp_file_path,
-                                             private_key_path, metadata_size));
+  TEST_AND_RETURN_FALSE(payload.WritePayload(
+      output_path, temp_file_path, private_key_path, metadata_size));
 
   LOG(INFO) << "All done. Successfully created delta file with "
             << "metadata size = " << *metadata_size;
diff --git a/payload_generator/delta_diff_generator.h b/payload_generator/delta_diff_generator.h
index d8bdae2..8323f47 100644
--- a/payload_generator/delta_diff_generator.h
+++ b/payload_generator/delta_diff_generator.h
@@ -41,7 +41,6 @@
                                const std::string& private_key_path,
                                uint64_t* metadata_size);
 
-
 };  // namespace chromeos_update_engine
 
 #endif  // UPDATE_ENGINE_PAYLOAD_GENERATOR_DELTA_DIFF_GENERATOR_H_
diff --git a/payload_generator/delta_diff_utils.cc b/payload_generator/delta_diff_utils.cc
index 877e13f..4ba6e24 100644
--- a/payload_generator/delta_diff_utils.cc
+++ b/payload_generator/delta_diff_utils.cc
@@ -29,8 +29,11 @@
 #include <unistd.h>
 
 #include <algorithm>
+#include <functional>
+#include <list>
 #include <map>
 #include <memory>
+#include <numeric>
 #include <utility>
 
 #include <base/files/file_util.h>
@@ -38,14 +41,17 @@
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
 #include <base/threading/simple_thread.h>
+#include <base/time/time.h>
 #include <brillo/data_encoding.h>
 #include <bsdiff/bsdiff.h>
 #include <bsdiff/patch_writer_factory.h>
+#include <puffin/utils.h>
 
 #include "update_engine/common/hash_calculator.h"
 #include "update_engine/common/subprocess.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/payload_constants.h"
+#include "update_engine/payload_generator/ab_generator.h"
 #include "update_engine/payload_generator/block_mapping.h"
 #include "update_engine/payload_generator/bzip.h"
 #include "update_engine/payload_generator/deflate_utils.h"
@@ -55,6 +61,7 @@
 #include "update_engine/payload_generator/squashfs_filesystem.h"
 #include "update_engine/payload_generator/xz.h"
 
+using std::list;
 using std::map;
 using std::string;
 using std::vector;
@@ -81,8 +88,10 @@
 // removed, which may cause the extent to be trimmed, split or removed entirely.
 // The value of |*idx_p| is updated to point to the next extent to be processed.
 // Returns true iff the next extent to process is a new or updated one.
-bool ProcessExtentBlockRange(vector<Extent>* extents, size_t* idx_p,
-                             const bool do_remove, uint64_t range_start,
+bool ProcessExtentBlockRange(vector<Extent>* extents,
+                             size_t* idx_p,
+                             const bool do_remove,
+                             uint64_t range_start,
                              uint64_t range_end) {
   size_t idx = *idx_p;
   uint64_t start_block = (*extents)[idx].start_block();
@@ -141,17 +150,17 @@
 
     uint64_t src_num_blocks = (*src_extents)[src_idx].num_blocks();
     uint64_t dst_num_blocks = (*dst_extents)[dst_idx].num_blocks();
-    uint64_t min_num_blocks = std::min(src_num_blocks - src_offset,
-                                       dst_num_blocks - dst_offset);
+    uint64_t min_num_blocks =
+        std::min(src_num_blocks - src_offset, dst_num_blocks - dst_offset);
     uint64_t prev_src_offset = src_offset;
     uint64_t prev_dst_offset = dst_offset;
     src_offset += min_num_blocks;
     dst_offset += min_num_blocks;
 
-    bool new_src = ProcessExtentBlockRange(src_extents, &src_idx, do_remove,
-                                           prev_src_offset, src_offset);
-    bool new_dst = ProcessExtentBlockRange(dst_extents, &dst_idx, do_remove,
-                                           prev_dst_offset, dst_offset);
+    bool new_src = ProcessExtentBlockRange(
+        src_extents, &src_idx, do_remove, prev_src_offset, src_offset);
+    bool new_dst = ProcessExtentBlockRange(
+        dst_extents, &dst_idx, do_remove, prev_dst_offset, dst_offset);
     if (new_src) {
       src_offset = 0;
     }
@@ -171,6 +180,54 @@
   return removed_bytes;
 }
 
+// Storing a diff operation has more overhead in the manifest than a replace
+// operation: we need to store an additional src_sha256_hash, which is 32
+// bytes and not compressible, as well as src_extents, which can take anywhere
+// from a few bytes to hundreds of bytes depending on the number of extents.
+// This function evaluates that overhead tradeoff and determines whether it is
+// worth using a diff operation with a data blob of |diff_size| and
+// |num_src_extents| extents over an existing |op| with a data blob of
+// |old_blob_size|.
+bool IsDiffOperationBetter(const InstallOperation& op,
+                           size_t old_blob_size,
+                           size_t diff_size,
+                           size_t num_src_extents) {
+  if (!diff_utils::IsAReplaceOperation(op.type()))
+    return diff_size < old_blob_size;
+
+  // Reference: https://developers.google.com/protocol-buffers/docs/encoding
+  // For |src_sha256_hash| we need 1 byte field number/type, 1 byte size and 32
+  // bytes data, for |src_extents| we need 1 byte field number/type and 1 byte
+  // size.
+  constexpr size_t kDiffOverhead = 1 + 1 + 32 + 1 + 1;
+  // Each extent has two variable length encoded uint64, here we use a rough
+  // estimate of 6 bytes overhead per extent, since |num_blocks| is usually
+  // very small.
+  constexpr size_t kDiffOverheadPerExtent = 6;
+
+  return diff_size + kDiffOverhead + num_src_extents * kDiffOverheadPerExtent <
+         old_blob_size;
+}
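
For concreteness, a worked instance of the tradeoff above (the numbers are illustrative, not taken from the change):

#include <cstddef>

// Sketch: a diff candidate with 3 source extents competing against an
// existing 4000-byte replace blob must come in under
// 4000 - (1 + 1 + 32 + 1 + 1) - 3 * 6 = 3946 bytes to win.
constexpr size_t kOldBlobSize = 4000;
constexpr size_t kNumSrcExtents = 3;
constexpr size_t kOverhead = 1 + 1 + 32 + 1 + 1 + kNumSrcExtents * 6;
static_assert(kOldBlobSize - kOverhead == 3946,
              "break-even diff size for this example");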
+
+// Returns the Levenshtein distance between strings |a| and |b|.
+// https://en.wikipedia.org/wiki/Levenshtein_distance
+int LevenshteinDistance(const string& a, const string& b) {
+  vector<int> distances(a.size() + 1);
+  std::iota(distances.begin(), distances.end(), 0);
+
+  for (size_t i = 1; i <= b.size(); i++) {
+    distances[0] = i;
+    int previous_distance = i - 1;
+    for (size_t j = 1; j <= a.size(); j++) {
+      int new_distance =
+          std::min({distances[j] + 1,
+                    distances[j - 1] + 1,
+                    previous_distance + (a[j - 1] == b[i - 1] ? 0 : 1)});
+      previous_distance = distances[j];
+      distances[j] = new_distance;
+    }
+  }
+  return distances.back();
+}
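
As a quick check of the single-row dynamic program above: LevenshteinDistance("kitten", "sitting") evaluates to 3 (substitute k with s, substitute e with i, append g), and a versioned name such as "libfoo-27.so" versus "libfoo-28.so" scores 1, which is what makes the nearest-name fallback below work well for version bumps. Only one row of a.size() + 1 distances is kept, so memory stays O(|a|) even for long path names.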
 }  // namespace
 
 namespace diff_utils {
@@ -195,13 +252,16 @@
         version_(version),
         old_extents_(old_extents),
         new_extents_(new_extents),
+        new_extents_blocks_(utils::BlocksInExtents(new_extents)),
         old_deflates_(old_deflates),
         new_deflates_(new_deflates),
         name_(name),
         chunk_blocks_(chunk_blocks),
         blob_file_(blob_file) {}
 
-  FileDeltaProcessor(FileDeltaProcessor&& processor) = default;
+  bool operator>(const FileDeltaProcessor& other) const {
+    return new_extents_blocks_ > other.new_extents_blocks_;
+  }
 
   ~FileDeltaProcessor() override = default;
 
@@ -211,34 +271,35 @@
   void Run() override;
 
   // Merge each file processor's ops list to aops.
-  void MergeOperation(vector<AnnotatedOperation>* aops);
+  bool MergeOperation(vector<AnnotatedOperation>* aops);
 
  private:
-  const string& old_part_;
-  const string& new_part_;
+  const string& old_part_;  // NOLINT(runtime/member_string_references)
+  const string& new_part_;  // NOLINT(runtime/member_string_references)
   const PayloadVersion& version_;
 
   // The block ranges of the old/new file within the src/tgt image
   const vector<Extent> old_extents_;
   const vector<Extent> new_extents_;
+  const size_t new_extents_blocks_;
   const vector<puffin::BitExtent> old_deflates_;
   const vector<puffin::BitExtent> new_deflates_;
   const string name_;
   // Block limit of one aop.
-  ssize_t chunk_blocks_;
+  const ssize_t chunk_blocks_;
   BlobFileWriter* blob_file_;
 
   // The list of ops to reach the new file from the old file.
   vector<AnnotatedOperation> file_aops_;
 
+  bool failed_ = false;
+
   DISALLOW_COPY_AND_ASSIGN(FileDeltaProcessor);
 };
 
 void FileDeltaProcessor::Run() {
   TEST_AND_RETURN(blob_file_ != nullptr);
-
-  LOG(INFO) << "Encoding file " << name_ << " ("
-            << utils::BlocksInExtents(new_extents_) << " blocks)";
+  base::TimeTicks start = base::TimeTicks::Now();
 
   if (!DeltaReadFile(&file_aops_,
                      old_part_,
@@ -252,13 +313,57 @@
                      version_,
                      blob_file_)) {
     LOG(ERROR) << "Failed to generate delta for " << name_ << " ("
-               << utils::BlocksInExtents(new_extents_) << " blocks)";
+               << new_extents_blocks_ << " blocks)";
+    failed_ = true;
+    return;
   }
+
+  if (!version_.InplaceUpdate()) {
+    if (!ABGenerator::FragmentOperations(
+            version_, &file_aops_, new_part_, blob_file_)) {
+      LOG(ERROR) << "Failed to fragment operations for " << name_;
+      failed_ = true;
+      return;
+    }
+  }
+
+  LOG(INFO) << "Encoded file " << name_ << " (" << new_extents_blocks_
+            << " blocks) in " << (base::TimeTicks::Now() - start);
 }
 
-void FileDeltaProcessor::MergeOperation(vector<AnnotatedOperation>* aops) {
+bool FileDeltaProcessor::MergeOperation(vector<AnnotatedOperation>* aops) {
+  if (failed_)
+    return false;
   aops->reserve(aops->size() + file_aops_.size());
   std::move(file_aops_.begin(), file_aops_.end(), std::back_inserter(*aops));
+  return true;
+}
+
+FilesystemInterface::File GetOldFile(
+    const map<string, FilesystemInterface::File>& old_files_map,
+    const string& new_file_name) {
+  if (old_files_map.empty())
+    return {};
+
+  auto old_file_iter = old_files_map.find(new_file_name);
+  if (old_file_iter != old_files_map.end())
+    return old_file_iter->second;
+
+  // No old file matches the new file name; use a similar file with the
+  // shortest Levenshtein distance.
+  // This works well if the file name contains a version number, but even for
+  // a completely new file, using a similar file can still help.
+  int min_distance = new_file_name.size();
+  const FilesystemInterface::File* old_file = nullptr;
+  for (const auto& pair : old_files_map) {
+    int distance = LevenshteinDistance(new_file_name, pair.first);
+    if (distance < min_distance) {
+      min_distance = distance;
+      old_file = &pair.second;
+    }
+  }
+  // If no old file is close enough, fall back to an empty File instead of
+  // dereferencing an unset pointer.
+  if (old_file == nullptr)
+    return {};
+  LOG(INFO) << "Using " << old_file->name << " as source for " << new_file_name;
+  return *old_file;
 }
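
A short usage sketch of the fallback behavior (hypothetical file names; assumes the update_engine headers are available):

// Sketch: the exact lookup misses, so the closest old name seeds the diff.
std::map<std::string, FilesystemInterface::File> old_files_map;
old_files_map["system/app/Maps/Maps-1018.apk"].name =
    "system/app/Maps/Maps-1018.apk";
old_files_map["system/lib64/libjpeg.so"].name = "system/lib64/libjpeg.so";

FilesystemInterface::File source =
    GetOldFile(old_files_map, "system/app/Maps/Maps-1019.apk");
// source.name == "system/app/Maps/Maps-1018.apk"; its extents and deflates
// are then used as the old data when diffing the new APK.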
 
 bool DeltaReadPartition(vector<AnnotatedOperation>* aops,
@@ -271,23 +376,36 @@
   ExtentRanges old_visited_blocks;
   ExtentRanges new_visited_blocks;
 
-  TEST_AND_RETURN_FALSE(DeltaMovedAndZeroBlocks(
-      aops,
-      old_part.path,
-      new_part.path,
-      old_part.size / kBlockSize,
-      new_part.size / kBlockSize,
-      soft_chunk_blocks,
-      version,
-      blob_file,
-      &old_visited_blocks,
-      &new_visited_blocks));
+  // If verity is enabled, mark those blocks as visited to skip generating
+  // operations for them.
+  if (version.minor >= kVerityMinorPayloadVersion &&
+      !new_part.verity.IsEmpty()) {
+    LOG(INFO) << "Skipping verity hash tree blocks: "
+              << ExtentsToString({new_part.verity.hash_tree_extent});
+    new_visited_blocks.AddExtent(new_part.verity.hash_tree_extent);
+    LOG(INFO) << "Skipping verity FEC blocks: "
+              << ExtentsToString({new_part.verity.fec_extent});
+    new_visited_blocks.AddExtent(new_part.verity.fec_extent);
+  }
+
+  ExtentRanges old_zero_blocks;
+  TEST_AND_RETURN_FALSE(DeltaMovedAndZeroBlocks(aops,
+                                                old_part.path,
+                                                new_part.path,
+                                                old_part.size / kBlockSize,
+                                                new_part.size / kBlockSize,
+                                                soft_chunk_blocks,
+                                                version,
+                                                blob_file,
+                                                &old_visited_blocks,
+                                                &new_visited_blocks,
+                                                &old_zero_blocks));
 
   bool puffdiff_allowed = version.OperationAllowed(InstallOperation::PUFFDIFF);
   map<string, FilesystemInterface::File> old_files_map;
   if (old_part.fs_interface) {
     vector<FilesystemInterface::File> old_files;
-    TEST_AND_RETURN_FALSE(deflate_utils::PreprocessParitionFiles(
+    TEST_AND_RETURN_FALSE(deflate_utils::PreprocessPartitionFiles(
         old_part, &old_files, puffdiff_allowed));
     for (const FilesystemInterface::File& file : old_files)
       old_files_map[file.name] = file;
@@ -295,10 +413,10 @@
 
   TEST_AND_RETURN_FALSE(new_part.fs_interface);
   vector<FilesystemInterface::File> new_files;
-  TEST_AND_RETURN_FALSE(deflate_utils::PreprocessParitionFiles(
+  TEST_AND_RETURN_FALSE(deflate_utils::PreprocessPartitionFiles(
       new_part, &new_files, puffdiff_allowed));
 
-  vector<FileDeltaProcessor> file_delta_processors;
+  list<FileDeltaProcessor> file_delta_processors;
 
   // The processing is very straightforward here, we generate operations for
   // every file (and pseudo-file such as the metadata) in the new filesystem
@@ -312,8 +430,8 @@
     // data blocks (for example, symlinks bigger than 60 bytes in ext2) are
     // handled as normal files. We also ignore blocks that were already
     // processed by a previous file.
-    vector<Extent> new_file_extents = FilterExtentRanges(
-        new_file.extents, new_visited_blocks);
+    vector<Extent> new_file_extents =
+        FilterExtentRanges(new_file.extents, new_visited_blocks);
     new_visited_blocks.AddExtents(new_file_extents);
 
     if (new_file_extents.empty())
@@ -327,9 +445,14 @@
     // from using a graph/cycle detection/etc to generate diffs, and at that
     // time, it will be easy (non-complex) to have many operations read
     // from the same source blocks. At that time, this code can die. -adlr
-    auto old_file = old_files_map[new_file.name];
-    vector<Extent> old_file_extents =
-        FilterExtentRanges(old_file.extents, old_visited_blocks);
+    FilesystemInterface::File old_file =
+        GetOldFile(old_files_map, new_file.name);
+    vector<Extent> old_file_extents;
+    if (version.InplaceUpdate())
+      old_file_extents =
+          FilterExtentRanges(old_file.extents, old_visited_blocks);
+    else
+      old_file_extents = FilterExtentRanges(old_file.extents, old_zero_blocks);
     old_visited_blocks.AddExtents(old_file_extents);
 
     file_delta_processors.emplace_back(old_part.path,
@@ -343,8 +466,45 @@
                                        hard_chunk_blocks,
                                        blob_file);
   }
+  // Process all the blocks not included in any file. We provide all the
+  // unused blocks in the old partition as available data.
+  vector<Extent> new_unvisited = {
+      ExtentForRange(0, new_part.size / kBlockSize)};
+  new_unvisited = FilterExtentRanges(new_unvisited, new_visited_blocks);
+  if (!new_unvisited.empty()) {
+    vector<Extent> old_unvisited;
+    if (old_part.fs_interface) {
+      old_unvisited.push_back(ExtentForRange(0, old_part.size / kBlockSize));
+      old_unvisited = FilterExtentRanges(old_unvisited, old_visited_blocks);
+    }
+
+    LOG(INFO) << "Scanning " << utils::BlocksInExtents(new_unvisited)
+              << " unwritten blocks using chunk size of " << soft_chunk_blocks
+              << " blocks.";
+    // We use the soft_chunk_blocks limit for the <non-file-data> as we don't
+    // really know the structure of this data and we should not expect it to
+    // have redundancy between partitions.
+    file_delta_processors.emplace_back(
+        old_part.path,
+        new_part.path,
+        version,
+        std::move(old_unvisited),
+        std::move(new_unvisited),
+        vector<puffin::BitExtent>{},  // old_deflates,
+        vector<puffin::BitExtent>{},  // new_deflates
+        "<non-file-data>",            // operation name
+        soft_chunk_blocks,
+        blob_file);
+  }
 
   size_t max_threads = GetMaxThreads();
+
+  // Sort the files in descending order by the number of new blocks so that we
+  // start processing the largest ones first.
+  if (file_delta_processors.size() > max_threads) {
+    file_delta_processors.sort(std::greater<FileDeltaProcessor>());
+  }
+
   base::DelegateSimpleThreadPool thread_pool("incremental-update-generator",
                                              max_threads);
   thread_pool.Start();
@@ -354,41 +514,9 @@
   thread_pool.JoinAll();
 
   for (auto& processor : file_delta_processors) {
-    processor.MergeOperation(aops);
+    TEST_AND_RETURN_FALSE(processor.MergeOperation(aops));
   }
 
-  // Process all the blocks not included in any file. We provided all the unused
-  // blocks in the old partition as available data.
-  vector<Extent> new_unvisited = {
-      ExtentForRange(0, new_part.size / kBlockSize)};
-  new_unvisited = FilterExtentRanges(new_unvisited, new_visited_blocks);
-  if (new_unvisited.empty())
-    return true;
-
-  vector<Extent> old_unvisited;
-  if (old_part.fs_interface) {
-    old_unvisited.push_back(ExtentForRange(0, old_part.size / kBlockSize));
-    old_unvisited = FilterExtentRanges(old_unvisited, old_visited_blocks);
-  }
-
-  LOG(INFO) << "Scanning " << utils::BlocksInExtents(new_unvisited)
-            << " unwritten blocks using chunk size of " << soft_chunk_blocks
-            << " blocks.";
-  // We use the soft_chunk_blocks limit for the <non-file-data> as we don't
-  // really know the structure of this data and we should not expect it to have
-  // redundancy between partitions.
-  TEST_AND_RETURN_FALSE(DeltaReadFile(aops,
-                                      old_part.path,
-                                      new_part.path,
-                                      old_unvisited,
-                                      new_unvisited,
-                                      {},                 // old_deflates,
-                                      {},                 // new_deflates
-                                      "<non-file-data>",  // operation name
-                                      soft_chunk_blocks,
-                                      version,
-                                      blob_file));
-
   return true;
 }
 
@@ -401,7 +529,8 @@
                              const PayloadVersion& version,
                              BlobFileWriter* blob_file,
                              ExtentRanges* old_visited_blocks,
-                             ExtentRanges* new_visited_blocks) {
+                             ExtentRanges* new_visited_blocks,
+                             ExtentRanges* old_zero_blocks) {
   vector<BlockMapping::BlockId> old_block_ids;
   vector<BlockMapping::BlockId> new_block_ids;
   TEST_AND_RETURN_FALSE(MapPartitionBlocks(old_part,
@@ -432,7 +561,7 @@
   // is a block from the new partition.
   map<BlockMapping::BlockId, vector<uint64_t>> old_blocks_map;
 
-  for (uint64_t block = old_num_blocks; block-- > 0; ) {
+  for (uint64_t block = old_num_blocks; block-- > 0;) {
     if (old_block_ids[block] != 0 && !old_visited_blocks->ContainsBlock(block))
       old_blocks_map[old_block_ids[block]].push_back(block);
 
@@ -441,8 +570,9 @@
     // importantly, these could sometimes be blocks discarded in the SSD which
     // would read non-zero values.
     if (old_block_ids[block] == 0)
-      old_visited_blocks->AddBlock(block);
+      old_zero_blocks->AddBlock(block);
   }
+  old_visited_blocks->AddRanges(*old_zero_blocks);
 
   // The collection of blocks in the new partition with just zeros. This is a
   // common case for free-space that's also problematic for bsdiff, so we want
@@ -478,30 +608,44 @@
       old_blocks_map_it->second.pop_back();
   }
 
+  if (chunk_blocks == -1)
+    chunk_blocks = new_num_blocks;
+
   // Produce operations for the zero blocks split per output extent.
-  // TODO(deymo): Produce ZERO operations instead of calling DeltaReadFile().
   size_t num_ops = aops->size();
   new_visited_blocks->AddExtents(new_zeros);
   for (const Extent& extent : new_zeros) {
-    TEST_AND_RETURN_FALSE(DeltaReadFile(aops,
-                                        "",
-                                        new_part,
-                                        vector<Extent>(),        // old_extents
-                                        vector<Extent>{extent},  // new_extents
-                                        {},                      // old_deflates
-                                        {},                      // new_deflates
-                                        "<zeros>",
-                                        chunk_blocks,
-                                        version,
-                                        blob_file));
+    if (version.OperationAllowed(InstallOperation::ZERO)) {
+      for (uint64_t offset = 0; offset < extent.num_blocks();
+           offset += chunk_blocks) {
+        uint64_t num_blocks =
+            std::min(static_cast<uint64_t>(extent.num_blocks()) - offset,
+                     static_cast<uint64_t>(chunk_blocks));
+        InstallOperation operation;
+        operation.set_type(InstallOperation::ZERO);
+        *(operation.add_dst_extents()) =
+            ExtentForRange(extent.start_block() + offset, num_blocks);
+        aops->push_back({.name = "<zeros>", .op = operation});
+      }
+    } else {
+      TEST_AND_RETURN_FALSE(DeltaReadFile(aops,
+                                          "",
+                                          new_part,
+                                          {},        // old_extents
+                                          {extent},  // new_extents
+                                          {},        // old_deflates
+                                          {},        // new_deflates
+                                          "<zeros>",
+                                          chunk_blocks,
+                                          version,
+                                          blob_file));
+    }
   }
   LOG(INFO) << "Produced " << (aops->size() - num_ops) << " operations for "
             << utils::BlocksInExtents(new_zeros) << " zeroed blocks";
 
   // Produce MOVE/SOURCE_COPY operations for the moved blocks.
   num_ops = aops->size();
-  if (chunk_blocks == -1)
-    chunk_blocks = new_num_blocks;
   uint64_t used_blocks = 0;
   old_visited_blocks->AddExtents(old_identical_blocks);
   new_visited_blocks->AddExtents(new_identical_blocks);
@@ -564,15 +708,15 @@
     chunk_blocks = total_blocks;
 
   for (uint64_t block_offset = 0; block_offset < total_blocks;
-      block_offset += chunk_blocks) {
+       block_offset += chunk_blocks) {
     // Split the old/new file in the same chunks. Note that this could drop
     // some information from the old file used for the new chunk. If the old
     // file is smaller (or even empty when there's no old file) the chunk will
     // also be empty.
-    vector<Extent> old_extents_chunk = ExtentsSublist(
-        old_extents, block_offset, chunk_blocks);
-    vector<Extent> new_extents_chunk = ExtentsSublist(
-        new_extents, block_offset, chunk_blocks);
+    vector<Extent> old_extents_chunk =
+        ExtentsSublist(old_extents, block_offset, chunk_blocks);
+    vector<Extent> new_extents_chunk =
+        ExtentsSublist(new_extents, block_offset, chunk_blocks);
     NormalizeExtents(&old_extents_chunk);
     NormalizeExtents(&new_extents_chunk);
 
@@ -589,8 +733,7 @@
     // Check if the operation writes nothing.
     if (operation.dst_extents_size() == 0) {
       if (operation.type() == InstallOperation::MOVE) {
-        LOG(INFO) << "Empty MOVE operation ("
-                  << name << "), skipping";
+        LOG(INFO) << "Empty MOVE operation (" << name << "), skipping";
         continue;
       } else {
         LOG(ERROR) << "Empty non-MOVE operation";
@@ -602,8 +745,8 @@
     AnnotatedOperation aop;
     aop.name = name;
     if (static_cast<uint64_t>(chunk_blocks) < total_blocks) {
-      aop.name = base::StringPrintf("%s:%" PRIu64,
-                                    name.c_str(), block_offset / chunk_blocks);
+      aop.name = base::StringPrintf(
+          "%s:%" PRIu64, name.c_str(), block_offset / chunk_blocks);
     }
     aop.op = operation;
 
@@ -617,7 +760,7 @@
 bool GenerateBestFullOperation(const brillo::Blob& new_data,
                                const PayloadVersion& version,
                                brillo::Blob* out_blob,
-                               InstallOperation_Type* out_type) {
+                               InstallOperation::Type* out_type) {
   if (new_data.empty())
     return false;
 
@@ -720,7 +863,7 @@
 
   // Try generating a full operation for the given new data, regardless of the
   // old_data.
-  InstallOperation_Type op_type;
+  InstallOperation::Type op_type;
   TEST_AND_RETURN_FALSE(
       GenerateBestFullOperation(new_data, version, &data_blob, &op_type));
   operation.set_type(op_type);
@@ -728,23 +871,28 @@
   brillo::Blob old_data;
   if (blocks_to_read > 0) {
     // Read old data.
-    TEST_AND_RETURN_FALSE(
-        utils::ReadExtents(old_part, src_extents, &old_data,
-                           kBlockSize * blocks_to_read, kBlockSize));
+    TEST_AND_RETURN_FALSE(utils::ReadExtents(old_part,
+                                             src_extents,
+                                             &old_data,
+                                             kBlockSize * blocks_to_read,
+                                             kBlockSize));
     if (old_data == new_data) {
       // No change in data.
       operation.set_type(version.OperationAllowed(InstallOperation::SOURCE_COPY)
                              ? InstallOperation::SOURCE_COPY
                              : InstallOperation::MOVE);
       data_blob = brillo::Blob();
-    } else {
+    } else if (IsDiffOperationBetter(
+                   operation, data_blob.size(), 0, src_extents.size())) {
+      // No point in trying a diff if even a zero-sized diff operation is
+      // still worse than the replace.
       if (bsdiff_allowed) {
         base::FilePath patch;
         TEST_AND_RETURN_FALSE(base::CreateTemporaryFile(&patch));
         ScopedPathUnlinker unlinker(patch.value());
 
         std::unique_ptr<bsdiff::PatchWriterInterface> bsdiff_patch_writer;
-        InstallOperation_Type operation_type = InstallOperation::BSDIFF;
+        InstallOperation::Type operation_type = InstallOperation::BSDIFF;
         if (version.OperationAllowed(InstallOperation::BROTLI_BSDIFF)) {
           bsdiff_patch_writer =
               bsdiff::CreateBSDF2PatchWriter(patch.value(),
@@ -768,7 +916,10 @@
 
         TEST_AND_RETURN_FALSE(utils::ReadFile(patch.value(), &bsdiff_delta));
         CHECK_GT(bsdiff_delta.size(), static_cast<brillo::Blob::size_type>(0));
-        if (bsdiff_delta.size() < data_blob.size()) {
+        if (IsDiffOperationBetter(operation,
+                                  data_blob.size(),
+                                  bsdiff_delta.size(),
+                                  src_extents.size())) {
           operation.set_type(operation_type);
           data_blob = std::move(bsdiff_delta);
         }
@@ -785,24 +936,16 @@
         TEST_AND_RETURN_FALSE(deflate_utils::FindAndCompactDeflates(
             dst_extents, new_deflates, &dst_deflates));
 
-        // Remove equal deflates. TODO(*): We can do a N*N check using
-        // hashing. It will not reduce the payload size, but it will speeds up
-        // the puffing on the client device.
-        auto src = src_deflates.begin();
-        auto dst = dst_deflates.begin();
-        for (; src != src_deflates.end() && dst != dst_deflates.end();) {
-          auto src_in_bytes = deflate_utils::ExpandToByteExtent(*src);
-          auto dst_in_bytes = deflate_utils::ExpandToByteExtent(*dst);
-          if (src_in_bytes.length == dst_in_bytes.length &&
-              !memcmp(old_data.data() + src_in_bytes.offset,
-                      new_data.data() + dst_in_bytes.offset,
-                      src_in_bytes.length)) {
-            src = src_deflates.erase(src);
-            dst = dst_deflates.erase(dst);
-          } else {
-            src++;
-            dst++;
-          }
+        puffin::RemoveEqualBitExtents(
+            old_data, new_data, &src_deflates, &dst_deflates);
+
+        // See crbug.com/915559.
+        if (version.minor <= kPuffdiffMinorPayloadVersion) {
+          TEST_AND_RETURN_FALSE(puffin::RemoveDeflatesWithBadDistanceCaches(
+              old_data, &src_deflates));
+
+          TEST_AND_RETURN_FALSE(puffin::RemoveDeflatesWithBadDistanceCaches(
+              new_data, &dst_deflates));
         }
 
         // Only Puffdiff if both files have at least one deflate left.
@@ -821,7 +964,10 @@
                                                  temp_file_path,
                                                  &puffdiff_delta));
           TEST_AND_RETURN_FALSE(puffdiff_delta.size() > 0);
-          if (puffdiff_delta.size() < data_blob.size()) {
+          if (IsDiffOperationBetter(operation,
+                                    data_blob.size(),
+                                    puffdiff_delta.size(),
+                                    src_extents.size())) {
             operation.set_type(InstallOperation::PUFFDIFF);
             data_blob = std::move(puffdiff_delta);
           }
@@ -832,8 +978,8 @@
 
   // Remove identical src/dst block ranges in MOVE operations.
   if (operation.type() == InstallOperation::MOVE) {
-    auto removed_bytes = RemoveIdenticalBlockRanges(
-        &src_extents, &dst_extents, new_data.size());
+    auto removed_bytes =
+        RemoveIdenticalBlockRanges(&src_extents, &dst_extents, new_data.size());
     operation.set_src_length(old_data.size() - removed_bytes);
     operation.set_dst_length(new_data.size() - removed_bytes);
   }
@@ -864,15 +1010,14 @@
   return true;
 }
 
-bool IsAReplaceOperation(InstallOperation_Type op_type) {
+bool IsAReplaceOperation(InstallOperation::Type op_type) {
   return (op_type == InstallOperation::REPLACE ||
           op_type == InstallOperation::REPLACE_BZ ||
           op_type == InstallOperation::REPLACE_XZ);
 }
 
-bool IsNoSourceOperation(InstallOperation_Type op_type) {
-  return (IsAReplaceOperation(op_type) ||
-          op_type == InstallOperation::ZERO ||
+bool IsNoSourceOperation(InstallOperation::Type op_type) {
+  return (IsAReplaceOperation(op_type) || op_type == InstallOperation::ZERO ||
           op_type == InstallOperation::DISCARD);
 }
 
@@ -884,11 +1029,12 @@
 }
 
 void FilterNoopOperations(vector<AnnotatedOperation>* ops) {
-  ops->erase(
-      std::remove_if(
-          ops->begin(), ops->end(),
-          [](const AnnotatedOperation& aop){return IsNoopOperation(aop.op);}),
-      ops->end());
+  ops->erase(std::remove_if(ops->begin(),
+                            ops->end(),
+                            [](const AnnotatedOperation& aop) {
+                              return IsNoopOperation(aop.op);
+                            }),
+             ops->end());
 }
 
 bool InitializePartitionInfo(const PartitionConfig& part, PartitionInfo* info) {
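
One of the behavioral changes above is that zero-filled regions are now emitted as ZERO operations split per chunk (in DeltaMovedAndZeroBlocks) instead of being routed through DeltaReadFile. A worked sketch of the split, assuming a hypothetical chunk size of 256 blocks:

#include <cstdint>

// Sketch: a 1000-block zero extent starting at block 4096, with
// chunk_blocks = 256, yields four ZERO operations covering
// [4096, 4352), [4352, 4608), [4608, 4864), [4864, 5096).
constexpr uint64_t kNumBlocks = 1000;
constexpr uint64_t kChunkBlocks = 256;
constexpr uint64_t kOps = (kNumBlocks + kChunkBlocks - 1) / kChunkBlocks;
static_assert(kOps == 4, "1000 blocks at 256 per chunk -> 4 ZERO operations");
static_assert(kNumBlocks - 3 * kChunkBlocks == 232,
              "the final chunk covers the 232-block remainder");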
diff --git a/payload_generator/delta_diff_utils.h b/payload_generator/delta_diff_utils.h
index dea8535..2211b30 100644
--- a/payload_generator/delta_diff_utils.h
+++ b/payload_generator/delta_diff_utils.h
@@ -17,6 +17,7 @@
 #ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_DELTA_DIFF_UTILS_H_
 #define UPDATE_ENGINE_PAYLOAD_GENERATOR_DELTA_DIFF_UTILS_H_
 
+#include <map>
 #include <string>
 #include <vector>
 
@@ -69,7 +70,8 @@
                              const PayloadVersion& version,
                              BlobFileWriter* blob_file,
                              ExtentRanges* old_visited_blocks,
-                             ExtentRanges* new_visited_blocks);
+                             ExtentRanges* new_visited_blocks,
+                             ExtentRanges* old_zero_blocks);
 
 // For a given file |name| append operations to |aops| to produce it in the
 // |new_part|. The file will be split in chunks of |chunk_blocks| blocks each
@@ -117,13 +119,13 @@
 bool GenerateBestFullOperation(const brillo::Blob& new_data,
                                const PayloadVersion& version,
                                brillo::Blob* out_blob,
-                               InstallOperation_Type* out_type);
+                               InstallOperation::Type* out_type);
 
 // Returns whether |op_type| is one of the REPLACE full operations.
-bool IsAReplaceOperation(InstallOperation_Type op_type);
+bool IsAReplaceOperation(InstallOperation::Type op_type);
 
 // Returns true if an operation with type |op_type| has no |src_extents|.
-bool IsNoSourceOperation(InstallOperation_Type op_type);
+bool IsNoSourceOperation(InstallOperation::Type op_type);
 
 // Returns true if |op| is a no-op operation that doesn't do any useful work
 // (e.g., a move operation that copies blocks onto themselves).
@@ -149,6 +151,12 @@
 // Returns the max number of threads to process the files(chunks) in parallel.
 size_t GetMaxThreads();
 
+// Returns the old file whose file name has the shortest Levenshtein distance
+// to |new_file_name|.
+FilesystemInterface::File GetOldFile(
+    const std::map<std::string, FilesystemInterface::File>& old_files_map,
+    const std::string& new_file_name);
+
 }  // namespace diff_utils
 
 }  // namespace chromeos_update_engine
diff --git a/payload_generator/delta_diff_utils_unittest.cc b/payload_generator/delta_diff_utils_unittest.cc
index a83cea2..b2950e8 100644
--- a/payload_generator/delta_diff_utils_unittest.cc
+++ b/payload_generator/delta_diff_utils_unittest.cc
@@ -59,8 +59,8 @@
     uint64_t to_write =
         std::min(static_cast<uint64_t>(extent.num_blocks()) * block_size,
                  static_cast<uint64_t>(data.size()) - offset);
-    TEST_AND_RETURN_FALSE(
-        fwrite(data.data() + offset, 1, to_write, fp.get()) == to_write);
+    TEST_AND_RETURN_FALSE(fwrite(data.data() + offset, 1, to_write, fp.get()) ==
+                          to_write);
     offset += extent.num_blocks() * block_size;
   }
   return true;
@@ -68,8 +68,10 @@
 
 // Create a fake filesystem of the given |size| and initialize the partition
 // holding it in the PartitionConfig |part|.
-void CreatePartition(PartitionConfig* part, const string& pattern,
-                     uint64_t block_size, off_t size) {
+void CreatePartition(PartitionConfig* part,
+                     const string& pattern,
+                     uint64_t block_size,
+                     off_t size) {
   int fd = -1;
   ASSERT_TRUE(utils::MakeTempFile(pattern.c_str(), &part->path, &fd));
   ASSERT_EQ(0, ftruncate(fd, size));
@@ -95,7 +97,8 @@
     brillo::Blob block_data(prefix.begin(), prefix.end());
     TEST_AND_RETURN_FALSE(prefix.size() <= block_size);
     block_data.resize(block_size, 'X');
-    std::copy(block_data.begin(), block_data.end(),
+    std::copy(block_data.begin(),
+              block_data.end(),
               file_data.begin() + i * block_size);
   }
   return test_utils::WriteFileVector(part.path, file_data);
@@ -108,13 +111,16 @@
   const uint64_t kDefaultBlockCount = 128;
 
   void SetUp() override {
-    CreatePartition(&old_part_, "DeltaDiffUtilsTest-old_part-XXXXXX",
-                    block_size_, block_size_ * kDefaultBlockCount);
-    CreatePartition(&new_part_, "DeltaDiffUtilsTest-old_part-XXXXXX",
-                    block_size_, block_size_ * kDefaultBlockCount);
-    ASSERT_TRUE(utils::MakeTempFile("DeltaDiffUtilsTest-blob-XXXXXX",
-                                    &blob_path_,
-                                    &blob_fd_));
+    CreatePartition(&old_part_,
+                    "DeltaDiffUtilsTest-old_part-XXXXXX",
+                    block_size_,
+                    block_size_ * kDefaultBlockCount);
+    CreatePartition(&new_part_,
+                    "DeltaDiffUtilsTest-old_part-XXXXXX",
+                    block_size_,
+                    block_size_ * kDefaultBlockCount);
+    ASSERT_TRUE(utils::MakeTempFile(
+        "DeltaDiffUtilsTest-blob-XXXXXX", &blob_path_, &blob_fd_));
   }
 
   void TearDown() override {
@@ -131,6 +137,7 @@
                                   uint32_t minor_version) {
     BlobFileWriter blob_file(blob_fd_, &blob_size_);
     PayloadVersion version(kChromeOSMajorPayloadVersion, minor_version);
+    ExtentRanges old_zero_blocks;
     return diff_utils::DeltaMovedAndZeroBlocks(&aops_,
                                                old_part_.path,
                                                new_part_.path,
@@ -140,7 +147,8 @@
                                                version,
                                                &blob_file,
                                                &old_visited_blocks_,
-                                               &new_visited_blocks_);
+                                               &new_visited_blocks_,
+                                               &old_zero_blocks);
   }
 
   // Old and new temporary partitions used in the tests. These are initialized
@@ -161,13 +169,38 @@
   ExtentRanges new_visited_blocks_;
 };
 
+TEST_F(DeltaDiffUtilsTest, SkipVerityExtentsTest) {
+  new_part_.verity.hash_tree_extent = ExtentForRange(20, 30);
+  new_part_.verity.fec_extent = ExtentForRange(40, 50);
+
+  BlobFileWriter blob_file(blob_fd_, &blob_size_);
+  EXPECT_TRUE(diff_utils::DeltaReadPartition(
+      &aops_,
+      old_part_,
+      new_part_,
+      -1,
+      -1,
+      PayloadVersion(kMaxSupportedMajorPayloadVersion,
+                     kVerityMinorPayloadVersion),
+      &blob_file));
+  for (const auto& aop : aops_) {
+    new_visited_blocks_.AddRepeatedExtents(aop.op.dst_extents());
+  }
+  for (const auto& extent : new_visited_blocks_.extent_set()) {
+    EXPECT_FALSE(ExtentRanges::ExtentsOverlap(
+        extent, new_part_.verity.hash_tree_extent));
+    EXPECT_FALSE(
+        ExtentRanges::ExtentsOverlap(extent, new_part_.verity.fec_extent));
+  }
+}
+
 TEST_F(DeltaDiffUtilsTest, MoveSmallTest) {
   brillo::Blob data_blob(block_size_);
   test_utils::FillWithData(&data_blob);
 
   // The old file is on a different block than the new one.
-  vector<Extent> old_extents = { ExtentForRange(11, 1) };
-  vector<Extent> new_extents = { ExtentForRange(1, 1) };
+  vector<Extent> old_extents = {ExtentForRange(11, 1)};
+  vector<Extent> new_extents = {ExtentForRange(1, 1)};
 
   EXPECT_TRUE(WriteExtents(old_part_.path, old_extents, kBlockSize, data_blob));
   EXPECT_TRUE(WriteExtents(new_part_.path, new_extents, kBlockSize, data_blob));
@@ -210,15 +243,12 @@
   // Old:  [ 20     21 22     23     24 25 ] [ 28     29 ]
   // New:  [ 18 ] [ 21 22 ] [ 20 ] [ 24 25     26 ] [ 29 ]
   // Same:          ^^ ^^            ^^ ^^            ^^
-  vector<Extent> old_extents = {
-      ExtentForRange(20, 6),
-      ExtentForRange(28, 2) };
-  vector<Extent> new_extents = {
-      ExtentForRange(18, 1),
-      ExtentForRange(21, 2),
-      ExtentForRange(20, 1),
-      ExtentForRange(24, 3),
-      ExtentForRange(29, 1) };
+  vector<Extent> old_extents = {ExtentForRange(20, 6), ExtentForRange(28, 2)};
+  vector<Extent> new_extents = {ExtentForRange(18, 1),
+                                ExtentForRange(21, 2),
+                                ExtentForRange(20, 1),
+                                ExtentForRange(24, 3),
+                                ExtentForRange(29, 1)};
 
   uint64_t num_blocks = utils::BlocksInExtents(old_extents);
   EXPECT_EQ(num_blocks, utils::BlocksInExtents(new_extents));
@@ -255,13 +285,9 @@
 
   // The expected old and new extents that actually moved. See comment above.
   old_extents = {
-      ExtentForRange(20, 1),
-      ExtentForRange(23, 1),
-      ExtentForRange(28, 1) };
+      ExtentForRange(20, 1), ExtentForRange(23, 1), ExtentForRange(28, 1)};
   new_extents = {
-      ExtentForRange(18, 1),
-      ExtentForRange(20, 1),
-      ExtentForRange(26, 1) };
+      ExtentForRange(18, 1), ExtentForRange(20, 1), ExtentForRange(26, 1)};
   num_blocks = utils::BlocksInExtents(old_extents);
 
   EXPECT_EQ(num_blocks * kBlockSize, op.src_length());
@@ -290,8 +316,8 @@
   test_utils::FillWithData(&data_blob);
 
   // The old file is on a different block than the new one.
-  vector<Extent> old_extents = { ExtentForRange(1, 1) };
-  vector<Extent> new_extents = { ExtentForRange(2, 1) };
+  vector<Extent> old_extents = {ExtentForRange(1, 1)};
+  vector<Extent> new_extents = {ExtentForRange(2, 1)};
 
   EXPECT_TRUE(WriteExtents(old_part_.path, old_extents, kBlockSize, data_blob));
   // Modify one byte in the new file.
@@ -328,8 +354,8 @@
 
 TEST_F(DeltaDiffUtilsTest, ReplaceSmallTest) {
   // The old file is on a different block than the new one.
-  vector<Extent> old_extents = { ExtentForRange(1, 1) };
-  vector<Extent> new_extents = { ExtentForRange(2, 1) };
+  vector<Extent> old_extents = {ExtentForRange(1, 1)};
+  vector<Extent> new_extents = {ExtentForRange(2, 1)};
 
   // Make a blob that's just 1's that will compress well.
   brillo::Blob ones(kBlockSize, 1);
@@ -345,8 +371,8 @@
   for (int i = 0; i < 2; i++) {
     brillo::Blob data_to_test = i == 0 ? random_data : ones;
     // The old_extents will be initialized with 0.
-    EXPECT_TRUE(WriteExtents(new_part_.path, new_extents, kBlockSize,
-                             data_to_test));
+    EXPECT_TRUE(
+        WriteExtents(new_part_.path, new_extents, kBlockSize, data_to_test));
 
     brillo::Blob data;
     InstallOperation op;
@@ -364,7 +390,7 @@
     EXPECT_FALSE(data.empty());
 
     EXPECT_TRUE(op.has_type());
-    const InstallOperation_Type expected_type =
+    const InstallOperation::Type expected_type =
         (i == 0 ? InstallOperation::REPLACE : InstallOperation::REPLACE_BZ);
     EXPECT_EQ(expected_type, op.type());
     EXPECT_FALSE(op.has_data_offset());
@@ -385,8 +411,8 @@
   test_utils::FillWithData(&data_blob);
 
   // The old file is on a different block than the new one.
-  vector<Extent> old_extents = { ExtentForRange(11, 1) };
-  vector<Extent> new_extents = { ExtentForRange(1, 1) };
+  vector<Extent> old_extents = {ExtentForRange(11, 1)};
+  vector<Extent> new_extents = {ExtentForRange(1, 1)};
 
   EXPECT_TRUE(WriteExtents(old_part_.path, old_extents, kBlockSize, data_blob));
   EXPECT_TRUE(WriteExtents(new_part_.path, new_extents, kBlockSize, data_blob));
@@ -417,8 +443,8 @@
   test_utils::FillWithData(&data_blob);
 
   // The old file is on a different block than the new one.
-  vector<Extent> old_extents = { ExtentForRange(1, 1) };
-  vector<Extent> new_extents = { ExtentForRange(2, 1) };
+  vector<Extent> old_extents = {ExtentForRange(1, 1)};
+  vector<Extent> new_extents = {ExtentForRange(2, 1)};
 
   EXPECT_TRUE(WriteExtents(old_part_.path, old_extents, kBlockSize, data_blob));
   // Modify one byte in the new file.
@@ -443,6 +469,37 @@
   EXPECT_EQ(InstallOperation::SOURCE_BSDIFF, op.type());
 }
 
+TEST_F(DeltaDiffUtilsTest, PreferReplaceTest) {
+  brillo::Blob data_blob(kBlockSize);
+  vector<Extent> extents = {ExtentForRange(1, 1)};
+
+  // Write something in the first 50 bytes so that REPLACE_BZ will be slightly
+  // larger than BROTLI_BSDIFF.
+  std::iota(data_blob.begin(), data_blob.begin() + 50, 0);
+  EXPECT_TRUE(WriteExtents(old_part_.path, extents, kBlockSize, data_blob));
+  // Shift the first 50 bytes in the new file by one.
+  std::iota(data_blob.begin(), data_blob.begin() + 50, 1);
+  EXPECT_TRUE(WriteExtents(new_part_.path, extents, kBlockSize, data_blob));
+
+  brillo::Blob data;
+  InstallOperation op;
+  EXPECT_TRUE(diff_utils::ReadExtentsToDiff(
+      old_part_.path,
+      new_part_.path,
+      extents,
+      extents,
+      {},  // old_deflates
+      {},  // new_deflates
+      PayloadVersion(kMaxSupportedMajorPayloadVersion,
+                     kMaxSupportedMinorPayloadVersion),
+      &data,
+      &op));
+
+  EXPECT_FALSE(data.empty());
+  EXPECT_TRUE(op.has_type());
+  EXPECT_EQ(InstallOperation::REPLACE_BZ, op.type());
+}
+
 TEST_F(DeltaDiffUtilsTest, IsNoopOperationTest) {
   InstallOperation op;
   op.set_type(InstallOperation::REPLACE_BZ);
@@ -542,7 +599,9 @@
 
   // Override some of the old blocks with different data.
   vector<Extent> different_blocks = {ExtentForRange(40, 5)};
-  EXPECT_TRUE(WriteExtents(old_part_.path, different_blocks, kBlockSize,
+  EXPECT_TRUE(WriteExtents(old_part_.path,
+                           different_blocks,
+                           kBlockSize,
                            brillo::Blob(5 * kBlockSize, 'a')));
 
   EXPECT_TRUE(RunDeltaMovedAndZeroBlocks(10,  // chunk_blocks
@@ -592,8 +651,8 @@
   brillo::Blob partition_data(old_part_.size);
   for (size_t offset = 0; offset < partition_data.size();
        offset += file_data.size()) {
-    std::copy(file_data.begin(), file_data.end(),
-              partition_data.begin() + offset);
+    std::copy(
+        file_data.begin(), file_data.end(), partition_data.begin() + offset);
   }
   EXPECT_TRUE(test_utils::WriteFileVector(old_part_.path, partition_data));
   EXPECT_TRUE(test_utils::WriteFileVector(new_part_.path, partition_data));
@@ -693,8 +752,8 @@
   // as block permutation[i] in the new_part_.
   brillo::Blob new_contents;
   EXPECT_TRUE(utils::ReadFile(new_part_.path, &new_contents));
-  EXPECT_TRUE(WriteExtents(old_part_.path, perm_extents, block_size_,
-                           new_contents));
+  EXPECT_TRUE(
+      WriteExtents(old_part_.path, perm_extents, block_size_, new_contents));
 
   EXPECT_TRUE(RunDeltaMovedAndZeroBlocks(-1,  // chunk_blocks
                                          kSourceMinorPayloadVersion));
@@ -723,4 +782,45 @@
       test_utils::GetBuildArtifactsPath("gen/disk_ext2_4k.img")));
 }
 
+TEST_F(DeltaDiffUtilsTest, GetOldFileEmptyTest) {
+  EXPECT_TRUE(diff_utils::GetOldFile({}, "filename").name.empty());
+}
+
+TEST_F(DeltaDiffUtilsTest, GetOldFileTest) {
+  std::map<string, FilesystemInterface::File> old_files_map;
+  auto file_list = {
+      "filename",
+      "filename.zip",
+      "version1.1",
+      "version2.0",
+      "version",
+      "update_engine",
+      "delta_generator",
+  };
+  for (const auto& name : file_list) {
+    FilesystemInterface::File file;
+    file.name = name;
+    old_files_map.emplace(name, file);
+  }
+
+  // Always return exact match if possible.
+  for (const auto& name : file_list)
+    EXPECT_EQ(diff_utils::GetOldFile(old_files_map, name).name, name);
+
+  EXPECT_EQ(diff_utils::GetOldFile(old_files_map, "file_name").name,
+            "filename");
+  EXPECT_EQ(diff_utils::GetOldFile(old_files_map, "filename_new.zip").name,
+            "filename.zip");
+  EXPECT_EQ(diff_utils::GetOldFile(old_files_map, "version1.2").name,
+            "version1.1");
+  EXPECT_EQ(diff_utils::GetOldFile(old_files_map, "version3.0").name,
+            "version2.0");
+  EXPECT_EQ(diff_utils::GetOldFile(old_files_map, "_version").name, "version");
+  EXPECT_EQ(
+      diff_utils::GetOldFile(old_files_map, "update_engine_unittest").name,
+      "update_engine");
+  EXPECT_EQ(diff_utils::GetOldFile(old_files_map, "bin/delta_generator").name,
+            "delta_generator");
+}
+
 }  // namespace chromeos_update_engine
diff --git a/payload_generator/ext2_filesystem.cc b/payload_generator/ext2_filesystem.cc
index 07ec371..06304f4 100644
--- a/payload_generator/ext2_filesystem.cc
+++ b/payload_generator/ext2_filesystem.cc
@@ -92,11 +92,11 @@
 
 int UpdateFileAndAppend(ext2_ino_t dir,
                         int entry,
-                        struct ext2_dir_entry *dirent,
+                        struct ext2_dir_entry* dirent,
                         int offset,
                         int blocksize,
-                        char *buf,
-                        void *priv_data) {
+                        char* buf,
+                        void* priv_data) {
   UpdateFileAndAppendState* state =
       static_cast<UpdateFileAndAppendState*>(priv_data);
   uint32_t file_type = dirent->name_len >> 8;
@@ -224,20 +224,21 @@
     // and triple indirect blocks (no data blocks). For directories and
     // the journal, all blocks are considered metadata blocks.
     int flags = it_ino < EXT2_GOOD_OLD_FIRST_INO ? 0 : BLOCK_FLAG_DATA_ONLY;
-    error = ext2fs_block_iterate2(filsys_, it_ino, flags,
+    error = ext2fs_block_iterate2(filsys_,
+                                  it_ino,
+                                  flags,
                                   nullptr,  // block_buf
                                   ProcessInodeAllBlocks,
                                   &file.extents);
 
     if (error) {
-      LOG(ERROR) << "Failed to enumerate inode " << it_ino
-                << " blocks (" << error << ")";
+      LOG(ERROR) << "Failed to enumerate inode " << it_ino << " blocks ("
+                 << error << ")";
       continue;
     }
     if (it_ino >= EXT2_GOOD_OLD_FIRST_INO) {
-      ext2fs_block_iterate2(filsys_, it_ino, 0, nullptr,
-                            AddMetadataBlocks,
-                            &inode_blocks);
+      ext2fs_block_iterate2(
+          filsys_, it_ino, 0, nullptr, AddMetadataBlocks, &inode_blocks);
     }
   }
   ext2fs_close_inode_scan(iscan);
@@ -273,9 +274,12 @@
     }
     ext2fs_free_mem(&dir_name);
 
-    error = ext2fs_dir_iterate2(
-        filsys_, dir_ino, 0, nullptr /* block_buf */,
-        UpdateFileAndAppend, &priv_data);
+    error = ext2fs_dir_iterate2(filsys_,
+                                dir_ino,
+                                0,
+                                nullptr /* block_buf */,
+                                UpdateFileAndAppend,
+                                &priv_data);
     if (error) {
       LOG(WARNING) << "Failed to enumerate files in directory "
                    << inodes[dir_ino].name << " (error " << error << ")";
@@ -328,9 +332,11 @@
 bool Ext2Filesystem::LoadSettings(brillo::KeyValueStore* store) const {
   // First search for the settings inode following symlinks if we find some.
   ext2_ino_t ino_num = 0;
-  errcode_t err = ext2fs_namei_follow(
-      filsys_, EXT2_ROOT_INO /* root */, EXT2_ROOT_INO /* cwd */,
-      "/etc/update_engine.conf", &ino_num);
+  errcode_t err = ext2fs_namei_follow(filsys_,
+                                      EXT2_ROOT_INO /* root */,
+                                      EXT2_ROOT_INO /* cwd */,
+                                      "/etc/update_engine.conf",
+                                      &ino_num);
   if (err != 0)
     return false;
 
@@ -340,7 +346,9 @@
 
   // Load the list of blocks and then the contents of the inodes.
   vector<Extent> extents;
-  err = ext2fs_block_iterate2(filsys_, ino_num, BLOCK_FLAG_DATA_ONLY,
+  err = ext2fs_block_iterate2(filsys_,
+                              ino_num,
+                              BLOCK_FLAG_DATA_ONLY,
                               nullptr,  // block_buf
                               ProcessInodeAllBlocks,
                               &extents);
@@ -352,8 +360,8 @@
   // Sparse holes in the settings file are not supported.
   if (EXT2_I_SIZE(&ino_data) > physical_size)
     return false;
-  if (!utils::ReadExtents(filename_, extents, &blob, physical_size,
-                          filsys_->blocksize))
+  if (!utils::ReadExtents(
+          filename_, extents, &blob, physical_size, filsys_->blocksize))
     return false;
 
   string text(blob.begin(), blob.begin() + EXT2_I_SIZE(&ino_data));
diff --git a/payload_generator/ext2_filesystem_unittest.cc b/payload_generator/ext2_filesystem_unittest.cc
index 5360e6c..54600e9 100644
--- a/payload_generator/ext2_filesystem_unittest.cc
+++ b/payload_generator/ext2_filesystem_unittest.cc
@@ -98,14 +98,13 @@
 // "generate_image.sh" script. The expected conditions of each file in these
 // images are encoded in the file name, as defined in the mentioned script.
 TEST_F(Ext2FilesystemTest, ParseGeneratedImages) {
-  const vector<string> kGeneratedImages = {
-      "disk_ext2_1k.img",
-      "disk_ext2_4k.img" };
+  const vector<string> kGeneratedImages = {"disk_ext2_1k.img",
+                                           "disk_ext2_4k.img"};
   base::FilePath build_path = GetBuildArtifactsPath().Append("gen");
   for (const string& fs_name : kGeneratedImages) {
     LOG(INFO) << "Testing " << fs_name;
-    unique_ptr<Ext2Filesystem> fs = Ext2Filesystem::CreateFromFile(
-        build_path.Append(fs_name).value());
+    unique_ptr<Ext2Filesystem> fs =
+        Ext2Filesystem::CreateFromFile(build_path.Append(fs_name).value());
     ASSERT_NE(nullptr, fs.get());
 
     vector<FilesystemInterface::File> files;
diff --git a/payload_generator/extent_ranges.cc b/payload_generator/extent_ranges.cc
index c1d3d63..0e3f087 100644
--- a/payload_generator/extent_ranges.cc
+++ b/payload_generator/extent_ranges.cc
@@ -85,7 +85,8 @@
   ExtentSet::iterator end_del = extent_set_.end();
   uint64_t del_blocks = 0;
   for (ExtentSet::iterator it = extent_set_.begin(), e = extent_set_.end();
-       it != e; ++it) {
+       it != e;
+       ++it) {
     if (ExtentsOverlapOrTouch(*it, extent)) {
       end_del = it;
       ++end_del;
@@ -129,7 +130,8 @@
   uint64_t del_blocks = 0;
   ExtentSet new_extents;
   for (ExtentSet::iterator it = extent_set_.begin(), e = extent_set_.end();
-       it != e; ++it) {
+       it != e;
+       ++it) {
     if (!ExtentsOverlap(*it, extent))
       continue;
 
@@ -142,7 +144,8 @@
 
     ExtentSet subtraction = SubtractOverlappingExtents(*it, extent);
     for (ExtentSet::iterator jt = subtraction.begin(), je = subtraction.end();
-         jt != je; ++jt) {
+         jt != je;
+         ++jt) {
       new_extents.insert(*jt);
       del_blocks -= jt->num_blocks();
     }
@@ -154,41 +157,47 @@
 
 void ExtentRanges::AddRanges(const ExtentRanges& ranges) {
   for (ExtentSet::const_iterator it = ranges.extent_set_.begin(),
-           e = ranges.extent_set_.end(); it != e; ++it) {
+                                 e = ranges.extent_set_.end();
+       it != e;
+       ++it) {
     AddExtent(*it);
   }
 }
 
 void ExtentRanges::SubtractRanges(const ExtentRanges& ranges) {
   for (ExtentSet::const_iterator it = ranges.extent_set_.begin(),
-           e = ranges.extent_set_.end(); it != e; ++it) {
+                                 e = ranges.extent_set_.end();
+       it != e;
+       ++it) {
     SubtractExtent(*it);
   }
 }
 
 void ExtentRanges::AddExtents(const vector<Extent>& extents) {
   for (vector<Extent>::const_iterator it = extents.begin(), e = extents.end();
-       it != e; ++it) {
+       it != e;
+       ++it) {
     AddExtent(*it);
   }
 }
 
 void ExtentRanges::SubtractExtents(const vector<Extent>& extents) {
   for (vector<Extent>::const_iterator it = extents.begin(), e = extents.end();
-       it != e; ++it) {
+       it != e;
+       ++it) {
     SubtractExtent(*it);
   }
 }
 
 void ExtentRanges::AddRepeatedExtents(
-    const ::google::protobuf::RepeatedPtrField<Extent> &exts) {
+    const ::google::protobuf::RepeatedPtrField<Extent>& exts) {
   for (int i = 0, e = exts.size(); i != e; ++i) {
     AddExtent(exts.Get(i));
   }
 }
 
 void ExtentRanges::SubtractRepeatedExtents(
-    const ::google::protobuf::RepeatedPtrField<Extent> &exts) {
+    const ::google::protobuf::RepeatedPtrField<Extent>& exts) {
   for (int i = 0, e = exts.size(); i != e; ++i) {
     SubtractExtent(exts.Get(i));
   }
@@ -214,8 +223,9 @@
 void ExtentRanges::Dump() const {
   LOG(INFO) << "ExtentRanges Dump. blocks: " << blocks_;
   for (ExtentSet::const_iterator it = extent_set_.begin(),
-           e = extent_set_.end();
-       it != e; ++it) {
+                                 e = extent_set_.end();
+       it != e;
+       ++it) {
     LOG(INFO) << "{" << it->start_block() << ", " << it->num_blocks() << "}";
   }
 }
@@ -227,16 +237,24 @@
   return ret;
 }
 
-vector<Extent> ExtentRanges::GetExtentsForBlockCount(
-    uint64_t count) const {
+Extent ExtentForBytes(uint64_t block_size,
+                      uint64_t start_bytes,
+                      uint64_t size_bytes) {
+  uint64_t start_block = start_bytes / block_size;
+  uint64_t end_block = utils::DivRoundUp(start_bytes + size_bytes, block_size);
+  return ExtentForRange(start_block, end_block - start_block);
+}
+
+vector<Extent> ExtentRanges::GetExtentsForBlockCount(uint64_t count) const {
   vector<Extent> out;
   if (count == 0)
     return out;
   uint64_t out_blocks = 0;
   CHECK(count <= blocks_);
   for (ExtentSet::const_iterator it = extent_set_.begin(),
-           e = extent_set_.end();
-       it != e; ++it) {
+                                 e = extent_set_.end();
+       it != e;
+       ++it) {
     const uint64_t blocks_needed = count - out_blocks;
     const Extent& extent = *it;
     out.push_back(extent);
@@ -277,8 +295,8 @@
         continue;
       if (iter->start_block() <= extent.start_block()) {
         // We need to cut blocks from the beginning of the |extent|.
-        uint64_t cut_blocks = iter->start_block() + iter->num_blocks() -
-            extent.start_block();
+        uint64_t cut_blocks =
+            iter->start_block() + iter->num_blocks() - extent.start_block();
         if (cut_blocks >= extent.num_blocks()) {
           extent.set_num_blocks(0);
           break;
@@ -288,9 +306,8 @@
       } else {
         // We need to cut blocks in the middle of the extent, possibly up to the
         // end of it.
-        result.push_back(
-            ExtentForRange(extent.start_block(),
-                           iter->start_block() - extent.start_block()));
+        result.push_back(ExtentForRange(
+            extent.start_block(), iter->start_block() - extent.start_block()));
         uint64_t new_start = iter->start_block() + iter->num_blocks();
         uint64_t old_end = extent.start_block() + extent.num_blocks();
         if (new_start >= old_end) {
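
ExtentForBytes(), added above in extent_ranges.cc, converts a byte range into the block extent that fully covers it: the start block is rounded down and the end block is rounded up with utils::DivRoundUp(). A small hedged usage sketch, assuming a 4096-byte block size (the concrete numbers are illustrative only, using the Extent accessors and CHECK macro seen elsewhere in this change):

// Illustrative only: the byte range [5000, 5100) with 4096-byte blocks lies
// entirely inside block 1, so the covering extent is {start_block: 1,
// num_blocks: 1}.
//   start_block = 5000 / 4096                  = 1
//   end_block   = DivRoundUp(5000 + 100, 4096) = 2
Extent covering = ExtentForBytes(4096, 5000, 100);
CHECK(covering.start_block() == 1);
CHECK(covering.num_blocks() == 1);
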
diff --git a/payload_generator/extent_ranges.h b/payload_generator/extent_ranges.h
index 198c834..62ffff4 100644
--- a/payload_generator/extent_ranges.h
+++ b/payload_generator/extent_ranges.h
@@ -41,6 +41,9 @@
 };
 
 Extent ExtentForRange(uint64_t start_block, uint64_t num_blocks);
+Extent ExtentForBytes(uint64_t block_size,
+                      uint64_t start_bytes,
+                      uint64_t size_bytes);
 
 class ExtentRanges {
  public:
@@ -54,9 +57,9 @@
   void AddExtents(const std::vector<Extent>& extents);
   void SubtractExtents(const std::vector<Extent>& extents);
   void AddRepeatedExtents(
-      const ::google::protobuf::RepeatedPtrField<Extent> &exts);
+      const ::google::protobuf::RepeatedPtrField<Extent>& exts);
   void SubtractRepeatedExtents(
-      const ::google::protobuf::RepeatedPtrField<Extent> &exts);
+      const ::google::protobuf::RepeatedPtrField<Extent>& exts);
   void AddRanges(const ExtentRanges& ranges);
   void SubtractRanges(const ExtentRanges& ranges);
 
diff --git a/payload_generator/extent_ranges_unittest.cc b/payload_generator/extent_ranges_unittest.cc
index 3705bac..2bcffed 100644
--- a/payload_generator/extent_ranges_unittest.cc
+++ b/payload_generator/extent_ranges_unittest.cc
@@ -51,73 +51,57 @@
   }
 }
 
-#define EXPECT_RANGE_EQ(ranges, var)                            \
-  do {                                                          \
-    ExpectRangeEq(ranges, var, arraysize(var), __LINE__);       \
+#define EXPECT_RANGE_EQ(ranges, var)                      \
+  do {                                                    \
+    ExpectRangeEq(ranges, var, arraysize(var), __LINE__); \
   } while (0)
 
-void ExpectRangesOverlapOrTouch(uint64_t a_start, uint64_t a_num,
-                                uint64_t b_start, uint64_t b_num) {
-  EXPECT_TRUE(ExtentRanges::ExtentsOverlapOrTouch(ExtentForRange(a_start,
-                                                                 a_num),
-                                                  ExtentForRange(b_start,
-                                                                 b_num)));
-  EXPECT_TRUE(ExtentRanges::ExtentsOverlapOrTouch(ExtentForRange(b_start,
-                                                                 b_num),
-                                                  ExtentForRange(a_start,
-                                                                 a_num)));
+void ExpectRangesOverlapOrTouch(uint64_t a_start,
+                                uint64_t a_num,
+                                uint64_t b_start,
+                                uint64_t b_num) {
+  EXPECT_TRUE(ExtentRanges::ExtentsOverlapOrTouch(
+      ExtentForRange(a_start, a_num), ExtentForRange(b_start, b_num)));
+  EXPECT_TRUE(ExtentRanges::ExtentsOverlapOrTouch(
+      ExtentForRange(b_start, b_num), ExtentForRange(a_start, a_num)));
 }
 
-void ExpectFalseRangesOverlapOrTouch(uint64_t a_start, uint64_t a_num,
-                                     uint64_t b_start, uint64_t b_num) {
-  EXPECT_FALSE(ExtentRanges::ExtentsOverlapOrTouch(ExtentForRange(a_start,
-                                                                  a_num),
-                                                   ExtentForRange(b_start,
-                                                                  b_num)));
-  EXPECT_FALSE(ExtentRanges::ExtentsOverlapOrTouch(ExtentForRange(b_start,
-                                                                  b_num),
-                                                   ExtentForRange(a_start,
-                                                                  a_num)));
-  EXPECT_FALSE(ExtentRanges::ExtentsOverlap(ExtentForRange(a_start,
-                                                           a_num),
-                                            ExtentForRange(b_start,
-                                                           b_num)));
-  EXPECT_FALSE(ExtentRanges::ExtentsOverlap(ExtentForRange(b_start,
-                                                           b_num),
-                                            ExtentForRange(a_start,
-                                                           a_num)));
+void ExpectFalseRangesOverlapOrTouch(uint64_t a_start,
+                                     uint64_t a_num,
+                                     uint64_t b_start,
+                                     uint64_t b_num) {
+  EXPECT_FALSE(ExtentRanges::ExtentsOverlapOrTouch(
+      ExtentForRange(a_start, a_num), ExtentForRange(b_start, b_num)));
+  EXPECT_FALSE(ExtentRanges::ExtentsOverlapOrTouch(
+      ExtentForRange(b_start, b_num), ExtentForRange(a_start, a_num)));
+  EXPECT_FALSE(ExtentRanges::ExtentsOverlap(ExtentForRange(a_start, a_num),
+                                            ExtentForRange(b_start, b_num)));
+  EXPECT_FALSE(ExtentRanges::ExtentsOverlap(ExtentForRange(b_start, b_num),
+                                            ExtentForRange(a_start, a_num)));
 }
 
-void ExpectRangesOverlap(uint64_t a_start, uint64_t a_num,
-                         uint64_t b_start, uint64_t b_num) {
-  EXPECT_TRUE(ExtentRanges::ExtentsOverlap(ExtentForRange(a_start,
-                                                          a_num),
-                                           ExtentForRange(b_start,
-                                                          b_num)));
-  EXPECT_TRUE(ExtentRanges::ExtentsOverlap(ExtentForRange(b_start,
-                                                          b_num),
-                                           ExtentForRange(a_start,
-                                                          a_num)));
-  EXPECT_TRUE(ExtentRanges::ExtentsOverlapOrTouch(ExtentForRange(a_start,
-                                                                 a_num),
-                                                  ExtentForRange(b_start,
-                                                                 b_num)));
-  EXPECT_TRUE(ExtentRanges::ExtentsOverlapOrTouch(ExtentForRange(b_start,
-                                                                 b_num),
-                                                  ExtentForRange(a_start,
-                                                                 a_num)));
+void ExpectRangesOverlap(uint64_t a_start,
+                         uint64_t a_num,
+                         uint64_t b_start,
+                         uint64_t b_num) {
+  EXPECT_TRUE(ExtentRanges::ExtentsOverlap(ExtentForRange(a_start, a_num),
+                                           ExtentForRange(b_start, b_num)));
+  EXPECT_TRUE(ExtentRanges::ExtentsOverlap(ExtentForRange(b_start, b_num),
+                                           ExtentForRange(a_start, a_num)));
+  EXPECT_TRUE(ExtentRanges::ExtentsOverlapOrTouch(
+      ExtentForRange(a_start, a_num), ExtentForRange(b_start, b_num)));
+  EXPECT_TRUE(ExtentRanges::ExtentsOverlapOrTouch(
+      ExtentForRange(b_start, b_num), ExtentForRange(a_start, a_num)));
 }
 
-void ExpectFalseRangesOverlap(uint64_t a_start, uint64_t a_num,
-                              uint64_t b_start, uint64_t b_num) {
-  EXPECT_FALSE(ExtentRanges::ExtentsOverlap(ExtentForRange(a_start,
-                                                           a_num),
-                                            ExtentForRange(b_start,
-                                                           b_num)));
-  EXPECT_FALSE(ExtentRanges::ExtentsOverlap(ExtentForRange(b_start,
-                                                           b_num),
-                                            ExtentForRange(a_start,
-                                                           a_num)));
+void ExpectFalseRangesOverlap(uint64_t a_start,
+                              uint64_t a_num,
+                              uint64_t b_start,
+                              uint64_t b_num) {
+  EXPECT_FALSE(ExtentRanges::ExtentsOverlap(ExtentForRange(a_start, a_num),
+                                            ExtentForRange(b_start, b_num)));
+  EXPECT_FALSE(ExtentRanges::ExtentsOverlap(ExtentForRange(b_start, b_num),
+                                            ExtentForRange(a_start, a_num)));
 }
 
 }  // namespace
@@ -179,35 +163,31 @@
     ranges.AddExtent(ExtentForRange(i, 50));
   }
   {
-    static const uint64_t expected[] = {
-      0, 2, 3, 1, 100, 50, 200, 50, 300, 50, 400, 50,
-      500, 50, 600, 50, 700, 50, 800, 50, 900, 50
-    };
+    static const uint64_t expected[] = {0,   2,  3,   1,  100, 50, 200, 50,
+                                        300, 50, 400, 50, 500, 50, 600, 50,
+                                        700, 50, 800, 50, 900, 50};
     EXPECT_RANGE_EQ(ranges, expected);
   }
 
   ranges.SubtractExtent(ExtentForRange(210, 410 - 210));
   {
-    static const uint64_t expected[] = {
-      0, 2, 3, 1, 100, 50, 200, 10, 410, 40, 500, 50,
-      600, 50, 700, 50, 800, 50, 900, 50
-    };
+    static const uint64_t expected[] = {0,   2,   3,   1,   100, 50,  200,
+                                        10,  410, 40,  500, 50,  600, 50,
+                                        700, 50,  800, 50,  900, 50};
     EXPECT_RANGE_EQ(ranges, expected);
   }
   ranges.AddExtent(ExtentForRange(100000, 0));
   {
-    static const uint64_t expected[] = {
-      0, 2, 3, 1, 100, 50, 200, 10, 410, 40, 500, 50,
-      600, 50, 700, 50, 800, 50, 900, 50
-    };
+    static const uint64_t expected[] = {0,   2,   3,   1,   100, 50,  200,
+                                        10,  410, 40,  500, 50,  600, 50,
+                                        700, 50,  800, 50,  900, 50};
     EXPECT_RANGE_EQ(ranges, expected);
   }
   ranges.SubtractExtent(ExtentForRange(3, 0));
   {
-    static const uint64_t expected[] = {
-      0, 2, 3, 1, 100, 50, 200, 10, 410, 40, 500, 50,
-      600, 50, 700, 50, 800, 50, 900, 50
-    };
+    static const uint64_t expected[] = {0,   2,   3,   1,   100, 50,  200,
+                                        10,  410, 40,  500, 50,  600, 50,
+                                        700, 50,  800, 50,  900, 50};
     EXPECT_RANGE_EQ(ranges, expected);
   }
 }
@@ -289,25 +269,20 @@
 
 TEST(ExtentRangesTest, FilterExtentRangesEmptyRanges) {
   ExtentRanges ranges;
-  EXPECT_EQ(vector<Extent>(),
-            FilterExtentRanges(vector<Extent>(), ranges));
-  EXPECT_EQ(
-      vector<Extent>{ ExtentForRange(50, 10) },
-      FilterExtentRanges(vector<Extent>{ ExtentForRange(50, 10) }, ranges));
+  EXPECT_EQ(vector<Extent>(), FilterExtentRanges(vector<Extent>(), ranges));
+  EXPECT_EQ(vector<Extent>{ExtentForRange(50, 10)},
+            FilterExtentRanges(vector<Extent>{ExtentForRange(50, 10)}, ranges));
   // Check that the empty Extents are ignored.
-  EXPECT_EQ(
-      (vector<Extent>{ ExtentForRange(10, 10), ExtentForRange(20, 10) }),
-      FilterExtentRanges(vector<Extent>{
-           ExtentForRange(10, 10),
-           ExtentForRange(3, 0),
-           ExtentForRange(20, 10) }, ranges));
+  EXPECT_EQ((vector<Extent>{ExtentForRange(10, 10), ExtentForRange(20, 10)}),
+            FilterExtentRanges(vector<Extent>{ExtentForRange(10, 10),
+                                              ExtentForRange(3, 0),
+                                              ExtentForRange(20, 10)},
+                               ranges));
 }
 
 TEST(ExtentRangesTest, FilterExtentRangesMultipleRanges) {
-  // Two overlaping extents, with three ranges to remove.
-  vector<Extent> extents {
-      ExtentForRange(10, 100),
-      ExtentForRange(30, 100) };
+  // Two overlapping extents, with three ranges to remove.
+  vector<Extent> extents{ExtentForRange(10, 100), ExtentForRange(30, 100)};
   ExtentRanges ranges;
   // This overlaps the beginning of the second extent.
   ranges.AddExtent(ExtentForRange(28, 3));
@@ -315,19 +290,17 @@
   ranges.AddExtent(ExtentForRange(70, 10));
   // This overlaps the end of the second extent.
   ranges.AddExtent(ExtentForRange(108, 6));
-  EXPECT_EQ(
-      (vector<Extent>{
-           // For the first extent:
-           ExtentForRange(10, 18),
-           ExtentForRange(31, 19),
-           ExtentForRange(60, 10),
-           ExtentForRange(80, 28),
-           // For the second extent:
-           ExtentForRange(31, 19),
-           ExtentForRange(60, 10),
-           ExtentForRange(80, 28),
-           ExtentForRange(114, 16)}),
-      FilterExtentRanges(extents, ranges));
+  EXPECT_EQ((vector<Extent>{// For the first extent:
+                            ExtentForRange(10, 18),
+                            ExtentForRange(31, 19),
+                            ExtentForRange(60, 10),
+                            ExtentForRange(80, 28),
+                            // For the second extent:
+                            ExtentForRange(31, 19),
+                            ExtentForRange(60, 10),
+                            ExtentForRange(80, 28),
+                            ExtentForRange(114, 16)}),
+            FilterExtentRanges(extents, ranges));
 }
 
 TEST(ExtentRangesTest, FilterExtentRangesOvelapping) {
@@ -336,10 +309,9 @@
   ranges.AddExtent(ExtentForRange(20, 5));
   // Requested extent overlaps with one of the ranges.
   EXPECT_EQ(vector<Extent>(),
-            FilterExtentRanges(vector<Extent>{
-                                   ExtentForRange(10, 1),
-                                   ExtentForRange(22, 1) },
-                               ranges));
+            FilterExtentRanges(
+                vector<Extent>{ExtentForRange(10, 1), ExtentForRange(22, 1)},
+                ranges));
 }
 
 }  // namespace chromeos_update_engine
diff --git a/payload_generator/extent_utils.cc b/payload_generator/extent_utils.cc
index 47073f9..c0c7643 100644
--- a/payload_generator/extent_utils.cc
+++ b/payload_generator/extent_utils.cc
@@ -39,8 +39,9 @@
   // First try to extend the last extent in |extents|, if any.
   if (!extents->empty()) {
     Extent& extent = extents->back();
-    uint64_t next_block = extent.start_block() == kSparseHole ?
-        kSparseHole : extent.start_block() + extent.num_blocks();
+    uint64_t next_block = extent.start_block() == kSparseHole
+                              ? kSparseHole
+                              : extent.start_block() + extent.num_blocks();
     if (next_block == block) {
       extent.set_num_blocks(extent.num_blocks() + 1);
       return;
@@ -116,7 +117,8 @@
 }
 
 vector<Extent> ExtentsSublist(const vector<Extent>& extents,
-                              uint64_t block_offset, uint64_t block_count) {
+                              uint64_t block_offset,
+                              uint64_t block_count) {
   vector<Extent> result;
   uint64_t scanned_blocks = 0;
   if (block_count == 0)
diff --git a/payload_generator/extent_utils.h b/payload_generator/extent_utils.h
index f5fbb0e..9763b1f 100644
--- a/payload_generator/extent_utils.h
+++ b/payload_generator/extent_utils.h
@@ -34,7 +34,7 @@
 
 // Takes a collection (vector or RepeatedPtrField) of Extent and
 // returns a vector of the blocks referenced, in order.
-template<typename T>
+template <typename T>
 std::vector<uint64_t> ExpandExtents(const T& extents) {
   std::vector<uint64_t> ret;
   for (const auto& extent : extents) {
@@ -42,7 +42,8 @@
       ret.resize(ret.size() + extent.num_blocks(), kSparseHole);
     } else {
       for (uint64_t block = extent.start_block();
-           block < (extent.start_block() + extent.num_blocks()); block++) {
+           block < (extent.start_block() + extent.num_blocks());
+           block++) {
         ret.push_back(block);
       }
     }
@@ -64,8 +65,8 @@
 // Takes a pointer to extents |extents| and extents |extents_to_add|, and
 // merges them by adding |extents_to_add| to |extents| and normalizing.
 void ExtendExtents(
-  google::protobuf::RepeatedPtrField<Extent>* extents,
-  const google::protobuf::RepeatedPtrField<Extent>& extents_to_add);
+    google::protobuf::RepeatedPtrField<Extent>* extents,
+    const google::protobuf::RepeatedPtrField<Extent>& extents_to_add);
 
 // Takes a vector of extents and normalizes those extents. Expects the extents
 // to be sorted by start block. E.g. if |extents| is [(1, 2), (3, 5), (10, 2)]
@@ -77,7 +78,8 @@
 // blocks. The returned list skips the first |block_offset| blocks from the
 // |extents| and contains |block_count| blocks (or less if |extents| is shorter).
 std::vector<Extent> ExtentsSublist(const std::vector<Extent>& extents,
-                                   uint64_t block_offset, uint64_t block_count);
+                                   uint64_t block_offset,
+                                   uint64_t block_count);
 
 bool operator==(const Extent& a, const Extent& b);
 
diff --git a/payload_generator/extent_utils_unittest.cc b/payload_generator/extent_utils_unittest.cc
index eef4385..5467aa5 100644
--- a/payload_generator/extent_utils_unittest.cc
+++ b/payload_generator/extent_utils_unittest.cc
@@ -86,10 +86,10 @@
   ExtendExtents(first_op.mutable_src_extents(), second_op.src_extents());
   vector<Extent> first_op_vec;
   ExtentsToVector(first_op.src_extents(), &first_op_vec);
-  EXPECT_EQ((vector<Extent>{
-      ExtentForRange(1, 1),
-      ExtentForRange(3, 3),
-      ExtentForRange(8, 2)}), first_op_vec);
+  EXPECT_EQ(
+      (vector<Extent>{
+          ExtentForRange(1, 1), ExtentForRange(3, 3), ExtentForRange(8, 2)}),
+      first_op_vec);
 }
 
 TEST(ExtentUtilsTest, NormalizeExtentsSimpleList) {
@@ -98,21 +98,19 @@
   NormalizeExtents(&extents);
   EXPECT_EQ(0U, extents.size());
 
-  extents = { ExtentForRange(0, 3) };
+  extents = {ExtentForRange(0, 3)};
   NormalizeExtents(&extents);
   EXPECT_EQ(1U, extents.size());
   EXPECT_EQ(ExtentForRange(0, 3), extents[0]);
 }
 
 TEST(ExtentUtilsTest, NormalizeExtentsTest) {
-  vector<Extent> extents = {
-      ExtentForRange(0, 3),
-      ExtentForRange(3, 2),
-      ExtentForRange(5, 1),
-      ExtentForRange(8, 4),
-      ExtentForRange(13, 1),
-      ExtentForRange(14, 2)
-  };
+  vector<Extent> extents = {ExtentForRange(0, 3),
+                            ExtentForRange(3, 2),
+                            ExtentForRange(5, 1),
+                            ExtentForRange(8, 4),
+                            ExtentForRange(13, 1),
+                            ExtentForRange(14, 2)};
   NormalizeExtents(&extents);
   EXPECT_EQ(3U, extents.size());
   EXPECT_EQ(ExtentForRange(0, 6), extents[0]);
@@ -122,42 +120,37 @@
 
 TEST(ExtentUtilsTest, ExtentsSublistTest) {
   vector<Extent> extents = {
-      ExtentForRange(10, 10),
-      ExtentForRange(30, 10),
-      ExtentForRange(50, 10)
-  };
+      ExtentForRange(10, 10), ExtentForRange(30, 10), ExtentForRange(50, 10)};
 
   // Simple empty result cases.
-  EXPECT_EQ(vector<Extent>(),
-            ExtentsSublist(extents, 1000, 20));
-  EXPECT_EQ(vector<Extent>(),
-            ExtentsSublist(extents, 5, 0));
-  EXPECT_EQ(vector<Extent>(),
-            ExtentsSublist(extents, 30, 1));
+  EXPECT_EQ(vector<Extent>(), ExtentsSublist(extents, 1000, 20));
+  EXPECT_EQ(vector<Extent>(), ExtentsSublist(extents, 5, 0));
+  EXPECT_EQ(vector<Extent>(), ExtentsSublist(extents, 30, 1));
 
   // Normal test cases.
-  EXPECT_EQ(vector<Extent>{ ExtentForRange(13, 2) },
+  EXPECT_EQ(vector<Extent>{ExtentForRange(13, 2)},
             ExtentsSublist(extents, 3, 2));
-  EXPECT_EQ(vector<Extent>{ ExtentForRange(15, 5) },
+  EXPECT_EQ(vector<Extent>{ExtentForRange(15, 5)},
             ExtentsSublist(extents, 5, 5));
-  EXPECT_EQ((vector<Extent>{ ExtentForRange(15, 5), ExtentForRange(30, 5) }),
+  EXPECT_EQ((vector<Extent>{ExtentForRange(15, 5), ExtentForRange(30, 5)}),
             ExtentsSublist(extents, 5, 10));
   EXPECT_EQ((vector<Extent>{
-                 ExtentForRange(13, 7),
-                 ExtentForRange(30, 10),
-                 ExtentForRange(50, 3), }),
+                ExtentForRange(13, 7),
+                ExtentForRange(30, 10),
+                ExtentForRange(50, 3),
+            }),
             ExtentsSublist(extents, 3, 20));
 
   // Exact match case.
-  EXPECT_EQ(vector<Extent>{ ExtentForRange(30, 10) },
+  EXPECT_EQ(vector<Extent>{ExtentForRange(30, 10)},
             ExtentsSublist(extents, 10, 10));
-  EXPECT_EQ(vector<Extent>{ ExtentForRange(50, 10) },
+  EXPECT_EQ(vector<Extent>{ExtentForRange(50, 10)},
             ExtentsSublist(extents, 20, 10));
 
   // Cases where the requested num_blocks is too big.
-  EXPECT_EQ(vector<Extent>{ ExtentForRange(53, 7) },
+  EXPECT_EQ(vector<Extent>{ExtentForRange(53, 7)},
             ExtentsSublist(extents, 23, 100));
-  EXPECT_EQ((vector<Extent>{ ExtentForRange(34, 6), ExtentForRange(50, 10) }),
+  EXPECT_EQ((vector<Extent>{ExtentForRange(34, 6), ExtentForRange(50, 10)}),
             ExtentsSublist(extents, 14, 100));
 }
 
diff --git a/payload_generator/fake_filesystem.cc b/payload_generator/fake_filesystem.cc
index 234e2f6..7448286 100644
--- a/payload_generator/fake_filesystem.cc
+++ b/payload_generator/fake_filesystem.cc
@@ -20,10 +20,8 @@
 
 namespace chromeos_update_engine {
 
-FakeFilesystem::FakeFilesystem(uint64_t block_size, uint64_t block_count) :
-    block_size_(block_size),
-    block_count_(block_count) {
-}
+FakeFilesystem::FakeFilesystem(uint64_t block_size, uint64_t block_count)
+    : block_size_(block_size), block_count_(block_count) {}
 
 size_t FakeFilesystem::GetBlockSize() const {
   return block_size_;
diff --git a/payload_generator/fake_filesystem.h b/payload_generator/fake_filesystem.h
index 1b13920..e41a7a2 100644
--- a/payload_generator/fake_filesystem.h
+++ b/payload_generator/fake_filesystem.h
@@ -47,9 +47,7 @@
 
   // Sets the PAYLOAD_MINOR_VERSION key stored by LoadSettings(). Use a negative
   // value to produce an error in LoadSettings().
-  void SetMinorVersion(int minor_version) {
-    minor_version_ = minor_version;
-  }
+  void SetMinorVersion(int minor_version) { minor_version_ = minor_version; }
 
  private:
   FakeFilesystem() = default;
diff --git a/payload_generator/filesystem_interface.h b/payload_generator/filesystem_interface.h
index b1506e4..d04295c 100644
--- a/payload_generator/filesystem_interface.h
+++ b/payload_generator/filesystem_interface.h
@@ -19,9 +19,9 @@
 
 // This class is used to abstract a filesystem and iterate the blocks
 // associated with the files and filesystem structures.
-// For the purposes of the update payload generation, a filesystem is a formated
-// partition composed by fixed-size blocks, since that's the interface used in
-// the update payload.
+// For the purposes of the update payload generation, a filesystem is a
+// formatted partition composed of fixed-size blocks, since that's the interface
+// used in the update payload.
 
 #include <sys/stat.h>
 #include <sys/types.h>
@@ -45,9 +45,7 @@
   // all sort of files, like symlinks, hardlinks, directories and even a file
   // entry representing the metadata, free space, journaling data, etc.
   struct File {
-    File() {
-      memset(&file_stat, 0, sizeof(file_stat));
-    }
+    File() { memset(&file_stat, 0, sizeof(file_stat)); }
 
     // The stat struct for the file. This is invalid (inode 0) for some
     // pseudo-files.
diff --git a/payload_generator/full_update_generator.cc b/payload_generator/full_update_generator.cc
index 482a789..94a43ab 100644
--- a/payload_generator/full_update_generator.cc
+++ b/payload_generator/full_update_generator.cc
@@ -95,14 +95,11 @@
   brillo::Blob buffer_in_(size_);
   brillo::Blob op_blob;
   ssize_t bytes_read = -1;
-  TEST_AND_RETURN_FALSE(utils::PReadAll(fd_,
-                                        buffer_in_.data(),
-                                        buffer_in_.size(),
-                                        offset_,
-                                        &bytes_read));
+  TEST_AND_RETURN_FALSE(utils::PReadAll(
+      fd_, buffer_in_.data(), buffer_in_.size(), offset_, &bytes_read));
   TEST_AND_RETURN_FALSE(bytes_read == static_cast<ssize_t>(size_));
 
-  InstallOperation_Type op_type;
+  InstallOperation::Type op_type;
   TEST_AND_RETURN_FALSE(diff_utils::GenerateBestFullOperation(
       buffer_in_, version_, &op_blob, &op_type));
 
@@ -140,10 +137,10 @@
 
   size_t chunk_blocks = full_chunk_size / config.block_size;
   size_t max_threads = diff_utils::GetMaxThreads();
-  LOG(INFO) << "Compressing partition " << new_part.name
-            << " from " << new_part.path << " splitting in chunks of "
-            << chunk_blocks << " blocks (" << config.block_size
-            << " bytes each) using " << max_threads << " threads";
+  LOG(INFO) << "Compressing partition " << new_part.name << " from "
+            << new_part.path << " splitting in chunks of " << chunk_blocks
+            << " blocks (" << config.block_size << " bytes each) using "
+            << max_threads << " threads";
 
   int in_fd = open(new_part.path.c_str(), O_RDONLY, 0);
   TEST_AND_RETURN_FALSE(in_fd >= 0);
@@ -152,7 +149,7 @@
   // We potentially have all the ChunkProcessors in memory but only
   // |max_threads| will actually hold a block in memory while we process.
   size_t partition_blocks = new_part.size / config.block_size;
-  size_t num_chunks = (partition_blocks + chunk_blocks - 1) / chunk_blocks;
+  size_t num_chunks = utils::DivRoundUp(partition_blocks, chunk_blocks);
   aops->resize(num_chunks);
   vector<ChunkProcessor> chunk_processors;
   chunk_processors.reserve(num_chunks);
@@ -161,14 +158,14 @@
   for (size_t i = 0; i < num_chunks; ++i) {
     size_t start_block = i * chunk_blocks;
     // The last chunk could be smaller.
-    size_t num_blocks = std::min(chunk_blocks,
-                                 partition_blocks - i * chunk_blocks);
+    size_t num_blocks =
+        std::min(chunk_blocks, partition_blocks - i * chunk_blocks);
 
     // Preset all the static information about the operations. The
     // ChunkProcessor will set the rest.
     AnnotatedOperation* aop = aops->data() + i;
-    aop->name = base::StringPrintf("<%s-operation-%" PRIuS ">",
-                                   new_part.name.c_str(), i);
+    aop->name = base::StringPrintf(
+        "<%s-operation-%" PRIuS ">", new_part.name.c_str(), i);
     Extent* dst_extent = aop->op.add_dst_extents();
     dst_extent->set_start_block(start_block);
     dst_extent->set_num_blocks(num_blocks);
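
FullUpdateGenerator now sizes the chunk list with utils::DivRoundUp(partition_blocks, chunk_blocks), so every chunk spans chunk_blocks blocks except a possibly shorter final one. A tiny standalone sketch of that arithmetic (the 128/50 block counts are assumed purely for illustration):

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  size_t partition_blocks = 128;  // assumed partition size in blocks
  size_t chunk_blocks = 50;       // assumed chunk size in blocks
  // Same rounding as utils::DivRoundUp(partition_blocks, chunk_blocks).
  size_t num_chunks = (partition_blocks + chunk_blocks - 1) / chunk_blocks;
  for (size_t i = 0; i < num_chunks; ++i) {
    size_t start_block = i * chunk_blocks;
    size_t num_blocks =
        std::min(chunk_blocks, partition_blocks - i * chunk_blocks);
    std::printf(
        "chunk %zu: start_block=%zu num_blocks=%zu\n", i, start_block,
        num_blocks);
  }
  // Prints chunks [0, 50), [50, 100) and [100, 128): only the last is short.
  return 0;
}
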
diff --git a/payload_generator/full_update_generator.h b/payload_generator/full_update_generator.h
index d722028..e17dd37 100644
--- a/payload_generator/full_update_generator.h
+++ b/payload_generator/full_update_generator.h
@@ -37,12 +37,11 @@
   // must be a valid payload generation configuration for a full payload.
   // Populates |aops|, with data about the update operations, and writes
   // relevant data to |blob_file|.
-  bool GenerateOperations(
-      const PayloadGenerationConfig& config,
-      const PartitionConfig& old_part,
-      const PartitionConfig& new_part,
-      BlobFileWriter* blob_file,
-      std::vector<AnnotatedOperation>* aops) override;
+  bool GenerateOperations(const PayloadGenerationConfig& config,
+                          const PartitionConfig& old_part,
+                          const PartitionConfig& new_part,
+                          BlobFileWriter* blob_file,
+                          std::vector<AnnotatedOperation>* aops) override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(FullUpdateGenerator);
diff --git a/payload_generator/full_update_generator_unittest.cc b/payload_generator/full_update_generator_unittest.cc
index 6da4d10..e398125 100644
--- a/payload_generator/full_update_generator_unittest.cc
+++ b/payload_generator/full_update_generator_unittest.cc
@@ -40,15 +40,11 @@
     config_.hard_chunk_size = 128 * 1024;
     config_.block_size = 4096;
 
-    EXPECT_TRUE(utils::MakeTempFile("FullUpdateTest_partition.XXXXXX",
-                                    &new_part_conf.path,
-                                    nullptr));
-    EXPECT_TRUE(utils::MakeTempFile("FullUpdateTest_blobs.XXXXXX",
-                                    &out_blobs_path_,
-                                    &out_blobs_fd_));
+    new_part_conf.path = part_file_.path();
+    EXPECT_TRUE(utils::MakeTempFile(
+        "FullUpdateTest_blobs.XXXXXX", &out_blobs_path_, &out_blobs_fd_));
 
     blob_file_.reset(new BlobFileWriter(out_blobs_fd_, &out_blobs_length_));
-    part_path_unlinker_.reset(new ScopedPathUnlinker(new_part_conf.path));
     out_blobs_unlinker_.reset(new ScopedPathUnlinker(out_blobs_path_));
   }
 
@@ -62,9 +58,9 @@
   int out_blobs_fd_{-1};
   off_t out_blobs_length_{0};
   ScopedFdCloser out_blobs_fd_closer_{&out_blobs_fd_};
+  test_utils::ScopedTempFile part_file_{"FullUpdateTest_partition.XXXXXX"};
 
   std::unique_ptr<BlobFileWriter> blob_file_;
-  std::unique_ptr<ScopedPathUnlinker> part_path_unlinker_;
   std::unique_ptr<ScopedPathUnlinker> out_blobs_unlinker_;
 
   // FullUpdateGenerator under test.
diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc
index 2729bc4..de0a091 100644
--- a/payload_generator/generate_delta_main.cc
+++ b/payload_generator/generate_delta_main.cc
@@ -14,28 +14,27 @@
 // limitations under the License.
 //
 
-#include <errno.h>
-#include <fcntl.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <xz.h>
-
 #include <string>
 #include <vector>
 
+#include <base/files/file_path.h>
+#include <base/files/file_util.h>
 #include <base/logging.h>
 #include <base/strings/string_number_conversions.h>
 #include <base/strings/string_split.h>
 #include <brillo/flag_helper.h>
 #include <brillo/key_value_store.h>
+#include <brillo/message_loops/base_message_loop.h>
+#include <xz.h>
 
 #include "update_engine/common/fake_boot_control.h"
 #include "update_engine/common/fake_hardware.h"
+#include "update_engine/common/file_fetcher.h"
 #include "update_engine/common/prefs.h"
 #include "update_engine/common/terminator.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/delta_performer.h"
+#include "update_engine/payload_consumer/download_action.h"
+#include "update_engine/payload_consumer/filesystem_verifier_action.h"
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
 #include "update_engine/payload_generator/payload_generation_config.h"
@@ -57,17 +56,15 @@
 void ParseSignatureSizes(const string& signature_sizes_flag,
                          vector<int>* signature_sizes) {
   signature_sizes->clear();
-  vector<string> split_strings =
-      base::SplitString(signature_sizes_flag, ":", base::TRIM_WHITESPACE,
-                        base::SPLIT_WANT_ALL);
+  vector<string> split_strings = base::SplitString(
+      signature_sizes_flag, ":", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
   for (const string& str : split_strings) {
     int size = 0;
     bool parsing_successful = base::StringToInt(str, &size);
-    LOG_IF(FATAL, !parsing_successful)
-        << "Invalid signature size: " << str;
+    LOG_IF(FATAL, !parsing_successful) << "Invalid signature size: " << str;
 
-    LOG_IF(FATAL, size != (2048 / 8)) <<
-        "Only signature sizes of 256 bytes are supported.";
+    LOG_IF(FATAL, size != (2048 / 8))
+        << "Only signature sizes of 256 bytes are supported.";
 
     signature_sizes->push_back(size);
   }
@@ -96,16 +93,16 @@
   image_info->set_version(version);
   image_info->set_key(key);
 
-  image_info->set_build_channel(
-      build_channel.empty() ? channel : build_channel);
+  image_info->set_build_channel(build_channel.empty() ? channel
+                                                      : build_channel);
 
-  image_info->set_build_version(
-      build_version.empty() ? version : build_version);
+  image_info->set_build_version(build_version.empty() ? version
+                                                      : build_version);
 
   return true;
 }
 
-void CalculateHashForSigning(const vector<int> &sizes,
+void CalculateHashForSigning(const vector<int>& sizes,
                              const string& out_hash_file,
                              const string& out_metadata_hash_file,
                              const string& in_file) {
@@ -116,12 +113,13 @@
       << "Must pass --out_hash_file to calculate hash for signing.";
 
   brillo::Blob payload_hash, metadata_hash;
-  CHECK(PayloadSigner::HashPayloadForSigning(in_file, sizes, &payload_hash,
-                                             &metadata_hash));
-  CHECK(utils::WriteFile(out_hash_file.c_str(), payload_hash.data(),
-                         payload_hash.size()));
+  CHECK(PayloadSigner::HashPayloadForSigning(
+      in_file, sizes, &payload_hash, &metadata_hash));
+  CHECK(utils::WriteFile(
+      out_hash_file.c_str(), payload_hash.data(), payload_hash.size()));
   if (!out_metadata_hash_file.empty())
-    CHECK(utils::WriteFile(out_metadata_hash_file.c_str(), metadata_hash.data(),
+    CHECK(utils::WriteFile(out_metadata_hash_file.c_str(),
+                           metadata_hash.data(),
                            metadata_hash.size()));
 
   LOG(INFO) << "Done calculating hash for signing.";
@@ -129,9 +127,8 @@
 
 void SignatureFileFlagToBlobs(const string& signature_file_flag,
                               vector<brillo::Blob>* signatures) {
-  vector<string> signature_files =
-      base::SplitString(signature_file_flag, ":", base::TRIM_WHITESPACE,
-                        base::SPLIT_WANT_ALL);
+  vector<string> signature_files = base::SplitString(
+      signature_file_flag, ":", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
   for (const string& signature_file : signature_files) {
     brillo::Blob signature;
     CHECK(utils::ReadFile(signature_file, &signature));
@@ -145,18 +142,19 @@
                  const string& metadata_signature_file,
                  const string& out_metadata_size_file) {
   LOG(INFO) << "Signing payload.";
-  LOG_IF(FATAL, in_file.empty())
-      << "Must pass --in_file to sign payload.";
-  LOG_IF(FATAL, out_file.empty())
-      << "Must pass --out_file to sign payload.";
+  LOG_IF(FATAL, in_file.empty()) << "Must pass --in_file to sign payload.";
+  LOG_IF(FATAL, out_file.empty()) << "Must pass --out_file to sign payload.";
   LOG_IF(FATAL, payload_signature_file.empty())
-      << "Must pass --signature_file to sign payload.";
-  vector<brillo::Blob> signatures, metadata_signatures;
-  SignatureFileFlagToBlobs(payload_signature_file, &signatures);
+      << "Must pass --payload_signature_file to sign payload.";
+  vector<brillo::Blob> payload_signatures, metadata_signatures;
+  SignatureFileFlagToBlobs(payload_signature_file, &payload_signatures);
   SignatureFileFlagToBlobs(metadata_signature_file, &metadata_signatures);
   uint64_t final_metadata_size;
-  CHECK(PayloadSigner::AddSignatureToPayload(in_file, signatures,
-      metadata_signatures, out_file, &final_metadata_size));
+  CHECK(PayloadSigner::AddSignatureToPayload(in_file,
+                                             payload_signatures,
+                                             metadata_signatures,
+                                             out_file,
+                                             &final_metadata_size));
   LOG(INFO) << "Done signing payload. Final metadata size = "
             << final_metadata_size;
   if (!out_metadata_size_file.empty()) {
@@ -182,8 +180,20 @@
   return 0;
 }
 
-// TODO(deymo): This function is likely broken for deltas minor version 2 or
-// newer. Move this function to a new file and make the delta_performer
+class ApplyPayloadProcessorDelegate : public ActionProcessorDelegate {
+ public:
+  void ProcessingDone(const ActionProcessor* processor,
+                      ErrorCode code) override {
+    brillo::MessageLoop::current()->BreakLoop();
+    code_ = code;
+  }
+  void ProcessingStopped(const ActionProcessor* processor) override {
+    brillo::MessageLoop::current()->BreakLoop();
+  }
+  ErrorCode code_;
+};
+
+// TODO(deymo): Move this function to a new file and make the delta_performer
 // integration tests use this instead.
 bool ApplyPayload(const string& payload_file,
                   // Simply reuses the payload config used for payload
@@ -200,6 +210,14 @@
   install_plan.target_slot = 1;
   payload.type =
       config.is_delta ? InstallPayloadType::kDelta : InstallPayloadType::kFull;
+  payload.size = utils::FileSize(payload_file);
+  // TODO(senj): This hash is only correct for unsigned payloads; add support
+  // for signed payloads using PayloadSigner.
+  HashCalculator::RawHashOfFile(payload_file, payload.size, &payload.hash);
+  install_plan.payloads = {payload};
+  install_plan.download_url =
+      "file://" +
+      base::MakeAbsoluteFilePath(base::FilePath(payload_file)).value();
 
   for (size_t i = 0; i < config.target.partitions.size(); i++) {
     const string& part_name = config.target.partitions[i].name;
@@ -217,31 +235,34 @@
     }
 
     LOG(INFO) << "Install partition:"
-              << " source: " << source_path << " target: " << target_path;
+              << " source: " << source_path << "\ttarget: " << target_path;
   }
 
-  DeltaPerformer performer(&prefs,
-                           &fake_boot_control,
-                           &fake_hardware,
-                           nullptr,
-                           &install_plan,
-                           &payload,
-                           true);  // is_interactive
-
-  brillo::Blob buf(1024 * 1024);
-  int fd = open(payload_file.c_str(), O_RDONLY, 0);
-  CHECK_GE(fd, 0);
-  ScopedFdCloser fd_closer(&fd);
   xz_crc32_init();
-  for (off_t offset = 0;; offset += buf.size()) {
-    ssize_t bytes_read;
-    CHECK(utils::PReadAll(fd, buf.data(), buf.size(), offset, &bytes_read));
-    if (bytes_read == 0)
-      break;
-    TEST_AND_RETURN_FALSE(performer.Write(buf.data(), bytes_read));
-  }
-  CHECK_EQ(performer.Close(), 0);
-  DeltaPerformer::ResetUpdateProgress(&prefs, false);
+  brillo::BaseMessageLoop loop;
+  loop.SetAsCurrent();
+  auto install_plan_action = std::make_unique<InstallPlanAction>(install_plan);
+  auto download_action =
+      std::make_unique<DownloadAction>(&prefs,
+                                       &fake_boot_control,
+                                       &fake_hardware,
+                                       nullptr,
+                                       new FileFetcher(),
+                                       true /* interactive */);
+  auto filesystem_verifier_action =
+      std::make_unique<FilesystemVerifierAction>();
+
+  BondActions(install_plan_action.get(), download_action.get());
+  BondActions(download_action.get(), filesystem_verifier_action.get());
+  ActionProcessor processor;
+  ApplyPayloadProcessorDelegate delegate;
+  processor.set_delegate(&delegate);
+  processor.EnqueueAction(std::move(install_plan_action));
+  processor.EnqueueAction(std::move(download_action));
+  processor.EnqueueAction(std::move(filesystem_verifier_action));
+  processor.StartProcessing();
+  loop.Run();
+  CHECK_EQ(delegate.code_, ErrorCode::kSuccess);
   LOG(INFO) << "Completed applying " << (config.is_delta ? "delta" : "full")
             << " payload.";
   return true;
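
The rewritten ApplyPayload() above no longer pumps the payload through DeltaPerformer by hand: it enqueues InstallPlanAction, DownloadAction (backed by a FileFetcher reading the file:// URL) and FilesystemVerifierAction on an ActionProcessor, runs a brillo message loop, and lets the delegate break the loop and record the final ErrorCode. The following is an illustrative, self-contained analogue of that delegate-breaks-the-loop pattern built only on the standard library; Loop, Delegate and Code are stand-ins, not the real brillo/update_engine types.

#include <cstdio>
#include <functional>
#include <queue>
#include <utility>
#include <vector>

enum class Code { kSuccess, kError };

// Minimal message loop: queued tasks run until BreakLoop() is called.
class Loop {
 public:
  void Post(std::function<void()> task) { tasks_.push(std::move(task)); }
  void Run() {
    running_ = true;
    while (running_ && !tasks_.empty()) {
      auto task = std::move(tasks_.front());
      tasks_.pop();
      task();
    }
  }
  void BreakLoop() { running_ = false; }

 private:
  bool running_ = false;
  std::queue<std::function<void()>> tasks_;
};

// Plays the role of ApplyPayloadProcessorDelegate: remembers the final code
// and stops the loop once processing is done.
struct Delegate {
  void ProcessingDone(Loop* loop, Code code) {
    code_ = code;
    loop->BreakLoop();
  }
  Code code_ = Code::kError;
};

int main() {
  Loop loop;
  Delegate delegate;
  std::vector<std::function<Code()>> actions = {
      [] { std::puts("install plan"); return Code::kSuccess; },
      [] { std::puts("download"); return Code::kSuccess; },
      [] { std::puts("verify"); return Code::kSuccess; },
  };
  // "Enqueue" the chained actions; the delegate ends the run.
  loop.Post([&] {
    Code code = Code::kSuccess;
    for (auto& action : actions)
      if ((code = action()) != Code::kSuccess)
        break;
    delegate.ProcessingDone(&loop, code);
  });
  loop.Run();
  return delegate.code_ == Code::kSuccess ? 0 : 1;
}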
@@ -265,12 +286,14 @@
   DEFINE_string(new_image, "", "Path to the new rootfs");
   DEFINE_string(old_kernel, "", "Path to the old kernel partition image");
   DEFINE_string(new_kernel, "", "Path to the new kernel partition image");
-  DEFINE_string(old_partitions, "",
+  DEFINE_string(old_partitions,
+                "",
                 "Path to the old partitions. To pass multiple partitions, use "
                 "a single argument with a colon between paths, e.g. "
                 "/path/to/part:/path/to/part2::/path/to/last_part . Path can "
                 "be empty, but it has to match the order of partition_names.");
-  DEFINE_string(new_partitions, "",
+  DEFINE_string(new_partitions,
+                "",
                 "Path to the new partitions. To pass multiple partitions, use "
                 "a single argument with a colon between paths, e.g. "
                 "/path/to/part:/path/to/part2:/path/to/last_part . Path has "
@@ -287,52 +310,57 @@
                 "Path to the .map files associated with the partition files "
                 "in the new partition, similar to the -old_mapfiles flag.");
   DEFINE_string(partition_names,
-                string(kLegacyPartitionNameRoot) + ":" +
-                kLegacyPartitionNameKernel,
+                string(kPartitionNameRoot) + ":" + kPartitionNameKernel,
                 "Names of the partitions. To pass multiple names, use a single "
                 "argument with a colon between names, e.g. "
                 "name:name2:name3:last_name . Name can not be empty, and it "
                 "has to match the order of partitions.");
-  DEFINE_string(in_file, "",
+  DEFINE_string(in_file,
+                "",
                 "Path to input delta payload file used to hash/sign payloads "
                 "and apply delta over old_image (for debugging)");
   DEFINE_string(out_file, "", "Path to output delta payload file");
   DEFINE_string(out_hash_file, "", "Path to output hash file");
-  DEFINE_string(out_metadata_hash_file, "",
-                "Path to output metadata hash file");
-  DEFINE_string(out_metadata_size_file, "",
-                "Path to output metadata size file");
+  DEFINE_string(
+      out_metadata_hash_file, "", "Path to output metadata hash file");
+  DEFINE_string(
+      out_metadata_size_file, "", "Path to output metadata size file");
   DEFINE_string(private_key, "", "Path to private key in .pem format");
   DEFINE_string(public_key, "", "Path to public key in .pem format");
-  DEFINE_int32(public_key_version, -1,
-               "DEPRECATED. Key-check version # of client");
-  DEFINE_string(signature_size, "",
+  DEFINE_int32(
+      public_key_version, -1, "DEPRECATED. Key-check version # of client");
+  DEFINE_string(signature_size,
+                "",
                 "Raw signature size used for hash calculation. "
                 "You may pass in multiple sizes by colon separating them. E.g. "
                 "2048:2048:4096 will assume 3 signatures, the first two with "
                 "2048 size and the last 4096.");
-  DEFINE_string(signature_file, "",
+  DEFINE_string(payload_signature_file,
+                "",
                 "Raw signature file to sign payload with. To pass multiple "
                 "signatures, use a single argument with a colon between paths, "
                 "e.g. /path/to/sig:/path/to/next:/path/to/last_sig . Each "
                 "signature will be assigned a client version, starting from "
                 "kSignatureOriginalVersion.");
-  DEFINE_string(metadata_signature_file, "",
+  DEFINE_string(metadata_signature_file,
+                "",
                 "Raw signature file with the signature of the metadata hash. "
                 "To pass multiple signatures, use a single argument with a "
                 "colon between paths, "
                 "e.g. /path/to/sig:/path/to/next:/path/to/last_sig .");
-  DEFINE_int32(chunk_size, 200 * 1024 * 1024,
-               "Payload chunk size (-1 for whole files)");
+  DEFINE_int32(
+      chunk_size, 200 * 1024 * 1024, "Payload chunk size (-1 for whole files)");
   DEFINE_uint64(rootfs_partition_size,
-               chromeos_update_engine::kRootFSPartitionSize,
-               "RootFS partition size for the image once installed");
-  DEFINE_uint64(major_version, 1,
-               "The major version of the payload being generated.");
-  DEFINE_int32(minor_version, -1,
+                chromeos_update_engine::kRootFSPartitionSize,
+                "RootFS partition size for the image once installed");
+  DEFINE_uint64(
+      major_version, 2, "The major version of the payload being generated.");
+  DEFINE_int32(minor_version,
+               -1,
                "The minor version of the payload being generated "
                "(-1 means autodetect).");
-  DEFINE_string(properties_file, "",
+  DEFINE_string(properties_file,
+                "",
                 "If passed, dumps the payload properties of the payload passed "
                 "in --in_file and exits.");
   DEFINE_int64(max_timestamp,
@@ -340,46 +368,63 @@
                "The maximum timestamp of the OS allowed to apply this "
                "payload.");
 
-  DEFINE_string(old_channel, "",
+  DEFINE_string(old_channel,
+                "",
                 "The channel for the old image. 'dev-channel', 'npo-channel', "
                 "etc. Ignored, except during delta generation.");
-  DEFINE_string(old_board, "",
+  DEFINE_string(old_board,
+                "",
                 "The board for the old image. 'x86-mario', 'lumpy', "
                 "etc. Ignored, except during delta generation.");
-  DEFINE_string(old_version, "",
-                "The build version of the old image. 1.2.3, etc.");
-  DEFINE_string(old_key, "",
+  DEFINE_string(
+      old_version, "", "The build version of the old image. 1.2.3, etc.");
+  DEFINE_string(old_key,
+                "",
                 "The key used to sign the old image. 'premp', 'mp', 'mp-v3',"
                 " etc");
-  DEFINE_string(old_build_channel, "",
+  DEFINE_string(old_build_channel,
+                "",
                 "The channel for the build of the old image. 'dev-channel', "
                 "etc, but will never contain special channels such as "
                 "'npo-channel'. Ignored, except during delta generation.");
-  DEFINE_string(old_build_version, "",
+  DEFINE_string(old_build_version,
+                "",
                 "The version of the build containing the old image.");
 
-  DEFINE_string(new_channel, "",
+  DEFINE_string(new_channel,
+                "",
                 "The channel for the new image. 'dev-channel', 'npo-channel', "
                 "etc. Ignored, except during delta generation.");
-  DEFINE_string(new_board, "",
+  DEFINE_string(new_board,
+                "",
                 "The board for the new image. 'x86-mario', 'lumpy', "
                 "etc. Ignored, except during delta generation.");
-  DEFINE_string(new_version, "",
-                "The build version of the new image. 1.2.3, etc.");
-  DEFINE_string(new_key, "",
+  DEFINE_string(
+      new_version, "", "The build version of the new image. 1.2.3, etc.");
+  DEFINE_string(new_key,
+                "",
                 "The key used to sign the new image. 'premp', 'mp', 'mp-v3',"
                 " etc");
-  DEFINE_string(new_build_channel, "",
+  DEFINE_string(new_build_channel,
+                "",
                 "The channel for the build of the new image. 'dev-channel', "
                 "etc, but will never contain special channels such as "
                 "'npo-channel'. Ignored, except during delta generation.");
-  DEFINE_string(new_build_version, "",
+  DEFINE_string(new_build_version,
+                "",
                 "The version of the build containing the new image.");
-  DEFINE_string(new_postinstall_config_file, "",
+  DEFINE_string(new_postinstall_config_file,
+                "",
                 "A config file specifying postinstall related metadata. "
                 "Only allowed in major version 2 or newer.");
+  DEFINE_string(dynamic_partition_info_file,
+                "",
+                "An info file specifying dynamic partition metadata. "
+                "Only allowed in major version 2 or newer.");
 
-  brillo::FlagHelper::Init(argc, argv,
+  brillo::FlagHelper::Init(
+      argc,
+      argv,
       "Generates a payload to provide to ChromeOS' update_engine.\n\n"
       "This tool can create full payloads and also delta payloads if the src\n"
       "image is provided. It also provides debugging options to apply, sign\n"
@@ -387,10 +432,10 @@
   Terminator::Init();
 
   logging::LoggingSettings log_settings;
-  log_settings.log_file     = "delta_generator.log";
+  log_settings.log_file = "delta_generator.log";
   log_settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG;
-  log_settings.lock_log     = logging::LOCK_LOG_FILE;
-  log_settings.delete_old   = logging::APPEND_TO_OLD_LOG_FILE;
+  log_settings.lock_log = logging::LOCK_LOG_FILE;
+  log_settings.delete_old = logging::APPEND_TO_OLD_LOG_FILE;
 
   logging::InitLogging(log_settings);
 
@@ -402,13 +447,18 @@
 
   if (!FLAGS_out_hash_file.empty() || !FLAGS_out_metadata_hash_file.empty()) {
     CHECK(FLAGS_out_metadata_size_file.empty());
-    CalculateHashForSigning(signature_sizes, FLAGS_out_hash_file,
-                            FLAGS_out_metadata_hash_file, FLAGS_in_file);
+    CalculateHashForSigning(signature_sizes,
+                            FLAGS_out_hash_file,
+                            FLAGS_out_metadata_hash_file,
+                            FLAGS_in_file);
     return 0;
   }
-  if (!FLAGS_signature_file.empty()) {
-    SignPayload(FLAGS_in_file, FLAGS_out_file, FLAGS_signature_file,
-                FLAGS_metadata_signature_file, FLAGS_out_metadata_size_file);
+  if (!FLAGS_payload_signature_file.empty()) {
+    SignPayload(FLAGS_in_file,
+                FLAGS_out_file,
+                FLAGS_payload_signature_file,
+                FLAGS_metadata_signature_file,
+                FLAGS_out_metadata_size_file);
     return 0;
   }
   if (!FLAGS_public_key.empty()) {
@@ -435,17 +485,17 @@
         FLAGS_new_mapfiles, ":", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
   }
 
-  partition_names =
-      base::SplitString(FLAGS_partition_names, ":", base::TRIM_WHITESPACE,
-                        base::SPLIT_WANT_ALL);
+  partition_names = base::SplitString(
+      FLAGS_partition_names, ":", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
   CHECK(!partition_names.empty());
   if (FLAGS_major_version == kChromeOSMajorPayloadVersion ||
       FLAGS_new_partitions.empty()) {
     LOG_IF(FATAL, partition_names.size() != 2)
         << "To support more than 2 partitions, please use the "
         << "--new_partitions flag and major version 2.";
-    LOG_IF(FATAL, partition_names[0] != kLegacyPartitionNameRoot ||
-                  partition_names[1] != kLegacyPartitionNameKernel)
+    LOG_IF(FATAL,
+           partition_names[0] != kPartitionNameRoot ||
+               partition_names[1] != kPartitionNameKernel)
         << "To support non-default partition name, please use the "
         << "--new_partitions flag and major version 2.";
   }
@@ -454,9 +504,8 @@
     LOG_IF(FATAL, !FLAGS_new_image.empty() || !FLAGS_new_kernel.empty())
         << "--new_image and --new_kernel are deprecated, please use "
         << "--new_partitions for all partitions.";
-    new_partitions =
-        base::SplitString(FLAGS_new_partitions, ":", base::TRIM_WHITESPACE,
-                          base::SPLIT_WANT_ALL);
+    new_partitions = base::SplitString(
+        FLAGS_new_partitions, ":", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
     CHECK(partition_names.size() == new_partitions.size());
 
     payload_config.is_delta = !FLAGS_old_partitions.empty();
@@ -468,8 +517,8 @@
     LOG(WARNING) << "--new_partitions is empty, using deprecated --new_image "
                  << "and --new_kernel flags.";
 
-    payload_config.is_delta = !FLAGS_old_image.empty() ||
-                              !FLAGS_old_kernel.empty();
+    payload_config.is_delta =
+        !FLAGS_old_image.empty() || !FLAGS_old_kernel.empty();
     LOG_IF(FATAL, !FLAGS_old_partitions.empty())
         << "Please use --new_partitions if you are using --old_partitions.";
   }
@@ -484,9 +533,10 @@
 
   if (payload_config.is_delta) {
     if (!FLAGS_old_partitions.empty()) {
-      old_partitions =
-          base::SplitString(FLAGS_old_partitions, ":", base::TRIM_WHITESPACE,
-                            base::SPLIT_WANT_ALL);
+      old_partitions = base::SplitString(FLAGS_old_partitions,
+                                         ":",
+                                         base::TRIM_WHITESPACE,
+                                         base::SPLIT_WANT_ALL);
       CHECK(old_partitions.size() == new_partitions.size());
     } else {
       old_partitions = {FLAGS_old_image, FLAGS_old_kernel};
@@ -524,6 +574,16 @@
   }
   CHECK(payload_config.target.LoadImageSize());
 
+  if (!FLAGS_dynamic_partition_info_file.empty()) {
+    LOG_IF(FATAL, FLAGS_major_version == kChromeOSMajorPayloadVersion)
+        << "Dynamic partition info is only allowed in major version 2 or "
+           "newer.";
+    brillo::KeyValueStore store;
+    CHECK(store.Load(base::FilePath(FLAGS_dynamic_partition_info_file)));
+    CHECK(payload_config.target.LoadDynamicPartitionMetadata(store));
+    CHECK(payload_config.target.ValidateDynamicPartitionMetadata());
+  }
+
   CHECK(!FLAGS_out_file.empty());
 
   // Ignore failures. These are optional arguments.
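
--dynamic_partition_info_file is parsed with brillo::KeyValueStore, i.e. plain key=value lines; the concrete keys consumed by LoadDynamicPartitionMetadata() live elsewhere and are not visible in this diff. Below is a rough sketch of that style of parsing with a purely hypothetical file layout, using only the standard library rather than the real KeyValueStore API.

#include <cstdio>
#include <map>
#include <sstream>
#include <string>

// Key=value parsing in the spirit of brillo::KeyValueStore; the "group_a_*"
// keys used in main() are hypothetical illustration only.
std::map<std::string, std::string> ParseKeyValue(const std::string& contents) {
  std::map<std::string, std::string> store;
  std::istringstream in(contents);
  std::string line;
  while (std::getline(in, line)) {
    if (line.empty() || line[0] == '#')
      continue;  // skip blanks and comment lines
    auto eq = line.find('=');
    if (eq == std::string::npos)
      continue;  // malformed line: skipped here (a stricter parser may fail)
    store[line.substr(0, eq)] = line.substr(eq + 1);
  }
  return store;
}

int main() {
  const std::string info =
      "# hypothetical dynamic partition info\n"
      "group_a_size=4294967296\n"
      "group_a_partition_list=system vendor\n";
  for (const auto& kv : ParseKeyValue(info))
    std::printf("%s -> %s\n", kv.first.c_str(), kv.second.c_str());
  return 0;
}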
@@ -582,6 +642,9 @@
 
   payload_config.max_timestamp = FLAGS_max_timestamp;
 
+  if (payload_config.version.minor >= kVerityMinorPayloadVersion)
+    CHECK(payload_config.target.LoadVerityConfig());
+
   LOG(INFO) << "Generating " << (payload_config.is_delta ? "delta" : "full")
             << " update";
 
@@ -592,10 +655,8 @@
   }
 
   uint64_t metadata_size;
-  if (!GenerateUpdatePayloadFile(payload_config,
-                                 FLAGS_out_file,
-                                 FLAGS_private_key,
-                                 &metadata_size)) {
+  if (!GenerateUpdatePayloadFile(
+          payload_config, FLAGS_out_file, FLAGS_private_key, &metadata_size)) {
     return 1;
   }
   if (!FLAGS_out_metadata_size_file.empty()) {
diff --git a/payload_generator/graph_types.cc b/payload_generator/graph_types.cc
index 7da76f7..c03766d 100644
--- a/payload_generator/graph_types.cc
+++ b/payload_generator/graph_types.cc
@@ -20,4 +20,4 @@
 
 const Vertex::Index Vertex::kInvalidIndex = static_cast<Vertex::Index>(-1);
 
-}  // chromeos_update_engine
+}  // namespace chromeos_update_engine
diff --git a/payload_generator/graph_types.h b/payload_generator/graph_types.h
index fee8575..f96b0f3 100644
--- a/payload_generator/graph_types.h
+++ b/payload_generator/graph_types.h
@@ -51,10 +51,7 @@
 };
 
 struct Vertex {
-  Vertex() :
-      valid(true),
-      index(-1),
-      lowlink(-1) {}
+  Vertex() : valid(true), index(-1), lowlink(-1) {}
   bool valid;
 
   typedef std::map<std::vector<Vertex>::size_type, EdgeProperties> EdgeMap;
diff --git a/payload_generator/graph_utils.cc b/payload_generator/graph_utils.cc
index 4829b21..7f5cf8f 100644
--- a/payload_generator/graph_utils.cc
+++ b/payload_generator/graph_utils.cc
@@ -39,17 +39,15 @@
   uint64_t weight = 0;
   const vector<Extent>& extents =
       graph[edge.first].out_edges.find(edge.second)->second.extents;
-  for (vector<Extent>::const_iterator it = extents.begin();
-       it != extents.end(); ++it) {
+  for (vector<Extent>::const_iterator it = extents.begin(); it != extents.end();
+       ++it) {
     if (it->start_block() != kSparseHole)
       weight += it->num_blocks();
   }
   return weight;
 }
 
-void AddReadBeforeDep(Vertex* src,
-                      Vertex::Index dst,
-                      uint64_t block) {
+void AddReadBeforeDep(Vertex* src, Vertex::Index dst, uint64_t block) {
   Vertex::EdgeMap::iterator edge_it = src->out_edges.find(dst);
   if (edge_it == src->out_edges.end()) {
     // Must create new edge
@@ -66,11 +64,13 @@
                              const vector<Extent>& extents) {
   // TODO(adlr): Be more efficient than adding each block individually.
   for (vector<Extent>::const_iterator it = extents.begin(), e = extents.end();
-       it != e; ++it) {
+       it != e;
+       ++it) {
     const Extent& extent = *it;
     for (uint64_t block = extent.start_block(),
-             block_end = extent.start_block() + extent.num_blocks();
-         block != block_end; ++block) {
+                  block_end = extent.start_block() + extent.num_blocks();
+         block != block_end;
+         ++block) {
       AddReadBeforeDep(src, dst, block);
     }
   }
@@ -79,7 +79,7 @@
 void DropWriteBeforeDeps(Vertex::EdgeMap* edge_map) {
   // Specially crafted for-loop for the map-iterate-delete dance.
   for (Vertex::EdgeMap::iterator it = edge_map->begin();
-       it != edge_map->end(); ) {
+       it != edge_map->end();) {
     if (!it->second.write_extents.empty())
       it->second.write_extents.clear();
     if (it->second.extents.empty()) {
@@ -101,7 +101,7 @@
 }
 
 namespace {
-template<typename T>
+template <typename T>
 void DumpExtents(const T& field, int prepend_space_count) {
   string header(prepend_space_count, ' ');
   for (const auto& extent : field) {
@@ -112,7 +112,9 @@
 
 void DumpOutEdges(const Vertex::EdgeMap& out_edges) {
   for (Vertex::EdgeMap::const_iterator it = out_edges.begin(),
-           e = out_edges.end(); it != e; ++it) {
+                                       e = out_edges.end();
+       it != e;
+       ++it) {
     LOG(INFO) << "    " << it->first << " read-before:";
     DumpExtents(it->second.extents, 6);
     LOG(INFO) << "      write-before:";
@@ -124,10 +126,9 @@
 void DumpGraph(const Graph& graph) {
   LOG(INFO) << "Graph length: " << graph.size();
   for (Graph::size_type i = 0, e = graph.size(); i != e; ++i) {
-    LOG(INFO) << i
-              << (graph[i].valid ? "" : "-INV")
-              << ": " << graph[i].aop.name
-              << ": " << InstallOperationTypeName(graph[i].aop.op.type());
+    LOG(INFO) << i << (graph[i].valid ? "" : "-INV") << ": "
+              << graph[i].aop.name << ": "
+              << InstallOperationTypeName(graph[i].aop.op.type());
     LOG(INFO) << "  src_extents:";
     DumpExtents(graph[i].aop.op.src_extents(), 4);
     LOG(INFO) << "  dst_extents:";
diff --git a/payload_generator/graph_utils.h b/payload_generator/graph_utils.h
index b32e666..7024215 100644
--- a/payload_generator/graph_utils.h
+++ b/payload_generator/graph_utils.h
@@ -35,9 +35,7 @@
 
 // These add a read-before dependency from graph[src] -> graph[dst]. If the dep
 // already exists, the block/s is/are added to the existing edge.
-void AddReadBeforeDep(Vertex* src,
-                      Vertex::Index dst,
-                      uint64_t block);
+void AddReadBeforeDep(Vertex* src, Vertex::Index dst, uint64_t block);
 void AddReadBeforeDepExtents(Vertex* src,
                              Vertex::Index dst,
                              const std::vector<Extent>& extents);
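
AddReadBeforeDep() either creates a new src -> dst edge or merges the extra block into the extents of an edge that already exists. Below is a simplified standalone sketch of that merge-into-existing-edge behaviour, using a plain map from destination index to a flat block list instead of Vertex::EdgeMap and Extent ranges.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <map>
#include <vector>

using Index = std::size_t;
using EdgeMap = std::map<Index, std::vector<uint64_t>>;  // dst -> blocks read

// operator[] creates the edge on first use and reuses it afterwards, which is
// the "must create new edge" vs. "update existing edge" split seen above.
void AddReadBeforeDep(EdgeMap* out_edges, Index dst, uint64_t block) {
  (*out_edges)[dst].push_back(block);
}

int main() {
  EdgeMap out_edges;
  AddReadBeforeDep(&out_edges, /*dst=*/3, /*block=*/10);  // creates the edge
  AddReadBeforeDep(&out_edges, /*dst=*/3, /*block=*/11);  // extends it
  for (const auto& edge : out_edges) {
    std::printf("edge to %zu:", edge.first);
    for (uint64_t b : edge.second)
      std::printf(" %llu", static_cast<unsigned long long>(b));
    std::printf("\n");
  }
  return 0;
}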
diff --git a/payload_generator/graph_utils_unittest.cc b/payload_generator/graph_utils_unittest.cc
index dddf815..07e7664 100644
--- a/payload_generator/graph_utils_unittest.cc
+++ b/payload_generator/graph_utils_unittest.cc
@@ -56,7 +56,6 @@
   EXPECT_EQ(4U, graph_utils::EdgeWeight(graph, make_pair(0, 1)));
 }
 
-
 TEST(GraphUtilsTest, DepsTest) {
   Graph graph(3);
 
@@ -74,8 +73,8 @@
     EXPECT_EQ(3U, extent.start_block());
     EXPECT_EQ(2U, extent.num_blocks());
   }
-  graph_utils::AddReadBeforeDepExtents(&graph[2], 1,
-    vector<Extent>(1, ExtentForRange(5, 2)));
+  graph_utils::AddReadBeforeDepExtents(
+      &graph[2], 1, vector<Extent>(1, ExtentForRange(5, 2)));
   EXPECT_EQ(1U, graph[2].out_edges.size());
   {
     Extent& extent = graph[2].out_edges[1].extents[0];
diff --git a/payload_generator/inplace_generator.cc b/payload_generator/inplace_generator.cc
index b858c2b..d553cc4 100644
--- a/payload_generator/inplace_generator.cc
+++ b/payload_generator/inplace_generator.cc
@@ -23,6 +23,8 @@
 #include <utility>
 #include <vector>
 
+#include <base/stl_util.h>
+
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_generator/cycle_breaker.h"
@@ -84,7 +86,7 @@
 class IndexedInstallOperationsDstComparator {
  public:
   explicit IndexedInstallOperationsDstComparator(Graph* graph)
-    : graph_(graph) {}
+      : graph_(graph) {}
 
   // Compares the operations in the vertex a and b of graph_.
   bool operator()(size_t a, size_t b) const {
@@ -104,27 +106,25 @@
   }
 }
 
-void InplaceGenerator::SubstituteBlocks(
-    Vertex* vertex,
-    const vector<Extent>& remove_extents,
-    const vector<Extent>& replace_extents) {
+void InplaceGenerator::SubstituteBlocks(Vertex* vertex,
+                                        const vector<Extent>& remove_extents,
+                                        const vector<Extent>& replace_extents) {
   // First, expand out the blocks that op reads from
-  vector<uint64_t> read_blocks =
-      ExpandExtents(vertex->aop.op.src_extents());
+  vector<uint64_t> read_blocks = ExpandExtents(vertex->aop.op.src_extents());
   {
     // Expand remove_extents and replace_extents
     vector<uint64_t> remove_extents_expanded = ExpandExtents(remove_extents);
     vector<uint64_t> replace_extents_expanded = ExpandExtents(replace_extents);
     CHECK_EQ(remove_extents_expanded.size(), replace_extents_expanded.size());
     map<uint64_t, uint64_t> conversion;
-    for (vector<uint64_t>::size_type i = 0;
-         i < replace_extents_expanded.size(); i++) {
+    for (vector<uint64_t>::size_type i = 0; i < replace_extents_expanded.size();
+         i++) {
       conversion[remove_extents_expanded[i]] = replace_extents_expanded[i];
     }
     ApplyMap(&read_blocks, conversion);
     for (auto& edge_prop_pair : vertex->out_edges) {
-      vector<uint64_t> write_before_deps_expanded = ExpandExtents(
-          edge_prop_pair.second.write_extents);
+      vector<uint64_t> write_before_deps_expanded =
+          ExpandExtents(edge_prop_pair.second.write_extents);
       ApplyMap(&write_before_deps_expanded, conversion);
       edge_prop_pair.second.write_extents =
           CompressExtents(write_before_deps_expanded);
@@ -168,8 +168,8 @@
         << "Can't cut edge that has write-before relationship.";
 
     // make node depend on the copy operation
-    (*graph)[edge.first].out_edges.insert(make_pair(graph->size() - 1,
-                                                    cut_edge_properties));
+    (*graph)[edge.first].out_edges.insert(
+        make_pair(graph->size() - 1, cut_edge_properties));
 
     // Set src/dst extents and other proto variables for copy operation
     graph->back().aop.op.set_type(InstallOperation::MOVE);
@@ -177,15 +177,14 @@
                  graph->back().aop.op.mutable_src_extents());
     StoreExtents(cuts.back().tmp_extents,
                  graph->back().aop.op.mutable_dst_extents());
-    graph->back().aop.op.set_src_length(
-        graph_utils::EdgeWeight(*graph, edge) * kBlockSize);
+    graph->back().aop.op.set_src_length(graph_utils::EdgeWeight(*graph, edge) *
+                                        kBlockSize);
     graph->back().aop.op.set_dst_length(graph->back().aop.op.src_length());
 
     // make the dest node read from the scratch space
-    SubstituteBlocks(
-        &((*graph)[edge.second]),
-        (*graph)[edge.first].out_edges[edge.second].extents,
-        cuts.back().tmp_extents);
+    SubstituteBlocks(&((*graph)[edge.second]),
+                     (*graph)[edge.first].out_edges[edge.second].extents,
+                     cuts.back().tmp_extents);
 
     // delete the old edge
     CHECK_EQ(static_cast<Graph::size_type>(1),
@@ -204,11 +203,8 @@
 // Creates all the edges for the graph. Writers of a block point to
 // readers of the same block. This is because for an edge A->B, B
 // must complete before A executes.
-void InplaceGenerator::CreateEdges(
-    Graph* graph,
-    const vector<Block>& blocks) {
-  for (vector<Block>::size_type i = 0;
-       i < blocks.size(); i++) {
+void InplaceGenerator::CreateEdges(Graph* graph, const vector<Block>& blocks) {
+  for (vector<Block>::size_type i = 0; i < blocks.size(); i++) {
     // Blocks with both a reader and writer get an edge
     if (blocks[i].reader == Vertex::kInvalidIndex ||
         blocks[i].writer == Vertex::kInvalidIndex)
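
CreateEdges() scans the per-block reader/writer table and, for every block that has both, adds an edge from the writing operation to the reading one, since the reader must finish before the writer overwrites the block. A simplified standalone sketch of that pass:

#include <cstddef>
#include <cstdio>
#include <limits>
#include <set>
#include <utility>
#include <vector>

constexpr std::size_t kInvalidIndex = std::numeric_limits<std::size_t>::max();

struct Block {
  std::size_t reader;
  std::size_t writer;
};

// Writer -> reader edges: the reading operation must complete before the
// writing operation clobbers the block.
std::set<std::pair<std::size_t, std::size_t>> CreateEdges(
    const std::vector<Block>& blocks) {
  std::set<std::pair<std::size_t, std::size_t>> edges;
  for (const Block& b : blocks) {
    if (b.reader == kInvalidIndex || b.writer == kInvalidIndex)
      continue;  // blocks with only a reader or only a writer add no edge
    if (b.reader != b.writer)
      edges.insert({b.writer, b.reader});
  }
  return edges;
}

int main() {
  // Block 0 is read by op 1 and written by op 2, so op 2 must wait for op 1.
  std::vector<Block> blocks = {{1, 2}, {kInvalidIndex, 2}, {1, kInvalidIndex}};
  for (const auto& e : CreateEdges(blocks))
    std::printf("edge %zu -> %zu\n", e.first, e.second);
  return 0;
}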
@@ -240,6 +236,7 @@
   bool operator()(const CutEdgeVertexes& a, const CutEdgeVertexes& b) {
     return table_[a.old_dst] < table_[b.old_dst];
   }
+
  private:
   const vector<vector<Vertex::Index>::size_type>& table_;
 };
@@ -250,8 +247,8 @@
     const vector<Vertex::Index>& op_indexes,
     vector<vector<Vertex::Index>::size_type>* reverse_op_indexes) {
   vector<vector<Vertex::Index>::size_type> table(op_indexes.size());
-  for (vector<Vertex::Index>::size_type i = 0, e = op_indexes.size();
-       i != e; ++i) {
+  for (vector<Vertex::Index>::size_type i = 0, e = op_indexes.size(); i != e;
+       ++i) {
     Vertex::Index node = op_indexes[i];
     if (table.size() < (node + 1)) {
       table.resize(node + 1);
@@ -262,8 +259,7 @@
 }
 
 void InplaceGenerator::SortCutsByTopoOrder(
-    const vector<Vertex::Index>& op_indexes,
-    vector<CutEdgeVertexes>* cuts) {
+    const vector<Vertex::Index>& op_indexes, vector<CutEdgeVertexes>* cuts) {
   // first, make a reverse lookup table.
   vector<vector<Vertex::Index>::size_type> table;
   GenerateReverseTopoOrderMap(op_indexes, &table);
@@ -272,13 +268,12 @@
 }
 
 void InplaceGenerator::MoveAndSortFullOpsToBack(
-    Graph* graph,
-    vector<Vertex::Index>* op_indexes) {
+    Graph* graph, vector<Vertex::Index>* op_indexes) {
   vector<Vertex::Index> ret;
   vector<Vertex::Index> full_ops;
   ret.reserve(op_indexes->size());
   for (auto op_index : *op_indexes) {
-    InstallOperation_Type type = (*graph)[op_index].aop.op.type();
+    InstallOperation::Type type = (*graph)[op_index].aop.op.type();
     if (type == InstallOperation::REPLACE ||
         type == InstallOperation::REPLACE_BZ) {
       full_ops.push_back(op_index);
@@ -289,7 +284,8 @@
   LOG(INFO) << "Stats: " << full_ops.size() << " full ops out of "
             << (full_ops.size() + ret.size()) << " total ops.";
   // Sort full ops according to their dst_extents.
-  sort(full_ops.begin(), full_ops.end(),
+  sort(full_ops.begin(),
+       full_ops.end(),
        IndexedInstallOperationsDstComparator(graph));
   ret.insert(ret.end(), full_ops.begin(), full_ops.end());
   op_indexes->swap(ret);
@@ -297,7 +293,7 @@
 
 namespace {
 
-template<typename T>
+template <typename T>
 bool TempBlocksExistInExtents(const T& extents) {
   for (const auto& extent : extents) {
     uint64_t start = extent.start_block();
@@ -329,11 +325,8 @@
   CHECK(!cuts.empty());
   set<Vertex::Index> deleted_nodes;
   for (const CutEdgeVertexes& cut : cuts) {
-    TEST_AND_RETURN_FALSE(InplaceGenerator::ConvertCutToFullOp(
-        graph,
-        cut,
-        new_part,
-        blob_file));
+    TEST_AND_RETURN_FALSE(
+        InplaceGenerator::ConvertCutToFullOp(graph, cut, new_part, blob_file));
     deleted_nodes.insert(cut.new_vertex);
   }
   deleted_nodes.insert(cuts[0].old_dst);
@@ -341,7 +334,7 @@
   vector<Vertex::Index> new_op_indexes;
   new_op_indexes.reserve(op_indexes->size());
   for (Vertex::Index vertex_index : *op_indexes) {
-    if (utils::SetContainsKey(deleted_nodes, vertex_index))
+    if (base::ContainsKey(deleted_nodes, vertex_index))
       continue;
     new_op_indexes.push_back(vertex_index);
   }
@@ -385,22 +378,25 @@
   SupplierVector block_suppliers;
   uint64_t scratch_blocks_found = 0;
   for (vector<Vertex::Index>::size_type i = (*reverse_op_indexes)[old_dst] + 1,
-           e = op_indexes->size(); i < e; ++i) {
+                                        e = op_indexes->size();
+       i < e;
+       ++i) {
     Vertex::Index test_node = (*op_indexes)[i];
     if (!(*graph)[test_node].valid)
       continue;
     // See if this node has sufficient blocks
     ExtentRanges ranges;
     ranges.AddRepeatedExtents((*graph)[test_node].aop.op.dst_extents());
-    ranges.SubtractExtent(ExtentForRange(
-        kTempBlockStart, kSparseHole - kTempBlockStart));
+    ranges.SubtractExtent(
+        ExtentForRange(kTempBlockStart, kSparseHole - kTempBlockStart));
     ranges.SubtractRepeatedExtents((*graph)[test_node].aop.op.src_extents());
     // For now, for simplicity, subtract out all blocks in read-before
     // dependencies.
-    for (Vertex::EdgeMap::const_iterator edge_i =
-             (*graph)[test_node].out_edges.begin(),
+    for (Vertex::EdgeMap::const_iterator
+             edge_i = (*graph)[test_node].out_edges.begin(),
              edge_e = (*graph)[test_node].out_edges.end();
-         edge_i != edge_e; ++edge_i) {
+         edge_i != edge_e;
+         ++edge_i) {
       ranges.SubtractExtents(edge_i->second.extents);
     }
 
@@ -416,8 +412,8 @@
 
     if (ranges.blocks() + scratch_blocks_found > blocks_needed) {
       // trim down ranges
-      vector<Extent> new_ranges = ranges.GetExtentsForBlockCount(
-          blocks_needed - scratch_blocks_found);
+      vector<Extent> new_ranges =
+          ranges.GetExtentsForBlockCount(blocks_needed - scratch_blocks_found);
       ranges = ExtentRanges();
       ranges.AddExtents(new_ranges);
     }
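
When a candidate node offers more scratch than is still missing, the hunk above trims the ranges with GetExtentsForBlockCount() so that only blocks_needed - scratch_blocks_found blocks are claimed. A standalone sketch of that trimming step over simple (start block, block count) pairs:

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

using Extent = std::pair<uint64_t, uint64_t>;  // {start_block, num_blocks}

// Keep just enough extents, front to back, to cover |blocks_needed| blocks,
// splitting the last extent if necessary.
std::vector<Extent> ExtentsForBlockCount(const std::vector<Extent>& in,
                                         uint64_t blocks_needed) {
  std::vector<Extent> out;
  for (const Extent& e : in) {
    if (blocks_needed == 0)
      break;
    uint64_t take = std::min(e.second, blocks_needed);
    out.push_back({e.first, take});
    blocks_needed -= take;
  }
  return out;
}

int main() {
  // 10 + 5 blocks available but only 12 needed: the second extent is cut to 2.
  for (const Extent& e : ExtentsForBlockCount({{100, 10}, {200, 5}}, 12))
    std::printf("start=%llu blocks=%llu\n",
                static_cast<unsigned long long>(e.first),
                static_cast<unsigned long long>(e.second));
  return 0;
}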
@@ -429,12 +425,8 @@
   }
   if (scratch_ranges.blocks() < blocks_needed) {
     LOG(INFO) << "Unable to find sufficient scratch";
-    TEST_AND_RETURN_FALSE(ConvertCutsToFull(graph,
-                                            new_part,
-                                            blob_file,
-                                            op_indexes,
-                                            reverse_op_indexes,
-                                            cuts));
+    TEST_AND_RETURN_FALSE(ConvertCutsToFull(
+        graph, new_part, blob_file, op_indexes, reverse_op_indexes, cuts));
     return true;
   }
   // Use the scratch we found
@@ -457,9 +449,8 @@
     scratch_ranges.SubtractExtents(real_extents);
 
     // Fix the old dest node w/ the real blocks
-    InplaceGenerator::SubstituteBlocks(&(*graph)[old_dst],
-                                         cut.tmp_extents,
-                                         real_extents);
+    InplaceGenerator::SubstituteBlocks(
+        &(*graph)[old_dst], cut.tmp_extents, real_extents);
 
     // Fix the new node w/ the real blocks. Since the new node is just a
     // copy operation, we can replace all the dest extents w/ the real
@@ -485,12 +476,12 @@
   // group of cuts w/ the same old_dst:
   vector<CutEdgeVertexes> cuts_group;
 
-  for (vector<CutEdgeVertexes>::size_type i = cuts.size() - 1, e = 0;
-       true ; --i) {
+  for (vector<CutEdgeVertexes>::size_type i = cuts.size() - 1, e = 0; true;
+       --i) {
     LOG(INFO) << "Fixing temp blocks in cut " << i
-              << ": old dst: " << cuts[i].old_dst << " new vertex: "
-              << cuts[i].new_vertex << " path: "
-              << (*graph)[cuts[i].old_dst].aop.name;
+              << ": old dst: " << cuts[i].old_dst
+              << " new vertex: " << cuts[i].new_vertex
+              << " path: " << (*graph)[cuts[i].old_dst].aop.name;
 
     if (cuts_group.empty() || (cuts_group[0].old_dst == cuts[i].old_dst)) {
       cuts_group.push_back(cuts[i]);
@@ -512,12 +503,8 @@
     }
   }
   CHECK(!cuts_group.empty());
-  TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts(graph,
-                                                    new_part,
-                                                    blob_file,
-                                                    op_indexes,
-                                                    reverse_op_indexes,
-                                                    cuts_group));
+  TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts(
+      graph, new_part, blob_file, op_indexes, reverse_op_indexes, cuts_group));
   return true;
 }
 
@@ -564,8 +551,7 @@
     // |new_extents| list of blocks and update the graph.
     vector<AnnotatedOperation> new_aop;
     vector<Extent> new_extents;
-    ExtentsToVector((*graph)[cut.old_dst].aop.op.dst_extents(),
-                    &new_extents);
+    ExtentsToVector((*graph)[cut.old_dst].aop.op.dst_extents(), &new_extents);
     TEST_AND_RETURN_FALSE(diff_utils::DeltaReadFile(
         &new_aop,
         "",  // old_part
@@ -580,7 +566,7 @@
         blob_file));
     TEST_AND_RETURN_FALSE(new_aop.size() == 1);
     TEST_AND_RETURN_FALSE(AddInstallOpToGraph(
-      graph, cut.old_dst, nullptr, new_aop.front().op, new_aop.front().name));
+        graph, cut.old_dst, nullptr, new_aop.front().op, new_aop.front().name));
 
     (*graph)[cut.old_dst].out_edges = out_edges;
 
@@ -634,12 +620,8 @@
   SortCutsByTopoOrder(*final_order, &cuts);
 
   if (!cuts.empty())
-    TEST_AND_RETURN_FALSE(AssignTempBlocks(graph,
-                                           new_part,
-                                           blob_file,
-                                           final_order,
-                                           &inverse_final_order,
-                                           cuts));
+    TEST_AND_RETURN_FALSE(AssignTempBlocks(
+        graph, new_part, blob_file, final_order, &inverse_final_order, cuts));
   LOG(INFO) << "Making sure all temp blocks have been allocated";
 
   // Remove the scratch node, if any
@@ -681,19 +663,19 @@
     const char* past_participle = (field == READER) ? "read" : "written";
     const google::protobuf::RepeatedPtrField<Extent>& extents =
         (field == READER) ? operation.src_extents() : operation.dst_extents();
-    Vertex::Index Block::*access_type = (field == READER) ?
-        &Block::reader : &Block::writer;
+    Vertex::Index Block::*access_type =
+        (field == READER) ? &Block::reader : &Block::writer;
 
     for (const Extent& extent : extents) {
       for (uint64_t block = extent.start_block();
-           block < (extent.start_block() + extent.num_blocks()); block++) {
+           block < (extent.start_block() + extent.num_blocks());
+           block++) {
         if ((*blocks)[block].*access_type != Vertex::kInvalidIndex) {
-          LOG(FATAL) << "Block " << block << " is already "
-                     << past_participle << " by "
-                     << (*blocks)[block].*access_type << "("
+          LOG(FATAL) << "Block " << block << " is already " << past_participle
+                     << " by " << (*blocks)[block].*access_type << "("
                      << graph[(*blocks)[block].*access_type].aop.name
-                     << ") and also " << vertex << "("
-                     << graph[vertex].aop.name << ")";
+                     << ") and also " << vertex << "(" << graph[vertex].aop.name
+                     << ")";
         }
         (*blocks)[block].*access_type = vertex;
       }
@@ -718,10 +700,7 @@
 
   if (blocks)
     TEST_AND_RETURN_FALSE(InplaceGenerator::AddInstallOpToBlocksVector(
-        (*graph)[vertex].aop.op,
-        *graph,
-        vertex,
-        blocks));
+        (*graph)[vertex].aop.op, *graph, vertex, blocks));
   return true;
 }
 
@@ -769,11 +748,7 @@
 
   vector<Vertex::Index> final_order;
   TEST_AND_RETURN_FALSE(ConvertGraphToDag(
-      &graph,
-      new_part.path,
-      blob_file,
-      &final_order,
-      scratch_vertex));
+      &graph, new_part.path, blob_file, &final_order, scratch_vertex));
 
   // Copy operations over to the |aops| vector in the final_order generated by
   // the topological sort.
@@ -786,21 +761,22 @@
   return true;
 }
 
-bool InplaceGenerator::GenerateOperations(
-    const PayloadGenerationConfig& config,
-    const PartitionConfig& old_part,
-    const PartitionConfig& new_part,
-    BlobFileWriter* blob_file,
-    vector<AnnotatedOperation>* aops) {
+bool InplaceGenerator::GenerateOperations(const PayloadGenerationConfig& config,
+                                          const PartitionConfig& old_part,
+                                          const PartitionConfig& new_part,
+                                          BlobFileWriter* blob_file,
+                                          vector<AnnotatedOperation>* aops) {
   TEST_AND_RETURN_FALSE(old_part.name == new_part.name);
   TEST_AND_RETURN_FALSE(config.version.major == kInPlacePayloadVersion.major);
   TEST_AND_RETURN_FALSE(config.version.minor == kInPlacePayloadVersion.minor);
 
-  ssize_t hard_chunk_blocks = (config.hard_chunk_size == -1 ? -1 :
-                               config.hard_chunk_size / config.block_size);
+  ssize_t hard_chunk_blocks =
+      (config.hard_chunk_size == -1
+           ? -1
+           : config.hard_chunk_size / config.block_size);
   size_t soft_chunk_blocks = config.soft_chunk_size / config.block_size;
   uint64_t partition_size = new_part.size;
-  if (new_part.name == kLegacyPartitionNameRoot)
+  if (new_part.name == kPartitionNameRoot)
     partition_size = config.rootfs_partition_size;
 
   LOG(INFO) << "Delta compressing " << new_part.name << " partition...";
diff --git a/payload_generator/inplace_generator.h b/payload_generator/inplace_generator.h
index f108639..e7298d2 100644
--- a/payload_generator/inplace_generator.h
+++ b/payload_generator/inplace_generator.h
@@ -97,8 +97,7 @@
   // Creates all the edges for the graph. Writers of a block point to
   // readers of the same block. This is because for an edge A->B, B
   // must complete before A executes.
-  static void CreateEdges(Graph* graph,
-                          const std::vector<Block>& blocks);
+  static void CreateEdges(Graph* graph, const std::vector<Block>& blocks);
 
   // Takes |op_indexes|, which is effectively a mapping from order in
   // which the op is performed -> graph vertex index, and produces the
@@ -109,15 +108,14 @@
 
   // Sorts the vector |cuts| by its |cuts[].old_dest| member. Order is
   // determined by the order of elements in op_indexes.
-  static void SortCutsByTopoOrder(
-      const std::vector<Vertex::Index>& op_indexes,
-      std::vector<CutEdgeVertexes>* cuts);
+  static void SortCutsByTopoOrder(const std::vector<Vertex::Index>& op_indexes,
+                                  std::vector<CutEdgeVertexes>* cuts);
 
   // Given a topologically sorted graph |op_indexes| and |graph|, alters
   // |op_indexes| to move all the full operations to the end of the vector.
   // Full operations should not be depended on, so this is safe.
   static void MoveAndSortFullOpsToBack(Graph* graph,
-                                std::vector<Vertex::Index>* op_indexes);
+                                       std::vector<Vertex::Index>* op_indexes);
 
   // Returns true iff there are no extents in the graph that refer to temp
   // blocks. Temp blocks are in the range [kTempBlockStart, kSparseHole).
@@ -227,12 +225,11 @@
   // a delta update with the minor version kInPlaceMinorPayloadVersion.
   // The operations are stored in |aops|. All the offsets in the operations
   // reference the data written to |blob_file|.
-  bool GenerateOperations(
-      const PayloadGenerationConfig& config,
-      const PartitionConfig& old_part,
-      const PartitionConfig& new_part,
-      BlobFileWriter* blob_file,
-      std::vector<AnnotatedOperation>* aops) override;
+  bool GenerateOperations(const PayloadGenerationConfig& config,
+                          const PartitionConfig& old_part,
+                          const PartitionConfig& new_part,
+                          BlobFileWriter* blob_file,
+                          std::vector<AnnotatedOperation>* aops) override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(InplaceGenerator);
diff --git a/payload_generator/inplace_generator_unittest.cc b/payload_generator/inplace_generator_unittest.cc
index 20ac50b..8028f36 100644
--- a/payload_generator/inplace_generator_unittest.cc
+++ b/payload_generator/inplace_generator_unittest.cc
@@ -17,6 +17,7 @@
 #include "update_engine/payload_generator/inplace_generator.h"
 
 #include <map>
+#include <memory>
 #include <set>
 #include <sstream>
 #include <string>
@@ -54,7 +55,7 @@
                const vector<Extent>& src_extents,
                const vector<Extent>& dst_extents,
                const string& path,
-               InstallOperation_Type type) {
+               InstallOperation::Type type) {
   out->aop.op.set_type(type);
   out->aop.name = path;
   StoreExtents(src_extents, out->aop.op.mutable_src_extents());
@@ -77,11 +78,12 @@
   return ret;
 }
 
-template<typename T>
+template <typename T>
 void DumpVect(const vector<T>& vect) {
   stringstream ss(stringstream::out);
   for (typename vector<T>::const_iterator it = vect.begin(), e = vect.end();
-       it != e; ++it) {
+       it != e;
+       ++it) {
     ss << *it << ", ";
   }
   LOG(INFO) << "{" << ss.str() << "}";
@@ -241,8 +243,8 @@
   cycle_breaker.BreakCycles(graph, &cut_edges);
 
   EXPECT_EQ(1U, cut_edges.size());
-  EXPECT_TRUE(cut_edges.end() != cut_edges.find(
-      std::pair<Vertex::Index, Vertex::Index>(1, 0)));
+  EXPECT_TRUE(cut_edges.end() !=
+              cut_edges.find(std::pair<Vertex::Index, Vertex::Index>(1, 0)));
 
   vector<CutEdgeVertexes> cuts;
   EXPECT_TRUE(InplaceGenerator::CutEdges(&graph, cut_edges, &cuts));
@@ -274,8 +276,8 @@
   // Ensure it only depends on the next node and the new temp node
   EXPECT_EQ(2U, graph[0].out_edges.size());
   EXPECT_TRUE(graph[0].out_edges.end() != graph[0].out_edges.find(1));
-  EXPECT_TRUE(graph[0].out_edges.end() != graph[0].out_edges.find(graph.size() -
-                                                                  1));
+  EXPECT_TRUE(graph[0].out_edges.end() !=
+              graph[0].out_edges.find(graph.size() - 1));
 
   // Check second node has unchanged extents
   EXPECT_EQ(2, graph[1].aop.op.src_extents_size());
@@ -737,8 +739,8 @@
     if (aop.op.type() == InstallOperation::MOVE) {
       move_ops++;
       for (const Extent& extent : aop.op.dst_extents()) {
-        EXPECT_LE(7U, extent.start_block()) << "On dst extents for aop: "
-                                            << aop;
+        EXPECT_LE(7U, extent.start_block())
+            << "On dst extents for aop: " << aop;
       }
     }
   }
diff --git a/payload_generator/operations_generator.h b/payload_generator/operations_generator.h
index 9127d7b..4d7322b 100644
--- a/payload_generator/operations_generator.h
+++ b/payload_generator/operations_generator.h
@@ -40,12 +40,11 @@
   // The operations generated will refer to offsets in the file |blob_file|,
   // where this function stores the output, but not necessarily in the same
   // order as they appear in the |aops|.
-  virtual bool GenerateOperations(
-      const PayloadGenerationConfig& config,
-      const PartitionConfig& old_part,
-      const PartitionConfig& new_part,
-      BlobFileWriter* blob_file,
-      std::vector<AnnotatedOperation>* aops) = 0;
+  virtual bool GenerateOperations(const PayloadGenerationConfig& config,
+                                  const PartitionConfig& old_part,
+                                  const PartitionConfig& new_part,
+                                  BlobFileWriter* blob_file,
+                                  std::vector<AnnotatedOperation>* aops) = 0;
 
  protected:
   OperationsGenerator() = default;
diff --git a/payload_generator/payload_file.cc b/payload_generator/payload_file.cc
index f48d2a2..a111fd6 100644
--- a/payload_generator/payload_file.cc
+++ b/payload_generator/payload_file.cc
@@ -20,6 +20,7 @@
 
 #include <algorithm>
 #include <map>
+#include <utility>
 
 #include <base/strings/stringprintf.h>
 
@@ -40,10 +41,8 @@
 
 struct DeltaObject {
   DeltaObject(const string& in_name, const int in_type, const off_t in_size)
-      : name(in_name),
-        type(in_type),
-        size(in_size) {}
-  bool operator <(const DeltaObject& object) const {
+      : name(in_name), type(in_type), size(in_size) {}
+  bool operator<(const DeltaObject& object) const {
     return (size != object.size) ? (size < object.size) : (name < object.name);
   }
   string name;
@@ -74,6 +73,13 @@
 
   manifest_.set_block_size(config.block_size);
   manifest_.set_max_timestamp(config.max_timestamp);
+
+  if (major_version_ == kBrilloMajorPayloadVersion) {
+    if (config.target.dynamic_partition_metadata != nullptr)
+      *(manifest_.mutable_dynamic_partition_metadata()) =
+          *(config.target.dynamic_partition_metadata);
+  }
+
   return true;
 }
 
@@ -82,8 +88,8 @@
                                const vector<AnnotatedOperation>& aops) {
   // Check partitions order for Chrome OS
   if (major_version_ == kChromeOSMajorPayloadVersion) {
-    const vector<const char*> part_order = { kLegacyPartitionNameRoot,
-                                             kLegacyPartitionNameKernel };
+    const vector<const char*> part_order = {kPartitionNameRoot,
+                                            kPartitionNameKernel};
     TEST_AND_RETURN_FALSE(part_vec_.size() < part_order.size());
     TEST_AND_RETURN_FALSE(new_conf.name == part_order[part_vec_.size()]);
   }
@@ -91,12 +97,13 @@
   part.name = new_conf.name;
   part.aops = aops;
   part.postinstall = new_conf.postinstall;
+  part.verity = new_conf.verity;
   // Initialize the PartitionInfo objects if present.
   if (!old_conf.path.empty())
-    TEST_AND_RETURN_FALSE(diff_utils::InitializePartitionInfo(old_conf,
-                                                              &part.old_info));
-  TEST_AND_RETURN_FALSE(diff_utils::InitializePartitionInfo(new_conf,
-                                                            &part.new_info));
+    TEST_AND_RETURN_FALSE(
+        diff_utils::InitializePartitionInfo(old_conf, &part.old_info));
+  TEST_AND_RETURN_FALSE(
+      diff_utils::InitializePartitionInfo(new_conf, &part.new_info));
   part_vec_.push_back(std::move(part));
   return true;
 }
@@ -108,9 +115,7 @@
   // Reorder the data blobs with the manifest_.
   string ordered_blobs_path;
   TEST_AND_RETURN_FALSE(utils::MakeTempFile(
-      "CrAU_temp_data.ordered.XXXXXX",
-      &ordered_blobs_path,
-      nullptr));
+      "CrAU_temp_data.ordered.XXXXXX", &ordered_blobs_path, nullptr));
   ScopedPathUnlinker ordered_blobs_unlinker(ordered_blobs_path);
   TEST_AND_RETURN_FALSE(ReorderDataBlobs(data_blobs_path, ordered_blobs_path));
 
@@ -121,8 +126,8 @@
       if (!aop.op.has_data_offset())
         continue;
       if (aop.op.data_offset() != next_blob_offset) {
-        LOG(FATAL) << "bad blob offset! " << aop.op.data_offset() << " != "
-                   << next_blob_offset;
+        LOG(FATAL) << "bad blob offset! " << aop.op.data_offset()
+                   << " != " << next_blob_offset;
       }
       next_blob_offset += aop.op.data_length();
     }
@@ -144,6 +149,22 @@
           partition->set_filesystem_type(part.postinstall.filesystem_type);
         partition->set_postinstall_optional(part.postinstall.optional);
       }
+      if (!part.verity.IsEmpty()) {
+        if (part.verity.hash_tree_extent.num_blocks() != 0) {
+          *partition->mutable_hash_tree_data_extent() =
+              part.verity.hash_tree_data_extent;
+          *partition->mutable_hash_tree_extent() = part.verity.hash_tree_extent;
+          partition->set_hash_tree_algorithm(part.verity.hash_tree_algorithm);
+          if (!part.verity.hash_tree_salt.empty())
+            partition->set_hash_tree_salt(part.verity.hash_tree_salt.data(),
+                                          part.verity.hash_tree_salt.size());
+        }
+        if (part.verity.fec_extent.num_blocks() != 0) {
+          *partition->mutable_fec_data_extent() = part.verity.fec_data_extent;
+          *partition->mutable_fec_extent() = part.verity.fec_extent;
+          partition->set_fec_roots(part.verity.fec_roots);
+        }
+      }
       for (const AnnotatedOperation& aop : part.aops) {
         *partition->add_operations() = aop.op;
       }
@@ -153,7 +174,7 @@
         *(partition->mutable_new_partition_info()) = part.new_info;
     } else {
       // major_version_ == kChromeOSMajorPayloadVersion
-      if (part.name == kLegacyPartitionNameKernel) {
+      if (part.name == kPartitionNameKernel) {
         for (const AnnotatedOperation& aop : part.aops)
           *manifest_.add_kernel_install_operations() = aop.op;
         if (part.old_info.has_size() || part.old_info.has_hash())
@@ -175,17 +196,18 @@
   // manifest_.
   uint64_t signature_blob_length = 0;
   if (!private_key_path.empty()) {
-    TEST_AND_RETURN_FALSE(
-        PayloadSigner::SignatureBlobLength(vector<string>(1, private_key_path),
-                                           &signature_blob_length));
+    TEST_AND_RETURN_FALSE(PayloadSigner::SignatureBlobLength(
+        {private_key_path}, &signature_blob_length));
     PayloadSigner::AddSignatureToManifest(
-        next_blob_offset, signature_blob_length,
-        major_version_ == kChromeOSMajorPayloadVersion, &manifest_);
+        next_blob_offset,
+        signature_blob_length,
+        major_version_ == kChromeOSMajorPayloadVersion,
+        &manifest_);
   }
 
   // Serialize protobuf
   string serialized_manifest;
-  TEST_AND_RETURN_FALSE(manifest_.AppendToString(&serialized_manifest));
+  TEST_AND_RETURN_FALSE(manifest_.SerializeToString(&serialized_manifest));
 
   uint64_t metadata_size =
       sizeof(kDeltaMagic) + 2 * sizeof(uint64_t) + serialized_manifest.size();
@@ -204,8 +226,8 @@
   TEST_AND_RETURN_FALSE(WriteUint64AsBigEndian(&writer, major_version_));
 
   // Write protobuf length
-  TEST_AND_RETURN_FALSE(WriteUint64AsBigEndian(&writer,
-                                               serialized_manifest.size()));
+  TEST_AND_RETURN_FALSE(
+      WriteUint64AsBigEndian(&writer, serialized_manifest.size()));
 
   // Write metadata signature size.
   uint32_t metadata_signature_size = 0;
@@ -229,14 +251,12 @@
   // Write metadata signature blob.
   if (major_version_ == kBrilloMajorPayloadVersion &&
       !private_key_path.empty()) {
-    brillo::Blob metadata_hash, metadata_signature;
-    TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfFile(payload_file,
-                                                             metadata_size,
-                                                             &metadata_hash));
-    TEST_AND_RETURN_FALSE(
-        PayloadSigner::SignHashWithKeys(metadata_hash,
-                                        vector<string>(1, private_key_path),
-                                        &metadata_signature));
+    brillo::Blob metadata_hash;
+    TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfFile(
+        payload_file, metadata_size, &metadata_hash));
+    string metadata_signature;
+    TEST_AND_RETURN_FALSE(PayloadSigner::SignHashWithKeys(
+        metadata_hash, {private_key_path}, &metadata_signature));
     TEST_AND_RETURN_FALSE_ERRNO(
         writer.Write(metadata_signature.data(), metadata_signature.size()));
   }
@@ -260,16 +280,16 @@
   // Write payload signature blob.
   if (!private_key_path.empty()) {
     LOG(INFO) << "Signing the update...";
-    brillo::Blob signature_blob;
+    string signature;
     TEST_AND_RETURN_FALSE(PayloadSigner::SignPayload(
         payload_file,
-        vector<string>(1, private_key_path),
+        {private_key_path},
         metadata_size,
         metadata_signature_size,
         metadata_size + metadata_signature_size + manifest_.signatures_offset(),
-        &signature_blob));
+        &signature));
     TEST_AND_RETURN_FALSE_ERRNO(
-        writer.Write(signature_blob.data(), signature_blob.size()));
+        writer.Write(signature.data(), signature.size()));
   }
 
   ReportPayloadUsage(metadata_size);
@@ -277,9 +297,8 @@
   return true;
 }
 
-bool PayloadFile::ReorderDataBlobs(
-    const string& data_blobs_path,
-    const string& new_data_blobs_path) {
+bool PayloadFile::ReorderDataBlobs(const string& data_blobs_path,
+                                   const string& new_data_blobs_path) {
   int in_fd = open(data_blobs_path.c_str(), O_RDONLY, 0);
   TEST_AND_RETURN_FALSE_ERRNO(in_fd >= 0);
   ScopedFdCloser in_fd_closer(&in_fd);
@@ -325,37 +344,39 @@
 void PayloadFile::ReportPayloadUsage(uint64_t metadata_size) const {
   std::map<DeltaObject, int> object_counts;
   off_t total_size = 0;
+  int total_op = 0;
 
   for (const auto& part : part_vec_) {
+    string part_prefix = "<" + part.name + ">:";
     for (const AnnotatedOperation& aop : part.aops) {
-      DeltaObject delta(aop.name, aop.op.type(), aop.op.data_length());
+      DeltaObject delta(
+          part_prefix + aop.name, aop.op.type(), aop.op.data_length());
       object_counts[delta]++;
       total_size += aop.op.data_length();
     }
+    total_op += part.aops.size();
   }
 
   object_counts[DeltaObject("<manifest-metadata>", -1, metadata_size)] = 1;
   total_size += metadata_size;
 
-  static const char kFormatString[] = "%6.2f%% %10jd %-13s %s %d";
+  constexpr char kFormatString[] = "%6.2f%% %10jd %-13s %s %d\n";
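+  // Columns (matching the printf arguments below): percentage of the total
+  // payload size, size in bytes, operation type, object name, and count.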
   for (const auto& object_count : object_counts) {
     const DeltaObject& object = object_count.first;
-    LOG(INFO) << base::StringPrintf(
-        kFormatString,
-        object.size * 100.0 / total_size,
-        static_cast<intmax_t>(object.size),
-        (object.type >= 0 ? InstallOperationTypeName(
-                                static_cast<InstallOperation_Type>(object.type))
-                          : "-"),
-        object.name.c_str(),
-        object_count.second);
+    // Use printf() instead of LOG(INFO) because timestamps make it difficult
+    // to compare two reports.
+    printf(kFormatString,
+           object.size * 100.0 / total_size,
+           object.size,
+           (object.type >= 0
+                ? InstallOperationTypeName(
+                      static_cast<InstallOperation::Type>(object.type))
+                : "-"),
+           object.name.c_str(),
+           object_count.second);
   }
-  LOG(INFO) << base::StringPrintf(kFormatString,
-                                  100.0,
-                                  static_cast<intmax_t>(total_size),
-                                  "",
-                                  "<total>",
-                                  1);
+  printf(kFormatString, 100.0, total_size, "", "<total>", total_op);
+  fflush(stdout);
 }
 
 }  // namespace chromeos_update_engine
diff --git a/payload_generator/payload_file.h b/payload_generator/payload_file.h
index 7cc792a..9dc80a7 100644
--- a/payload_generator/payload_file.h
+++ b/payload_generator/payload_file.h
@@ -95,6 +95,7 @@
     PartitionInfo new_info;
 
     PostInstallConfig postinstall;
+    VerityConfig verity;
   };
 
   std::vector<Partition> part_vec_;
diff --git a/payload_generator/payload_file_unittest.cc b/payload_generator/payload_file_unittest.cc
index e8e7e14..45faebb 100644
--- a/payload_generator/payload_file_unittest.cc
+++ b/payload_generator/payload_file_unittest.cc
@@ -36,23 +36,16 @@
 };
 
 TEST_F(PayloadFileTest, ReorderBlobsTest) {
-  string orig_blobs;
-  EXPECT_TRUE(utils::MakeTempFile("ReorderBlobsTest.orig.XXXXXX", &orig_blobs,
-                                  nullptr));
-  ScopedPathUnlinker orig_blobs_unlinker(orig_blobs);
+  test_utils::ScopedTempFile orig_blobs("ReorderBlobsTest.orig.XXXXXX");
 
   // The operations have three blobs and one gap (the whitespace):
   // Rootfs operation 1: [8, 3] bcd
   // Rootfs operation 2: [7, 1] a
   // Kernel operation 1: [0, 6] kernel
   string orig_data = "kernel abcd";
-  EXPECT_TRUE(
-      utils::WriteFile(orig_blobs.c_str(), orig_data.data(), orig_data.size()));
+  EXPECT_TRUE(test_utils::WriteFileString(orig_blobs.path(), orig_data));
 
-  string new_blobs;
-  EXPECT_TRUE(
-      utils::MakeTempFile("ReorderBlobsTest.new.XXXXXX", &new_blobs, nullptr));
-  ScopedPathUnlinker new_blobs_unlinker(new_blobs);
+  test_utils::ScopedTempFile new_blobs("ReorderBlobsTest.new.XXXXXX");
 
   payload_.part_vec_.resize(2);
 
@@ -71,12 +64,12 @@
   aop.op.set_data_length(6);
   payload_.part_vec_[1].aops = {aop};
 
-  EXPECT_TRUE(payload_.ReorderDataBlobs(orig_blobs, new_blobs));
+  EXPECT_TRUE(payload_.ReorderDataBlobs(orig_blobs.path(), new_blobs.path()));
 
   const vector<AnnotatedOperation>& part0_aops = payload_.part_vec_[0].aops;
   const vector<AnnotatedOperation>& part1_aops = payload_.part_vec_[1].aops;
   string new_data;
-  EXPECT_TRUE(utils::ReadFile(new_blobs, &new_data));
+  EXPECT_TRUE(utils::ReadFile(new_blobs.path(), &new_data));
   // Kernel blobs should appear at the end.
   EXPECT_EQ("bcdakernel", new_data);
 
diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc
index 15d4ab5..648fe8b 100644
--- a/payload_generator/payload_generation_config.cc
+++ b/payload_generator/payload_generation_config.cc
@@ -16,22 +16,38 @@
 
 #include "update_engine/payload_generator/payload_generation_config.h"
 
+#include <algorithm>
+#include <map>
+#include <utility>
+
 #include <base/logging.h>
+#include <base/strings/string_number_conversions.h>
+#include <brillo/strings/string_utils.h>
 
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/delta_performer.h"
+#include "update_engine/payload_generator/boot_img_filesystem.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
 #include "update_engine/payload_generator/delta_diff_utils.h"
 #include "update_engine/payload_generator/ext2_filesystem.h"
 #include "update_engine/payload_generator/mapfile_filesystem.h"
 #include "update_engine/payload_generator/raw_filesystem.h"
 
+using std::string;
+
 namespace chromeos_update_engine {
 
 bool PostInstallConfig::IsEmpty() const {
   return !run && path.empty() && filesystem_type.empty() && !optional;
 }
 
+bool VerityConfig::IsEmpty() const {
+  return hash_tree_data_extent.num_blocks() == 0 &&
+         hash_tree_extent.num_blocks() == 0 && hash_tree_algorithm.empty() &&
+         hash_tree_salt.empty() && fec_data_extent.num_blocks() == 0 &&
+         fec_extent.num_blocks() == 0 && fec_roots == 0;
+}
+
 bool PartitionConfig::ValidateExists() const {
   TEST_AND_RETURN_FALSE(!path.empty());
   TEST_AND_RETURN_FALSE(utils::FileExists(path.c_str()));
@@ -64,6 +80,12 @@
     }
   }
 
+  fs_interface = BootImgFilesystem::CreateFromFile(path);
+  if (fs_interface) {
+    TEST_AND_RETURN_FALSE(fs_interface->GetBlockSize() == kBlockSize);
+    return true;
+  }
+
   // Fall back to a RAW filesystem.
   TEST_AND_RETURN_FALSE(size % kBlockSize == 0);
   fs_interface = RawFilesystem::Create(
@@ -107,13 +129,79 @@
   return true;
 }
 
+bool ImageConfig::LoadDynamicPartitionMetadata(
+    const brillo::KeyValueStore& store) {
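+  // The store is expected to look like (values mirror the unit test added
+  // in this change):
+  //   super_partition_groups=group_a group_b
+  //   group_a_size=3221225472
+  //   group_a_partition_list=system product_services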
+  auto metadata = std::make_unique<DynamicPartitionMetadata>();
+  string buf;
+  if (!store.GetString("super_partition_groups", &buf)) {
+    LOG(ERROR) << "Dynamic partition info missing super_partition_groups.";
+    return false;
+  }
+  auto group_names = brillo::string_utils::Split(buf, " ");
+  for (const auto& group_name : group_names) {
+    DynamicPartitionGroup* group = metadata->add_groups();
+    group->set_name(group_name);
+    if (!store.GetString(group_name + "_size", &buf)) {
+      LOG(ERROR) << "Missing " << group_name + "_size.";
+      return false;
+    }
+
+    uint64_t max_size;
+    if (!base::StringToUint64(buf, &max_size)) {
+      LOG(ERROR) << group_name << "_size=" << buf << " is not an integer.";
+      return false;
+    }
+    group->set_size(max_size);
+
+    if (store.GetString(group_name + "_partition_list", &buf)) {
+      auto partition_names = brillo::string_utils::Split(buf, " ");
+      for (const auto& partition_name : partition_names) {
+        group->add_partition_names()->assign(partition_name);
+      }
+    }
+  }
+  dynamic_partition_metadata = std::move(metadata);
+  return true;
+}
+
+bool ImageConfig::ValidateDynamicPartitionMetadata() const {
+  if (dynamic_partition_metadata == nullptr) {
+    LOG(ERROR) << "dynamic_partition_metadata is not loaded.";
+    return false;
+  }
+
+  for (const auto& group : dynamic_partition_metadata->groups()) {
+    uint64_t sum_size = 0;
+    for (const auto& partition_name : group.partition_names()) {
+      auto partition_config = std::find_if(partitions.begin(),
+                                           partitions.end(),
+                                           [&partition_name](const auto& e) {
+                                             return e.name == partition_name;
+                                           });
+
+      if (partition_config == partitions.end()) {
+        LOG(ERROR) << "Cannot find partition " << partition_name
+                   << " which is in " << group.name() << "_partition_list";
+        return false;
+      }
+      sum_size += partition_config->size;
+    }
+
+    if (sum_size > group.size()) {
+      LOG(ERROR) << "Sum of sizes in " << group.name() << "_partition_list is "
+                 << sum_size << ", which is greater than " << group.name()
+                 << "_size (" << group.size() << ")";
+      return false;
+    }
+  }
+  return true;
+}
+
 bool ImageConfig::ImageInfoIsEmpty() const {
-  return image_info.board().empty()
-    && image_info.key().empty()
-    && image_info.channel().empty()
-    && image_info.version().empty()
-    && image_info.build_channel().empty()
-    && image_info.build_version().empty();
+  return image_info.board().empty() && image_info.key().empty() &&
+         image_info.channel().empty() && image_info.version().empty() &&
+         image_info.build_channel().empty() &&
+         image_info.build_version().empty();
 }
 
 PayloadVersion::PayloadVersion(uint64_t major_version, uint32_t minor_version) {
@@ -129,11 +217,12 @@
                         minor == kSourceMinorPayloadVersion ||
                         minor == kOpSrcHashMinorPayloadVersion ||
                         minor == kBrotliBsdiffMinorPayloadVersion ||
-                        minor == kPuffdiffMinorPayloadVersion);
+                        minor == kPuffdiffMinorPayloadVersion ||
+                        minor == kVerityMinorPayloadVersion);
   return true;
 }
 
-bool PayloadVersion::OperationAllowed(InstallOperation_Type operation) const {
+bool PayloadVersion::OperationAllowed(InstallOperation::Type operation) const {
   switch (operation) {
     // Full operations:
     case InstallOperation::REPLACE:
@@ -192,8 +281,9 @@
         TEST_AND_RETURN_FALSE(part.ValidateExists());
         TEST_AND_RETURN_FALSE(part.size % block_size == 0);
       }
-      // Source partition should not have postinstall.
+      // Source partition should not have postinstall or verity config.
       TEST_AND_RETURN_FALSE(part.postinstall.IsEmpty());
+      TEST_AND_RETURN_FALSE(part.verity.IsEmpty());
     }
 
     // If new_image_info is present, old_image_info must be present.
@@ -209,10 +299,12 @@
     TEST_AND_RETURN_FALSE(part.ValidateExists());
     TEST_AND_RETURN_FALSE(part.size % block_size == 0);
     if (version.minor == kInPlaceMinorPayloadVersion &&
-        part.name == kLegacyPartitionNameRoot)
+        part.name == kPartitionNameRoot)
       TEST_AND_RETURN_FALSE(rootfs_partition_size >= part.size);
     if (version.major == kChromeOSMajorPayloadVersion)
       TEST_AND_RETURN_FALSE(part.postinstall.IsEmpty());
+    if (version.minor < kVerityMinorPayloadVersion)
+      TEST_AND_RETURN_FALSE(part.verity.IsEmpty());
   }
 
   TEST_AND_RETURN_FALSE(hard_chunk_size == -1 ||
diff --git a/payload_generator/payload_generation_config.h b/payload_generator/payload_generation_config.h
index c553d29..584ac7b 100644
--- a/payload_generator/payload_generation_config.h
+++ b/payload_generator/payload_generation_config.h
@@ -24,6 +24,7 @@
 #include <vector>
 
 #include <brillo/key_value_store.h>
+#include <brillo/secure_blob.h>
 
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_generator/filesystem_interface.h"
@@ -51,6 +52,34 @@
   bool optional = false;
 };
 
+// Data will be written to the payload and used for hash tree and FEC generation
+// at device update time.
+struct VerityConfig {
+  // Whether the verity config is empty.
+  bool IsEmpty() const;
+
+  // The extent for data covered by verity hash tree.
+  Extent hash_tree_data_extent;
+
+  // The extent to store verity hash tree.
+  Extent hash_tree_extent;
+
+  // The hash algorithm used in verity hash tree.
+  std::string hash_tree_algorithm;
+
+  // The salt used for verity hash tree.
+  brillo::Blob hash_tree_salt;
+
+  // The extent for data covered by FEC.
+  Extent fec_data_extent;
+
+  // The extent to store FEC.
+  Extent fec_extent;
+
+  // The number of FEC roots.
+  uint32_t fec_roots = 0;
+};
+
 struct PartitionConfig {
   explicit PartitionConfig(std::string name) : name(name) {}
 
@@ -86,6 +115,7 @@
   std::string name;
 
   PostInstallConfig postinstall;
+  VerityConfig verity;
 };
 
 // The ImageConfig struct describes a pair of binaries kernel and rootfs and the
@@ -104,6 +134,15 @@
   // Load postinstall config from a key value store.
   bool LoadPostInstallConfig(const brillo::KeyValueStore& store);
 
+  // Load verity config by parsing the partition images.
+  bool LoadVerityConfig();
+
+  // Load dynamic partition info from a key value store.
+  bool LoadDynamicPartitionMetadata(const brillo::KeyValueStore& store);
+
+  // Validate |dynamic_partition_metadata| against |partitions|.
+  bool ValidateDynamicPartitionMetadata() const;
+
   // Returns whether the |image_info| field is empty.
   bool ImageInfoIsEmpty() const;
 
@@ -113,6 +152,9 @@
 
   // The updated partitions.
   std::vector<PartitionConfig> partitions;
+
+  // The super partition metadata.
+  std::unique_ptr<DynamicPartitionMetadata> dynamic_partition_metadata;
 };
 
 struct PayloadVersion {
@@ -123,7 +165,7 @@
   bool Validate() const;
 
   // Return whether the passed |operation| is allowed by this payload.
-  bool OperationAllowed(InstallOperation_Type operation) const;
+  bool OperationAllowed(InstallOperation::Type operation) const;
 
   // Whether this payload version is a delta payload.
   bool IsDelta() const;
@@ -154,7 +196,7 @@
   // if is_full is false, so we are requested a delta payload.
   ImageConfig source;
 
-  // Wheter the requested payload is a delta payload.
+  // Whether the requested payload is a delta payload.
   bool is_delta = false;
 
   // The major/minor version of the payload.
diff --git a/payload_generator/payload_generation_config_android.cc b/payload_generator/payload_generation_config_android.cc
new file mode 100644
index 0000000..90c053f
--- /dev/null
+++ b/payload_generator/payload_generation_config_android.cc
@@ -0,0 +1,225 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/payload_generation_config.h"
+
+#include <base/logging.h>
+#include <base/strings/string_number_conversions.h>
+#include <base/strings/string_split.h>
+#include <brillo/secure_blob.h>
+#include <fec/io.h>
+#include <libavb/libavb.h>
+#include <verity/hash_tree_builder.h>
+
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/verity_writer_android.h"
+#include "update_engine/payload_generator/extent_ranges.h"
+
+namespace chromeos_update_engine {
+
+namespace {
+bool AvbDescriptorCallback(const AvbDescriptor* descriptor, void* user_data) {
+  PartitionConfig* part = static_cast<PartitionConfig*>(user_data);
+  AvbDescriptor desc;
+  TEST_AND_RETURN_FALSE(
+      avb_descriptor_validate_and_byteswap(descriptor, &desc));
+  if (desc.tag != AVB_DESCRIPTOR_TAG_HASHTREE)
+    return true;
+
+  AvbHashtreeDescriptor hashtree;
+  TEST_AND_RETURN_FALSE(avb_hashtree_descriptor_validate_and_byteswap(
+      reinterpret_cast<const AvbHashtreeDescriptor*>(descriptor), &hashtree));
+  // We only support version 1 right now; a new payload minor version will be
+  // needed to support newer dm-verity versions.
+  TEST_AND_RETURN_FALSE(hashtree.dm_verity_version == 1);
+  part->verity.hash_tree_algorithm =
+      reinterpret_cast<const char*>(hashtree.hash_algorithm);
+
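+  // In the AVB hashtree descriptor, the salt immediately follows the
+  // descriptor struct and the partition name, as the pointer arithmetic
+  // below assumes.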
+  const uint8_t* salt = reinterpret_cast<const uint8_t*>(descriptor) +
+                        sizeof(AvbHashtreeDescriptor) +
+                        hashtree.partition_name_len;
+  part->verity.hash_tree_salt.assign(salt, salt + hashtree.salt_len);
+
+  TEST_AND_RETURN_FALSE(hashtree.data_block_size ==
+                        part->fs_interface->GetBlockSize());
+  part->verity.hash_tree_data_extent =
+      ExtentForBytes(hashtree.data_block_size, 0, hashtree.image_size);
+
+  TEST_AND_RETURN_FALSE(hashtree.hash_block_size ==
+                        part->fs_interface->GetBlockSize());
+  part->verity.hash_tree_extent = ExtentForBytes(
+      hashtree.hash_block_size, hashtree.tree_offset, hashtree.tree_size);
+
+  part->verity.fec_data_extent =
+      ExtentForBytes(hashtree.data_block_size, 0, hashtree.fec_offset);
+  part->verity.fec_extent = ExtentForBytes(
+      hashtree.data_block_size, hashtree.fec_offset, hashtree.fec_size);
+  part->verity.fec_roots = hashtree.fec_num_roots;
+  return true;
+}
+
+// Generate the hash tree and FEC based on the verity config and verify that
+// they match the hash tree and FEC stored in the image.
+bool VerifyVerityConfig(const PartitionConfig& part) {
+  const size_t block_size = part.fs_interface->GetBlockSize();
+  if (part.verity.hash_tree_extent.num_blocks() != 0) {
+    auto hash_function =
+        HashTreeBuilder::HashFunction(part.verity.hash_tree_algorithm);
+    TEST_AND_RETURN_FALSE(hash_function != nullptr);
+    HashTreeBuilder hash_tree_builder(block_size, hash_function);
+    uint64_t data_size =
+        part.verity.hash_tree_data_extent.num_blocks() * block_size;
+    uint64_t tree_size = hash_tree_builder.CalculateSize(data_size);
+    TEST_AND_RETURN_FALSE(
+        tree_size == part.verity.hash_tree_extent.num_blocks() * block_size);
+    TEST_AND_RETURN_FALSE(
+        hash_tree_builder.Initialize(data_size, part.verity.hash_tree_salt));
+
+    brillo::Blob buffer;
+    for (uint64_t offset = part.verity.hash_tree_data_extent.start_block() *
+                           block_size,
+                  data_end = offset + data_size;
+         offset < data_end;) {
+      constexpr uint64_t kBufferSize = 1024 * 1024;
+      size_t bytes_to_read = std::min(kBufferSize, data_end - offset);
+      TEST_AND_RETURN_FALSE(
+          utils::ReadFileChunk(part.path, offset, bytes_to_read, &buffer));
+      TEST_AND_RETURN_FALSE(
+          hash_tree_builder.Update(buffer.data(), buffer.size()));
+      offset += buffer.size();
+      buffer.clear();
+    }
+    TEST_AND_RETURN_FALSE(hash_tree_builder.BuildHashTree());
+    TEST_AND_RETURN_FALSE(utils::ReadFileChunk(
+        part.path,
+        part.verity.hash_tree_extent.start_block() * block_size,
+        tree_size,
+        &buffer));
+    TEST_AND_RETURN_FALSE(hash_tree_builder.CheckHashTree(buffer));
+  }
+
+  if (part.verity.fec_extent.num_blocks() != 0) {
+    TEST_AND_RETURN_FALSE(VerityWriterAndroid::EncodeFEC(
+        part.path,
+        part.verity.fec_data_extent.start_block() * block_size,
+        part.verity.fec_data_extent.num_blocks() * block_size,
+        part.verity.fec_extent.start_block() * block_size,
+        part.verity.fec_extent.num_blocks() * block_size,
+        part.verity.fec_roots,
+        block_size,
+        true /* verify_mode */));
+  }
+  return true;
+}
+}  // namespace
+
+bool ImageConfig::LoadVerityConfig() {
+  for (PartitionConfig& part : partitions) {
+    // Parse AVB devices.
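+    // The AvbFooter in the last sizeof(AvbFooter) bytes of the image points
+    // at the vbmeta blob; its hashtree descriptor (handled by
+    // AvbDescriptorCallback above) carries the hash tree and FEC layout.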
+    if (part.size > sizeof(AvbFooter)) {
+      uint64_t footer_offset = part.size - sizeof(AvbFooter);
+      brillo::Blob buffer;
+      TEST_AND_RETURN_FALSE(utils::ReadFileChunk(
+          part.path, footer_offset, sizeof(AvbFooter), &buffer));
+      if (memcmp(buffer.data(), AVB_FOOTER_MAGIC, AVB_FOOTER_MAGIC_LEN) == 0) {
+        LOG(INFO) << "Parsing verity config from AVB footer for " << part.name;
+        AvbFooter footer;
+        TEST_AND_RETURN_FALSE(avb_footer_validate_and_byteswap(
+            reinterpret_cast<const AvbFooter*>(buffer.data()), &footer));
+        buffer.clear();
+
+        TEST_AND_RETURN_FALSE(
+            footer.vbmeta_offset + sizeof(AvbVBMetaImageHeader) <= part.size);
+        TEST_AND_RETURN_FALSE(utils::ReadFileChunk(
+            part.path, footer.vbmeta_offset, footer.vbmeta_size, &buffer));
+        TEST_AND_RETURN_FALSE(avb_descriptor_foreach(
+            buffer.data(), buffer.size(), AvbDescriptorCallback, &part));
+      }
+    }
+
+    // Parse VB 1.0 devices with FEC metadata; devices with a hash tree but
+    // without FEC are skipped for now.
+    if (part.verity.IsEmpty() && part.size > FEC_BLOCKSIZE) {
+      brillo::Blob fec_metadata;
+      TEST_AND_RETURN_FALSE(utils::ReadFileChunk(part.path,
+                                                 part.size - FEC_BLOCKSIZE,
+                                                 sizeof(fec_header),
+                                                 &fec_metadata));
+      const fec_header* header =
+          reinterpret_cast<const fec_header*>(fec_metadata.data());
+      if (header->magic == FEC_MAGIC) {
+        LOG(INFO)
+            << "Parsing verity config from Verified Boot 1.0 metadata for "
+            << part.name;
+        const size_t block_size = part.fs_interface->GetBlockSize();
+        // FEC_VERITY_DISABLE skips verifying the verity hash tree, because we
+        // will verify it ourselves later.
+        fec::io fh(part.path, O_RDONLY, FEC_VERITY_DISABLE);
+        TEST_AND_RETURN_FALSE(fh);
+        fec_verity_metadata verity_data;
+        if (fh.get_verity_metadata(verity_data)) {
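+          // |verity_data.table| is assumed to follow the standard dm-verity
+          // target table layout: version, data_dev, hash_dev,
+          // data_block_size, hash_block_size, num_data_blocks,
+          // hash_start_block, algorithm, root_digest, salt (indices 0-9,
+          // matching the accesses below).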
+          auto verity_table = base::SplitString(verity_data.table,
+                                                " ",
+                                                base::KEEP_WHITESPACE,
+                                                base::SPLIT_WANT_ALL);
+          TEST_AND_RETURN_FALSE(verity_table.size() == 10);
+          size_t data_block_size = 0;
+          TEST_AND_RETURN_FALSE(
+              base::StringToSizeT(verity_table[3], &data_block_size));
+          TEST_AND_RETURN_FALSE(block_size == data_block_size);
+          size_t hash_block_size = 0;
+          TEST_AND_RETURN_FALSE(
+              base::StringToSizeT(verity_table[4], &hash_block_size));
+          TEST_AND_RETURN_FALSE(block_size == hash_block_size);
+          uint64_t num_data_blocks = 0;
+          TEST_AND_RETURN_FALSE(
+              base::StringToUint64(verity_table[5], &num_data_blocks));
+          part.verity.hash_tree_data_extent =
+              ExtentForRange(0, num_data_blocks);
+          uint64_t hash_start_block = 0;
+          TEST_AND_RETURN_FALSE(
+              base::StringToUint64(verity_table[6], &hash_start_block));
+          part.verity.hash_tree_algorithm = verity_table[7];
+          TEST_AND_RETURN_FALSE(base::HexStringToBytes(
+              verity_table[9], &part.verity.hash_tree_salt));
+          auto hash_function =
+              HashTreeBuilder::HashFunction(part.verity.hash_tree_algorithm);
+          TEST_AND_RETURN_FALSE(hash_function != nullptr);
+          HashTreeBuilder hash_tree_builder(block_size, hash_function);
+          uint64_t tree_size =
+              hash_tree_builder.CalculateSize(num_data_blocks * block_size);
+          part.verity.hash_tree_extent =
+              ExtentForRange(hash_start_block, tree_size / block_size);
+        }
+        fec_ecc_metadata ecc_data;
+        if (fh.get_ecc_metadata(ecc_data) && ecc_data.valid) {
+          TEST_AND_RETURN_FALSE(block_size == FEC_BLOCKSIZE);
+          part.verity.fec_data_extent = ExtentForRange(0, ecc_data.blocks);
+          part.verity.fec_extent =
+              ExtentForBytes(block_size, ecc_data.start, header->fec_size);
+          part.verity.fec_roots = ecc_data.roots;
+        }
+      }
+    }
+
+    if (!part.verity.IsEmpty()) {
+      TEST_AND_RETURN_FALSE(VerifyVerityConfig(part));
+    }
+  }
+  return true;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_generator/payload_generation_config_android_unittest.cc b/payload_generator/payload_generation_config_android_unittest.cc
new file mode 100644
index 0000000..53378c2
--- /dev/null
+++ b/payload_generator/payload_generation_config_android_unittest.cc
@@ -0,0 +1,197 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/payload_generation_config.h"
+
+#include <brillo/secure_blob.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/test_utils.h"
+#include "update_engine/payload_generator/extent_ranges.h"
+#include "update_engine/payload_generator/extent_utils.h"
+
+namespace chromeos_update_engine {
+
+namespace {
+// dd if=/dev/zero of=part bs=4096 count=2
+// avbtool add_hashtree_footer --image part --partition_size $((24 * 4096))
+//     --partition_name system
+constexpr uint64_t kImageSize = 24 * 4096;
+
+// hexdump -s $((2 * 4096)) -n 64 -v -e '/1 "0x%02x, "' part
+constexpr uint64_t kHashTreeOffset = 2 * 4096;
+const uint8_t kHashTree[] = {
+    0x62, 0x4b, 0x5a, 0x4d, 0xa2, 0x97, 0xa0, 0xc8, 0x08, 0x03, 0xa6,
+    0x95, 0x4c, 0x4c, 0x7a, 0x2d, 0xac, 0x50, 0xde, 0x74, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62,
+    0x4b, 0x5a, 0x4d, 0xa2, 0x97, 0xa0, 0xc8, 0x08, 0x03, 0xa6, 0x95,
+    0x4c, 0x4c, 0x7a, 0x2d, 0xac, 0x50, 0xde, 0x74, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+// hexdump -s $((3 * 4096)) -n 128 -v -e '/1 "0x%02x, "' part
+constexpr uint64_t kFECOffset = 3 * 4096;
+const uint8_t kFEC[] = {
+    0xec, 0x8e, 0x93, 0xd8, 0xf9, 0xa3, 0xd6, 0x9b, 0xa4, 0x06, 0x5f, 0xc8,
+    0x6c, 0xcc, 0x4f, 0x87, 0x07, 0x0f, 0xac, 0xaf, 0x29, 0x8f, 0x97, 0x02,
+    0xb2, 0xfe, 0xb2, 0xfe, 0xe5, 0x9f, 0xf2, 0xdf, 0xe6, 0x4a, 0x36, 0x66,
+    0x04, 0xda, 0xa7, 0xd3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0xec, 0x8e, 0x93, 0xd8, 0xf9, 0xa3, 0xd6, 0x9b,
+    0xa4, 0x06, 0x5f, 0xc8, 0x6c, 0xcc, 0x4f, 0x87, 0x07, 0x0f, 0xac, 0xaf,
+    0x29, 0x8f, 0x97, 0x02, 0xb2, 0xfe, 0xb2, 0xfe, 0xe5, 0x9f, 0xf2, 0xdf,
+    0xe6, 0x4a, 0x36, 0x66, 0x04, 0xda, 0xa7, 0xd3, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+// hexdump -s $((5 * 4096)) -n 512 -v -e '/1 "0x%02x, "' part
+constexpr uint64_t kVBMetaImageOffset = 5 * 4096;
+const uint8_t kVBMetaImage[] = {
+    0x41, 0x56, 0x42, 0x30, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe8,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0xe8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0xe8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0x76, 0x62, 0x74,
+    0x6f, 0x6f, 0x6c, 0x20, 0x31, 0x2e, 0x31, 0x2e, 0x30, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x00, 0x00, 0x00, 0x01,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,
+    0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x02,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x20, 0x00, 0x73, 0x68, 0x61, 0x31, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x14,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x1f, 0xab,
+    0x7a, 0x6b, 0xf6, 0xb1, 0x3a, 0x1f, 0xdb, 0x34, 0xa3, 0xfc, 0xc8, 0x73,
+    0x0b, 0x23, 0x61, 0xb3, 0x04, 0xe2, 0x4f, 0x6c, 0xd0, 0x1e, 0x39, 0x9d,
+    0xaa, 0x73, 0x35, 0x53, 0xa7, 0x74, 0x1f, 0x81, 0xd0, 0xa6, 0xa9, 0x5f,
+    0x19, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+// hexdump -s $((24 * 4096 - 64)) -n 64 -v -e '/1 "0x%02x, "' part
+constexpr uint64_t kAVBFooterOffset = 24 * 4096 - 64;
+const uint8_t kAVBFooter[] = {
+    0x41, 0x56, 0x42, 0x66, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+// avbtool info_image --image part | grep Salt | cut -d':' -f 2 | xxd -r -p |
+//     hexdump -v -e '/1 "0x%02x, "'
+const uint8_t kHashTreeSalt[] = {0x1f, 0xab, 0x7a, 0x6b, 0xf6, 0xb1, 0x3a,
+                                 0x1f, 0xdb, 0x34, 0xa3, 0xfc, 0xc8, 0x73,
+                                 0x0b, 0x23, 0x61, 0xb3, 0x04, 0xe2};
+
+brillo::Blob GetAVBPartition() {
+  brillo::Blob part(kImageSize);
+  memcpy(part.data() + kHashTreeOffset, kHashTree, sizeof(kHashTree));
+  memcpy(part.data() + kFECOffset, kFEC, sizeof(kFEC));
+  memcpy(part.data() + kVBMetaImageOffset, kVBMetaImage, sizeof(kVBMetaImage));
+  memcpy(part.data() + kAVBFooterOffset, kAVBFooter, sizeof(kAVBFooter));
+  return part;
+}
+
+}  // namespace
+
+class PayloadGenerationConfigAndroidTest : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    image_config_.partitions.emplace_back("system");
+    image_config_.partitions[0].path = temp_file_.path();
+  }
+
+  ImageConfig image_config_;
+  test_utils::ScopedTempFile temp_file_{
+      "PayloadGenerationConfigAndroidTest.XXXXXX"};
+};
+
+TEST_F(PayloadGenerationConfigAndroidTest, LoadVerityConfigSimpleTest) {
+  brillo::Blob part = GetAVBPartition();
+  test_utils::WriteFileVector(temp_file_.path(), part);
+  EXPECT_TRUE(image_config_.LoadImageSize());
+  EXPECT_TRUE(image_config_.partitions[0].OpenFilesystem());
+  EXPECT_TRUE(image_config_.LoadVerityConfig());
+  const VerityConfig& verity = image_config_.partitions[0].verity;
+  EXPECT_FALSE(verity.IsEmpty());
+  EXPECT_EQ(ExtentForRange(0, 2), verity.hash_tree_data_extent);
+  EXPECT_EQ(ExtentForRange(2, 1), verity.hash_tree_extent);
+  EXPECT_EQ("sha1", verity.hash_tree_algorithm);
+  brillo::Blob salt(kHashTreeSalt, std::end(kHashTreeSalt));
+  EXPECT_EQ(salt, verity.hash_tree_salt);
+  EXPECT_EQ(ExtentForRange(0, 3), verity.fec_data_extent);
+  EXPECT_EQ(ExtentForRange(3, 2), verity.fec_extent);
+  EXPECT_EQ(2u, verity.fec_roots);
+}
+
+TEST_F(PayloadGenerationConfigAndroidTest,
+       LoadVerityConfigInvalidHashTreeTest) {
+  brillo::Blob part = GetAVBPartition();
+  part[kHashTreeOffset] ^= 1;  // flip one bit
+  test_utils::WriteFileVector(temp_file_.path(), part);
+  EXPECT_TRUE(image_config_.LoadImageSize());
+  EXPECT_TRUE(image_config_.partitions[0].OpenFilesystem());
+  EXPECT_FALSE(image_config_.LoadVerityConfig());
+}
+
+TEST_F(PayloadGenerationConfigAndroidTest, LoadVerityConfigInvalidFECTest) {
+  brillo::Blob part = GetAVBPartition();
+  part[kFECOffset] ^= 1;  // flip one bit
+  test_utils::WriteFileVector(temp_file_.path(), part);
+  EXPECT_TRUE(image_config_.LoadImageSize());
+  EXPECT_TRUE(image_config_.partitions[0].OpenFilesystem());
+  EXPECT_FALSE(image_config_.LoadVerityConfig());
+}
+
+TEST_F(PayloadGenerationConfigAndroidTest, LoadVerityConfigEmptyImageTest) {
+  brillo::Blob part(kImageSize);
+  test_utils::WriteFileVector(temp_file_.path(), part);
+  EXPECT_TRUE(image_config_.LoadImageSize());
+  EXPECT_TRUE(image_config_.LoadVerityConfig());
+  EXPECT_TRUE(image_config_.partitions[0].verity.IsEmpty());
+}
+
+TEST_F(PayloadGenerationConfigAndroidTest, LoadVerityConfigTinyImageTest) {
+  test_utils::WriteFileString(temp_file_.path(), "tiny");
+  EXPECT_TRUE(image_config_.LoadImageSize());
+  EXPECT_TRUE(image_config_.LoadVerityConfig());
+  EXPECT_TRUE(image_config_.partitions[0].verity.IsEmpty());
+}
+
+}  // namespace chromeos_update_engine
diff --git a/boot_control_recovery_stub.cc b/payload_generator/payload_generation_config_chromeos.cc
similarity index 68%
rename from boot_control_recovery_stub.cc
rename to payload_generator/payload_generation_config_chromeos.cc
index 129c5d0..bb05aff 100644
--- a/boot_control_recovery_stub.cc
+++ b/payload_generator/payload_generation_config_chromeos.cc
@@ -1,5 +1,5 @@
 //
-// Copyright (C) 2016 The Android Open Source Project
+// Copyright (C) 2018 The Android Open Source Project
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -14,8 +14,12 @@
 // limitations under the License.
 //
 
-#include <hardware/hardware.h>
+#include "update_engine/payload_generator/payload_generation_config.h"
 
-hw_module_t HAL_MODULE_INFO_SYM = {
-  .id = "stub",
-};
+namespace chromeos_update_engine {
+
+bool ImageConfig::LoadVerityConfig() {
+  return true;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_generator/payload_generation_config_unittest.cc b/payload_generator/payload_generation_config_unittest.cc
index 3545056..70a3df3 100644
--- a/payload_generator/payload_generation_config_unittest.cc
+++ b/payload_generator/payload_generation_config_unittest.cc
@@ -16,6 +16,8 @@
 
 #include "update_engine/payload_generator/payload_generation_config.h"
 
+#include <utility>
+
 #include <gtest/gtest.h>
 
 namespace chromeos_update_engine {
@@ -51,4 +53,93 @@
   EXPECT_TRUE(image_config.partitions[0].postinstall.IsEmpty());
 }
 
+TEST_F(PayloadGenerationConfigTest, LoadDynamicPartitionMetadataTest) {
+  ImageConfig image_config;
+  brillo::KeyValueStore store;
+  ASSERT_TRUE(
+      store.LoadFromString("super_partition_groups=group_a group_b\n"
+                           "group_a_size=3221225472\n"
+                           "group_a_partition_list=system product_services\n"
+                           "group_b_size=2147483648\n"
+                           "group_b_partition_list=vendor\n"));
+  EXPECT_TRUE(image_config.LoadDynamicPartitionMetadata(store));
+  ASSERT_NE(nullptr, image_config.dynamic_partition_metadata);
+
+  ASSERT_EQ(2, image_config.dynamic_partition_metadata->groups_size());
+
+  const auto& group_a = image_config.dynamic_partition_metadata->groups(0);
+  EXPECT_EQ("group_a", group_a.name());
+  EXPECT_EQ(3221225472u, group_a.size());
+  ASSERT_EQ(2, group_a.partition_names_size());
+  EXPECT_EQ("system", group_a.partition_names(0));
+  EXPECT_EQ("product_services", group_a.partition_names(1));
+
+  const auto& group_b = image_config.dynamic_partition_metadata->groups(1);
+  EXPECT_EQ("group_b", group_b.name());
+  EXPECT_EQ(2147483648u, group_b.size());
+  ASSERT_EQ(1, group_b.partition_names_size());
+  EXPECT_EQ("vendor", group_b.partition_names(0));
+}
+
+TEST_F(PayloadGenerationConfigTest,
+       LoadDynamicPartitionMetadataMissingSizeTest) {
+  ImageConfig image_config;
+  brillo::KeyValueStore store;
+  ASSERT_TRUE(
+      store.LoadFromString("super_partition_groups=foo\n"
+                           "foo_partition_list=baz\n"));
+  EXPECT_FALSE(image_config.LoadDynamicPartitionMetadata(store));
+  EXPECT_EQ(nullptr, image_config.dynamic_partition_metadata);
+}
+
+TEST_F(PayloadGenerationConfigTest, LoadDynamicPartitionMetadataBadSizeTest) {
+  ImageConfig image_config;
+  brillo::KeyValueStore store;
+  ASSERT_TRUE(
+      store.LoadFromString("super_partition_groups=foo\n"
+                           "foo_size=bar\n"
+                           "foo_partition_list=baz\n"));
+  EXPECT_FALSE(image_config.LoadDynamicPartitionMetadata(store));
+  EXPECT_EQ(nullptr, image_config.dynamic_partition_metadata);
+}
+
+TEST_F(PayloadGenerationConfigTest, ValidateDynamicPartitionMetadata) {
+  ImageConfig image_config;
+
+  PartitionConfig system("system");
+  system.size = 2147483648u;
+  PartitionConfig product_services("product_services");
+  product_services.size = 1073741824u;
+
+  image_config.partitions.push_back(std::move(system));
+  image_config.partitions.push_back(std::move(product_services));
+
+  brillo::KeyValueStore store;
+  ASSERT_TRUE(
+      store.LoadFromString("super_partition_groups=foo\n"
+                           "foo_size=3221225472\n"
+                           "foo_partition_list=system product_services\n"));
+  EXPECT_TRUE(image_config.LoadDynamicPartitionMetadata(store));
+  EXPECT_NE(nullptr, image_config.dynamic_partition_metadata);
+
+  EXPECT_TRUE(image_config.ValidateDynamicPartitionMetadata());
+}
+
+TEST_F(PayloadGenerationConfigTest, ValidateDynamicPartitionMetadataTooBig) {
+  ImageConfig image_config;
+
+  PartitionConfig system("system");
+  system.size = 4294967296u;
+  image_config.partitions.push_back(std::move(system));
+
+  brillo::KeyValueStore store;
+  ASSERT_TRUE(
+      store.LoadFromString("super_partition_groups=foo\n"
+                           "foo_size=3221225472\n"
+                           "foo_partition_list=system\n"));
+  EXPECT_TRUE(image_config.LoadDynamicPartitionMetadata(store));
+  EXPECT_NE(nullptr, image_config.dynamic_partition_metadata);
+
+  EXPECT_FALSE(image_config.ValidateDynamicPartitionMetadata());
+}
 }  // namespace chromeos_update_engine
diff --git a/payload_generator/payload_signer.cc b/payload_generator/payload_signer.cc
index 0b47dd4..cbca7fe 100644
--- a/payload_generator/payload_signer.cc
+++ b/payload_generator/payload_signer.cc
@@ -25,8 +25,6 @@
 #include <base/strings/string_split.h>
 #include <base/strings/string_util.h>
 #include <brillo/data_encoding.h>
-#include <brillo/streams/file_stream.h>
-#include <brillo/streams/stream.h>
 #include <openssl/err.h>
 #include <openssl/pem.h>
 
@@ -35,6 +33,7 @@
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/delta_performer.h"
 #include "update_engine/payload_consumer/payload_constants.h"
+#include "update_engine/payload_consumer/payload_metadata.h"
 #include "update_engine/payload_consumer/payload_verifier.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
 #include "update_engine/payload_generator/payload_file.h"
@@ -53,39 +52,36 @@
 const uint32_t kSignatureMessageLegacyVersion = 1;
 
 // Given raw |signatures|, packs them into a protobuf and serializes it into a
-// binary blob. Returns true on success, false otherwise.
-bool ConvertSignatureToProtobufBlob(const vector<brillo::Blob>& signatures,
-                                    brillo::Blob* out_signature_blob) {
+// string. Returns true on success, false otherwise.
+bool ConvertSignaturesToProtobuf(const vector<brillo::Blob>& signatures,
+                                 string* out_serialized_signature) {
   // Pack it into a protobuf
   Signatures out_message;
   for (const brillo::Blob& signature : signatures) {
-    Signatures_Signature* sig_message = out_message.add_signatures();
+    Signatures::Signature* sig_message = out_message.add_signatures();
     // Set all the signatures with the same version number.
     sig_message->set_version(kSignatureMessageLegacyVersion);
     sig_message->set_data(signature.data(), signature.size());
   }
 
   // Serialize protobuf
-  string serialized;
-  TEST_AND_RETURN_FALSE(out_message.AppendToString(&serialized));
-  out_signature_blob->insert(out_signature_blob->end(),
-                             serialized.begin(),
-                             serialized.end());
-  LOG(INFO) << "Signature blob size: " << out_signature_blob->size();
+  TEST_AND_RETURN_FALSE(
+      out_message.SerializeToString(out_serialized_signature));
+  LOG(INFO) << "Signature blob size: " << out_serialized_signature->size();
   return true;
 }
 
-// Given an unsigned payload under |payload_path| and the |signature_blob| and
-// |metadata_signature_blob| generates an updated payload that includes the
+// Given an unsigned payload under |payload_path| and the |payload_signature|
+// and |metadata_signature|, generates an updated payload that includes the
 // signatures. It populates |out_metadata_size| with the size of the final
 // manifest after adding the dummy signature operation, and
 // |out_signatures_offset| with the expected offset for the new blob, and
-// |out_metadata_signature_size| which will be size of |metadata_signature_blob|
+// |out_metadata_signature_size|, which will be the size of |metadata_signature|
 // if the payload major version supports metadata signature, 0 otherwise.
 // Returns true on success, false otherwise.
 bool AddSignatureBlobToPayload(const string& payload_path,
-                               const brillo::Blob& signature_blob,
-                               const brillo::Blob& metadata_signature_blob,
+                               const string& payload_signature,
+                               const string& metadata_signature,
                                brillo::Blob* out_payload,
                                uint64_t* out_metadata_size,
                                uint32_t* out_metadata_signature_size,
@@ -93,47 +89,43 @@
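   // Payload header layout: kDeltaMagic (4 bytes), major version (8 bytes),
   // manifest size (8 bytes) and, for Brillo payloads, the metadata signature
   // size (4 bytes); the offsets used below index into this header.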
   uint64_t manifest_offset = 20;
   const int kProtobufSizeOffset = 12;
 
-  DeltaArchiveManifest manifest;
-  uint64_t metadata_size, major_version;
-  uint32_t metadata_signature_size;
-  TEST_AND_RETURN_FALSE(
-      PayloadSigner::LoadPayloadMetadata(payload_path,
-                                         nullptr,
-                                         &manifest,
-                                         &major_version,
-                                         &metadata_size,
-                                         &metadata_signature_size));
-
   brillo::Blob payload;
   TEST_AND_RETURN_FALSE(utils::ReadFile(payload_path, &payload));
-
-  if (major_version == kBrilloMajorPayloadVersion) {
+  PayloadMetadata payload_metadata;
+  TEST_AND_RETURN_FALSE(payload_metadata.ParsePayloadHeader(payload));
+  uint64_t metadata_size = payload_metadata.GetMetadataSize();
+  uint32_t metadata_signature_size =
+      payload_metadata.GetMetadataSignatureSize();
+  if (payload_metadata.GetMajorVersion() == kBrilloMajorPayloadVersion) {
     // Write metadata signature size in header.
-    uint32_t metadata_signature_size_be =
-        htobe32(metadata_signature_blob.size());
-    memcpy(payload.data() + manifest_offset, &metadata_signature_size_be,
+    uint32_t metadata_signature_size_be = htobe32(metadata_signature.size());
+    memcpy(payload.data() + manifest_offset,
+           &metadata_signature_size_be,
            sizeof(metadata_signature_size_be));
     manifest_offset += sizeof(metadata_signature_size_be);
     // Replace metadata signature.
     payload.erase(payload.begin() + metadata_size,
                   payload.begin() + metadata_size + metadata_signature_size);
     payload.insert(payload.begin() + metadata_size,
-                   metadata_signature_blob.begin(),
-                   metadata_signature_blob.end());
-    metadata_signature_size = metadata_signature_blob.size();
+                   metadata_signature.begin(),
+                   metadata_signature.end());
+    metadata_signature_size = metadata_signature.size();
     LOG(INFO) << "Metadata signature size: " << metadata_signature_size;
   }
 
+  DeltaArchiveManifest manifest;
+  TEST_AND_RETURN_FALSE(payload_metadata.GetManifest(payload, &manifest));
+
   // Is there already a signature op in place?
   if (manifest.has_signatures_size()) {
     // The signature op is tied to the size of the signature blob, but not its
     // contents. We don't allow the manifest to change if there is already an op
     // present, because that might invalidate previously generated
     // hashes/signatures.
-    if (manifest.signatures_size() != signature_blob.size()) {
+    if (manifest.signatures_size() != payload_signature.size()) {
       LOG(ERROR) << "Attempt to insert different signature sized blob. "
                  << "(current:" << manifest.signatures_size()
-                 << "new:" << signature_blob.size() << ")";
+                 << "new:" << payload_signature.size() << ")";
       return false;
     }
 
@@ -142,8 +134,8 @@
     // Updates the manifest to include the signature operation.
     PayloadSigner::AddSignatureToManifest(
         payload.size() - metadata_size - metadata_signature_size,
-        signature_blob.size(),
-        major_version == kChromeOSMajorPayloadVersion,
+        payload_signature.size(),
+        payload_metadata.GetMajorVersion() == kChromeOSMajorPayloadVersion,
         &manifest);
 
     // Updates the payload to include the new manifest.
@@ -164,13 +156,13 @@
     LOG(INFO) << "Updated payload size: " << payload.size();
     LOG(INFO) << "Updated metadata size: " << metadata_size;
   }
-  uint64_t signatures_offset = metadata_size + metadata_signature_size +
-                               manifest.signatures_offset();
+  uint64_t signatures_offset =
+      metadata_size + metadata_signature_size + manifest.signatures_offset();
   LOG(INFO) << "Signature Blob Offset: " << signatures_offset;
   payload.resize(signatures_offset);
   payload.insert(payload.begin() + signatures_offset,
-                 signature_blob.begin(),
-                 signature_blob.end());
+                 payload_signature.begin(),
+                 payload_signature.end());
 
   *out_payload = std::move(payload);
   *out_metadata_size = metadata_size;
@@ -191,9 +183,8 @@
                               brillo::Blob* out_metadata_hash) {
   if (out_metadata_hash) {
     // Calculates the hash on the manifest.
-    TEST_AND_RETURN_FALSE(
-        HashCalculator::RawHashOfBytes(payload.data(), metadata_size,
-                                       out_metadata_hash));
+    TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfBytes(
+        payload.data(), metadata_size, out_metadata_hash));
   }
   if (out_hash_data) {
     // Calculates the hash on the updated payload. Note that we skip metadata
@@ -231,95 +222,26 @@
     Extent* dummy_extent = dummy_op->add_dst_extents();
     // Tell the dummy op to write this data to a big sparse hole
     dummy_extent->set_start_block(kSparseHole);
-    dummy_extent->set_num_blocks((signature_blob_length + kBlockSize - 1) /
-                                 kBlockSize);
+    dummy_extent->set_num_blocks(
+        utils::DivRoundUp(signature_blob_length, kBlockSize));
   }
 }
 
-bool PayloadSigner::LoadPayloadMetadata(const string& payload_path,
-                                        brillo::Blob* out_payload_metadata,
-                                        DeltaArchiveManifest* out_manifest,
-                                        uint64_t* out_major_version,
-                                        uint64_t* out_metadata_size,
-                                        uint32_t* out_metadata_signature_size) {
-  brillo::StreamPtr payload_file =
-      brillo::FileStream::Open(base::FilePath(payload_path),
-                               brillo::Stream::AccessMode::READ,
-                               brillo::FileStream::Disposition::OPEN_EXISTING,
-                               nullptr);
-  TEST_AND_RETURN_FALSE(payload_file);
-  brillo::Blob payload_metadata;
-
-  payload_metadata.resize(kMaxPayloadHeaderSize);
-  TEST_AND_RETURN_FALSE(payload_file->ReadAllBlocking(
-      payload_metadata.data(), payload_metadata.size(), nullptr));
-
-  const uint8_t* read_pointer = payload_metadata.data();
-  TEST_AND_RETURN_FALSE(
-      memcmp(read_pointer, kDeltaMagic, sizeof(kDeltaMagic)) == 0);
-  read_pointer += sizeof(kDeltaMagic);
-
-  uint64_t major_version;
-  memcpy(&major_version, read_pointer, sizeof(major_version));
-  read_pointer += sizeof(major_version);
-  major_version = be64toh(major_version);
-  TEST_AND_RETURN_FALSE(major_version == kChromeOSMajorPayloadVersion ||
-                        major_version == kBrilloMajorPayloadVersion);
-  if (out_major_version)
-    *out_major_version = major_version;
-
-  uint64_t manifest_size = 0;
-  memcpy(&manifest_size, read_pointer, sizeof(manifest_size));
-  read_pointer += sizeof(manifest_size);
-  manifest_size = be64toh(manifest_size);
-
-  uint32_t metadata_signature_size = 0;
-  if (major_version == kBrilloMajorPayloadVersion) {
-    memcpy(&metadata_signature_size, read_pointer,
-           sizeof(metadata_signature_size));
-    read_pointer += sizeof(metadata_signature_size);
-    metadata_signature_size = be32toh(metadata_signature_size);
-  }
-  if (out_metadata_signature_size)
-    *out_metadata_signature_size = metadata_signature_size;
-
-  uint64_t header_size = read_pointer - payload_metadata.data();
-  uint64_t metadata_size = header_size + manifest_size;
-  if (out_metadata_size)
-    *out_metadata_size = metadata_size;
-
-  size_t bytes_read = payload_metadata.size();
-  payload_metadata.resize(metadata_size);
-  TEST_AND_RETURN_FALSE(
-      payload_file->ReadAllBlocking(payload_metadata.data() + bytes_read,
-                                    payload_metadata.size() - bytes_read,
-                                    nullptr));
-  if (out_manifest) {
-    TEST_AND_RETURN_FALSE(out_manifest->ParseFromArray(
-        payload_metadata.data() + header_size, manifest_size));
-  }
-  if (out_payload_metadata)
-    *out_payload_metadata = std::move(payload_metadata);
-  return true;
-}
-
 bool PayloadSigner::VerifySignedPayload(const string& payload_path,
                                         const string& public_key_path) {
-  DeltaArchiveManifest manifest;
-  uint64_t metadata_size;
-  uint32_t metadata_signature_size;
-  TEST_AND_RETURN_FALSE(LoadPayloadMetadata(payload_path,
-                                            nullptr,
-                                            &manifest,
-                                            nullptr,
-                                            &metadata_size,
-                                            &metadata_signature_size));
   brillo::Blob payload;
   TEST_AND_RETURN_FALSE(utils::ReadFile(payload_path, &payload));
+  PayloadMetadata payload_metadata;
+  TEST_AND_RETURN_FALSE(payload_metadata.ParsePayloadHeader(payload));
+  DeltaArchiveManifest manifest;
+  TEST_AND_RETURN_FALSE(payload_metadata.GetManifest(payload, &manifest));
   TEST_AND_RETURN_FALSE(manifest.has_signatures_offset() &&
                         manifest.has_signatures_size());
-  uint64_t signatures_offset = metadata_size + metadata_signature_size +
-                               manifest.signatures_offset();
+  uint64_t metadata_size = payload_metadata.GetMetadataSize();
+  uint32_t metadata_signature_size =
+      payload_metadata.GetMetadataSignatureSize();
+  uint64_t signatures_offset =
+      metadata_size + metadata_signature_size + manifest.signatures_offset();
   CHECK_EQ(payload.size(), signatures_offset + manifest.signatures_size());
   brillo::Blob payload_hash, metadata_hash;
   TEST_AND_RETURN_FALSE(CalculateHashFromPayload(payload,
@@ -328,19 +250,19 @@
                                                  signatures_offset,
                                                  &payload_hash,
                                                  &metadata_hash));
-  brillo::Blob signature_blob(payload.begin() + signatures_offset,
-                              payload.end());
+  string signature(payload.begin() + signatures_offset, payload.end());
+  string public_key;
+  TEST_AND_RETURN_FALSE(utils::ReadFile(public_key_path, &public_key));
   TEST_AND_RETURN_FALSE(PayloadVerifier::PadRSA2048SHA256Hash(&payload_hash));
-  TEST_AND_RETURN_FALSE(PayloadVerifier::VerifySignature(
-      signature_blob, public_key_path, payload_hash));
+  TEST_AND_RETURN_FALSE(
+      PayloadVerifier::VerifySignature(signature, public_key, payload_hash));
   if (metadata_signature_size) {
-    signature_blob.assign(payload.begin() + metadata_size,
-                          payload.begin() + metadata_size +
-                          metadata_signature_size);
+    signature.assign(payload.begin() + metadata_size,
+                     payload.begin() + metadata_size + metadata_signature_size);
     TEST_AND_RETURN_FALSE(
         PayloadVerifier::PadRSA2048SHA256Hash(&metadata_hash));
-    TEST_AND_RETURN_FALSE(PayloadVerifier::VerifySignature(
-        signature_blob, public_key_path, metadata_hash));
+    TEST_AND_RETURN_FALSE(
+        PayloadVerifier::VerifySignature(signature, public_key, metadata_hash));
   }
   return true;
 }
@@ -384,15 +306,15 @@
 
 bool PayloadSigner::SignHashWithKeys(const brillo::Blob& hash_data,
                                      const vector<string>& private_key_paths,
-                                     brillo::Blob* out_signature_blob) {
+                                     string* out_serialized_signature) {
   vector<brillo::Blob> signatures;
   for (const string& path : private_key_paths) {
     brillo::Blob signature;
     TEST_AND_RETURN_FALSE(SignHash(hash_data, path, &signature));
     signatures.push_back(signature);
   }
-  TEST_AND_RETURN_FALSE(ConvertSignatureToProtobufBlob(signatures,
-                                                       out_signature_blob));
+  TEST_AND_RETURN_FALSE(
+      ConvertSignaturesToProtobuf(signatures, out_serialized_signature));
   return true;
 }
 
@@ -401,7 +323,7 @@
                                 const uint64_t metadata_size,
                                 const uint32_t metadata_signature_size,
                                 const uint64_t signatures_offset,
-                                brillo::Blob* out_signature_blob) {
+                                string* out_serialized_signature) {
   brillo::Blob payload;
   TEST_AND_RETURN_FALSE(utils::ReadFile(unsigned_payload_path, &payload));
   brillo::Blob hash_data;
@@ -411,19 +333,17 @@
                                                  signatures_offset,
                                                  &hash_data,
                                                  nullptr));
-  TEST_AND_RETURN_FALSE(SignHashWithKeys(hash_data,
-                                         private_key_paths,
-                                         out_signature_blob));
+  TEST_AND_RETURN_FALSE(
+      SignHashWithKeys(hash_data, private_key_paths, out_serialized_signature));
   return true;
 }
 
 bool PayloadSigner::SignatureBlobLength(const vector<string>& private_key_paths,
                                         uint64_t* out_length) {
   DCHECK(out_length);
-  brillo::Blob x_blob(1, 'x'), hash_blob, sig_blob;
-  TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfBytes(x_blob.data(),
-                                                       x_blob.size(),
-                                                       &hash_blob));
+  brillo::Blob hash_blob;
+  TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfData({'x'}, &hash_blob));
+  string sig_blob;
   TEST_AND_RETURN_FALSE(
       SignHashWithKeys(hash_blob, private_key_paths, &sig_blob));
   *out_length = sig_blob.size();
@@ -440,17 +360,16 @@
   for (int signature_size : signature_sizes) {
     signatures.emplace_back(signature_size, 0);
   }
-  brillo::Blob signature_blob;
-  TEST_AND_RETURN_FALSE(ConvertSignatureToProtobufBlob(signatures,
-                                                       &signature_blob));
+  string signature;
+  TEST_AND_RETURN_FALSE(ConvertSignaturesToProtobuf(signatures, &signature));
 
   brillo::Blob payload;
   uint64_t metadata_size, signatures_offset;
   uint32_t metadata_signature_size;
   // Prepare payload for hashing.
   TEST_AND_RETURN_FALSE(AddSignatureBlobToPayload(payload_path,
-                                                  signature_blob,
-                                                  signature_blob,
+                                                  signature,
+                                                  signature,
                                                   &payload,
                                                   &metadata_size,
                                                   &metadata_signature_size,
@@ -469,33 +388,31 @@
     const vector<brillo::Blob>& payload_signatures,
     const vector<brillo::Blob>& metadata_signatures,
     const string& signed_payload_path,
-    uint64_t *out_metadata_size) {
+    uint64_t* out_metadata_size) {
   // TODO(petkov): Reduce memory usage -- the payload is manipulated in memory.
 
   // Loads the payload and adds the signature op to it.
-  brillo::Blob signature_blob, metadata_signature_blob;
-  TEST_AND_RETURN_FALSE(ConvertSignatureToProtobufBlob(payload_signatures,
-                                                       &signature_blob));
+  string payload_signature, metadata_signature;
+  TEST_AND_RETURN_FALSE(
+      ConvertSignaturesToProtobuf(payload_signatures, &payload_signature));
   if (!metadata_signatures.empty()) {
     TEST_AND_RETURN_FALSE(
-        ConvertSignatureToProtobufBlob(metadata_signatures,
-                                       &metadata_signature_blob));
+        ConvertSignaturesToProtobuf(metadata_signatures, &metadata_signature));
   }
   brillo::Blob payload;
   uint64_t signatures_offset;
   uint32_t metadata_signature_size;
   TEST_AND_RETURN_FALSE(AddSignatureBlobToPayload(payload_path,
-                                                  signature_blob,
-                                                  metadata_signature_blob,
+                                                  payload_signature,
+                                                  metadata_signature,
                                                   &payload,
                                                   out_metadata_size,
                                                   &metadata_signature_size,
                                                   &signatures_offset));
 
   LOG(INFO) << "Signed payload size: " << payload.size();
-  TEST_AND_RETURN_FALSE(utils::WriteFile(signed_payload_path.c_str(),
-                                         payload.data(),
-                                         payload.size()));
+  TEST_AND_RETURN_FALSE(utils::WriteFile(
+      signed_payload_path.c_str(), payload.data(), payload.size()));
   return true;
 }
 
@@ -506,14 +423,11 @@
   // Calculates the hash on the updated payload. Note that the payload includes
   // the signature op but doesn't include the signature blob at the end.
   brillo::Blob metadata_hash;
-  TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfBytes(metadata,
-                                                       metadata_size,
-                                                       &metadata_hash));
+  TEST_AND_RETURN_FALSE(
+      HashCalculator::RawHashOfBytes(metadata, metadata_size, &metadata_hash));
 
   brillo::Blob signature;
-  TEST_AND_RETURN_FALSE(SignHash(metadata_hash,
-                                 private_key_path,
-                                 &signature));
+  TEST_AND_RETURN_FALSE(SignHash(metadata_hash, private_key_path, &signature));
 
   *out_signature = brillo::data_encoding::Base64Encode(signature);
   return true;
@@ -521,20 +435,15 @@
 
 bool PayloadSigner::ExtractPayloadProperties(
     const string& payload_path, brillo::KeyValueStore* properties) {
-  DeltaArchiveManifest manifest;
-  brillo::Blob payload_metadata;
-  uint64_t major_version, metadata_size;
-  uint32_t metadata_signature_size;
-  uint64_t file_size = utils::FileSize(payload_path);
-
+  brillo::Blob payload;
   TEST_AND_RETURN_FALSE(
-      PayloadSigner::LoadPayloadMetadata(payload_path,
-                                         &payload_metadata,
-                                         &manifest,
-                                         &major_version,
-                                         &metadata_size,
-                                         &metadata_signature_size));
+      utils::ReadFileChunk(payload_path, 0, kMaxPayloadHeaderSize, &payload));
 
+  PayloadMetadata payload_metadata;
+  TEST_AND_RETURN_FALSE(payload_metadata.ParsePayloadHeader(payload));
+  uint64_t metadata_size = payload_metadata.GetMetadataSize();
+
+  uint64_t file_size = utils::FileSize(payload_path);
   properties->SetString(kPayloadPropertyFileSize, std::to_string(file_size));
   properties->SetString(kPayloadPropertyMetadataSize,
                         std::to_string(metadata_size));
@@ -543,8 +452,10 @@
   TEST_AND_RETURN_FALSE(
       HashCalculator::RawHashOfFile(payload_path, file_size, &file_hash) ==
       static_cast<off_t>(file_size));
-  TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfBytes(
-      payload_metadata.data(), payload_metadata.size(), &metadata_hash));
+
+  TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfFile(
+                            payload_path, metadata_size, &metadata_hash) ==
+                        static_cast<off_t>(metadata_size));
 
   properties->SetString(kPayloadPropertyFileHash,
                         brillo::data_encoding::Base64Encode(file_hash));
diff --git a/payload_generator/payload_signer.h b/payload_generator/payload_signer.h
index 00e32fa..7854e12 100644
--- a/payload_generator/payload_signer.h
+++ b/payload_generator/payload_signer.h
@@ -33,20 +33,6 @@
 
 class PayloadSigner {
  public:
-  // Reads the payload metadata from the given |payload_path| into the
-  // |out_payload_metadata| vector if not null. It also parses the manifest
-  // protobuf in the payload and returns it in |out_manifest| if not null, along
-  // with the major version of the payload in |out_major_version| if not null,
-  // the size of the entire metadata in |out_metadata_size| and the size of
-  // metadata signature in |out_metadata_signature_size| if not null. Returns
-  // whether a valid payload metadata was found and parsed.
-  static bool LoadPayloadMetadata(const std::string& payload_path,
-                                  brillo::Blob* out_payload_metadata,
-                                  DeltaArchiveManifest* out_manifest,
-                                  uint64_t* out_major_version,
-                                  uint64_t* out_metadata_size,
-                                  uint32_t* out_metadata_signature_size);
-
   // Returns true if the payload in |payload_path| is signed and its hash can be
   // verified using the public key in |public_key_path| with the signature
   // of a given version in the signature blob. Returns false otherwise.
@@ -68,17 +54,17 @@
                        brillo::Blob* out_signature);
 
   // Sign |hash_data| blob with all private keys in |private_key_paths|, then
-  // convert the signatures to protobuf blob.
+  // convert the signatures to a serialized protobuf.
   static bool SignHashWithKeys(
       const brillo::Blob& hash_data,
       const std::vector<std::string>& private_key_paths,
-      brillo::Blob* out_signature_blob);
+      std::string* out_serialized_signature);
 
   // Given an unsigned payload in |unsigned_payload_path|, private keys in
   // |private_key_path|, metadata size in |metadata_size|, metadata signature
   // size in |metadata_signature_size| and signatures offset in
   // |signatures_offset|, calculates the payload signature blob into
-  // |out_signature_blob|. Note that the payload must already have an
+  // |out_serialized_signature|. Note that the payload must already have an
   // updated manifest that includes the dummy signature op and correct metadata
   // signature size in header. Returns true on success, false otherwise.
   static bool SignPayload(const std::string& unsigned_payload_path,
@@ -86,13 +72,12 @@
                           const uint64_t metadata_size,
                           const uint32_t metadata_signature_size,
                           const uint64_t signatures_offset,
-                          brillo::Blob* out_signature_blob);
+                          std::string* out_serialized_signature);
 
-  // Returns the length of out_signature_blob that will result in a call
+  // Returns the length of out_serialized_signature that results from a call
   // to SignPayload with the given private keys. Returns true on success.
   static bool SignatureBlobLength(
-      const std::vector<std::string>& private_key_paths,
-      uint64_t* out_length);
+      const std::vector<std::string>& private_key_paths, uint64_t* out_length);
 
   // Given an unsigned payload in |payload_path|,
   // this method does two things:
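
The out_serialized_signature strings introduced in this header are wire-format
serializations of the Signatures protobuf message. Here is a hedged sketch of
what that packing presumably looks like, assuming the generated
update_metadata.pb.h from this repository; the helper name SerializeSignatures
is made up, and the field accessors follow the unittest below, which expects
version() == 1.

    #include <string>
    #include <vector>

    #include "update_engine/update_metadata.pb.h"  // generated protobuf header

    namespace chromeos_update_engine {

    // Packs one raw RSA signature per key into a Signatures message and
    // returns its wire-format serialization. This mirrors, but is not copied
    // from, the internal ConvertSignaturesToProtobuf helper.
    bool SerializeSignatures(const std::vector<std::string>& raw_signatures,
                             std::string* out_serialized) {
      Signatures message;
      for (const std::string& raw : raw_signatures) {
        Signatures::Signature* sig = message.add_signatures();
        sig->set_version(1);  // the unittest below checks version() == 1
        sig->set_data(raw);
      }
      return message.SerializeToString(out_serialized);
    }

    }  // namespace chromeos_update_engine
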
diff --git a/payload_generator/payload_signer_unittest.cc b/payload_generator/payload_signer_unittest.cc
index 62b6e7a..0b863b1 100644
--- a/payload_generator/payload_signer_unittest.cc
+++ b/payload_generator/payload_signer_unittest.cc
@@ -51,12 +51,10 @@
 // Generated by:
 // echo -n 'This is some data to sign.' | openssl dgst -sha256 -binary |
 //   hexdump -v -e '" " 8/1 "0x%02x, " "\n"'
-const uint8_t kDataHash[] = {
-  0x7a, 0x07, 0xa6, 0x44, 0x08, 0x86, 0x20, 0xa6,
-  0xc1, 0xf8, 0xd9, 0x02, 0x05, 0x63, 0x0d, 0xb7,
-  0xfc, 0x2b, 0xa0, 0xa9, 0x7c, 0x9d, 0x1d, 0x8c,
-  0x01, 0xf5, 0x78, 0x6d, 0xc5, 0x11, 0xb4, 0x06
-};
+const uint8_t kDataHash[] = {0x7a, 0x07, 0xa6, 0x44, 0x08, 0x86, 0x20, 0xa6,
+                             0xc1, 0xf8, 0xd9, 0x02, 0x05, 0x63, 0x0d, 0xb7,
+                             0xfc, 0x2b, 0xa0, 0xa9, 0x7c, 0x9d, 0x1d, 0x8c,
+                             0x01, 0xf5, 0x78, 0x6d, 0xc5, 0x11, 0xb4, 0x06};
 
 // Generated with openssl 1.0, which at the time of this writing, you need
 // to download and install yourself. Here's my command:
@@ -64,57 +62,40 @@
 //    ~/local/bin/openssl pkeyutl -sign -inkey unittest_key.pem -pkeyopt
 //    digest:sha256 | hexdump -v -e '" " 8/1 "0x%02x, " "\n"'
 const uint8_t kDataSignature[] = {
-  0x9f, 0x86, 0x25, 0x8b, 0xf3, 0xcc, 0xe3, 0x95,
-  0x5f, 0x45, 0x83, 0xb2, 0x66, 0xf0, 0x2a, 0xcf,
-  0xb7, 0xaa, 0x52, 0x25, 0x7a, 0xdd, 0x9d, 0x65,
-  0xe5, 0xd6, 0x02, 0x4b, 0x37, 0x99, 0x53, 0x06,
-  0xc2, 0xc9, 0x37, 0x36, 0x25, 0x62, 0x09, 0x4f,
-  0x6b, 0x22, 0xf8, 0xb3, 0x89, 0x14, 0x98, 0x1a,
-  0xbc, 0x30, 0x90, 0x4a, 0x43, 0xf5, 0xea, 0x2e,
-  0xf0, 0xa4, 0xba, 0xc3, 0xa7, 0xa3, 0x44, 0x70,
-  0xd6, 0xc4, 0x89, 0xd8, 0x45, 0x71, 0xbb, 0xee,
-  0x59, 0x87, 0x3d, 0xd5, 0xe5, 0x40, 0x22, 0x3d,
-  0x73, 0x7e, 0x2a, 0x58, 0x93, 0x8e, 0xcb, 0x9c,
-  0xf2, 0xbb, 0x4a, 0xc9, 0xd2, 0x2c, 0x52, 0x42,
-  0xb0, 0xd1, 0x13, 0x22, 0xa4, 0x78, 0xc7, 0xc6,
-  0x3e, 0xf1, 0xdc, 0x4c, 0x7b, 0x2d, 0x40, 0xda,
-  0x58, 0xac, 0x4a, 0x11, 0x96, 0x3d, 0xa0, 0x01,
-  0xf6, 0x96, 0x74, 0xf6, 0x6c, 0x0c, 0x49, 0x69,
-  0x4e, 0xc1, 0x7e, 0x9f, 0x2a, 0x42, 0xdd, 0x15,
-  0x6b, 0x37, 0x2e, 0x3a, 0xa7, 0xa7, 0x6d, 0x91,
-  0x13, 0xe8, 0x59, 0xde, 0xfe, 0x99, 0x07, 0xd9,
-  0x34, 0x0f, 0x17, 0xb3, 0x05, 0x4c, 0xd2, 0xc6,
-  0x82, 0xb7, 0x38, 0x36, 0x63, 0x1d, 0x9e, 0x21,
-  0xa6, 0x32, 0xef, 0xf1, 0x65, 0xe6, 0xed, 0x95,
-  0x25, 0x9b, 0x61, 0xe0, 0xba, 0x86, 0xa1, 0x7f,
-  0xf8, 0xa5, 0x4a, 0x32, 0x1f, 0x15, 0x20, 0x8a,
-  0x41, 0xc5, 0xb0, 0xd9, 0x4a, 0xda, 0x85, 0xf3,
-  0xdc, 0xa0, 0x98, 0x5d, 0x1d, 0x18, 0x9d, 0x2e,
-  0x42, 0xea, 0x69, 0x13, 0x74, 0x3c, 0x74, 0xf7,
-  0x6d, 0x43, 0xb0, 0x63, 0x90, 0xdb, 0x04, 0xd5,
-  0x05, 0xc9, 0x73, 0x1f, 0x6c, 0xd6, 0xfa, 0x46,
-  0x4e, 0x0f, 0x33, 0x58, 0x5b, 0x0d, 0x1b, 0x55,
-  0x39, 0xb9, 0x0f, 0x43, 0x37, 0xc0, 0x06, 0x0c,
-  0x29, 0x93, 0x43, 0xc7, 0x43, 0xb9, 0xab, 0x7d
-};
+    0x9f, 0x86, 0x25, 0x8b, 0xf3, 0xcc, 0xe3, 0x95, 0x5f, 0x45, 0x83, 0xb2,
+    0x66, 0xf0, 0x2a, 0xcf, 0xb7, 0xaa, 0x52, 0x25, 0x7a, 0xdd, 0x9d, 0x65,
+    0xe5, 0xd6, 0x02, 0x4b, 0x37, 0x99, 0x53, 0x06, 0xc2, 0xc9, 0x37, 0x36,
+    0x25, 0x62, 0x09, 0x4f, 0x6b, 0x22, 0xf8, 0xb3, 0x89, 0x14, 0x98, 0x1a,
+    0xbc, 0x30, 0x90, 0x4a, 0x43, 0xf5, 0xea, 0x2e, 0xf0, 0xa4, 0xba, 0xc3,
+    0xa7, 0xa3, 0x44, 0x70, 0xd6, 0xc4, 0x89, 0xd8, 0x45, 0x71, 0xbb, 0xee,
+    0x59, 0x87, 0x3d, 0xd5, 0xe5, 0x40, 0x22, 0x3d, 0x73, 0x7e, 0x2a, 0x58,
+    0x93, 0x8e, 0xcb, 0x9c, 0xf2, 0xbb, 0x4a, 0xc9, 0xd2, 0x2c, 0x52, 0x42,
+    0xb0, 0xd1, 0x13, 0x22, 0xa4, 0x78, 0xc7, 0xc6, 0x3e, 0xf1, 0xdc, 0x4c,
+    0x7b, 0x2d, 0x40, 0xda, 0x58, 0xac, 0x4a, 0x11, 0x96, 0x3d, 0xa0, 0x01,
+    0xf6, 0x96, 0x74, 0xf6, 0x6c, 0x0c, 0x49, 0x69, 0x4e, 0xc1, 0x7e, 0x9f,
+    0x2a, 0x42, 0xdd, 0x15, 0x6b, 0x37, 0x2e, 0x3a, 0xa7, 0xa7, 0x6d, 0x91,
+    0x13, 0xe8, 0x59, 0xde, 0xfe, 0x99, 0x07, 0xd9, 0x34, 0x0f, 0x17, 0xb3,
+    0x05, 0x4c, 0xd2, 0xc6, 0x82, 0xb7, 0x38, 0x36, 0x63, 0x1d, 0x9e, 0x21,
+    0xa6, 0x32, 0xef, 0xf1, 0x65, 0xe6, 0xed, 0x95, 0x25, 0x9b, 0x61, 0xe0,
+    0xba, 0x86, 0xa1, 0x7f, 0xf8, 0xa5, 0x4a, 0x32, 0x1f, 0x15, 0x20, 0x8a,
+    0x41, 0xc5, 0xb0, 0xd9, 0x4a, 0xda, 0x85, 0xf3, 0xdc, 0xa0, 0x98, 0x5d,
+    0x1d, 0x18, 0x9d, 0x2e, 0x42, 0xea, 0x69, 0x13, 0x74, 0x3c, 0x74, 0xf7,
+    0x6d, 0x43, 0xb0, 0x63, 0x90, 0xdb, 0x04, 0xd5, 0x05, 0xc9, 0x73, 0x1f,
+    0x6c, 0xd6, 0xfa, 0x46, 0x4e, 0x0f, 0x33, 0x58, 0x5b, 0x0d, 0x1b, 0x55,
+    0x39, 0xb9, 0x0f, 0x43, 0x37, 0xc0, 0x06, 0x0c, 0x29, 0x93, 0x43, 0xc7,
+    0x43, 0xb9, 0xab, 0x7d};
 
 namespace {
-void SignSampleData(brillo::Blob* out_signature_blob,
-                    const vector<string>& private_keys) {
-  brillo::Blob data_blob(std::begin(kDataToSign),
-                         std::begin(kDataToSign) + strlen(kDataToSign));
+void SignSampleData(string* out_signature, const vector<string>& private_keys) {
   uint64_t length = 0;
   EXPECT_TRUE(PayloadSigner::SignatureBlobLength(private_keys, &length));
   EXPECT_GT(length, 0U);
   brillo::Blob hash_blob;
-  EXPECT_TRUE(HashCalculator::RawHashOfBytes(data_blob.data(),
-                                             data_blob.size(),
-                                             &hash_blob));
-  EXPECT_TRUE(PayloadSigner::SignHashWithKeys(
-      hash_blob,
-      private_keys,
-      out_signature_blob));
-  EXPECT_EQ(length, out_signature_blob->size());
+  EXPECT_TRUE(HashCalculator::RawHashOfBytes(
+      kDataToSign, strlen(kDataToSign), &hash_blob));
+  EXPECT_TRUE(
+      PayloadSigner::SignHashWithKeys(hash_blob, private_keys, out_signature));
+  EXPECT_EQ(length, out_signature->size());
 }
 }  // namespace
 
@@ -124,57 +105,20 @@
     PayloadVerifier::PadRSA2048SHA256Hash(&padded_hash_data_);
   }
 
-  void DoWriteAndLoadPayloadTest(const PayloadGenerationConfig& config) {
-    PayloadFile payload;
-    payload.Init(config);
-    string payload_path;
-    EXPECT_TRUE(utils::MakeTempFile("payload.XXXXXX", &payload_path, nullptr));
-    ScopedPathUnlinker payload_path_unlinker(payload_path);
-    uint64_t metadata_size;
-    EXPECT_TRUE(
-        payload.WritePayload(payload_path, "/dev/null", "", &metadata_size));
-    brillo::Blob payload_metadata_blob;
-    DeltaArchiveManifest manifest;
-    uint64_t load_metadata_size, load_major_version;
-    EXPECT_TRUE(PayloadSigner::LoadPayloadMetadata(payload_path,
-                                                   &payload_metadata_blob,
-                                                   &manifest,
-                                                   &load_major_version,
-                                                   &load_metadata_size,
-                                                   nullptr));
-    EXPECT_EQ(metadata_size, payload_metadata_blob.size());
-    EXPECT_EQ(config.version.major, load_major_version);
-    EXPECT_EQ(metadata_size, load_metadata_size);
-  }
-
   brillo::Blob padded_hash_data_{std::begin(kDataHash), std::end(kDataHash)};
 };
 
-TEST_F(PayloadSignerTest, LoadPayloadV1Test) {
-  PayloadGenerationConfig config;
-  config.version.major = kChromeOSMajorPayloadVersion;
-  DoWriteAndLoadPayloadTest(config);
-}
-
-TEST_F(PayloadSignerTest, LoadPayloadV2Test) {
-  PayloadGenerationConfig config;
-  config.version.major = kBrilloMajorPayloadVersion;
-  DoWriteAndLoadPayloadTest(config);
-}
-
 TEST_F(PayloadSignerTest, SignSimpleTextTest) {
-  brillo::Blob signature_blob;
-  SignSampleData(&signature_blob,
-                 {GetBuildArtifactsPath(kUnittestPrivateKeyPath)});
+  string signature;
+  SignSampleData(&signature, {GetBuildArtifactsPath(kUnittestPrivateKeyPath)});
 
   // Check the signature itself
   Signatures signatures;
-  EXPECT_TRUE(signatures.ParseFromArray(signature_blob.data(),
-                                        signature_blob.size()));
+  EXPECT_TRUE(signatures.ParseFromString(signature));
   EXPECT_EQ(1, signatures.signatures_size());
-  const Signatures_Signature& signature = signatures.signatures(0);
-  EXPECT_EQ(1U, signature.version());
-  const string& sig_data = signature.data();
+  const Signatures::Signature& sig = signatures.signatures(0);
+  EXPECT_EQ(1U, sig.version());
+  const string& sig_data = sig.data();
   ASSERT_EQ(arraysize(kDataSignature), sig_data.size());
   for (size_t i = 0; i < arraysize(kDataSignature); i++) {
     EXPECT_EQ(kDataSignature[i], static_cast<uint8_t>(sig_data[i]));
@@ -182,83 +126,80 @@
 }
 
 TEST_F(PayloadSignerTest, VerifyAllSignatureTest) {
-  brillo::Blob signature_blob;
-  SignSampleData(&signature_blob,
+  string signature;
+  SignSampleData(&signature,
                  {GetBuildArtifactsPath(kUnittestPrivateKeyPath),
                   GetBuildArtifactsPath(kUnittestPrivateKey2Path)});
 
   // Either public key should pass the verification.
+  string public_key;
+  EXPECT_TRUE(utils::ReadFile(GetBuildArtifactsPath(kUnittestPublicKeyPath),
+                              &public_key));
   EXPECT_TRUE(PayloadVerifier::VerifySignature(
-      signature_blob,
-      GetBuildArtifactsPath(kUnittestPublicKeyPath),
-      padded_hash_data_));
+      signature, public_key, padded_hash_data_));
+  EXPECT_TRUE(utils::ReadFile(GetBuildArtifactsPath(kUnittestPublicKey2Path),
+                              &public_key));
   EXPECT_TRUE(PayloadVerifier::VerifySignature(
-      signature_blob,
-      GetBuildArtifactsPath(kUnittestPublicKey2Path),
-      padded_hash_data_));
+      signature, public_key, padded_hash_data_));
 }
 
 TEST_F(PayloadSignerTest, VerifySignatureTest) {
-  brillo::Blob signature_blob;
-  SignSampleData(&signature_blob,
-                 {GetBuildArtifactsPath(kUnittestPrivateKeyPath)});
+  string signature;
+  SignSampleData(&signature, {GetBuildArtifactsPath(kUnittestPrivateKeyPath)});
 
+  string public_key;
+  EXPECT_TRUE(utils::ReadFile(GetBuildArtifactsPath(kUnittestPublicKeyPath),
+                              &public_key));
   EXPECT_TRUE(PayloadVerifier::VerifySignature(
-      signature_blob,
-      GetBuildArtifactsPath(kUnittestPublicKeyPath),
-      padded_hash_data_));
+      signature, public_key, padded_hash_data_));
   // Passing the invalid key should fail the verification.
-  EXPECT_FALSE(PayloadVerifier::VerifySignature(
-      signature_blob,
-      GetBuildArtifactsPath(kUnittestPublicKey2Path),
-      padded_hash_data_));
+  EXPECT_TRUE(utils::ReadFile(GetBuildArtifactsPath(kUnittestPublicKey2Path),
+                              &public_key));
+  EXPECT_FALSE(PayloadVerifier::VerifySignature(
+      signature, public_key, padded_hash_data_));
 }
 
 TEST_F(PayloadSignerTest, SkipMetadataSignatureTest) {
-  string payload_path;
-  EXPECT_TRUE(utils::MakeTempFile("payload.XXXXXX", &payload_path, nullptr));
-  ScopedPathUnlinker payload_path_unlinker(payload_path);
-
+  test_utils::ScopedTempFile payload_file("payload.XXXXXX");
   PayloadGenerationConfig config;
   config.version.major = kBrilloMajorPayloadVersion;
   PayloadFile payload;
   EXPECT_TRUE(payload.Init(config));
   uint64_t metadata_size;
-  EXPECT_TRUE(
-      payload.WritePayload(payload_path, "/dev/null", "", &metadata_size));
+  EXPECT_TRUE(payload.WritePayload(
+      payload_file.path(), "/dev/null", "", &metadata_size));
   const vector<int> sizes = {256};
   brillo::Blob unsigned_payload_hash, unsigned_metadata_hash;
-  EXPECT_TRUE(PayloadSigner::HashPayloadForSigning(
-      payload_path, sizes, &unsigned_payload_hash, &unsigned_metadata_hash));
+  EXPECT_TRUE(PayloadSigner::HashPayloadForSigning(payload_file.path(),
+                                                   sizes,
+                                                   &unsigned_payload_hash,
+                                                   &unsigned_metadata_hash));
   EXPECT_TRUE(
-      payload.WritePayload(payload_path,
+      payload.WritePayload(payload_file.path(),
                            "/dev/null",
                            GetBuildArtifactsPath(kUnittestPrivateKeyPath),
                            &metadata_size));
   brillo::Blob signed_payload_hash, signed_metadata_hash;
   EXPECT_TRUE(PayloadSigner::HashPayloadForSigning(
-      payload_path, sizes, &signed_payload_hash, &signed_metadata_hash));
+      payload_file.path(), sizes, &signed_payload_hash, &signed_metadata_hash));
   EXPECT_EQ(unsigned_payload_hash, signed_payload_hash);
   EXPECT_EQ(unsigned_metadata_hash, signed_metadata_hash);
 }
 
 TEST_F(PayloadSignerTest, VerifySignedPayloadTest) {
-  string payload_path;
-  EXPECT_TRUE(utils::MakeTempFile("payload.XXXXXX", &payload_path, nullptr));
-  ScopedPathUnlinker payload_path_unlinker(payload_path);
-
+  test_utils::ScopedTempFile payload_file("payload.XXXXXX");
   PayloadGenerationConfig config;
   config.version.major = kBrilloMajorPayloadVersion;
   PayloadFile payload;
   EXPECT_TRUE(payload.Init(config));
   uint64_t metadata_size;
   EXPECT_TRUE(
-      payload.WritePayload(payload_path,
+      payload.WritePayload(payload_file.path(),
                            "/dev/null",
                            GetBuildArtifactsPath(kUnittestPrivateKeyPath),
                            &metadata_size));
   EXPECT_TRUE(PayloadSigner::VerifySignedPayload(
-      payload_path, GetBuildArtifactsPath(kUnittestPublicKeyPath)));
+      payload_file.path(), GetBuildArtifactsPath(kUnittestPublicKeyPath)));
 }
 
 }  // namespace chromeos_update_engine
diff --git a/payload_generator/raw_filesystem.cc b/payload_generator/raw_filesystem.cc
index 2fb1400..f1f0890 100644
--- a/payload_generator/raw_filesystem.cc
+++ b/payload_generator/raw_filesystem.cc
@@ -16,6 +16,8 @@
 
 #include "update_engine/payload_generator/raw_filesystem.h"
 
+#include <memory>
+
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_generator/extent_ranges.h"
 #include "update_engine/update_metadata.pb.h"
@@ -24,8 +26,9 @@
 
 namespace chromeos_update_engine {
 
-unique_ptr<RawFilesystem> RawFilesystem::Create(
-      const std::string& filename, uint64_t block_size, uint64_t block_count) {
+unique_ptr<RawFilesystem> RawFilesystem::Create(const std::string& filename,
+                                                uint64_t block_size,
+                                                uint64_t block_count) {
   unique_ptr<RawFilesystem> result(new RawFilesystem());
   result->filename_ = filename;
   result->block_size_ = block_size;
@@ -45,7 +48,7 @@
   files->clear();
   File file;
   file.name = filename_;
-  file.extents = { ExtentForRange(0, block_count_) };
+  file.extents = {ExtentForRange(0, block_count_)};
   files->push_back(file);
   return true;
 }
diff --git a/payload_generator/raw_filesystem.h b/payload_generator/raw_filesystem.h
index 0aecd81..0ecbc9c 100644
--- a/payload_generator/raw_filesystem.h
+++ b/payload_generator/raw_filesystem.h
@@ -22,6 +22,7 @@
 
 #include "update_engine/payload_generator/filesystem_interface.h"
 
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -29,8 +30,9 @@
 
 class RawFilesystem : public FilesystemInterface {
  public:
-  static std::unique_ptr<RawFilesystem> Create(
-      const std::string& filename, uint64_t block_size, uint64_t block_count);
+  static std::unique_ptr<RawFilesystem> Create(const std::string& filename,
+                                               uint64_t block_size,
+                                               uint64_t block_count);
   virtual ~RawFilesystem() = default;
 
   // FilesystemInterface overrides.
diff --git a/payload_generator/squashfs_filesystem.cc b/payload_generator/squashfs_filesystem.cc
index c98ad12..6c892f5 100644
--- a/payload_generator/squashfs_filesystem.cc
+++ b/payload_generator/squashfs_filesystem.cc
@@ -44,14 +44,6 @@
 
 namespace {
 
-Extent ExtentForBytes(uint64_t block_size,
-                      uint64_t start_bytes,
-                      uint64_t size_bytes) {
-  uint64_t start_block = start_bytes / block_size;
-  uint64_t end_block = (start_bytes + size_bytes + block_size - 1) / block_size;
-  return ExtentForRange(start_block, end_block - start_block);
-}
-
 // The size of the squashfs super block.
 constexpr size_t kSquashfsSuperBlockSize = 96;
 constexpr uint64_t kSquashfsCompressedBit = 1 << 24;
@@ -192,8 +184,7 @@
   for (const auto& file : files_) {
     file_extents.AddExtents(file.extents);
   }
-  vector<Extent> full = {
-      ExtentForRange(0, (size_ + kBlockSize - 1) / kBlockSize)};
+  vector<Extent> full = {ExtentForBytes(kBlockSize, 0, size_)};
   auto metadata_extents = FilterExtentRanges(full, file_extents);
   // For now there should be at most two extents. One for superblock and one for
   // metadata at the end. Just create appropriate files with <metadata-i> name.
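
The ExtentForBytes(kBlockSize, 0, size_) call above replaces the rounding that
the deleted local helper performed: a byte range is mapped onto whole blocks
with the end rounded up. A tiny standalone illustration of that arithmetic
(the block size and byte counts are arbitrary):

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    // Same rounding the removed helper performed: express
    // [start_bytes, start_bytes + size_bytes) as whole blocks, end rounded up.
    static void ByteRangeToBlocks(uint64_t block_size,
                                  uint64_t start_bytes,
                                  uint64_t size_bytes,
                                  uint64_t* start_block,
                                  uint64_t* num_blocks) {
      *start_block = start_bytes / block_size;
      uint64_t end_block =
          (start_bytes + size_bytes + block_size - 1) / block_size;
      *num_blocks = end_block - *start_block;
    }

    int main() {
      const uint64_t kIllustrativeBlockSize = 4096;
      uint64_t start, count;
      // A 10000-byte filesystem starting at byte 0 covers blocks 0..2.
      ByteRangeToBlocks(kIllustrativeBlockSize, 0, 10000, &start, &count);
      std::cout << "start block " << start << ", " << count << " blocks\n";
      assert(start == 0 && count == 3);
      return 0;
    }
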
diff --git a/payload_generator/tarjan.cc b/payload_generator/tarjan.cc
index 98e29f9..2d4ca31 100644
--- a/payload_generator/tarjan.cc
+++ b/payload_generator/tarjan.cc
@@ -19,8 +19,7 @@
 #include <vector>
 
 #include <base/logging.h>
-
-#include "update_engine/common/utils.h"
+#include <base/stl_util.h>
 
 using std::min;
 using std::vector;
@@ -53,15 +52,16 @@
   index_++;
   stack_.push_back(vertex);
   for (Vertex::EdgeMap::iterator it = (*graph)[vertex].out_edges.begin();
-       it != (*graph)[vertex].out_edges.end(); ++it) {
+       it != (*graph)[vertex].out_edges.end();
+       ++it) {
     Vertex::Index vertex_next = it->first;
     if ((*graph)[vertex_next].index == kInvalidIndex) {
       Tarjan(vertex_next, graph);
-      (*graph)[vertex].lowlink = min((*graph)[vertex].lowlink,
-                                     (*graph)[vertex_next].lowlink);
-    } else if (utils::VectorContainsValue(stack_, vertex_next)) {
-      (*graph)[vertex].lowlink = min((*graph)[vertex].lowlink,
-                                     (*graph)[vertex_next].index);
+      (*graph)[vertex].lowlink =
+          min((*graph)[vertex].lowlink, (*graph)[vertex_next].lowlink);
+    } else if (base::ContainsValue(stack_, vertex_next)) {
+      (*graph)[vertex].lowlink =
+          min((*graph)[vertex].lowlink, (*graph)[vertex_next].index);
     }
   }
   if ((*graph)[vertex].lowlink == (*graph)[vertex].index) {
@@ -73,7 +73,7 @@
       component.push_back(other_vertex);
     } while (other_vertex != vertex && !stack_.empty());
 
-    if (utils::VectorContainsValue(component, required_vertex_)) {
+    if (base::ContainsValue(component, required_vertex_)) {
       components_.resize(components_.size() + 1);
       component.swap(components_.back());
     }
diff --git a/payload_generator/tarjan.h b/payload_generator/tarjan.h
index 50cf563..39ac4e4 100644
--- a/payload_generator/tarjan.h
+++ b/payload_generator/tarjan.h
@@ -38,6 +38,7 @@
   void Execute(Vertex::Index vertex,
                Graph* graph,
                std::vector<Vertex::Index>* out);
+
  private:
   void Tarjan(Vertex::Index vertex, Graph* graph);
 
diff --git a/payload_generator/tarjan_unittest.cc b/payload_generator/tarjan_unittest.cc
index c29cbdc..b271227 100644
--- a/payload_generator/tarjan_unittest.cc
+++ b/payload_generator/tarjan_unittest.cc
@@ -20,9 +20,9 @@
 #include <utility>
 
 #include <base/logging.h>
+#include <base/stl_util.h>
 #include <gtest/gtest.h>
 
-#include "update_engine/common/utils.h"
 #include "update_engine/payload_generator/graph_types.h"
 
 using std::make_pair;
@@ -66,11 +66,11 @@
     tarjan.Execute(i, &graph, &vertex_indexes);
 
     EXPECT_EQ(5U, vertex_indexes.size());
-    EXPECT_TRUE(utils::VectorContainsValue(vertex_indexes, n_a));
-    EXPECT_TRUE(utils::VectorContainsValue(vertex_indexes, n_b));
-    EXPECT_TRUE(utils::VectorContainsValue(vertex_indexes, n_c));
-    EXPECT_TRUE(utils::VectorContainsValue(vertex_indexes, n_d));
-    EXPECT_TRUE(utils::VectorContainsValue(vertex_indexes, n_e));
+    EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_a));
+    EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_b));
+    EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_c));
+    EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_d));
+    EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_e));
   }
 
   {
@@ -78,7 +78,7 @@
     tarjan.Execute(n_f, &graph, &vertex_indexes);
 
     EXPECT_EQ(1U, vertex_indexes.size());
-    EXPECT_TRUE(utils::VectorContainsValue(vertex_indexes, n_f));
+    EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_f));
   }
 
   for (Vertex::Index i = n_g; i <= n_h; i++) {
@@ -86,8 +86,8 @@
     tarjan.Execute(i, &graph, &vertex_indexes);
 
     EXPECT_EQ(2U, vertex_indexes.size());
-    EXPECT_TRUE(utils::VectorContainsValue(vertex_indexes, n_g));
-    EXPECT_TRUE(utils::VectorContainsValue(vertex_indexes, n_h));
+    EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_g));
+    EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_h));
   }
 }
 
diff --git a/payload_generator/topological_sort.cc b/payload_generator/topological_sort.cc
index f164336..0abd708 100644
--- a/payload_generator/topological_sort.cc
+++ b/payload_generator/topological_sort.cc
@@ -37,7 +37,8 @@
   visited_nodes->insert(node);
   // Visit all children.
   for (Vertex::EdgeMap::const_iterator it = graph[node].out_edges.begin();
-       it != graph[node].out_edges.end(); ++it) {
+       it != graph[node].out_edges.end();
+       ++it) {
     TopologicalSortVisit(graph, visited_nodes, nodes, it->first);
   }
   // Visit this node.
diff --git a/payload_generator/topological_sort_unittest.cc b/payload_generator/topological_sort_unittest.cc
index 1d866a7..aa296d8 100644
--- a/payload_generator/topological_sort_unittest.cc
+++ b/payload_generator/topological_sort_unittest.cc
@@ -33,7 +33,7 @@
 namespace {
 // Returns true if the value is found in vect. If found, the index is stored
 // in out_index if out_index is not null.
-template<typename T>
+template <typename T>
 bool IndexOf(const vector<T>& vect,
              const T& value,
              typename vector<T>::size_type* out_index) {
@@ -84,7 +84,8 @@
     vector<Vertex::Index>::size_type src_index = 0;
     EXPECT_TRUE(IndexOf(sorted, i, &src_index));
     for (Vertex::EdgeMap::const_iterator it = graph[i].out_edges.begin();
-         it != graph[i].out_edges.end(); ++it) {
+         it != graph[i].out_edges.end();
+         ++it) {
       vector<Vertex::Index>::size_type dst_index = 0;
       EXPECT_TRUE(IndexOf(sorted, it->first, &dst_index));
       EXPECT_LT(dst_index, src_index);
diff --git a/payload_generator/xz_android.cc b/payload_generator/xz_android.cc
index f3b836d..41c55f7 100644
--- a/payload_generator/xz_android.cc
+++ b/payload_generator/xz_android.cc
@@ -16,12 +16,14 @@
 
 #include "update_engine/payload_generator/xz.h"
 
-#include <7zCrc.h>
-#include <Xz.h>
-#include <XzEnc.h>
+#include <elf.h>
+#include <endian.h>
 
 #include <algorithm>
 
+#include <7zCrc.h>
+#include <Xz.h>
+#include <XzEnc.h>
 #include <base/logging.h>
 
 namespace {
@@ -34,9 +36,8 @@
     Read = &BlobReaderStream::ReadStatic;
   }
 
-  static SRes ReadStatic(void* p, void* buf, size_t* size) {
-    auto* self =
-        static_cast<BlobReaderStream*>(reinterpret_cast<ISeqInStream*>(p));
+  static SRes ReadStatic(const ISeqInStream* p, void* buf, size_t* size) {
+    auto* self = static_cast<BlobReaderStream*>(const_cast<ISeqInStream*>(p));
     *size = std::min(*size, self->data_.size() - self->pos_);
     memcpy(buf, self->data_.data() + self->pos_, *size);
     self->pos_ += *size;
@@ -55,9 +56,10 @@
     Write = &BlobWriterStream::WriteStatic;
   }
 
-  static size_t WriteStatic(void* p, const void* buf, size_t size) {
-    auto* self =
-        static_cast<BlobWriterStream*>(reinterpret_cast<ISeqOutStream*>(p));
+  static size_t WriteStatic(const ISeqOutStream* p,
+                            const void* buf,
+                            size_t size) {
+    auto* self = static_cast<const BlobWriterStream*>(p);
     const uint8_t* buffer = reinterpret_cast<const uint8_t*>(buf);
     self->data_->reserve(self->data_->size() + size);
     self->data_->insert(self->data_->end(), buffer, buffer + size);
@@ -67,6 +69,37 @@
   brillo::Blob* data_;
 };
 
+// Returns the filter id to be used to compress |data|.
+// Only BCJ filters for x86 and ARM ELF files are supported; returns 0 otherwise.
+int GetFilterID(const brillo::Blob& data) {
+  if (data.size() < sizeof(Elf32_Ehdr) ||
+      memcmp(data.data(), ELFMAG, SELFMAG) != 0)
+    return 0;
+
+  const Elf32_Ehdr* header = reinterpret_cast<const Elf32_Ehdr*>(data.data());
+
+  // Only little-endian is supported.
+  if (header->e_ident[EI_DATA] != ELFDATA2LSB)
+    return 0;
+
+  switch (le16toh(header->e_machine)) {
+    case EM_386:
+    case EM_X86_64:
+      return XZ_ID_X86;
+    case EM_ARM:
+      // Both ARM and ARM Thumb instructions could be found in the same ARM ELF
+      // file. We choose to use the ARM Thumb filter here because testing shows
+      // that it usually works better than the ARM filter.
+      return XZ_ID_ARMT;
+#ifdef EM_AARCH64
+    case EM_AARCH64:
+      // Neither the ARM nor the ARM Thumb filter works well with AArch64.
+      return 0;
+#endif
+  }
+  return 0;
+}
+
 }  // namespace
 
 namespace chromeos_update_engine {
@@ -97,7 +130,6 @@
 
   // LZMA2 compression properties.
   CLzma2EncProps lzma2Props;
-  props.lzma2Props = &lzma2Props;
   Lzma2EncProps_Init(&lzma2Props);
   // LZMA compression "level 6" requires 9 MB of RAM to decompress in the worst
   // case.
@@ -106,6 +138,9 @@
   // The input size data is used to reduce the dictionary size if possible.
   lzma2Props.lzmaProps.reduceSize = in.size();
   Lzma2EncProps_Normalize(&lzma2Props);
+  props.lzma2Props = lzma2Props;
+
+  props.filterProps.id = GetFilterID(in);
 
   BlobWriterStream out_writer(out);
   BlobReaderStream in_reader(in);
diff --git a/payload_generator/xz_chromeos.cc b/payload_generator/xz_chromeos.cc
index a8cda4e..2ff9458 100644
--- a/payload_generator/xz_chromeos.cc
+++ b/payload_generator/xz_chromeos.cc
@@ -16,13 +16,39 @@
 
 #include "update_engine/payload_generator/xz.h"
 
+#include <base/logging.h>
+#include <lzma.h>
+
 namespace chromeos_update_engine {
 
 void XzCompressInit() {}
 
 bool XzCompress(const brillo::Blob& in, brillo::Blob* out) {
-  // No Xz compressor implementation in Chrome OS delta_generator builds.
-  return false;
+  out->clear();
+  if (in.empty())
+    return true;
+
+  // Resize the output buffer to get enough memory for writing the compressed
+  // data.
+  out->resize(lzma_stream_buffer_bound(in.size()));
+
+  const uint32_t kLzmaPreset = 6;
+  size_t out_pos = 0;
+  int rc = lzma_easy_buffer_encode(kLzmaPreset,
+                                   LZMA_CHECK_NONE,  // We do not need CRC.
+                                   nullptr,
+                                   in.data(),
+                                   in.size(),
+                                   out->data(),
+                                   &out_pos,
+                                   out->size());
+  if (rc != LZMA_OK) {
+    LOG(ERROR) << "Failed to compress data to LZMA stream with return code: "
+               << rc;
+    return false;
+  }
+  out->resize(out_pos);
+  return true;
 }
 
 }  // namespace chromeos_update_engine
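
The new Chrome OS XzCompress goes through liblzma's one-shot buffer API: size
the output with lzma_stream_buffer_bound(), encode with
lzma_easy_buffer_encode(), then shrink to the bytes actually written. Below is
a self-contained round-trip sketch of the same pattern outside update_engine,
using only public liblzma calls (the preset, memory limit, and sample data are
illustrative).

    #include <lzma.h>

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <vector>

    int main() {
      std::string input(100000, 'a');  // highly compressible sample data

      // Compress: allocate the worst-case bound, then shrink to what was used.
      std::vector<uint8_t> compressed(lzma_stream_buffer_bound(input.size()));
      size_t out_pos = 0;
      lzma_ret rc = lzma_easy_buffer_encode(
          6 /* preset */, LZMA_CHECK_NONE, nullptr,
          reinterpret_cast<const uint8_t*>(input.data()), input.size(),
          compressed.data(), &out_pos, compressed.size());
      assert(rc == LZMA_OK);
      compressed.resize(out_pos);

      // Decompress and verify the round trip.
      std::vector<uint8_t> decompressed(input.size());
      uint64_t memlimit = 64 << 20;  // 64 MiB is plenty for preset 6
      size_t in_pos = 0, dec_pos = 0;
      rc = lzma_stream_buffer_decode(&memlimit, 0, nullptr, compressed.data(),
                                     &in_pos, compressed.size(),
                                     decompressed.data(), &dec_pos,
                                     decompressed.size());
      assert(rc == LZMA_OK && dec_pos == input.size());
      assert(std::equal(decompressed.begin(), decompressed.end(),
                        input.begin()));
      return 0;
    }
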
diff --git a/payload_generator/zip_unittest.cc b/payload_generator/zip_unittest.cc
index c750eb7..e357b15 100644
--- a/payload_generator/zip_unittest.cc
+++ b/payload_generator/zip_unittest.cc
@@ -62,7 +62,6 @@
                   static_cast<const uint8_t*>(bytes) + count);
     return true;
   }
-  bool EndImpl() override { return true; }
 
  private:
   brillo::Blob* data_;
@@ -75,8 +74,6 @@
   // Init() parameters are ignored by the testing MemoryExtentWriter.
   bool ok = writer->Init(nullptr, {}, 1);
   ok = writer->Write(in.data(), in.size()) && ok;
-  // Call End() even if the Write failed.
-  ok = writer->End() && ok;
   return ok;
 }
 
@@ -115,12 +112,7 @@
   }
 };
 
-#ifdef __ANDROID__
 typedef ::testing::Types<BzipTest, XzTest> ZipTestTypes;
-#else
-// Chrome OS implementation of Xz compressor just returns false.
-typedef ::testing::Types<BzipTest> ZipTestTypes;
-#endif  // __ANDROID__
 
 TYPED_TEST_CASE(ZipTest, ZipTestTypes);
 
@@ -140,7 +132,7 @@
   brillo::Blob decompressed;
   EXPECT_TRUE(this->ZipDecompress(out, &decompressed));
   EXPECT_EQ(in.size(), decompressed.size());
-  EXPECT_TRUE(!memcmp(in.data(), decompressed.data(), in.size()));
+  EXPECT_EQ(0, memcmp(in.data(), decompressed.data(), in.size()));
 }
 
 TYPED_TEST(ZipTest, PoorCompressionTest) {
@@ -170,4 +162,18 @@
   EXPECT_EQ(0U, out.size());
 }
 
+TYPED_TEST(ZipTest, CompressELFTest) {
+  string path = test_utils::GetBuildArtifactsPath("delta_generator");
+  brillo::Blob in;
+  utils::ReadFile(path, &in);
+  brillo::Blob out;
+  EXPECT_TRUE(this->ZipCompress(in, &out));
+  EXPECT_LT(out.size(), in.size());
+  EXPECT_GT(out.size(), 0U);
+  brillo::Blob decompressed;
+  EXPECT_TRUE(this->ZipDecompress(out, &decompressed));
+  EXPECT_EQ(in.size(), decompressed.size());
+  EXPECT_EQ(0, memcmp(in.data(), decompressed.data(), in.size()));
+}
+
 }  // namespace chromeos_update_engine
diff --git a/payload_state.cc b/payload_state.cc
index 4992606..a6c3620 100644
--- a/payload_state.cc
+++ b/payload_state.cc
@@ -55,6 +55,9 @@
 // We want to randomize retry attempts after the backoff by +/- 6 hours.
 static const uint32_t kMaxBackoffFuzzMinutes = 12 * 60;
 
+// Limit persisting the current update duration uptime to once per second.
+static const uint64_t kUptimeResolution = 1;
+
 PayloadState::PayloadState()
     : prefs_(nullptr),
       using_p2p_for_downloading_(false),
@@ -64,9 +67,9 @@
       url_index_(0),
       url_failure_count_(0),
       url_switch_count_(0),
+      rollback_happened_(false),
       attempt_num_bytes_downloaded_(0),
       attempt_connection_type_(metrics::ConnectionType::kUnknown),
-      attempt_error_code_(ErrorCode::kSuccess),
       attempt_type_(AttemptType::kUpdate) {
   for (int i = 0; i <= kNumDownloadSources; i++)
     total_bytes_downloaded_[i] = current_bytes_downloaded_[i] = 0;
@@ -94,6 +97,7 @@
   }
   LoadNumReboots();
   LoadNumResponsesSeen();
+  LoadRollbackHappened();
   LoadRollbackVersion();
   LoadP2PFirstAttemptTimestamp();
   LoadP2PNumAttempts();
@@ -189,7 +193,7 @@
 
   attempt_type_ = attempt_type;
 
-  ClockInterface *clock = system_state_->clock();
+  ClockInterface* clock = system_state_->clock();
   attempt_start_time_boot_ = clock->GetBootTime();
   attempt_start_time_monotonic_ = clock->GetMonotonicTime();
   attempt_num_bytes_downloaded_ = 0;
@@ -242,7 +246,6 @@
           metrics::RollbackResult::kSuccess);
       break;
   }
-  attempt_error_code_ = ErrorCode::kSuccess;
 
   // Reset the number of responses seen since it counts from the last
   // successful update, e.g. now.
@@ -254,9 +257,8 @@
 
 void PayloadState::UpdateFailed(ErrorCode error) {
   ErrorCode base_error = utils::GetBaseErrorCode(error);
-  LOG(INFO) << "Updating payload state for error code: " << base_error
-            << " (" << utils::ErrorCodeToString(base_error) << ")";
-  attempt_error_code_ = base_error;
+  LOG(INFO) << "Updating payload state for error code: " << base_error << " ("
+            << utils::ErrorCodeToString(base_error) << ")";
 
   if (candidate_urls_.size() == 0) {
     // This means we got this error even before we got a valid Omaha response
@@ -278,7 +280,6 @@
       break;
   }
 
-
   switch (base_error) {
     // Errors which are good indicators of a problem with a particular URL or
     // the protocol used in the URL or entities in the communication channel
@@ -306,17 +307,18 @@
     case ErrorCode::kUnsupportedMajorPayloadVersion:
     case ErrorCode::kUnsupportedMinorPayloadVersion:
     case ErrorCode::kPayloadTimestampError:
+    case ErrorCode::kVerityCalculationError:
       IncrementUrlIndex();
       break;
 
-    // Errors which seem to be just transient network/communication related
-    // failures and do not indicate any inherent problem with the URL itself.
-    // So, we should keep the current URL but just increment the
-    // failure count to give it more chances. This way, while we maximize our
-    // chances of downloading from the URLs that appear earlier in the response
-    // (because download from a local server URL that appears earlier in a
-    // response is preferable than downloading from the next URL which could be
-    // a internet URL and thus could be more expensive).
+      // Errors which seem to be just transient network/communication related
+      // failures and do not indicate any inherent problem with the URL itself.
+      // So, we should keep the current URL but just increment the
+      // failure count to give it more chances. This way, we maximize our
+      // chances of downloading from the URLs that appear earlier in the
+      // response (because downloading from a local server URL that appears
+      // earlier in a response is preferable to downloading from the next URL,
+      // which could be an internet URL and thus more expensive).
 
     case ErrorCode::kError:
     case ErrorCode::kDownloadTransferError:
@@ -358,26 +360,30 @@
     case ErrorCode::kOmahaRequestXMLHasEntityDecl:
     case ErrorCode::kFilesystemVerifierError:
     case ErrorCode::kUserCanceled:
+    case ErrorCode::kOmahaUpdateIgnoredOverCellular:
     case ErrorCode::kUpdatedButNotActive:
+    case ErrorCode::kNoUpdate:
+    case ErrorCode::kRollbackNotPossible:
+    case ErrorCode::kFirstActiveOmahaPingSentPersistenceError:
       LOG(INFO) << "Not incrementing URL index or failure count for this error";
       break;
 
-    case ErrorCode::kSuccess:                            // success code
-    case ErrorCode::kUmaReportedMax:                     // not an error code
-    case ErrorCode::kOmahaRequestHTTPResponseBase:       // aggregated already
-    case ErrorCode::kDevModeFlag:                       // not an error code
-    case ErrorCode::kResumedFlag:                        // not an error code
-    case ErrorCode::kTestImageFlag:                      // not an error code
-    case ErrorCode::kTestOmahaUrlFlag:                   // not an error code
-    case ErrorCode::kSpecialFlags:                       // not an error code
+    case ErrorCode::kSuccess:                       // success code
+    case ErrorCode::kUmaReportedMax:                // not an error code
+    case ErrorCode::kOmahaRequestHTTPResponseBase:  // aggregated already
+    case ErrorCode::kDevModeFlag:                   // not an error code
+    case ErrorCode::kResumedFlag:                   // not an error code
+    case ErrorCode::kTestImageFlag:                 // not an error code
+    case ErrorCode::kTestOmahaUrlFlag:              // not an error code
+    case ErrorCode::kSpecialFlags:                  // not an error code
       // These shouldn't happen. Enumerating these  explicitly here so that we
       // can let the compiler warn about new error codes that are added to
       // action_processor.h but not added here.
       LOG(WARNING) << "Unexpected error code for UpdateFailed";
       break;
 
-    // Note: Not adding a default here so as to let the compiler warn us of
-    // any new enums that were added in the .h but not listed in this switch.
+      // Note: Not adding a default here so as to let the compiler warn us of
+      // any new enums that were added in the .h but not listed in this switch.
   }
 }
 
@@ -408,9 +414,11 @@
     }
   }
 
-  if (!system_state_->hardware()->IsOfficialBuild()) {
+  if (!system_state_->hardware()->IsOfficialBuild() &&
+      !prefs_->Exists(kPrefsNoIgnoreBackoff)) {
     // Backoffs are needed only for official builds. We do not want any delays
-    // or update failures due to backoffs during testing or development.
+    // or update failures due to backoffs during testing or development,
+    // unless |kPrefsNoIgnoreBackoff| is manually set.
     LOG(INFO) << "No backoffs for test/dev images. "
               << "Can proceed with the download";
     return false;
@@ -522,8 +530,8 @@
   // We don't want all retries to happen exactly at the same time when
   // retrying after backoff. So add some random minutes to fuzz.
   int fuzz_minutes = utils::FuzzInt(0, kMaxBackoffFuzzMinutes);
-  TimeDelta next_backoff_interval = TimeDelta::FromDays(num_days) +
-                                    TimeDelta::FromMinutes(fuzz_minutes);
+  TimeDelta next_backoff_interval =
+      TimeDelta::FromDays(num_days) + TimeDelta::FromMinutes(fuzz_minutes);
   LOG(INFO) << "Incrementing the backoff expiry time by "
             << utils::FormatTimeDelta(next_backoff_interval);
   SetBackoffExpiryTime(Time::Now() + next_backoff_interval);
@@ -591,10 +599,10 @@
 
   int64_t payload_bytes_downloaded = attempt_num_bytes_downloaded_;
 
-  ClockInterface *clock = system_state_->clock();
+  ClockInterface* clock = system_state_->clock();
   TimeDelta duration = clock->GetBootTime() - attempt_start_time_boot_;
-  TimeDelta duration_uptime = clock->GetMonotonicTime() -
-      attempt_start_time_monotonic_;
+  TimeDelta duration_uptime =
+      clock->GetMonotonicTime() - attempt_start_time_monotonic_;
 
   int64_t payload_download_speed_bps = 0;
   int64_t usec = duration_uptime.InMicroseconds();
@@ -607,7 +615,7 @@
   DownloadSource download_source = current_download_source_;
 
   metrics::DownloadErrorCode payload_download_error_code =
-    metrics::DownloadErrorCode::kUnset;
+      metrics::DownloadErrorCode::kUnset;
   ErrorCode internal_error_code = ErrorCode::kSuccess;
   metrics::AttemptResult attempt_result = metrics_utils::GetAttemptResult(code);
 
@@ -721,8 +729,8 @@
 
   int download_overhead_percentage = 0;
   if (successful_bytes > 0) {
-    download_overhead_percentage = (total_bytes - successful_bytes) * 100ULL /
-                                   successful_bytes;
+    download_overhead_percentage =
+        (total_bytes - successful_bytes) * 100ULL / successful_bytes;
   }
 
   int url_switch_count = static_cast<int>(url_switch_count_);
@@ -732,6 +740,7 @@
   SetNumReboots(0);
 
   TimeDelta duration = GetUpdateDuration();
+  TimeDelta duration_uptime = GetUpdateDurationUptime();
 
   prefs_->Delete(kPrefsUpdateTimestampStart);
   prefs_->Delete(kPrefsUpdateDurationUptime);
@@ -752,6 +761,7 @@
       total_bytes_by_source,
       download_overhead_percentage,
       duration,
+      duration_uptime,
       reboot_count,
       url_switch_count);
 }
@@ -787,6 +797,7 @@
   SetP2PNumAttempts(0);
   SetP2PFirstAttemptTimestamp(Time());  // Set to null time
   SetScatteringWaitPeriod(TimeDelta());
+  SetStagingWaitPeriod(TimeDelta());
 }
 
 void PayloadState::ResetRollbackVersion() {
@@ -875,7 +886,7 @@
   full_payload_attempt_number_ = full_payload_attempt_number;
   LOG(INFO) << "Full Payload Attempt Number = " << full_payload_attempt_number_;
   prefs_->SetInt64(kPrefsFullPayloadAttemptNumber,
-      full_payload_attempt_number_);
+                   full_payload_attempt_number_);
 }
 
 void PayloadState::SetPayloadIndex(size_t payload_index) {
@@ -909,7 +920,7 @@
 
 void PayloadState::LoadScatteringWaitPeriod() {
   SetScatteringWaitPeriod(TimeDelta::FromSeconds(
-      GetPersistedValue(kPrefsWallClockWaitPeriod, prefs_)));
+      GetPersistedValue(kPrefsWallClockScatteringWaitPeriod, prefs_)));
 }
 
 void PayloadState::SetScatteringWaitPeriod(TimeDelta wait_period) {
@@ -918,10 +929,27 @@
   LOG(INFO) << "Scattering Wait Period (seconds) = "
             << scattering_wait_period_.InSeconds();
   if (scattering_wait_period_.InSeconds() > 0) {
-    prefs_->SetInt64(kPrefsWallClockWaitPeriod,
+    prefs_->SetInt64(kPrefsWallClockScatteringWaitPeriod,
                      scattering_wait_period_.InSeconds());
   } else {
-    prefs_->Delete(kPrefsWallClockWaitPeriod);
+    prefs_->Delete(kPrefsWallClockScatteringWaitPeriod);
+  }
+}
+
+void PayloadState::LoadStagingWaitPeriod() {
+  SetStagingWaitPeriod(TimeDelta::FromSeconds(
+      GetPersistedValue(kPrefsWallClockStagingWaitPeriod, prefs_)));
+}
+
+void PayloadState::SetStagingWaitPeriod(TimeDelta wait_period) {
+  CHECK(prefs_);
+  staging_wait_period_ = wait_period;
+  LOG(INFO) << "Staging Wait Period (days) =" << staging_wait_period_.InDays();
+  if (staging_wait_period_.InSeconds() > 0) {
+    prefs_->SetInt64(kPrefsWallClockStagingWaitPeriod,
+                     staging_wait_period_.InSeconds());
+  } else {
+    prefs_->Delete(kPrefsWallClockStagingWaitPeriod);
   }
 }
 
@@ -978,8 +1006,8 @@
 
 TimeDelta PayloadState::GetUpdateDuration() {
   Time end_time = update_timestamp_end_.is_null()
-    ? system_state_->clock()->GetWallclockTime() :
-      update_timestamp_end_;
+                      ? system_state_->clock()->GetWallclockTime()
+                      : update_timestamp_end_;
   return end_time - update_timestamp_start_;
 }
 
@@ -1009,8 +1037,7 @@
   TimeDelta duration_according_to_stored_time = now - stored_time;
   if (duration_according_to_stored_time < -kDurationSlack) {
     LOG(ERROR) << "The UpdateTimestampStart value ("
-               << utils::ToString(stored_time)
-               << ") in persisted state is "
+               << utils::ToString(stored_time) << ") in persisted state is "
                << utils::FormatTimeDelta(duration_according_to_stored_time)
                << " in the future. Resetting.";
     stored_time = now;
@@ -1057,8 +1084,7 @@
   if (diff < -kDurationSlack) {
     LOG(ERROR) << "The UpdateDurationUptime value ("
                << utils::FormatTimeDelta(stored_delta)
-               << ") in persisted state is "
-               << utils::FormatTimeDelta(diff)
+               << ") in persisted state is " << utils::FormatTimeDelta(diff)
                << " larger than the wall-clock delta. Resetting.";
     stored_delta = update_duration_current_;
   }
@@ -1070,6 +1096,25 @@
   SetNumReboots(GetPersistedValue(kPrefsNumReboots, prefs_));
 }
 
+void PayloadState::LoadRollbackHappened() {
+  CHECK(powerwash_safe_prefs_);
+  bool rollback_happened = false;
+  powerwash_safe_prefs_->GetBoolean(kPrefsRollbackHappened, &rollback_happened);
+  SetRollbackHappened(rollback_happened);
+}
+
+void PayloadState::SetRollbackHappened(bool rollback_happened) {
+  CHECK(powerwash_safe_prefs_);
+  LOG(INFO) << "Setting rollback-happened to " << rollback_happened << ".";
+  rollback_happened_ = rollback_happened;
+  if (rollback_happened) {
+    powerwash_safe_prefs_->SetBoolean(kPrefsRollbackHappened,
+                                      rollback_happened);
+  } else {
+    powerwash_safe_prefs_->Delete(kPrefsRollbackHappened);
+  }
+}
+
 void PayloadState::LoadRollbackVersion() {
   CHECK(powerwash_safe_prefs_);
   string rollback_version;
@@ -1081,7 +1126,7 @@
 
 void PayloadState::SetRollbackVersion(const string& rollback_version) {
   CHECK(powerwash_safe_prefs_);
-  LOG(INFO) << "Blacklisting version "<< rollback_version;
+  LOG(INFO) << "Blacklisting version " << rollback_version;
   rollback_version_ = rollback_version;
   powerwash_safe_prefs_->SetString(kPrefsRollbackVersion, rollback_version);
 }
@@ -1108,9 +1153,12 @@
 void PayloadState::CalculateUpdateDurationUptime() {
   Time now = system_state_->clock()->GetMonotonicTime();
   TimeDelta uptime_since_last_update = now - update_duration_uptime_timestamp_;
-  TimeDelta new_uptime = update_duration_uptime_ + uptime_since_last_update;
-  // We're frequently called so avoid logging this write
-  SetUpdateDurationUptimeExtended(new_uptime, now, false);
+
+  if (uptime_since_last_update > TimeDelta::FromSeconds(kUptimeResolution)) {
+    TimeDelta new_uptime = update_duration_uptime_ + uptime_since_last_update;
+    // We're frequently called so avoid logging this write
+    SetUpdateDurationUptimeExtended(new_uptime, now, false);
+  }
 }
 
 string PayloadState::GetPrefsKey(const string& prefix, DownloadSource source) {
@@ -1122,10 +1170,9 @@
   SetCurrentBytesDownloaded(source, GetPersistedValue(key, prefs_), true);
 }
 
-void PayloadState::SetCurrentBytesDownloaded(
-    DownloadSource source,
-    uint64_t current_bytes_downloaded,
-    bool log) {
+void PayloadState::SetCurrentBytesDownloaded(DownloadSource source,
+                                             uint64_t current_bytes_downloaded,
+                                             bool log) {
   CHECK(prefs_);
 
   if (source >= kNumDownloadSources)
@@ -1146,10 +1193,9 @@
   SetTotalBytesDownloaded(source, GetPersistedValue(key, prefs_), true);
 }
 
-void PayloadState::SetTotalBytesDownloaded(
-    DownloadSource source,
-    uint64_t total_bytes_downloaded,
-    bool log) {
+void PayloadState::SetTotalBytesDownloaded(DownloadSource source,
+                                           uint64_t total_bytes_downloaded,
+                                           bool log) {
   CHECK(prefs_);
 
   if (source >= kNumDownloadSources)
@@ -1161,9 +1207,8 @@
   // Persist.
   string prefs_key = GetPrefsKey(kPrefsTotalBytesDownloaded, source);
   prefs_->SetInt64(prefs_key, total_bytes_downloaded);
-  LOG_IF(INFO, log) << "Total bytes downloaded for "
-                    << utils::ToString(source) << " = "
-                    << GetTotalBytesDownloaded(source);
+  LOG_IF(INFO, log) << "Total bytes downloaded for " << utils::ToString(source)
+                    << " = " << GetTotalBytesDownloaded(source);
 }
 
 void PayloadState::LoadNumResponsesSeen() {
@@ -1349,8 +1394,7 @@
 bool PayloadState::P2PAttemptAllowed() {
   if (p2p_num_attempts_ > kMaxP2PAttempts) {
     LOG(INFO) << "Number of p2p attempts is " << p2p_num_attempts_
-              << " which is greater than "
-              << kMaxP2PAttempts
+              << " which is greater than " << kMaxP2PAttempts
               << " - disallowing p2p.";
     return false;
   }
@@ -1367,8 +1411,8 @@
       LOG(INFO) << "Time spent attempting p2p is "
                 << utils::FormatTimeDelta(time_spent_attempting_p2p)
                 << " which is greater than "
-                << utils::FormatTimeDelta(TimeDelta::FromSeconds(
-                       kMaxP2PAttemptTimeSeconds))
+                << utils::FormatTimeDelta(
+                       TimeDelta::FromSeconds(kMaxP2PAttemptTimeSeconds))
                 << " - disallowing p2p.";
       return false;
     }
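
The CalculateUpdateDurationUptime() hunk above makes the method persist the accumulated uptime only once more than kUptimeResolution has passed since the last stored timestamp, which throttles pref writes on a frequently called path. A minimal standalone sketch of that throttling shape, using std::chrono in place of base::TimeDelta and a hypothetical persist callback standing in for SetUpdateDurationUptimeExtended(), could look like:

#include <chrono>
#include <functional>
#include <iostream>

// Simplified stand-in for PayloadState's uptime accounting. The resolution
// constant mirrors the kUptimeResolution referenced by the patch (its real
// value lives elsewhere; 10 s here is an assumption), and |persist| is a
// hypothetical placeholder for SetUpdateDurationUptimeExtended().
class UptimeAccumulator {
 public:
  explicit UptimeAccumulator(std::function<void(std::chrono::seconds)> persist)
      : persist_(std::move(persist)),
        last_timestamp_(std::chrono::steady_clock::now()) {}

  // Called frequently; only writes through when enough uptime has elapsed.
  void Tick(std::chrono::steady_clock::time_point now) {
    const auto since_last = now - last_timestamp_;
    if (since_last > kUptimeResolution) {
      accumulated_ +=
          std::chrono::duration_cast<std::chrono::seconds>(since_last);
      last_timestamp_ = now;
      persist_(accumulated_);
    }
  }

 private:
  static constexpr std::chrono::seconds kUptimeResolution{10};  // assumed
  std::chrono::seconds accumulated_{0};
  std::chrono::steady_clock::time_point last_timestamp_;
  std::function<void(std::chrono::seconds)> persist_;
};

int main() {
  UptimeAccumulator acc([](std::chrono::seconds total) {
    std::cout << "persisting uptime: " << total.count() << "s\n";
  });
  const auto start = std::chrono::steady_clock::now();
  acc.Tick(start + std::chrono::seconds(1));   // below resolution: no write
  acc.Tick(start + std::chrono::seconds(30));  // above resolution: persisted
  return 0;
}
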
diff --git a/payload_state.h b/payload_state.h
index 24e9900..5ef1220 100644
--- a/payload_state.h
+++ b/payload_state.h
@@ -80,22 +80,17 @@
   }
 
   inline std::string GetCurrentUrl() override {
-    return candidate_urls_.size() && candidate_urls_[payload_index_].size()
+    return (payload_index_ < candidate_urls_.size() &&
+            url_index_ < candidate_urls_[payload_index_].size())
                ? candidate_urls_[payload_index_][url_index_]
                : "";
   }
 
-  inline uint32_t GetUrlFailureCount() override {
-    return url_failure_count_;
-  }
+  inline uint32_t GetUrlFailureCount() override { return url_failure_count_; }
 
-  inline uint32_t GetUrlSwitchCount() override {
-    return url_switch_count_;
-  }
+  inline uint32_t GetUrlSwitchCount() override { return url_switch_count_; }
 
-  inline int GetNumResponsesSeen() override {
-    return num_responses_seen_;
-  }
+  inline int GetNumResponsesSeen() override { return num_responses_seen_; }
 
   inline base::Time GetBackoffExpiryTime() override {
     return backoff_expiry_time_;
@@ -113,15 +108,15 @@
     return source < kNumDownloadSources ? total_bytes_downloaded_[source] : 0;
   }
 
-  inline uint32_t GetNumReboots() override {
-    return num_reboots_;
-  }
+  inline uint32_t GetNumReboots() override { return num_reboots_; }
 
   void UpdateEngineStarted() override;
 
-  inline std::string GetRollbackVersion() override {
-    return rollback_version_;
-  }
+  inline bool GetRollbackHappened() override { return rollback_happened_; }
+
+  void SetRollbackHappened(bool rollback_happened) override;
+
+  inline std::string GetRollbackVersion() override { return rollback_version_; }
 
   int GetP2PNumAttempts() override;
   base::Time GetP2PFirstAttemptTimestamp() override;
@@ -132,9 +127,7 @@
     return using_p2p_for_downloading_;
   }
 
-  bool GetUsingP2PForSharing() const override {
-    return using_p2p_for_sharing_;
-  }
+  bool GetUsingP2PForSharing() const override { return using_p2p_for_sharing_; }
 
   base::TimeDelta GetScatteringWaitPeriod() override {
     return scattering_wait_period_;
@@ -142,17 +135,11 @@
 
   void SetScatteringWaitPeriod(base::TimeDelta wait_period) override;
 
-  void SetP2PUrl(const std::string& url) override {
-    p2p_url_ = url;
-  }
+  void SetStagingWaitPeriod(base::TimeDelta wait_period) override;
 
-  std::string GetP2PUrl() const override {
-    return p2p_url_;
-  }
+  void SetP2PUrl(const std::string& url) override { p2p_url_ = url; }
 
-  inline ErrorCode GetAttemptErrorCode() const override {
-    return attempt_error_code_;
-  }
+  std::string GetP2PUrl() const override { return p2p_url_; }
 
   bool NextPayload() override;
 
@@ -166,6 +153,7 @@
   FRIEND_TEST(PayloadStateTest, RebootAfterUpdateFailedMetric);
   FRIEND_TEST(PayloadStateTest, RebootAfterUpdateSucceed);
   FRIEND_TEST(PayloadStateTest, RebootAfterCanceledUpdate);
+  FRIEND_TEST(PayloadStateTest, RollbackHappened);
   FRIEND_TEST(PayloadStateTest, RollbackVersion);
   FRIEND_TEST(PayloadStateTest, UpdateSuccessWithWipedPrefs);
 
@@ -341,7 +329,7 @@
 
   // Loads the number of bytes that have been currently downloaded through the
   // previous attempts from the persisted state for the given source. It's
-  // reset to 0 everytime we begin a full update and is continued from previous
+  // reset to 0 every time we begin a full update and is continued from previous
   // attempt if we're resuming the update.
   void LoadCurrentBytesDownloaded(DownloadSource source);
 
@@ -353,7 +341,7 @@
 
   // Loads the total number of bytes that have been downloaded (since the last
   // successful update) from the persisted state for the given source. It's
-  // reset to 0 everytime we successfully apply an update and counts the bytes
+  // reset to 0 every time we successfully apply an update and counts the bytes
   // downloaded for both successful and failed attempts since then.
   void LoadTotalBytesDownloaded(DownloadSource source);
 
@@ -363,6 +351,10 @@
                                uint64_t total_bytes_downloaded,
                                bool log);
 
+  // Loads whether rollback has happened on this device since the last update
+  // check where policy was available. This info is preserved over powerwash.
+  void LoadRollbackHappened();
+
   // Loads the blacklisted version from our prefs file.
   void LoadRollbackVersion();
 
@@ -374,9 +366,10 @@
   void ResetRollbackVersion();
 
   inline uint32_t GetUrlIndex() {
-    return url_index_ ? std::min(candidate_urls_[payload_index_].size() - 1,
-                                 url_index_)
-                      : 0;
+    return (url_index_ != 0 && payload_index_ < candidate_urls_.size())
+               ? std::min(candidate_urls_[payload_index_].size() - 1,
+                          url_index_)
+               : 0;
   }
 
   // Computes the list of candidate URLs from the total list of payload URLs in
@@ -401,8 +394,6 @@
   // increments num_reboots.
   void UpdateNumReboots();
 
-
-
   // Loads the |kPrefsP2PFirstAttemptTimestamp| state variable from disk
   // into |p2p_first_attempt_timestamp_|.
   void LoadP2PFirstAttemptTimestamp();
@@ -419,6 +410,9 @@
   // Loads the persisted scattering wallclock-based wait period.
   void LoadScatteringWaitPeriod();
 
+  // Loads the persisted staging wallclock-based wait period.
+  void LoadStagingWaitPeriod();
+
   // Get the total size of all payloads.
   int64_t GetPayloadSize();
 
@@ -489,7 +483,7 @@
   int32_t url_switch_count_;
 
   // The current download source based on the current URL. This value is
-  // not persisted as it can be recomputed everytime we update the URL.
+  // not persisted as it can be recomputed every time we update the URL.
   // We're storing this so as not to recompute this on every few bytes of
   // data we read from the socket.
   DownloadSource current_download_source_;
@@ -550,6 +544,11 @@
   // allowed as per device policy.
   std::vector<std::vector<std::string>> candidate_urls_;
 
+  // This stores whether rollback has happened since the last time device policy
+  // was available during update check. When this is set, we're preventing
+  // forced updates to avoid update-rollback loops.
+  bool rollback_happened_;
+
   // This stores a blacklisted version set as part of rollback. When we rollback
   // we store the version of the OS from which we are rolling back in order
   // to guarantee that we do not re-update to it on the next au attempt after
@@ -568,15 +567,15 @@
   // The connection type when the attempt started.
   metrics::ConnectionType attempt_connection_type_;
 
-  // The attempt error code when the attempt finished.
-  ErrorCode attempt_error_code_;
-
   // Whether we're currently rolling back.
   AttemptType attempt_type_;
 
   // The current scattering wallclock-based wait period.
   base::TimeDelta scattering_wait_period_;
 
+  // The current staging wallclock-based wait period.
+  base::TimeDelta staging_wait_period_;
+
   DISALLOW_COPY_AND_ASSIGN(PayloadState);
 };
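
The GetCurrentUrl() and GetUrlIndex() hunks above replace size truthiness checks with explicit bounds checks on both payload_index_ and url_index_, so a stale index can no longer read past the end of candidate_urls_. A minimal sketch of the same bounds-checked lookup, with local stand-ins for the member names rather than the real PayloadState class, is:

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Stand-ins for candidate_urls_/payload_index_/url_index_; this only
// illustrates the lookup pattern the patch introduces.
std::string CurrentUrl(
    const std::vector<std::vector<std::string>>& candidate_urls,
    std::size_t payload_index,
    std::size_t url_index) {
  return (payload_index < candidate_urls.size() &&
          url_index < candidate_urls[payload_index].size())
             ? candidate_urls[payload_index][url_index]
             : "";
}

int main() {
  std::vector<std::vector<std::string>> urls = {{"http://a", "https://a"}};
  std::cout << CurrentUrl(urls, 0, 1) << std::endl;  // https://a
  // An out-of-range payload index now yields "" instead of undefined behavior.
  std::cout << (CurrentUrl(urls, 3, 0).empty() ? "(no URL)" : "?") << std::endl;
  return 0;
}
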
 
diff --git a/payload_state_interface.h b/payload_state_interface.h
index 4aa25e3..d384a0e 100644
--- a/payload_state_interface.h
+++ b/payload_state_interface.h
@@ -155,6 +155,16 @@
   // Called at update_engine startup to do various house-keeping.
   virtual void UpdateEngineStarted() = 0;
 
+  // Returns whether a rollback happened since the last update check with policy
+  // present.
+  virtual bool GetRollbackHappened() = 0;
+
+  // Sets whether rollback has happened on this device since the last update
+  // check where policy was available. This info is preserved over powerwash.
+  // This prevents forced updates from happening on a rolled-back device before
+  // device policy is available.
+  virtual void SetRollbackHappened(bool rollback_happened) = 0;
+
   // Returns the version from before a rollback if our last update was a
   // rollback.
   virtual std::string GetRollbackVersion() = 0;
@@ -192,10 +202,12 @@
   // Sets/gets the P2P download URL, if one is to be used.
   virtual void SetP2PUrl(const std::string& url) = 0;
   virtual std::string GetP2PUrl() const = 0;
-  virtual ErrorCode GetAttemptErrorCode() const = 0;
 
   // Switch to next payload.
   virtual bool NextPayload() = 0;
+
+  // Sets and persists the staging wallclock-based wait period.
+  virtual void SetStagingWaitPeriod(base::TimeDelta wait_period) = 0;
 };
 
 }  // namespace chromeos_update_engine
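
The new GetRollbackHappened()/SetRollbackHappened() methods above, together with the RollbackHappened test added to payload_state_unittest.cc later in this patch, imply a simple contract against the powerwash-safe prefs: setting the flag persists kPrefsRollbackHappened, clearing it deletes the key, and loading reads it back. A minimal sketch of that contract, with a toy in-memory prefs class standing in for the real PrefsInterface and an assumed key string, is:

#include <iostream>
#include <map>
#include <string>

// Toy stand-in for the powerwash-safe prefs store; only the calls exercised
// by the RollbackHappened unittest are modeled.
class FakePowerwashSafePrefs {
 public:
  void SetBoolean(const std::string& key, bool value) { store_[key] = value; }
  void Delete(const std::string& key) { store_.erase(key); }
  bool GetBoolean(const std::string& key, bool* out) const {
    auto it = store_.find(key);
    if (it == store_.end()) return false;
    *out = it->second;
    return true;
  }

 private:
  std::map<std::string, bool> store_;
};

const char kPrefsRollbackHappened[] = "rollback-happened";  // assumed value

// Mirrors the observable behavior in the unittest: true is persisted, false
// removes the key entirely.
void SetRollbackHappened(FakePowerwashSafePrefs* prefs,
                         bool* cached,
                         bool rollback_happened) {
  *cached = rollback_happened;
  if (rollback_happened)
    prefs->SetBoolean(kPrefsRollbackHappened, true);
  else
    prefs->Delete(kPrefsRollbackHappened);
}

int main() {
  FakePowerwashSafePrefs prefs;
  bool cached = false;
  bool stored = false;
  SetRollbackHappened(&prefs, &cached, true);
  std::cout << std::boolalpha
            << prefs.GetBoolean(kPrefsRollbackHappened, &stored) << " "
            << stored << std::endl;  // true true
  SetRollbackHappened(&prefs, &cached, false);
  std::cout << prefs.GetBoolean(kPrefsRollbackHappened, &stored)
            << std::endl;  // false
  return 0;
}
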
diff --git a/payload_state_unittest.cc b/payload_state_unittest.cc
index f1c3835..869c24e 100644
--- a/payload_state_unittest.cc
+++ b/payload_state_unittest.cc
@@ -37,28 +37,28 @@
 using base::Time;
 using base::TimeDelta;
 using std::string;
+using testing::_;
 using testing::AnyNumber;
 using testing::AtLeast;
 using testing::Mock;
 using testing::NiceMock;
 using testing::Return;
 using testing::SetArgPointee;
-using testing::_;
 
 namespace chromeos_update_engine {
 
 const char* kCurrentBytesDownloadedFromHttps =
-  "current-bytes-downloaded-from-HttpsServer";
+    "current-bytes-downloaded-from-HttpsServer";
 const char* kTotalBytesDownloadedFromHttps =
-  "total-bytes-downloaded-from-HttpsServer";
+    "total-bytes-downloaded-from-HttpsServer";
 const char* kCurrentBytesDownloadedFromHttp =
-  "current-bytes-downloaded-from-HttpServer";
+    "current-bytes-downloaded-from-HttpServer";
 const char* kTotalBytesDownloadedFromHttp =
-  "total-bytes-downloaded-from-HttpServer";
+    "total-bytes-downloaded-from-HttpServer";
 const char* kCurrentBytesDownloadedFromHttpPeer =
-  "current-bytes-downloaded-from-HttpPeer";
+    "current-bytes-downloaded-from-HttpPeer";
 const char* kTotalBytesDownloadedFromHttpPeer =
-  "total-bytes-downloaded-from-HttpPeer";
+    "total-bytes-downloaded-from-HttpPeer";
 
 static void SetupPayloadStateWith2Urls(string hash,
                                        bool http_enabled,
@@ -103,7 +103,7 @@
   EXPECT_EQ(expected_response_sign, stored_response_sign);
 }
 
-class PayloadStateTest : public ::testing::Test { };
+class PayloadStateTest : public ::testing::Test {};
 
 TEST(PayloadStateTest, SetResponseWorksWithEmptyResponse) {
   OmahaResponse response;
@@ -111,23 +111,23 @@
   NiceMock<MockPrefs>* prefs = fake_system_state.mock_prefs();
   EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AnyNumber());
   EXPECT_CALL(*prefs, SetInt64(kPrefsPayloadAttemptNumber, 0))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
   EXPECT_CALL(*prefs, SetInt64(kPrefsFullPayloadAttemptNumber, 0))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
   EXPECT_CALL(*prefs, SetInt64(kPrefsBackoffExpiryTime, 0)).Times(AtLeast(1));
   EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlIndex, 0)).Times(AtLeast(1));
   EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlFailureCount, 0))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
   EXPECT_CALL(*prefs, SetInt64(kPrefsUpdateTimestampStart, _))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
   EXPECT_CALL(*prefs, SetInt64(kPrefsUpdateDurationUptime, _))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
   EXPECT_CALL(*prefs, SetInt64(kCurrentBytesDownloadedFromHttps, 0))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
   EXPECT_CALL(*prefs, SetInt64(kCurrentBytesDownloadedFromHttp, 0))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
   EXPECT_CALL(*prefs, SetInt64(kCurrentBytesDownloadedFromHttpPeer, 0))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
   EXPECT_CALL(*prefs, SetInt64(kPrefsNumReboots, 0)).Times(AtLeast(1));
   PayloadState payload_state;
   EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
@@ -154,27 +154,24 @@
   NiceMock<MockPrefs>* prefs = fake_system_state.mock_prefs();
   EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AnyNumber());
   EXPECT_CALL(*prefs, SetInt64(kPrefsPayloadAttemptNumber, 0))
-    .Times(AtLeast(1));
-  EXPECT_CALL(*prefs, SetInt64(kPrefsFullPayloadAttemptNumber, 0))
-    .Times(AtLeast(1));
-  EXPECT_CALL(*prefs, SetInt64(kPrefsBackoffExpiryTime, 0))
-    .Times(AtLeast(1));
-  EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlIndex, 0))
-    .Times(AtLeast(1));
-  EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlFailureCount, 0))
-    .Times(AtLeast(1));
-  EXPECT_CALL(*prefs, SetInt64(kPrefsUpdateTimestampStart, _))
-    .Times(AtLeast(1));
-  EXPECT_CALL(*prefs, SetInt64(kPrefsUpdateDurationUptime, _))
-    .Times(AtLeast(1));
-  EXPECT_CALL(*prefs, SetInt64(kCurrentBytesDownloadedFromHttps, 0))
-    .Times(AtLeast(1));
-  EXPECT_CALL(*prefs, SetInt64(kCurrentBytesDownloadedFromHttp, 0))
-    .Times(AtLeast(1));
-  EXPECT_CALL(*prefs, SetInt64(kCurrentBytesDownloadedFromHttpPeer, 0))
-    .Times(AtLeast(1));
-  EXPECT_CALL(*prefs, SetInt64(kPrefsNumReboots, 0))
       .Times(AtLeast(1));
+  EXPECT_CALL(*prefs, SetInt64(kPrefsFullPayloadAttemptNumber, 0))
+      .Times(AtLeast(1));
+  EXPECT_CALL(*prefs, SetInt64(kPrefsBackoffExpiryTime, 0)).Times(AtLeast(1));
+  EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlIndex, 0)).Times(AtLeast(1));
+  EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlFailureCount, 0))
+      .Times(AtLeast(1));
+  EXPECT_CALL(*prefs, SetInt64(kPrefsUpdateTimestampStart, _))
+      .Times(AtLeast(1));
+  EXPECT_CALL(*prefs, SetInt64(kPrefsUpdateDurationUptime, _))
+      .Times(AtLeast(1));
+  EXPECT_CALL(*prefs, SetInt64(kCurrentBytesDownloadedFromHttps, 0))
+      .Times(AtLeast(1));
+  EXPECT_CALL(*prefs, SetInt64(kCurrentBytesDownloadedFromHttp, 0))
+      .Times(AtLeast(1));
+  EXPECT_CALL(*prefs, SetInt64(kCurrentBytesDownloadedFromHttpPeer, 0))
+      .Times(AtLeast(1));
+  EXPECT_CALL(*prefs, SetInt64(kPrefsNumReboots, 0)).Times(AtLeast(1));
   PayloadState payload_state;
   EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
   payload_state.SetResponse(response);
@@ -209,23 +206,20 @@
   NiceMock<MockPrefs>* prefs = fake_system_state.mock_prefs();
   EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AnyNumber());
   EXPECT_CALL(*prefs, SetInt64(kPrefsPayloadAttemptNumber, 0))
-    .Times(AtLeast(1));
-  EXPECT_CALL(*prefs, SetInt64(kPrefsFullPayloadAttemptNumber, 0))
-    .Times(AtLeast(1));
-  EXPECT_CALL(*prefs, SetInt64(kPrefsBackoffExpiryTime, 0))
-    .Times(AtLeast(1));
-  EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlIndex, 0))
-    .Times(AtLeast(1));
-  EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlFailureCount, 0))
-    .Times(AtLeast(1));
-  EXPECT_CALL(*prefs, SetInt64(kCurrentBytesDownloadedFromHttps, 0))
-    .Times(AtLeast(1));
-  EXPECT_CALL(*prefs, SetInt64(kCurrentBytesDownloadedFromHttp, 0))
-    .Times(AtLeast(1));
-  EXPECT_CALL(*prefs, SetInt64(kCurrentBytesDownloadedFromHttpPeer, 0))
-    .Times(AtLeast(1));
-  EXPECT_CALL(*prefs, SetInt64(kPrefsNumReboots, 0))
       .Times(AtLeast(1));
+  EXPECT_CALL(*prefs, SetInt64(kPrefsFullPayloadAttemptNumber, 0))
+      .Times(AtLeast(1));
+  EXPECT_CALL(*prefs, SetInt64(kPrefsBackoffExpiryTime, 0)).Times(AtLeast(1));
+  EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlIndex, 0)).Times(AtLeast(1));
+  EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlFailureCount, 0))
+      .Times(AtLeast(1));
+  EXPECT_CALL(*prefs, SetInt64(kCurrentBytesDownloadedFromHttps, 0))
+      .Times(AtLeast(1));
+  EXPECT_CALL(*prefs, SetInt64(kCurrentBytesDownloadedFromHttp, 0))
+      .Times(AtLeast(1));
+  EXPECT_CALL(*prefs, SetInt64(kCurrentBytesDownloadedFromHttpPeer, 0))
+      .Times(AtLeast(1));
+  EXPECT_CALL(*prefs, SetInt64(kPrefsNumReboots, 0)).Times(AtLeast(1));
 
   PayloadState payload_state;
   EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
@@ -259,13 +253,13 @@
   EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AnyNumber());
   // Payload attempt should start with 0 and then advance to 1.
   EXPECT_CALL(*prefs, SetInt64(kPrefsPayloadAttemptNumber, 0))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
   EXPECT_CALL(*prefs, SetInt64(kPrefsPayloadAttemptNumber, 1))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
   EXPECT_CALL(*prefs, SetInt64(kPrefsFullPayloadAttemptNumber, 0))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
   EXPECT_CALL(*prefs, SetInt64(kPrefsFullPayloadAttemptNumber, 1))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
   EXPECT_CALL(*prefs, SetInt64(kPrefsBackoffExpiryTime, _)).Times(AtLeast(2));
 
   // Reboots will be set
@@ -278,7 +272,7 @@
   // Failure count should be set each time the url index is set, so that's
   // 4 times for this test.
   EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlFailureCount, 0))
-    .Times(AtLeast(4));
+      .Times(AtLeast(4));
 
   EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
 
@@ -361,18 +355,18 @@
 
   EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AnyNumber());
   EXPECT_CALL(*prefs, SetInt64(kPrefsPayloadAttemptNumber, 0))
-    .Times(AtLeast(2));
+      .Times(AtLeast(2));
   EXPECT_CALL(*prefs, SetInt64(kPrefsPayloadAttemptNumber, 1))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
   EXPECT_CALL(*prefs, SetInt64(kPrefsPayloadAttemptNumber, 2))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
 
   EXPECT_CALL(*prefs, SetInt64(kPrefsFullPayloadAttemptNumber, 0))
-    .Times(AtLeast(2));
+      .Times(AtLeast(2));
   EXPECT_CALL(*prefs, SetInt64(kPrefsFullPayloadAttemptNumber, 1))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
   EXPECT_CALL(*prefs, SetInt64(kPrefsFullPayloadAttemptNumber, 2))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
 
   EXPECT_CALL(*prefs, SetInt64(kPrefsBackoffExpiryTime, _)).Times(AtLeast(4));
 
@@ -380,29 +374,28 @@
   EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlIndex, 1)).Times(AtLeast(2));
 
   EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlFailureCount, 0))
-    .Times(AtLeast(7));
+      .Times(AtLeast(7));
   EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlFailureCount, 1))
-    .Times(AtLeast(2));
+      .Times(AtLeast(2));
   EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlFailureCount, 2))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
 
   EXPECT_CALL(*prefs, SetInt64(kPrefsUpdateTimestampStart, _))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
   EXPECT_CALL(*prefs, SetInt64(kPrefsUpdateDurationUptime, _))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
 
   EXPECT_CALL(*prefs, SetInt64(kCurrentBytesDownloadedFromHttps, 0))
-    .Times(AtLeast(1));
-  EXPECT_CALL(*prefs, SetInt64(kCurrentBytesDownloadedFromHttp, 0))
-    .Times(AtLeast(1));
-  EXPECT_CALL(*prefs, SetInt64(kCurrentBytesDownloadedFromHttpPeer, 0))
-    .Times(AtLeast(1));
-  EXPECT_CALL(*prefs, SetInt64(kCurrentBytesDownloadedFromHttp, progress_bytes))
-    .Times(AtLeast(1));
-  EXPECT_CALL(*prefs, SetInt64(kTotalBytesDownloadedFromHttp, progress_bytes))
-    .Times(AtLeast(1));
-  EXPECT_CALL(*prefs, SetInt64(kPrefsNumReboots, 0))
       .Times(AtLeast(1));
+  EXPECT_CALL(*prefs, SetInt64(kCurrentBytesDownloadedFromHttp, 0))
+      .Times(AtLeast(1));
+  EXPECT_CALL(*prefs, SetInt64(kCurrentBytesDownloadedFromHttpPeer, 0))
+      .Times(AtLeast(1));
+  EXPECT_CALL(*prefs, SetInt64(kCurrentBytesDownloadedFromHttp, progress_bytes))
+      .Times(AtLeast(1));
+  EXPECT_CALL(*prefs, SetInt64(kTotalBytesDownloadedFromHttp, progress_bytes))
+      .Times(AtLeast(1));
+  EXPECT_CALL(*prefs, SetInt64(kPrefsNumReboots, 0)).Times(AtLeast(1));
 
   EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
 
@@ -507,22 +500,20 @@
 
   EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AnyNumber());
   EXPECT_CALL(*prefs, SetInt64(kPrefsPayloadAttemptNumber, 0))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
   EXPECT_CALL(*prefs, SetInt64(kPrefsPayloadAttemptNumber, 1))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
 
   EXPECT_CALL(*prefs, SetInt64(kPrefsFullPayloadAttemptNumber, 0))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
   EXPECT_CALL(*prefs, SetInt64(kPrefsFullPayloadAttemptNumber, 1))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
 
-  EXPECT_CALL(*prefs, SetInt64(kPrefsBackoffExpiryTime, _))
-    .Times(AtLeast(2));
+  EXPECT_CALL(*prefs, SetInt64(kPrefsBackoffExpiryTime, _)).Times(AtLeast(2));
 
-  EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlIndex, 0))
-    .Times(AtLeast(1));
+  EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlIndex, 0)).Times(AtLeast(1));
   EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlFailureCount, 0))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
 
   EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
 
@@ -548,21 +539,19 @@
 
   EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AnyNumber());
   EXPECT_CALL(*prefs, SetInt64(kPrefsPayloadAttemptNumber, 0))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
   EXPECT_CALL(*prefs, SetInt64(kPrefsPayloadAttemptNumber, 1))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
 
   // kPrefsFullPayloadAttemptNumber is not incremented for delta payloads.
   EXPECT_CALL(*prefs, SetInt64(kPrefsFullPayloadAttemptNumber, 0))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
 
-  EXPECT_CALL(*prefs, SetInt64(kPrefsBackoffExpiryTime, _))
-    .Times(1);
+  EXPECT_CALL(*prefs, SetInt64(kPrefsBackoffExpiryTime, _)).Times(1);
 
-  EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlIndex, 0))
-    .Times(AtLeast(1));
+  EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlIndex, 0)).Times(AtLeast(1));
   EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlFailureCount, 0))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
 
   EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
 
@@ -607,15 +596,14 @@
   EXPECT_CALL(*prefs2, Exists(_)).WillRepeatedly(Return(true));
   EXPECT_CALL(*prefs2, GetInt64(_, _)).Times(AtLeast(1));
   EXPECT_CALL(*prefs2, GetInt64(kPrefsPayloadAttemptNumber, _))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
   EXPECT_CALL(*prefs2, GetInt64(kPrefsFullPayloadAttemptNumber, _))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
   EXPECT_CALL(*prefs2, GetInt64(kPrefsCurrentUrlIndex, _))
       .WillRepeatedly(DoAll(SetArgPointee<1>(2), Return(true)));
   EXPECT_CALL(*prefs2, GetInt64(kPrefsCurrentUrlFailureCount, _))
-    .Times(AtLeast(1));
-  EXPECT_CALL(*prefs2, GetInt64(kPrefsUrlSwitchCount, _))
-    .Times(AtLeast(1));
+      .Times(AtLeast(1));
+  EXPECT_CALL(*prefs2, GetInt64(kPrefsUrlSwitchCount, _)).Times(AtLeast(1));
 
   // Note: This will be a different payload object, but the response should
   // have the same hash as before so as to not trivially reset because the
@@ -639,7 +627,7 @@
   PayloadState payload_state;
   FakeSystemState fake_system_state;
   OmahaRequestParams params(&fake_system_state);
-  params.Init("", "", true);  // is_interactive = True.
+  params.Init("", "", true);  // interactive = True.
   fake_system_state.set_request_params(&params);
 
   EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
@@ -662,7 +650,7 @@
   PayloadState payload_state;
   FakeSystemState fake_system_state;
   OmahaRequestParams params(&fake_system_state);
-  params.Init("", "", false);  // is_interactive = False.
+  params.Init("", "", false);  // interactive = False.
   fake_system_state.set_request_params(&params);
 
   EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
@@ -719,7 +707,7 @@
                                      TimeDelta expected_days) {
   payload_state->DownloadComplete();
   EXPECT_EQ(expected_attempt_number,
-      payload_state->GetFullPayloadAttemptNumber());
+            payload_state->GetFullPayloadAttemptNumber());
   EXPECT_TRUE(payload_state->ShouldBackoffDownload());
   Time backoff_expiry_time = payload_state->GetBackoffExpiryTime();
   // Add 1 hour extra to the 6 hour fuzz check to tolerate edge cases.
@@ -741,16 +729,16 @@
   SetupPayloadStateWith2Urls(
       "Hash8939", true, false, &payload_state, &response);
 
-  CheckPayloadBackoffState(&payload_state, 1,  TimeDelta::FromDays(1));
-  CheckPayloadBackoffState(&payload_state, 2,  TimeDelta::FromDays(2));
-  CheckPayloadBackoffState(&payload_state, 3,  TimeDelta::FromDays(4));
-  CheckPayloadBackoffState(&payload_state, 4,  TimeDelta::FromDays(8));
-  CheckPayloadBackoffState(&payload_state, 5,  TimeDelta::FromDays(16));
-  CheckPayloadBackoffState(&payload_state, 6,  TimeDelta::FromDays(16));
-  CheckPayloadBackoffState(&payload_state, 7,  TimeDelta::FromDays(16));
-  CheckPayloadBackoffState(&payload_state, 8,  TimeDelta::FromDays(16));
-  CheckPayloadBackoffState(&payload_state, 9,  TimeDelta::FromDays(16));
-  CheckPayloadBackoffState(&payload_state, 10,  TimeDelta::FromDays(16));
+  CheckPayloadBackoffState(&payload_state, 1, TimeDelta::FromDays(1));
+  CheckPayloadBackoffState(&payload_state, 2, TimeDelta::FromDays(2));
+  CheckPayloadBackoffState(&payload_state, 3, TimeDelta::FromDays(4));
+  CheckPayloadBackoffState(&payload_state, 4, TimeDelta::FromDays(8));
+  CheckPayloadBackoffState(&payload_state, 5, TimeDelta::FromDays(16));
+  CheckPayloadBackoffState(&payload_state, 6, TimeDelta::FromDays(16));
+  CheckPayloadBackoffState(&payload_state, 7, TimeDelta::FromDays(16));
+  CheckPayloadBackoffState(&payload_state, 8, TimeDelta::FromDays(16));
+  CheckPayloadBackoffState(&payload_state, 9, TimeDelta::FromDays(16));
+  CheckPayloadBackoffState(&payload_state, 10, TimeDelta::FromDays(16));
 }
 
 TEST(PayloadStateTest, BackoffLogicCanBeDisabled) {
@@ -820,8 +808,8 @@
             payload_state.GetCurrentBytesDownloaded(kDownloadSourceHttpServer));
   EXPECT_EQ(http_total,
             payload_state.GetTotalBytesDownloaded(kDownloadSourceHttpServer));
-  EXPECT_EQ(0U, payload_state.GetCurrentBytesDownloaded(
-                 kDownloadSourceHttpsServer));
+  EXPECT_EQ(
+      0U, payload_state.GetCurrentBytesDownloaded(kDownloadSourceHttpsServer));
   EXPECT_EQ(https_total,
             payload_state.GetTotalBytesDownloaded(kDownloadSourceHttpsServer));
 
@@ -837,8 +825,9 @@
             payload_state.GetCurrentBytesDownloaded(kDownloadSourceHttpServer));
   EXPECT_EQ(http_total,
             payload_state.GetTotalBytesDownloaded(kDownloadSourceHttpServer));
-  EXPECT_EQ(second_chunk, payload_state.GetCurrentBytesDownloaded(
-              kDownloadSourceHttpsServer));
+  EXPECT_EQ(
+      second_chunk,
+      payload_state.GetCurrentBytesDownloaded(kDownloadSourceHttpsServer));
   EXPECT_EQ(https_total,
             payload_state.GetTotalBytesDownloaded(kDownloadSourceHttpsServer));
 
@@ -854,8 +843,9 @@
             payload_state.GetCurrentBytesDownloaded(kDownloadSourceHttpServer));
   EXPECT_EQ(http_total,
             payload_state.GetTotalBytesDownloaded(kDownloadSourceHttpServer));
-  EXPECT_EQ(second_chunk, payload_state.GetCurrentBytesDownloaded(
-                 kDownloadSourceHttpsServer));
+  EXPECT_EQ(
+      second_chunk,
+      payload_state.GetCurrentBytesDownloaded(kDownloadSourceHttpsServer));
   EXPECT_EQ(https_total,
             payload_state.GetTotalBytesDownloaded(kDownloadSourceHttpsServer));
 
@@ -871,7 +861,7 @@
 
   EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
               ReportSuccessfulUpdateMetrics(
-                  1, _, kPayloadTypeFull, _, _, 314, _, _, 3));
+                  1, _, kPayloadTypeFull, _, _, 314, _, _, _, 3));
 
   payload_state.UpdateSucceeded();
 
@@ -880,8 +870,8 @@
             payload_state.GetCurrentBytesDownloaded(kDownloadSourceHttpServer));
   EXPECT_EQ(0U,
             payload_state.GetTotalBytesDownloaded(kDownloadSourceHttpServer));
-  EXPECT_EQ(0U, payload_state.GetCurrentBytesDownloaded(
-                 kDownloadSourceHttpsServer));
+  EXPECT_EQ(
+      0U, payload_state.GetCurrentBytesDownloaded(kDownloadSourceHttpsServer));
   EXPECT_EQ(0U,
             payload_state.GetTotalBytesDownloaded(kDownloadSourceHttpsServer));
   EXPECT_EQ(0, payload_state.GetNumResponsesSeen());
@@ -920,6 +910,7 @@
                   _,
                   _,
                   _,
+                  _,
                   _))
       .Times(1);
 
@@ -943,8 +934,8 @@
             payload_state.GetCurrentBytesDownloaded(kDownloadSourceHttpServer));
   EXPECT_EQ(num_bytes,
             payload_state.GetTotalBytesDownloaded(kDownloadSourceHttpServer));
-  EXPECT_EQ(0U, payload_state.GetCurrentBytesDownloaded(
-                 kDownloadSourceHttpsServer));
+  EXPECT_EQ(
+      0U, payload_state.GetCurrentBytesDownloaded(kDownloadSourceHttpsServer));
   EXPECT_EQ(0U,
             payload_state.GetTotalBytesDownloaded(kDownloadSourceHttpsServer));
 
@@ -984,6 +975,37 @@
   EXPECT_EQ(0U, payload_state.GetNumReboots());
 }
 
+TEST(PayloadStateTest, RollbackHappened) {
+  FakeSystemState fake_system_state;
+  PayloadState payload_state;
+
+  NiceMock<MockPrefs>* mock_powerwash_safe_prefs =
+      fake_system_state.mock_powerwash_safe_prefs();
+  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+
+  // Verify pre-conditions are good.
+  EXPECT_FALSE(payload_state.GetRollbackHappened());
+
+  // Set to true.
+  EXPECT_CALL(*mock_powerwash_safe_prefs,
+              SetBoolean(kPrefsRollbackHappened, true));
+  payload_state.SetRollbackHappened(true);
+  EXPECT_TRUE(payload_state.GetRollbackHappened());
+
+  // Set to false.
+  EXPECT_CALL(*mock_powerwash_safe_prefs, Delete(kPrefsRollbackHappened));
+  payload_state.SetRollbackHappened(false);
+  EXPECT_FALSE(payload_state.GetRollbackHappened());
+
+  // Let's verify we can reload it correctly.
+  EXPECT_CALL(*mock_powerwash_safe_prefs, GetBoolean(kPrefsRollbackHappened, _))
+      .WillOnce(DoAll(SetArgPointee<1>(true), Return(true)));
+  EXPECT_CALL(*mock_powerwash_safe_prefs,
+              SetBoolean(kPrefsRollbackHappened, true));
+  payload_state.LoadRollbackHappened();
+  EXPECT_TRUE(payload_state.GetRollbackHappened());
+}
+
 TEST(PayloadStateTest, RollbackVersion) {
   FakeSystemState fake_system_state;
   PayloadState payload_state;
@@ -1001,8 +1023,8 @@
   params.Init(rollback_version, "", false);
   fake_system_state.set_request_params(&params);
 
-  EXPECT_CALL(*mock_powerwash_safe_prefs, SetString(kPrefsRollbackVersion,
-                                                    rollback_version));
+  EXPECT_CALL(*mock_powerwash_safe_prefs,
+              SetString(kPrefsRollbackVersion, rollback_version));
   payload_state.Rollback();
 
   EXPECT_EQ(rollback_version, payload_state.GetRollbackVersion());
@@ -1010,11 +1032,10 @@
   // Change it up a little and verify we load it correctly.
   rollback_version = "2345.0.1";
   // Let's verify we can reload it correctly.
-  EXPECT_CALL(*mock_powerwash_safe_prefs, GetString(
-      kPrefsRollbackVersion, _)).WillOnce(DoAll(
-          SetArgPointee<1>(rollback_version), Return(true)));
-  EXPECT_CALL(*mock_powerwash_safe_prefs, SetString(kPrefsRollbackVersion,
-                                                    rollback_version));
+  EXPECT_CALL(*mock_powerwash_safe_prefs, GetString(kPrefsRollbackVersion, _))
+      .WillOnce(DoAll(SetArgPointee<1>(rollback_version), Return(true)));
+  EXPECT_CALL(*mock_powerwash_safe_prefs,
+              SetString(kPrefsRollbackVersion, rollback_version));
   payload_state.LoadRollbackVersion();
   EXPECT_EQ(rollback_version, payload_state.GetRollbackVersion());
 
@@ -1305,9 +1326,9 @@
 
   // Simulate a successful download and update.
   payload_state.DownloadComplete();
-  EXPECT_CALL(
-      *fake_system_state.mock_metrics_reporter(),
-      ReportSuccessfulUpdateMetrics(_, _, kPayloadTypeDelta, _, _, _, _, _, _));
+  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+              ReportSuccessfulUpdateMetrics(
+                  _, _, kPayloadTypeDelta, _, _, _, _, _, _, _));
   payload_state.UpdateSucceeded();
 
   // Mock the request to a request where the delta was disabled but Omaha sends
@@ -1321,9 +1342,9 @@
 
   payload_state.DownloadComplete();
 
-  EXPECT_CALL(
-      *fake_system_state.mock_metrics_reporter(),
-      ReportSuccessfulUpdateMetrics(_, _, kPayloadTypeDelta, _, _, _, _, _, _));
+  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+              ReportSuccessfulUpdateMetrics(
+                  _, _, kPayloadTypeDelta, _, _, _, _, _, _, _));
   payload_state.UpdateSucceeded();
 }
 
@@ -1346,7 +1367,7 @@
 
   EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
               ReportSuccessfulUpdateMetrics(
-                  _, _, kPayloadTypeForcedFull, _, _, _, _, _, _));
+                  _, _, kPayloadTypeForcedFull, _, _, _, _, _, _, _));
   payload_state.UpdateSucceeded();
 }
 
@@ -1368,9 +1389,9 @@
   // Simulate a successful download and update.
   payload_state.DownloadComplete();
 
-  EXPECT_CALL(
-      *fake_system_state.mock_metrics_reporter(),
-      ReportSuccessfulUpdateMetrics(_, _, kPayloadTypeFull, _, _, _, _, _, _));
+  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+              ReportSuccessfulUpdateMetrics(
+                  _, _, kPayloadTypeFull, _, _, _, _, _, _, _));
   payload_state.UpdateSucceeded();
 }
 
@@ -1538,8 +1559,8 @@
   EXPECT_TRUE(payload_state.P2PAttemptAllowed());
 
   // Set clock to half the deadline - this should work.
-  fake_clock.SetWallclockTime(epoch +
-      TimeDelta::FromSeconds(kMaxP2PAttemptTimeSeconds) / 2);
+  fake_clock.SetWallclockTime(
+      epoch + TimeDelta::FromSeconds(kMaxP2PAttemptTimeSeconds) / 2);
   EXPECT_TRUE(payload_state.P2PAttemptAllowed());
 
   // Check that the first attempt timestamp hasn't changed just
@@ -1547,13 +1568,13 @@
   EXPECT_EQ(epoch, payload_state.GetP2PFirstAttemptTimestamp());
 
   // Set clock to _just_ before the deadline - this should work.
-  fake_clock.SetWallclockTime(epoch +
-      TimeDelta::FromSeconds(kMaxP2PAttemptTimeSeconds - 1));
+  fake_clock.SetWallclockTime(
+      epoch + TimeDelta::FromSeconds(kMaxP2PAttemptTimeSeconds - 1));
   EXPECT_TRUE(payload_state.P2PAttemptAllowed());
 
   // Set clock to _just_ after the deadline - this should not work.
-  fake_clock.SetWallclockTime(epoch +
-      TimeDelta::FromSeconds(kMaxP2PAttemptTimeSeconds + 1));
+  fake_clock.SetWallclockTime(
+      epoch + TimeDelta::FromSeconds(kMaxP2PAttemptTimeSeconds + 1));
   EXPECT_FALSE(payload_state.P2PAttemptAllowed());
 }
 
diff --git a/power_manager_android.cc b/power_manager_android.cc
index 6b7e880..63a0351 100644
--- a/power_manager_android.cc
+++ b/power_manager_android.cc
@@ -16,6 +16,8 @@
 
 #include "update_engine/power_manager_android.h"
 
+#include <memory>
+
 #include <base/logging.h>
 
 namespace chromeos_update_engine {
@@ -24,7 +26,7 @@
 std::unique_ptr<PowerManagerInterface> CreatePowerManager() {
   return std::unique_ptr<PowerManagerInterface>(new PowerManagerAndroid());
 }
-}
+}  // namespace power_manager
 
 bool PowerManagerAndroid::RequestReboot() {
   LOG(WARNING) << "PowerManager not implemented.";
diff --git a/power_manager_chromeos.cc b/power_manager_chromeos.cc
index 23fb032..531d367 100644
--- a/power_manager_chromeos.cc
+++ b/power_manager_chromeos.cc
@@ -29,7 +29,7 @@
 std::unique_ptr<PowerManagerInterface> CreatePowerManager() {
   return std::unique_ptr<PowerManagerInterface>(new PowerManagerChromeOS());
 }
-}
+}  // namespace power_manager
 
 PowerManagerChromeOS::PowerManagerChromeOS()
     : power_manager_proxy_(DBusConnection::Get()->GetDBus()) {}
diff --git a/power_manager_chromeos.h b/power_manager_chromeos.h
index ad49889..eeb14d8 100644
--- a/power_manager_chromeos.h
+++ b/power_manager_chromeos.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_POWER_MANAGER_H_
-#define UPDATE_ENGINE_POWER_MANAGER_H_
+#ifndef UPDATE_ENGINE_POWER_MANAGER_CHROMEOS_H_
+#define UPDATE_ENGINE_POWER_MANAGER_CHROMEOS_H_
 
 #include <base/macros.h>
 #include <power_manager/dbus-proxies.h>
@@ -41,4 +41,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_POWER_MANAGER_H_
+#endif  // UPDATE_ENGINE_POWER_MANAGER_CHROMEOS_H_
diff --git a/power_manager_interface.h b/power_manager_interface.h
index be059ec..8f77650 100644
--- a/power_manager_interface.h
+++ b/power_manager_interface.h
@@ -40,7 +40,7 @@
 namespace power_manager {
 // Factory function that creates a PowerManager.
 std::unique_ptr<PowerManagerInterface> CreatePowerManager();
-}
+}  // namespace power_manager
 
 }  // namespace chromeos_update_engine
 
diff --git a/pylintrc b/pylintrc
index 80a7605..33adec2 100644
--- a/pylintrc
+++ b/pylintrc
@@ -26,7 +26,7 @@
 
 # Add files or directories to the blacklist. They should be base names, not
 # paths.
-ignore=CVS,.svn,.git
+ignore=CVS,.svn,.git,update_metadata_pb2.py
 
 # Pickle collected data for later comparisons.
 persistent=yes
diff --git a/real_system_state.cc b/real_system_state.cc
index 8e7ad51..2f18b4d 100644
--- a/real_system_state.cc
+++ b/real_system_state.cc
@@ -18,6 +18,7 @@
 
 #include <memory>
 #include <string>
+#include <utility>
 
 #include <base/bind.h>
 #include <base/files/file_util.h>
@@ -31,9 +32,11 @@
 #include "update_engine/common/boot_control.h"
 #include "update_engine/common/boot_control_stub.h"
 #include "update_engine/common/constants.h"
+#include "update_engine/common/dlcservice.h"
 #include "update_engine/common/hardware.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/metrics_reporter_omaha.h"
+#include "update_engine/update_boot_flags_action.h"
 #if USE_DBUS
 #include "update_engine/dbus_connection.h"
 #endif  // USE_DBUS
@@ -62,13 +65,13 @@
 
   hardware_ = hardware::CreateHardware();
   if (!hardware_) {
-    LOG(ERROR) << "Error intializing the HardwareInterface.";
+    LOG(ERROR) << "Error initializing the HardwareInterface.";
     return false;
   }
 
 #if USE_CHROME_KIOSK_APP
-  libcros_proxy_.reset(new org::chromium::LibCrosServiceInterfaceProxy(
-      DBusConnection::Get()->GetDBus(), chromeos::kLibCrosServiceName));
+  kiosk_app_proxy_.reset(new org::chromium::KioskAppServiceInterfaceProxy(
+      DBusConnection::Get()->GetDBus(), chromeos::kKioskAppServiceName));
 #endif  // USE_CHROME_KIOSK_APP
 
   LOG_IF(INFO, !hardware_->IsNormalBootMode()) << "Booted in dev mode.";
@@ -76,13 +79,19 @@
 
   connection_manager_ = connection_manager::CreateConnectionManager(this);
   if (!connection_manager_) {
-    LOG(ERROR) << "Error intializing the ConnectionManagerInterface.";
+    LOG(ERROR) << "Error initializing the ConnectionManagerInterface.";
     return false;
   }
 
   power_manager_ = power_manager::CreatePowerManager();
   if (!power_manager_) {
-    LOG(ERROR) << "Error intializing the PowerManagerInterface.";
+    LOG(ERROR) << "Error initializing the PowerManagerInterface.";
+    return false;
+  }
+
+  dlcservice_ = CreateDlcService();
+  if (!dlcservice_) {
+    LOG(ERROR) << "Error initializing the DlcServiceInterface.";
     return false;
   }
 
@@ -140,8 +149,8 @@
       new CertificateChecker(prefs_.get(), &openssl_wrapper_));
   certificate_checker_->Init();
 
-  update_attempter_.reset(new UpdateAttempter(this,
-                                              certificate_checker_.get()));
+  update_attempter_.reset(
+      new UpdateAttempter(this, certificate_checker_.get()));
 
   // Initialize the UpdateAttempter before the UpdateManager.
   update_attempter_->Init();
@@ -150,7 +159,7 @@
   chromeos_update_manager::State* um_state =
       chromeos_update_manager::DefaultStateFactory(&policy_provider_,
 #if USE_CHROME_KIOSK_APP
-                                                   libcros_proxy_.get(),
+                                                   kiosk_app_proxy_.get(),
 #else
                                                    nullptr,
 #endif  // USE_CHROME_KIOSK_APP
@@ -160,21 +169,37 @@
     LOG(ERROR) << "Failed to initialize the Update Manager.";
     return false;
   }
-  update_manager_.reset(
-      new chromeos_update_manager::UpdateManager(
-          &clock_, base::TimeDelta::FromSeconds(5),
-          base::TimeDelta::FromHours(12), um_state));
+  update_manager_.reset(new chromeos_update_manager::UpdateManager(
+      &clock_,
+      base::TimeDelta::FromSeconds(5),
+      base::TimeDelta::FromHours(12),
+      um_state));
 
   // The P2P Manager depends on the Update Manager for its initialization.
-  p2p_manager_.reset(P2PManager::Construct(
-          nullptr, &clock_, update_manager_.get(), "cros_au",
-          kMaxP2PFilesToKeep, base::TimeDelta::FromDays(kMaxP2PFileAgeDays)));
+  p2p_manager_.reset(
+      P2PManager::Construct(nullptr,
+                            &clock_,
+                            update_manager_.get(),
+                            "cros_au",
+                            kMaxP2PFilesToKeep,
+                            base::TimeDelta::FromDays(kMaxP2PFileAgeDays)));
 
   if (!payload_state_.Initialize(this)) {
     LOG(ERROR) << "Failed to initialize the payload state object.";
     return false;
   }
 
+  // For devices that are not rollback enabled (i.e. consumer devices),
+  // initialize max kernel key version to 0xfffffffe, which is logically
+  // infinity.
+  if (policy_provider_.IsConsumerDevice()) {
+    if (!hardware()->SetMaxKernelKeyRollforward(
+            chromeos_update_manager::kRollforwardInfinity)) {
+      LOG(ERROR) << "Failed to set kernel_max_rollforward to infinity for"
+                 << " consumer devices";
+    }
+  }
+
   // All is well. Initialization successful.
   return true;
 }
@@ -183,23 +208,28 @@
   // Initiate update checks.
   update_attempter_->ScheduleUpdates();
 
+  auto update_boot_flags_action =
+      std::make_unique<UpdateBootFlagsAction>(boot_control_.get());
+  processor_.EnqueueAction(std::move(update_boot_flags_action));
   // Update boot flags after 45 seconds.
   MessageLoop::current()->PostDelayedTask(
       FROM_HERE,
-      base::Bind(&UpdateAttempter::UpdateBootFlags,
-                 base::Unretained(update_attempter_.get())),
+      base::Bind(&ActionProcessor::StartProcessing,
+                 base::Unretained(&processor_)),
       base::TimeDelta::FromSeconds(45));
 
   // Broadcast the update engine status on startup to ensure consistent system
   // state on crashes.
-  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
-      &UpdateAttempter::BroadcastStatus,
-      base::Unretained(update_attempter_.get())));
+  MessageLoop::current()->PostTask(
+      FROM_HERE,
+      base::Bind(&UpdateAttempter::BroadcastStatus,
+                 base::Unretained(update_attempter_.get())));
 
   // Run the UpdateEngineStarted() method on |update_attempter|.
-  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
-      &UpdateAttempter::UpdateEngineStarted,
-      base::Unretained(update_attempter_.get())));
+  MessageLoop::current()->PostTask(
+      FROM_HERE,
+      base::Bind(&UpdateAttempter::UpdateEngineStarted,
+                 base::Unretained(update_attempter_.get())));
   return true;
 }
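
The Run() hunk above replaces the direct, delayed call to UpdateAttempter::UpdateBootFlags with an UpdateBootFlagsAction that is enqueued on the new ActionProcessor member up front and only started 45 seconds later from the message loop. A toy sketch of that enqueue-now, start-later shape, with simplified action/processor types and a short sleep standing in for the delayed PostDelayedTask, is:

#include <chrono>
#include <iostream>
#include <memory>
#include <thread>
#include <vector>

// Minimal action/processor stand-ins; the real classes carry much more state.
struct Action {
  virtual ~Action() = default;
  virtual void Perform() = 0;
};

struct UpdateBootFlagsAction : Action {
  void Perform() override { std::cout << "updating boot flags\n"; }
};

class ActionProcessor {
 public:
  void EnqueueAction(std::unique_ptr<Action> action) {
    actions_.push_back(std::move(action));
  }
  void StartProcessing() {
    for (auto& action : actions_) action->Perform();
  }

 private:
  std::vector<std::unique_ptr<Action>> actions_;
};

int main() {
  ActionProcessor processor;
  processor.EnqueueAction(std::make_unique<UpdateBootFlagsAction>());
  // The patch posts StartProcessing() to the message loop with a 45 s delay;
  // a short sleep stands in for that here so the sketch runs quickly.
  std::this_thread::sleep_for(std::chrono::milliseconds(100));
  processor.StartProcessing();
  return 0;
}
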
 
diff --git a/real_system_state.h b/real_system_state.h
index 49f7c31..4712008 100644
--- a/real_system_state.h
+++ b/real_system_state.h
@@ -25,12 +25,13 @@
 #include <policy/device_policy.h>
 
 #if USE_CHROME_KIOSK_APP
-#include <libcros/dbus-proxies.h>
+#include <kiosk-app/dbus-proxies.h>
 #endif  // USE_CHROME_KIOSK_APP
 
 #include "update_engine/certificate_checker.h"
 #include "update_engine/common/boot_control_interface.h"
 #include "update_engine/common/clock.h"
+#include "update_engine/common/dlcservice_interface.h"
 #include "update_engine/common/hardware_interface.h"
 #include "update_engine/common/prefs.h"
 #include "update_engine/connection_manager_interface.h"
@@ -126,15 +127,23 @@
 
   inline bool system_rebooted() override { return system_rebooted_; }
 
+  inline DlcServiceInterface* dlcservice() override {
+    return dlcservice_.get();
+  }
+
  private:
   // Real DBus proxies using the DBus connection.
 #if USE_CHROME_KIOSK_APP
-  std::unique_ptr<org::chromium::LibCrosServiceInterfaceProxy> libcros_proxy_;
+  std::unique_ptr<org::chromium::KioskAppServiceInterfaceProxy>
+      kiosk_app_proxy_;
 #endif  // USE_CHROME_KIOSK_APP
 
   // Interface for the power manager.
   std::unique_ptr<PowerManagerInterface> power_manager_;
 
+  // Interface for dlcservice.
+  std::unique_ptr<DlcServiceInterface> dlcservice_;
+
   // Interface for the boot control.
   std::unique_ptr<BootControlInterface> boot_control_;
 
@@ -184,6 +193,8 @@
   // rebooted. Important for tracking whether you are running an instance of the
   // update engine on first boot or due to a crash/restart.
   bool system_rebooted_{false};
+
+  ActionProcessor processor_;
 };
 
 }  // namespace chromeos_update_engine
diff --git a/sample_omaha_v3_response.xml b/sample_omaha_v3_response.xml
index abba523..1aec1f2 100644
--- a/sample_omaha_v3_response.xml
+++ b/sample_omaha_v3_response.xml
@@ -1,18 +1,21 @@
+<?xml version="1.0" encoding="UTF-8"?>
 <response protocol="3.0" server="prod">
-  <daystart elapsed_seconds="56652"/>
-  <app appid="{90f229ce-83e2-4faf-8479-e368a34938b1}" status="ok">
+  <daystart elapsed_days="4086" elapsed_seconds="62499"/>
+  <app appid="{C166AF52-7EE9-4F08-AAA7-B4B895A9F336}" cohort="1:3:" cohortname="caroline_beta" status="ok">
+    <ping status="ok"/>
     <updatecheck status="ok">
       <urls>
-        <url codebase="https://storage.googleapis.com/chromeos-releases-public/canary-channel/canary-channel/3095.0.0/"/>
+        <url codebase="http://dl.google.com/chromeos/caroline/10323.52.0/beta-channel/"/>
+        <url codebase="https://dl.google.com/chromeos/caroline/10323.52.0/beta-channel/"/>
       </urls>
-      <manifest version="3095.0.0">
-        <packages>
-          <package hash="HVOmp67vBjPdvpWmOC2Uw4UDwsc=" name="chromeos_3095.0.0_x86-zgb_canary-channel_full_mp-v2.bin-df37843370ddf1e3819a2afeaa934faa.signed" required="true" size="400752559"/>
-        </packages>
+      <manifest version="10323.52.0">
         <actions>
-          <action event="update" run="chromeos_3095.0.0_x86-zgb_canary-channel_full_mp-v2.bin-df37843370ddf1e3819a2afeaa934faa.signed"/>
-          <action ChromeOSVersion="3095.0.0" ChromeVersion="24.0.1307.0" IsDelta="true" IsDeltaPayload="false" MaxDaysToScatter="14" MetadataSignatureRsa="xXrO/LahHlKk3YmqEf1qE0PN587Sc2IJV+FN7J7x1h49waNQIy/QwYO4LaOySgETe5JZXtkAEzzqakfJwxQ2pVfzj1GkExwjd5LTn1He2GvA73B8fKbS4bfP7dbUFwD5039xCwf1U2gezFViOiOPiVURx/pEsdhv+Cqx/3HbjIuj5au2dooSyDxLC5AnODzAKyYfAcjMuiLON+9SqmctJW+VjzdY9SbJAnkH2qqVjFyBKAXsYT+hOTIJ3MJpg8OSVxMMtGB99PxbOJ52F37d2Y5Fws/AUkNnNEsan/WRJA1kuWoS6rpeR8JQYuVhLiK2u/KpOcvMVRw3Q2VUxtcAGw==" MetadataSize="58315" event="postinstall" sha256="DIAVxoI+8NpsudUawOA5U92VHlaxQBS3ejN4EPM6T2A="/>
+          <action event="update" run="chromeos_10323.46.0-10323.52.0_caroline_beta-channel_delta_mp.bin-f5c4e5e263c4c119d7d22e0f18a586e5.signed"/>
+          <action ChromeOSVersion="10323.52.0" ChromeVersion="65.0.3325.148" IsDelta="true" IsDeltaPayload="true" MaxDaysToScatter="14" MetadataSignatureRsa="tkrOiIQn2GMQzjLckjiiOyuyV+RqupNW50t6JlFWOhAzWM8dm1qrJVYTYlULxTVlx4BHijbNuX7+OYk6zhRuxuceY7sUwrCM2yxERZ/sDLA5wF0u/8KLP7qrDKL2OIk9JJhF0EdLPylUAEt6vWW4pbYRFhK0girgWIPSdqdjkfHNTKWEUtcQ3iAAB8AvLNOyGP/en0makFvSVXZ8Mq95UrSwWMYFdVmWdVkyRtLYSwLaz5J45y3DQuk3YjeaHhRlH/AQ3OJXX6rjTCwgyiddAccOalwFVwrczq6AUs5S+/vWAMqi+7YfCPgjRdPPIhRJVKcIiAPb8RNXlP+rigGwew==" MetadataSize="414487" event="postinstall" sha256="kONiEAWQV7UyBjOoFBcKDz0OkUx0yRuIGzse4O6rmDs="/>
         </actions>
+        <packages>
+          <package fp="1.90e36210059057b5320633a814170a0f3d0e914c74c91b881b3b1ee0eeab983b" hash="gD1W+dPZiNEhz3f3odCtfL81Yi8=" hash_sha256="90e36210059057b5320633a814170a0f3d0e914c74c91b881b3b1ee0eeab983b" name="chromeos_10323.46.0-10323.52.0_caroline_beta-channel_delta_mp.bin-f5c4e5e263c4c119d7d22e0f18a586e5.signed" required="true" size="29981022"/>
+        </packages>
       </manifest>
     </updatecheck>
   </app>
diff --git a/scripts/blockdiff.py b/scripts/blockdiff.py
index 1dc60a6..5793def 100755
--- a/scripts/blockdiff.py
+++ b/scripts/blockdiff.py
@@ -1,14 +1,26 @@
 #!/usr/bin/python2
 #
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Block diff utility."""
 
 from __future__ import print_function
 
-import optparse
+# pylint: disable=import-error
+import argparse
 import sys
 
 
@@ -71,28 +83,25 @@
 
 def main(argv):
   # Parse command-line arguments.
-  parser = optparse.OptionParser(
-      usage='Usage: %prog FILE1 FILE2',
-      description='Compare FILE1 and FILE2 by blocks.')
+  parser = argparse.ArgumentParser(
+      description='Compare FILE1 and FILE2 by blocks.',
+      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
 
-  parser.add_option('-b', '--block-size', metavar='NUM', type=int, default=4096,
-                    help='the block size to use (default: %default)')
-  parser.add_option('-m', '--max-length', metavar='NUM', type=int, default=-1,
-                    help='maximum number of bytes to compared')
+  parser.add_argument('-b', '--block-size', metavar='NUM', type=int,
+                      default=4096, help='the block size to use')
+  parser.add_argument('-m', '--max-length', metavar='NUM', type=int, default=-1,
+                      help='maximum number of bytes to compare')
+  parser.add_argument('file1', metavar='FILE1')
+  parser.add_argument('file2', metavar='FILE2')
 
-  opts, args = parser.parse_args(argv[1:])
-
-  try:
-    name1, name2 = args
-  except ValueError:
-    parser.error('unexpected number of arguments')
+  args = parser.parse_args(argv[1:])
 
   # Perform the block diff.
   try:
-    with open(name1) as file1:
-      with open(name2) as file2:
-        diff_list = BlockDiff(opts.block_size, file1, file2, name1, name2,
-                              opts.max_length)
+    with open(args.file1) as file1:
+      with open(args.file2) as file2:
+        diff_list = BlockDiff(args.block_size, file1, file2,
+                              args.file1, args.file2, args.max_length)
   except BlockDiffError as e:
     print('Error: %s' % e, file=sys.stderr)
     return 2
diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload
index 65c63f5..c88709c 100755
--- a/scripts/brillo_update_payload
+++ b/scripts/brillo_update_payload
@@ -25,6 +25,7 @@
 #  sign        generate a signed payload
 #  properties  generate a properties file from a payload
 #  verify      verify a payload by recreating a target image.
+#  check       verify a payload using paycheck (static testing)
 #
 #  Generate command arguments:
 #  --payload             generated unsigned payload output file
@@ -67,6 +68,9 @@
 #  --payload             payload input file
 #  --source_image        verify payload to the specified source image.
 #  --target_image        the target image to verify upon.
+#
+#  Check command arguments:
+#     Symmetrical with the verify command.
 
 
 # Exit codes:
@@ -82,11 +86,11 @@
 }
 
 # Loads shflags. We first look at the default install location; then look for
-# crosutils (chroot); finally check our own directory (au-generator zipfile).
+# crosutils (chroot); finally check our own directory.
 load_shflags() {
   local my_dir="$(dirname "$(readlink -f "$0")")"
   local path
-  for path in /usr/share/misc {/usr/lib/crosutils,"${my_dir}"}/lib/shflags; do
+  for path in /usr/share/misc "${my_dir}"/lib/shflags; do
     if [[ -r "${path}/shflags" ]]; then
       . "${path}/shflags" || die "Could not load ${path}/shflags."
       return
@@ -102,7 +106,9 @@
 for signing."
 HELP_SIGN="sign: Insert the signatures into the unsigned payload."
 HELP_PROPERTIES="properties: Extract payload properties to a file."
-HELP_VERIFY="verify: Verify a (signed) update payload."
+HELP_VERIFY="verify: Verify a (signed) update payload using delta_generator."
+HELP_CHECK="check: Check a (signed) update payload using paycheck (static \
+testing)."
 
 usage() {
   echo "Supported commands:"
@@ -112,13 +118,14 @@
   echo "${HELP_SIGN}"
   echo "${HELP_PROPERTIES}"
   echo "${HELP_VERIFY}"
+  echo "${HELP_CHECK}"
   echo
   echo "Use: \"$0 <command> --help\" for more options."
 }
 
 # Check that a command is specified.
 if [[ $# -lt 1 ]]; then
-  echo "Please specify a command [generate|hash|sign|properties]"
+  echo "Please specify a command [generate|hash|sign|properties|verify|check]"
   exit 1
 fi
 
@@ -147,6 +154,10 @@
     FLAGS_HELP="${HELP_VERIFY}"
     ;;
 
+  check)
+    FLAGS_HELP="${HELP_CHECK}"
+    ;;
+
   *)
     echo "Unrecognized command: \"${COMMAND}\"" >&2
     usage >&2
@@ -202,7 +213,7 @@
     "Path to output the extracted property files. If '-' is passed stdout will \
 be used."
 fi
-if [[ "${COMMAND}" == "verify" ]]; then
+if [[ "${COMMAND}" == "verify" || "${COMMAND}" == "check" ]]; then
   DEFINE_string payload "" \
     "Path to the input payload file."
   DEFINE_string target_image "" \
@@ -247,6 +258,9 @@
 # Path to the postinstall config file in target image if exists.
 POSTINSTALL_CONFIG_FILE=""
 
+# Path to the dynamic partition info file in target image if exists.
+DYNAMIC_PARTITION_INFO_FILE=""
+
 # read_option_int <file.txt> <option_key> [default_value]
 #
 # Reads the unsigned integer value associated with |option_key| in a key=value
@@ -268,14 +282,12 @@
 
 # truncate_file <file_path> <file_size>
 #
-# Truncate the given |file_path| to |file_size| using perl.
+# Truncate the given |file_path| to |file_size| using python.
 # The truncate binary might not be available.
 truncate_file() {
   local file_path="$1"
   local file_size="$2"
-  perl -e "open(FILE, \"+<\", \$ARGV[0]); \
-           truncate(FILE, ${file_size}); \
-           close(FILE);" "${file_path}"
+  python -c "open(\"${file_path}\", 'a').truncate(${file_size})"
 }
 
 # Create a temporary file in the work_dir with an optional pattern name.
@@ -324,7 +336,7 @@
 
   # Brillo images are zip files. We detect the 4-byte magic header of the zip
   # file.
-  local magic=$(head --bytes=4 "${image}" | hexdump -e '1/1 "%.2x"')
+  local magic=$(xxd -p -l4 "${image}")
   if [[ "${magic}" == "504b0304" ]]; then
     echo "Detected .zip file, extracting Brillo image."
     extract_image_brillo "$@"
@@ -359,31 +371,85 @@
 
   cros_generate_update_payload --extract \
     --image "${image}" \
-    --kern_path "${kernel}" --root_path "${root}" \
-    --work_dir "${FLAGS_work_dir}" --outside_chroot
+    --kern_path "${kernel}" --root_path "${root}"
 
-  # Chrome OS uses major_version 1 payloads for all versions, even if the
-  # updater supports a newer major version.
-  FORCE_MAJOR_VERSION="1"
+  # Chrome OS now uses major_version 2 payloads for all boards.
+  # See crbug.com/794404 for more information.
+  FORCE_MAJOR_VERSION="2"
 
-  # When generating legacy Chrome OS images, we need to use "boot" and "system"
-  # for the partition names to be compatible with updating Brillo devices with
-  # Chrome OS images.
-  eval ${partitions_array}[boot]=\""${kernel}"\"
-  eval ${partitions_array}[system]=\""${root}"\"
+  eval ${partitions_array}[kernel]=\""${kernel}"\"
+  eval ${partitions_array}[root]=\""${root}"\"
 
   if [[ -n "${partitions_order}" ]]; then
-    eval "${partitions_order}=( \"system\" \"boot\" )"
+    eval "${partitions_order}=( \"root\" \"kernel\" )"
   fi
 
   local part varname
-  for part in boot system; do
+  for part in kernel root; do
     varname="${partitions_array}[${part}]"
     printf "md5sum of %s: " "${varname}"
     md5sum "${!varname}"
   done
 }
 
+# extract_partition_brillo <target_files.zip> <partitions_array> <partition>
+#     <part_file> <part_map_file>
+#
+# Extract the <partition> from target_files zip file into <part_file> and its
+# map file into <part_map_file>.
+extract_partition_brillo() {
+  local image="$1"
+  local partitions_array="$2"
+  local part="$3"
+  local part_file="$4"
+  local part_map_file="$5"
+
+  # For each partition, we in turn look for its image file under IMAGES/ and
+  # RADIO/ in the given target_files zip file.
+  local path path_in_zip
+  for path in IMAGES RADIO; do
+    if unzip -l "${image}" "${path}/${part}.img" >/dev/null; then
+      path_in_zip="${path}"
+      break
+    fi
+  done
+  [[ -n "${path_in_zip}" ]] || die "Failed to find ${part}.img"
+  unzip -p "${image}" "${path_in_zip}/${part}.img" >"${part_file}"
+
+  # If the partition is stored as an Android sparse image file, we need to
+  # convert them to a raw image for the update.
+  local magic=$(xxd -p -l4 "${part_file}")
+  if [[ "${magic}" == "3aff26ed" ]]; then
+    local temp_sparse=$(create_tempfile "${part}.sparse.XXXXXX")
+    echo "Converting Android sparse image ${part}.img to RAW."
+    mv "${part_file}" "${temp_sparse}"
+    simg2img "${temp_sparse}" "${part_file}"
+    rm -f "${temp_sparse}"
+  fi
+
+  # Extract the .map file (if one is available).
+  unzip -p "${image}" "${path_in_zip}/${part}.map" >"${part_map_file}" \
+    2>/dev/null || true
+
+  # delta_generator only supports images multiple of 4 KiB. For target images
+  # we pad the data with zeros if needed, but for source images we truncate
+  # down the data since the last block of the old image could be padded on
+  # disk with unknown data.
+  local filesize=$(stat -c%s "${part_file}")
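+  # 4096 is a power of two, so "filesize & -4096" clears the low 12 bits
+  # (round down) and "(filesize + 4095) & -4096" rounds up to the next 4 KiB.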
+  if [[ $(( filesize % 4096 )) -ne 0 ]]; then
+    if [[ "${partitions_array}" == "SRC_PARTITIONS" ]]; then
+      echo "Rounding DOWN partition ${part}.img to a multiple of 4 KiB."
+      : $(( filesize = filesize & -4096 ))
+    else
+      echo "Rounding UP partition ${part}.img to a multiple of 4 KiB."
+      : $(( filesize = (filesize + 4095) & -4096 ))
+    fi
+    truncate_file "${part_file}" "${filesize}"
+  fi
+
+  echo "Extracted ${partitions_array}[${part}]: ${filesize} bytes"
+}
+
 # extract_image_brillo <target_files.zip> <partitions_array> [partitions_order]
 #
 # Extract the A/B updated partitions from a Brillo target_files zip file into
@@ -409,7 +475,7 @@
   else
     warn "No ab_partitions.txt found. Using default."
   fi
-  echo "List of A/B partitions: ${partitions[@]}"
+  echo "List of A/B partitions for ${partitions_array}: ${partitions[@]}"
 
   if [[ -n "${partitions_order}" ]]; then
     eval "${partitions_order}=(${partitions[@]})"
@@ -448,61 +514,67 @@
         >"${postinstall_config}"; then
       POSTINSTALL_CONFIG_FILE="${postinstall_config}"
     fi
+    local dynamic_partitions_info=$(create_tempfile "dynamic_partitions_info.XXXXXX")
+    CLEANUP_FILES+=("${dynamic_partitions_info}")
+    if unzip -p "${image}" "META/dynamic_partitions_info.txt" \
+        >"${dynamic_partitions_info}"; then
+      DYNAMIC_PARTITION_INFO_FILE="${dynamic_partitions_info}"
+    fi
   fi
 
-  local part part_file temp_raw filesize
+  local part
   for part in "${partitions[@]}"; do
-    part_file=$(create_tempfile "${part}.img.XXXXXX")
-    CLEANUP_FILES+=("${part_file}")
-    unzip -p "${image}" "IMAGES/${part}.img" >"${part_file}"
-
-    # If the partition is stored as an Android sparse image file, we need to
-    # convert them to a raw image for the update.
-    local magic=$(head --bytes=4 "${part_file}" | hexdump -e '1/1 "%.2x"')
-    if [[ "${magic}" == "3aff26ed" ]]; then
-      temp_raw=$(create_tempfile "${part}.raw.XXXXXX")
-      CLEANUP_FILES+=("${temp_raw}")
-      echo "Converting Android sparse image ${part}.img to RAW."
-      simg2img "${part_file}" "${temp_raw}"
-      # At this point, we can drop the contents of the old part_file file, but
-      # we can't delete the file because it will be deleted in cleanup.
-      true >"${part_file}"
-      part_file="${temp_raw}"
-    fi
-
-    # Extract the .map file (if one is available).
-    part_map_file=$(create_tempfile "${part}.map.XXXXXX")
-    CLEANUP_FILES+=("${part_map_file}")
-    unzip -p "${image}" "IMAGES/${part}.map" >"${part_map_file}" || \
-      part_map_file=""
-
-    # delta_generator only supports images multiple of 4 KiB. For target images
-    # we pad the data with zeros if needed, but for source images we truncate
-    # down the data since the last block of the old image could be padded on
-    # disk with unknown data.
-    filesize=$(stat -c%s "${part_file}")
-    if [[ $(( filesize % 4096 )) -ne 0 ]]; then
-      if [[ "${partitions_array}" == "SRC_PARTITIONS" ]]; then
-        echo "Rounding DOWN partition ${part}.img to a multiple of 4 KiB."
-        : $(( filesize = filesize & -4096 ))
-        if [[ ${filesize} == 0 ]]; then
-          echo "Source partition ${part}.img is empty after rounding down," \
-            "skipping."
-          continue
-        fi
-      else
-        echo "Rounding UP partition ${part}.img to a multiple of 4 KiB."
-        : $(( filesize = (filesize + 4095) & -4096 ))
-      fi
-      truncate_file "${part_file}" "${filesize}"
-    fi
-
+    local part_file=$(create_tempfile "${part}.img.XXXXXX")
+    local part_map_file=$(create_tempfile "${part}.map.XXXXXX")
+    CLEANUP_FILES+=("${part_file}" "${part_map_file}")
+    # Extract partitions in background.
+    extract_partition_brillo "${image}" "${partitions_array}" "${part}" \
+        "${part_file}" "${part_map_file}" &
     eval "${partitions_array}[\"${part}\"]=\"${part_file}\""
     eval "${partitions_array}_MAP[\"${part}\"]=\"${part_map_file}\""
-    echo "Extracted ${partitions_array}[${part}]: ${filesize} bytes"
   done
 }
 
+# cleanup_partition_array <partitions_array>
+#
+# Remove from <partitions_array> the entries whose extracted files are empty.
+cleanup_partition_array() {
+  local partitions_array="$1"
+  # We have to use eval to iterate over associative array keys when the array
+  # name is held in a variable; switch to a nameref once bash 4.3 is available
+  # everywhere.
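+  # (With bash 4.3+ this could instead be, e.g.:
+  #   local -n parts="${partitions_array}"
+  #   for part in "${!parts[@]}"; do ...; done)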
+  for part in $(eval "echo \${!${partitions_array}[@]}"); do
+    local path="${partitions_array}[$part]"
+    if [[ ! -s "${!path}" ]]; then
+      eval "unset ${partitions_array}[${part}]"
+    fi
+  done
+}
+
+extract_payload_images() {
+  local payload_type=$1
+  echo "Extracting images for ${payload_type} update."
+
+  if [[ "${payload_type}" == "delta" ]]; then
+    extract_image "${FLAGS_source_image}" SRC_PARTITIONS
+  fi
+  extract_image "${FLAGS_target_image}" DST_PARTITIONS PARTITIONS_ORDER
+  # Wait for all subprocesses.
+  wait
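+  # Drop entries whose extracted files ended up empty (e.g. partitions without
+  # a .map file, or source partitions that rounded down to zero bytes).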
+  cleanup_partition_array SRC_PARTITIONS
+  cleanup_partition_array SRC_PARTITIONS_MAP
+  cleanup_partition_array DST_PARTITIONS
+  cleanup_partition_array DST_PARTITIONS_MAP
+}
+
+get_payload_type() {
+  if [[ -z "${FLAGS_source_image}" ]]; then
+    echo "full"
+  else
+    echo "delta"
+  fi
+}
+
 validate_generate() {
   [[ -n "${FLAGS_payload}" ]] ||
     die "You must specify an output filename with --payload FILENAME"
@@ -512,21 +584,12 @@
 }
 
 cmd_generate() {
-  local payload_type="delta"
-  if [[ -z "${FLAGS_source_image}" ]]; then
-    payload_type="full"
-  fi
-
-  echo "Extracting images for ${payload_type} update."
-
-  extract_image "${FLAGS_target_image}" DST_PARTITIONS PARTITIONS_ORDER
-  if [[ "${payload_type}" == "delta" ]]; then
-    extract_image "${FLAGS_source_image}" SRC_PARTITIONS
-  fi
+  local payload_type=$(get_payload_type)
+  extract_payload_images ${payload_type}
 
   echo "Generating ${payload_type} update."
   # Common payload args:
-  GENERATOR_ARGS=( -out_file="${FLAGS_payload}" )
+  GENERATOR_ARGS=( --out_file="${FLAGS_payload}" )
 
   local part old_partitions="" new_partitions="" partition_names=""
   local old_mapfiles="" new_mapfiles=""
@@ -547,16 +610,16 @@
 
   # Target image args:
   GENERATOR_ARGS+=(
-    -partition_names="${partition_names}"
-    -new_partitions="${new_partitions}"
-    -new_mapfiles="${new_mapfiles}"
+    --partition_names="${partition_names}"
+    --new_partitions="${new_partitions}"
+    --new_mapfiles="${new_mapfiles}"
   )
 
   if [[ "${payload_type}" == "delta" ]]; then
     # Source image args:
     GENERATOR_ARGS+=(
-      -old_partitions="${old_partitions}"
-      -old_mapfiles="${old_mapfiles}"
+      --old_partitions="${old_partitions}"
+      --old_mapfiles="${old_mapfiles}"
     )
     if [[ -n "${FORCE_MINOR_VERSION}" ]]; then
       GENERATOR_ARGS+=( --minor_version="${FORCE_MINOR_VERSION}" )
@@ -581,6 +644,12 @@
     )
   fi
 
+  if [[ -n "{DYNAMIC_PARTITION_INFO_FILE}" ]]; then
+    GENERATOR_ARGS+=(
+      --dynamic_partition_info_file="${DYNAMIC_PARTITION_INFO_FILE}"
+    )
+  fi
+
   echo "Running delta_generator with args: ${GENERATOR_ARGS[@]}"
   "${GENERATOR}" "${GENERATOR_ARGS[@]}"
 
@@ -604,10 +673,10 @@
 
 cmd_hash() {
   "${GENERATOR}" \
-      -in_file="${FLAGS_unsigned_payload}" \
-      -signature_size="${FLAGS_signature_size}" \
-      -out_hash_file="${FLAGS_payload_hash_file}" \
-      -out_metadata_hash_file="${FLAGS_metadata_hash_file}"
+      --in_file="${FLAGS_unsigned_payload}" \
+      --signature_size="${FLAGS_signature_size}" \
+      --out_hash_file="${FLAGS_payload_hash_file}" \
+      --out_metadata_hash_file="${FLAGS_metadata_hash_file}"
 
   echo "Done generating hash."
 }
@@ -634,11 +703,11 @@
 
 cmd_sign() {
   GENERATOR_ARGS=(
-    -in_file="${FLAGS_unsigned_payload}"
-    -signature_size="${FLAGS_signature_size}"
-    -signature_file="${FLAGS_payload_signature_file}"
-    -metadata_signature_file="${FLAGS_metadata_signature_file}"
-    -out_file="${FLAGS_payload}"
+    --in_file="${FLAGS_unsigned_payload}"
+    --signature_size="${FLAGS_signature_size}"
+    --payload_signature_file="${FLAGS_payload_signature_file}"
+    --metadata_signature_file="${FLAGS_metadata_signature_file}"
+    --out_file="${FLAGS_payload}"
   )
 
   if [[ -n "${FLAGS_metadata_size_file}" ]]; then
@@ -659,11 +728,11 @@
 
 cmd_properties() {
   "${GENERATOR}" \
-      -in_file="${FLAGS_payload}" \
-      -properties_file="${FLAGS_properties_file}"
+      --in_file="${FLAGS_payload}" \
+      --properties_file="${FLAGS_properties_file}"
 }
 
-validate_verify() {
+validate_verify_and_check() {
   [[ -n "${FLAGS_payload}" ]] ||
     die "Error: you must specify an input filename with --payload FILENAME"
 
@@ -672,17 +741,8 @@
 }
 
 cmd_verify() {
-  local payload_type="delta"
-  if [[ -z "${FLAGS_source_image}" ]]; then
-    payload_type="full"
-  fi
-
-  echo "Extracting images for ${payload_type} update."
-
-  if [[ "${payload_type}" == "delta" ]]; then
-    extract_image "${FLAGS_source_image}" SRC_PARTITIONS
-  fi
-  extract_image "${FLAGS_target_image}" DST_PARTITIONS PARTITIONS_ORDER
+  local payload_type=$(get_payload_type)
+  extract_payload_images ${payload_type}
 
   declare -A TMP_PARTITIONS
   for part in "${PARTITIONS_ORDER[@]}"; do
@@ -697,7 +757,7 @@
 
   echo "Verifying ${payload_type} update."
   # Common payload args:
-  GENERATOR_ARGS=( -in_file="${FLAGS_payload}" )
+  GENERATOR_ARGS=( --in_file="${FLAGS_payload}" )
 
   local part old_partitions="" new_partitions="" partition_names=""
   for part in "${PARTITIONS_ORDER[@]}"; do
@@ -713,14 +773,14 @@
 
   # Target image args:
   GENERATOR_ARGS+=(
-    -partition_names="${partition_names}"
-    -new_partitions="${new_partitions}"
+    --partition_names="${partition_names}"
+    --new_partitions="${new_partitions}"
   )
 
   if [[ "${payload_type}" == "delta" ]]; then
     # Source image args:
     GENERATOR_ARGS+=(
-      -old_partitions="${old_partitions}"
+      --old_partitions="${old_partitions}"
     )
   fi
 
@@ -730,24 +790,54 @@
 
   echo "Running delta_generator to verify ${payload_type} payload with args: \
 ${GENERATOR_ARGS[@]}"
-  "${GENERATOR}" "${GENERATOR_ARGS[@]}"
+  "${GENERATOR}" "${GENERATOR_ARGS[@]}" || true
 
-  if [[ $? -eq 0 ]]; then
-    echo "Done applying ${payload_type} update."
-    echo "Checking the newly generated partitions against the target partitions"
-    for part in "${PARTITIONS_ORDER[@]}"; do
-      cmp "${TMP_PARTITIONS[${part}]}" "${DST_PARTITIONS[${part}]}"
-      local not_str=""
-      if [[ $? -ne 0 ]]; then
-        not_str="in"
-      fi
-      echo "The new partition (${part}) is ${not_str}valid."
-    done
-  else
-    echo "Failed to apply ${payload_type} update."
+  echo "Done applying ${payload_type} update."
+  echo "Checking the newly generated partitions against the target partitions"
+  local need_pause=false
+  for part in "${PARTITIONS_ORDER[@]}"; do
+    local not_str=""
+    if ! cmp "${TMP_PARTITIONS[${part}]}" "${DST_PARTITIONS[${part}]}"; then
+      not_str="in"
+      need_pause=true
+    fi
+    echo "The new partition (${part}) is ${not_str}valid."
+  done
+  # All images will be cleaned up when the script exits, so pause here to give
+  # the user a chance to inspect the images.
+  if [[ "$need_pause" == true ]]; then
+    read -n1 -r -s -p "Paused to investigate invalid partitions, \
+press any key to exit."
   fi
 }
 
+cmd_check() {
+  local payload_type=$(get_payload_type)
+  extract_payload_images ${payload_type}
+
+  local part dst_partitions="" src_partitions=""
+  for part in "${PARTITIONS_ORDER[@]}"; do
+    if [[ -n "${dst_partitions}" ]]; then
+      dst_partitions+=" "
+      src_partitions+=" "
+    fi
+    dst_partitions+="${DST_PARTITIONS[${part}]}"
+    src_partitions+="${SRC_PARTITIONS[${part}]:-}"
+  done
+
+  # Common payload args:
+  PAYCHECK_ARGS=( "${FLAGS_payload}" --type ${payload_type} \
+    --part_names ${PARTITIONS_ORDER[@]} \
+    --dst_part_paths ${dst_partitions} )
+
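+  # SRC_PARTITIONS is only populated for delta payloads.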
+  if [[ ! -z "${SRC_PARTITIONS[@]}" ]]; then
+    PAYCHECK_ARGS+=( --src_part_paths ${src_partitions} )
+  fi
+
+  echo "Checking ${payload_type} update."
+  check_update_payload ${PAYCHECK_ARGS[@]} --check
+}
+
 # Sanity check that the real generator exists:
 GENERATOR="$(which delta_generator || true)"
 [[ -x "${GENERATOR}" ]] || die "can't find delta_generator"
@@ -765,7 +855,10 @@
   properties) validate_properties
               cmd_properties
               ;;
-  verify) validate_verify
+  verify) validate_verify_and_check
           cmd_verify
           ;;
+  check) validate_verify_and_check
+         cmd_check
+         ;;
 esac
diff --git a/scripts/paycheck.py b/scripts/paycheck.py
index 8df1bf0..9d61778 100755
--- a/scripts/paycheck.py
+++ b/scripts/paycheck.py
@@ -1,16 +1,33 @@
 #!/usr/bin/python2
 #
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Command-line tool for checking and applying Chrome OS update payloads."""
 
 from __future__ import print_function
 
-import optparse
+# pylint: disable=import-error
+import argparse
+import filecmp
 import os
 import sys
+import tempfile
+
+from update_payload import common
+from update_payload import error
 
 lib_dir = os.path.join(os.path.dirname(__file__), 'lib')
 if os.path.exists(lib_dir) and os.path.isdir(lib_dir):
@@ -29,17 +46,12 @@
     argv: command-line arguments to parse (excluding the program name)
 
   Returns:
-    A tuple (opts, payload, extra_args), where `opts' are the options
-    returned by the parser, `payload' is the name of the payload file
-    (mandatory argument) and `extra_args' are any additional command-line
-    arguments.
+    The arguments returned by the argument parser.
   """
-  parser = optparse.OptionParser(
-      usage=('Usage: %prog [OPTION...] PAYLOAD [DST_KERN DST_ROOT '
-             '[SRC_KERN SRC_ROOT]]'),
-      description=('Applies a Chrome OS update PAYLOAD to SRC_KERN and '
-                   'SRC_ROOT emitting DST_KERN and DST_ROOT, respectively. '
-                   'SRC_KERN and SRC_ROOT are only needed for delta payloads. '
+  parser = argparse.ArgumentParser(
+      description=('Applies a Chrome OS update PAYLOAD to src_kern and '
+                   'src_root emitting dst_kern and dst_root, respectively. '
+                   'src_kern and src_root are only needed for delta payloads. '
                    'When no partitions are provided, verifies the payload '
                    'integrity.'),
       epilog=('Note: a payload may verify correctly but fail to apply, and '
@@ -47,186 +59,241 @@
               'vs dynamic correctness. A payload that both verifies and '
               'applies correctly should be safe for use by the Chrome OS '
               'Update Engine. Use --check to verify a payload prior to '
-              'applying it.'))
+              'applying it.'),
+      formatter_class=argparse.RawDescriptionHelpFormatter
+  )
 
-  check_opts = optparse.OptionGroup(parser, 'Checking payload integrity')
-  check_opts.add_option('-c', '--check', action='store_true', default=False,
-                        help=('force payload integrity check (e.g. before '
-                              'applying)'))
-  check_opts.add_option('-D', '--describe', action='store_true', default=False,
-                        help='Print a friendly description of the payload.')
-  check_opts.add_option('-r', '--report', metavar='FILE',
-                        help="dump payload report (`-' for stdout)")
-  check_opts.add_option('-t', '--type', metavar='TYPE', dest='assert_type',
-                        help=("assert that payload is either `%s' or `%s'" %
-                              (_TYPE_FULL, _TYPE_DELTA)))
-  check_opts.add_option('-z', '--block-size', metavar='NUM', default=0,
-                        type='int',
-                        help='assert a non-default (4096) payload block size')
-  check_opts.add_option('-u', '--allow-unhashed', action='store_true',
-                        default=False, help='allow unhashed operations')
-  check_opts.add_option('-d', '--disabled_tests', metavar='TESTLIST',
-                        default=(),
-                        help=('comma-separated list of tests to disable; '
-                              'available values: ' +
-                              ', '.join(update_payload.CHECKS_TO_DISABLE)))
-  check_opts.add_option('-k', '--key', metavar='FILE',
-                        help=('Override standard key used for signature '
-                              'validation'))
-  check_opts.add_option('-m', '--meta-sig', metavar='FILE',
-                        help='verify metadata against its signature')
-  check_opts.add_option('-p', '--root-part-size', metavar='NUM',
-                        default=0, type='int',
-                        help=('override rootfs partition size auto-inference'))
-  check_opts.add_option('-P', '--kern-part-size', metavar='NUM',
-                        default=0, type='int',
-                        help=('override kernel partition size auto-inference'))
-  parser.add_option_group(check_opts)
+  check_args = parser.add_argument_group('Checking payload integrity')
+  check_args.add_argument('-c', '--check', action='store_true', default=False,
+                          help=('force payload integrity check (e.g. before '
+                                'applying)'))
+  check_args.add_argument('-D', '--describe', action='store_true',
+                          default=False,
+                          help='Print a friendly description of the payload.')
+  check_args.add_argument('-r', '--report', metavar='FILE',
+                          help="dump payload report (`-' for stdout)")
+  check_args.add_argument('-t', '--type', dest='assert_type',
+                          help='assert the payload type',
+                          choices=[_TYPE_FULL, _TYPE_DELTA])
+  check_args.add_argument('-z', '--block-size', metavar='NUM', default=0,
+                          type=int,
+                          help='assert a non-default (4096) payload block size')
+  check_args.add_argument('-u', '--allow-unhashed', action='store_true',
+                          default=False, help='allow unhashed operations')
+  check_args.add_argument('-d', '--disabled_tests', default=(), metavar='',
+                          help=('space separated list of tests to disable. '
+                                'allowed options include: ' +
+                                ', '.join(update_payload.CHECKS_TO_DISABLE)),
+                          choices=update_payload.CHECKS_TO_DISABLE)
+  check_args.add_argument('-k', '--key', metavar='FILE',
+                          help=('override standard key used for signature '
+                                'validation'))
+  check_args.add_argument('-m', '--meta-sig', metavar='FILE',
+                          help='verify metadata against its signature')
+  check_args.add_argument('-s', '--metadata-size', metavar='NUM', default=0,
+                          help='the metadata size to verify against the one'
+                          ' in the payload')
+  # TODO(tbrindus): deprecated in favour of --part_sizes
+  check_args.add_argument('-p', '--root-part-size', metavar='NUM',
+                          default=0, type=int,
+                          help='override rootfs partition size auto-inference')
+  check_args.add_argument('-P', '--kern-part-size', metavar='NUM',
+                          default=0, type=int,
+                          help='override kernel partition size auto-inference')
+  check_args.add_argument('--part_sizes', metavar='NUM', nargs='+', type=int,
+                          help='override partition size auto-inference')
 
-  trace_opts = optparse.OptionGroup(parser, 'Applying payload')
-  trace_opts.add_option('-x', '--extract-bsdiff', action='store_true',
-                        default=False,
-                        help=('use temp input/output files with BSDIFF '
-                              'operations (not in-place)'))
-  trace_opts.add_option('--bspatch-path', metavar='FILE',
-                        help=('use the specified bspatch binary'))
-  trace_opts.add_option('--puffpatch-path', metavar='FILE',
-                        help=('use the specified puffpatch binary'))
-  parser.add_option_group(trace_opts)
+  apply_args = parser.add_argument_group('Applying payload')
+  # TODO(ahassani): Extend extract-bsdiff to puffdiff too.
+  apply_args.add_argument('-x', '--extract-bsdiff', action='store_true',
+                          default=False,
+                          help=('use temp input/output files with BSDIFF '
+                                'operations (not in-place)'))
+  apply_args.add_argument('--bspatch-path', metavar='FILE',
+                          help='use the specified bspatch binary')
+  apply_args.add_argument('--puffpatch-path', metavar='FILE',
+                          help='use the specified puffpatch binary')
+  # TODO(tbrindus): deprecated in favour of --dst_part_paths
+  apply_args.add_argument('--dst_kern', metavar='FILE',
+                          help='destination kernel partition file')
+  apply_args.add_argument('--dst_root', metavar='FILE',
+                          help='destination root partition file')
+  # TODO(tbrindus): deprecated in favour of --src_part_paths
+  apply_args.add_argument('--src_kern', metavar='FILE',
+                          help='source kernel partition file')
+  apply_args.add_argument('--src_root', metavar='FILE',
+                          help='source root partition file')
+  # TODO(tbrindus): deprecated in favour of --out_dst_part_paths
+  apply_args.add_argument('--out_dst_kern', metavar='FILE',
+                          help='created destination kernel partition file')
+  apply_args.add_argument('--out_dst_root', metavar='FILE',
+                          help='created destination root partition file')
 
-  trace_opts = optparse.OptionGroup(parser, 'Block tracing')
-  trace_opts.add_option('-b', '--root-block', metavar='BLOCK', type='int',
-                        help='trace the origin for a rootfs block')
-  trace_opts.add_option('-B', '--kern-block', metavar='BLOCK', type='int',
-                        help='trace the origin for a kernel block')
-  trace_opts.add_option('-s', '--skip', metavar='NUM', default='0', type='int',
-                        help='skip first NUM occurrences of traced block')
-  parser.add_option_group(trace_opts)
+  apply_args.add_argument('--src_part_paths', metavar='FILE', nargs='+',
+                          help='source partition files')
+  apply_args.add_argument('--dst_part_paths', metavar='FILE', nargs='+',
+                          help='destination partition files')
+  apply_args.add_argument('--out_dst_part_paths', metavar='FILE', nargs='+',
+                          help='created destination partition files')
+
+  parser.add_argument('payload', metavar='PAYLOAD', help='the payload file')
+  parser.add_argument('--part_names', metavar='NAME', nargs='+',
+                      help='names of partitions')
 
   # Parse command-line arguments.
-  opts, args = parser.parse_args(argv)
+  args = parser.parse_args(argv)
 
-  # Validate a value given to --type, if any.
-  if opts.assert_type not in (None, _TYPE_FULL, _TYPE_DELTA):
-    parser.error('invalid argument to --type: %s' % opts.assert_type)
+  # TODO(tbrindus): temporary workaround to keep old-style flags from breaking
+  # without having to handle both types in our code. Remove after flag usage is
+  # removed from calling scripts.
+  args.part_names = args.part_names or [common.KERNEL, common.ROOTFS]
+  args.part_sizes = args.part_sizes or [args.kern_part_size,
+                                        args.root_part_size]
+  args.src_part_paths = args.src_part_paths or [args.src_kern, args.src_root]
+  args.dst_part_paths = args.dst_part_paths or [args.dst_kern, args.dst_root]
+  args.out_dst_part_paths = args.out_dst_part_paths or [args.out_dst_kern,
+                                                        args.out_dst_root]
 
-  # Convert and validate --disabled_tests value list, if provided.
-  if opts.disabled_tests:
-    opts.disabled_tests = opts.disabled_tests.split(',')
-    for test in opts.disabled_tests:
-      if test not in update_payload.CHECKS_TO_DISABLE:
-        parser.error('invalid argument to --disabled_tests: %s' % test)
-
-  # Ensure consistent use of block tracing options.
-  do_block_trace = not (opts.root_block is None and opts.kern_block is None)
-  if opts.skip and not do_block_trace:
-    parser.error('--skip must be used with either --root-block or --kern-block')
+  # Make sure we don't have new dependencies on old flags by deleting them from
+  # the namespace here.
+  for old in ['kern_part_size', 'root_part_size', 'src_kern', 'src_root',
+              'dst_kern', 'dst_root', 'out_dst_kern', 'out_dst_root']:
+    delattr(args, old)
 
   # There are several options that imply --check.
-  opts.check = (opts.check or opts.report or opts.assert_type or
-                opts.block_size or opts.allow_unhashed or
-                opts.disabled_tests or opts.meta_sig or opts.key or
-                opts.root_part_size or opts.kern_part_size)
+  args.check = (args.check or args.report or args.assert_type or
+                args.block_size or args.allow_unhashed or
+                args.disabled_tests or args.meta_sig or args.key or
+                any(args.part_sizes) or args.metadata_size)
 
-  # Check number of arguments, enforce payload type accordingly.
-  if len(args) == 3:
-    if opts.assert_type == _TYPE_DELTA:
-      parser.error('%s payload requires source partition arguments' %
-                   _TYPE_DELTA)
-    opts.assert_type = _TYPE_FULL
-  elif len(args) == 5:
-    if opts.assert_type == _TYPE_FULL:
-      parser.error('%s payload does not accept source partition arguments' %
-                   _TYPE_FULL)
-    opts.assert_type = _TYPE_DELTA
-  elif len(args) == 1:
-    # Not applying payload; if block tracing not requested either, do an
-    # integrity check.
-    if not do_block_trace:
-      opts.check = True
-    if opts.extract_bsdiff:
-      parser.error('--extract-bsdiff can only be used when applying payloads')
-    if opts.bspatch_path:
-      parser.error('--bspatch-path can only be used when applying payloads')
-    if opts.puffpatch_path:
-      parser.error('--puffpatch-path can only be used when applying payloads')
+  for arg in ['part_sizes', 'src_part_paths', 'dst_part_paths',
+              'out_dst_part_paths']:
+    if len(args.part_names) != len(getattr(args, arg, [])):
+      parser.error('partitions in --%s do not match --part_names' % arg)
+
+  if all(args.dst_part_paths) or all(args.out_dst_part_paths):
+    if all(args.src_part_paths):
+      if args.assert_type == _TYPE_FULL:
+        parser.error('%s payload does not accept source partition arguments'
+                     % _TYPE_FULL)
+      else:
+        args.assert_type = _TYPE_DELTA
+    else:
+      if args.assert_type == _TYPE_DELTA:
+        parser.error('%s payload requires source partition arguments'
+                     % _TYPE_DELTA)
+      else:
+        args.assert_type = _TYPE_FULL
   else:
-    parser.error('unexpected number of arguments')
+    # Not applying payload.
+    if args.extract_bsdiff:
+      parser.error('--extract-bsdiff can only be used when applying payloads')
+    if args.bspatch_path:
+      parser.error('--bspatch-path can only be used when applying payloads')
+    if args.puffpatch_path:
+      parser.error('--puffpatch-path can only be used when applying payloads')
 
   # By default, look for a metadata-signature file with a name based on the name
   # of the payload we are checking. We only do it if check was triggered.
-  if opts.check and not opts.meta_sig:
-    default_meta_sig = args[0] + '.metadata-signature'
+  if args.check and not args.meta_sig:
+    default_meta_sig = args.payload + '.metadata-signature'
     if os.path.isfile(default_meta_sig):
-      opts.meta_sig = default_meta_sig
-      print('Using default metadata signature', opts.meta_sig, file=sys.stderr)
+      args.meta_sig = default_meta_sig
+      print('Using default metadata signature', args.meta_sig, file=sys.stderr)
 
-  return opts, args[0], args[1:]
+  return args
 
 
 def main(argv):
   # Parse and validate arguments.
-  options, payload_file_name, extra_args = ParseArguments(argv[1:])
+  args = ParseArguments(argv[1:])
 
-  with open(payload_file_name) as payload_file:
+  with open(args.payload) as payload_file:
     payload = update_payload.Payload(payload_file)
     try:
       # Initialize payload.
       payload.Init()
 
-      if options.describe:
+      if args.describe:
         payload.Describe()
 
       # Perform payload integrity checks.
-      if options.check:
+      if args.check:
         report_file = None
         do_close_report_file = False
         metadata_sig_file = None
         try:
-          if options.report:
-            if options.report == '-':
+          if args.report:
+            if args.report == '-':
               report_file = sys.stdout
             else:
-              report_file = open(options.report, 'w')
+              report_file = open(args.report, 'w')
               do_close_report_file = True
 
-          metadata_sig_file = options.meta_sig and open(options.meta_sig)
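+          # Build a partition-name -> size mapping from the parsed flags.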
+          part_sizes = dict(zip(args.part_names, args.part_sizes))
+          metadata_sig_file = args.meta_sig and open(args.meta_sig)
           payload.Check(
-              pubkey_file_name=options.key,
+              pubkey_file_name=args.key,
               metadata_sig_file=metadata_sig_file,
+              metadata_size=int(args.metadata_size),
               report_out_file=report_file,
-              assert_type=options.assert_type,
-              block_size=int(options.block_size),
-              rootfs_part_size=options.root_part_size,
-              kernel_part_size=options.kern_part_size,
-              allow_unhashed=options.allow_unhashed,
-              disabled_tests=options.disabled_tests)
+              assert_type=args.assert_type,
+              block_size=int(args.block_size),
+              part_sizes=part_sizes,
+              allow_unhashed=args.allow_unhashed,
+              disabled_tests=args.disabled_tests)
         finally:
           if metadata_sig_file:
             metadata_sig_file.close()
           if do_close_report_file:
             report_file.close()
 
-      # Trace blocks.
-      if options.root_block is not None:
-        payload.TraceBlock(options.root_block, options.skip, sys.stdout, False)
-      if options.kern_block is not None:
-        payload.TraceBlock(options.kern_block, options.skip, sys.stdout, True)
-
       # Apply payload.
-      if extra_args:
-        dargs = {'bsdiff_in_place': not options.extract_bsdiff}
-        if options.bspatch_path:
-          dargs['bspatch_path'] = options.bspatch_path
-        if options.puffpatch_path:
-          dargs['puffpatch_path'] = options.puffpatch_path
-        if options.assert_type == _TYPE_DELTA:
-          dargs['old_kernel_part'] = extra_args[2]
-          dargs['old_rootfs_part'] = extra_args[3]
+      if all(args.dst_part_paths) or all(args.out_dst_part_paths):
+        dargs = {'bsdiff_in_place': not args.extract_bsdiff}
+        if args.bspatch_path:
+          dargs['bspatch_path'] = args.bspatch_path
+        if args.puffpatch_path:
+          dargs['puffpatch_path'] = args.puffpatch_path
+        if args.assert_type == _TYPE_DELTA:
+          dargs['old_parts'] = dict(zip(args.part_names, args.src_part_paths))
 
-        payload.Apply(extra_args[0], extra_args[1], **dargs)
+        out_dst_parts = {}
+        file_handles = []
+        if all(args.out_dst_part_paths):
+          for name, path in zip(args.part_names, args.out_dst_part_paths):
+            handle = open(path, 'w+')
+            file_handles.append(handle)
+            out_dst_parts[name] = handle.name
+        else:
+          for name in args.part_names:
+            handle = tempfile.NamedTemporaryFile()
+            file_handles.append(handle)
+            out_dst_parts[name] = handle.name
 
-    except update_payload.PayloadError, e:
+        payload.Apply(out_dst_parts, **dargs)
+
+        # If destination kernel and rootfs partitions are not given, then this
+        # just becomes an apply operation with no check.
+        if all(args.dst_part_paths):
+          # Prior to comparing, pad the newly written target partitions with
+          # the unused space past the filesystem boundary so they match the
+          # size of the given partitions; truncate() grows the files here.
+          for part_name, out_dst_part, dst_part in zip(args.part_names,
+                                                       file_handles,
+                                                       args.dst_part_paths):
+            out_dst_part.truncate(os.path.getsize(dst_part))
+
+            # Compare resulting partitions with the ones from the target image.
+            if not filecmp.cmp(out_dst_part.name, dst_part):
+              raise error.PayloadError(
+                  'Resulting %s partition corrupted.' % part_name)
+
+        # Close the output files. If args.out_dst_* was not given, then these
+        # files are created as temp files and will be deleted upon close().
+        for handle in file_handles:
+          handle.close()
+    except error.PayloadError, e:
       sys.stderr.write('Error: %s\n' % e)
       return 1
 
diff --git a/scripts/payload_info.py b/scripts/payload_info.py
new file mode 100755
index 0000000..09a7cf7
--- /dev/null
+++ b/scripts/payload_info.py
@@ -0,0 +1,249 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""payload_info: Show information about an update payload."""
+
+from __future__ import print_function
+
+import argparse
+import itertools
+import sys
+import textwrap
+
+import update_payload
+
+MAJOR_PAYLOAD_VERSION_CHROMEOS = 1
+MAJOR_PAYLOAD_VERSION_BRILLO = 2
+
+def DisplayValue(key, value):
+  """Print out a key, value pair with values left-aligned."""
+  if value != None:
+    print('%-*s %s' % (28, key + ':', value))
+  else:
+    raise ValueError('Cannot display an empty value.')
+
+
+def DisplayHexData(data, indent=0):
+  """Print out binary data as a hex values."""
+  for off in range(0, len(data), 16):
+    chunk = data[off:off + 16]
+    print(' ' * indent +
+          ' '.join('%.2x' % ord(c) for c in chunk) +
+          '   ' * (16 - len(chunk)) +
+          ' | ' +
+          ''.join(c if 32 <= ord(c) < 127 else '.' for c in chunk))
+
+
+class PayloadCommand(object):
+  """Show basic information about an update payload.
+
+  This command parses an update payload and displays information from
+  its header and manifest.
+  """
+
+  def __init__(self, options):
+    self.options = options
+    self.payload = None
+
+  def _DisplayHeader(self):
+    """Show information from the payload header."""
+    header = self.payload.header
+    DisplayValue('Payload version', header.version)
+    DisplayValue('Manifest length', header.manifest_len)
+
+  def _DisplayManifest(self):
+    """Show information from the payload manifest."""
+    manifest = self.payload.manifest
+    if self.payload.header.version == MAJOR_PAYLOAD_VERSION_BRILLO:
+      DisplayValue('Number of partitions', len(manifest.partitions))
+      for partition in manifest.partitions:
+        DisplayValue('  Number of "%s" ops' % partition.partition_name,
+                     len(partition.operations))
+    else:
+      DisplayValue('Number of operations', len(manifest.install_operations))
+      DisplayValue('Number of kernel ops',
+                   len(manifest.kernel_install_operations))
+    DisplayValue('Block size', manifest.block_size)
+    DisplayValue('Minor version', manifest.minor_version)
+
+  def _DisplaySignatures(self):
+    """Show information about the signatures from the manifest."""
+    header = self.payload.header
+    if header.metadata_signature_len:
+      offset = header.size + header.manifest_len
+      DisplayValue('Metadata signatures blob',
+                   'file_offset=%d (%d bytes)' %
+                   (offset, header.metadata_signature_len))
+      # pylint: disable=invalid-unary-operand-type
+      signatures_blob = self.payload.ReadDataBlob(
+          -header.metadata_signature_len,
+          header.metadata_signature_len)
+      self._DisplaySignaturesBlob('Metadata', signatures_blob)
+    else:
+      print('No metadata signatures stored in the payload')
+
+    manifest = self.payload.manifest
+    if manifest.HasField('signatures_offset'):
+      signature_msg = 'blob_offset=%d' % manifest.signatures_offset
+      if manifest.signatures_size:
+        signature_msg += ' (%d bytes)' % manifest.signatures_size
+      DisplayValue('Payload signatures blob', signature_msg)
+      signatures_blob = self.payload.ReadDataBlob(manifest.signatures_offset,
+                                                  manifest.signatures_size)
+      self._DisplaySignaturesBlob('Payload', signatures_blob)
+    else:
+      print('No payload signatures stored in the payload')
+
+  @staticmethod
+  def _DisplaySignaturesBlob(signature_name, signatures_blob):
+    """Show information about the signatures blob."""
+    signatures = update_payload.update_metadata_pb2.Signatures()
+    signatures.ParseFromString(signatures_blob)
+    print('%s signatures: (%d entries)' %
+          (signature_name, len(signatures.signatures)))
+    for signature in signatures.signatures:
+      print('  version=%s, hex_data: (%d bytes)' %
+            (signature.version if signature.HasField('version') else None,
+             len(signature.data)))
+      DisplayHexData(signature.data, indent=4)
+
+
+  def _DisplayOps(self, name, operations):
+    """Show information about the install operations from the manifest.
+
+    The list shown includes operation type, data offset, data length, source
+    extents, source length, destination extents, and destinations length.
+
+    Args:
+      name: The name you want displayed above the operation table.
+      operations: The install_operations object that you want to display
+                  information about.
+    """
+    def _DisplayExtents(extents, name):
+      """Show information about extents."""
+      num_blocks = sum([ext.num_blocks for ext in extents])
+      ext_str = ' '.join(
+          '(%s,%s)' % (ext.start_block, ext.num_blocks) for ext in extents)
+      # Make extent list wrap around at 80 chars.
+      ext_str = '\n      '.join(textwrap.wrap(ext_str, 74))
+      extent_plural = 's' if len(extents) > 1 else ''
+      block_plural = 's' if num_blocks > 1 else ''
+      print('    %s: %d extent%s (%d block%s)' %
+            (name, len(extents), extent_plural, num_blocks, block_plural))
+      print('      %s' % ext_str)
+
+    op_dict = update_payload.common.OpType.NAMES
+    print('%s:' % name)
+    for op, op_count in itertools.izip(operations, itertools.count()):
+      print('  %d: %s' % (op_count, op_dict[op.type]))
+      if op.HasField('data_offset'):
+        print('    Data offset: %s' % op.data_offset)
+      if op.HasField('data_length'):
+        print('    Data length: %s' % op.data_length)
+      if op.src_extents:
+        _DisplayExtents(op.src_extents, 'Source')
+      if op.dst_extents:
+        _DisplayExtents(op.dst_extents, 'Destination')
+
+  def _GetStats(self, manifest):
+    """Returns various statistics about a payload file.
+
+    Returns a dictionary containing the number of blocks read during payload
+    application, the number of blocks written, and the number of seeks done
+    when writing during operation application.
+    """
+    read_blocks = 0
+    written_blocks = 0
+    num_write_seeks = 0
+    if self.payload.header.version == MAJOR_PAYLOAD_VERSION_BRILLO:
+      partitions_operations = [part.operations for part in manifest.partitions]
+    else:
+      partitions_operations = [manifest.install_operations,
+                               manifest.kernel_install_operations]
+    for operations in partitions_operations:
+      last_ext = None
+      for curr_op in operations:
+        read_blocks += sum([ext.num_blocks for ext in curr_op.src_extents])
+        written_blocks += sum([ext.num_blocks for ext in curr_op.dst_extents])
+        for curr_ext in curr_op.dst_extents:
+          # See if the extent is contiguous with the last extent seen.
+          if last_ext and (curr_ext.start_block !=
+                           last_ext.start_block + last_ext.num_blocks):
+            num_write_seeks += 1
+          last_ext = curr_ext
+
+    if manifest.minor_version == 1:
+      # Rootfs and kernel are written during the filesystem copy in version 1.
+      written_blocks += manifest.old_rootfs_info.size / manifest.block_size
+      written_blocks += manifest.old_kernel_info.size / manifest.block_size
+    # Old and new rootfs and kernel are read once during verification
+    read_blocks += manifest.old_rootfs_info.size / manifest.block_size
+    read_blocks += manifest.old_kernel_info.size / manifest.block_size
+    read_blocks += manifest.new_rootfs_info.size / manifest.block_size
+    read_blocks += manifest.new_kernel_info.size / manifest.block_size
+    stats = {'read_blocks': read_blocks,
+             'written_blocks': written_blocks,
+             'num_write_seeks': num_write_seeks}
+    return stats
+
+  def _DisplayStats(self, manifest):
+    stats = self._GetStats(manifest)
+    DisplayValue('Blocks read', stats['read_blocks'])
+    DisplayValue('Blocks written', stats['written_blocks'])
+    DisplayValue('Seeks when writing', stats['num_write_seeks'])
+
+  def Run(self):
+    """Parse the update payload and display information from it."""
+    self.payload = update_payload.Payload(self.options.payload_file)
+    self.payload.Init()
+    self._DisplayHeader()
+    self._DisplayManifest()
+    if self.options.signatures:
+      self._DisplaySignatures()
+    if self.options.stats:
+      self._DisplayStats(self.payload.manifest)
+    if self.options.list_ops:
+      print()
+      if self.payload.header.version == MAJOR_PAYLOAD_VERSION_BRILLO:
+        for partition in self.payload.manifest.partitions:
+          self._DisplayOps('%s install operations' % partition.partition_name,
+                           partition.operations)
+      else:
+        self._DisplayOps('Install operations',
+                         self.payload.manifest.install_operations)
+        self._DisplayOps('Kernel install operations',
+                         self.payload.manifest.kernel_install_operations)
+
+
+def main():
+  parser = argparse.ArgumentParser(
+      description='Show information about an update payload.')
+  parser.add_argument('payload_file', type=file,
+                      help='The update payload file.')
+  parser.add_argument('--list_ops', default=False, action='store_true',
+                      help='List the install operations and their extents.')
+  parser.add_argument('--stats', default=False, action='store_true',
+                      help='Show information about overall input/output.')
+  parser.add_argument('--signatures', default=False, action='store_true',
+                      help='Show signatures stored in the payload.')
+  args = parser.parse_args()
+
+  PayloadCommand(args).Run()
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/scripts/payload_info_unittest.py b/scripts/payload_info_unittest.py
new file mode 100755
index 0000000..a4ee9d5
--- /dev/null
+++ b/scripts/payload_info_unittest.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python2
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Unit testing payload_info.py."""
+
+from __future__ import print_function
+
+import StringIO
+import collections
+import mock
+import sys
+import unittest
+
+import payload_info
+import update_payload
+
+from contextlib import contextmanager
+
+from update_payload import update_metadata_pb2
+
+class FakePayloadError(Exception):
+  """A generic error when using the FakePayload."""
+
+class FakeOption(object):
+  """Fake options object for testing."""
+
+  def __init__(self, **kwargs):
+    self.list_ops = False
+    self.stats = False
+    self.signatures = False
+    for key, val in kwargs.iteritems():
+      setattr(self, key, val)
+    if not hasattr(self, 'payload_file'):
+      self.payload_file = None
+
+class FakeOp(object):
+  """Fake manifest operation for testing."""
+
+  def __init__(self, src_extents, dst_extents, op_type, **kwargs):
+    self.src_extents = src_extents
+    self.dst_extents = dst_extents
+    self.type = op_type
+    for key, val in kwargs.iteritems():
+      setattr(self, key, val)
+
+  def HasField(self, field):
+    return hasattr(self, field)
+
+class FakePartition(object):
+  """Fake PartitionUpdate field for testing."""
+
+  def __init__(self, partition_name, operations):
+    self.partition_name = partition_name
+    self.operations = operations
+
+class FakeManifest(object):
+  """Fake manifest for testing."""
+
+  def __init__(self, major_version):
+    FakeExtent = collections.namedtuple('FakeExtent',
+                                        ['start_block', 'num_blocks'])
+    self.install_operations = [FakeOp([],
+                                      [FakeExtent(1, 1), FakeExtent(2, 2)],
+                                      update_payload.common.OpType.REPLACE_BZ,
+                                      dst_length=3*4096,
+                                      data_offset=1,
+                                      data_length=1)]
+    self.kernel_install_operations = [FakeOp(
+        [FakeExtent(1, 1)],
+        [FakeExtent(x, x) for x in xrange(20)],
+        update_payload.common.OpType.SOURCE_COPY,
+        src_length=4096)]
+    if major_version == payload_info.MAJOR_PAYLOAD_VERSION_BRILLO:
+      self.partitions = [FakePartition('root', self.install_operations),
+                         FakePartition('kernel',
+                                       self.kernel_install_operations)]
+      self.install_operations = self.kernel_install_operations = []
+    self.block_size = 4096
+    self.minor_version = 4
+    FakePartInfo = collections.namedtuple('FakePartInfo', ['size'])
+    self.old_rootfs_info = FakePartInfo(1 * 4096)
+    self.old_kernel_info = FakePartInfo(2 * 4096)
+    self.new_rootfs_info = FakePartInfo(3 * 4096)
+    self.new_kernel_info = FakePartInfo(4 * 4096)
+    self.signatures_offset = None
+    self.signatures_size = None
+
+  def HasField(self, field_name):
+    """Fake HasField method based on the python members."""
+    return hasattr(self, field_name) and getattr(self, field_name) is not None
+
+class FakeHeader(object):
+  """Fake payload header for testing."""
+
+  def __init__(self, version, manifest_len, metadata_signature_len):
+    self.version = version
+    self.manifest_len = manifest_len
+    self.metadata_signature_len = metadata_signature_len
+
+  @property
+  def size(self):
+    return (20 if self.version == payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS
+            else 24)
+
+class FakePayload(object):
+  """Fake payload for testing."""
+
+  def __init__(self, major_version):
+    self._header = FakeHeader(major_version, 222, 0)
+    self.header = None
+    self._manifest = FakeManifest(major_version)
+    self.manifest = None
+
+    self._blobs = {}
+    self._payload_signatures = update_metadata_pb2.Signatures()
+    self._metadata_signatures = update_metadata_pb2.Signatures()
+
+  def Init(self):
+    """Fake Init that sets header and manifest.
+
+    Failing to call Init() will not make header and manifest available to the
+    test.
+    """
+    self.header = self._header
+    self.manifest = self._manifest
+
+  def ReadDataBlob(self, offset, length):
+    """Return the blob that should be present at the offset location"""
+    if not offset in self._blobs:
+      raise FakePayloadError('Requested blob at unknown offset %d' % offset)
+    blob = self._blobs[offset]
+    if len(blob) != length:
+      raise FakePayloadError('Read blob with the wrong length (expect: %d, '
+                             'actual: %d)' % (len(blob), length))
+    return blob
+
+  @staticmethod
+  def _AddSignatureToProto(proto, **kwargs):
+    """Add a new Signature element to the passed proto."""
+    new_signature = proto.signatures.add()
+    for key, val in kwargs.iteritems():
+      setattr(new_signature, key, val)
+
+  def AddPayloadSignature(self, **kwargs):
+    self._AddSignatureToProto(self._payload_signatures, **kwargs)
+    blob = self._payload_signatures.SerializeToString()
+    self._manifest.signatures_offset = 1234
+    self._manifest.signatures_size = len(blob)
+    self._blobs[self._manifest.signatures_offset] = blob
+
+  def AddMetadataSignature(self, **kwargs):
+    self._AddSignatureToProto(self._metadata_signatures, **kwargs)
+    if self._header.metadata_signature_len:
+      del self._blobs[-self._header.metadata_signature_len]
+    blob = self._metadata_signatures.SerializeToString()
+    self._header.metadata_signature_len = len(blob)
+    self._blobs[-len(blob)] = blob
+
+class PayloadCommandTest(unittest.TestCase):
+  """Test class for our PayloadCommand class."""
+
+  @contextmanager
+  def OutputCapturer(self):
+    """A tool for capturing the sys.stdout"""
+    stdout = sys.stdout
+    try:
+      sys.stdout = StringIO.StringIO()
+      yield sys.stdout
+    finally:
+      sys.stdout = stdout
+
+  def TestCommand(self, payload_cmd, payload, expected_out):
+    """A tool for testing a payload command.
+
+    It tests that a payload command which runs with a given payload produces a
+    correct output.
+    """
+    with mock.patch.object(update_payload, 'Payload', return_value=payload), \
+         self.OutputCapturer() as output:
+      payload_cmd.Run()
+    self.assertEquals(output.getvalue(), expected_out)
+
+  def testDisplayValue(self):
+    """Verify that DisplayValue prints what we expect."""
+    with self.OutputCapturer() as output:
+      payload_info.DisplayValue('key', 'value')
+    self.assertEquals(output.getvalue(), 'key:                         value\n')
+
+  def testRun(self):
+    """Verify that Run parses and displays the payload like we expect."""
+    payload_cmd = payload_info.PayloadCommand(FakeOption(action='show'))
+    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS)
+    expected_out = """Payload version:             1
+Manifest length:             222
+Number of operations:        1
+Number of kernel ops:        1
+Block size:                  4096
+Minor version:               4
+"""
+    self.TestCommand(payload_cmd, payload, expected_out)
+
+  def testListOpsOnVersion1(self):
+    """Verify that the --list_ops option gives the correct output."""
+    payload_cmd = payload_info.PayloadCommand(
+        FakeOption(list_ops=True, action='show'))
+    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS)
+    expected_out = """Payload version:             1
+Manifest length:             222
+Number of operations:        1
+Number of kernel ops:        1
+Block size:                  4096
+Minor version:               4
+
+Install operations:
+  0: REPLACE_BZ
+    Data offset: 1
+    Data length: 1
+    Destination: 2 extents (3 blocks)
+      (1,1) (2,2)
+Kernel install operations:
+  0: SOURCE_COPY
+    Source: 1 extent (1 block)
+      (1,1)
+    Destination: 20 extents (190 blocks)
+      (0,0) (1,1) (2,2) (3,3) (4,4) (5,5) (6,6) (7,7) (8,8) (9,9) (10,10)
+      (11,11) (12,12) (13,13) (14,14) (15,15) (16,16) (17,17) (18,18) (19,19)
+"""
+    self.TestCommand(payload_cmd, payload, expected_out)
+
+  def testListOpsOnVersion2(self):
+    """Verify that the --list_ops option gives the correct output."""
+    payload_cmd = payload_info.PayloadCommand(
+        FakeOption(list_ops=True, action='show'))
+    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_BRILLO)
+    expected_out = """Payload version:             2
+Manifest length:             222
+Number of partitions:        2
+  Number of "root" ops:      1
+  Number of "kernel" ops:    1
+Block size:                  4096
+Minor version:               4
+
+root install operations:
+  0: REPLACE_BZ
+    Data offset: 1
+    Data length: 1
+    Destination: 2 extents (3 blocks)
+      (1,1) (2,2)
+kernel install operations:
+  0: SOURCE_COPY
+    Source: 1 extent (1 block)
+      (1,1)
+    Destination: 20 extents (190 blocks)
+      (0,0) (1,1) (2,2) (3,3) (4,4) (5,5) (6,6) (7,7) (8,8) (9,9) (10,10)
+      (11,11) (12,12) (13,13) (14,14) (15,15) (16,16) (17,17) (18,18) (19,19)
+"""
+    self.TestCommand(payload_cmd, payload, expected_out)
+
+  def testStatsOnVersion1(self):
+    """Verify that the --stats option works correctly."""
+    payload_cmd = payload_info.PayloadCommand(
+        FakeOption(stats=True, action='show'))
+    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS)
+    expected_out = """Payload version:             1
+Manifest length:             222
+Number of operations:        1
+Number of kernel ops:        1
+Block size:                  4096
+Minor version:               4
+Blocks read:                 11
+Blocks written:              193
+Seeks when writing:          18
+"""
+    self.TestCommand(payload_cmd, payload, expected_out)
+
+  def testStatsOnVersion2(self):
+    """Verify that the --stats option works correctly on version 2."""
+    payload_cmd = payload_info.PayloadCommand(
+        FakeOption(stats=True, action='show'))
+    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_BRILLO)
+    expected_out = """Payload version:             2
+Manifest length:             222
+Number of partitions:        2
+  Number of "root" ops:      1
+  Number of "kernel" ops:    1
+Block size:                  4096
+Minor version:               4
+Blocks read:                 11
+Blocks written:              193
+Seeks when writing:          18
+"""
+    self.TestCommand(payload_cmd, payload, expected_out)
+
+  def testEmptySignatures(self):
+    """Verify that the --signatures option works with unsigned payloads."""
+    payload_cmd = payload_info.PayloadCommand(
+        FakeOption(action='show', signatures=True))
+    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS)
+    expected_out = """Payload version:             1
+Manifest length:             222
+Number of operations:        1
+Number of kernel ops:        1
+Block size:                  4096
+Minor version:               4
+No metadata signatures stored in the payload
+No payload signatures stored in the payload
+"""
+    self.TestCommand(payload_cmd, payload, expected_out)
+
+  def testSignatures(self):
+    """Verify that the --signatures option shows the present signatures."""
+    payload_cmd = payload_info.PayloadCommand(
+        FakeOption(action='show', signatures=True))
+    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_BRILLO)
+    payload.AddPayloadSignature(version=1,
+                                data='12345678abcdefgh\x00\x01\x02\x03')
+    payload.AddPayloadSignature(data='I am a signature so access is yes.')
+    payload.AddMetadataSignature(data='\x00\x0a\x0c')
+    expected_out = """Payload version:             2
+Manifest length:             222
+Number of partitions:        2
+  Number of "root" ops:      1
+  Number of "kernel" ops:    1
+Block size:                  4096
+Minor version:               4
+Metadata signatures blob:    file_offset=246 (7 bytes)
+Metadata signatures: (1 entries)
+  version=None, hex_data: (3 bytes)
+    00 0a 0c                                        | ...
+Payload signatures blob:     blob_offset=1234 (64 bytes)
+Payload signatures: (2 entries)
+  version=1, hex_data: (20 bytes)
+    31 32 33 34 35 36 37 38 61 62 63 64 65 66 67 68 | 12345678abcdefgh
+    00 01 02 03                                     | ....
+  version=None, hex_data: (34 bytes)
+    49 20 61 6d 20 61 20 73 69 67 6e 61 74 75 72 65 | I am a signature
+    20 73 6f 20 61 63 63 65 73 73 20 69 73 20 79 65 |  so access is ye
+    73 2e                                           | s.
+"""
+    self.TestCommand(payload_cmd, payload, expected_out)
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/scripts/run_unittests b/scripts/run_unittests
index c8e713d..0d301ba 100755
--- a/scripts/run_unittests
+++ b/scripts/run_unittests
@@ -25,4 +25,6 @@
   python -m update_payload."${filename%.*}"
 done
 
+./payload_info_unittest.py
+
 exit 0
diff --git a/scripts/test_paycheck.sh b/scripts/test_paycheck.sh
index c395db4..239b984 100755
--- a/scripts/test_paycheck.sh
+++ b/scripts/test_paycheck.sh
@@ -1,8 +1,19 @@
 #!/bin/bash
 #
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 # A test script for paycheck.py and the update_payload.py library.
 #
@@ -21,9 +32,6 @@
 #   payload type. Another artifact is a human-readable payload report, which
 #   is output to stdout to be inspected by the user.
 #
-# - It performs a random block trace on the delta payload (both kernel and
-#   rootfs blocks), dumping the traces to stdout for the user to inspect.
-#
 # - It applies old_full_payload to yield old kernel (old_kern.part) and rootfs
 #   (old_root.part) partitions.
 #
@@ -37,11 +45,9 @@
 #   ensure that they are binary identical.
 #
 # If all steps have completed successfully we know with high certainty that
-# paycheck.py (and hence update_payload.py) correctly parses both full and
-# delta payloads, and applies them to yield the expected result. We also know
-# that tracing works, to the extent it does not crash. Manual inspection of
-# payload reports and block traces will improve this our confidence and are
-# strongly encouraged. Finally, each paycheck.py execution is timed.
+# paycheck.py (and hence update_payload.py) correctly parses both full and delta
+# payloads, and applies them to yield the expected result. Finally, each
+# paycheck.py execution is timed.
 
 
 # Stop on errors, unset variables.
@@ -55,6 +61,7 @@
 NEW_DELTA_ROOT_PART=new_delta_root.part
 NEW_FULL_KERN_PART=new_full_kern.part
 NEW_FULL_ROOT_PART=new_full_root.part
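+# Partition names passed to paycheck.py via --part_names; the *_part_paths
+# arguments below follow the same order.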
+CROS_PARTS="kernel root"
 
 
 log() {
@@ -80,35 +87,30 @@
   time ${paycheck} -t ${payload_type} ${payload_file}
 }
 
-trace_kern_block() {
-  payload_file=$1
-  block=$2
-  time ${paycheck} -B ${block} ${payload_file}
-}
-
-trace_root_block() {
-  payload_file=$1
-  block=$2
-  time ${paycheck} -b ${block} ${payload_file}
-}
-
 apply_full_payload() {
   payload_file=$1
-  dst_kern_part="$2/$3"
-  dst_root_part="$2/$4"
+  out_dst_kern_part="$2/$3"
+  out_dst_root_part="$2/$4"
 
-  time ${paycheck} ${payload_file} ${dst_kern_part} ${dst_root_part}
+  time ${paycheck} ${payload_file} \
+    --part_names ${CROS_PARTS} \
+    --out_dst_part_paths ${out_dst_kern_part} ${out_dst_root_part}
 }
 
 apply_delta_payload() {
   payload_file=$1
-  dst_kern_part="$2/$3"
-  dst_root_part="$2/$4"
-  src_kern_part="$2/$5"
-  src_root_part="$2/$6"
+  out_dst_kern_part="$2/$3"
+  out_dst_root_part="$2/$4"
+  dst_kern_part="$2/$5"
+  dst_root_part="$2/$6"
+  src_kern_part="$2/$7"
+  src_root_part="$2/$8"
 
-  time ${paycheck} ${payload_file} ${dst_kern_part} ${dst_root_part} \
-    ${src_kern_part} ${src_root_part}
+  time ${paycheck} ${payload_file} \
+    --part_names ${CROS_PARTS} \
+    --out_dst_part_paths ${out_dst_kern_part} ${out_dst_root_part} \
+    --dst_part_paths ${dst_kern_part} ${dst_root_part} \
+    --src_part_paths ${src_kern_part} ${src_root_part}
 }
 
 main() {
@@ -135,15 +137,6 @@
   check_payload "${delta_payload}" delta
   log "Done"
 
-  # Trace a random block between 0-1024 on all payloads.
-  block=$((RANDOM * 1024 / 32767))
-  log "Tracing a random block (${block}) in full/delta payloads..."
-  trace_kern_block "${new_full_payload}" ${block}
-  trace_root_block "${new_full_payload}" ${block}
-  trace_kern_block "${delta_payload}" ${block}
-  trace_root_block "${delta_payload}" ${block}
-  log "Done"
-
   # Apply full/delta payloads and verify results are identical.
   tmpdir="$(mktemp -d --tmpdir test_paycheck.XXXXXXXX)"
   log "Initiating application of payloads at $tmpdir"
@@ -153,16 +146,17 @@
     "${OLD_ROOT_PART}"
   log "Done"
 
-  log "Applying delta payload to old partitions..."
-  apply_delta_payload "${delta_payload}" "${tmpdir}" "${NEW_DELTA_KERN_PART}" \
-    "${NEW_DELTA_ROOT_PART}" "${OLD_KERN_PART}" "${OLD_ROOT_PART}"
-  log "Done"
-
   log "Applying new full payload..."
   apply_full_payload "${new_full_payload}" "${tmpdir}" "${NEW_FULL_KERN_PART}" \
     "${NEW_FULL_ROOT_PART}"
   log "Done"
 
+  log "Applying delta payload to old partitions..."
+  apply_delta_payload "${delta_payload}" "${tmpdir}" "${NEW_DELTA_KERN_PART}" \
+    "${NEW_DELTA_ROOT_PART}" "${NEW_FULL_KERN_PART}" \
+    "${NEW_FULL_ROOT_PART}" "${OLD_KERN_PART}" "${OLD_ROOT_PART}"
+  log "Done"
+
   log "Comparing results of delta and new full updates..."
   diff "${tmpdir}/${NEW_FULL_KERN_PART}" "${tmpdir}/${NEW_DELTA_KERN_PART}"
   diff "${tmpdir}/${NEW_FULL_ROOT_PART}" "${tmpdir}/${NEW_DELTA_ROOT_PART}"
diff --git a/scripts/update_device.py b/scripts/update_device.py
index 64cfbe3..5c19b89 100755
--- a/scripts/update_device.py
+++ b/scripts/update_device.py
@@ -89,7 +89,9 @@
 
     otazip = zipfile.ZipFile(otafilename, 'r')
     payload_info = otazip.getinfo(self.OTA_PAYLOAD_BIN)
-    self.offset = payload_info.header_offset + len(payload_info.FileHeader())
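+    # The payload data begins right after this entry's local file header: the
+    # fixed-size header (zipfile.sizeFileHeader bytes) followed by the
+    # variable-length filename and extra fields.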
+    self.offset = payload_info.header_offset
+    self.offset += zipfile.sizeFileHeader
+    self.offset += len(payload_info.extra) + len(payload_info.filename)
     self.size = payload_info.file_size
     self.properties = otazip.read(self.OTA_PAYLOAD_PROPERTIES_TXT)
 
diff --git a/scripts/update_payload/__init__.py b/scripts/update_payload/__init__.py
index e4a5588..8ee95e2 100644
--- a/scripts/update_payload/__init__.py
+++ b/scripts/update_payload/__init__.py
@@ -1,6 +1,18 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+#
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Library for processing, verifying and applying Chrome OS update payloads."""
 
diff --git a/scripts/update_payload/applier.py b/scripts/update_payload/applier.py
index 6db7664..21d8e87 100644
--- a/scripts/update_payload/applier.py
+++ b/scripts/update_payload/applier.py
@@ -1,6 +1,18 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+#
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Applying a Chrome OS update payload.
 
@@ -18,6 +30,20 @@
 import bz2
 import hashlib
 import itertools
+# The lzma module is not available everywhere, so ignore a failed import; it
+# is only needed to apply REPLACE_XZ operations. For example, 'cros flash'
+# uses devserver code that eventually loads this file on client test devices,
+# which do not ship the lzma library, and 'cros flash' never exercises it
+# there. Python 3.x includes lzma, but for backward compatibility with
+# Python 2.7 the backports-lzma package is needed.
+try:
+  import lzma
+except ImportError:
+  try:
+    from backports import lzma
+  except ImportError:
+    pass
 import os
 import shutil
 import subprocess
@@ -216,7 +242,7 @@
     self.truncate_to_expected_size = truncate_to_expected_size
 
   def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size):
-    """Applies a REPLACE{,_BZ} operation.
+    """Applies a REPLACE{,_BZ,_XZ} operation.
 
     Args:
       op: the operation object
@@ -235,6 +261,10 @@
     if op.type == common.OpType.REPLACE_BZ:
       out_data = bz2.decompress(out_data)
       data_length = len(out_data)
+    elif op.type == common.OpType.REPLACE_XZ:
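+      # REPLACE_XZ data is XZ (LZMA) compressed; decompress it before writing.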
+      # pylint: disable=no-member
+      out_data = lzma.decompress(out_data)
+      data_length = len(out_data)
 
     # Write data to blocks specified in dst extents.
     data_start = 0
@@ -508,7 +538,8 @@
       # Read data blob.
       data = self.payload.ReadDataBlob(op.data_offset, op.data_length)
 
-      if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
+      if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ,
+                     common.OpType.REPLACE_XZ):
         self._ApplyReplaceOperation(op, op_name, data, new_part_file, part_size)
       elif op.type == common.OpType.MOVE:
         self._ApplyMoveOperation(op, op_name, new_part_file)
@@ -555,10 +586,7 @@
       if self.minor_version == common.INPLACE_MINOR_PAYLOAD_VERSION:
         # Copy the src partition to the dst one; make sure we don't truncate it.
         shutil.copyfile(old_part_file_name, new_part_file_name)
-      elif (self.minor_version == common.SOURCE_MINOR_PAYLOAD_VERSION or
-            self.minor_version == common.OPSRCHASH_MINOR_PAYLOAD_VERSION or
-            self.minor_version == common.BROTLI_BSDIFF_MINOR_PAYLOAD_VERSION or
-            self.minor_version == common.PUFFDIFF_MINOR_PAYLOAD_VERSION):
+      elif self.minor_version >= common.SOURCE_MINOR_PAYLOAD_VERSION:
         # In minor version >= 2, we don't want to copy the partitions, so
         # instead just make the new partition file.
         open(new_part_file_name, 'w').close()
@@ -591,46 +619,63 @@
       _VerifySha256(new_part_file, new_part_info.hash,
                     'new ' + part_name, length=new_part_info.size)
 
-  def Run(self, new_kernel_part, new_rootfs_part, old_kernel_part=None,
-          old_rootfs_part=None):
+  def Run(self, new_parts, old_parts=None):
     """Applier entry point, invoking all update operations.
 
     Args:
-      new_kernel_part: name of dest kernel partition file
-      new_rootfs_part: name of dest rootfs partition file
-      old_kernel_part: name of source kernel partition file (optional)
-      old_rootfs_part: name of source rootfs partition file (optional)
+      new_parts: map of partition name to dest partition file
+      old_parts: map of partition name to source partition file (optional)
 
     Raises:
       PayloadError if payload application failed.
     """
+    if old_parts is None:
+      old_parts = {}
+
     self.payload.ResetFile()
 
-    # Make sure the arguments are sane and match the payload.
-    if not (new_kernel_part and new_rootfs_part):
-      raise PayloadError('missing dst {kernel,rootfs} partitions')
+    new_part_info = {}
+    old_part_info = {}
+    install_operations = []
 
-    if not (old_kernel_part or old_rootfs_part):
-      if not self.payload.IsFull():
-        raise PayloadError('trying to apply a non-full update without src '
-                           '{kernel,rootfs} partitions')
-    elif old_kernel_part and old_rootfs_part:
-      if not self.payload.IsDelta():
-        raise PayloadError('trying to apply a non-delta update onto src '
-                           '{kernel,rootfs} partitions')
+    manifest = self.payload.manifest
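+    # Major version 1 payloads describe the kernel and rootfs via dedicated
+    # manifest fields; later versions list every partition (with its own
+    # operations) under manifest.partitions.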
+    if self.payload.header.version == 1:
+      for real_name, proto_name in common.CROS_PARTITIONS:
+        new_part_info[real_name] = getattr(manifest, 'new_%s_info' % proto_name)
+        old_part_info[real_name] = getattr(manifest, 'old_%s_info' % proto_name)
+
+      install_operations.append((common.ROOTFS, manifest.install_operations))
+      install_operations.append((common.KERNEL,
+                                 manifest.kernel_install_operations))
+    else:
+      for part in manifest.partitions:
+        name = part.partition_name
+        new_part_info[name] = part.new_partition_info
+        old_part_info[name] = part.old_partition_info
+        install_operations.append((name, part.operations))
+
+    part_names = set(new_part_info.keys())  # Equivalently, old_part_info.keys()
+
+    # Make sure the arguments are sane and match the payload.
+    new_part_names = set(new_parts.keys())
+    if new_part_names != part_names:
+      raise PayloadError('missing dst partition(s) %s' %
+                         ', '.join(part_names - new_part_names))
+
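+    # A full payload needs no source partitions, while a delta payload needs
+    # a source for every partition named in the payload.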
+    old_part_names = set(old_parts.keys())
+    if part_names - old_part_names:
+      if self.payload.IsDelta():
+        raise PayloadError('trying to apply a delta update without src '
+                           'partition(s) %s' %
+                           ', '.join(part_names - old_part_names))
+    elif old_part_names == part_names:
+      if self.payload.IsFull():
+        raise PayloadError('trying to apply a full update onto src partitions')
     else:
       raise PayloadError('not all src partitions provided')
 
-    # Apply update to rootfs.
-    self._ApplyToPartition(
-        self.payload.manifest.install_operations, 'rootfs',
-        'install_operations', new_rootfs_part,
-        self.payload.manifest.new_rootfs_info, old_rootfs_part,
-        self.payload.manifest.old_rootfs_info)
-
-    # Apply update to kernel update.
-    self._ApplyToPartition(
-        self.payload.manifest.kernel_install_operations, 'kernel',
-        'kernel_install_operations', new_kernel_part,
-        self.payload.manifest.new_kernel_info, old_kernel_part,
-        self.payload.manifest.old_kernel_info)
+    for name, operations in install_operations:
+      # Apply update to partition.
+      self._ApplyToPartition(
+          operations, name, '%s_install_operations' % name, new_parts[name],
+          new_part_info[name], old_parts.get(name, None), old_part_info[name])
diff --git a/scripts/update_payload/block_tracer.py b/scripts/update_payload/block_tracer.py
deleted file mode 100644
index 5caf7e3..0000000
--- a/scripts/update_payload/block_tracer.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Tracing block data source through a Chrome OS update payload.
-
-This module is used internally by the main Payload class for tracing block
-content through an update payload. This is a useful feature in debugging
-payload applying functionality in this package. The interface for invoking the
-tracer is as follows:
-
-  tracer = PayloadBlockTracer(payload)
-  tracer.Run(...)
-
-"""
-
-from __future__ import print_function
-
-from update_payload import common
-
-
-#
-# Payload block tracing.
-#
-class PayloadBlockTracer(object):
-  """Tracing the origin of block data through update instructions.
-
-  This is a short-lived object whose purpose is to isolate the logic used for
-  tracing the origin of destination partition blocks.
-
-  """
-
-  def __init__(self, payload):
-    assert payload.is_init, 'uninitialized update payload'
-    self.payload = payload
-
-  @staticmethod
-  def _TraceBlock(block, skip, trace_out_file, operations, base_name):
-    """Trace the origin of a given block through a sequence of operations.
-
-    This method tries to map the given dest block to the corresponding source
-    block from which its content originates in the course of an update. It
-    further tries to trace transitive origins through MOVE operations. It is
-    rather efficient, doing the actual tracing by means of a single reverse
-    sweep through the operation sequence. It dumps a log of operations and
-    source blocks responsible for the data in the given dest block to the
-    provided output file.
-
-    Args:
-      block: the block number to trace
-      skip: number of initial transitive origins to ignore
-      trace_out_file: a file object to dump the trace to
-      operations: the sequence of operations
-      base_name: name of the operation sequence
-    """
-    # Traverse operations backwards.
-    for op, op_name in common.OperationIter(operations, base_name,
-                                            reverse=True):
-      total_block_offset = 0
-      found = False
-
-      # Is the traced block mentioned in the dest extents?
-      for dst_ex, dst_ex_name in common.ExtentIter(op.dst_extents,
-                                                   op_name + '.dst_extents'):
-        if (block >= dst_ex.start_block
-            and block < dst_ex.start_block + dst_ex.num_blocks):
-          if skip:
-            skip -= 1
-          else:
-            total_block_offset += block - dst_ex.start_block
-            trace_out_file.write(
-                '%d: %s: found %s (total block offset: %d)\n' %
-                (block, dst_ex_name, common.FormatExtent(dst_ex),
-                 total_block_offset))
-            found = True
-            break
-
-        total_block_offset += dst_ex.num_blocks
-
-      if found:
-        # Don't trace further, unless it's a MOVE.
-        if op.type != common.OpType.MOVE:
-          break
-
-        # For MOVE, find corresponding source block and keep tracing.
-        for src_ex, src_ex_name in common.ExtentIter(op.src_extents,
-                                                     op_name + '.src_extents'):
-          if total_block_offset < src_ex.num_blocks:
-            block = src_ex.start_block + total_block_offset
-            trace_out_file.write(
-                '%s:  mapped to %s (%d)\n' %
-                (src_ex_name, common.FormatExtent(src_ex), block))
-            break
-
-          total_block_offset -= src_ex.num_blocks
-
-  def Run(self, block, skip, trace_out_file, is_kernel):
-    """Block tracer entry point, invoking the actual search.
-
-    Args:
-      block: the block number whose origin to trace
-      skip: the number of first origin mappings to skip
-      trace_out_file: file object to dump the trace to
-      is_kernel: trace through kernel (True) or rootfs (False) operations
-    """
-    if is_kernel:
-      operations = self.payload.manifest.kernel_install_operations
-      base_name = 'kernel_install_operations'
-    else:
-      operations = self.payload.manifest.install_operations
-      base_name = 'install_operations'
-
-    self._TraceBlock(block, skip, trace_out_file, operations, base_name)
diff --git a/scripts/update_payload/checker.py b/scripts/update_payload/checker.py
index 21d99a0..6d17fbe 100644
--- a/scripts/update_payload/checker.py
+++ b/scripts/update_payload/checker.py
@@ -1,6 +1,18 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+#
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Verifying the integrity of a Chrome OS update payload.
 
@@ -16,6 +28,7 @@
 
 import array
 import base64
+import collections
 import hashlib
 import itertools
 import os
@@ -58,6 +71,7 @@
     3: (_TYPE_DELTA,),
     4: (_TYPE_DELTA,),
     5: (_TYPE_DELTA,),
+    6: (_TYPE_DELTA,),
 }
 
 _OLD_DELTA_USABLE_PART_SIZE = 2 * 1024 * 1024 * 1024
@@ -318,11 +332,12 @@
     # Reset state; these will be assigned when the manifest is checked.
     self.sigs_offset = 0
     self.sigs_size = 0
-    self.old_rootfs_fs_size = 0
-    self.old_kernel_fs_size = 0
-    self.new_rootfs_fs_size = 0
-    self.new_kernel_fs_size = 0
+    self.old_part_info = {}
+    self.new_part_info = {}
+    self.new_fs_sizes = collections.defaultdict(int)
+    self.old_fs_sizes = collections.defaultdict(int)
     self.minor_version = None
+    self.major_version = None
 
   @staticmethod
   def _CheckElem(msg, name, report, is_mandatory, is_submsg, convert=str,
@@ -352,22 +367,56 @@
     Raises:
       error.PayloadError if a mandatory element is missing.
     """
+    element_result = collections.namedtuple('element_result', ['msg', 'report'])
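+    # A namedtuple lets callers refer to the result fields as .msg and .report
+    # (e.g. for the per-partition info collected in _CheckManifest).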
+
     if not msg.HasField(name):
       if is_mandatory:
         raise error.PayloadError('%smissing mandatory %s %r.' %
                                  (msg_name + ' ' if msg_name else '',
                                   'sub-message' if is_submsg else 'field',
                                   name))
-      return None, None
+      return element_result(None, None)
 
     value = getattr(msg, name)
     if is_submsg:
-      return value, report and report.AddSubReport(name)
+      return element_result(value, report and report.AddSubReport(name))
     else:
       if report:
         report.AddField(name, convert(value), linebreak=linebreak,
                         indent=indent)
-      return value, None
+      return element_result(value, None)
+
+  @staticmethod
+  def _CheckRepeatedElemNotPresent(msg, field_name, msg_name):
+    """Checks that a repeated element is not specified in the message.
+
+    Args:
+      msg: The message containing the element.
+      field_name: The name of the element.
+      msg_name: The name of the message object (for error reporting).
+
+    Raises:
+      error.PayloadError if the repeated element is present or non-empty.
+    """
+    if getattr(msg, field_name, None):
+      raise error.PayloadError('%sfield %r not empty.' %
+                               (msg_name + ' ' if msg_name else '', field_name))
+
+  @staticmethod
+  def _CheckElemNotPresent(msg, field_name, msg_name):
+    """Checks that an element is not specified in the message.
+
+    Args:
+      msg: The message containing the element.
+      field_name: The name of the element.
+      msg_name: The name of the message object (for error reporting).
+
+    Raises:
+      error.PayloadError if the element is present.
+    """
+    if msg.HasField(field_name):
+      raise error.PayloadError('%sfield %r exists.' %
+                               (msg_name + ' ' if msg_name else '', field_name))
 
   @staticmethod
   def _CheckMandatoryField(msg, field_name, report, msg_name, convert=str,
@@ -417,6 +466,22 @@
                                 ' in ' + obj_name if obj_name else ''))
 
   @staticmethod
+  def _CheckPresentIffMany(vals, name, obj_name):
+    """Checks that a set of vals and names imply every other element.
+
+    Args:
+      vals: The set of values to be compared.
+      name: The name of the objects holding the corresponding value.
+      obj_name: Name of the object containing these values.
+
+    Raises:
+      error.PayloadError if assertion does not hold.
+    """
+    if any(vals) and not all(vals):
+      raise error.PayloadError('%r is not present in all values%s.' %
+                               (name, ' in ' + obj_name if obj_name else ''))
+
+  @staticmethod
   def _Run(cmd, send_data=None):
     """Runs a subprocess, returns its output.
 
@@ -528,13 +593,12 @@
       raise error.PayloadError('Unsupported minor version: %d' %
                                self.minor_version)
 
-  def _CheckManifest(self, report, rootfs_part_size=0, kernel_part_size=0):
+  def _CheckManifest(self, report, part_sizes=None):
     """Checks the payload manifest.
 
     Args:
       report: A report object to add to.
-      rootfs_part_size: Size of the rootfs partition in bytes.
-      kernel_part_size: Size of the kernel partition in bytes.
+      part_sizes: Map of partition label to partition size in bytes.
 
     Returns:
       A tuple consisting of the partition block size used during the update
@@ -543,6 +607,9 @@
     Raises:
       error.PayloadError if any of the checks fail.
     """
+    self.major_version = self.payload.header.version
+
+    part_sizes = collections.defaultdict(int, part_sizes or {})
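+    # Partitions without an explicit size default to 0, which disables the
+    # partition-size checks below.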
     manifest = self.payload.manifest
     report.AddSection('manifest')
 
@@ -561,39 +628,57 @@
     self._CheckPresentIff(self.sigs_offset, self.sigs_size,
                           'signatures_offset', 'signatures_size', 'manifest')
 
-    # Check: old_kernel_info <==> old_rootfs_info.
-    oki_msg, oki_report = self._CheckOptionalSubMsg(manifest,
-                                                    'old_kernel_info', report)
-    ori_msg, ori_report = self._CheckOptionalSubMsg(manifest,
-                                                    'old_rootfs_info', report)
-    self._CheckPresentIff(oki_msg, ori_msg, 'old_kernel_info',
-                          'old_rootfs_info', 'manifest')
-    if oki_msg:  # equivalently, ori_msg
+    if self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION:
+      for real_name, proto_name in common.CROS_PARTITIONS:
+        self.old_part_info[real_name] = self._CheckOptionalSubMsg(
+            manifest, 'old_%s_info' % proto_name, report)
+        self.new_part_info[real_name] = self._CheckMandatorySubMsg(
+            manifest, 'new_%s_info' % proto_name, report, 'manifest')
+
+      # Check: old_kernel_info <==> old_rootfs_info.
+      self._CheckPresentIff(self.old_part_info[common.KERNEL].msg,
+                            self.old_part_info[common.ROOTFS].msg,
+                            'old_kernel_info', 'old_rootfs_info', 'manifest')
+    else:
+      for part in manifest.partitions:
+        name = part.partition_name
+        self.old_part_info[name] = self._CheckOptionalSubMsg(
+            part, 'old_partition_info', report)
+        self.new_part_info[name] = self._CheckMandatorySubMsg(
+            part, 'new_partition_info', report, 'manifest.partitions')
+
+      # Check: Old-style partition infos should not be specified.
+      for _, part in common.CROS_PARTITIONS:
+        self._CheckElemNotPresent(manifest, 'old_%s_info' % part, 'manifest')
+        self._CheckElemNotPresent(manifest, 'new_%s_info' % part, 'manifest')
+
+      # Check: If old_partition_info is specified anywhere, it must be
+      # specified everywhere.
+      old_part_msgs = [part.msg for part in self.old_part_info.values() if part]
+      self._CheckPresentIffMany(old_part_msgs, 'old_partition_info',
+                                'manifest.partitions')
+
+    is_delta = any(part and part.msg for part in self.old_part_info.values())
+    if is_delta:
       # Assert/mark delta payload.
       if self.payload_type == _TYPE_FULL:
         raise error.PayloadError(
             'Apparent full payload contains old_{kernel,rootfs}_info.')
       self.payload_type = _TYPE_DELTA
 
-      # Check: {size, hash} present in old_{kernel,rootfs}_info.
-      self.old_kernel_fs_size = self._CheckMandatoryField(
-          oki_msg, 'size', oki_report, 'old_kernel_info')
-      self._CheckMandatoryField(oki_msg, 'hash', oki_report, 'old_kernel_info',
-                                convert=common.FormatSha256)
-      self.old_rootfs_fs_size = self._CheckMandatoryField(
-          ori_msg, 'size', ori_report, 'old_rootfs_info')
-      self._CheckMandatoryField(ori_msg, 'hash', ori_report, 'old_rootfs_info',
-                                convert=common.FormatSha256)
+      for part, (msg, part_report) in self.old_part_info.iteritems():
+        # Check: {size, hash} present in old_{kernel,rootfs}_info.
+        field = 'old_%s_info' % part
+        self.old_fs_sizes[part] = self._CheckMandatoryField(msg, 'size',
+                                                            part_report, field)
+        self._CheckMandatoryField(msg, 'hash', part_report, field,
+                                  convert=common.FormatSha256)
 
-      # Check: old_{kernel,rootfs} size must fit in respective partition.
-      if kernel_part_size and self.old_kernel_fs_size > kernel_part_size:
-        raise error.PayloadError(
-            'Old kernel content (%d) exceed partition size (%d).' %
-            (self.old_kernel_fs_size, kernel_part_size))
-      if rootfs_part_size and self.old_rootfs_fs_size > rootfs_part_size:
-        raise error.PayloadError(
-            'Old rootfs content (%d) exceed partition size (%d).' %
-            (self.old_rootfs_fs_size, rootfs_part_size))
+        # Check: old_{kernel,rootfs} size must fit in respective partition.
+        if self.old_fs_sizes[part] > part_sizes[part] > 0:
+          raise error.PayloadError(
+              'Old %s content (%d) exceed partition size (%d).' %
+              (part, self.old_fs_sizes[part], part_sizes[part]))
     else:
       # Assert/mark full payload.
       if self.payload_type == _TYPE_DELTA:
@@ -601,31 +686,19 @@
             'Apparent delta payload missing old_{kernel,rootfs}_info.')
       self.payload_type = _TYPE_FULL
 
-    # Check: new_kernel_info present; contains {size, hash}.
-    nki_msg, nki_report = self._CheckMandatorySubMsg(
-        manifest, 'new_kernel_info', report, 'manifest')
-    self.new_kernel_fs_size = self._CheckMandatoryField(
-        nki_msg, 'size', nki_report, 'new_kernel_info')
-    self._CheckMandatoryField(nki_msg, 'hash', nki_report, 'new_kernel_info',
-                              convert=common.FormatSha256)
+    # Check: new_{kernel,rootfs}_info present; contains {size, hash}.
+    for part, (msg, part_report) in self.new_part_info.iteritems():
+      field = 'new_%s_info' % part
+      self.new_fs_sizes[part] = self._CheckMandatoryField(msg, 'size',
+                                                          part_report, field)
+      self._CheckMandatoryField(msg, 'hash', part_report, field,
+                                convert=common.FormatSha256)
 
-    # Check: new_rootfs_info present; contains {size, hash}.
-    nri_msg, nri_report = self._CheckMandatorySubMsg(
-        manifest, 'new_rootfs_info', report, 'manifest')
-    self.new_rootfs_fs_size = self._CheckMandatoryField(
-        nri_msg, 'size', nri_report, 'new_rootfs_info')
-    self._CheckMandatoryField(nri_msg, 'hash', nri_report, 'new_rootfs_info',
-                              convert=common.FormatSha256)
-
-    # Check: new_{kernel,rootfs} size must fit in respective partition.
-    if kernel_part_size and self.new_kernel_fs_size > kernel_part_size:
-      raise error.PayloadError(
-          'New kernel content (%d) exceed partition size (%d).' %
-          (self.new_kernel_fs_size, kernel_part_size))
-    if rootfs_part_size and self.new_rootfs_fs_size > rootfs_part_size:
-      raise error.PayloadError(
-          'New rootfs content (%d) exceed partition size (%d).' %
-          (self.new_rootfs_fs_size, rootfs_part_size))
+      # Check: new_{kernel,rootfs} size must fit in respective partition.
+      if self.new_fs_sizes[part] > part_sizes[part] > 0:
+        raise error.PayloadError(
+            'New %s content (%d) exceed partition size (%d).' %
+            (part, self.new_fs_sizes[part], part_sizes[part]))
 
     # Check: minor_version makes sense for the payload type. This check should
     # run after the payload type has been set.
@@ -702,7 +775,7 @@
     return total_num_blocks
 
   def _CheckReplaceOperation(self, op, data_length, total_dst_blocks, op_name):
-    """Specific checks for REPLACE/REPLACE_BZ operations.
+    """Specific checks for REPLACE/REPLACE_BZ/REPLACE_XZ operations.
 
     Args:
       op: The operation object from the manifest.
@@ -726,7 +799,7 @@
                                            self.block_size,
                                            op_name + '.data_length', 'dst')
     else:
-      # Check: data_length must be smaller than the alotted dst blocks.
+      # Check: data_length must be smaller than the allotted dst blocks.
       if data_length >= total_dst_blocks * self.block_size:
         raise error.PayloadError(
             '%s: data_length (%d) must be less than allotted dst block '
@@ -851,7 +924,7 @@
     if data_length is None:
       raise error.PayloadError('%s: missing data_{offset,length}.' % op_name)
 
-    # Check: data_length is strictly smaller than the alotted dst blocks.
+    # Check: data_length is strictly smaller than the allotted dst blocks.
     if data_length >= total_dst_blocks * self.block_size:
       raise error.PayloadError(
           '%s: data_length (%d) must be smaller than allotted dst space '
@@ -997,6 +1070,10 @@
     # Type-specific checks.
     if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
       self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
+    elif (op.type == common.OpType.REPLACE_XZ and
+          (self.minor_version >= 3 or
+           self.major_version >= common.BRILLO_MAJOR_PAYLOAD_VERSION)):
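+      # REPLACE_XZ is only valid from minor version 3 onward, or in any
+      # payload with a Brillo (>= 2) major version.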
+      self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
     elif op.type == common.OpType.MOVE and self.minor_version == 1:
       self._CheckMoveOperation(op, data_offset, total_src_blocks,
                                total_dst_blocks, op_name)
@@ -1071,6 +1148,7 @@
     op_counts = {
         common.OpType.REPLACE: 0,
         common.OpType.REPLACE_BZ: 0,
+        common.OpType.REPLACE_XZ: 0,
         common.OpType.MOVE: 0,
         common.OpType.ZERO: 0,
         common.OpType.BSDIFF: 0,
@@ -1083,6 +1161,7 @@
     op_blob_totals = {
         common.OpType.REPLACE: 0,
         common.OpType.REPLACE_BZ: 0,
+        common.OpType.REPLACE_XZ: 0,
         # MOVE operations don't have blobs.
         common.OpType.BSDIFF: 0,
         # SOURCE_COPY operations don't have blobs.
@@ -1174,16 +1253,19 @@
 
     last_ops_section = (self.payload.manifest.kernel_install_operations or
                         self.payload.manifest.install_operations)
-    fake_sig_op = last_ops_section[-1]
-    # Check: signatures_{offset,size} must match the last (fake) operation.
-    if not (fake_sig_op.type == common.OpType.REPLACE and
-            self.sigs_offset == fake_sig_op.data_offset and
-            self.sigs_size == fake_sig_op.data_length):
-      raise error.PayloadError(
-          'Signatures_{offset,size} (%d+%d) does not match last operation '
-          '(%d+%d).' %
-          (self.sigs_offset, self.sigs_size, fake_sig_op.data_offset,
-           fake_sig_op.data_length))
+
+    # Only major version 1 has the fake signature OP at the end.
+    if self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION:
+      fake_sig_op = last_ops_section[-1]
+      # Check: signatures_{offset,size} must match the last (fake) operation.
+      if not (fake_sig_op.type == common.OpType.REPLACE and
+              self.sigs_offset == fake_sig_op.data_offset and
+              self.sigs_size == fake_sig_op.data_length):
+        raise error.PayloadError('Signatures_{offset,size} (%d+%d) does not'
+                                 ' match last operation (%d+%d).' %
+                                 (self.sigs_offset, self.sigs_size,
+                                  fake_sig_op.data_offset,
+                                  fake_sig_op.data_length))
 
     # Compute the checksum of all data up to signature blob.
     # TODO(garnold) we're re-reading the whole data section into a string
@@ -1210,17 +1292,16 @@
         raise error.PayloadError('Unknown signature version (%d).' %
                                  sig.version)
 
-  def Run(self, pubkey_file_name=None, metadata_sig_file=None,
-          rootfs_part_size=0, kernel_part_size=0, report_out_file=None):
+  def Run(self, pubkey_file_name=None, metadata_sig_file=None, metadata_size=0,
+          part_sizes=None, report_out_file=None):
     """Checker entry point, invoking all checks.
 
     Args:
       pubkey_file_name: Public key used for signature verification.
       metadata_sig_file: Metadata signature, if verification is desired.
-      rootfs_part_size: The size of rootfs partitions in bytes (default: infer
-                        based on payload type and version).
-      kernel_part_size: The size of kernel partitions in bytes (default: use
-                        reported filesystem size).
+      metadata_size: Metadata size, if verification is desired.
+      part_sizes: Mapping of partition label to size in bytes (default: infer
+        based on payload type and version or filesystem).
       report_out_file: File object to dump the report to.
 
     Raises:
@@ -1237,6 +1318,12 @@
     self.payload.ResetFile()
 
     try:
+      # Check metadata_size (if provided).
+      if metadata_size and self.payload.metadata_size != metadata_size:
+        raise error.PayloadError('Invalid payload metadata size in payload(%d) '
+                                 'vs given(%d)' % (self.payload.metadata_size,
+                                                   metadata_size))
+
       # Check metadata signature (if provided).
       if metadata_sig_file:
         metadata_sig = base64.b64decode(metadata_sig_file.read())
@@ -1247,65 +1334,84 @@
       # Part 1: Check the file header.
       report.AddSection('header')
       # Check: Payload version is valid.
-      if self.payload.header.version != 1:
+      if self.payload.header.version not in (1, 2):
         raise error.PayloadError('Unknown payload version (%d).' %
                                  self.payload.header.version)
       report.AddField('version', self.payload.header.version)
       report.AddField('manifest len', self.payload.header.manifest_len)
 
       # Part 2: Check the manifest.
-      self._CheckManifest(report, rootfs_part_size, kernel_part_size)
+      self._CheckManifest(report, part_sizes)
       assert self.payload_type, 'payload type should be known by now'
 
-      # Infer the usable partition size when validating rootfs operations:
-      # - If rootfs partition size was provided, use that.
-      # - Otherwise, if this is an older delta (minor version < 2), stick with
-      #   a known constant size. This is necessary because older deltas may
-      #   exceed the filesystem size when moving data blocks around.
-      # - Otherwise, use the encoded filesystem size.
-      new_rootfs_usable_size = self.new_rootfs_fs_size
-      old_rootfs_usable_size = self.old_rootfs_fs_size
-      if rootfs_part_size:
-        new_rootfs_usable_size = rootfs_part_size
-        old_rootfs_usable_size = rootfs_part_size
-      elif self.payload_type == _TYPE_DELTA and self.minor_version in (None, 1):
-        new_rootfs_usable_size = _OLD_DELTA_USABLE_PART_SIZE
-        old_rootfs_usable_size = _OLD_DELTA_USABLE_PART_SIZE
+      manifest = self.payload.manifest
 
-      # Part 3: Examine rootfs operations.
-      # TODO(garnold)(chromium:243559) only default to the filesystem size if
-      # no explicit size provided *and* the partition size is not embedded in
-      # the payload; see issue for more details.
-      report.AddSection('rootfs operations')
-      total_blob_size = self._CheckOperations(
-          self.payload.manifest.install_operations, report,
-          'install_operations', self.old_rootfs_fs_size,
-          self.new_rootfs_fs_size, old_rootfs_usable_size,
-          new_rootfs_usable_size, 0, False)
+      # Part 3: Examine partition operations.
+      install_operations = []
+      if self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION:
+        # The partitions field must not exist in major version 1 payloads.
+        self._CheckRepeatedElemNotPresent(manifest, 'partitions', 'manifest')
 
-      # Part 4: Examine kernel operations.
-      # TODO(garnold)(chromium:243559) as above.
-      report.AddSection('kernel operations')
-      total_blob_size += self._CheckOperations(
-          self.payload.manifest.kernel_install_operations, report,
-          'kernel_install_operations', self.old_kernel_fs_size,
-          self.new_kernel_fs_size,
-          kernel_part_size if kernel_part_size else self.old_kernel_fs_size,
-          kernel_part_size if kernel_part_size else self.new_kernel_fs_size,
-          total_blob_size, True)
+        install_operations.append((common.ROOTFS, manifest.install_operations))
+        install_operations.append((common.KERNEL,
+                                   manifest.kernel_install_operations))
+
+      else:
+        self._CheckRepeatedElemNotPresent(manifest, 'install_operations',
+                                          'manifest')
+        self._CheckRepeatedElemNotPresent(manifest, 'kernel_install_operations',
+                                          'manifest')
+
+        for update in manifest.partitions:
+          install_operations.append((update.partition_name, update.operations))
+
+      total_blob_size = 0
+      for part, operations in install_operations:
+        report.AddSection('%s operations' % part)
+
+        new_fs_usable_size = self.new_fs_sizes[part]
+        old_fs_usable_size = self.old_fs_sizes[part]
+
+        if part_sizes and part_sizes.get(part, None):
+          new_fs_usable_size = old_fs_usable_size = part_sizes[part]
+        # Infer the usable partition size when validating rootfs operations:
+        # - If rootfs partition size was provided, use that.
+        # - Otherwise, if this is an older delta (minor version < 2), stick with
+        #   a known constant size. This is necessary because older deltas may
+        #   exceed the filesystem size when moving data blocks around.
+        # - Otherwise, use the encoded filesystem size.
+        elif self.payload_type == _TYPE_DELTA and part == common.ROOTFS and \
+            self.minor_version in (None, 1):
+          new_fs_usable_size = old_fs_usable_size = _OLD_DELTA_USABLE_PART_SIZE
+
+        # TODO(garnold)(chromium:243559) only default to the filesystem size if
+        # no explicit size provided *and* the partition size is not embedded in
+        # the payload; see issue for more details.
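+        # The final argument tells _CheckOperations whether a trailing fake
+        # signature operation is allowed, which is only the case for the
+        # kernel operations of a major version 1 payload.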
+        total_blob_size += self._CheckOperations(
+            operations, report, '%s_install_operations' % part,
+            self.old_fs_sizes[part], self.new_fs_sizes[part],
+            old_fs_usable_size, new_fs_usable_size, total_blob_size,
+            (self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION
+             and part == common.KERNEL))
 
       # Check: Operations data reach the end of the payload file.
       used_payload_size = self.payload.data_offset + total_blob_size
+      # Major version 2 and higher payloads place the signature blob at the
+      # end of the file, so, when present, it must be counted toward the used
+      # payload size.
+      if (self.major_version >= common.BRILLO_MAJOR_PAYLOAD_VERSION and
+          self.sigs_size):
+        used_payload_size += self.sigs_size
+
       if used_payload_size != payload_file_size:
         raise error.PayloadError(
             'Used payload size (%d) different from actual file size (%d).' %
             (used_payload_size, payload_file_size))
 
-      # Part 5: Handle payload signatures message.
+      # Part 4: Handle payload signatures message.
       if self.check_payload_sig and self.sigs_size:
         self._CheckSignatures(report, pubkey_file_name)
 
-      # Part 6: Summary.
+      # Part 5: Summary.
       report.AddSection('summary')
       report.AddField('update type', self.payload_type)
 
diff --git a/scripts/update_payload/checker_unittest.py b/scripts/update_payload/checker_unittest.py
index a21c2ba..7e52233 100755
--- a/scripts/update_payload/checker_unittest.py
+++ b/scripts/update_payload/checker_unittest.py
@@ -1,8 +1,19 @@
 #!/usr/bin/python2
 #
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Unit testing checker.py."""
 
@@ -474,13 +485,16 @@
                    fail_bad_nki or fail_bad_nri or fail_old_kernel_fs_size or
                    fail_old_rootfs_fs_size or fail_new_kernel_fs_size or
                    fail_new_rootfs_fs_size)
+    part_sizes = {
+        common.ROOTFS: rootfs_part_size,
+        common.KERNEL: kernel_part_size
+    }
+
     if should_fail:
       self.assertRaises(PayloadError, payload_checker._CheckManifest, report,
-                        rootfs_part_size, kernel_part_size)
+                        part_sizes)
     else:
-      self.assertIsNone(payload_checker._CheckManifest(report,
-                                                       rootfs_part_size,
-                                                       kernel_part_size))
+      self.assertIsNone(payload_checker._CheckManifest(report, part_sizes))
 
   def testCheckLength(self):
     """Tests _CheckLength()."""
@@ -620,6 +634,41 @@
         PayloadError, payload_checker._CheckReplaceOperation,
         op, data_length, (data_length + block_size - 1) / block_size, 'foo')
 
+  def testCheckReplaceXzOperation(self):
+    """Tests _CheckReplaceOperation() where op.type == REPLACE_XZ."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    block_size = payload_checker.block_size
+    data_length = block_size * 3
+
+    op = self.mox.CreateMock(
+        update_metadata_pb2.InstallOperation)
+    op.type = common.OpType.REPLACE_XZ
+
+    # Pass.
+    op.src_extents = []
+    self.assertIsNone(
+        payload_checker._CheckReplaceOperation(
+            op, data_length, (data_length + block_size - 1) / block_size + 5,
+            'foo'))
+
+    # Fail, src extents found.
+    op.src_extents = ['bar']
+    self.assertRaises(
+        PayloadError, payload_checker._CheckReplaceOperation,
+        op, data_length, (data_length + block_size - 1) / block_size + 5, 'foo')
+
+    # Fail, missing data.
+    op.src_extents = []
+    self.assertRaises(
+        PayloadError, payload_checker._CheckReplaceOperation,
+        op, None, (data_length + block_size - 1) / block_size, 'foo')
+
+    # Fail, too few blocks to justify XZ.
+    op.src_extents = []
+    self.assertRaises(
+        PayloadError, payload_checker._CheckReplaceOperation,
+        op, data_length, (data_length + block_size - 1) / block_size, 'foo')
+
   def testCheckMoveOperation_Pass(self):
     """Tests _CheckMoveOperation(); pass case."""
     payload_checker = checker.PayloadChecker(self.MockPayload())
@@ -792,8 +841,8 @@
     """Parametric testing of _CheckOperation().
 
     Args:
-      op_type_name: 'REPLACE', 'REPLACE_BZ', 'MOVE', 'BSDIFF', 'SOURCE_COPY',
-        'SOURCE_BSDIFF', BROTLI_BSDIFF or 'PUFFDIFF'.
+      op_type_name: 'REPLACE', 'REPLACE_BZ', 'REPLACE_XZ', 'MOVE', 'BSDIFF',
+        'SOURCE_COPY', 'SOURCE_BSDIFF', BROTLI_BSDIFF or 'PUFFDIFF'.
       is_last: Whether we're testing the last operation in a sequence.
       allow_signature: Whether we're testing a signature-capable operation.
       allow_unhashed: Whether we're allowing to not hash the data.
@@ -842,12 +891,16 @@
                           self.NewExtentList((1, 16)))
         total_src_blocks = 16
 
+    # TODO(tbrindus): add major version 2 tests.
+    payload_checker.major_version = common.CHROMEOS_MAJOR_PAYLOAD_VERSION
     if op_type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
       payload_checker.minor_version = 0
     elif op_type in (common.OpType.MOVE, common.OpType.BSDIFF):
       payload_checker.minor_version = 2 if fail_bad_minor_version else 1
     elif op_type in (common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF):
       payload_checker.minor_version = 1 if fail_bad_minor_version else 2
+    if op_type == common.OpType.REPLACE_XZ:
+      payload_checker.minor_version = 2 if fail_bad_minor_version else 3
     elif op_type in (common.OpType.ZERO, common.OpType.DISCARD,
                      common.OpType.BROTLI_BSDIFF):
       payload_checker.minor_version = 3 if fail_bad_minor_version else 4
@@ -1037,7 +1090,10 @@
     report = checker._PayloadReport()
 
     # We have to check the manifest first in order to set signature attributes.
-    payload_checker._CheckManifest(report, rootfs_part_size, kernel_part_size)
+    payload_checker._CheckManifest(report, {
+        common.ROOTFS: rootfs_part_size,
+        common.KERNEL: kernel_part_size
+    })
 
     should_fail = (fail_empty_sigs_blob or fail_missing_pseudo_op or
                    fail_mismatched_pseudo_op or fail_sig_missing_fields or
@@ -1079,8 +1135,8 @@
 
   def DoRunTest(self, rootfs_part_size_provided, kernel_part_size_provided,
                 fail_wrong_payload_type, fail_invalid_block_size,
-                fail_mismatched_block_size, fail_excess_data,
-                fail_rootfs_part_size_exceeded,
+                fail_mismatched_metadata_size, fail_mismatched_block_size,
+                fail_excess_data, fail_rootfs_part_size_exceeded,
                 fail_kernel_part_size_exceeded):
     """Tests Run()."""
     # Generate a test payload. For this test, we generate a full update that
@@ -1130,6 +1186,11 @@
     else:
       use_block_size = block_size
 
+    # For these unittests, 246 is the metadata size generated for the test
+    # payload.
+    metadata_size = 246
+    if fail_mismatched_metadata_size:
+      metadata_size += 1
+
     kwargs = {
         'payload_gen_dargs': {
             'privkey_file_name': test_utils._PRIVKEY_FILE_NAME,
@@ -1146,11 +1207,15 @@
       payload_checker = _GetPayloadChecker(payload_gen.WriteToFileWithData,
                                            **kwargs)
 
-      kwargs = {'pubkey_file_name': test_utils._PUBKEY_FILE_NAME,
-                'rootfs_part_size': rootfs_part_size,
-                'kernel_part_size': kernel_part_size}
+      kwargs = {
+          'pubkey_file_name': test_utils._PUBKEY_FILE_NAME,
+          'metadata_size': metadata_size,
+          'part_sizes': {
+              common.KERNEL: kernel_part_size,
+              common.ROOTFS: rootfs_part_size}}
+
       should_fail = (fail_wrong_payload_type or fail_mismatched_block_size or
-                     fail_excess_data or
+                     fail_mismatched_metadata_size or fail_excess_data or
                      fail_rootfs_part_size_exceeded or
                      fail_kernel_part_size_exceeded)
       if should_fail:
@@ -1170,10 +1235,13 @@
   """Returns True iff the combination of arguments represents a valid test."""
   op_type = _OpTypeByName(op_type_name)
 
-  # REPLACE/REPLACE_BZ operations don't read data from src partition. They are
-  # compatible with all valid minor versions, so we don't need to check that.
-  if (op_type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ) and (
-      fail_src_extents or fail_src_length or fail_bad_minor_version)):
+  # REPLACE/REPLACE_BZ/REPLACE_XZ operations don't read data from src
+  # partition. They are compatible with all valid minor versions, so we don't
+  # need to check that.
+  if (op_type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ,
+                  common.OpType.REPLACE_XZ) and (fail_src_extents or
+                                                 fail_src_length or
+                                                 fail_bad_minor_version)):
     return False
 
   # MOVE and SOURCE_COPY operations don't carry data.
@@ -1259,8 +1327,8 @@
 
   # Add all _CheckOperation() test cases.
   AddParametricTests('CheckOperation',
-                     {'op_type_name': ('REPLACE', 'REPLACE_BZ', 'MOVE',
-                                       'BSDIFF', 'SOURCE_COPY',
+                     {'op_type_name': ('REPLACE', 'REPLACE_BZ', 'REPLACE_XZ',
+                                       'MOVE', 'BSDIFF', 'SOURCE_COPY',
                                        'SOURCE_BSDIFF', 'PUFFDIFF',
                                        'BROTLI_BSDIFF'),
                       'is_last': (True, False),
@@ -1302,6 +1370,7 @@
                       'kernel_part_size_provided': (True, False),
                       'fail_wrong_payload_type': (True, False),
                       'fail_invalid_block_size': (True, False),
+                      'fail_mismatched_metadata_size': (True, False),
                       'fail_mismatched_block_size': (True, False),
                       'fail_excess_data': (True, False),
                       'fail_rootfs_part_size_exceeded': (True, False),
diff --git a/scripts/update_payload/common.py b/scripts/update_payload/common.py
index ac05ccd..9061a75 100644
--- a/scripts/update_payload/common.py
+++ b/scripts/update_payload/common.py
@@ -1,6 +1,18 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+#
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Utilities for update payload processing."""
 
@@ -30,6 +42,11 @@
 BROTLI_BSDIFF_MINOR_PAYLOAD_VERSION = 4
 PUFFDIFF_MINOR_PAYLOAD_VERSION = 5
 
+KERNEL = 'kernel'
+ROOTFS = 'root'
+# Tuple of (name in system, name in protobuf).
+CROS_PARTITIONS = ((KERNEL, KERNEL), (ROOTFS, 'rootfs'))
+
 #
 # Payload operation types.
 #
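
The new KERNEL/ROOTFS labels and the CROS_PARTITIONS (name in system, name in protobuf) pairs are the keys the reworked map-based APIs later in this patch take. A minimal sketch of how a caller might consume them follows; the old_<name>_info / new_<name>_info manifest field naming is an editor's assumption, not something this hunk states.

from update_payload import common

# Map the new labels to the legacy per-partition manifest fields, assuming the
# usual old_<proto_name>_info / new_<proto_name>_info convention.
legacy_info_fields = {
    part: ('old_%s_info' % proto, 'new_%s_info' % proto)
    for part, proto in common.CROS_PARTITIONS}
# -> {'kernel': ('old_kernel_info', 'new_kernel_info'),
#     'root': ('old_rootfs_info', 'new_rootfs_info')}
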
diff --git a/scripts/update_payload/error.py b/scripts/update_payload/error.py
index 8b9cadd..6f95433 100644
--- a/scripts/update_payload/error.py
+++ b/scripts/update_payload/error.py
@@ -1,6 +1,18 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+#
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Payload handling errors."""
 
diff --git a/scripts/update_payload/format_utils.py b/scripts/update_payload/format_utils.py
index 2c3775c..6248ba9 100644
--- a/scripts/update_payload/format_utils.py
+++ b/scripts/update_payload/format_utils.py
@@ -1,6 +1,18 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+#
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Various formatting functions."""
 
diff --git a/scripts/update_payload/format_utils_unittest.py b/scripts/update_payload/format_utils_unittest.py
index 7153f9e..42ea621 100755
--- a/scripts/update_payload/format_utils_unittest.py
+++ b/scripts/update_payload/format_utils_unittest.py
@@ -1,8 +1,19 @@
 #!/usr/bin/python2
 #
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Unit tests for format_utils.py."""
 
diff --git a/scripts/update_payload/histogram.py b/scripts/update_payload/histogram.py
index f72db61..1ac2ab5 100644
--- a/scripts/update_payload/histogram.py
+++ b/scripts/update_payload/histogram.py
@@ -1,6 +1,18 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+#
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Histogram generation tools."""
 
diff --git a/scripts/update_payload/histogram_unittest.py b/scripts/update_payload/histogram_unittest.py
index 643bb32..e757dd0 100755
--- a/scripts/update_payload/histogram_unittest.py
+++ b/scripts/update_payload/histogram_unittest.py
@@ -1,8 +1,19 @@
 #!/usr/bin/python2
 #
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Unit tests for histogram.py."""
 
diff --git a/scripts/update_payload/payload.py b/scripts/update_payload/payload.py
index 8d9a20e..2a0cb58 100644
--- a/scripts/update_payload/payload.py
+++ b/scripts/update_payload/payload.py
@@ -1,6 +1,18 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+#
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Tools for reading, verifying and applying Chrome OS update payloads."""
 
@@ -10,7 +22,6 @@
 import struct
 
 from update_payload import applier
-from update_payload import block_tracer
 from update_payload import checker
 from update_payload import common
 from update_payload import update_metadata_pb2
@@ -262,19 +273,19 @@
     return not self.IsDelta()
 
   def Check(self, pubkey_file_name=None, metadata_sig_file=None,
-            report_out_file=None, assert_type=None, block_size=0,
-            rootfs_part_size=0, kernel_part_size=0, allow_unhashed=False,
+            metadata_size=0, report_out_file=None, assert_type=None,
+            block_size=0, part_sizes=None, allow_unhashed=False,
             disabled_tests=()):
     """Checks the payload integrity.
 
     Args:
       pubkey_file_name: public key used for signature verification
       metadata_sig_file: metadata signature, if verification is desired
+      metadata_size: metadata size, if verification is desired
       report_out_file: file object to dump the report to
       assert_type: assert that payload is either 'full' or 'delta'
       block_size: expected filesystem / payload block size
-      rootfs_part_size: the size of (physical) rootfs partitions in bytes
-      kernel_part_size: the size of (physical) kernel partitions in bytes
+      part_sizes: map of partition label to (physical) size in bytes
       allow_unhashed: allow unhashed operation blobs
       disabled_tests: list of tests to disable
 
@@ -289,20 +300,18 @@
         allow_unhashed=allow_unhashed, disabled_tests=disabled_tests)
     helper.Run(pubkey_file_name=pubkey_file_name,
                metadata_sig_file=metadata_sig_file,
-               rootfs_part_size=rootfs_part_size,
-               kernel_part_size=kernel_part_size,
+               metadata_size=metadata_size,
+               part_sizes=part_sizes,
                report_out_file=report_out_file)
 
-  def Apply(self, new_kernel_part, new_rootfs_part, old_kernel_part=None,
-            old_rootfs_part=None, bsdiff_in_place=True, bspatch_path=None,
-            puffpatch_path=None, truncate_to_expected_size=True):
+  def Apply(self, new_parts, old_parts=None, bsdiff_in_place=True,
+            bspatch_path=None, puffpatch_path=None,
+            truncate_to_expected_size=True):
     """Applies the update payload.
 
     Args:
-      new_kernel_part: name of dest kernel partition file
-      new_rootfs_part: name of dest rootfs partition file
-      old_kernel_part: name of source kernel partition file (optional)
-      old_rootfs_part: name of source rootfs partition file (optional)
+      new_parts: map of partition name to dest partition file
+      old_parts: map of partition name to partition file (optional)
       bsdiff_in_place: whether to perform BSDIFF operations in-place (optional)
       bspatch_path: path to the bspatch binary (optional)
       puffpatch_path: path to the puffpatch binary (optional)
@@ -320,26 +329,4 @@
         self, bsdiff_in_place=bsdiff_in_place, bspatch_path=bspatch_path,
         puffpatch_path=puffpatch_path,
         truncate_to_expected_size=truncate_to_expected_size)
-    helper.Run(new_kernel_part, new_rootfs_part,
-               old_kernel_part=old_kernel_part,
-               old_rootfs_part=old_rootfs_part)
-
-  def TraceBlock(self, block, skip, trace_out_file, is_kernel):
-    """Traces the origin(s) of a given dest partition block.
-
-    The tracing tries to find origins transitively, when possible (it currently
-    only works for move operations, where the mapping of src/dst is
-    one-to-one). It will dump a list of operations and source blocks
-    responsible for the data in the given dest block.
-
-    Args:
-      block: the block number whose origin to trace
-      skip: the number of first origin mappings to skip
-      trace_out_file: file object to dump the trace to
-      is_kernel: trace through kernel (True) or rootfs (False) operations
-    """
-    self._AssertInit()
-
-    # Create a short-lived payload block tracer object and run it.
-    helper = block_tracer.PayloadBlockTracer(self)
-    helper.Run(block, skip, trace_out_file, is_kernel)
+    helper.Run(new_parts, old_parts=old_parts)
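
With Check() now taking a metadata_size and a generic part_sizes map, and Apply() taking new_parts/old_parts maps keyed by partition label (block tracing is removed entirely), a caller could look like the sketch below. File names and sizes are hypothetical, and the Payload class name and its Init() entry point are assumed from the module rather than shown in this hunk.

from update_payload import common
from update_payload.payload import Payload

part_sizes = {common.KERNEL: 64 * (1 << 20),   # hypothetical 64 MiB kernel
              common.ROOTFS: 2 * (1 << 30)}    # hypothetical 2 GiB rootfs

with open('payload.bin', 'rb') as payload_file:   # hypothetical payload file
  payload = Payload(payload_file)
  payload.Init()
  payload.Check(pubkey_file_name='payload-key.pub',  # hypothetical key file
                metadata_size=0,  # 0 skips the new size check, per the docstring
                part_sizes=part_sizes)
  payload.Apply(new_parts={common.KERNEL: 'kern.new', common.ROOTFS: 'root.new'},
                old_parts={common.KERNEL: 'kern.old', common.ROOTFS: 'root.old'})
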
diff --git a/scripts/update_payload/test_utils.py b/scripts/update_payload/test_utils.py
index 38712fb..1e2259d 100644
--- a/scripts/update_payload/test_utils.py
+++ b/scripts/update_payload/test_utils.py
@@ -1,6 +1,18 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+#
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Utilities for unit testing."""
 
@@ -276,7 +288,7 @@
 
     Args:
       is_kernel: whether this is a kernel (True) or rootfs (False) operation
-      op_type: one of REPLACE, REPLACE_BZ, MOVE or BSDIFF
+      op_type: one of REPLACE, REPLACE_BZ, REPLACE_XZ, MOVE or BSDIFF
       src_extents: list of (start, length) pairs indicating src block ranges
       src_length: size of the src data in bytes (needed for BSDIFF)
       dst_extents: list of (start, length) pairs indicating dst block ranges
diff --git a/scripts/update_payload/update_metadata_pb2.py b/scripts/update_payload/update_metadata_pb2.py
index 595f2f6..7f1648b 100644
--- a/scripts/update_payload/update_metadata_pb2.py
+++ b/scripts/update_payload/update_metadata_pb2.py
@@ -13,7 +13,7 @@
 DESCRIPTOR = _descriptor.FileDescriptor(
   name='update_metadata.proto',
   package='chromeos_update_engine',
-  serialized_pb='\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xe6\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\r\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\r\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xa5\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x08\n\x04MOVE\x10\x02\x12\n\n\x06\x42SDIFF\x10\x03\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x0c\n\x08PUFFDIFF\x10\t\x12\x11\n\rBROTLI_BSDIFF\x10\n\"\xa6\x03\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\"\xc4\x05\n\x14\x44\x65ltaArchiveManifest\x12\x44\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12K\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12>\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdateB\x02H\x03')
+  serialized_pb='\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xe6\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xa5\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x08\n\x04MOVE\x10\x02\x12\n\n\x06\x42SDIFF\x10\x03\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xd7\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"Y\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\"\xb1\x06\n\x14\x44\x65ltaArchiveManifest\x12\x44\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12K\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12>\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadataB\x02H\x03')
 
 
 
@@ -48,23 +48,23 @@
       options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='ZERO', index=6, number=6,
+      name='REPLACE_XZ', index=6, number=8,
       options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='DISCARD', index=7, number=7,
+      name='ZERO', index=7, number=6,
       options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='REPLACE_XZ', index=8, number=8,
+      name='DISCARD', index=8, number=7,
       options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='PUFFDIFF', index=9, number=9,
+      name='BROTLI_BSDIFF', index=9, number=10,
       options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='BROTLI_BSDIFF', index=10, number=10,
+      name='PUFFDIFF', index=10, number=9,
       options=None,
       type=None),
   ],
@@ -286,14 +286,14 @@
       options=None),
     _descriptor.FieldDescriptor(
       name='data_offset', full_name='chromeos_update_engine.InstallOperation.data_offset', index=1,
-      number=2, type=13, cpp_type=3, label=1,
+      number=2, type=4, cpp_type=4, label=1,
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       options=None),
     _descriptor.FieldDescriptor(
       name='data_length', full_name='chromeos_update_engine.InstallOperation.data_length', index=2,
-      number=3, type=13, cpp_type=3, label=1,
+      number=3, type=4, cpp_type=4, label=1,
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
@@ -425,6 +425,55 @@
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       options=None),
+    _descriptor.FieldDescriptor(
+      name='hash_tree_data_extent', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_data_extent', index=9,
+      number=10, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='hash_tree_extent', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_extent', index=10,
+      number=11, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='hash_tree_algorithm', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_algorithm', index=11,
+      number=12, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=unicode("", "utf-8"),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='hash_tree_salt', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_salt', index=12,
+      number=13, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value="",
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='fec_data_extent', full_name='chromeos_update_engine.PartitionUpdate.fec_data_extent', index=13,
+      number=14, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='fec_extent', full_name='chromeos_update_engine.PartitionUpdate.fec_extent', index=14,
+      number=15, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='fec_roots', full_name='chromeos_update_engine.PartitionUpdate.fec_roots', index=15,
+      number=16, type=13, cpp_type=3, label=1,
+      has_default_value=True, default_value=2,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
   ],
   extensions=[
   ],
@@ -435,7 +484,77 @@
   is_extendable=False,
   extension_ranges=[],
   serialized_start=880,
-  serialized_end=1302,
+  serialized_end=1607,
+)
+
+
+_DYNAMICPARTITIONGROUP = _descriptor.Descriptor(
+  name='DynamicPartitionGroup',
+  full_name='chromeos_update_engine.DynamicPartitionGroup',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='name', full_name='chromeos_update_engine.DynamicPartitionGroup.name', index=0,
+      number=1, type=9, cpp_type=9, label=2,
+      has_default_value=False, default_value=unicode("", "utf-8"),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='size', full_name='chromeos_update_engine.DynamicPartitionGroup.size', index=1,
+      number=2, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='partition_names', full_name='chromeos_update_engine.DynamicPartitionGroup.partition_names', index=2,
+      number=3, type=9, cpp_type=9, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  serialized_start=1609,
+  serialized_end=1685,
+)
+
+
+_DYNAMICPARTITIONMETADATA = _descriptor.Descriptor(
+  name='DynamicPartitionMetadata',
+  full_name='chromeos_update_engine.DynamicPartitionMetadata',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='groups', full_name='chromeos_update_engine.DynamicPartitionMetadata.groups', index=0,
+      number=1, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  serialized_start=1687,
+  serialized_end=1776,
 )
 
 
@@ -537,6 +656,20 @@
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       options=None),
+    _descriptor.FieldDescriptor(
+      name='max_timestamp', full_name='chromeos_update_engine.DeltaArchiveManifest.max_timestamp', index=13,
+      number=14, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='dynamic_partition_metadata', full_name='chromeos_update_engine.DeltaArchiveManifest.dynamic_partition_metadata', index=14,
+      number=15, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
   ],
   extensions=[
   ],
@@ -546,8 +679,8 @@
   options=None,
   is_extendable=False,
   extension_ranges=[],
-  serialized_start=1305,
-  serialized_end=2013,
+  serialized_start=1779,
+  serialized_end=2596,
 )
 
 _SIGNATURES_SIGNATURE.containing_type = _SIGNATURES;
@@ -560,6 +693,11 @@
 _PARTITIONUPDATE.fields_by_name['old_partition_info'].message_type = _PARTITIONINFO
 _PARTITIONUPDATE.fields_by_name['new_partition_info'].message_type = _PARTITIONINFO
 _PARTITIONUPDATE.fields_by_name['operations'].message_type = _INSTALLOPERATION
+_PARTITIONUPDATE.fields_by_name['hash_tree_data_extent'].message_type = _EXTENT
+_PARTITIONUPDATE.fields_by_name['hash_tree_extent'].message_type = _EXTENT
+_PARTITIONUPDATE.fields_by_name['fec_data_extent'].message_type = _EXTENT
+_PARTITIONUPDATE.fields_by_name['fec_extent'].message_type = _EXTENT
+_DYNAMICPARTITIONMETADATA.fields_by_name['groups'].message_type = _DYNAMICPARTITIONGROUP
 _DELTAARCHIVEMANIFEST.fields_by_name['install_operations'].message_type = _INSTALLOPERATION
 _DELTAARCHIVEMANIFEST.fields_by_name['kernel_install_operations'].message_type = _INSTALLOPERATION
 _DELTAARCHIVEMANIFEST.fields_by_name['old_kernel_info'].message_type = _PARTITIONINFO
@@ -569,12 +707,15 @@
 _DELTAARCHIVEMANIFEST.fields_by_name['old_image_info'].message_type = _IMAGEINFO
 _DELTAARCHIVEMANIFEST.fields_by_name['new_image_info'].message_type = _IMAGEINFO
 _DELTAARCHIVEMANIFEST.fields_by_name['partitions'].message_type = _PARTITIONUPDATE
+_DELTAARCHIVEMANIFEST.fields_by_name['dynamic_partition_metadata'].message_type = _DYNAMICPARTITIONMETADATA
 DESCRIPTOR.message_types_by_name['Extent'] = _EXTENT
 DESCRIPTOR.message_types_by_name['Signatures'] = _SIGNATURES
 DESCRIPTOR.message_types_by_name['PartitionInfo'] = _PARTITIONINFO
 DESCRIPTOR.message_types_by_name['ImageInfo'] = _IMAGEINFO
 DESCRIPTOR.message_types_by_name['InstallOperation'] = _INSTALLOPERATION
 DESCRIPTOR.message_types_by_name['PartitionUpdate'] = _PARTITIONUPDATE
+DESCRIPTOR.message_types_by_name['DynamicPartitionGroup'] = _DYNAMICPARTITIONGROUP
+DESCRIPTOR.message_types_by_name['DynamicPartitionMetadata'] = _DYNAMICPARTITIONMETADATA
 DESCRIPTOR.message_types_by_name['DeltaArchiveManifest'] = _DELTAARCHIVEMANIFEST
 
 class Extent(_message.Message):
@@ -619,6 +760,18 @@
 
   # @@protoc_insertion_point(class_scope:chromeos_update_engine.PartitionUpdate)
 
+class DynamicPartitionGroup(_message.Message):
+  __metaclass__ = _reflection.GeneratedProtocolMessageType
+  DESCRIPTOR = _DYNAMICPARTITIONGROUP
+
+  # @@protoc_insertion_point(class_scope:chromeos_update_engine.DynamicPartitionGroup)
+
+class DynamicPartitionMetadata(_message.Message):
+  __metaclass__ = _reflection.GeneratedProtocolMessageType
+  DESCRIPTOR = _DYNAMICPARTITIONMETADATA
+
+  # @@protoc_insertion_point(class_scope:chromeos_update_engine.DynamicPartitionMetadata)
+
 class DeltaArchiveManifest(_message.Message):
   __metaclass__ = _reflection.GeneratedProtocolMessageType
   DESCRIPTOR = _DELTAARCHIVEMANIFEST
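
The regenerated bindings above add DynamicPartitionGroup, DynamicPartitionMetadata, the hash-tree/FEC fields on PartitionUpdate, and max_timestamp on the manifest. A short sketch (not part of this patch) of populating the new fields through the standard protobuf Python API; the group name, size, and timestamp are made-up example values.

from update_payload import update_metadata_pb2

manifest = update_metadata_pb2.DeltaArchiveManifest()
manifest.max_timestamp = 1551744000              # example POSIX timestamp
group = manifest.dynamic_partition_metadata.groups.add()
group.name = 'example_group'                     # hypothetical group name
group.size = 4 * (1 << 30)
group.partition_names.extend(['system', 'vendor'])

blob = manifest.SerializeToString()
reparsed = update_metadata_pb2.DeltaArchiveManifest.FromString(blob)
assert reparsed.dynamic_partition_metadata.groups[0].name == 'example_group'
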
diff --git a/sideload_main.cc b/sideload_main.cc
index ddb312e..818fa5c 100644
--- a/sideload_main.cc
+++ b/sideload_main.cc
@@ -40,13 +40,8 @@
 
 using std::string;
 using std::vector;
-using update_engine::UpdateStatus;
 using update_engine::UpdateEngineStatus;
-
-namespace {
-// The root directory used for temporary files in update_engine_sideload.
-const char kSideloadRootTempDir[] = "/tmp/update_engine_sideload";
-}  // namespace
+using update_engine::UpdateStatus;
 
 namespace chromeos_update_engine {
 namespace {
@@ -208,10 +203,6 @@
   // xz-embedded requires its CRC-32 table to be initialized once on startup.
   xz_crc32_init();
 
-  // When called from recovery, /data is not accessible, so we need to use
-  // /tmp for temporary files.
-  chromeos_update_engine::utils::SetRootTempDir(kSideloadRootTempDir);
-
   vector<string> headers = base::SplitString(
       FLAGS_headers, "\n", base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
 
diff --git a/system_state.h b/system_state.h
index 1b0ad08..f46cbcf 100644
--- a/system_state.h
+++ b/system_state.h
@@ -37,6 +37,7 @@
 class BootControlInterface;
 class ClockInterface;
 class ConnectionManagerInterface;
+class DlcServiceInterface;
 class HardwareInterface;
 class MetricsReporterInterface;
 class OmahaRequestParams;
@@ -109,6 +110,9 @@
   // restarted. Important for tracking whether you are running instance of the
   // update engine on first boot or due to a crash/restart.
   virtual bool system_rebooted() = 0;
+
+  // Returns a pointer to the DlcServiceInterface singleton.
+  virtual DlcServiceInterface* dlcservice() = 0;
 };
 
 }  // namespace chromeos_update_engine
diff --git a/tar_bunzip2.gypi b/tar_bunzip2.gypi
index 8c6614a..4d1be28 100644
--- a/tar_bunzip2.gypi
+++ b/tar_bunzip2.gypi
@@ -21,9 +21,6 @@
     {
       'rule_name': 'tar-bunzip2',
       'extension': 'bz2',
-      'inputs': [
-        '<(RULE_INPUT_PATH)',
-      ],
       'outputs': [
         # The .flag file is used to mark the timestamp of the file extraction
         # and re-run this action if a new .bz2 file is generated.
diff --git a/test_http_server.cc b/test_http_server.cc
index 93aa11c..4536f37 100644
--- a/test_http_server.cc
+++ b/test_http_server.cc
@@ -48,14 +48,12 @@
 
 #include "update_engine/common/http_common.h"
 
-
 // HTTP end-of-line delimiter; sorry, this needs to be a macro.
 #define EOL "\r\n"
 
 using std::string;
 using std::vector;
 
-
 namespace chromeos_update_engine {
 
 static const char* kListeningMsgPrefix = "listening on port ";
@@ -93,8 +91,7 @@
   } while (!base::EndsWith(headers, EOL EOL, base::CompareCase::SENSITIVE));
 
   LOG(INFO) << "got headers:\n--8<------8<------8<------8<----\n"
-            << headers
-            << "\n--8<------8<------8<------8<----";
+            << headers << "\n--8<------8<------8<------8<----";
   request->raw_headers = headers;
 
   // Break header into lines.
@@ -105,7 +102,8 @@
       base::SPLIT_WANT_ALL);
 
   // Decode URL line.
-  vector<string> terms = base::SplitString(lines[0], base::kWhitespaceASCII,
+  vector<string> terms = base::SplitString(lines[0],
+                                           base::kWhitespaceASCII,
                                            base::KEEP_WHITESPACE,
                                            base::SPLIT_WANT_NONEMPTY);
   CHECK_EQ(terms.size(), static_cast<vector<string>::size_type>(3));
@@ -116,12 +114,14 @@
   // Decode remaining lines.
   size_t i;
   for (i = 1; i < lines.size(); i++) {
-    terms = base::SplitString(lines[i], base::kWhitespaceASCII,
-                              base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
+    terms = base::SplitString(lines[i],
+                              base::kWhitespaceASCII,
+                              base::KEEP_WHITESPACE,
+                              base::SPLIT_WANT_NONEMPTY);
 
     if (terms[0] == "Range:") {
       CHECK_EQ(terms.size(), static_cast<vector<string>::size_type>(2));
-      string &range = terms[1];
+      string& range = terms[1];
       LOG(INFO) << "range attribute: " << range;
       CHECK(base::StartsWith(range, "bytes=", base::CompareCase::SENSITIVE) &&
             range.find('-') != string::npos);
@@ -130,12 +130,13 @@
       if (range.find('-') < range.length() - 1)
         request->end_offset = atoll(range.c_str() + range.find('-') + 1) + 1;
       request->return_code = kHttpResponsePartialContent;
-      string tmp_str = base::StringPrintf("decoded range offsets: "
-                                               "start=%jd end=",
-                                               (intmax_t)request->start_offset);
+      string tmp_str = base::StringPrintf(
+          "decoded range offsets: "
+          "start=%jd end=",
+          (intmax_t)request->start_offset);
       if (request->end_offset > 0)
-        base::StringAppendF(&tmp_str, "%jd (non-inclusive)",
-                            (intmax_t)request->end_offset);
+        base::StringAppendF(
+            &tmp_str, "%jd (non-inclusive)", (intmax_t)request->end_offset);
       else
         base::StringAppendF(&tmp_str, "unspecified");
       LOG(INFO) << tmp_str;
@@ -162,7 +163,7 @@
 ssize_t WriteString(int fd, const string& str) {
   const size_t total_size = str.size();
   size_t remaining_size = total_size;
-  char const *data = str.data();
+  char const* data = str.data();
 
   while (remaining_size) {
     ssize_t written = write(fd, data, remaining_size);
@@ -179,38 +180,38 @@
 }
 
 // Writes the headers of an HTTP response into a file.
-ssize_t WriteHeaders(int fd, const off_t start_offset, const off_t end_offset,
+ssize_t WriteHeaders(int fd,
+                     const off_t start_offset,
+                     const off_t end_offset,
                      HttpResponseCode return_code) {
   ssize_t written = 0, ret;
 
   ret = WriteString(fd,
                     string("HTTP/1.1 ") + Itoa(return_code) + " " +
-                    GetHttpResponseDescription(return_code) +
-                    EOL
-                    "Content-Type: application/octet-stream" EOL);
+                        GetHttpResponseDescription(return_code) +
+                        EOL "Content-Type: application/octet-stream" EOL);
   if (ret < 0)
     return -1;
   written += ret;
 
   // Compute content length.
-  const off_t content_length = end_offset - start_offset;;
+  const off_t content_length = end_offset - start_offset;
 
   // A start offset that equals the end offset indicates that the response
   // should contain the full range of bytes in the requested resource.
   if (start_offset || start_offset == end_offset) {
-    ret = WriteString(fd,
-                      string("Accept-Ranges: bytes" EOL
-                             "Content-Range: bytes ") +
-                      Itoa(start_offset == end_offset ? 0 : start_offset) +
-                      "-" + Itoa(end_offset - 1) + "/" + Itoa(end_offset) +
-                      EOL);
+    ret = WriteString(
+        fd,
+        string("Accept-Ranges: bytes" EOL "Content-Range: bytes ") +
+            Itoa(start_offset == end_offset ? 0 : start_offset) + "-" +
+            Itoa(end_offset - 1) + "/" + Itoa(end_offset) + EOL);
     if (ret < 0)
       return -1;
     written += ret;
   }
 
-  ret = WriteString(fd, string("Content-Length: ") + Itoa(content_length) +
-                    EOL EOL);
+  ret = WriteString(
+      fd, string("Content-Length: ") + Itoa(content_length) + EOL EOL);
   if (ret < 0)
     return -1;
   written += ret;
@@ -221,8 +222,11 @@
 // Writes a predetermined payload of lines of ascending bytes to a file. The
 // first byte of output is appropriately offset with respect to the request line
 // length.  Returns the number of successfully written bytes.
-size_t WritePayload(int fd, const off_t start_offset, const off_t end_offset,
-                    const char first_byte, const size_t line_len) {
+size_t WritePayload(int fd,
+                    const off_t start_offset,
+                    const off_t end_offset,
+                    const char first_byte,
+                    const size_t line_len) {
   CHECK_LE(start_offset, end_offset);
   CHECK_GT(line_len, static_cast<size_t>(0));
 
@@ -248,14 +252,14 @@
   if (start_modulo) {
     string partial = line.substr(start_modulo, remaining_len);
     ssize_t ret = WriteString(fd, partial);
-    if ((success = (ret >= 0 && (size_t) ret == partial.length())))
+    if ((success = (ret >= 0 && (size_t)ret == partial.length())))
       remaining_len -= partial.length();
   }
 
   // Output full lines up to the maximal line boundary below the end offset.
   while (success && remaining_len >= line_len) {
     ssize_t ret = WriteString(fd, line);
-    if ((success = (ret >= 0 && (size_t) ret == line_len)))
+    if ((success = (ret >= 0 && (size_t)ret == line_len)))
       remaining_len -= line_len;
   }
 
@@ -263,7 +267,7 @@
   if (success && remaining_len) {
     string partial = line.substr(0, remaining_len);
     ssize_t ret = WriteString(fd, partial);
-    if ((success = (ret >= 0 && (size_t) ret == partial.length())))
+    if ((success = (ret >= 0 && (size_t)ret == partial.length())))
       remaining_len -= partial.length();
   }
 
@@ -271,7 +275,8 @@
 }
 
 // Write default payload lines of the form 'abcdefghij'.
-inline size_t WritePayload(int fd, const off_t start_offset,
+inline size_t WritePayload(int fd,
+                           const off_t start_offset,
                            const off_t end_offset) {
   return WritePayload(fd, start_offset, end_offset, 'a', 10);
 }
@@ -279,17 +284,19 @@
 // Send an empty response, then kill the server.
 void HandleQuit(int fd) {
   WriteHeaders(fd, 0, 0, kHttpResponseOk);
-  LOG(INFO) << "pid(" << getpid() <<  "): HTTP server exiting ...";
+  LOG(INFO) << "pid(" << getpid() << "): HTTP server exiting ...";
   exit(RC_OK);
 }
 
-
 // Generates an HTTP response with payload corresponding to requested offsets
 // and length.  Optionally, truncate the payload at a given length and add a
 // pause midway through the transfer.  Returns the total number of bytes
 // delivered or -1 for error.
-ssize_t HandleGet(int fd, const HttpRequest& request, const size_t total_length,
-                  const size_t truncate_length, const int sleep_every,
+ssize_t HandleGet(int fd,
+                  const HttpRequest& request,
+                  const size_t total_length,
+                  const size_t truncate_length,
+                  const int sleep_every,
                   const int sleep_secs) {
   ssize_t ret;
   size_t written = 0;
@@ -301,14 +308,14 @@
                  << ") exceeds total length (" << total_length
                  << "), generating error response ("
                  << kHttpResponseReqRangeNotSat << ")";
-    return WriteHeaders(fd, total_length, total_length,
-                        kHttpResponseReqRangeNotSat);
+    return WriteHeaders(
+        fd, total_length, total_length, kHttpResponseReqRangeNotSat);
   }
 
   // Obtain end offset, adjust to fit in total payload length and ensure it does
   // not precede the start offset.
-  size_t end_offset = (request.end_offset > 0 ?
-                       request.end_offset : total_length);
+  size_t end_offset =
+      (request.end_offset > 0 ? request.end_offset : total_length);
   if (end_offset < start_offset) {
     LOG(WARNING) << "end offset (" << end_offset << ") precedes start offset ("
                  << start_offset << "), generating error response";
@@ -324,8 +331,8 @@
   LOG(INFO) << "generating response header: range=" << start_offset << "-"
             << (end_offset - 1) << "/" << (end_offset - start_offset)
             << ", return code=" << request.return_code;
-  if ((ret = WriteHeaders(fd, start_offset, end_offset,
-                          request.return_code)) < 0)
+  if ((ret = WriteHeaders(fd, start_offset, end_offset, request.return_code)) <
+      0)
     return -1;
   LOG(INFO) << ret << " header bytes written";
   written += ret;
@@ -371,7 +378,8 @@
   return written;
 }
 
-ssize_t HandleGet(int fd, const HttpRequest& request,
+ssize_t HandleGet(int fd,
+                  const HttpRequest& request,
                   const size_t total_length) {
   return HandleGet(fd, request, total_length, 0, 0, 0);
 }
@@ -388,15 +396,15 @@
   HttpResponseCode code = StringToHttpResponseCode(url.c_str());
   url.erase(0, url_start);
   url = "http://" + request.host + url;
-  const char *status = GetHttpResponseDescription(code);
+  const char* status = GetHttpResponseDescription(code);
   if (!status)
     CHECK(false) << "Unrecognized redirection code: " << code;
   LOG(INFO) << "Code: " << code << " " << status;
   LOG(INFO) << "New URL: " << url;
 
   ssize_t ret;
-  if ((ret = WriteString(fd, "HTTP/1.1 " + Itoa(code) + " " +
-                         status + EOL)) < 0)
+  if ((ret = WriteString(fd, "HTTP/1.1 " + Itoa(code) + " " + status + EOL)) <
+      0)
     return;
   WriteString(fd, "Location: " + url + EOL);
 }
@@ -425,8 +433,10 @@
 // Generate an error response if the requested offset is nonzero, up to a given
 // maximal number of successive failures.  The error generated is an "Internal
 // Server Error" (500).
-ssize_t HandleErrorIfOffset(int fd, const HttpRequest& request,
-                            size_t end_offset, int max_fails) {
+ssize_t HandleErrorIfOffset(int fd,
+                            const HttpRequest& request,
+                            size_t end_offset,
+                            int max_fails) {
   static int num_fails = 0;
 
   if (request.start_offset > 0 && num_fails < max_fails) {
@@ -437,8 +447,8 @@
 
     const string data("This is an error page.");
 
-    if ((ret = WriteHeaders(fd, 0, data.size(),
-                            kHttpResponseInternalServerError)) < 0)
+    if ((ret = WriteHeaders(
+             fd, 0, data.size(), kHttpResponseInternalServerError)) < 0)
       return -1;
     written += ret;
 
@@ -464,7 +474,8 @@
 void HandleHang(int fd) {
   LOG(INFO) << "Hanging until the other side of the connection is closed.";
   char c;
-  while (HANDLE_EINTR(read(fd, &c, 1)) > 0) {}
+  while (HANDLE_EINTR(read(fd, &c, 1)) > 0) {
+  }
 }
 
 void HandleDefault(int fd, const HttpRequest& request) {
@@ -475,36 +486,33 @@
 
   if ((ret = WriteHeaders(fd, start_offset, size, request.return_code)) < 0)
     return;
-  WriteString(fd, (start_offset < static_cast<off_t>(size) ?
-                   data.substr(start_offset) : ""));
+  WriteString(
+      fd,
+      (start_offset < static_cast<off_t>(size) ? data.substr(start_offset)
+                                               : ""));
 }
 
-
 // Break a URL into terms delimited by slashes.
 class UrlTerms {
  public:
-  UrlTerms(const string &url, size_t num_terms) {
+  UrlTerms(const string& url, size_t num_terms) {
     // URL must be non-empty and start with a slash.
     CHECK_GT(url.size(), static_cast<size_t>(0));
     CHECK_EQ(url[0], '/');
 
     // Split it into terms delimited by slashes, omitting the preceding slash.
-    terms = base::SplitString(url.substr(1), "/", base::KEEP_WHITESPACE,
-                              base::SPLIT_WANT_ALL);
+    terms = base::SplitString(
+        url.substr(1), "/", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
 
     // Ensure expected length.
     CHECK_EQ(terms.size(), num_terms);
   }
 
-  inline string Get(const off_t index) const {
-    return terms[index];
-  }
-  inline const char *GetCStr(const off_t index) const {
+  inline const string& Get(const off_t index) const { return terms[index]; }
+  inline const char* GetCStr(const off_t index) const {
     return Get(index).c_str();
   }
-  inline int GetInt(const off_t index) const {
-    return atoi(GetCStr(index));
-  }
+  inline int GetInt(const off_t index) const { return atoi(GetCStr(index)); }
   inline size_t GetSizeT(const off_t index) const {
     return static_cast<size_t>(atol(GetCStr(index)));
   }
@@ -517,8 +525,8 @@
   HttpRequest request;
   ParseRequest(fd, &request);
 
-  string &url = request.url;
-  LOG(INFO) << "pid(" << getpid() <<  "): handling url " << url;
+  string& url = request.url;
+  LOG(INFO) << "pid(" << getpid() << "): handling url " << url;
   if (url == "/quitquitquit") {
     HandleQuit(fd);
   } else if (base::StartsWith(
@@ -527,14 +535,18 @@
     HandleGet(fd, request, terms.GetSizeT(1));
   } else if (base::StartsWith(url, "/flaky/", base::CompareCase::SENSITIVE)) {
     const UrlTerms terms(url, 5);
-    HandleGet(fd, request, terms.GetSizeT(1), terms.GetSizeT(2),
-              terms.GetInt(3), terms.GetInt(4));
+    HandleGet(fd,
+              request,
+              terms.GetSizeT(1),
+              terms.GetSizeT(2),
+              terms.GetInt(3),
+              terms.GetInt(4));
   } else if (url.find("/redirect/") == 0) {
     HandleRedirect(fd, request);
   } else if (url == "/error") {
     HandleError(fd, request);
-  } else if (base::StartsWith(url, "/error-if-offset/",
-                              base::CompareCase::SENSITIVE)) {
+  } else if (base::StartsWith(
+                 url, "/error-if-offset/", base::CompareCase::SENSITIVE)) {
     const UrlTerms terms(url, 3);
     HandleErrorIfOffset(fd, request, terms.GetSizeT(1), terms.GetInt(2));
   } else if (url == "/echo-headers") {
@@ -552,15 +564,14 @@
 
 using namespace chromeos_update_engine;  // NOLINT(build/namespaces)
 
-
-void usage(const char *prog_arg) {
-  fprintf(
-      stderr,
-      "Usage: %s [ FILE ]\n"
-      "Once accepting connections, the following is written to FILE (or "
-      "stdout):\n"
-      "\"%sN\" (where N is an integer port number)\n",
-      basename(prog_arg), kListeningMsgPrefix);
+void usage(const char* prog_arg) {
+  fprintf(stderr,
+          "Usage: %s [ FILE ]\n"
+          "Once accepting connections, the following is written to FILE (or "
+          "stdout):\n"
+          "\"%sN\" (where N is an integer port number)\n",
+          basename(prog_arg),
+          kListeningMsgPrefix);
 }
 
 int main(int argc, char** argv) {
@@ -594,15 +605,16 @@
   {
     // Get rid of "Address in use" error
     int tr = 1;
-    if (setsockopt(listen_fd, SOL_SOCKET, SO_REUSEADDR, &tr,
-                   sizeof(int)) == -1) {
+    if (setsockopt(listen_fd, SOL_SOCKET, SO_REUSEADDR, &tr, sizeof(int)) ==
+        -1) {
       perror("setsockopt");
       exit(RC_ERR_SETSOCKOPT);
     }
   }
 
   // Bind the socket and set for listening.
-  if (bind(listen_fd, reinterpret_cast<struct sockaddr *>(&server_addr),
+  if (bind(listen_fd,
+           reinterpret_cast<struct sockaddr*>(&server_addr),
            sizeof(server_addr)) < 0) {
     perror("bind");
     exit(RC_ERR_BIND);
@@ -615,7 +627,8 @@
   // Check the actual port.
   struct sockaddr_in bound_addr = sockaddr_in();
   socklen_t bound_addr_len = sizeof(bound_addr);
-  if (getsockname(listen_fd, reinterpret_cast<struct sockaddr*>(&bound_addr),
+  if (getsockname(listen_fd,
+                  reinterpret_cast<struct sockaddr*>(&bound_addr),
                   &bound_addr_len) < 0) {
     perror("getsockname");
     exit(RC_ERR_GETSOCKNAME);
@@ -638,7 +651,7 @@
     close(report_fd);
 
   while (1) {
-    LOG(INFO) << "pid(" << getpid() <<  "): waiting to accept new connection";
+    LOG(INFO) << "pid(" << getpid() << "): waiting to accept new connection";
     int client_fd = accept(listen_fd, nullptr, nullptr);
     LOG(INFO) << "got past accept";
     if (client_fd < 0)
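
The handlers above define the small URL protocol the download unit tests drive: payload length, truncation point, sleep cadence, and failure count are encoded in the path, and resume is exercised with ordinary Range headers. A hedged client sketch, assuming python2's urllib2 and a port value parsed from the server's "listening on port N" startup line:

import urllib2

server = 'http://127.0.0.1:%d' % port   # port parsed from the startup message

# 2000-byte synthetic body ('abcdefghij' lines); truncation and sleeping disabled.
urllib2.urlopen(server + '/flaky/2000/0/0/0').read()

# Resume from offset 100; the server answers 206 with a Content-Range header.
resume = urllib2.Request(server + '/flaky/2000/0/0/0',
                         headers={'Range': 'bytes=100-'})
urllib2.urlopen(resume).read()

# Ranged requests to this resource get a 500 until max_fails (2) is exhausted.
try:
  urllib2.urlopen(urllib2.Request(server + '/error-if-offset/2000/2',
                                  headers={'Range': 'bytes=100-'})).read()
except urllib2.HTTPError:
  pass

# Ask the server to exit once the test is done.
urllib2.urlopen(server + '/quitquitquit').read()
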
diff --git a/testrunner.cc b/testrunner.cc
index 81d4548..db0b347 100644
--- a/testrunner.cc
+++ b/testrunner.cc
@@ -26,7 +26,7 @@
 #include "update_engine/common/terminator.h"
 #include "update_engine/payload_generator/xz.h"
 
-int main(int argc, char **argv) {
+int main(int argc, char** argv) {
   LOG(INFO) << "started";
   base::AtExitManager exit_manager;
   // xz-embedded requires its CRC-32 table to be initialized once on startup.
diff --git a/update_attempter.cc b/update_attempter.cc
index 9cef154..ee571db 100644
--- a/update_attempter.cc
+++ b/update_attempter.cc
@@ -20,7 +20,6 @@
 
 #include <algorithm>
 #include <memory>
-#include <set>
 #include <string>
 #include <utility>
 #include <vector>
@@ -31,6 +30,7 @@
 #include <base/rand_util.h>
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
+#include <base/time/time.h>
 #include <brillo/data_encoding.h>
 #include <brillo/errors/error_codes.h>
 #include <brillo/message_loops/message_loop.h>
@@ -42,6 +42,7 @@
 #include "update_engine/common/boot_control_interface.h"
 #include "update_engine/common/clock_interface.h"
 #include "update_engine/common/constants.h"
+#include "update_engine/common/dlcservice_interface.h"
 #include "update_engine/common/hardware_interface.h"
 #include "update_engine/common/platform_constants.h"
 #include "update_engine/common/prefs_interface.h"
@@ -59,7 +60,9 @@
 #include "update_engine/payload_state_interface.h"
 #include "update_engine/power_manager_interface.h"
 #include "update_engine/system_state.h"
+#include "update_engine/update_boot_flags_action.h"
 #include "update_engine/update_manager/policy.h"
+#include "update_engine/update_manager/policy_utils.h"
 #include "update_engine/update_manager/update_manager.h"
 #include "update_engine/update_status_utils.h"
 
@@ -69,11 +72,11 @@
 using base::TimeDelta;
 using base::TimeTicks;
 using brillo::MessageLoop;
+using chromeos_update_manager::CalculateStagingCase;
 using chromeos_update_manager::EvalStatus;
 using chromeos_update_manager::Policy;
+using chromeos_update_manager::StagingCase;
 using chromeos_update_manager::UpdateCheckParams;
-using std::set;
-using std::shared_ptr;
 using std::string;
 using std::vector;
 using update_engine::UpdateAttemptFlags;
@@ -97,12 +100,7 @@
 const char kScheduledAUTestURLRequest[] = "autest-scheduled";
 }  // namespace
 
-// Turns a generic ErrorCode::kError to a generic error code specific
-// to |action| (e.g., ErrorCode::kFilesystemVerifierError). If |code| is
-// not ErrorCode::kError, or the action is not matched, returns |code|
-// unchanged.
-ErrorCode GetErrorCodeForAction(AbstractAction* action,
-                                     ErrorCode code) {
+ErrorCode GetErrorCodeForAction(AbstractAction* action, ErrorCode code) {
   if (code != ErrorCode::kError)
     return code;
 
@@ -123,7 +121,8 @@
                                  CertificateChecker* cert_checker)
     : processor_(new ActionProcessor()),
       system_state_(system_state),
-      cert_checker_(cert_checker) {}
+      cert_checker_(cert_checker),
+      is_install_(false) {}
 
 UpdateAttempter::~UpdateAttempter() {
   // CertificateChecker might not be initialized in unittests.
@@ -152,19 +151,20 @@
     status_ = UpdateStatus::IDLE;
 }
 
-void UpdateAttempter::ScheduleUpdates() {
+bool UpdateAttempter::ScheduleUpdates() {
   if (IsUpdateRunningOrScheduled())
-    return;
+    return false;
 
   chromeos_update_manager::UpdateManager* const update_manager =
       system_state_->update_manager();
   CHECK(update_manager);
-  Callback<void(EvalStatus, const UpdateCheckParams&)> callback = Bind(
-      &UpdateAttempter::OnUpdateScheduled, base::Unretained(this));
+  Callback<void(EvalStatus, const UpdateCheckParams&)> callback =
+      Bind(&UpdateAttempter::OnUpdateScheduled, base::Unretained(this));
   // We limit the async policy request to a reasonably short time, to avoid a
   // starvation due to a transient bug.
   update_manager->AsyncPolicyRequest(callback, &Policy::UpdateCheckAllowed);
   waiting_for_scheduled_check_ = true;
+  return true;
 }
 
 void UpdateAttempter::CertificateChecked(ServerToCheck server_to_check,
@@ -189,7 +189,7 @@
                    << "is wrong.";
       // In this case, report daily metrics to reset.
     } else {
-      if (time_reported_since.InSeconds() < 24*60*60) {
+      if (time_reported_since.InSeconds() < 24 * 60 * 60) {
         LOG(INFO) << "Last reported daily metrics "
                   << utils::FormatTimeDelta(time_reported_since) << " ago.";
         return false;
@@ -221,7 +221,7 @@
     return;
   }
 
-  Time lsb_release_timestamp = utils::TimeFromStructTimespec(&sb.st_ctim);
+  Time lsb_release_timestamp = Time::FromTimeSpec(sb.st_ctim);
   Time now = system_state_->clock()->GetWallclockTime();
   TimeDelta age = now - lsb_release_timestamp;
   if (age.InSeconds() < 0) {
@@ -238,6 +238,7 @@
                              const string& omaha_url,
                              const string& target_channel,
                              const string& target_version_prefix,
+                             bool rollback_allowed,
                              bool obey_proxies,
                              bool interactive) {
   // This is normally called frequently enough so it's appropriate to use as a
@@ -246,10 +247,6 @@
   // timeout event.
   CheckAndReportDailyMetrics();
 
-  // Notify of the new update attempt, clearing prior interactive requests.
-  if (forced_update_pending_callback_.get())
-    forced_update_pending_callback_->Run(false, false);
-
   fake_update_success_ = false;
   if (status_ == UpdateStatus::UPDATED_NEED_REBOOT) {
     // Although we have applied an update, we still want to ping Omaha
@@ -276,6 +273,7 @@
                              omaha_url,
                              target_channel,
                              target_version_prefix,
+                             rollback_allowed,
                              obey_proxies,
                              interactive)) {
     return;
@@ -290,10 +288,7 @@
   // checks in the case where a response is not received.
   UpdateLastCheckedTime();
 
-  // Just in case we didn't update boot flags yet, make sure they're updated
-  // before any update processing starts.
-  start_action_processor_ = true;
-  UpdateBootFlags();
+  ScheduleProcessingStart();
 }
 
 void UpdateAttempter::RefreshDevicePolicy() {
@@ -351,6 +346,7 @@
                                             const string& omaha_url,
                                             const string& target_channel,
                                             const string& target_version_prefix,
+                                            bool rollback_allowed,
                                             bool obey_proxies,
                                             bool interactive) {
   http_response_code_ = 0;
@@ -359,10 +355,21 @@
   // Refresh the policy before computing all the update parameters.
   RefreshDevicePolicy();
 
+  // Check whether we need to clear the rollback-happened preference after
+  // policy is available again.
+  UpdateRollbackHappened();
+
   // Update the target version prefix.
   omaha_request_params_->set_target_version_prefix(target_version_prefix);
 
-  CalculateScatteringParams(interactive);
+  // Set whether rollback is allowed.
+  omaha_request_params_->set_rollback_allowed(rollback_allowed);
+
+  CalculateStagingParams(interactive);
+  // If staging_wait_time_ wasn't set, staging is off; use scattering instead.
+  if (staging_wait_time_.InSeconds() == 0) {
+    CalculateScatteringParams(interactive);
+  }
 
   CalculateP2PParams(interactive);
   if (payload_state->GetUsingP2PForDownloading() ||
@@ -377,9 +384,7 @@
     }
   }
 
-  if (!omaha_request_params_->Init(app_version,
-                                   omaha_url,
-                                   interactive)) {
+  if (!omaha_request_params_->Init(app_version, omaha_url, interactive)) {
     LOG(ERROR) << "Unable to initialize Omaha request params.";
     return false;
   }
@@ -392,8 +397,8 @@
     // Pass in false for powerwash_allowed until we add it to the policy
     // protobuf.
     string error_message;
-    if (!omaha_request_params_->SetTargetChannel(target_channel, false,
-                                                 &error_message)) {
+    if (!omaha_request_params_->SetTargetChannel(
+            target_channel, false, &error_message)) {
       LOG(ERROR) << "Setting the channel failed: " << error_message;
     }
 
@@ -404,9 +409,14 @@
     // target channel.
     omaha_request_params_->UpdateDownloadChannel();
   }
+  // Set the DLC module ID list.
+  omaha_request_params_->set_dlc_module_ids(dlc_module_ids_);
+  omaha_request_params_->set_is_install(is_install_);
 
   LOG(INFO) << "target_version_prefix = "
             << omaha_request_params_->target_version_prefix()
+            << ", rollback_allowed = "
+            << omaha_request_params_->rollback_allowed()
             << ", scatter_factor_in_seconds = "
             << utils::FormatSecs(scatter_factor_.InSeconds());
 
@@ -414,8 +424,9 @@
             << omaha_request_params_->wall_clock_based_wait_enabled()
             << ", Update Check Count Wait Enabled = "
             << omaha_request_params_->update_check_count_wait_enabled()
-            << ", Waiting Period = " << utils::FormatSecs(
-               omaha_request_params_->waiting_period().InSeconds());
+            << ", Waiting Period = "
+            << utils::FormatSecs(
+                   omaha_request_params_->waiting_period().InSeconds());
 
   LOG(INFO) << "Use p2p For Downloading = "
             << payload_state->GetUsingP2PForDownloading()
@@ -435,9 +446,9 @@
   } else if (base::RandInt(0, 4) == 0) {
     obeying_proxies_ = false;
   }
-  LOG_IF(INFO, !obeying_proxies_) << "To help ensure updates work, this update "
-      "check we are ignoring the proxy settings and using "
-      "direct connections.";
+  LOG_IF(INFO, !obeying_proxies_)
+      << "To help ensure updates work, this update check we are ignoring the "
+      << "proxy settings and using direct connections.";
 
   DisableDeltaUpdateIfNeeded();
   return true;
@@ -452,7 +463,7 @@
     int64_t new_scatter_factor_in_secs = 0;
     device_policy->GetScatterFactorInSeconds(&new_scatter_factor_in_secs);
     if (new_scatter_factor_in_secs < 0)  // sanitize input, just in case.
-      new_scatter_factor_in_secs  = 0;
+      new_scatter_factor_in_secs = 0;
     scatter_factor_ = TimeDelta::FromSeconds(new_scatter_factor_in_secs);
   }
 
@@ -482,7 +493,8 @@
     if (omaha_request_params_->waiting_period().InSeconds() == 0) {
       // First case. Check if we have a suitable value to set for
       // the waiting period.
-      if (prefs_->GetInt64(kPrefsWallClockWaitPeriod, &wait_period_in_secs) &&
+      if (prefs_->GetInt64(kPrefsWallClockScatteringWaitPeriod,
+                           &wait_period_in_secs) &&
           wait_period_in_secs > 0 &&
           wait_period_in_secs <= scatter_factor_.InSeconds()) {
         // This means:
@@ -492,10 +504,10 @@
         // generating a new random value to improve the chances of a good
         // distribution for scattering.
         omaha_request_params_->set_waiting_period(
-          TimeDelta::FromSeconds(wait_period_in_secs));
-        LOG(INFO) << "Using persisted wall-clock waiting period: " <<
-            utils::FormatSecs(
-                omaha_request_params_->waiting_period().InSeconds());
+            TimeDelta::FromSeconds(wait_period_in_secs));
+        LOG(INFO) << "Using persisted wall-clock waiting period: "
+                  << utils::FormatSecs(
+                         omaha_request_params_->waiting_period().InSeconds());
       } else {
         // This means there's no persisted value for the waiting period
         // available or its value is invalid given the new scatter_factor value.
@@ -515,9 +527,9 @@
     } else {
       // Neither the first time scattering is enabled nor the scattering value
       // changed. Nothing to do.
-      LOG(INFO) << "Keeping current wall-clock waiting period: " <<
-          utils::FormatSecs(
-              omaha_request_params_->waiting_period().InSeconds());
+      LOG(INFO) << "Keeping current wall-clock waiting period: "
+                << utils::FormatSecs(
+                       omaha_request_params_->waiting_period().InSeconds());
     }
 
     // The invariant at this point is that omaha_request_params_->waiting_period
@@ -533,7 +545,7 @@
     // the update check count value, we'll turn that on as well.
     bool decrement_succeeded = DecrementUpdateCheckCount();
     omaha_request_params_->set_update_check_count_wait_enabled(
-      decrement_succeeded);
+        decrement_succeeded);
   } else {
     // This means the scattering feature is turned off or disabled for
     // this particular update check. Make sure to disable
@@ -542,7 +554,7 @@
     omaha_request_params_->set_wall_clock_based_wait_enabled(false);
     omaha_request_params_->set_update_check_count_wait_enabled(false);
     omaha_request_params_->set_waiting_period(TimeDelta::FromSeconds(0));
-    prefs_->Delete(kPrefsWallClockWaitPeriod);
+    prefs_->Delete(kPrefsWallClockScatteringWaitPeriod);
     prefs_->Delete(kPrefsUpdateCheckCount);
     // Don't delete the UpdateFirstSeenAt file as we don't want manual checks
     // that result in no-updates (e.g. due to server side throttling) to
@@ -552,11 +564,12 @@
 }
 
 void UpdateAttempter::GenerateNewWaitingPeriod() {
-  omaha_request_params_->set_waiting_period(TimeDelta::FromSeconds(
-      base::RandInt(1, scatter_factor_.InSeconds())));
+  omaha_request_params_->set_waiting_period(
+      TimeDelta::FromSeconds(base::RandInt(1, scatter_factor_.InSeconds())));
 
-  LOG(INFO) << "Generated new wall-clock waiting period: " << utils::FormatSecs(
-                omaha_request_params_->waiting_period().InSeconds());
+  LOG(INFO) << "Generated new wall-clock waiting period: "
+            << utils::FormatSecs(
+                   omaha_request_params_->waiting_period().InSeconds());
 
   // Do a best-effort to persist this in all cases. Even if the persistence
   // fails, we'll still be able to scatter based on our in-memory value.
@@ -566,15 +579,43 @@
       omaha_request_params_->waiting_period());
 }
 
-void UpdateAttempter::BuildPostInstallActions(
-    InstallPlanAction* previous_action) {
-  shared_ptr<PostinstallRunnerAction> postinstall_runner_action(
-      new PostinstallRunnerAction(system_state_->boot_control(),
-                                  system_state_->hardware()));
-  postinstall_runner_action->set_delegate(this);
-  actions_.push_back(shared_ptr<AbstractAction>(postinstall_runner_action));
-  BondActions(previous_action,
-              postinstall_runner_action.get());
+void UpdateAttempter::CalculateStagingParams(bool interactive) {
+  bool oobe_complete = system_state_->hardware()->IsOOBEEnabled() &&
+                       system_state_->hardware()->IsOOBEComplete(nullptr);
+  auto device_policy = system_state_->device_policy();
+  StagingCase staging_case = StagingCase::kOff;
+  if (device_policy && !interactive && oobe_complete) {
+    staging_wait_time_ = omaha_request_params_->waiting_period();
+    staging_case = CalculateStagingCase(
+        device_policy, prefs_, &staging_wait_time_, &staging_schedule_);
+  }
+  switch (staging_case) {
+    case StagingCase::kOff:
+      // Staging is off; get rid of the persisted value.
+      prefs_->Delete(kPrefsWallClockStagingWaitPeriod);
+      // Set |staging_wait_time_| to its default value so scattering can still
+      // be turned on.
+      staging_wait_time_ = TimeDelta();
+      break;
+    // Let the cases fall through, since each case only adds, and never
+    // removes, steps toward turning staging on.
+    case StagingCase::kNoSavedValue:
+      prefs_->SetInt64(kPrefsWallClockStagingWaitPeriod,
+                       staging_wait_time_.InDays());
+    case StagingCase::kSetStagingFromPref:
+      omaha_request_params_->set_waiting_period(staging_wait_time_);
+    case StagingCase::kNoAction:
+      // Staging is on; enable wall-clock-based wait so that its values are used.
+      omaha_request_params_->set_wall_clock_based_wait_enabled(true);
+      // Use UpdateCheckCount if possible to prevent devices from updating
+      // all at once.
+      omaha_request_params_->set_update_check_count_wait_enabled(
+          DecrementUpdateCheckCount());
+      // Scattering should not be turned on if staging is on, so delete the
+      // existing scattering configuration.
+      prefs_->Delete(kPrefsWallClockScatteringWaitPeriod);
+      scatter_factor_ = TimeDelta();
+  }
 }
 
 void UpdateAttempter::BuildUpdateActions(bool interactive) {
@@ -582,85 +623,79 @@
   processor_->set_delegate(this);
 
   // Actions:
-  std::unique_ptr<LibcurlHttpFetcher> update_check_fetcher(
-      new LibcurlHttpFetcher(GetProxyResolver(), system_state_->hardware()));
+  auto update_check_fetcher = std::make_unique<LibcurlHttpFetcher>(
+      GetProxyResolver(), system_state_->hardware());
   update_check_fetcher->set_server_to_check(ServerToCheck::kUpdate);
   // Try harder to connect to the network, esp when not interactive.
   // See comment in libcurl_http_fetcher.cc.
   update_check_fetcher->set_no_network_max_retries(interactive ? 1 : 3);
-  shared_ptr<OmahaRequestAction> update_check_action(
-      new OmahaRequestAction(system_state_,
-                             nullptr,
-                             std::move(update_check_fetcher),
-                             false));
-  shared_ptr<OmahaResponseHandlerAction> response_handler_action(
-      new OmahaResponseHandlerAction(system_state_));
-
-  shared_ptr<OmahaRequestAction> download_started_action(new OmahaRequestAction(
+  auto update_check_action = std::make_unique<OmahaRequestAction>(
+      system_state_, nullptr, std::move(update_check_fetcher), false);
+  auto response_handler_action =
+      std::make_unique<OmahaResponseHandlerAction>(system_state_);
+  auto update_boot_flags_action =
+      std::make_unique<UpdateBootFlagsAction>(system_state_->boot_control());
+  auto download_started_action = std::make_unique<OmahaRequestAction>(
       system_state_,
       new OmahaEvent(OmahaEvent::kTypeUpdateDownloadStarted),
       std::make_unique<LibcurlHttpFetcher>(GetProxyResolver(),
                                            system_state_->hardware()),
-      false));
+      false);
 
   LibcurlHttpFetcher* download_fetcher =
       new LibcurlHttpFetcher(GetProxyResolver(), system_state_->hardware());
   download_fetcher->set_server_to_check(ServerToCheck::kDownload);
   if (interactive)
     download_fetcher->set_max_retry_count(kDownloadMaxRetryCountInteractive);
-  shared_ptr<DownloadAction> download_action(
-      new DownloadAction(prefs_,
-                         system_state_->boot_control(),
-                         system_state_->hardware(),
-                         system_state_,
-                         download_fetcher,  // passes ownership
-                         interactive));
-  shared_ptr<OmahaRequestAction> download_finished_action(
-      new OmahaRequestAction(
-          system_state_,
-          new OmahaEvent(OmahaEvent::kTypeUpdateDownloadFinished),
-          std::make_unique<LibcurlHttpFetcher>(GetProxyResolver(),
-                                               system_state_->hardware()),
-          false));
-  shared_ptr<FilesystemVerifierAction> filesystem_verifier_action(
-      new FilesystemVerifierAction());
-  shared_ptr<OmahaRequestAction> update_complete_action(
-      new OmahaRequestAction(system_state_,
-                             new OmahaEvent(OmahaEvent::kTypeUpdateComplete),
-                             std::make_unique<LibcurlHttpFetcher>(
-                                 GetProxyResolver(), system_state_->hardware()),
-                             false));
-
+  auto download_action =
+      std::make_unique<DownloadAction>(prefs_,
+                                       system_state_->boot_control(),
+                                       system_state_->hardware(),
+                                       system_state_,
+                                       download_fetcher,  // passes ownership
+                                       interactive);
   download_action->set_delegate(this);
-  response_handler_action_ = response_handler_action;
-  download_action_ = download_action;
 
-  actions_.push_back(shared_ptr<AbstractAction>(update_check_action));
-  actions_.push_back(shared_ptr<AbstractAction>(response_handler_action));
-  actions_.push_back(shared_ptr<AbstractAction>(download_started_action));
-  actions_.push_back(shared_ptr<AbstractAction>(download_action));
-  actions_.push_back(shared_ptr<AbstractAction>(download_finished_action));
-  actions_.push_back(shared_ptr<AbstractAction>(filesystem_verifier_action));
+  auto download_finished_action = std::make_unique<OmahaRequestAction>(
+      system_state_,
+      new OmahaEvent(OmahaEvent::kTypeUpdateDownloadFinished),
+      std::make_unique<LibcurlHttpFetcher>(GetProxyResolver(),
+                                           system_state_->hardware()),
+      false);
+  auto filesystem_verifier_action =
+      std::make_unique<FilesystemVerifierAction>();
+  auto update_complete_action = std::make_unique<OmahaRequestAction>(
+      system_state_,
+      new OmahaEvent(OmahaEvent::kTypeUpdateComplete),
+      std::make_unique<LibcurlHttpFetcher>(GetProxyResolver(),
+                                           system_state_->hardware()),
+      false);
+
+  auto postinstall_runner_action = std::make_unique<PostinstallRunnerAction>(
+      system_state_->boot_control(), system_state_->hardware());
+  postinstall_runner_action->set_delegate(this);
 
   // Bond them together. We have to use the leaf-types when calling
   // BondActions().
-  BondActions(update_check_action.get(),
-              response_handler_action.get());
-  BondActions(response_handler_action.get(),
-              download_action.get());
-  BondActions(download_action.get(),
-              filesystem_verifier_action.get());
-  BuildPostInstallActions(filesystem_verifier_action.get());
+  BondActions(update_check_action.get(), response_handler_action.get());
+  BondActions(response_handler_action.get(), download_action.get());
+  BondActions(download_action.get(), filesystem_verifier_action.get());
+  BondActions(filesystem_verifier_action.get(),
+              postinstall_runner_action.get());
 
-  actions_.push_back(shared_ptr<AbstractAction>(update_complete_action));
-
-  // Enqueue the actions
-  for (const shared_ptr<AbstractAction>& action : actions_) {
-    processor_->EnqueueAction(action.get());
-  }
+  processor_->EnqueueAction(std::move(update_check_action));
+  processor_->EnqueueAction(std::move(response_handler_action));
+  processor_->EnqueueAction(std::move(update_boot_flags_action));
+  processor_->EnqueueAction(std::move(download_started_action));
+  processor_->EnqueueAction(std::move(download_action));
+  processor_->EnqueueAction(std::move(download_finished_action));
+  processor_->EnqueueAction(std::move(filesystem_verifier_action));
+  processor_->EnqueueAction(std::move(postinstall_runner_action));
+  processor_->EnqueueAction(std::move(update_complete_action));
 }
 
 bool UpdateAttempter::Rollback(bool powerwash) {
+  is_install_ = false;
   if (!CanRollback()) {
     return false;
   }
@@ -688,39 +723,32 @@
   }
 
   LOG(INFO) << "Setting rollback options.";
-  InstallPlan install_plan;
-
-  install_plan.target_slot = GetRollbackSlot();
-  install_plan.source_slot = system_state_->boot_control()->GetCurrentSlot();
+  install_plan_.reset(new InstallPlan());
+  install_plan_->target_slot = GetRollbackSlot();
+  install_plan_->source_slot = system_state_->boot_control()->GetCurrentSlot();
 
   TEST_AND_RETURN_FALSE(
-      install_plan.LoadPartitionsFromSlots(system_state_->boot_control()));
-  install_plan.powerwash_required = powerwash;
+      install_plan_->LoadPartitionsFromSlots(system_state_->boot_control()));
+  install_plan_->powerwash_required = powerwash;
 
   LOG(INFO) << "Using this install plan:";
-  install_plan.Dump();
+  install_plan_->Dump();
 
-  shared_ptr<InstallPlanAction> install_plan_action(
-      new InstallPlanAction(install_plan));
-  actions_.push_back(shared_ptr<AbstractAction>(install_plan_action));
-
-  BuildPostInstallActions(install_plan_action.get());
-
-  // Enqueue the actions
-  for (const shared_ptr<AbstractAction>& action : actions_) {
-    processor_->EnqueueAction(action.get());
-  }
+  auto install_plan_action =
+      std::make_unique<InstallPlanAction>(*install_plan_);
+  auto postinstall_runner_action = std::make_unique<PostinstallRunnerAction>(
+      system_state_->boot_control(), system_state_->hardware());
+  postinstall_runner_action->set_delegate(this);
+  BondActions(install_plan_action.get(), postinstall_runner_action.get());
+  processor_->EnqueueAction(std::move(install_plan_action));
+  processor_->EnqueueAction(std::move(postinstall_runner_action));
 
   // Update the payload state for Rollback.
   system_state_->payload_state()->Rollback();
 
   SetStatusAndNotify(UpdateStatus::ATTEMPTING_ROLLBACK);
 
-  // Just in case we didn't update boot flags yet, make sure they're updated
-  // before any update processing starts. This also schedules the start of the
-  // actions we just posted.
-  start_action_processor_ = true;
-  UpdateBootFlags();
+  ScheduleProcessingStart();
   return true;
 }
 
@@ -762,6 +790,8 @@
 bool UpdateAttempter::CheckForUpdate(const string& app_version,
                                      const string& omaha_url,
                                      UpdateAttemptFlags flags) {
+  dlc_module_ids_.clear();
+  is_install_ = false;
   bool interactive = !(flags & UpdateAttemptFlags::kFlagNonInteractive);
 
   if (interactive && status_ != UpdateStatus::IDLE) {
@@ -803,6 +833,9 @@
   }
 
   if (forced_update_pending_callback_.get()) {
+    if (!system_state_->dlcservice()->GetInstalled(&dlc_module_ids_)) {
+      dlc_module_ids_.clear();
+    }
     // Make sure that a scheduling request is made prior to calling the forced
     // update pending callback.
     ScheduleUpdates();
@@ -812,12 +845,45 @@
   return true;
 }
 
+bool UpdateAttempter::CheckForInstall(const vector<string>& dlc_module_ids,
+                                      const string& omaha_url) {
+  dlc_module_ids_ = dlc_module_ids;
+  is_install_ = true;
+  forced_omaha_url_.clear();
+
+  // Certain conditions must be met to allow setting custom version and update
+  // server URLs. However, kScheduledAUTestURLRequest and kAUTestURLRequest are
+  // always allowed regardless of device state.
+  if (IsAnyUpdateSourceAllowed()) {
+    forced_omaha_url_ = omaha_url;
+  }
+  if (omaha_url == kScheduledAUTestURLRequest) {
+    forced_omaha_url_ = constants::kOmahaDefaultAUTestURL;
+  } else if (omaha_url == kAUTestURLRequest) {
+    forced_omaha_url_ = constants::kOmahaDefaultAUTestURL;
+  }
+
+  if (!ScheduleUpdates()) {
+    if (forced_update_pending_callback_.get()) {
+      // Make sure that a scheduling request is made prior to calling the forced
+      // update pending callback.
+      ScheduleUpdates();
+      forced_update_pending_callback_->Run(true, true);
+      return true;
+    }
+    return false;
+  }
+  return true;
+}
+
 bool UpdateAttempter::RebootIfNeeded() {
+#ifdef __ANDROID__
   if (status_ != UpdateStatus::UPDATED_NEED_REBOOT) {
     LOG(INFO) << "Reboot requested, but status is "
               << UpdateStatusToString(status_) << ", so not rebooting.";
     return false;
   }
+#endif  // __ANDROID__
 
   if (system_state_->power_manager()->RequestReboot())
     return true;
@@ -863,11 +929,10 @@
       return;
     }
 
-    LOG(INFO) << "Running "
-              << (params.is_interactive ? "interactive" : "periodic")
+    LOG(INFO) << "Running " << (params.interactive ? "interactive" : "periodic")
               << " update.";
 
-    if (!params.is_interactive) {
+    if (!params.interactive) {
       // Cache the update attempt flags that will be used by this update attempt
       // so that they can't be changed mid-way through.
       current_update_attempt_flags_ = update_attempt_flags_;
@@ -876,8 +941,13 @@
     LOG(INFO) << "Update attempt flags in use = 0x" << std::hex
               << current_update_attempt_flags_;
 
-    Update(forced_app_version_, forced_omaha_url_, params.target_channel,
-           params.target_version_prefix, false, params.is_interactive);
+    Update(forced_app_version_,
+           forced_omaha_url_,
+           params.target_channel,
+           params.target_version_prefix,
+           params.rollback_allowed,
+           /*obey_proxies=*/false,
+           params.interactive);
     // Always clear the forced app_version and omaha_url after an update attempt
     // so the next update uses the defaults.
     forced_app_version_.clear();
@@ -900,11 +970,23 @@
   last_checked_time_ = system_state_->clock()->GetWallclockTime().ToTimeT();
 }
 
+void UpdateAttempter::UpdateRollbackHappened() {
+  DCHECK(system_state_);
+  DCHECK(system_state_->payload_state());
+  DCHECK(policy_provider_);
+  if (system_state_->payload_state()->GetRollbackHappened() &&
+      (policy_provider_->device_policy_is_loaded() ||
+       policy_provider_->IsConsumerDevice())) {
+    // Rollback happened, but we already went through OOBE and policy is
+    // present or it's a consumer device.
+    system_state_->payload_state()->SetRollbackHappened(false);
+  }
+}
+
 // Delegate methods:
 void UpdateAttempter::ProcessingDone(const ActionProcessor* processor,
                                      ErrorCode code) {
   LOG(INFO) << "Processing Done.";
-  actions_.clear();
 
   // Reset cpu shares back to normal.
   cpu_limiter_.StopLimiter();
@@ -912,6 +994,10 @@
   // reset the state that's only valid for a single update pass
   current_update_attempt_flags_ = UpdateAttemptFlags::kNone;
 
+  if (forced_update_pending_callback_.get())
+    // Clear prior interactive requests once the processor is done.
+    forced_update_pending_callback_->Run(false, false);
+
   if (status_ == UpdateStatus::REPORTING_ERROR_EVENT) {
     LOG(INFO) << "Error event sent.";
 
@@ -923,11 +1009,18 @@
       return;
     }
     LOG(INFO) << "Booted from FW B and tried to install new firmware, "
-        "so requesting reboot from user.";
+                 "so requesting reboot from user.";
   }
 
+  attempt_error_code_ = utils::GetBaseErrorCode(code);
+
   if (code == ErrorCode::kSuccess) {
-    WriteUpdateCompletedMarker();
+    // For an install operation, we do not mark the update complete since we
+    // do not need a reboot.
+    if (!is_install_)
+      WriteUpdateCompletedMarker();
+    ReportTimeToUpdateAppliedMetric();
+
     prefs_->SetInt64(kPrefsDeltaUpdateFailures, 0);
     prefs_->SetString(kPrefsPreviousVersion,
                       omaha_request_params_->app_version());
@@ -946,26 +1039,39 @@
     // way.
     prefs_->Delete(kPrefsUpdateCheckCount);
     system_state_->payload_state()->SetScatteringWaitPeriod(TimeDelta());
+    system_state_->payload_state()->SetStagingWaitPeriod(TimeDelta());
     prefs_->Delete(kPrefsUpdateFirstSeenAt);
 
+    if (is_install_) {
+      LOG(INFO) << "DLC successfully installed, no reboot needed.";
+      SetStatusAndNotify(UpdateStatus::IDLE);
+      ScheduleUpdates();
+      return;
+    }
+
     SetStatusAndNotify(UpdateStatus::UPDATED_NEED_REBOOT);
     ScheduleUpdates();
     LOG(INFO) << "Update successfully applied, waiting to reboot.";
 
-    // This pointer is null during rollback operations, and the stats
-    // don't make much sense then anyway.
-    if (response_handler_action_) {
-      const InstallPlan& install_plan =
-          response_handler_action_->install_plan();
-
+    // |install_plan_| is null during rollback operations, and the stats don't
+    // make much sense then anyway.
+    if (install_plan_) {
       // Generate an unique payload identifier.
       string target_version_uid;
-      for (const auto& payload : install_plan.payloads) {
+      for (const auto& payload : install_plan_->payloads) {
         target_version_uid +=
             brillo::data_encoding::Base64Encode(payload.hash) + ":" +
             payload.metadata_signature + ":";
       }
 
+      // If we just downloaded a rollback image, we should preserve this fact
+      // over the following powerwash.
+      if (install_plan_->is_rollback) {
+        system_state_->payload_state()->SetRollbackHappened(true);
+        system_state_->metrics_reporter()->ReportEnterpriseRollbackMetrics(
+            /*success=*/true, install_plan_->version);
+      }
+
       // Expect to reboot into the new version to send the proper metric during
       // next boot.
       system_state_->payload_state()->ExpectRebootInNewVersion(
@@ -974,8 +1080,7 @@
       // If we just finished a rollback, then we expect to have no Omaha
       // response. Otherwise, it's an error.
       if (system_state_->payload_state()->GetRollbackVersion().empty()) {
-        LOG(ERROR) << "Can't send metrics because expected "
-            "response_handler_action_ missing.";
+        LOG(ERROR) << "Can't send metrics because there was no Omaha response";
       }
     }
     return;
@@ -993,9 +1098,11 @@
   // Reset cpu shares back to normal.
   cpu_limiter_.StopLimiter();
   download_progress_ = 0.0;
+  if (forced_update_pending_callback_.get())
+    // Clear prior interactive requests once the processor is done.
+    forced_update_pending_callback_->Run(false, false);
   SetStatusAndNotify(UpdateStatus::IDLE);
   ScheduleUpdates();
-  actions_.clear();
   error_event_.reset(nullptr);
 }
 
@@ -1028,9 +1135,23 @@
         consecutive_failed_update_checks_ = 0;
       }
 
+      const OmahaResponse& omaha_response =
+          omaha_request_action->GetOutputObject();
       // Store the server-dictated poll interval, if any.
       server_dictated_poll_interval_ =
-          std::max(0, omaha_request_action->GetOutputObject().poll_interval);
+          std::max(0, omaha_response.poll_interval);
+
+      // This update was ignored by the Omaha request action because updating
+      // over a cellular connection is not allowed. We need to ask for the
+      // user's permission to update.
+      if (code == ErrorCode::kOmahaUpdateIgnoredOverCellular) {
+        new_version_ = omaha_response.version;
+        new_payload_size_ = 0;
+        for (const auto& package : omaha_response.packages) {
+          new_payload_size_ += package.size;
+        }
+        SetStatusAndNotify(UpdateStatus::NEED_PERMISSION_TO_UPDATE);
+      }
     }
   } else if (type == OmahaResponseHandlerAction::StaticType()) {
     // Depending on the returned error code, note that an update is available.
@@ -1041,13 +1162,15 @@
       // callback is invoked. This avoids notifying the user that a download
       // has started in cases when the server and the client are unable to
       // initiate the download.
-      CHECK(action == response_handler_action_.get());
-      auto plan = response_handler_action_->install_plan();
+      auto omaha_response_handler_action =
+          static_cast<OmahaResponseHandlerAction*>(action);
+      install_plan_.reset(
+          new InstallPlan(omaha_response_handler_action->install_plan()));
       UpdateLastCheckedTime();
-      new_version_ = plan.version;
-      new_system_version_ = plan.system_version;
+      new_version_ = install_plan_->version;
+      new_system_version_ = install_plan_->system_version;
       new_payload_size_ = 0;
-      for (const auto& payload : plan.payloads)
+      for (const auto& payload : install_plan_->payloads)
         new_payload_size_ += payload.size;
       cpu_limiter_.StartLimiter();
       SetStatusAndNotify(UpdateStatus::UPDATE_AVAILABLE);
@@ -1058,12 +1181,28 @@
     // If the current state is at or past the download phase, count the failure
     // in case a switch to full update becomes necessary. Ignore network
     // transfer timeouts and failures.
-    if (status_ >= UpdateStatus::DOWNLOADING &&
-        code != ErrorCode::kDownloadTransferError) {
-      MarkDeltaUpdateFailure();
+    if (code != ErrorCode::kDownloadTransferError) {
+      switch (status_) {
+        case UpdateStatus::IDLE:
+        case UpdateStatus::CHECKING_FOR_UPDATE:
+        case UpdateStatus::UPDATE_AVAILABLE:
+        case UpdateStatus::NEED_PERMISSION_TO_UPDATE:
+          break;
+        case UpdateStatus::DOWNLOADING:
+        case UpdateStatus::VERIFYING:
+        case UpdateStatus::FINALIZING:
+        case UpdateStatus::UPDATED_NEED_REBOOT:
+        case UpdateStatus::REPORTING_ERROR_EVENT:
+        case UpdateStatus::ATTEMPTING_ROLLBACK:
+        case UpdateStatus::DISABLED:
+          MarkDeltaUpdateFailure();
+          break;
+      }
     }
-    // On failure, schedule an error event to be sent to Omaha.
-    CreatePendingErrorEvent(action, code);
+    if (code != ErrorCode::kNoUpdate) {
+      // On failure, schedule an error event to be sent to Omaha.
+      CreatePendingErrorEvent(action, code);
+    }
     return;
   }
   // Find out which action completed (successfully).
@@ -1124,7 +1263,7 @@
       // no-op.
       return true;
 
-    case UpdateStatus::UPDATED_NEED_REBOOT:  {
+    case UpdateStatus::UPDATED_NEED_REBOOT: {
       bool ret_value = true;
       status_ = UpdateStatus::IDLE;
 
@@ -1142,7 +1281,7 @@
       // Mark the current slot as successful again, since marking it as active
       // may reset the successful bit. We ignore the result of whether marking
       // the current slot as successful worked.
-      if (!boot_control->MarkBootSuccessfulAsync(Bind([](bool successful){})))
+      if (!boot_control->MarkBootSuccessfulAsync(Bind([](bool successful) {})))
         ret_value = false;
 
       // Notify the PayloadState that the successful payload was canceled.
@@ -1176,38 +1315,6 @@
   return true;
 }
 
-void UpdateAttempter::UpdateBootFlags() {
-  if (update_boot_flags_running_) {
-    LOG(INFO) << "Update boot flags running, nothing to do.";
-    return;
-  }
-  if (updated_boot_flags_) {
-    LOG(INFO) << "Already updated boot flags. Skipping.";
-    if (start_action_processor_) {
-      ScheduleProcessingStart();
-    }
-    return;
-  }
-  // This is purely best effort. Failures should be logged by Subprocess. Run
-  // the script asynchronously to avoid blocking the event loop regardless of
-  // the script runtime.
-  update_boot_flags_running_ = true;
-  LOG(INFO) << "Marking booted slot as good.";
-  if (!system_state_->boot_control()->MarkBootSuccessfulAsync(Bind(
-          &UpdateAttempter::CompleteUpdateBootFlags, base::Unretained(this)))) {
-    LOG(ERROR) << "Failed to mark current boot as successful.";
-    CompleteUpdateBootFlags(false);
-  }
-}
-
-void UpdateAttempter::CompleteUpdateBootFlags(bool successful) {
-  update_boot_flags_running_ = false;
-  updated_boot_flags_ = true;
-  if (start_action_processor_) {
-    ScheduleProcessingStart();
-  }
-}
-
 void UpdateAttempter::BroadcastStatus() {
   UpdateEngineStatus broadcast_status;
   // Use common method for generating the current status.
@@ -1219,21 +1326,19 @@
   last_notify_time_ = TimeTicks::Now();
 }
 
-uint32_t UpdateAttempter::GetErrorCodeFlags()  {
+uint32_t UpdateAttempter::GetErrorCodeFlags() {
   uint32_t flags = 0;
 
   if (!system_state_->hardware()->IsNormalBootMode())
     flags |= static_cast<uint32_t>(ErrorCode::kDevModeFlag);
 
-  if (response_handler_action_.get() &&
-      response_handler_action_->install_plan().is_resume)
+  if (install_plan_ && install_plan_->is_resume)
     flags |= static_cast<uint32_t>(ErrorCode::kResumedFlag);
 
   if (!system_state_->hardware()->IsOfficialBuild())
     flags |= static_cast<uint32_t>(ErrorCode::kTestImageFlag);
 
-  if (omaha_request_params_->update_url() !=
-      constants::kOmahaDefaultProductionURL) {
+  if (!omaha_request_params_->IsUpdateUrlOfficial()) {
     flags |= static_cast<uint32_t>(ErrorCode::kTestOmahaUrlFlag);
   }
 
@@ -1263,22 +1368,12 @@
 
 void UpdateAttempter::CreatePendingErrorEvent(AbstractAction* action,
                                               ErrorCode code) {
-  if (error_event_.get()) {
+  if (error_event_.get() || status_ == UpdateStatus::REPORTING_ERROR_EVENT) {
     // This shouldn't really happen.
     LOG(WARNING) << "There's already an existing pending error event.";
     return;
   }
 
-  // For now assume that a generic Omaha response action failure means that
-  // there's no update so don't send an event. Also, double check that the
-  // failure has not occurred while sending an error event -- in which case
-  // don't schedule another. This shouldn't really happen but just in case...
-  if ((action->Type() == OmahaResponseHandlerAction::StaticType() &&
-       code == ErrorCode::kError) ||
-      status_ == UpdateStatus::REPORTING_ERROR_EVENT) {
-    return;
-  }
-
   // Classify the code to generate the appropriate result so that
   // the Borgmon charts show up the results correctly.
   // Do this before calling GetErrorCodeForAction which could potentially
@@ -1300,11 +1395,10 @@
   fake_update_success_ = code == ErrorCode::kPostinstallBootedFromFirmwareB;
 
   // Compute the final error code with all the bit flags to be sent to Omaha.
-  code = static_cast<ErrorCode>(
-      static_cast<uint32_t>(code) | GetErrorCodeFlags());
-  error_event_.reset(new OmahaEvent(OmahaEvent::kTypeUpdateComplete,
-                                    event_result,
-                                    code));
+  code =
+      static_cast<ErrorCode>(static_cast<uint32_t>(code) | GetErrorCodeFlags());
+  error_event_.reset(
+      new OmahaEvent(OmahaEvent::kTypeUpdateComplete, event_result, code));
 }
 
 bool UpdateAttempter::ScheduleErrorEventAction() {
@@ -1314,16 +1408,21 @@
   LOG(ERROR) << "Update failed.";
   system_state_->payload_state()->UpdateFailed(error_event_->error_code);
 
+  // Send metrics if it was a rollback.
+  if (install_plan_ && install_plan_->is_rollback) {
+    system_state_->metrics_reporter()->ReportEnterpriseRollbackMetrics(
+        /*success=*/false, install_plan_->version);
+  }
+
   // Send it to Omaha.
   LOG(INFO) << "Reporting the error event";
-  shared_ptr<OmahaRequestAction> error_event_action(
-      new OmahaRequestAction(system_state_,
-                             error_event_.release(),  // Pass ownership.
-                             std::make_unique<LibcurlHttpFetcher>(
-                                 GetProxyResolver(), system_state_->hardware()),
-                             false));
-  actions_.push_back(shared_ptr<AbstractAction>(error_event_action));
-  processor_->EnqueueAction(error_event_action.get());
+  auto error_event_action = std::make_unique<OmahaRequestAction>(
+      system_state_,
+      error_event_.release(),  // Pass ownership.
+      std::make_unique<LibcurlHttpFetcher>(GetProxyResolver(),
+                                           system_state_->hardware()),
+      false);
+  processor_->EnqueueAction(std::move(error_event_action));
   SetStatusAndNotify(UpdateStatus::REPORTING_ERROR_EVENT);
   processor_->StartProcessing();
   return true;
@@ -1331,7 +1430,6 @@
 
 void UpdateAttempter::ScheduleProcessingStart() {
   LOG(INFO) << "Scheduling an action processor start.";
-  start_action_processor_ = false;
   MessageLoop::current()->PostTask(
       FROM_HERE,
       Bind([](ActionProcessor* processor) { processor->StartProcessing(); },
@@ -1361,15 +1459,14 @@
 
 void UpdateAttempter::PingOmaha() {
   if (!processor_->IsRunning()) {
-    shared_ptr<OmahaRequestAction> ping_action(new OmahaRequestAction(
+    auto ping_action = std::make_unique<OmahaRequestAction>(
         system_state_,
         nullptr,
         std::make_unique<LibcurlHttpFetcher>(GetProxyResolver(),
                                              system_state_->hardware()),
-        true));
-    actions_.push_back(shared_ptr<OmahaRequestAction>(ping_action));
+        true);
     processor_->set_delegate(nullptr);
-    processor_->EnqueueAction(ping_action.get());
+    processor_->EnqueueAction(std::move(ping_action));
     // Call StartProcessing() synchronously here to avoid any race conditions
     // caused by multiple outstanding ping Omaha requests.  If we call
     // StartProcessing() asynchronously, the device can be suspended before we
@@ -1395,7 +1492,6 @@
   ScheduleUpdates();
 }
 
-
 bool UpdateAttempter::DecrementUpdateCheckCount() {
   int64_t update_check_count_value;
 
@@ -1427,7 +1523,7 @@
 
     // Write out the new value of update_check_count_value.
     if (prefs_->SetInt64(kPrefsUpdateCheckCount, update_check_count_value)) {
-      // We successfully wrote out te new value, so enable the
+      // We successfully wrote out the new value, so enable the
       // update check based wait.
       LOG(INFO) << "New update check count = " << update_check_count_value;
       return true;
@@ -1444,7 +1540,6 @@
   return false;
 }
 
-
 void UpdateAttempter::UpdateEngineStarted() {
   // If we just booted into a new update, keep the previous OS version
   // in case we rebooted because of a crash of the old version, so we
@@ -1505,7 +1600,7 @@
   return true;
 }
 
-bool UpdateAttempter::GetBootTimeAtUpdate(Time *out_boot_time) {
+bool UpdateAttempter::GetBootTimeAtUpdate(Time* out_boot_time) {
   // In case of an update_engine restart without a reboot, we stored the boot_id
   // when the update was completed by setting a pref, so we can check whether
   // the last update was on this boot or a previous one.
@@ -1537,7 +1632,7 @@
           waiting_for_scheduled_check_);
 }
 
-bool UpdateAttempter::IsAnyUpdateSourceAllowed() {
+bool UpdateAttempter::IsAnyUpdateSourceAllowed() const {
   // We allow updates from any source if either of these are true:
   //  * The device is running an unofficial (dev/test) image.
   //  * The debugd dev features are accessible (i.e. in devmode with no owner).
@@ -1558,4 +1653,26 @@
   return false;
 }
 
+void UpdateAttempter::ReportTimeToUpdateAppliedMetric() {
+  const policy::DevicePolicy* device_policy = system_state_->device_policy();
+  if (device_policy && device_policy->IsEnterpriseEnrolled()) {
+    vector<policy::DevicePolicy::WeeklyTimeInterval> parsed_intervals;
+    bool has_time_restrictions =
+        device_policy->GetDisallowedTimeIntervals(&parsed_intervals);
+
+    int64_t update_first_seen_at_int;
+    if (system_state_->prefs()->Exists(kPrefsUpdateFirstSeenAt)) {
+      if (system_state_->prefs()->GetInt64(kPrefsUpdateFirstSeenAt,
+                                           &update_first_seen_at_int)) {
+        TimeDelta update_delay =
+            system_state_->clock()->GetWallclockTime() -
+            Time::FromInternalValue(update_first_seen_at_int);
+        system_state_->metrics_reporter()
+            ->ReportEnterpriseUpdateSeenToDownloadDays(has_time_restrictions,
+                                                       update_delay.InDays());
+      }
+    }
+  }
+}
+
 }  // namespace chromeos_update_engine
diff --git a/update_attempter.h b/update_attempter.h
index 76e93a2..c27f8a4 100644
--- a/update_attempter.h
+++ b/update_attempter.h
@@ -36,14 +36,15 @@
 #include "update_engine/client_library/include/update_engine/update_status.h"
 #include "update_engine/common/action_processor.h"
 #include "update_engine/common/cpu_limiter.h"
+#include "update_engine/common/proxy_resolver.h"
 #include "update_engine/omaha_request_params.h"
 #include "update_engine/omaha_response_handler_action.h"
 #include "update_engine/payload_consumer/download_action.h"
 #include "update_engine/payload_consumer/postinstall_runner_action.h"
-#include "update_engine/proxy_resolver.h"
 #include "update_engine/service_observer_interface.h"
 #include "update_engine/system_state.h"
 #include "update_engine/update_manager/policy.h"
+#include "update_engine/update_manager/staging_utils.h"
 #include "update_engine/update_manager/update_manager.h"
 
 namespace policy {
@@ -52,8 +53,6 @@
 
 namespace chromeos_update_engine {
 
-class UpdateEngineAdaptor;
-
 class UpdateAttempter : public ActionProcessorDelegate,
                         public DownloadActionDelegate,
                         public CertificateChecker::Observer,
@@ -70,7 +69,8 @@
   void Init();
 
   // Initiates scheduling of update checks.
-  virtual void ScheduleUpdates();
+  // Returns true if an update check is scheduled.
+  virtual bool ScheduleUpdates();
 
   // Checks for update and, if a newer version is available, attempts to update
   // the system. Non-empty |in_app_version| or |in_update_url| prevents
@@ -83,6 +83,7 @@
                       const std::string& omaha_url,
                       const std::string& target_channel,
                       const std::string& target_version_prefix,
+                      bool rollback_allowed,
                       bool obey_proxies,
                       bool interactive);
 
@@ -107,16 +108,6 @@
   // Returns the current status in the out param. Returns true on success.
   virtual bool GetStatus(update_engine::UpdateEngineStatus* out_status);
 
-  // Runs chromeos-setgoodkernel, whose responsibility it is to mark the
-  // currently booted partition has high priority/permanent/etc. The execution
-  // is asynchronous. On completion, the action processor may be started
-  // depending on the |start_action_processor_| field. Note that every update
-  // attempt goes through this method.
-  void UpdateBootFlags();
-
-  // Called when the boot flags have been updated.
-  void CompleteUpdateBootFlags(bool success);
-
   UpdateStatus status() const { return status_; }
 
   int http_response_code() const { return http_response_code_; }
@@ -132,7 +123,7 @@
   // Returns the update attempt flags that are in place for the current update
   // attempt.  These are cached at the start of an update attempt so that they
   // remain constant throughout the process.
-  virtual UpdateAttemptFlags GetCurrentUpdateAttemptFlags() {
+  virtual UpdateAttemptFlags GetCurrentUpdateAttemptFlags() const {
     return current_update_attempt_flags_;
   }
 
@@ -145,6 +136,10 @@
                               const std::string& omaha_url,
                               UpdateAttemptFlags flags);
 
+  // This is the version of CheckForUpdate called by the AttemptInstall API.
+  virtual bool CheckForInstall(const std::vector<std::string>& dlc_module_ids,
+                               const std::string& omaha_url);
+
   // This is the internal entry point for going through a rollback. This will
   // attempt to run the postinstall on the non-active partition and set it as
   // the partition to boot from. If |powerwash| is True, perform a powerwash
@@ -160,7 +155,7 @@
   BootControlInterface::Slot GetRollbackSlot() const;
 
   // Initiates a reboot if the current state is
-  // UPDATED_NEED_REBOOT. Returns true on sucess, false otherwise.
+  // UPDATED_NEED_REBOOT. Returns true on success, false otherwise.
   bool RebootIfNeeded();
 
   // DownloadActionDelegate methods:
@@ -177,9 +172,7 @@
   // Broadcasts the current status to all observers.
   void BroadcastStatus();
 
-  // Returns the special flags to be added to ErrorCode values based on the
-  // parameters used in the current update attempt.
-  uint32_t GetErrorCodeFlags();
+  ErrorCode GetAttemptErrorCode() const { return attempt_error_code_; }
 
   // Called at update_engine startup to do various house-keeping.
   void UpdateEngineStarted();
@@ -193,12 +186,12 @@
   // Stores in |out_boot_time| the boottime (CLOCK_BOOTTIME) recorded at the
   // time of the last successful update in the current boot. Returns false if
   // there wasn't a successful update in the current boot.
-  virtual bool GetBootTimeAtUpdate(base::Time *out_boot_time);
+  virtual bool GetBootTimeAtUpdate(base::Time* out_boot_time);
 
   // Returns a version OS version that was being used before the last reboot,
-  // and if that reboot happended to be into an update (current version).
+  // and if that reboot happened to be into an update (current version).
   // This will return an empty string otherwise.
-  std::string const& GetPrevVersion() const { return prev_version_; }
+  const std::string& GetPrevVersion() const { return prev_version_; }
 
   // Returns the number of consecutive failed update checks.
   virtual unsigned int consecutive_failed_update_checks() const {
@@ -218,8 +211,7 @@
   // Note that only one callback can be set, so effectively at most one client
   // can be notified.
   virtual void set_forced_update_pending_callback(
-      base::Callback<void(bool, bool)>*  // NOLINT(readability/function)
-      callback) {
+      base::Callback<void(bool, bool)>* callback) {
     forced_update_pending_callback_.reset(callback);
   }
 
@@ -227,7 +219,7 @@
   // we want to restrict updates to known safe sources, but under certain
   // conditions it's useful to allow updating from anywhere (e.g. to allow
   // 'cros flash' to function properly).
-  virtual bool IsAnyUpdateSourceAllowed();
+  bool IsAnyUpdateSourceAllowed() const;
 
   // Add and remove a service observer.
   void AddObserver(ServiceObserverInterface* observer) {
@@ -245,9 +237,6 @@
   void ClearObservers() { service_observers_.clear(); }
 
  private:
-  // Update server URL for automated lab test.
-  static const char* const kTestUpdateUrl;
-
   // Friend declarations for testing purposes.
   friend class UpdateAttempterUnderTest;
   friend class UpdateAttempterTest;
@@ -257,20 +246,36 @@
   FRIEND_TEST(UpdateAttempterTest, BootTimeInUpdateMarkerFile);
   FRIEND_TEST(UpdateAttempterTest, BroadcastCompleteDownloadTest);
   FRIEND_TEST(UpdateAttempterTest, ChangeToDownloadingOnReceivedBytesTest);
+  FRIEND_TEST(UpdateAttempterTest, CheckForUpdateAUDlcTest);
   FRIEND_TEST(UpdateAttempterTest, CreatePendingErrorEventTest);
   FRIEND_TEST(UpdateAttempterTest, CreatePendingErrorEventResumedTest);
   FRIEND_TEST(UpdateAttempterTest, DisableDeltaUpdateIfNeededTest);
   FRIEND_TEST(UpdateAttempterTest, DownloadProgressAccumulationTest);
+  FRIEND_TEST(UpdateAttempterTest, InstallSetsStatusIdle);
   FRIEND_TEST(UpdateAttempterTest, MarkDeltaUpdateFailureTest);
   FRIEND_TEST(UpdateAttempterTest, PingOmahaTest);
   FRIEND_TEST(UpdateAttempterTest, ReportDailyMetrics);
+  FRIEND_TEST(UpdateAttempterTest, RollbackNotAllowed);
+  FRIEND_TEST(UpdateAttempterTest, RollbackAfterInstall);
+  FRIEND_TEST(UpdateAttempterTest, RollbackAllowed);
+  FRIEND_TEST(UpdateAttempterTest, RollbackAllowedSetAndReset);
+  FRIEND_TEST(UpdateAttempterTest, RollbackMetricsNotRollbackFailure);
+  FRIEND_TEST(UpdateAttempterTest, RollbackMetricsNotRollbackSuccess);
+  FRIEND_TEST(UpdateAttempterTest, RollbackMetricsRollbackFailure);
+  FRIEND_TEST(UpdateAttempterTest, RollbackMetricsRollbackSuccess);
   FRIEND_TEST(UpdateAttempterTest, ScheduleErrorEventActionNoEventTest);
   FRIEND_TEST(UpdateAttempterTest, ScheduleErrorEventActionTest);
+  FRIEND_TEST(UpdateAttempterTest, SetRollbackHappenedNotRollback);
+  FRIEND_TEST(UpdateAttempterTest, SetRollbackHappenedRollback);
   FRIEND_TEST(UpdateAttempterTest, TargetVersionPrefixSetAndReset);
+  FRIEND_TEST(UpdateAttempterTest, UpdateAfterInstall);
   FRIEND_TEST(UpdateAttempterTest, UpdateAttemptFlagsCachedAtUpdateStart);
   FRIEND_TEST(UpdateAttempterTest, UpdateDeferredByPolicyTest);
   FRIEND_TEST(UpdateAttempterTest, UpdateIsNotRunningWhenUpdateAvailable);
-  FRIEND_TEST(UpdateAttempterTest, UpdateTest);
+
+  // Returns the special flags to be added to ErrorCode values based on the
+  // parameters used in the current update attempt.
+  uint32_t GetErrorCodeFlags();
 
   // CertificateChecker::Observer method.
   // Report metrics about the certificate being checked.
@@ -313,12 +318,10 @@
 
   ProxyResolver* GetProxyResolver() {
 #if USE_CHROME_NETWORK_PROXY
-    return obeying_proxies_ ?
-        reinterpret_cast<ProxyResolver*>(&chrome_proxy_resolver_) :
-        reinterpret_cast<ProxyResolver*>(&direct_proxy_resolver_);
-#else
-    return &direct_proxy_resolver_;
+    if (obeying_proxies_)
+      return &chrome_proxy_resolver_;
 #endif  // USE_CHROME_NETWORK_PROXY
+    return &direct_proxy_resolver_;
   }
 
   // Sends a ping to Omaha.
@@ -330,11 +333,12 @@
 
   // Helper method of Update() to calculate the update-related parameters
   // from various sources and set the appropriate state. Please refer to
-  // Update() method for the meaning of the parametes.
+  // Update() method for the meaning of the parameters.
   bool CalculateUpdateParams(const std::string& app_version,
                              const std::string& omaha_url,
                              const std::string& target_channel,
                              const std::string& target_version_prefix,
+                             bool rollback_allowed,
                              bool obey_proxies,
                              bool interactive);
 
@@ -342,19 +346,13 @@
   // which type of scattering is enabled, etc.) and also updates/deletes
   // the corresponding prefs file used in scattering. Should be called
   // only after the device policy has been loaded and set in the system_state_.
-  void CalculateScatteringParams(bool is_interactive);
+  void CalculateScatteringParams(bool interactive);
 
   // Sets a random value for the waiting period to wait for before downloading
   // an update, if one available. This value will be upperbounded by the
   // scatter factor value specified from policy.
   void GenerateNewWaitingPeriod();
 
-  // Helper method of Update() and Rollback() to construct the sequence of
-  // actions to be performed for the postinstall.
-  // |previous_action| is the previous action to get
-  // bonded with the install_plan that gets passed to postinstall.
-  void BuildPostInstallActions(InstallPlanAction* previous_action);
-
   // Helper method of Update() to construct the sequence of actions to
   // be performed for an update check. Please refer to
   // Update() method for the meaning of the parameters.
@@ -396,15 +394,31 @@
   // Updates the time an update was last attempted to the current time.
   void UpdateLastCheckedTime();
 
+  // Checks whether we need to clear the rollback-happened preference after
+  // policy is available again.
+  void UpdateRollbackHappened();
+
   // Returns whether an update is currently running or scheduled.
   bool IsUpdateRunningOrScheduled();
 
+  void CalculateStagingParams(bool interactive);
+
+  // Reports a metric that tracks the time from when the update was first seen
+  // to the time when the update was finally downloaded and applied. This metric
+  // will only be reported for enterprise enrolled devices.
+  void ReportTimeToUpdateAppliedMetric();
+
   // Last status notification timestamp used for throttling. Use monotonic
   // TimeTicks to ensure that notifications are sent even if the system clock is
   // set back in the middle of an update.
   base::TimeTicks last_notify_time_;
 
-  std::vector<std::shared_ptr<AbstractAction>> actions_;
+  // Our two proxy resolvers
+  DirectProxyResolver direct_proxy_resolver_;
+#if USE_CHROME_NETWORK_PROXY
+  ChromeBrowserProxyResolver chrome_proxy_resolver_;
+#endif  // USE_CHROME_NETWORK_PROXY
+
   std::unique_ptr<ActionProcessor> processor_;
 
   // External state of the system outside the update_engine process
@@ -417,11 +431,8 @@
   // The list of services observing changes in the updater.
   std::set<ServiceObserverInterface*> service_observers_;
 
-  // Pointer to the OmahaResponseHandlerAction in the actions_ vector.
-  std::shared_ptr<OmahaResponseHandlerAction> response_handler_action_;
-
-  // Pointer to the DownloadAction in the actions_ vector.
-  std::shared_ptr<DownloadAction> download_action_;
+  // The install plan.
+  std::unique_ptr<InstallPlan> install_plan_;
 
   // Pointer to the preferences store interface. This is just a cached
   // copy of system_state->prefs() because it's used in many methods and
@@ -437,6 +448,9 @@
   // HTTP server response code from the last HTTP request action.
   int http_response_code_ = 0;
 
+  // The attempt error code when the update attempt finished.
+  ErrorCode attempt_error_code_ = ErrorCode::kSuccess;
+
   // CPU limiter during the update.
   CPULimiter cpu_limiter_;
 
@@ -464,26 +478,6 @@
   // If true, this update cycle we are obeying proxies
   bool obeying_proxies_ = true;
 
-  // Our two proxy resolvers
-  DirectProxyResolver direct_proxy_resolver_;
-#if USE_CHROME_NETWORK_PROXY
-  ChromeBrowserProxyResolver chrome_proxy_resolver_;
-#endif  // USE_CHROME_NETWORK_PROXY
-
-  // Originally, both of these flags are false. Once UpdateBootFlags is called,
-  // |update_boot_flags_running_| is set to true. As soon as UpdateBootFlags
-  // completes its asynchronous run, |update_boot_flags_running_| is reset to
-  // false and |updated_boot_flags_| is set to true. From that point on there
-  // will be no more changes to these flags.
-  //
-  // True if UpdateBootFlags has completed.
-  bool updated_boot_flags_ = false;
-  // True if UpdateBootFlags is running.
-  bool update_boot_flags_running_ = false;
-
-  // True if the action processor needs to be started by the boot flag updater.
-  bool start_action_processor_ = false;
-
   // Used for fetching information about the device policy.
   std::unique_ptr<policy::PolicyProvider> policy_provider_;
 
@@ -514,9 +508,26 @@
   std::string forced_app_version_;
   std::string forced_omaha_url_;
 
+  // A list of DLC module IDs.
+  std::vector<std::string> dlc_module_ids_;
+  // Whether the operation is an install (write to the current slot, not the
+  // inactive slot).
+  bool is_install_;
+
+  // If this is not TimeDelta(), then that means staging is turned on.
+  base::TimeDelta staging_wait_time_;
+  chromeos_update_manager::StagingSchedule staging_schedule_;
+
   DISALLOW_COPY_AND_ASSIGN(UpdateAttempter);
 };
 
+// Turns a generic ErrorCode::kError to a generic error code specific
+// to |action| (e.g., ErrorCode::kFilesystemVerifierError). If |code| is
+// not ErrorCode::kError, or the action is not matched, returns |code|
+// unchanged.
+
+ErrorCode GetErrorCodeForAction(AbstractAction* action, ErrorCode code);
+
 }  // namespace chromeos_update_engine
 
 #endif  // UPDATE_ENGINE_UPDATE_ATTEMPTER_H_
diff --git a/update_attempter_android.cc b/update_attempter_android.cc
index 406e40a..1cc8505 100644
--- a/update_attempter_android.cc
+++ b/update_attempter_android.cc
@@ -25,7 +25,6 @@
 #include <base/bind.h>
 #include <base/logging.h>
 #include <base/strings/string_number_conversions.h>
-#include <brillo/bind_lambda.h>
 #include <brillo/data_encoding.h>
 #include <brillo/message_loops/message_loop.h>
 #include <brillo/strings/string_utils.h>
@@ -47,6 +46,7 @@
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_consumer/payload_metadata.h"
 #include "update_engine/payload_consumer/postinstall_runner_action.h"
+#include "update_engine/update_boot_flags_action.h"
 #include "update_engine/update_status_utils.h"
 
 #ifndef _UE_SIDELOAD
@@ -79,7 +79,7 @@
 
 // Log and set the error on the passed ErrorPtr.
 bool LogAndSetError(brillo::ErrorPtr* error,
-                    const tracked_objects::Location& location,
+                    const base::Location& location,
                     const string& reason) {
   brillo::Error::AddTo(error, location, kErrorDomain, kGenericError, reason);
   LOG(ERROR) << "Replying with failure: " << location.ToString() << ": "
@@ -138,7 +138,7 @@
     return LogAndSetError(
         error, FROM_HERE, "An update already applied, waiting for reboot");
   }
-  if (ongoing_update_) {
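+  // The ActionProcessor's running state is now the single source of truth for
+  // whether an update is in flight (the old |ongoing_update_| flag is gone).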
+  if (processor_->IsRunning()) {
     return LogAndSetError(
         error, FROM_HERE, "Already processing an update, cancel it first.");
   }
@@ -219,13 +219,24 @@
   // c) RUN_POST_INSTALL is set to 0.
   if (install_plan_.is_resume && prefs_->Exists(kPrefsPostInstallSucceeded)) {
     bool post_install_succeeded = false;
-    prefs_->GetBoolean(kPrefsPostInstallSucceeded, &post_install_succeeded);
-    if (post_install_succeeded) {
+    if (prefs_->GetBoolean(kPrefsPostInstallSucceeded,
+                           &post_install_succeeded) &&
+        post_install_succeeded) {
       install_plan_.run_post_install =
           GetHeaderAsBool(headers[kPayloadPropertyRunPostInstall], true);
     }
   }
 
+  // Skip writing verity if we're resuming and verity has already been written.
+  install_plan_.write_verity = true;
+  if (install_plan_.is_resume && prefs_->Exists(kPrefsVerityWritten)) {
+    bool verity_written = false;
+    if (prefs_->GetBoolean(kPrefsVerityWritten, &verity_written) &&
+        verity_written) {
+      install_plan_.write_verity = false;
+    }
+  }
+
   NetworkId network_id = kDefaultNetworkId;
   if (!headers[kPayloadPropertyNetworkId].empty()) {
     if (!base::StringToUint64(headers[kPayloadPropertyNetworkId],
@@ -246,43 +257,53 @@
   LOG(INFO) << "Using this install plan:";
   install_plan_.Dump();
 
-  BuildUpdateActions(payload_url);
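+  // Select the fetcher for |payload_url|: file URLs are handled by
+  // FileFetcher, everything else by libcurl (not available when sideloading).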
+  HttpFetcher* fetcher = nullptr;
+  if (FileFetcher::SupportedUrl(payload_url)) {
+    DLOG(INFO) << "Using FileFetcher for file URL.";
+    fetcher = new FileFetcher();
+  } else {
+#ifdef _UE_SIDELOAD
+    LOG(FATAL) << "Unsupported sideload URI: " << payload_url;
+#else
+    LibcurlHttpFetcher* libcurl_fetcher =
+        new LibcurlHttpFetcher(&proxy_resolver_, hardware_);
+    libcurl_fetcher->set_server_to_check(ServerToCheck::kDownload);
+    fetcher = libcurl_fetcher;
+#endif  // _UE_SIDELOAD
+  }
   // Setup extra headers.
-  HttpFetcher* fetcher = download_action_->http_fetcher();
   if (!headers[kPayloadPropertyAuthorization].empty())
     fetcher->SetHeader("Authorization", headers[kPayloadPropertyAuthorization]);
   if (!headers[kPayloadPropertyUserAgent].empty())
     fetcher->SetHeader("User-Agent", headers[kPayloadPropertyUserAgent]);
 
-  SetStatusAndNotify(UpdateStatus::UPDATE_AVAILABLE);
-  ongoing_update_ = true;
+  BuildUpdateActions(fetcher);
 
-  // Just in case we didn't update boot flags yet, make sure they're updated
-  // before any update processing starts. This will start the update process.
-  UpdateBootFlags();
+  SetStatusAndNotify(UpdateStatus::UPDATE_AVAILABLE);
 
   UpdatePrefsOnUpdateStart(install_plan_.is_resume);
   // TODO(xunchang) report the metrics for unresumable updates
 
+  ScheduleProcessingStart();
   return true;
 }
 
 bool UpdateAttempterAndroid::SuspendUpdate(brillo::ErrorPtr* error) {
-  if (!ongoing_update_)
+  if (!processor_->IsRunning())
     return LogAndSetError(error, FROM_HERE, "No ongoing update to suspend.");
   processor_->SuspendProcessing();
   return true;
 }
 
 bool UpdateAttempterAndroid::ResumeUpdate(brillo::ErrorPtr* error) {
-  if (!ongoing_update_)
+  if (!processor_->IsRunning())
     return LogAndSetError(error, FROM_HERE, "No ongoing update to resume.");
   processor_->ResumeProcessing();
   return true;
 }
 
 bool UpdateAttempterAndroid::CancelUpdate(brillo::ErrorPtr* error) {
-  if (!ongoing_update_)
+  if (!processor_->IsRunning())
     return LogAndSetError(error, FROM_HERE, "No ongoing update to cancel.");
   processor_->StopProcessing();
   return true;
@@ -296,7 +317,7 @@
     case UpdateStatus::IDLE:
       return true;
 
-    case UpdateStatus::UPDATED_NEED_REBOOT:  {
+    case UpdateStatus::UPDATED_NEED_REBOOT: {
       // Remove the reboot marker so that if the machine is rebooted
       // after resetting to idle state, it doesn't go back to
       // UpdateStatus::UPDATED_NEED_REBOOT state.
@@ -310,14 +331,12 @@
       // Mark the current slot as successful again, since marking it as active
       // may reset the successful bit. We ignore the result of whether marking
       // the current slot as successful worked.
-      if (!boot_control_->MarkBootSuccessfulAsync(Bind([](bool successful){})))
+      if (!boot_control_->MarkBootSuccessfulAsync(Bind([](bool successful) {})))
         ret_value = false;
 
       if (!ret_value) {
         return LogAndSetError(
-            error,
-            FROM_HERE,
-            "Failed to reset the status to ");
+            error, FROM_HERE, "Failed to reset the status to ");
       }
 
       SetStatusAndNotify(UpdateStatus::IDLE);
@@ -349,8 +368,7 @@
   }
   ErrorCode errorcode;
   PayloadMetadata payload_metadata;
-  if (payload_metadata.ParsePayloadHeader(
-          metadata, kBrilloMajorPayloadVersion, &errorcode) !=
+  if (payload_metadata.ParsePayloadHeader(metadata, &errorcode) !=
       MetadataParseResult::kSuccess) {
     return LogAndSetError(error,
                           FROM_HERE,
@@ -376,8 +394,13 @@
         "Failed to read metadata and signature from " + metadata_filename);
   }
   fd->Close();
-  errorcode = payload_metadata.ValidateMetadataSignature(
-      metadata, "", base::FilePath(constants::kUpdatePayloadPublicKeyPath));
+
+  string public_key;
+  if (!utils::ReadFile(constants::kUpdatePayloadPublicKeyPath, &public_key)) {
+    return LogAndSetError(error, FROM_HERE, "Failed to read public key.");
+  }
+  errorcode =
+      payload_metadata.ValidateMetadataSignature(metadata, "", public_key);
   if (errorcode != ErrorCode::kSuccess) {
     return LogAndSetError(error,
                           FROM_HERE,
@@ -488,6 +511,8 @@
   }
   if (type == DownloadAction::StaticType()) {
     SetStatusAndNotify(UpdateStatus::FINALIZING);
+  } else if (type == FilesystemVerifierAction::StaticType()) {
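+    // Record that verity has been written so a resumed update can skip
+    // rewriting it.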
+    prefs_->SetBoolean(kPrefsVerityWritten, true);
   }
 }
 
@@ -537,27 +562,6 @@
   }
 }
 
-void UpdateAttempterAndroid::UpdateBootFlags() {
-  if (updated_boot_flags_) {
-    LOG(INFO) << "Already updated boot flags. Skipping.";
-    CompleteUpdateBootFlags(true);
-    return;
-  }
-  // This is purely best effort.
-  LOG(INFO) << "Marking booted slot as good.";
-  if (!boot_control_->MarkBootSuccessfulAsync(
-          Bind(&UpdateAttempterAndroid::CompleteUpdateBootFlags,
-               base::Unretained(this)))) {
-    LOG(ERROR) << "Failed to mark current boot as successful.";
-    CompleteUpdateBootFlags(false);
-  }
-}
-
-void UpdateAttempterAndroid::CompleteUpdateBootFlags(bool successful) {
-  updated_boot_flags_ = true;
-  ScheduleProcessingStart();
-}
-
 void UpdateAttempterAndroid::ScheduleProcessingStart() {
   LOG(INFO) << "Scheduling an action processor start.";
   brillo::MessageLoop::current()->PostTask(
@@ -572,13 +576,13 @@
     return;
   }
 
+  boot_control_->Cleanup();
+
   download_progress_ = 0;
-  actions_.clear();
   UpdateStatus new_status =
       (error_code == ErrorCode::kSuccess ? UpdateStatus::UPDATED_NEED_REBOOT
                                          : UpdateStatus::IDLE);
   SetStatusAndNotify(new_status);
-  ongoing_update_ = false;
 
   // The network id is only applicable to one download attempt and once it's
   // done the network id should not be re-used anymore.
@@ -592,6 +596,9 @@
   CollectAndReportUpdateMetricsOnUpdateFinished(error_code);
   ClearMetricsPrefs();
   if (error_code == ErrorCode::kSuccess) {
+    // We should only reset the PayloadAttemptNumber if the update succeeds, or
+    // we switch to a different payload.
+    prefs_->Delete(kPrefsPayloadAttemptNumber);
     metrics_utils::SetSystemUpdatedMarker(clock_.get(), prefs_);
     // Clear the total bytes downloaded if and only if the update succeeds.
     prefs_->SetInt64(kPrefsTotalBytesDownloaded, 0);
@@ -612,51 +619,29 @@
   last_notify_time_ = TimeTicks::Now();
 }
 
-void UpdateAttempterAndroid::BuildUpdateActions(const string& url) {
+void UpdateAttempterAndroid::BuildUpdateActions(HttpFetcher* fetcher) {
   CHECK(!processor_->IsRunning());
   processor_->set_delegate(this);
 
   // Actions:
-  shared_ptr<InstallPlanAction> install_plan_action(
-      new InstallPlanAction(install_plan_));
-
-  HttpFetcher* download_fetcher = nullptr;
-  if (FileFetcher::SupportedUrl(url)) {
-    DLOG(INFO) << "Using FileFetcher for file URL.";
-    download_fetcher = new FileFetcher();
-  } else {
-#ifdef _UE_SIDELOAD
-    LOG(FATAL) << "Unsupported sideload URI: " << url;
-#else
-    LibcurlHttpFetcher* libcurl_fetcher =
-        new LibcurlHttpFetcher(&proxy_resolver_, hardware_);
-    libcurl_fetcher->set_server_to_check(ServerToCheck::kDownload);
-    download_fetcher = libcurl_fetcher;
-#endif  // _UE_SIDELOAD
-  }
-  shared_ptr<DownloadAction> download_action(
-      new DownloadAction(prefs_,
-                         boot_control_,
-                         hardware_,
-                         nullptr,           // system_state, not used.
-                         download_fetcher,  // passes ownership
-                         true /* is_interactive */));
-  shared_ptr<FilesystemVerifierAction> filesystem_verifier_action(
-      new FilesystemVerifierAction());
-
-  shared_ptr<PostinstallRunnerAction> postinstall_runner_action(
-      new PostinstallRunnerAction(boot_control_, hardware_));
-
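+  // Marking the booted slot as good (previously done in UpdateBootFlags()) is
+  // now handled by UpdateBootFlagsAction, enqueued as the first action below.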
+  auto update_boot_flags_action =
+      std::make_unique<UpdateBootFlagsAction>(boot_control_);
+  auto install_plan_action = std::make_unique<InstallPlanAction>(install_plan_);
+  auto download_action =
+      std::make_unique<DownloadAction>(prefs_,
+                                       boot_control_,
+                                       hardware_,
+                                       nullptr,  // system_state, not used.
+                                       fetcher,  // passes ownership
+                                       true /* interactive */);
   download_action->set_delegate(this);
   download_action->set_base_offset(base_offset_);
-  download_action_ = download_action;
+  auto filesystem_verifier_action =
+      std::make_unique<FilesystemVerifierAction>();
+  auto postinstall_runner_action =
+      std::make_unique<PostinstallRunnerAction>(boot_control_, hardware_);
   postinstall_runner_action->set_delegate(this);
 
-  actions_.push_back(shared_ptr<AbstractAction>(install_plan_action));
-  actions_.push_back(shared_ptr<AbstractAction>(download_action));
-  actions_.push_back(shared_ptr<AbstractAction>(filesystem_verifier_action));
-  actions_.push_back(shared_ptr<AbstractAction>(postinstall_runner_action));
-
   // Bond them together. We have to use the leaf-types when calling
   // BondActions().
   BondActions(install_plan_action.get(), download_action.get());
@@ -664,9 +649,11 @@
   BondActions(filesystem_verifier_action.get(),
               postinstall_runner_action.get());
 
-  // Enqueue the actions.
-  for (const shared_ptr<AbstractAction>& action : actions_)
-    processor_->EnqueueAction(action.get());
+  processor_->EnqueueAction(std::move(update_boot_flags_action));
+  processor_->EnqueueAction(std::move(install_plan_action));
+  processor_->EnqueueAction(std::move(download_action));
+  processor_->EnqueueAction(std::move(filesystem_verifier_action));
+  processor_->EnqueueAction(std::move(postinstall_runner_action));
 }
 
 bool UpdateAttempterAndroid::WriteUpdateCompletedMarker() {
@@ -705,10 +692,12 @@
 
   metrics::AttemptResult attempt_result =
       metrics_utils::GetAttemptResult(error_code);
-  Time attempt_start_time = Time::FromInternalValue(
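+  // The total duration is measured against boot time (which keeps running
+  // during suspend), while uptime is measured against monotonic time.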
+  Time boot_time_start = Time::FromInternalValue(
+      metrics_utils::GetPersistedValue(kPrefsUpdateBootTimestampStart, prefs_));
+  Time monotonic_time_start = Time::FromInternalValue(
       metrics_utils::GetPersistedValue(kPrefsUpdateTimestampStart, prefs_));
-  TimeDelta duration = clock_->GetBootTime() - attempt_start_time;
-  TimeDelta duration_uptime = clock_->GetMonotonicTime() - attempt_start_time;
+  TimeDelta duration = clock_->GetBootTime() - boot_time_start;
+  TimeDelta duration_uptime = clock_->GetMonotonicTime() - monotonic_time_start;
 
   metrics_reporter_->ReportUpdateAttemptMetrics(
       nullptr,  // system_state
@@ -758,6 +747,7 @@
         num_bytes_downloaded,
         download_overhead_percentage,
         duration,
+        duration_uptime,
         static_cast<int>(reboot_count),
         0);  // url_switch_count
   }
@@ -813,6 +803,11 @@
   metrics_utils::LoadAndReportTimeToReboot(
       metrics_reporter_.get(), prefs_, clock_.get());
   ClearMetricsPrefs();
+
+  // Also reset the update progress if the build version has changed.
+  if (!DeltaPerformer::ResetUpdateProgress(prefs_, false)) {
+    LOG(WARNING) << "Unable to reset the update progress.";
+  }
 }
 
 // Save the update start time. Reset the reboot count and attempt number if the
@@ -826,17 +821,17 @@
         metrics_utils::GetPersistedValue(kPrefsPayloadAttemptNumber, prefs_);
     metrics_utils::SetPayloadAttemptNumber(attempt_number + 1, prefs_);
   }
-  Time update_start_time = clock_->GetMonotonicTime();
-  metrics_utils::SetUpdateTimestampStart(update_start_time, prefs_);
+  metrics_utils::SetUpdateTimestampStart(clock_->GetMonotonicTime(), prefs_);
+  metrics_utils::SetUpdateBootTimestampStart(clock_->GetBootTime(), prefs_);
 }
 
 void UpdateAttempterAndroid::ClearMetricsPrefs() {
   CHECK(prefs_);
   prefs_->Delete(kPrefsCurrentBytesDownloaded);
   prefs_->Delete(kPrefsNumReboots);
-  prefs_->Delete(kPrefsPayloadAttemptNumber);
   prefs_->Delete(kPrefsSystemUpdatedMarker);
   prefs_->Delete(kPrefsUpdateTimestampStart);
+  prefs_->Delete(kPrefsUpdateBootTimestampStart);
 }
 
 }  // namespace chromeos_update_engine
diff --git a/update_attempter_android.h b/update_attempter_android.h
index f00692e..c4710ad 100644
--- a/update_attempter_android.h
+++ b/update_attempter_android.h
@@ -93,14 +93,6 @@
  private:
   friend class UpdateAttempterAndroidTest;
 
-  // Asynchronously marks the current slot as successful if needed. If already
-  // marked as good, CompleteUpdateBootFlags() is called starting the action
-  // processor.
-  void UpdateBootFlags();
-
-  // Called when the boot flags have been updated.
-  void CompleteUpdateBootFlags(bool success);
-
   // Schedules an event loop callback to start the action processor. This is
   // scheduled asynchronously to unblock the event loop.
   void ScheduleProcessingStart();
@@ -114,8 +106,9 @@
   void SetStatusAndNotify(UpdateStatus status);
 
   // Helper method to construct the sequence of actions to be performed for
-  // applying an update from the given |url|.
-  void BuildUpdateActions(const std::string& url);
+  // applying an update using a given HttpFetcher. The ownership of |fetcher| is
+  // passed to this function.
+  void BuildUpdateActions(HttpFetcher* fetcher);
 
   // Writes to the processing completed marker. Does nothing if
   // |update_completed_marker_| is empty.
@@ -129,7 +122,10 @@
   // payload_id.
   // |KprefsNumReboots|: number of reboots when applying the current update.
   // |kPrefsSystemUpdatedMarker|: end timestamp of the last successful update.
-  // |kPrefsUpdateTimestampStart|: start timestamp of the current update.
+  // |kPrefsUpdateTimestampStart|: start timestamp in monotonic time of the
+  // current update.
+  // |kPrefsUpdateBootTimestampStart|: start timestamp in boot time of
+  // the current update.
   // |kPrefsCurrentBytesDownloaded|: number of bytes downloaded for the current
   // payload_id.
   // |kPrefsTotalBytesDownloaded|: number of bytes downloaded in total since
@@ -150,13 +146,14 @@
   void UpdatePrefsAndReportUpdateMetricsOnReboot();
 
   // Prefs to update:
-  //   |kPrefsPayloadAttemptNumber|, |kPrefsUpdateTimestampStart|
+  //   |kPrefsPayloadAttemptNumber|, |kPrefsUpdateTimestampStart|,
+  //   |kPrefsUpdateBootTimestampStart|
   void UpdatePrefsOnUpdateStart(bool is_resume);
 
   // Prefs to delete:
-  //   |kPrefsNumReboots|, |kPrefsPayloadAttemptNumber|,
+  //   |kPrefsNumReboots|, |kPrefsCurrentBytesDownloaded|,
   //   |kPrefsSystemUpdatedMarker|, |kPrefsUpdateTimestampStart|,
-  //   |kPrefsCurrentBytesDownloaded|
+  //   |kPrefsUpdateBootTimestampStart|
   void ClearMetricsPrefs();
 
   DaemonStateInterface* daemon_state_;
@@ -171,19 +168,12 @@
   // set back in the middle of an update.
   base::TimeTicks last_notify_time_;
 
-  // The list of actions and action processor that runs them asynchronously.
-  // Only used when |ongoing_update_| is true.
-  std::vector<std::shared_ptr<AbstractAction>> actions_;
+  // Only direct proxy supported.
+  DirectProxyResolver proxy_resolver_;
+
+  // The processor for running Actions.
   std::unique_ptr<ActionProcessor> processor_;
 
-  // Pointer to the DownloadAction in the actions_ vector.
-  std::shared_ptr<DownloadAction> download_action_;
-
-  // Whether there is an ongoing update. This implies that an update was started
-  // but not finished yet. This value will be true even if the update was
-  // suspended.
-  bool ongoing_update_{false};
-
   // The InstallPlan used during the ongoing update.
   InstallPlan install_plan_;
 
@@ -194,16 +184,9 @@
   // The offset in the payload file where the CrAU part starts.
   int64_t base_offset_{0};
 
-  // Only direct proxy supported.
-  DirectProxyResolver proxy_resolver_;
-
   // Helper class to select the network to use during the update.
   std::unique_ptr<NetworkSelectorInterface> network_selector_;
 
-  // Whether we have marked the current slot as good. This step is required
-  // before applying an update to the other slot.
-  bool updated_boot_flags_ = false;
-
   std::unique_ptr<ClockInterface> clock_;
 
   std::unique_ptr<MetricsReporterInterface> metrics_reporter_;
diff --git a/update_attempter_android_unittest.cc b/update_attempter_android_unittest.cc
index 94452df..3be0b7e 100644
--- a/update_attempter_android_unittest.cc
+++ b/update_attempter_android_unittest.cc
@@ -111,22 +111,24 @@
   update_attempter_android_.Init();
   // Check that we reset the metric prefs.
   EXPECT_FALSE(prefs_.Exists(kPrefsNumReboots));
-  EXPECT_FALSE(prefs_.Exists(kPrefsPayloadAttemptNumber));
   EXPECT_FALSE(prefs_.Exists(kPrefsUpdateTimestampStart));
   EXPECT_FALSE(prefs_.Exists(kPrefsSystemUpdatedMarker));
+  // PayloadAttemptNumber should persist across reboots.
+  EXPECT_TRUE(prefs_.Exists(kPrefsPayloadAttemptNumber));
 }
 
 TEST_F(UpdateAttempterAndroidTest, ReportMetricsOnUpdateTerminated) {
   prefs_.SetInt64(kPrefsNumReboots, 3);
   prefs_.SetInt64(kPrefsPayloadAttemptNumber, 2);
   prefs_.SetString(kPrefsPreviousVersion, "56789");
+  prefs_.SetInt64(kPrefsUpdateBootTimestampStart, 10000);
   prefs_.SetInt64(kPrefsUpdateTimestampStart, 12345);
 
   Time boot_time = Time::FromInternalValue(22345);
   Time up_time = Time::FromInternalValue(21345);
   clock_->SetBootTime(boot_time);
   clock_->SetMonotonicTime(up_time);
-  TimeDelta duration = boot_time - Time::FromInternalValue(12345);
+  TimeDelta duration = boot_time - Time::FromInternalValue(10000);
   TimeDelta duration_uptime = up_time - Time::FromInternalValue(12345);
   EXPECT_CALL(
       *metrics_reporter_,
@@ -140,7 +142,8 @@
                                  ErrorCode::kSuccess))
       .Times(1);
   EXPECT_CALL(*metrics_reporter_,
-              ReportSuccessfulUpdateMetrics(2, 0, _, _, _, _, duration, 3, _))
+              ReportSuccessfulUpdateMetrics(
+                  2, 0, _, _, _, _, duration, duration_uptime, 3, _))
       .Times(1);
 
   SetUpdateStatus(UpdateStatus::UPDATE_AVAILABLE);
@@ -181,10 +184,11 @@
                   125,
                   _,
                   _,
+                  _,
                   _))
       .Times(1);
 
-  // The first update fails after receving 50 bytes in total.
+  // The first update fails after receiving 50 bytes in total.
   update_attempter_android_.BytesReceived(30, 50, 200);
   update_attempter_android_.ProcessingDone(nullptr, ErrorCode::kError);
   EXPECT_EQ(
diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc
index 240e4ec..579c736 100644
--- a/update_attempter_unittest.cc
+++ b/update_attempter_unittest.cc
@@ -28,7 +28,9 @@
 #include <gtest/gtest.h>
 #include <policy/libpolicy.h>
 #include <policy/mock_device_policy.h>
+#include <policy/mock_libpolicy.h>
 
+#include "update_engine/common/dlcservice_interface.h"
 #include "update_engine/common/fake_clock.h"
 #include "update_engine/common/fake_prefs.h"
 #include "update_engine/common/mock_action.h"
@@ -47,22 +49,28 @@
 #include "update_engine/payload_consumer/install_plan.h"
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_consumer/postinstall_runner_action.h"
+#include "update_engine/update_boot_flags_action.h"
 
 using base::Time;
 using base::TimeDelta;
 using chromeos_update_manager::EvalStatus;
+using chromeos_update_manager::StagingSchedule;
 using chromeos_update_manager::UpdateCheckParams;
+using policy::DevicePolicy;
 using std::string;
 using std::unique_ptr;
+using std::vector;
 using testing::_;
 using testing::DoAll;
 using testing::Field;
 using testing::InSequence;
 using testing::Ne;
 using testing::NiceMock;
+using testing::Pointee;
 using testing::Property;
 using testing::Return;
 using testing::ReturnPointee;
+using testing::ReturnRef;
 using testing::SaveArg;
 using testing::SetArgPointee;
 using update_engine::UpdateAttemptFlags;
@@ -71,6 +79,17 @@
 
 namespace chromeos_update_engine {
 
+namespace {
+
+class MockDlcService : public DlcServiceInterface {
+ public:
+  MOCK_METHOD1(GetInstalled, bool(vector<string>*));
+};
+
+}  // namespace
+
+const char kRollbackVersion[] = "10575.39.2";
+
 // Test a subclass rather than the main class directly so that we can mock out
 // methods within the class. There're explicit unit tests for the mocked out
 // methods.
@@ -81,13 +100,14 @@
 
   // Wrap the update scheduling method, allowing us to opt out of scheduled
   // updates for testing purposes.
-  void ScheduleUpdates() override {
+  bool ScheduleUpdates() override {
     schedule_updates_called_ = true;
     if (do_schedule_updates_) {
       UpdateAttempter::ScheduleUpdates();
     } else {
       LOG(INFO) << "[TEST] Update scheduling disabled.";
     }
+    return true;
   }
   void EnableScheduleUpdates() { do_schedule_updates_ = true; }
   void DisableScheduleUpdates() { do_schedule_updates_ = false; }
@@ -111,10 +131,13 @@
     // Override system state members.
     fake_system_state_.set_connection_manager(&mock_connection_manager);
     fake_system_state_.set_update_attempter(&attempter_);
+    fake_system_state_.set_dlcservice(&mock_dlcservice_);
     loop_.SetAsCurrent();
 
     certificate_checker_.Init();
 
+    attempter_.set_forced_update_pending_callback(
+        new base::Callback<void(bool, bool)>(base::Bind([](bool, bool) {})));
     // Finish initializing the attempter.
     attempter_.Init();
   }
@@ -165,13 +188,20 @@
   void P2PEnabledInteractiveStart();
   void P2PEnabledStartingFailsStart();
   void P2PEnabledHousekeepingFailsStart();
+  void ResetRollbackHappenedStart(bool is_consumer,
+                                  bool is_policy_available,
+                                  bool expected_reset);
+  // Staging related callbacks.
+  void SetUpStagingTest(const StagingSchedule& schedule, FakePrefs* prefs);
+  void CheckStagingOff();
+  void StagingSetsPrefsAndTurnsOffScatteringStart();
+  void StagingOffIfInteractiveStart();
+  void StagingOffIfOobeStart();
 
   bool actual_using_p2p_for_downloading() {
     return actual_using_p2p_for_downloading_;
   }
-  bool actual_using_p2p_for_sharing() {
-    return actual_using_p2p_for_sharing_;
-  }
+  bool actual_using_p2p_for_sharing() { return actual_using_p2p_for_sharing_; }
 
   base::MessageLoopForIO base_loop_;
   brillo::BaseMessageLoop loop_{&base_loop_};
@@ -180,6 +210,7 @@
   UpdateAttempterUnderTest attempter_{&fake_system_state_};
   OpenSSLWrapper openssl_wrapper_;
   CertificateChecker certificate_checker_;
+  MockDlcService mock_dlcservice_;
 
   NiceMock<MockActionProcessor>* processor_;
   NiceMock<MockPrefs>* prefs_;  // Shortcut to fake_system_state_->mock_prefs().
@@ -204,7 +235,7 @@
                         nullptr,
                         nullptr,
                         fetcher.release(),
-                        false /* is_interactive */);
+                        false /* interactive */);
   EXPECT_CALL(*prefs_, GetInt64(kPrefsDeltaUpdateFailures, _)).Times(0);
   attempter_.ActionCompleted(nullptr, &action, ErrorCode::kSuccess);
   EXPECT_EQ(UpdateStatus::FINALIZING, attempter_.status());
@@ -305,8 +336,8 @@
 TEST_F(UpdateAttempterTest, ActionCompletedOmahaRequestTest) {
   unique_ptr<MockHttpFetcher> fetcher(new MockHttpFetcher("", 0, nullptr));
   fetcher->FailTransfer(500);  // Sets the HTTP response code.
-  OmahaRequestAction action(&fake_system_state_, nullptr,
-                            std::move(fetcher), false);
+  OmahaRequestAction action(
+      &fake_system_state_, nullptr, std::move(fetcher), false);
   ObjectCollectorAction<OmahaResponse> collector_action;
   BondActions(&action, &collector_action);
   OmahaResponse response;
@@ -331,29 +362,27 @@
 }
 
 TEST_F(UpdateAttempterTest, GetErrorCodeForActionTest) {
-  extern ErrorCode GetErrorCodeForAction(AbstractAction* action,
-                                              ErrorCode code);
   EXPECT_EQ(ErrorCode::kSuccess,
             GetErrorCodeForAction(nullptr, ErrorCode::kSuccess));
 
   FakeSystemState fake_system_state;
-  OmahaRequestAction omaha_request_action(&fake_system_state, nullptr,
-                                          nullptr, false);
+  OmahaRequestAction omaha_request_action(
+      &fake_system_state, nullptr, nullptr, false);
   EXPECT_EQ(ErrorCode::kOmahaRequestError,
             GetErrorCodeForAction(&omaha_request_action, ErrorCode::kError));
   OmahaResponseHandlerAction omaha_response_handler_action(&fake_system_state_);
-  EXPECT_EQ(ErrorCode::kOmahaResponseHandlerError,
-            GetErrorCodeForAction(&omaha_response_handler_action,
-                                  ErrorCode::kError));
+  EXPECT_EQ(
+      ErrorCode::kOmahaResponseHandlerError,
+      GetErrorCodeForAction(&omaha_response_handler_action, ErrorCode::kError));
   FilesystemVerifierAction filesystem_verifier_action;
-  EXPECT_EQ(ErrorCode::kFilesystemVerifierError,
-            GetErrorCodeForAction(&filesystem_verifier_action,
-                                  ErrorCode::kError));
+  EXPECT_EQ(
+      ErrorCode::kFilesystemVerifierError,
+      GetErrorCodeForAction(&filesystem_verifier_action, ErrorCode::kError));
   PostinstallRunnerAction postinstall_runner_action(
       fake_system_state.fake_boot_control(), fake_system_state.fake_hardware());
-  EXPECT_EQ(ErrorCode::kPostinstallRunnerError,
-            GetErrorCodeForAction(&postinstall_runner_action,
-                                  ErrorCode::kError));
+  EXPECT_EQ(
+      ErrorCode::kPostinstallRunnerError,
+      GetErrorCodeForAction(&postinstall_runner_action, ErrorCode::kError));
   MockAction action_mock;
   EXPECT_CALL(action_mock, Type()).WillOnce(Return("MockAction"));
   EXPECT_EQ(ErrorCode::kError,
@@ -367,15 +396,15 @@
   attempter_.DisableDeltaUpdateIfNeeded();
   EXPECT_TRUE(attempter_.omaha_request_params_->delta_okay());
   EXPECT_CALL(*prefs_, GetInt64(kPrefsDeltaUpdateFailures, _))
-      .WillOnce(DoAll(
-          SetArgPointee<1>(UpdateAttempter::kMaxDeltaUpdateFailures - 1),
-          Return(true)));
+      .WillOnce(
+          DoAll(SetArgPointee<1>(UpdateAttempter::kMaxDeltaUpdateFailures - 1),
+                Return(true)));
   attempter_.DisableDeltaUpdateIfNeeded();
   EXPECT_TRUE(attempter_.omaha_request_params_->delta_okay());
   EXPECT_CALL(*prefs_, GetInt64(kPrefsDeltaUpdateFailures, _))
-      .WillOnce(DoAll(
-          SetArgPointee<1>(UpdateAttempter::kMaxDeltaUpdateFailures),
-          Return(true)));
+      .WillOnce(
+          DoAll(SetArgPointee<1>(UpdateAttempter::kMaxDeltaUpdateFailures),
+                Return(true)));
   attempter_.DisableDeltaUpdateIfNeeded();
   EXPECT_FALSE(attempter_.omaha_request_params_->delta_okay());
   EXPECT_CALL(*prefs_, GetInt64(_, _)).Times(0);
@@ -388,16 +417,17 @@
       .WillOnce(Return(false))
       .WillOnce(DoAll(SetArgPointee<1>(-1), Return(true)))
       .WillOnce(DoAll(SetArgPointee<1>(1), Return(true)))
-      .WillOnce(DoAll(
-          SetArgPointee<1>(UpdateAttempter::kMaxDeltaUpdateFailures),
-          Return(true)));
+      .WillOnce(
+          DoAll(SetArgPointee<1>(UpdateAttempter::kMaxDeltaUpdateFailures),
+                Return(true)));
   EXPECT_CALL(*prefs_, SetInt64(Ne(kPrefsDeltaUpdateFailures), _))
       .WillRepeatedly(Return(true));
   EXPECT_CALL(*prefs_, SetInt64(kPrefsDeltaUpdateFailures, 1)).Times(2);
   EXPECT_CALL(*prefs_, SetInt64(kPrefsDeltaUpdateFailures, 2));
-  EXPECT_CALL(*prefs_, SetInt64(kPrefsDeltaUpdateFailures,
-                               UpdateAttempter::kMaxDeltaUpdateFailures + 1));
-  for (int i = 0; i < 4; i ++)
+  EXPECT_CALL(*prefs_,
+              SetInt64(kPrefsDeltaUpdateFailures,
+                       UpdateAttempter::kMaxDeltaUpdateFailures + 1));
+  for (int i = 0; i < 4; i++)
     attempter_.MarkDeltaUpdateFailure();
 }
 
@@ -418,14 +448,13 @@
 
 TEST_F(UpdateAttempterTest, ScheduleErrorEventActionTest) {
   EXPECT_CALL(*processor_,
-              EnqueueAction(Property(&AbstractAction::Type,
-                                     OmahaRequestAction::StaticType())));
+              EnqueueAction(Pointee(Property(
+                  &AbstractAction::Type, OmahaRequestAction::StaticType()))));
   EXPECT_CALL(*processor_, StartProcessing());
   ErrorCode err = ErrorCode::kError;
   EXPECT_CALL(*fake_system_state_.mock_payload_state(), UpdateFailed(err));
-  attempter_.error_event_.reset(new OmahaEvent(OmahaEvent::kTypeUpdateComplete,
-                                               OmahaEvent::kResultError,
-                                               err));
+  attempter_.error_event_.reset(new OmahaEvent(
+      OmahaEvent::kTypeUpdateComplete, OmahaEvent::kResultError, err));
   attempter_.ScheduleErrorEventAction();
   EXPECT_EQ(UpdateStatus::REPORTING_ERROR_EVENT, attempter_.status());
 }
@@ -433,22 +462,26 @@
 namespace {
 // Actions that will be built as part of an update check.
 const string kUpdateActionTypes[] = {  // NOLINT(runtime/string)
-  OmahaRequestAction::StaticType(),
-  OmahaResponseHandlerAction::StaticType(),
-  OmahaRequestAction::StaticType(),
-  DownloadAction::StaticType(),
-  OmahaRequestAction::StaticType(),
-  FilesystemVerifierAction::StaticType(),
-  PostinstallRunnerAction::StaticType(),
-  OmahaRequestAction::StaticType()
-};
+    OmahaRequestAction::StaticType(),
+    OmahaResponseHandlerAction::StaticType(),
+    UpdateBootFlagsAction::StaticType(),
+    OmahaRequestAction::StaticType(),
+    DownloadAction::StaticType(),
+    OmahaRequestAction::StaticType(),
+    FilesystemVerifierAction::StaticType(),
+    PostinstallRunnerAction::StaticType(),
+    OmahaRequestAction::StaticType()};
 
 // Actions that will be built as part of a user-initiated rollback.
-const string kRollbackActionTypes[] = {  // NOLINT(runtime/string)
-  InstallPlanAction::StaticType(),
-  PostinstallRunnerAction::StaticType(),
+const string kRollbackActionTypes[] = {  // NOLINT(runtime/string)
+    InstallPlanAction::StaticType(),
+    PostinstallRunnerAction::StaticType(),
 };
 
+const StagingSchedule kValidStagingSchedule = {
+    {4, 10}, {10, 40}, {19, 70}, {26, 100}};
+
 }  // namespace
 
 void UpdateAttempterTest::UpdateTestStart() {
@@ -458,7 +491,8 @@
   // point by calling RefreshDevicePolicy.
   auto device_policy = std::make_unique<policy::MockDevicePolicy>();
   EXPECT_CALL(*device_policy, LoadPolicy())
-      .Times(testing::AtLeast(1)).WillRepeatedly(Return(true));
+      .Times(testing::AtLeast(1))
+      .WillRepeatedly(Return(true));
   attempter_.policy_provider_.reset(
       new policy::PolicyProvider(std::move(device_policy)));
 
@@ -466,13 +500,13 @@
     InSequence s;
     for (size_t i = 0; i < arraysize(kUpdateActionTypes); ++i) {
       EXPECT_CALL(*processor_,
-                  EnqueueAction(Property(&AbstractAction::Type,
-                                         kUpdateActionTypes[i])));
+                  EnqueueAction(Pointee(
+                      Property(&AbstractAction::Type, kUpdateActionTypes[i]))));
     }
     EXPECT_CALL(*processor_, StartProcessing());
   }
 
-  attempter_.Update("", "", "", "", false, false);
+  attempter_.Update("", "", "", "", false, false, false);
   loop_.PostTask(FROM_HERE,
                  base::Bind(&UpdateAttempterTest::UpdateTestVerify,
                             base::Unretained(this)));
@@ -481,37 +515,25 @@
 void UpdateAttempterTest::UpdateTestVerify() {
   EXPECT_EQ(0, attempter_.http_response_code());
   EXPECT_EQ(&attempter_, processor_->delegate());
-  EXPECT_EQ(arraysize(kUpdateActionTypes), attempter_.actions_.size());
-  for (size_t i = 0; i < arraysize(kUpdateActionTypes); ++i) {
-    EXPECT_EQ(kUpdateActionTypes[i], attempter_.actions_[i]->Type());
-  }
-  EXPECT_EQ(attempter_.response_handler_action_.get(),
-            attempter_.actions_[1].get());
-  AbstractAction* action_3 = attempter_.actions_[3].get();
-  ASSERT_NE(nullptr, action_3);
-  ASSERT_EQ(DownloadAction::StaticType(), action_3->Type());
-  DownloadAction* download_action = static_cast<DownloadAction*>(action_3);
-  EXPECT_EQ(&attempter_, download_action->delegate());
   EXPECT_EQ(UpdateStatus::CHECKING_FOR_UPDATE, attempter_.status());
   loop_.BreakLoop();
 }
 
-void UpdateAttempterTest::RollbackTestStart(
-    bool enterprise_rollback, bool valid_slot) {
+void UpdateAttempterTest::RollbackTestStart(bool enterprise_rollback,
+                                            bool valid_slot) {
   // Create a device policy so that we can change settings.
   auto device_policy = std::make_unique<policy::MockDevicePolicy>();
   EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true));
   fake_system_state_.set_device_policy(device_policy.get());
   if (enterprise_rollback) {
     // We return an empty owner as this is an enterprise.
-    EXPECT_CALL(*device_policy, GetOwner(_)).WillRepeatedly(
-        DoAll(SetArgPointee<0>(string("")),
-        Return(true)));
+    EXPECT_CALL(*device_policy, GetOwner(_))
+        .WillRepeatedly(DoAll(SetArgPointee<0>(string("")), Return(true)));
   } else {
     // We return a fake owner as this is an owned consumer device.
-    EXPECT_CALL(*device_policy, GetOwner(_)).WillRepeatedly(
-        DoAll(SetArgPointee<0>(string("fake.mail@fake.com")),
-        Return(true)));
+    EXPECT_CALL(*device_policy, GetOwner(_))
+        .WillRepeatedly(DoAll(SetArgPointee<0>(string("fake.mail@fake.com")),
+                              Return(true)));
   }
 
   attempter_.policy_provider_.reset(
@@ -530,15 +552,15 @@
   // We only allow rollback on devices that are not enterprise enrolled and
   // which have a valid slot to rollback to.
   if (!enterprise_rollback && valid_slot) {
-     is_rollback_allowed = true;
+    is_rollback_allowed = true;
   }
 
   if (is_rollback_allowed) {
     InSequence s;
     for (size_t i = 0; i < arraysize(kRollbackActionTypes); ++i) {
       EXPECT_CALL(*processor_,
-                  EnqueueAction(Property(&AbstractAction::Type,
-                                         kRollbackActionTypes[i])));
+                  EnqueueAction(Pointee(Property(&AbstractAction::Type,
+                                                 kRollbackActionTypes[i]))));
     }
     EXPECT_CALL(*processor_, StartProcessing());
 
@@ -555,19 +577,9 @@
 void UpdateAttempterTest::RollbackTestVerify() {
   // Verifies the actions that were enqueued.
   EXPECT_EQ(&attempter_, processor_->delegate());
-  EXPECT_EQ(arraysize(kRollbackActionTypes), attempter_.actions_.size());
-  for (size_t i = 0; i < arraysize(kRollbackActionTypes); ++i) {
-    EXPECT_EQ(kRollbackActionTypes[i], attempter_.actions_[i]->Type());
-  }
   EXPECT_EQ(UpdateStatus::ATTEMPTING_ROLLBACK, attempter_.status());
-  AbstractAction* action_0 = attempter_.actions_[0].get();
-  ASSERT_NE(nullptr, action_0);
-  ASSERT_EQ(InstallPlanAction::StaticType(), action_0->Type());
-  InstallPlanAction* install_plan_action =
-      static_cast<InstallPlanAction*>(action_0);
-  InstallPlan* install_plan = install_plan_action->install_plan();
-  EXPECT_EQ(0U, install_plan->partitions.size());
-  EXPECT_EQ(install_plan->powerwash_required, true);
+  EXPECT_EQ(0U, attempter_.install_plan_->partitions.size());
+  EXPECT_EQ(attempter_.install_plan_->powerwash_required, true);
   loop_.BreakLoop();
 }
 
@@ -580,7 +592,8 @@
   loop_.PostTask(FROM_HERE,
                  base::Bind(&UpdateAttempterTest::RollbackTestStart,
                             base::Unretained(this),
-                            false, true));
+                            false,
+                            true));
   loop_.Run();
 }
 
@@ -588,7 +601,8 @@
   loop_.PostTask(FROM_HERE,
                  base::Bind(&UpdateAttempterTest::RollbackTestStart,
                             base::Unretained(this),
-                            false, false));
+                            false,
+                            false));
   loop_.Run();
 }
 
@@ -596,14 +610,15 @@
   loop_.PostTask(FROM_HERE,
                  base::Bind(&UpdateAttempterTest::RollbackTestStart,
                             base::Unretained(this),
-                            true, true));
+                            true,
+                            true));
   loop_.Run();
 }
 
 void UpdateAttempterTest::PingOmahaTestStart() {
   EXPECT_CALL(*processor_,
-              EnqueueAction(Property(&AbstractAction::Type,
-                                     OmahaRequestAction::StaticType())));
+              EnqueueAction(Pointee(Property(
+                  &AbstractAction::Type, OmahaRequestAction::StaticType()))));
   EXPECT_CALL(*processor_, StartProcessing());
   attempter_.PingOmaha();
   ScheduleQuitMainLoop();
@@ -637,10 +652,8 @@
 }
 
 TEST_F(UpdateAttempterTest, CreatePendingErrorEventResumedTest) {
-  OmahaResponseHandlerAction *response_action =
-      new OmahaResponseHandlerAction(&fake_system_state_);
-  response_action->install_plan_.is_resume = true;
-  attempter_.response_handler_action_.reset(response_action);
+  attempter_.install_plan_.reset(new InstallPlan);
+  attempter_.install_plan_->is_resume = true;
   MockAction action;
   const ErrorCode kCode = ErrorCode::kInstallDeviceOpenError;
   attempter_.CreatePendingErrorEvent(&action, kCode);
@@ -648,10 +661,9 @@
   EXPECT_EQ(OmahaEvent::kTypeUpdateComplete, attempter_.error_event_->type);
   EXPECT_EQ(OmahaEvent::kResultError, attempter_.error_event_->result);
   EXPECT_EQ(
-      static_cast<ErrorCode>(
-          static_cast<int>(kCode) |
-          static_cast<int>(ErrorCode::kResumedFlag) |
-          static_cast<int>(ErrorCode::kTestOmahaUrlFlag)),
+      static_cast<ErrorCode>(static_cast<int>(kCode) |
+                             static_cast<int>(ErrorCode::kResumedFlag) |
+                             static_cast<int>(ErrorCode::kTestOmahaUrlFlag)),
       attempter_.error_event_->error_code);
 }
 
@@ -694,7 +706,7 @@
   fake_system_state_.set_p2p_manager(&mock_p2p_manager);
   mock_p2p_manager.fake().SetP2PEnabled(false);
   EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()).Times(0);
-  attempter_.Update("", "", "", "", false, false);
+  attempter_.Update("", "", "", "", false, false, false);
   EXPECT_FALSE(actual_using_p2p_for_downloading_);
   EXPECT_FALSE(actual_using_p2p_for_sharing());
   ScheduleQuitMainLoop();
@@ -716,7 +728,7 @@
   mock_p2p_manager.fake().SetEnsureP2PRunningResult(false);
   mock_p2p_manager.fake().SetPerformHousekeepingResult(false);
   EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()).Times(0);
-  attempter_.Update("", "", "", "", false, false);
+  attempter_.Update("", "", "", "", false, false, false);
   EXPECT_FALSE(actual_using_p2p_for_downloading());
   EXPECT_FALSE(actual_using_p2p_for_sharing());
   ScheduleQuitMainLoop();
@@ -739,7 +751,7 @@
   mock_p2p_manager.fake().SetEnsureP2PRunningResult(true);
   mock_p2p_manager.fake().SetPerformHousekeepingResult(false);
   EXPECT_CALL(mock_p2p_manager, PerformHousekeeping());
-  attempter_.Update("", "", "", "", false, false);
+  attempter_.Update("", "", "", "", false, false, false);
   EXPECT_FALSE(actual_using_p2p_for_downloading());
   EXPECT_FALSE(actual_using_p2p_for_sharing());
   ScheduleQuitMainLoop();
@@ -761,7 +773,7 @@
   mock_p2p_manager.fake().SetEnsureP2PRunningResult(true);
   mock_p2p_manager.fake().SetPerformHousekeepingResult(true);
   EXPECT_CALL(mock_p2p_manager, PerformHousekeeping());
-  attempter_.Update("", "", "", "", false, false);
+  attempter_.Update("", "", "", "", false, false, false);
   EXPECT_TRUE(actual_using_p2p_for_downloading());
   EXPECT_TRUE(actual_using_p2p_for_sharing());
   ScheduleQuitMainLoop();
@@ -784,7 +796,13 @@
   mock_p2p_manager.fake().SetEnsureP2PRunningResult(true);
   mock_p2p_manager.fake().SetPerformHousekeepingResult(true);
   EXPECT_CALL(mock_p2p_manager, PerformHousekeeping());
-  attempter_.Update("", "", "", "", false, true /* interactive */);
+  attempter_.Update("",
+                    "",
+                    "",
+                    "",
+                    false,
+                    false,
+                    /*interactive=*/true);
   EXPECT_FALSE(actual_using_p2p_for_downloading());
   EXPECT_TRUE(actual_using_p2p_for_sharing());
   ScheduleQuitMainLoop();
@@ -808,14 +826,13 @@
   fake_system_state_.set_device_policy(device_policy.get());
 
   EXPECT_CALL(*device_policy, GetScatterFactorInSeconds(_))
-      .WillRepeatedly(DoAll(
-          SetArgPointee<0>(scatter_factor_in_seconds),
-          Return(true)));
+      .WillRepeatedly(
+          DoAll(SetArgPointee<0>(scatter_factor_in_seconds), Return(true)));
 
   attempter_.policy_provider_.reset(
       new policy::PolicyProvider(std::move(device_policy)));
 
-  attempter_.Update("", "", "", "", false, false);
+  attempter_.Update("", "", "", "", false, false, false);
   EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds());
 
   ScheduleQuitMainLoop();
@@ -847,14 +864,13 @@
   fake_system_state_.set_device_policy(device_policy.get());
 
   EXPECT_CALL(*device_policy, GetScatterFactorInSeconds(_))
-      .WillRepeatedly(DoAll(
-          SetArgPointee<0>(scatter_factor_in_seconds),
-          Return(true)));
+      .WillRepeatedly(
+          DoAll(SetArgPointee<0>(scatter_factor_in_seconds), Return(true)));
 
   attempter_.policy_provider_.reset(
       new policy::PolicyProvider(std::move(device_policy)));
 
-  attempter_.Update("", "", "", "", false, false);
+  attempter_.Update("", "", "", "", false, false, false);
   EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds());
 
   // Make sure the file still exists.
@@ -870,7 +886,7 @@
   // However, if the count is already 0, it's not decremented. Test that.
   initial_value = 0;
   EXPECT_TRUE(fake_prefs.SetInt64(kPrefsUpdateCheckCount, initial_value));
-  attempter_.Update("", "", "", "", false, false);
+  attempter_.Update("", "", "", "", false, false, false);
   EXPECT_TRUE(fake_prefs.Exists(kPrefsUpdateCheckCount));
   EXPECT_TRUE(fake_prefs.GetInt64(kPrefsUpdateCheckCount, &new_value));
   EXPECT_EQ(initial_value, new_value);
@@ -879,9 +895,11 @@
 }
 
 TEST_F(UpdateAttempterTest, NoScatteringDoneDuringManualUpdateTestStart) {
-  loop_.PostTask(FROM_HERE, base::Bind(
-      &UpdateAttempterTest::NoScatteringDoneDuringManualUpdateTestStart,
-      base::Unretained(this)));
+  loop_.PostTask(
+      FROM_HERE,
+      base::Bind(
+          &UpdateAttempterTest::NoScatteringDoneDuringManualUpdateTestStart,
+          base::Unretained(this)));
   loop_.Run();
 }
 
@@ -895,7 +913,8 @@
   fake_system_state_.fake_hardware()->SetIsOOBEComplete(Time::UnixEpoch());
   fake_system_state_.set_prefs(&fake_prefs);
 
-  EXPECT_TRUE(fake_prefs.SetInt64(kPrefsWallClockWaitPeriod, initial_value));
+  EXPECT_TRUE(
+      fake_prefs.SetInt64(kPrefsWallClockScatteringWaitPeriod, initial_value));
   EXPECT_TRUE(fake_prefs.SetInt64(kPrefsUpdateCheckCount, initial_value));
 
   // make sure scatter_factor is non-zero as scattering is disabled
@@ -907,22 +926,27 @@
   fake_system_state_.set_device_policy(device_policy.get());
 
   EXPECT_CALL(*device_policy, GetScatterFactorInSeconds(_))
-      .WillRepeatedly(DoAll(
-          SetArgPointee<0>(scatter_factor_in_seconds),
-          Return(true)));
+      .WillRepeatedly(
+          DoAll(SetArgPointee<0>(scatter_factor_in_seconds), Return(true)));
 
   attempter_.policy_provider_.reset(
       new policy::PolicyProvider(std::move(device_policy)));
 
   // Trigger an interactive check so we can test that scattering is disabled.
-  attempter_.Update("", "", "", "", false, true);
+  attempter_.Update("",
+                    "",
+                    "",
+                    "",
+                    false,
+                    false,
+                    /*interactive=*/true);
   EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds());
 
   // Make sure scattering is disabled for manual (i.e. user initiated) update
   // checks and all artifacts are removed.
   EXPECT_FALSE(
       attempter_.omaha_request_params_->wall_clock_based_wait_enabled());
-  EXPECT_FALSE(fake_prefs.Exists(kPrefsWallClockWaitPeriod));
+  EXPECT_FALSE(fake_prefs.Exists(kPrefsWallClockScatteringWaitPeriod));
   EXPECT_EQ(0, attempter_.omaha_request_params_->waiting_period().InSeconds());
   EXPECT_FALSE(
       attempter_.omaha_request_params_->update_check_count_wait_enabled());
@@ -931,6 +955,125 @@
   ScheduleQuitMainLoop();
 }
 
+void UpdateAttempterTest::SetUpStagingTest(const StagingSchedule& schedule,
+                                           FakePrefs* prefs) {
+  attempter_.prefs_ = prefs;
+  fake_system_state_.set_prefs(prefs);
+
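+  // Seed scattering prefs and a nonzero scatter factor so the tests can check
+  // that staging overrides and clears them.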
+  int64_t initial_value = 8;
+  EXPECT_TRUE(
+      prefs->SetInt64(kPrefsWallClockScatteringWaitPeriod, initial_value));
+  EXPECT_TRUE(prefs->SetInt64(kPrefsUpdateCheckCount, initial_value));
+  attempter_.scatter_factor_ = TimeDelta::FromSeconds(20);
+
+  auto device_policy = std::make_unique<policy::MockDevicePolicy>();
+  EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true));
+  fake_system_state_.set_device_policy(device_policy.get());
+  EXPECT_CALL(*device_policy, GetDeviceUpdateStagingSchedule(_))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(schedule), Return(true)));
+
+  attempter_.policy_provider_.reset(
+      new policy::PolicyProvider(std::move(device_policy)));
+}
+
+TEST_F(UpdateAttempterTest, StagingSetsPrefsAndTurnsOffScattering) {
+  loop_.PostTask(
+      FROM_HERE,
+      base::Bind(
+          &UpdateAttempterTest::StagingSetsPrefsAndTurnsOffScatteringStart,
+          base::Unretained(this)));
+  loop_.Run();
+}
+
+void UpdateAttempterTest::StagingSetsPrefsAndTurnsOffScatteringStart() {
+  // Tests that staging sets its prefs properly and turns off scattering.
+  fake_system_state_.fake_hardware()->SetIsOOBEComplete(Time::UnixEpoch());
+  FakePrefs fake_prefs;
+  SetUpStagingTest(kValidStagingSchedule, &fake_prefs);
+
+  attempter_.Update("", "", "", "", false, false, false);
+  // Check that prefs have the correct values.
+  int64_t update_count;
+  EXPECT_TRUE(fake_prefs.GetInt64(kPrefsUpdateCheckCount, &update_count));
+  int64_t waiting_time_days;
+  EXPECT_TRUE(fake_prefs.GetInt64(kPrefsWallClockStagingWaitPeriod,
+                                  &waiting_time_days));
+  EXPECT_GT(waiting_time_days, 0);
+  // Update count should have been decremented.
+  EXPECT_EQ(7, update_count);
+  // Check that Omaha parameters were updated correctly.
+  EXPECT_TRUE(
+      attempter_.omaha_request_params_->update_check_count_wait_enabled());
+  EXPECT_TRUE(
+      attempter_.omaha_request_params_->wall_clock_based_wait_enabled());
+  EXPECT_EQ(waiting_time_days,
+            attempter_.omaha_request_params_->waiting_period().InDays());
+  // Check class variables.
+  EXPECT_EQ(waiting_time_days, attempter_.staging_wait_time_.InDays());
+  EXPECT_EQ(kValidStagingSchedule, attempter_.staging_schedule_);
+  // Check that scattering is turned off.
+  EXPECT_EQ(0, attempter_.scatter_factor_.InSeconds());
+  EXPECT_FALSE(fake_prefs.Exists(kPrefsWallClockScatteringWaitPeriod));
+
+  ScheduleQuitMainLoop();
+}
+
+void UpdateAttempterTest::CheckStagingOff() {
+  // Check that all prefs were removed.
+  EXPECT_FALSE(attempter_.prefs_->Exists(kPrefsUpdateCheckCount));
+  EXPECT_FALSE(attempter_.prefs_->Exists(kPrefsWallClockScatteringWaitPeriod));
+  EXPECT_FALSE(attempter_.prefs_->Exists(kPrefsWallClockStagingWaitPeriod));
+  // Check that the Omaha parameters have the correct value.
+  EXPECT_EQ(0, attempter_.omaha_request_params_->waiting_period().InDays());
+  EXPECT_EQ(attempter_.omaha_request_params_->waiting_period(),
+            attempter_.staging_wait_time_);
+  EXPECT_FALSE(
+      attempter_.omaha_request_params_->update_check_count_wait_enabled());
+  EXPECT_FALSE(
+      attempter_.omaha_request_params_->wall_clock_based_wait_enabled());
+  // Check that scattering is turned off too.
+  EXPECT_EQ(0, attempter_.scatter_factor_.InSeconds());
+}
+
+TEST_F(UpdateAttempterTest, StagingOffIfInteractive) {
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::StagingOffIfInteractiveStart,
+                            base::Unretained(this)));
+  loop_.Run();
+}
+
+void UpdateAttempterTest::StagingOffIfInteractiveStart() {
+  // Tests that staging is turned off when an interactive update is requested.
+  fake_system_state_.fake_hardware()->SetIsOOBEComplete(Time::UnixEpoch());
+  FakePrefs fake_prefs;
+  SetUpStagingTest(kValidStagingSchedule, &fake_prefs);
+
+  attempter_.Update("", "", "", "", false, false, /* interactive = */ true);
+  CheckStagingOff();
+
+  ScheduleQuitMainLoop();
+}
+
+TEST_F(UpdateAttempterTest, StagingOffIfOobe) {
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::StagingOffIfOobeStart,
+                            base::Unretained(this)));
+  loop_.Run();
+}
+
+void UpdateAttempterTest::StagingOffIfOobeStart() {
+  // Tests that staging is turned off if OOBE hasn't been completed.
+  fake_system_state_.fake_hardware()->SetIsOOBEEnabled(true);
+  fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
+  FakePrefs fake_prefs;
+  SetUpStagingTest(kValidStagingSchedule, &fake_prefs);
+
+  attempter_.Update("", "", "", "", false, false, /* interactive = */ true);
+  CheckStagingOff();
+
+  ScheduleQuitMainLoop();
+}
+
 // Checks that we only report daily metrics at most every 24 hours.
 TEST_F(UpdateAttempterTest, ReportDailyMetrics) {
   FakeClock fake_clock;
@@ -1029,6 +1172,21 @@
   EXPECT_FALSE(attempter_.IsAnyUpdateSourceAllowed());
 }
 
+TEST_F(UpdateAttempterTest, CheckForUpdateAUDlcTest) {
+  fake_system_state_.fake_hardware()->SetIsOfficialBuild(true);
+  fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(false);
+
+  const string dlc_module_id = "a_dlc_module_id";
+  vector<string> dlc_module_ids = {dlc_module_id};
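+  // Have the mock dlcservice report one installed DLC module so that
+  // CheckForUpdate picks it up into |dlc_module_ids_|.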
+  ON_CALL(mock_dlcservice_, GetInstalled(testing::_))
+      .WillByDefault(DoAll(testing::SetArgPointee<0>(dlc_module_ids),
+                           testing::Return(true)));
+
+  attempter_.CheckForUpdate("", "autest", UpdateAttemptFlags::kNone);
+  EXPECT_EQ(attempter_.dlc_module_ids_.size(), 1);
+  EXPECT_EQ(attempter_.dlc_module_ids_[0], dlc_module_id);
+}
+
 TEST_F(UpdateAttempterTest, CheckForUpdateAUTest) {
   fake_system_state_.fake_hardware()->SetIsOfficialBuild(true);
   fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(false);
@@ -1043,38 +1201,93 @@
   EXPECT_EQ(constants::kOmahaDefaultAUTestURL, attempter_.forced_omaha_url());
 }
 
+TEST_F(UpdateAttempterTest, CheckForInstallTest) {
+  fake_system_state_.fake_hardware()->SetIsOfficialBuild(true);
+  fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(false);
+  attempter_.CheckForInstall({}, "autest");
+  EXPECT_EQ(constants::kOmahaDefaultAUTestURL, attempter_.forced_omaha_url());
+
+  attempter_.CheckForInstall({}, "autest-scheduled");
+  EXPECT_EQ(constants::kOmahaDefaultAUTestURL, attempter_.forced_omaha_url());
+
+  attempter_.CheckForInstall({}, "http://omaha.phishing");
+  EXPECT_EQ("", attempter_.forced_omaha_url());
+}
+
+TEST_F(UpdateAttempterTest, InstallSetsStatusIdle) {
+  attempter_.CheckForInstall({}, "http://foo.bar");
+  attempter_.status_ = UpdateStatus::DOWNLOADING;
+  EXPECT_TRUE(attempter_.is_install_);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
+  UpdateEngineStatus status;
+  attempter_.GetStatus(&status);
+  // Should set status to idle after an install operation.
+  EXPECT_EQ(UpdateStatus::IDLE, status.status);
+}
+
+TEST_F(UpdateAttempterTest, RollbackAfterInstall) {
+  attempter_.is_install_ = true;
+  attempter_.Rollback(false);
+  EXPECT_FALSE(attempter_.is_install_);
+}
+
+TEST_F(UpdateAttempterTest, UpdateAfterInstall) {
+  attempter_.is_install_ = true;
+  attempter_.CheckForUpdate("", "", UpdateAttemptFlags::kNone);
+  EXPECT_FALSE(attempter_.is_install_);
+}
+
 TEST_F(UpdateAttempterTest, TargetVersionPrefixSetAndReset) {
-  attempter_.CalculateUpdateParams("", "", "", "1234", false, false);
+  attempter_.CalculateUpdateParams("", "", "", "1234", false, false, false);
   EXPECT_EQ("1234",
             fake_system_state_.request_params()->target_version_prefix());
 
-  attempter_.CalculateUpdateParams("", "", "", "", false, false);
+  attempter_.CalculateUpdateParams("", "", "", "", false, false, false);
   EXPECT_TRUE(
       fake_system_state_.request_params()->target_version_prefix().empty());
 }
 
+TEST_F(UpdateAttempterTest, RollbackAllowedSetAndReset) {
+  attempter_.CalculateUpdateParams("",
+                                   "",
+                                   "",
+                                   "1234",
+                                   /*rollback_allowed=*/true,
+                                   false,
+                                   false);
+  EXPECT_TRUE(fake_system_state_.request_params()->rollback_allowed());
+
+  attempter_.CalculateUpdateParams("",
+                                   "",
+                                   "",
+                                   "1234",
+                                   /*rollback_allowed=*/false,
+                                   false,
+                                   false);
+  EXPECT_FALSE(fake_system_state_.request_params()->rollback_allowed());
+}
+
 TEST_F(UpdateAttempterTest, UpdateDeferredByPolicyTest) {
   // Construct an OmahaResponseHandlerAction that has processed an InstallPlan,
   // but the update is being deferred by the Policy.
-  OmahaResponseHandlerAction* response_action =
-      new OmahaResponseHandlerAction(&fake_system_state_);
-  response_action->install_plan_.version = "a.b.c.d";
-  response_action->install_plan_.system_version = "b.c.d.e";
-  response_action->install_plan_.payloads.push_back(
+  OmahaResponseHandlerAction response_action(&fake_system_state_);
+  response_action.install_plan_.version = "a.b.c.d";
+  response_action.install_plan_.system_version = "b.c.d.e";
+  response_action.install_plan_.payloads.push_back(
       {.size = 1234ULL, .type = InstallPayloadType::kFull});
-  attempter_.response_handler_action_.reset(response_action);
   // Inform the UpdateAttempter that the OmahaResponseHandlerAction has
   // completed, with the deferred-update error code.
   attempter_.ActionCompleted(
-      nullptr, response_action, ErrorCode::kOmahaUpdateDeferredPerPolicy);
+      nullptr, &response_action, ErrorCode::kOmahaUpdateDeferredPerPolicy);
   {
     UpdateEngineStatus status;
     attempter_.GetStatus(&status);
     EXPECT_EQ(UpdateStatus::UPDATE_AVAILABLE, status.status);
-    EXPECT_EQ(response_action->install_plan_.version, status.new_version);
-    EXPECT_EQ(response_action->install_plan_.system_version,
+    EXPECT_TRUE(attempter_.install_plan_);
+    EXPECT_EQ(attempter_.install_plan_->version, status.new_version);
+    EXPECT_EQ(attempter_.install_plan_->system_version,
               status.new_system_version);
-    EXPECT_EQ(response_action->install_plan_.payloads[0].size,
+    EXPECT_EQ(attempter_.install_plan_->payloads[0].size,
               status.new_size_bytes);
   }
   // An "error" event should have been created to tell Omaha that the update is
@@ -1093,10 +1306,10 @@
     UpdateEngineStatus status;
     attempter_.GetStatus(&status);
     EXPECT_EQ(UpdateStatus::REPORTING_ERROR_EVENT, status.status);
-    EXPECT_EQ(response_action->install_plan_.version, status.new_version);
-    EXPECT_EQ(response_action->install_plan_.system_version,
+    EXPECT_EQ(response_action.install_plan_.version, status.new_version);
+    EXPECT_EQ(response_action.install_plan_.system_version,
               status.new_system_version);
-    EXPECT_EQ(response_action->install_plan_.payloads[0].size,
+    EXPECT_EQ(response_action.install_plan_.payloads[0].size,
               status.new_size_bytes);
   }
 }
@@ -1118,6 +1331,20 @@
             attempter_.GetCurrentUpdateAttemptFlags());
 }
 
+TEST_F(UpdateAttempterTest, RollbackNotAllowed) {
+  UpdateCheckParams params = {.updates_enabled = true,
+                              .rollback_allowed = false};
+  attempter_.OnUpdateScheduled(EvalStatus::kSucceeded, params);
+  EXPECT_FALSE(fake_system_state_.request_params()->rollback_allowed());
+}
+
+TEST_F(UpdateAttempterTest, RollbackAllowed) {
+  UpdateCheckParams params = {.updates_enabled = true,
+                              .rollback_allowed = true};
+  attempter_.OnUpdateScheduled(EvalStatus::kSucceeded, params);
+  EXPECT_TRUE(fake_system_state_.request_params()->rollback_allowed());
+}
+
 TEST_F(UpdateAttempterTest, InteractiveUpdateUsesPassedRestrictions) {
   attempter_.SetUpdateAttemptFlags(UpdateAttemptFlags::kFlagRestrictDownload);
 
@@ -1139,4 +1366,204 @@
             attempter_.GetCurrentUpdateAttemptFlags());
 }
 
+void UpdateAttempterTest::ResetRollbackHappenedStart(bool is_consumer,
+                                                     bool is_policy_loaded,
+                                                     bool expected_reset) {
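+  // Pretend a rollback happened earlier; then, depending on whether the device
+  // looks like a consumer device and/or has a loaded device policy, Update()
+  // should clear the rollback-happened flag exactly when |expected_reset| is
+  // true.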
+  EXPECT_CALL(*fake_system_state_.mock_payload_state(), GetRollbackHappened())
+      .WillRepeatedly(Return(true));
+  auto mock_policy_provider =
+      std::make_unique<NiceMock<policy::MockPolicyProvider>>();
+  EXPECT_CALL(*mock_policy_provider, IsConsumerDevice())
+      .WillRepeatedly(Return(is_consumer));
+  EXPECT_CALL(*mock_policy_provider, device_policy_is_loaded())
+      .WillRepeatedly(Return(is_policy_loaded));
+  const policy::MockDevicePolicy device_policy;
+  EXPECT_CALL(*mock_policy_provider, GetDevicePolicy())
+      .WillRepeatedly(ReturnRef(device_policy));
+  EXPECT_CALL(*fake_system_state_.mock_payload_state(),
+              SetRollbackHappened(false))
+      .Times(expected_reset ? 1 : 0);
+  attempter_.policy_provider_ = std::move(mock_policy_provider);
+  attempter_.Update("", "", "", "", false, false, false);
+  ScheduleQuitMainLoop();
+}
+
+TEST_F(UpdateAttempterTest, ResetRollbackHappenedOobe) {
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::ResetRollbackHappenedStart,
+                            base::Unretained(this),
+                            /*is_consumer=*/false,
+                            /*is_policy_loaded=*/false,
+                            /*expected_reset=*/false));
+  loop_.Run();
+}
+
+TEST_F(UpdateAttempterTest, ResetRollbackHappenedConsumer) {
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::ResetRollbackHappenedStart,
+                            base::Unretained(this),
+                            /*is_consumer=*/true,
+                            /*is_policy_loaded=*/false,
+                            /*expected_reset=*/true));
+  loop_.Run();
+}
+
+TEST_F(UpdateAttempterTest, ResetRollbackHappenedEnterprise) {
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::ResetRollbackHappenedStart,
+                            base::Unretained(this),
+                            /*is_consumer=*/false,
+                            /*is_policy_loaded=*/true,
+                            /*expected_reset=*/true));
+  loop_.Run();
+}
+
+TEST_F(UpdateAttempterTest, SetRollbackHappenedRollback) {
+  attempter_.install_plan_.reset(new InstallPlan);
+  attempter_.install_plan_->is_rollback = true;
+
+  EXPECT_CALL(*fake_system_state_.mock_payload_state(),
+              SetRollbackHappened(true))
+      .Times(1);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
+}
+
+TEST_F(UpdateAttempterTest, SetRollbackHappenedNotRollback) {
+  attempter_.install_plan_.reset(new InstallPlan);
+  attempter_.install_plan_->is_rollback = false;
+
+  EXPECT_CALL(*fake_system_state_.mock_payload_state(),
+              SetRollbackHappened(true))
+      .Times(0);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
+}
+
+TEST_F(UpdateAttempterTest, RollbackMetricsRollbackSuccess) {
+  attempter_.install_plan_.reset(new InstallPlan);
+  attempter_.install_plan_->is_rollback = true;
+  attempter_.install_plan_->version = kRollbackVersion;
+
+  EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
+              ReportEnterpriseRollbackMetrics(true, kRollbackVersion))
+      .Times(1);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
+}
+
+TEST_F(UpdateAttempterTest, RollbackMetricsNotRollbackSuccess) {
+  attempter_.install_plan_.reset(new InstallPlan);
+  attempter_.install_plan_->is_rollback = false;
+  attempter_.install_plan_->version = kRollbackVersion;
+
+  EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
+              ReportEnterpriseRollbackMetrics(_, _))
+      .Times(0);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
+}
+
+TEST_F(UpdateAttempterTest, RollbackMetricsRollbackFailure) {
+  attempter_.install_plan_.reset(new InstallPlan);
+  attempter_.install_plan_->is_rollback = true;
+  attempter_.install_plan_->version = kRollbackVersion;
+
+  EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
+              ReportEnterpriseRollbackMetrics(false, kRollbackVersion))
+      .Times(1);
+  MockAction action;
+  attempter_.CreatePendingErrorEvent(&action, ErrorCode::kRollbackNotPossible);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kRollbackNotPossible);
+}
+
+TEST_F(UpdateAttempterTest, RollbackMetricsNotRollbackFailure) {
+  attempter_.install_plan_.reset(new InstallPlan);
+  attempter_.install_plan_->is_rollback = false;
+  attempter_.install_plan_->version = kRollbackVersion;
+
+  EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
+              ReportEnterpriseRollbackMetrics(_, _))
+      .Times(0);
+  MockAction action;
+  attempter_.CreatePendingErrorEvent(&action, ErrorCode::kRollbackNotPossible);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kRollbackNotPossible);
+}
+
+TEST_F(UpdateAttempterTest, TimeToUpdateAppliedMetricFailure) {
+  EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
+              ReportEnterpriseUpdateSeenToDownloadDays(_, _))
+      .Times(0);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kOmahaUpdateDeferredPerPolicy);
+}
+
+TEST_F(UpdateAttempterTest, TimeToUpdateAppliedOnNonEnterprise) {
+  auto device_policy = std::make_unique<policy::MockDevicePolicy>();
+  fake_system_state_.set_device_policy(device_policy.get());
+  // Make device policy return that this is not enterprise enrolled.
+  EXPECT_CALL(*device_policy, IsEnterpriseEnrolled()).WillOnce(Return(false));
+
+  // Ensure that the metric is not recorded.
+  EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
+              ReportEnterpriseUpdateSeenToDownloadDays(_, _))
+      .Times(0);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
+}
+
+TEST_F(UpdateAttempterTest,
+       TimeToUpdateAppliedWithTimeRestrictionMetricSuccess) {
+  constexpr int kDaysToUpdate = 15;
+  auto device_policy = std::make_unique<policy::MockDevicePolicy>();
+  fake_system_state_.set_device_policy(device_policy.get());
+  // Make device policy return that this is enterprise enrolled.
+  EXPECT_CALL(*device_policy, IsEnterpriseEnrolled()).WillOnce(Return(true));
+  // Pretend that there's a time restriction policy in place.
+  EXPECT_CALL(*device_policy, GetDisallowedTimeIntervals(_))
+      .WillOnce(Return(true));
+
+  FakePrefs fake_prefs;
+  Time update_first_seen_at = Time::Now();
+  fake_prefs.SetInt64(kPrefsUpdateFirstSeenAt,
+                      update_first_seen_at.ToInternalValue());
+
+  FakeClock fake_clock;
+  Time update_finished_at =
+      update_first_seen_at + TimeDelta::FromDays(kDaysToUpdate);
+  fake_clock.SetWallclockTime(update_finished_at);
+
+  fake_system_state_.set_clock(&fake_clock);
+  fake_system_state_.set_prefs(&fake_prefs);
+
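+  // The pref records when the update was first seen and the fake wall clock is
+  // set |kDaysToUpdate| days later, so ProcessingDone() should report exactly
+  // that many days.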
+  EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
+              ReportEnterpriseUpdateSeenToDownloadDays(true, kDaysToUpdate))
+      .Times(1);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
+}
+
+TEST_F(UpdateAttempterTest,
+       TimeToUpdateAppliedWithoutTimeRestrictionMetricSuccess) {
+  constexpr int kDaysToUpdate = 15;
+  auto device_policy = std::make_unique<policy::MockDevicePolicy>();
+  fake_system_state_.set_device_policy(device_policy.get());
+  // Make device policy return that this is enterprise enrolled.
+  EXPECT_CALL(*device_policy, IsEnterpriseEnrolled()).WillOnce(Return(true));
+  // Pretend that there's no time restriction policy in place.
+  EXPECT_CALL(*device_policy, GetDisallowedTimeIntervals(_))
+      .WillOnce(Return(false));
+
+  FakePrefs fake_prefs;
+  Time update_first_seen_at = Time::Now();
+  fake_prefs.SetInt64(kPrefsUpdateFirstSeenAt,
+                      update_first_seen_at.ToInternalValue());
+
+  FakeClock fake_clock;
+  Time update_finished_at =
+      update_first_seen_at + TimeDelta::FromDays(kDaysToUpdate);
+  fake_clock.SetWallclockTime(update_finished_at);
+
+  fake_system_state_.set_clock(&fake_clock);
+  fake_system_state_.set_prefs(&fake_prefs);
+
+  EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
+              ReportEnterpriseUpdateSeenToDownloadDays(false, kDaysToUpdate))
+      .Times(1);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
+}
+
 }  // namespace chromeos_update_engine
diff --git a/update_boot_flags_action.cc b/update_boot_flags_action.cc
new file mode 100644
index 0000000..97ef7f2
--- /dev/null
+++ b/update_boot_flags_action.cc
@@ -0,0 +1,68 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_boot_flags_action.h"
+
+#include <base/bind.h>
+#include <base/logging.h>
+
+#include "update_engine/common/boot_control.h"
+
+namespace chromeos_update_engine {
+
+bool UpdateBootFlagsAction::updated_boot_flags_ = false;
+bool UpdateBootFlagsAction::is_running_ = false;
+
+void UpdateBootFlagsAction::PerformAction() {
+  if (is_running_) {
+    LOG(INFO) << "Update boot flags already running, nothing to do.";
+    processor_->ActionComplete(this, ErrorCode::kSuccess);
+    return;
+  }
+  if (updated_boot_flags_) {
+    LOG(INFO) << "Already updated boot flags. Skipping.";
+    processor_->ActionComplete(this, ErrorCode::kSuccess);
+    return;
+  }
+
+  // This is purely best effort. Failures should be logged by Subprocess. Run
+  // the script asynchronously to avoid blocking the event loop regardless of
+  // the script runtime.
+  is_running_ = true;
+  LOG(INFO) << "Marking booted slot as good.";
+  if (!boot_control_->MarkBootSuccessfulAsync(
+          base::Bind(&UpdateBootFlagsAction::CompleteUpdateBootFlags,
+                     base::Unretained(this)))) {
+    CompleteUpdateBootFlags(false);
+  }
+}
+
+void UpdateBootFlagsAction::CompleteUpdateBootFlags(bool successful) {
+  is_running_ = false;
+  if (!successful) {
+    // We ignore the failure for now because if updating the boot flags is
+    // flaky or has a bug in a specific release, then blocking the update can
+    // cause devices to stay behind even though we could have updated the
+    // system and fixed the issue regardless of this failure.
+    //
+    // TODO(ahassani): Add new error code metric for kUpdateBootFlagsFailed.
+    LOG(ERROR) << "Updating boot flags failed, but ignoring its failure.";
+  }
+  updated_boot_flags_ = true;
+  processor_->ActionComplete(this, ErrorCode::kSuccess);
+}
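+
+// A minimal usage sketch (mirroring update_boot_flags_action_unittest.cc): the
+// action is owned and driven by an ActionProcessor, e.g.
+//
+//   ActionProcessor processor;
+//   processor.EnqueueAction(
+//       std::make_unique<UpdateBootFlagsAction>(boot_control));
+//   processor.StartProcessing();
+//
+// where |boot_control| is a BootControlInterface*. CompleteUpdateBootFlags()
+// runs either synchronously, when MarkBootSuccessfulAsync() fails right away,
+// or later from the boot control's asynchronous callback.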
+
+}  // namespace chromeos_update_engine
diff --git a/update_boot_flags_action.h b/update_boot_flags_action.h
new file mode 100644
index 0000000..afa2c3f
--- /dev/null
+++ b/update_boot_flags_action.h
@@ -0,0 +1,59 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_UPDATE_BOOT_FLAGS_ACTION_H_
+#define UPDATE_ENGINE_UPDATE_BOOT_FLAGS_ACTION_H_
+
+#include <string>
+
+#include "update_engine/common/action.h"
+#include "update_engine/common/boot_control_interface.h"
+
+#include <gtest/gtest_prod.h>
+
+namespace chromeos_update_engine {
+
+class UpdateBootFlagsAction : public AbstractAction {
+ public:
+  explicit UpdateBootFlagsAction(BootControlInterface* boot_control)
+      : boot_control_(boot_control) {}
+
+  void PerformAction() override;
+
+  static std::string StaticType() { return "UpdateBootFlagsAction"; }
+  std::string Type() const override { return StaticType(); }
+
+  void CompleteUpdateBootFlags(bool successful);
+
+ private:
+  FRIEND_TEST(UpdateBootFlagsActionTest, SimpleTest);
+  FRIEND_TEST(UpdateBootFlagsActionTest, DoubleActionTest);
+
+  // Originally, both of these flags are false. Once UpdateBootFlags is called,
+  // |is_running_| is set to true. As soon as UpdateBootFlags completes its
+  // asynchronous run, |is_running_| is reset to false and |updated_boot_flags_|
+  // is set to true. From that point on there will be no more changes to these
+  // flags.
+  //
+  // True if we have updated the boot flags.
+  static bool updated_boot_flags_;
+  // True if we are still updating the boot flags.
+  static bool is_running_;
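+  // Because both flags are static, the "already updated" state is shared by
+  // every UpdateBootFlagsAction instance in the process; a second action
+  // completes immediately (see UpdateBootFlagsActionTest.DoubleActionTest).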
+
+  // Used for setting the boot flag.
+  BootControlInterface* boot_control_;
+
+  DISALLOW_COPY_AND_ASSIGN(UpdateBootFlagsAction);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_UPDATE_BOOT_FLAGS_ACTION_H_
diff --git a/update_boot_flags_action_unittest.cc b/update_boot_flags_action_unittest.cc
new file mode 100644
index 0000000..1b2bfa5
--- /dev/null
+++ b/update_boot_flags_action_unittest.cc
@@ -0,0 +1,69 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_boot_flags_action.h"
+
+#include <memory>
+#include <utility>
+
+#include <base/bind.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/fake_system_state.h"
+
+namespace chromeos_update_engine {
+
+class UpdateBootFlagsActionTest : public ::testing::Test {
+ public:
+  FakeSystemState fake_system_state_;
+};
+
+TEST_F(UpdateBootFlagsActionTest, SimpleTest) {
+  auto boot_control = fake_system_state_.fake_boot_control();
+  auto action = std::make_unique<UpdateBootFlagsAction>(boot_control);
+  ActionProcessor processor;
+  processor.EnqueueAction(std::move(action));
+
+  EXPECT_FALSE(UpdateBootFlagsAction::updated_boot_flags_);
+  EXPECT_FALSE(UpdateBootFlagsAction::is_running_);
+  processor.StartProcessing();
+  EXPECT_TRUE(UpdateBootFlagsAction::updated_boot_flags_);
+  EXPECT_FALSE(UpdateBootFlagsAction::is_running_);
+}
+
+TEST_F(UpdateBootFlagsActionTest, DoubleActionTest) {
+  // Reset the static flags.
+  UpdateBootFlagsAction::updated_boot_flags_ = false;
+  UpdateBootFlagsAction::is_running_ = false;
+
+  auto boot_control = fake_system_state_.fake_boot_control();
+  auto action1 = std::make_unique<UpdateBootFlagsAction>(boot_control);
+  auto action2 = std::make_unique<UpdateBootFlagsAction>(boot_control);
+  ActionProcessor processor1, processor2;
+  processor1.EnqueueAction(std::move(action1));
+  processor2.EnqueueAction(std::move(action2));
+
+  EXPECT_FALSE(UpdateBootFlagsAction::updated_boot_flags_);
+  EXPECT_FALSE(UpdateBootFlagsAction::is_running_);
+  processor1.StartProcessing();
+  EXPECT_TRUE(UpdateBootFlagsAction::updated_boot_flags_);
+  EXPECT_FALSE(UpdateBootFlagsAction::is_running_);
+  processor2.StartProcessing();
+  EXPECT_TRUE(UpdateBootFlagsAction::updated_boot_flags_);
+  EXPECT_FALSE(UpdateBootFlagsAction::is_running_);
+}
+
+}  // namespace chromeos_update_engine
diff --git a/update_engine.conf b/update_engine.conf
index 3358411..af213ad 100644
--- a/update_engine.conf
+++ b/update_engine.conf
@@ -1,2 +1,2 @@
 PAYLOAD_MAJOR_VERSION=2
-PAYLOAD_MINOR_VERSION=5
+PAYLOAD_MINOR_VERSION=6
diff --git a/update_engine.gyp b/update_engine.gyp
index 1ff4d7f..754b314 100644
--- a/update_engine.gyp
+++ b/update_engine.gyp
@@ -13,6 +13,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+
+# TODO: Rename these files to pass this check.
+# gyplint: disable=GypLintSourceFileNames
 {
   'variables': {
     'USE_chrome_network_proxy': '1',
@@ -23,6 +26,11 @@
       'deps': [
         'libbrillo-<(libbase_ver)',
         'libchrome-<(libbase_ver)',
+        # system_api depends on protobuf (or protobuf-lite). It must appear
+        # before protobuf here or the linker flags won't be in the right
+        # order.
+        'system_api',
+        'protobuf-lite',
       ],
       # The -DUSE_* flags are passed from platform2.py. We use sane defaults
       # here when these USE flags are not defined. You can set the default value
@@ -50,6 +58,7 @@
       '_POSIX_C_SOURCE=199309L',
       'USE_BINDER=<(USE_binder)',
       'USE_DBUS=<(USE_dbus)',
+      'USE_FEC=0',
       'USE_HWID_OVERRIDE=<(USE_hwid_override)',
       'USE_CHROME_KIOSK_APP=<(USE_chrome_kiosk_app)',
       'USE_CHROME_NETWORK_PROXY=<(USE_chrome_network_proxy)',
@@ -72,17 +81,6 @@
       'variables': {
         'proto_in_dir': '.',
         'proto_out_dir': 'include/update_engine',
-        'exported_deps': [
-          'protobuf-lite',
-        ],
-        'deps': ['<@(exported_deps)'],
-      },
-      'all_dependent_settings': {
-        'variables': {
-          'deps': [
-            '<@(exported_deps)',
-          ],
-        },
       },
       'sources': [
         'update_metadata.proto',
@@ -103,16 +101,16 @@
       'includes': ['../../../platform2/common-mk/generate-dbus-adaptors.gypi'],
     },
     {
-      'target_name': 'update_engine-dbus-libcros-client',
+      'target_name': 'update_engine-dbus-kiosk-app-client',
       'type': 'none',
       'actions': [{
-        'action_name': 'update_engine-dbus-libcros-client-action',
+        'action_name': 'update_engine-dbus-kiosk-app-client-action',
         'variables': {
-          'mock_output_file': 'include/libcros/dbus-proxy-mocks.h',
-          'proxy_output_file': 'include/libcros/dbus-proxies.h',
+          'mock_output_file': 'include/kiosk-app/dbus-proxy-mocks.h',
+          'proxy_output_file': 'include/kiosk-app/dbus-proxies.h',
         },
         'sources': [
-          'dbus_bindings/org.chromium.LibCrosService.dbus-xml',
+          'dbus_bindings/org.chromium.KioskAppService.dbus-xml',
         ],
         'includes': ['../../../platform2/common-mk/generate-dbus-proxies.gypi'],
       }],
@@ -124,12 +122,13 @@
       'dependencies': [
         'update_metadata-protos',
       ],
-      #TODO(deymo): Remove unused dependencies once we stop including files
+      # TODO(deymo): Remove unused dependencies once we stop including files
       # from the root directory.
       'variables': {
         'exported_deps': [
           'libcrypto',
           'xz-embedded',
+          'libbspatch',
           'libpuffpatch',
         ],
         'deps': ['<@(exported_deps)'],
@@ -148,7 +147,6 @@
           ],
         },
         'libraries': [
-          '-lbspatch',
           '-lbz2',
           '-lrt',
         ],
@@ -167,6 +165,7 @@
         'common/multi_range_http_fetcher.cc',
         'common/platform_constants_chromeos.cc',
         'common/prefs.cc',
+        'common/proxy_resolver.cc',
         'common/subprocess.cc',
         'common/terminator.cc',
         'common/utils.cc',
@@ -186,6 +185,7 @@
         'payload_consumer/payload_metadata.cc',
         'payload_consumer/payload_verifier.cc',
         'payload_consumer/postinstall_runner_action.cc',
+        'payload_consumer/verity_writer_stub.cc',
         'payload_consumer/xz_extent_writer.cc',
       ],
       'conditions': [
@@ -225,6 +225,13 @@
           'libupdate_engine-client',
           'vboot_host',
         ],
+        'conditions': [
+          ['USE_dlc == 1', {
+            'exported_deps' : [
+              'libdlcservice-client',
+            ],
+          }],
+        ],
         'deps': ['<@(exported_deps)'],
       },
       'all_dependent_settings': {
@@ -268,10 +275,10 @@
         'p2p_manager.cc',
         'payload_state.cc',
         'power_manager_chromeos.cc',
-        'proxy_resolver.cc',
         'real_system_state.cc',
         'shill_proxy.cc',
         'update_attempter.cc',
+        'update_boot_flags_action.cc',
         'update_manager/boxed_value.cc',
         'update_manager/chromeos_policy.cc',
         'update_manager/default_policy.cc',
@@ -291,8 +298,11 @@
         'update_manager/real_system_provider.cc',
         'update_manager/real_time_provider.cc',
         'update_manager/real_updater_provider.cc',
+        'update_manager/staging_utils.cc',
         'update_manager/state_factory.cc',
         'update_manager/update_manager.cc',
+        'update_manager/update_time_restrictions_policy_impl.cc',
+        'update_manager/weekly_time.cc',
         'update_status_utils.cc',
       ],
       'conditions': [
@@ -303,7 +313,17 @@
         }],
         ['USE_chrome_kiosk_app == 1', {
           'dependencies': [
-            'update_engine-dbus-libcros-client',
+            'update_engine-dbus-kiosk-app-client',
+          ],
+        }],
+        ['USE_dlc == 1', {
+          'sources': [
+            'dlcservice_chromeos.cc',
+          ],
+        }],
+        ['USE_dlc == 0', {
+          'sources': [
+            'common/dlcservice_stub.cc',
           ],
         }],
       ],
@@ -363,7 +383,9 @@
       'variables': {
         'exported_deps': [
           'ext2fs',
+          'libbsdiff',
           'libpuffdiff',
+          'liblzma',
         ],
         'deps': ['<@(exported_deps)'],
       },
@@ -380,15 +402,14 @@
             '<@(exported_deps)',
           ],
         },
-        'libraries': [
-          '-lbsdiff',
-        ],
       },
       'sources': [
+        'common/file_fetcher.cc',
         'payload_generator/ab_generator.cc',
         'payload_generator/annotated_operation.cc',
         'payload_generator/blob_file_writer.cc',
         'payload_generator/block_mapping.cc',
+        'payload_generator/boot_img_filesystem.cc',
         'payload_generator/bzip.cc',
         'payload_generator/cycle_breaker.cc',
         'payload_generator/deflate_utils.cc',
@@ -403,6 +424,7 @@
         'payload_generator/inplace_generator.cc',
         'payload_generator/mapfile_filesystem.cc',
         'payload_generator/payload_file.cc',
+        'payload_generator/payload_generation_config_chromeos.cc',
         'payload_generator/payload_generation_config.cc',
         'payload_generator/payload_signer.cc',
         'payload_generator/raw_filesystem.cc',
@@ -429,6 +451,31 @@
         'payload_generator/generate_delta_main.cc',
       ],
     },
+    {
+      'target_name': 'update_engine_test_libs',
+      'type': 'static_library',
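+      # Test-only helpers shared by the unit test and fuzzer targets below.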
+      'variables': {
+        'deps': [
+          'libshill-client-test',
+        ],
+      },
+      'dependencies': [
+        'libupdate_engine',
+      ],
+      'includes': [
+        '../../../platform2/common-mk/common_test.gypi',
+      ],
+      'sources': [
+        'common/fake_prefs.cc',
+        'common/mock_http_fetcher.cc',
+        'common/test_utils.cc',
+        'fake_shill_proxy.cc',
+        'fake_system_state.cc',
+        'payload_consumer/fake_file_descriptor.cc',
+        'payload_generator/fake_filesystem.cc',
+        'update_manager/umtest_utils.cc',
+      ],
+    },
   ],
   'conditions': [
     ['USE_test == 1', {
@@ -493,8 +540,8 @@
           'dependencies': [
             'libupdate_engine',
             'libpayload_generator',
+            'update_engine_test_libs',
           ],
-          'includes': ['../../../platform2/common-mk/common_test.gypi'],
           'sources': [
             'boot_control_chromeos_unittest.cc',
             'certificate_checker_unittest.cc',
@@ -502,21 +549,16 @@
             'common/action_processor_unittest.cc',
             'common/action_unittest.cc',
             'common/cpu_limiter_unittest.cc',
-            'common/fake_prefs.cc',
-            'common/file_fetcher.cc',  # Only required for tests.
             'common/hash_calculator_unittest.cc',
             'common/http_fetcher_unittest.cc',
             'common/hwid_override_unittest.cc',
-            'common/mock_http_fetcher.cc',
             'common/prefs_unittest.cc',
+            'common/proxy_resolver_unittest.cc',
             'common/subprocess_unittest.cc',
             'common/terminator_unittest.cc',
-            'common/test_utils.cc',
             'common/utils_unittest.cc',
             'common_service_unittest.cc',
             'connection_manager_unittest.cc',
-            'fake_shill_proxy.cc',
-            'fake_system_state.cc',
             'hardware_chromeos_unittest.cc',
             'image_properties_chromeos_unittest.cc',
             'metrics_reporter_omaha_unittest.cc',
@@ -533,7 +575,6 @@
             'payload_consumer/download_action_unittest.cc',
             'payload_consumer/extent_reader_unittest.cc',
             'payload_consumer/extent_writer_unittest.cc',
-            'payload_consumer/fake_file_descriptor.cc',
             'payload_consumer/file_descriptor_utils_unittest.cc',
             'payload_consumer/file_writer_unittest.cc',
             'payload_consumer/filesystem_verifier_action_unittest.cc',
@@ -542,13 +583,13 @@
             'payload_generator/ab_generator_unittest.cc',
             'payload_generator/blob_file_writer_unittest.cc',
             'payload_generator/block_mapping_unittest.cc',
+            'payload_generator/boot_img_filesystem_unittest.cc',
             'payload_generator/cycle_breaker_unittest.cc',
             'payload_generator/deflate_utils_unittest.cc',
             'payload_generator/delta_diff_utils_unittest.cc',
             'payload_generator/ext2_filesystem_unittest.cc',
             'payload_generator/extent_ranges_unittest.cc',
             'payload_generator/extent_utils_unittest.cc',
-            'payload_generator/fake_filesystem.cc',
             'payload_generator/full_update_generator_unittest.cc',
             'payload_generator/graph_utils_unittest.cc',
             'payload_generator/inplace_generator_unittest.cc',
@@ -561,9 +602,9 @@
             'payload_generator/topological_sort_unittest.cc',
             'payload_generator/zip_unittest.cc',
             'payload_state_unittest.cc',
-            'proxy_resolver_unittest.cc',
             'testrunner.cc',
             'update_attempter_unittest.cc',
+            'update_boot_flags_action_unittest.cc',
             'update_manager/boxed_value_unittest.cc',
             'update_manager/chromeos_policy_unittest.cc',
             'update_manager/evaluation_context_unittest.cc',
@@ -575,9 +616,36 @@
             'update_manager/real_system_provider_unittest.cc',
             'update_manager/real_time_provider_unittest.cc',
             'update_manager/real_updater_provider_unittest.cc',
-            'update_manager/umtest_utils.cc',
+            'update_manager/staging_utils_unittest.cc',
             'update_manager/update_manager_unittest.cc',
+            'update_manager/update_time_restrictions_policy_impl_unittest.cc',
             'update_manager/variable_unittest.cc',
+            'update_manager/weekly_time_unittest.cc',
+          ],
+        },
+      ],
+    }],
+    # Fuzzer target.
+    ['USE_fuzzer == 1', {
+      'targets': [
+        {
+          'target_name': 'update_engine_omaha_request_action_fuzzer',
+          'type': 'executable',
+          'variables': {
+            'deps': [
+              'libbrillo-test-<(libbase_ver)',
+              'libchrome-test-<(libbase_ver)',
+            ],
+          },
+          'includes': [
+            '../../../platform2/common-mk/common_fuzzer.gypi',
+          ],
+          'dependencies': [
+            'libupdate_engine',
+            'update_engine_test_libs',
+          ],
+          'sources': [
+            'omaha_request_action_fuzzer.cc',
           ],
         },
       ],
diff --git a/update_engine/update_metadata.proto b/update_engine/update_metadata.proto
new file mode 120000
index 0000000..d33cea3
--- /dev/null
+++ b/update_engine/update_metadata.proto
@@ -0,0 +1 @@
+../update_metadata.proto
\ No newline at end of file
diff --git a/update_engine_client.cc b/update_engine_client.cc
index bb19632..7446041 100644
--- a/update_engine_client.cc
+++ b/update_engine_client.cc
@@ -26,6 +26,7 @@
 #include <base/command_line.h>
 #include <base/logging.h>
 #include <base/macros.h>
+#include <base/strings/string_split.h>
 #include <base/threading/platform_thread.h>
 #include <brillo/daemons/daemon.h>
 #include <brillo/flag_helper.h>
@@ -60,15 +61,15 @@
 
 class UpdateEngineClient : public brillo::Daemon {
  public:
-  UpdateEngineClient(int argc, char** argv) : argc_(argc), argv_(argv) {
-  }
+  UpdateEngineClient(int argc, char** argv) : argc_(argc), argv_(argv) {}
 
   ~UpdateEngineClient() override = default;
 
  protected:
   int OnInit() override {
     int ret = Daemon::OnInit();
-    if (ret != EX_OK) return ret;
+    if (ret != EX_OK)
+      return ret;
 
     client_ = update_engine::UpdateEngineClient::CreateInstance();
 
@@ -139,8 +140,11 @@
 };
 
 void WatchingStatusUpdateHandler::HandleStatusUpdate(
-    int64_t last_checked_time, double progress, UpdateStatus current_operation,
-    const string& new_version, int64_t new_size) {
+    int64_t last_checked_time,
+    double progress,
+    UpdateStatus current_operation,
+    const string& new_version,
+    int64_t new_size) {
   LOG(INFO) << "Got status update:";
   LOG(INFO) << "  last_checked_time: " << last_checked_time;
   LOG(INFO) << "  progress: " << progress;
@@ -159,8 +163,11 @@
 
   int retry_count = kShowStatusRetryCount;
   while (retry_count > 0) {
-    if (client_->GetStatus(&last_checked_time, &progress, &current_op,
-                           &new_version, &new_size)) {
+    if (client_->GetStatus(&last_checked_time,
+                           &progress,
+                           &current_op,
+                           &new_version,
+                           &new_size)) {
       break;
     }
     if (--retry_count == 0) {
@@ -174,8 +181,11 @@
   printf("LAST_CHECKED_TIME=%" PRIi64
          "\nPROGRESS=%f\nCURRENT_OP=%s\n"
          "NEW_VERSION=%s\nNEW_SIZE=%" PRIi64 "\n",
-         last_checked_time, progress, UpdateStatusToString(current_op),
-         new_version.c_str(), new_size);
+         last_checked_time,
+         progress,
+         UpdateStatusToString(current_op),
+         new_version.c_str(),
+         new_size);
 
   return true;
 }
@@ -187,8 +197,11 @@
   string new_version;
   int64_t new_size = 0;
 
-  if (!client_->GetStatus(&last_checked_time, &progress, &current_op,
-                          &new_version, &new_size)) {
+  if (!client_->GetStatus(&last_checked_time,
+                          &progress,
+                          &current_op,
+                          &new_version,
+                          &new_size)) {
     return 1;
   }
 
@@ -243,61 +256,77 @@
 
 int UpdateEngineClient::ProcessFlags() {
   DEFINE_string(app_version, "", "Force the current app version.");
-  DEFINE_string(channel, "",
+  DEFINE_string(channel,
+                "",
                 "Set the target channel. The device will be powerwashed if the "
                 "target channel is more stable than the current channel unless "
                 "--nopowerwash is specified.");
   DEFINE_bool(check_for_update, false, "Initiate check for updates.");
   DEFINE_string(
       cohort_hint, "", "Set the current cohort hint to the passed value.");
-  DEFINE_bool(follow, false,
+  DEFINE_bool(follow,
+              false,
               "Wait for any update operations to complete. "
               "Exit status is 0 if the update succeeded, and 1 otherwise.");
   DEFINE_bool(interactive, true, "Mark the update request as interactive.");
   DEFINE_string(omaha_url, "", "The URL of the Omaha update server.");
-  DEFINE_string(p2p_update, "",
+  DEFINE_string(p2p_update,
+                "",
                 "Enables (\"yes\") or disables (\"no\") the peer-to-peer update"
                 " sharing.");
-  DEFINE_bool(powerwash, true,
+  DEFINE_bool(powerwash,
+              true,
               "When performing rollback or channel change, "
               "do a powerwash or allow it respectively.");
   DEFINE_bool(reboot, false, "Initiate a reboot if needed.");
-  DEFINE_bool(is_reboot_needed, false,
+  DEFINE_bool(is_reboot_needed,
+              false,
               "Exit status 0 if reboot is needed, "
               "2 if reboot is not needed or 1 if an error occurred.");
-  DEFINE_bool(block_until_reboot_is_needed, false,
+  DEFINE_bool(block_until_reboot_is_needed,
+              false,
               "Blocks until reboot is "
               "needed. Returns non-zero exit status if an error occurred.");
   DEFINE_bool(reset_status, false, "Sets the status in update_engine to idle.");
-  DEFINE_bool(rollback, false,
+  DEFINE_bool(rollback,
+              false,
               "Perform a rollback to the previous partition. The device will "
               "be powerwashed unless --nopowerwash is specified.");
-  DEFINE_bool(can_rollback, false,
+  DEFINE_bool(can_rollback,
+              false,
               "Shows whether rollback partition "
               "is available.");
   DEFINE_bool(show_channel, false, "Show the current and target channels.");
   DEFINE_bool(show_cohort_hint, false, "Show the current cohort hint.");
-  DEFINE_bool(show_p2p_update, false,
+  DEFINE_bool(show_p2p_update,
+              false,
               "Show the current setting for peer-to-peer update sharing.");
-  DEFINE_bool(show_update_over_cellular, false,
+  DEFINE_bool(show_update_over_cellular,
+              false,
               "Show the current setting for updates over cellular networks.");
   DEFINE_bool(status, false, "Print the status to stdout.");
-  DEFINE_bool(update, false,
+  DEFINE_bool(update,
+              false,
               "Forces an update and waits for it to complete. "
               "Implies --follow.");
-  DEFINE_string(update_over_cellular, "",
+  DEFINE_string(update_over_cellular,
+                "",
                 "Enables (\"yes\") or disables (\"no\") the updates over "
                 "cellular networks.");
-  DEFINE_bool(watch_for_updates, false,
+  DEFINE_bool(watch_for_updates,
+              false,
               "Listen for status updates and print them to the screen.");
-  DEFINE_bool(prev_version, false,
+  DEFINE_bool(prev_version,
+              false,
               "Show the previous OS version used before the update reboot.");
   DEFINE_bool(last_attempt_error, false, "Show the last attempt error.");
   DEFINE_bool(eol_status, false, "Show the current end-of-life status.");
+  DEFINE_bool(install, false, "Requests an install.");
+  DEFINE_string(dlc_module_ids, "", "Colon-separated list of DLC IDs.");
 
   // Boilerplate init commands.
   base::CommandLine::Init(argc_, argv_);
-  brillo::FlagHelper::Init(argc_, argv_, "Chromium OS Update Engine Client");
+  brillo::FlagHelper::Init(argc_, argv_, "A/B Update Engine Client");
 
   // Ensure there are no positional arguments.
   const vector<string> positional_args =
@@ -396,7 +425,7 @@
     string rollback_partition;
 
     if (!client_->GetRollbackPartition(&rollback_partition)) {
-      LOG(ERROR) << "Error while querying rollback partition availabilty.";
+      LOG(ERROR) << "Error while querying rollback partition availability.";
       return 1;
     }
 
@@ -458,10 +487,11 @@
       LOG(INFO) << "Target Channel (pending update): " << target_channel;
   }
 
-  bool do_update_request = FLAGS_check_for_update | FLAGS_update |
-                           !FLAGS_app_version.empty() |
+  bool do_update_request = FLAGS_check_for_update || FLAGS_update ||
+                           !FLAGS_app_version.empty() ||
                            !FLAGS_omaha_url.empty();
-  if (FLAGS_update) FLAGS_follow = true;
+  if (FLAGS_update)
+    FLAGS_follow = true;
 
   if (do_update_request && FLAGS_rollback) {
     LOG(ERROR) << "Incompatible flags specified with rollback."
@@ -477,6 +507,30 @@
     }
   }
 
+  if (FLAGS_install) {
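+    // Example invocation (the DLC IDs below are hypothetical):
+    //   update_engine_client --install --dlc_module_ids=dlc-a:dlc-b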
+    // Parse DLC module IDs.
+    vector<string> dlc_module_ids;
+    if (!FLAGS_dlc_module_ids.empty()) {
+      dlc_module_ids = base::SplitString(FLAGS_dlc_module_ids,
+                                         ":",
+                                         base::TRIM_WHITESPACE,
+                                         base::SPLIT_WANT_ALL);
+    }
+    if (dlc_module_ids.empty()) {
+      LOG(ERROR) << "dlc_module_ids is empty: " << FLAGS_dlc_module_ids;
+      return 1;
+    }
+    if (!client_->AttemptInstall(FLAGS_omaha_url, dlc_module_ids)) {
+      LOG(ERROR) << "AttemptInstall failed.";
+      return 1;
+    }
+    return 0;
+  } else if (!FLAGS_dlc_module_ids.empty()) {
+    LOG(ERROR) << "dlc_module_ids is not empty while install is not set: "
+               << FLAGS_dlc_module_ids;
+    return 1;
+  }
+
   // Initiate an update check, if necessary.
   if (do_update_request) {
     LOG_IF(WARNING, FLAGS_reboot) << "-reboot flag ignored.";
@@ -486,8 +540,8 @@
       LOG(INFO) << "Forcing an update by setting app_version to ForcedUpdate.";
     }
     LOG(INFO) << "Initiating update check and install.";
-    if (!client_->AttemptUpdate(app_version, FLAGS_omaha_url,
-                                FLAGS_interactive)) {
+    if (!client_->AttemptUpdate(
+            app_version, FLAGS_omaha_url, FLAGS_interactive)) {
       LOG(ERROR) << "Error checking for update.";
       return 1;
     }
diff --git a/update_engine_client_android.cc b/update_engine_client_android.cc
index 267f6e9..6863799 100644
--- a/update_engine_client_android.cc
+++ b/update_engine_client_android.cc
@@ -49,8 +49,7 @@
 
 class UpdateEngineClientAndroid : public brillo::Daemon {
  public:
-  UpdateEngineClientAndroid(int argc, char** argv) : argc_(argc), argv_(argv) {
-  }
+  UpdateEngineClientAndroid(int argc, char** argv) : argc_(argc), argv_(argv) {}
 
   int ExitWhenIdle(const Status& status);
   int ExitWhenIdle(int return_code);
@@ -83,8 +82,8 @@
   brillo::BinderWatcher binder_watcher_;
 };
 
-Status UpdateEngineClientAndroid::UECallback::onStatusUpdate(
-    int status_code, float progress) {
+Status UpdateEngineClientAndroid::UECallback::onStatusUpdate(int status_code,
+                                                             float progress) {
   update_engine::UpdateStatus status =
       static_cast<update_engine::UpdateStatus>(status_code);
   LOG(INFO) << "onStatusUpdate(" << UpdateStatusToString(status) << " ("
@@ -113,10 +112,12 @@
   DEFINE_string(payload,
                 "http://127.0.0.1:8080/payload",
                 "The URI to the update payload to use.");
-  DEFINE_int64(offset, 0,
+  DEFINE_int64(offset,
+               0,
                "The offset in the payload where the CrAU update starts. "
                "Used when --update is passed.");
-  DEFINE_int64(size, 0,
+  DEFINE_int64(size,
+               0,
                "The size of the CrAU part of the payload. If 0 is passed, it "
                "will be autodetected. Used when --update is passed.");
   DEFINE_string(headers,
@@ -124,6 +125,14 @@
                 "A list of key-value pairs, one element of the list per line. "
                 "Used when --update is passed.");
 
+  DEFINE_bool(verify,
+              false,
+              "Given payload metadata, verify if the payload is applicable.");
+  DEFINE_string(metadata,
+                "/data/ota_package/metadata",
+                "The path to the update payload metadata. "
+                "Used when --verify is passed.");
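+  // Example invocation (metadata path is the default above; the client binary
+  // name is assumed):
+  //   update_engine_client --verify --metadata=/data/ota_package/metadata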
+
   DEFINE_bool(suspend, false, "Suspend an ongoing update and exit.");
   DEFINE_bool(resume, false, "Resume a suspended update.");
   DEFINE_bool(cancel, false, "Cancel the ongoing update and exit.");
@@ -182,6 +191,15 @@
     return ExitWhenIdle(service_->resetStatus());
   }
 
+  if (FLAGS_verify) {
+    bool applicable = false;
+    Status status = service_->verifyPayloadApplicable(
+        android::String16{FLAGS_metadata.data(), FLAGS_metadata.size()},
+        &applicable);
+    LOG(INFO) << "Payload is " << (applicable ? "" : "not ") << "applicable.";
+    return ExitWhenIdle(status);
+  }
+
   if (FLAGS_follow) {
     // Register a callback object with the service.
     callback_ = new UECallback(this);
@@ -247,7 +265,7 @@
 }  // namespace chromeos_update_engine
 
 int main(int argc, char** argv) {
-  chromeos_update_engine::internal::UpdateEngineClientAndroid client(
-      argc, argv);
+  chromeos_update_engine::internal::UpdateEngineClientAndroid client(argc,
+                                                                     argv);
   return client.Run();
 }
diff --git a/update_manager/android_things_policy.cc b/update_manager/android_things_policy.cc
index 5fbda46..4afcf12 100644
--- a/update_manager/android_things_policy.cc
+++ b/update_manager/android_things_policy.cc
@@ -53,7 +53,9 @@
   result->updates_enabled = true;
   result->target_channel.clear();
   result->target_version_prefix.clear();
-  result->is_interactive = false;
+  result->rollback_allowed = false;
+  result->rollback_allowed_milestones = -1;
+  result->interactive = false;
 
   // Build a list of policies to consult.  Note that each policy may modify the
   // result structure, even if it signals kContinue.
@@ -68,12 +70,12 @@
       // A/B updates
       &enough_slots_ab_updates_policy,
 
-      // Unofficial builds should not perform periodic update checks.
-      &only_update_official_builds_policy,
-
       // Check to see if an interactive update was requested.
       &interactive_update_policy,
 
+      // Unofficial builds should not perform periodic update checks.
+      &only_update_official_builds_policy,
+
       // Ensure that periodic update checks are timed properly.
       &next_update_check_time_policy,
   };
diff --git a/update_manager/android_things_policy_unittest.cc b/update_manager/android_things_policy_unittest.cc
index 8a50bc2..6961efc 100644
--- a/update_manager/android_things_policy_unittest.cc
+++ b/update_manager/android_things_policy_unittest.cc
@@ -97,7 +97,7 @@
   ExpectPolicyStatus(
       EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
-  EXPECT_FALSE(result.is_interactive);
+  EXPECT_FALSE(result.interactive);
 }
 
 TEST_F(UmAndroidThingsPolicyTest,
@@ -140,7 +140,7 @@
   ExpectPolicyStatus(
       EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
-  EXPECT_TRUE(result.is_interactive);
+  EXPECT_TRUE(result.interactive);
 }
 
 TEST_F(UmAndroidThingsPolicyTest,
@@ -156,7 +156,7 @@
   ExpectPolicyStatus(
       EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
-  EXPECT_FALSE(result.is_interactive);
+  EXPECT_FALSE(result.interactive);
 }
 
 TEST_F(UmAndroidThingsPolicyTest, UpdateCanBeAppliedOk) {
diff --git a/update_manager/boxed_value.cc b/update_manager/boxed_value.cc
index a437c02..cee1ece 100644
--- a/update_manager/boxed_value.cc
+++ b/update_manager/boxed_value.cc
@@ -26,8 +26,10 @@
 
 #include "update_engine/common/utils.h"
 #include "update_engine/connection_utils.h"
+#include "update_engine/update_manager/rollback_prefs.h"
 #include "update_engine/update_manager/shill_provider.h"
 #include "update_engine/update_manager/updater_provider.h"
+#include "update_engine/update_manager/weekly_time.h"
 
 using chromeos_update_engine::ConnectionTethering;
 using chromeos_update_engine::ConnectionType;
@@ -40,68 +42,87 @@
 // Template instantiation for common types; used in BoxedValue::ToString().
 // Keep in sync with boxed_value_unittest.cc.
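+// Newer libchrome (BASE_VER >= 576279) provides the overloaded
+// base::NumberToString() in place of the typed base::*ToString() helpers, so
+// the printers below select the available API based on BASE_VER.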
 
-template<>
+template <>
 string BoxedValue::ValuePrinter<string>(const void* value) {
   const string* val = reinterpret_cast<const string*>(value);
   return *val;
 }
 
-template<>
+template <>
 string BoxedValue::ValuePrinter<int>(const void* value) {
   const int* val = reinterpret_cast<const int*>(value);
+#if BASE_VER < 576279
   return base::IntToString(*val);
+#else
+  return base::NumberToString(*val);
+#endif
 }
 
-template<>
+template <>
 string BoxedValue::ValuePrinter<unsigned int>(const void* value) {
   const unsigned int* val = reinterpret_cast<const unsigned int*>(value);
+#if BASE_VER < 576279
   return base::UintToString(*val);
+#else
+  return base::NumberToString(*val);
+#endif
 }
 
-template<>
+template <>
 string BoxedValue::ValuePrinter<int64_t>(const void* value) {
   const int64_t* val = reinterpret_cast<const int64_t*>(value);
+#if BASE_VER < 576279
   return base::Int64ToString(*val);
+#else
+  return base::NumberToString(*val);
+#endif
 }
 
-template<>
+template <>
 string BoxedValue::ValuePrinter<uint64_t>(const void* value) {
-  const uint64_t* val =
-    reinterpret_cast<const uint64_t*>(value);
-  return base::Uint64ToString(static_cast<uint64_t>(*val));
+  const uint64_t* val = reinterpret_cast<const uint64_t*>(value);
+#if BASE_VER < 576279
+  return base::Uint64ToString(*val);
+#else
+  return base::NumberToString(*val);
+#endif
 }
 
-template<>
+template <>
 string BoxedValue::ValuePrinter<bool>(const void* value) {
   const bool* val = reinterpret_cast<const bool*>(value);
   return *val ? "true" : "false";
 }
 
-template<>
+template <>
 string BoxedValue::ValuePrinter<double>(const void* value) {
   const double* val = reinterpret_cast<const double*>(value);
+#if BASE_VER < 576279
   return base::DoubleToString(*val);
+#else
+  return base::NumberToString(*val);
+#endif
 }
 
-template<>
+template <>
 string BoxedValue::ValuePrinter<base::Time>(const void* value) {
   const base::Time* val = reinterpret_cast<const base::Time*>(value);
   return chromeos_update_engine::utils::ToString(*val);
 }
 
-template<>
+template <>
 string BoxedValue::ValuePrinter<base::TimeDelta>(const void* value) {
   const base::TimeDelta* val = reinterpret_cast<const base::TimeDelta*>(value);
   return chromeos_update_engine::utils::FormatTimeDelta(*val);
 }
 
-template<>
+template <>
 string BoxedValue::ValuePrinter<ConnectionType>(const void* value) {
   const ConnectionType* val = reinterpret_cast<const ConnectionType*>(value);
   return StringForConnectionType(*val);
 }
 
-template<>
+template <>
 string BoxedValue::ValuePrinter<set<ConnectionType>>(const void* value) {
   string ret = "";
   const set<ConnectionType>* val =
@@ -115,7 +136,7 @@
   return ret;
 }
 
-template<>
+template <>
 string BoxedValue::ValuePrinter<ConnectionTethering>(const void* value) {
   const ConnectionTethering* val =
       reinterpret_cast<const ConnectionTethering*>(value);
@@ -133,7 +154,30 @@
   return "Unknown";
 }
 
-template<>
+template <>
+string BoxedValue::ValuePrinter<RollbackToTargetVersion>(const void* value) {
+  const RollbackToTargetVersion* val =
+      reinterpret_cast<const RollbackToTargetVersion*>(value);
+  switch (*val) {
+    case RollbackToTargetVersion::kUnspecified:
+      return "Unspecified";
+    case RollbackToTargetVersion::kDisabled:
+      return "Disabled";
+    case RollbackToTargetVersion::kRollbackAndPowerwash:
+      return "Rollback and powerwash";
+    case RollbackToTargetVersion::kRollbackAndRestoreIfPossible:
+      return "Rollback and restore if possible";
+    case RollbackToTargetVersion::kRollbackOnlyIfRestorePossible:
+      return "Rollback only if restore is possible";
+    case RollbackToTargetVersion::kMaxValue:
+      NOTREACHED();
+      return "Max value";
+  }
+  NOTREACHED();
+  return "Unknown";
+}
+
+template <>
 string BoxedValue::ValuePrinter<Stage>(const void* value) {
   const Stage* val = reinterpret_cast<const Stage*>(value);
   switch (*val) {
@@ -160,7 +204,7 @@
   return "Unknown";
 }
 
-template<>
+template <>
 string BoxedValue::ValuePrinter<UpdateRequestStatus>(const void* value) {
   const UpdateRequestStatus* val =
       reinterpret_cast<const UpdateRequestStatus*>(value);
@@ -191,4 +235,23 @@
   return retval;
 }
 
+template <>
+string BoxedValue::ValuePrinter<WeeklyTimeInterval>(const void* value) {
+  const WeeklyTimeInterval* val =
+      reinterpret_cast<const WeeklyTimeInterval*>(value);
+  return val->ToString();
+}
+
+template <>
+string BoxedValue::ValuePrinter<WeeklyTimeIntervalVector>(const void* value) {
+  const WeeklyTimeIntervalVector* val =
+      reinterpret_cast<const WeeklyTimeIntervalVector*>(value);
+
+  string retval = "Disallowed intervals:\n";
+  for (const auto& interval : *val) {
+    retval += interval.ToString() + "\n";
+  }
+  return retval;
+}
+
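+// Boxing a new type only needs a matching ValuePrinter<T> specialization in
+// this file; a hypothetical sketch (MyEnum and MyEnumToString are
+// placeholders):
+//
+//   template <>
+//   string BoxedValue::ValuePrinter<MyEnum>(const void* value) {
+//     const MyEnum* val = reinterpret_cast<const MyEnum*>(value);
+//     return MyEnumToString(*val);
+//   }
+//
+// plus a corresponding ToString test in boxed_value_unittest.cc.
+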
 }  // namespace chromeos_update_manager
diff --git a/update_manager/boxed_value.h b/update_manager/boxed_value.h
index 5f41835..62b4b9d 100644
--- a/update_manager/boxed_value.h
+++ b/update_manager/boxed_value.h
@@ -60,18 +60,20 @@
 
   // Creates a BoxedValue for the passed pointer |value|. The BoxedValue takes
   // ownership of this pointer, which cannot be released.
-  template<typename T>
+  template <typename T>
   explicit BoxedValue(const T* value)
-    : value_(static_cast<const void*>(value)), deleter_(ValueDeleter<T>),
-      printer_(ValuePrinter<T>) {}
+      : value_(static_cast<const void*>(value)),
+        deleter_(ValueDeleter<T>),
+        printer_(ValuePrinter<T>) {}
 
   // The move constructor takes ownership of the pointer since its semantics
   // allow the passed BoxedValue to be left in an undefined state. You need
   // to use the move constructor explicitly to prevent accidental references,
   // as in:
   //   BoxedValue new_box(std::move(other_box));
-  BoxedValue(BoxedValue&& other)  // NOLINT(build/c++11)
-      : value_(other.value_), deleter_(other.deleter_),
+  BoxedValue(BoxedValue&& other) noexcept
+      : value_(other.value_),
+        deleter_(other.deleter_),
         printer_(other.printer_) {
     other.value_ = nullptr;
     other.deleter_ = nullptr;
@@ -96,14 +98,14 @@
   }
 
   // Static method to call the destructor of the right type.
-  template<typename T>
+  template <typename T>
   static void ValueDeleter(const void* value) {
     delete reinterpret_cast<const T*>(value);
   }
 
   // Static method to print a type. See boxed_value.cc for common
   // instantiations.
-  template<typename T>
+  template <typename T>
   static std::string ValuePrinter(const void* value);
 
  private:
diff --git a/update_manager/boxed_value_unittest.cc b/update_manager/boxed_value_unittest.cc
index 4aeaec8..2fa94ec 100644
--- a/update_manager/boxed_value_unittest.cc
+++ b/update_manager/boxed_value_unittest.cc
@@ -26,9 +26,11 @@
 #include <base/strings/stringprintf.h>
 #include <base/time/time.h>
 
+#include "update_engine/update_manager/rollback_prefs.h"
 #include "update_engine/update_manager/shill_provider.h"
 #include "update_engine/update_manager/umtest_utils.h"
 #include "update_engine/update_manager/updater_provider.h"
+#include "update_engine/update_manager/weekly_time.h"
 
 using base::Time;
 using base::TimeDelta;
@@ -49,14 +51,14 @@
   ~DeleterMarker() { *marker_ = true; }
 
  private:
-  friend string BoxedValue::ValuePrinter<DeleterMarker>(const void *);
+  friend string BoxedValue::ValuePrinter<DeleterMarker>(const void*);
 
   // Pointer to the bool marker.
   bool* marker_;
 };
 
-template<>
-string BoxedValue::ValuePrinter<DeleterMarker>(const void *value) {
+template <>
+string BoxedValue::ValuePrinter<DeleterMarker>(const void* value) {
   const DeleterMarker* val = reinterpret_cast<const DeleterMarker*>(value);
   return base::StringPrintf("DeleterMarker:%s",
                             *val->marker_ ? "true" : "false");
@@ -110,8 +112,7 @@
 }
 
 TEST(UmBoxedValueTest, StringToString) {
-  EXPECT_EQ("Hej Verden!",
-            BoxedValue(new string("Hej Verden!")).ToString());
+  EXPECT_EQ("Hej Verden!", BoxedValue(new string("Hej Verden!")).ToString());
 }
 
 TEST(UmBoxedValueTest, IntToString) {
@@ -120,8 +121,8 @@
 
 TEST(UmBoxedValueTest, Int64ToString) {
   // -123456789012345 doesn't fit in 32-bit integers.
-  EXPECT_EQ("-123456789012345", BoxedValue(
-      new int64_t(-123456789012345LL)).ToString());
+  EXPECT_EQ("-123456789012345",
+            BoxedValue(new int64_t(-123456789012345LL)).ToString());
 }
 
 TEST(UmBoxedValueTest, UnsignedIntToString) {
@@ -132,8 +133,8 @@
 
 TEST(UmBoxedValueTest, UnsignedInt64ToString) {
   // 18446744073709551615 is the biggest possible 64-bit unsigned integer.
-  EXPECT_EQ("18446744073709551615", BoxedValue(
-      new uint64_t(18446744073709551615ULL)).ToString());
+  EXPECT_EQ("18446744073709551615",
+            BoxedValue(new uint64_t(18446744073709551615ULL)).ToString());
 }
 
 TEST(UmBoxedValueTest, BoolToString) {
@@ -153,43 +154,72 @@
 
 TEST(UmBoxedValueTest, TimeDeltaToString) {
   // 12345 seconds is 3 hours, 25 minutes and 45 seconds.
-  EXPECT_EQ("3h25m45s",
-            BoxedValue(new TimeDelta(TimeDelta::FromSeconds(12345)))
-            .ToString());
+  EXPECT_EQ(
+      "3h25m45s",
+      BoxedValue(new TimeDelta(TimeDelta::FromSeconds(12345))).ToString());
 }
 
 TEST(UmBoxedValueTest, ConnectionTypeToString) {
-  EXPECT_EQ("ethernet",
-            BoxedValue(new ConnectionType(ConnectionType::kEthernet))
-            .ToString());
+  EXPECT_EQ(
+      "Disconnected",
+      BoxedValue(new ConnectionType(ConnectionType::kDisconnected)).ToString());
+  EXPECT_EQ(
+      "ethernet",
+      BoxedValue(new ConnectionType(ConnectionType::kEthernet)).ToString());
   EXPECT_EQ("wifi",
             BoxedValue(new ConnectionType(ConnectionType::kWifi)).ToString());
   EXPECT_EQ("wimax",
             BoxedValue(new ConnectionType(ConnectionType::kWimax)).ToString());
-  EXPECT_EQ("bluetooth",
-            BoxedValue(new ConnectionType(ConnectionType::kBluetooth))
-            .ToString());
-  EXPECT_EQ("cellular",
-            BoxedValue(new ConnectionType(ConnectionType::kCellular))
-            .ToString());
-  EXPECT_EQ("Unknown",
-            BoxedValue(new ConnectionType(ConnectionType::kUnknown))
-            .ToString());
+  EXPECT_EQ(
+      "bluetooth",
+      BoxedValue(new ConnectionType(ConnectionType::kBluetooth)).ToString());
+  EXPECT_EQ(
+      "cellular",
+      BoxedValue(new ConnectionType(ConnectionType::kCellular)).ToString());
+  EXPECT_EQ(
+      "Unknown",
+      BoxedValue(new ConnectionType(ConnectionType::kUnknown)).ToString());
 }
 
 TEST(UmBoxedValueTest, ConnectionTetheringToString) {
-  EXPECT_EQ("Not Detected",
-            BoxedValue(new ConnectionTethering(
-                ConnectionTethering::kNotDetected)).ToString());
+  EXPECT_EQ(
+      "Not Detected",
+      BoxedValue(new ConnectionTethering(ConnectionTethering::kNotDetected))
+          .ToString());
   EXPECT_EQ("Suspected",
             BoxedValue(new ConnectionTethering(ConnectionTethering::kSuspected))
-            .ToString());
+                .ToString());
   EXPECT_EQ("Confirmed",
             BoxedValue(new ConnectionTethering(ConnectionTethering::kConfirmed))
-            .ToString());
+                .ToString());
   EXPECT_EQ("Unknown",
             BoxedValue(new ConnectionTethering(ConnectionTethering::kUnknown))
-            .ToString());
+                .ToString());
+}
+
+TEST(UmBoxedValueTest, RollbackToTargetVersionToString) {
+  EXPECT_EQ("Unspecified",
+            BoxedValue(new RollbackToTargetVersion(
+                           RollbackToTargetVersion::kUnspecified))
+                .ToString());
+  EXPECT_EQ("Disabled",
+            BoxedValue(
+                new RollbackToTargetVersion(RollbackToTargetVersion::kDisabled))
+                .ToString());
+  EXPECT_EQ("Rollback and powerwash",
+            BoxedValue(new RollbackToTargetVersion(
+                           RollbackToTargetVersion::kRollbackAndPowerwash))
+                .ToString());
+  EXPECT_EQ(
+      "Rollback and restore if possible",
+      BoxedValue(new RollbackToTargetVersion(
+                     RollbackToTargetVersion::kRollbackAndRestoreIfPossible))
+          .ToString());
+  EXPECT_EQ(
+      "Rollback only if restore is possible",
+      BoxedValue(new RollbackToTargetVersion(
+                     RollbackToTargetVersion::kRollbackOnlyIfRestorePossible))
+          .ToString());
 }
 
 TEST(UmBoxedValueTest, SetConnectionTypeToString) {
@@ -204,18 +234,15 @@
 }
 
 TEST(UmBoxedValueTest, StageToString) {
-  EXPECT_EQ("Idle",
-            BoxedValue(new Stage(Stage::kIdle)).ToString());
+  EXPECT_EQ("Idle", BoxedValue(new Stage(Stage::kIdle)).ToString());
   EXPECT_EQ("Checking For Update",
             BoxedValue(new Stage(Stage::kCheckingForUpdate)).ToString());
   EXPECT_EQ("Update Available",
             BoxedValue(new Stage(Stage::kUpdateAvailable)).ToString());
   EXPECT_EQ("Downloading",
             BoxedValue(new Stage(Stage::kDownloading)).ToString());
-  EXPECT_EQ("Verifying",
-            BoxedValue(new Stage(Stage::kVerifying)).ToString());
-  EXPECT_EQ("Finalizing",
-            BoxedValue(new Stage(Stage::kFinalizing)).ToString());
+  EXPECT_EQ("Verifying", BoxedValue(new Stage(Stage::kVerifying)).ToString());
+  EXPECT_EQ("Finalizing", BoxedValue(new Stage(Stage::kFinalizing)).ToString());
   EXPECT_EQ("Updated, Need Reboot",
             BoxedValue(new Stage(Stage::kUpdatedNeedReboot)).ToString());
   EXPECT_EQ("Reporting Error Event",
@@ -242,4 +269,34 @@
                 .ToString());
 }
 
+TEST(UmBoxedValueTest, WeeklyTimeIntervalToString) {
+  EXPECT_EQ("Start: day_of_week=2 time=100\nEnd: day_of_week=4 time=200",
+            BoxedValue(new WeeklyTimeInterval(
+                           WeeklyTime(2, TimeDelta::FromMinutes(100)),
+                           WeeklyTime(4, TimeDelta::FromMinutes(200))))
+                .ToString());
+  EXPECT_EQ("Start: day_of_week=1 time=10\nEnd: day_of_week=1 time=20",
+            BoxedValue(new WeeklyTimeInterval(
+                           WeeklyTime(1, TimeDelta::FromMinutes(10)),
+                           WeeklyTime(1, TimeDelta::FromMinutes(20))))
+                .ToString());
+}
+
+TEST(UmBoxedValueTest, WeeklyTimeIntervalVectorToString) {
+  WeeklyTimeIntervalVector intervals;
+  intervals.emplace_back(WeeklyTime(5, TimeDelta::FromMinutes(10)),
+                         WeeklyTime(1, TimeDelta::FromMinutes(30)));
+  EXPECT_EQ(
+      "Disallowed intervals:\nStart: day_of_week=5 time=10\nEnd: "
+      "day_of_week=1 time=30\n",
+      BoxedValue(new WeeklyTimeIntervalVector(intervals)).ToString());
+  intervals.emplace_back(WeeklyTime(1, TimeDelta::FromMinutes(5)),
+                         WeeklyTime(6, TimeDelta::FromMinutes(1000)));
+  EXPECT_EQ(
+      "Disallowed intervals:\nStart: day_of_week=5 time=10\nEnd: "
+      "day_of_week=1 time=30\nStart: day_of_week=1 time=5\nEnd: day_of_week=6 "
+      "time=1000\n",
+      BoxedValue(new WeeklyTimeIntervalVector(intervals)).ToString());
+}
+
 }  // namespace chromeos_update_manager
diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc
index 916acd3..bdb88f8 100644
--- a/update_manager/chromeos_policy.cc
+++ b/update_manager/chromeos_policy.cc
@@ -36,6 +36,7 @@
 #include "update_engine/update_manager/out_of_box_experience_policy_impl.h"
 #include "update_engine/update_manager/policy_utils.h"
 #include "update_engine/update_manager/shill_provider.h"
+#include "update_engine/update_manager/update_time_restrictions_policy_impl.h"
 
 using base::Time;
 using base::TimeDelta;
@@ -86,6 +87,7 @@
     case ErrorCode::kUnsupportedMajorPayloadVersion:
     case ErrorCode::kUnsupportedMinorPayloadVersion:
     case ErrorCode::kPayloadTimestampError:
+    case ErrorCode::kVerityCalculationError:
       LOG(INFO) << "Advancing download URL due to error "
                 << chromeos_update_engine::utils::ErrorCodeToString(err_code)
                 << " (" << static_cast<int>(err_code) << ")";
@@ -142,28 +144,32 @@
     case ErrorCode::kOmahaRequestXMLHasEntityDecl:
     case ErrorCode::kFilesystemVerifierError:
     case ErrorCode::kUserCanceled:
+    case ErrorCode::kOmahaUpdateIgnoredOverCellular:
     case ErrorCode::kUpdatedButNotActive:
+    case ErrorCode::kNoUpdate:
+    case ErrorCode::kRollbackNotPossible:
+    case ErrorCode::kFirstActiveOmahaPingSentPersistenceError:
       LOG(INFO) << "Not changing URL index or failure count due to error "
                 << chromeos_update_engine::utils::ErrorCodeToString(err_code)
                 << " (" << static_cast<int>(err_code) << ")";
       return false;
 
-    case ErrorCode::kSuccess:                            // success code
-    case ErrorCode::kUmaReportedMax:                     // not an error code
-    case ErrorCode::kOmahaRequestHTTPResponseBase:       // aggregated already
-    case ErrorCode::kDevModeFlag:                        // not an error code
-    case ErrorCode::kResumedFlag:                        // not an error code
-    case ErrorCode::kTestImageFlag:                      // not an error code
-    case ErrorCode::kTestOmahaUrlFlag:                   // not an error code
-    case ErrorCode::kSpecialFlags:                       // not an error code
+    case ErrorCode::kSuccess:                       // success code
+    case ErrorCode::kUmaReportedMax:                // not an error code
+    case ErrorCode::kOmahaRequestHTTPResponseBase:  // aggregated already
+    case ErrorCode::kDevModeFlag:                   // not an error code
+    case ErrorCode::kResumedFlag:                   // not an error code
+    case ErrorCode::kTestImageFlag:                 // not an error code
+    case ErrorCode::kTestOmahaUrlFlag:              // not an error code
+    case ErrorCode::kSpecialFlags:                  // not an error code
       // These shouldn't happen. Enumerating these  explicitly here so that we
       // can let the compiler warn about new error codes that are added to
       // action_processor.h but not added here.
       LOG(WARNING) << "Unexpected error "
                    << chromeos_update_engine::utils::ErrorCodeToString(err_code)
                    << " (" << static_cast<int>(err_code) << ")";
-    // Note: Not adding a default here so as to let the compiler warn us of
-    // any new enums that were added in the .h but not listed in this switch.
+      // Note: Not adding a default here so as to let the compiler warn us of
+      // any new enums that were added in the .h but not listed in this switch.
   }
   return false;
 }
@@ -171,8 +177,8 @@
 // Checks whether |url| can be used under given download restrictions.
 bool IsUrlUsable(const string& url, bool http_allowed) {
   return http_allowed ||
-         !base::StartsWith(url, "http://",
-                           base::CompareCase::INSENSITIVE_ASCII);
+         !base::StartsWith(
+             url, "http://", base::CompareCase::INSENSITIVE_ASCII);
 }
 
 }  // namespace
@@ -192,14 +198,17 @@
 const int ChromeOSPolicy::kMaxP2PAttempts = 10;
 const int ChromeOSPolicy::kMaxP2PAttemptsPeriodInSeconds = 5 * 24 * 60 * 60;
 
-EvalStatus ChromeOSPolicy::UpdateCheckAllowed(
-    EvaluationContext* ec, State* state, string* error,
-    UpdateCheckParams* result) const {
+EvalStatus ChromeOSPolicy::UpdateCheckAllowed(EvaluationContext* ec,
+                                              State* state,
+                                              string* error,
+                                              UpdateCheckParams* result) const {
   // Set the default return values.
   result->updates_enabled = true;
   result->target_channel.clear();
   result->target_version_prefix.clear();
-  result->is_interactive = false;
+  result->rollback_allowed = false;
+  result->rollback_allowed_milestones = -1;
+  result->interactive = false;
 
   EnoughSlotsAbUpdatesPolicyImpl enough_slots_ab_updates_policy;
   EnterpriseDevicePolicyImpl enterprise_device_policy;
@@ -255,8 +264,33 @@
                                               std::string* error,
                                               ErrorCode* result,
                                               InstallPlan* install_plan) const {
-  *result = ErrorCode::kSuccess;
-  return EvalStatus::kSucceeded;
+  UpdateTimeRestrictionsPolicyImpl update_time_restrictions_policy;
+  InteractiveUpdatePolicyImpl interactive_update_policy;
+
+  vector<Policy const*> policies_to_consult = {
+      // Check to see if an interactive update has been requested.
+      &interactive_update_policy,
+
+      // Do not apply or download an update if we are inside one of the
+      // restricted times.
+      &update_time_restrictions_policy,
+  };
+
+  EvalStatus status = ConsultPolicies(policies_to_consult,
+                                      &Policy::UpdateCanBeApplied,
+                                      ec,
+                                      state,
+                                      error,
+                                      result,
+                                      install_plan);
+  if (EvalStatus::kContinue != status) {
+    return status;
+  } else {
+    // The update can proceed.
+    LOG(INFO) << "Allowing update to be applied.";
+    *result = ErrorCode::kSuccess;
+    return EvalStatus::kSucceeded;
+  }
 }
 
 EvalStatus ChromeOSPolicy::UpdateCanStart(
@@ -310,8 +344,8 @@
   bool is_scattering_active = false;
   EvalStatus scattering_status = EvalStatus::kSucceeded;
 
-  const bool* device_policy_is_loaded_p = ec->GetValue(
-      dp_provider->var_device_policy_is_loaded());
+  const bool* device_policy_is_loaded_p =
+      ec->GetValue(dp_provider->var_device_policy_is_loaded());
   if (device_policy_is_loaded_p && *device_policy_is_loaded_p) {
     // Check whether scattering applies to this update attempt. We should not be
     // scattering if this is an interactive update check, or if OOBE is enabled
@@ -324,14 +358,14 @@
     bool is_scattering_applicable = false;
     result->scatter_wait_period = kZeroInterval;
     result->scatter_check_threshold = 0;
-    if (!update_state.is_interactive) {
-      const bool* is_oobe_enabled_p = ec->GetValue(
-          state->config_provider()->var_is_oobe_enabled());
+    if (!update_state.interactive) {
+      const bool* is_oobe_enabled_p =
+          ec->GetValue(state->config_provider()->var_is_oobe_enabled());
       if (is_oobe_enabled_p && !(*is_oobe_enabled_p)) {
         is_scattering_applicable = true;
       } else {
-        const bool* is_oobe_complete_p = ec->GetValue(
-            state->system_provider()->var_is_oobe_complete());
+        const bool* is_oobe_complete_p =
+            ec->GetValue(state->system_provider()->var_is_oobe_complete());
         is_scattering_applicable = (is_oobe_complete_p && *is_oobe_complete_p);
       }
     }
@@ -339,8 +373,8 @@
     // Compute scattering values.
     if (is_scattering_applicable) {
       UpdateScatteringResult scatter_result;
-      scattering_status = UpdateScattering(ec, state, error, &scatter_result,
-                                           update_state);
+      scattering_status =
+          UpdateScattering(ec, state, error, &scatter_result, update_state);
       if (scattering_status == EvalStatus::kFailed) {
         return EvalStatus::kFailed;
       } else {
@@ -372,7 +406,7 @@
     // interactive, and other limits haven't been reached.
     if (update_state.p2p_downloading_disabled) {
       LOG(INFO) << "Blocked P2P downloading because it is disabled by Omaha.";
-    } else if (update_state.is_interactive) {
+    } else if (update_state.interactive) {
       LOG(INFO) << "Blocked P2P downloading because update is interactive.";
     } else if (update_state.p2p_num_attempts >= kMaxP2PAttempts) {
       LOG(INFO) << "Blocked P2P downloading as it was attempted too many "
@@ -428,22 +462,21 @@
 // updates over a cellular network (disabled by default). We may want to
 // revisit this semantics, allowing greater flexibility in defining specific
 // permissions over all types of networks.
-EvalStatus ChromeOSPolicy::UpdateDownloadAllowed(
-    EvaluationContext* ec,
-    State* state,
-    string* error,
-    bool* result) const {
+EvalStatus ChromeOSPolicy::UpdateDownloadAllowed(EvaluationContext* ec,
+                                                 State* state,
+                                                 string* error,
+                                                 bool* result) const {
   // Get the current connection type.
   ShillProvider* const shill_provider = state->shill_provider();
-  const ConnectionType* conn_type_p = ec->GetValue(
-      shill_provider->var_conn_type());
+  const ConnectionType* conn_type_p =
+      ec->GetValue(shill_provider->var_conn_type());
   POLICY_CHECK_VALUE_AND_FAIL(conn_type_p, error);
   ConnectionType conn_type = *conn_type_p;
 
   // If we're tethering, treat it as a cellular connection.
   if (conn_type != ConnectionType::kCellular) {
-    const ConnectionTethering* conn_tethering_p = ec->GetValue(
-        shill_provider->var_conn_tethering());
+    const ConnectionTethering* conn_tethering_p =
+        ec->GetValue(shill_provider->var_conn_tethering());
     POLICY_CHECK_VALUE_AND_FAIL(conn_tethering_p, error);
     if (*conn_tethering_p == ConnectionTethering::kConfirmed)
       conn_type = ConnectionType::kCellular;
@@ -480,11 +513,11 @@
   // Check whether the device policy specifically allows this connection.
   if (device_policy_can_override) {
     DevicePolicyProvider* const dp_provider = state->device_policy_provider();
-    const bool* device_policy_is_loaded_p = ec->GetValue(
-        dp_provider->var_device_policy_is_loaded());
+    const bool* device_policy_is_loaded_p =
+        ec->GetValue(dp_provider->var_device_policy_is_loaded());
     if (device_policy_is_loaded_p && *device_policy_is_loaded_p) {
-      const set<ConnectionType>* allowed_conn_types_p = ec->GetValue(
-          dp_provider->var_allowed_connection_types_for_update());
+      const set<ConnectionType>* allowed_conn_types_p =
+          ec->GetValue(dp_provider->var_allowed_connection_types_for_update());
       if (allowed_conn_types_p) {
         if (allowed_conn_types_p->count(conn_type)) {
           *result = true;
@@ -493,8 +526,8 @@
       } else if (conn_type == ConnectionType::kCellular) {
         // Local user settings can allow updates over cellular iff a policy was
         // loaded but no allowed connections were specified in it.
-        const bool* update_over_cellular_allowed_p = ec->GetValue(
-            state->updater_provider()->var_cellular_enabled());
+        const bool* update_over_cellular_allowed_p =
+            ec->GetValue(state->updater_provider()->var_cellular_enabled());
         if (update_over_cellular_allowed_p && *update_over_cellular_allowed_p)
           *result = true;
       }
@@ -514,11 +547,11 @@
   // explicitly allowed, we allow it if the device is enterprise enrolled (that
   // is, missing or empty owner string).
   DevicePolicyProvider* const dp_provider = state->device_policy_provider();
-  const bool* device_policy_is_loaded_p = ec->GetValue(
-      dp_provider->var_device_policy_is_loaded());
+  const bool* device_policy_is_loaded_p =
+      ec->GetValue(dp_provider->var_device_policy_is_loaded());
   if (device_policy_is_loaded_p && *device_policy_is_loaded_p) {
-    const bool* policy_au_p2p_enabled_p = ec->GetValue(
-        dp_provider->var_au_p2p_enabled());
+    const bool* policy_au_p2p_enabled_p =
+        ec->GetValue(dp_provider->var_au_p2p_enabled());
     if (policy_au_p2p_enabled_p) {
       enabled = *policy_au_p2p_enabled_p;
     } else {
@@ -531,8 +564,8 @@
   // Enable P2P, if so mandated by the updater configuration. This is additive
   // to whether or not P2P is enabled by device policy.
   if (!enabled) {
-    const bool* updater_p2p_enabled_p = ec->GetValue(
-        state->updater_provider()->var_p2p_enabled());
+    const bool* updater_p2p_enabled_p =
+        ec->GetValue(state->updater_provider()->var_p2p_enabled());
     enabled = updater_p2p_enabled_p && *updater_p2p_enabled_p;
   }
 
@@ -552,7 +585,9 @@
 }
 
 EvalStatus ChromeOSPolicy::UpdateBackoffAndDownloadUrl(
-    EvaluationContext* ec, State* state, string* error,
+    EvaluationContext* ec,
+    State* state,
+    string* error,
     UpdateBackoffAndDownloadUrlResult* result,
     const UpdateState& update_state) const {
   // Sanity checks.
@@ -564,15 +599,15 @@
   result->url_idx = -1;
   result->url_num_errors = 0;
 
-  const bool* is_official_build_p = ec->GetValue(
-      state->system_provider()->var_is_official_build());
+  const bool* is_official_build_p =
+      ec->GetValue(state->system_provider()->var_is_official_build());
   bool is_official_build = (is_official_build_p ? *is_official_build_p : true);
 
   // Check whether backoff is enabled.
   bool may_backoff = false;
   if (update_state.is_backoff_disabled) {
     LOG(INFO) << "Backoff disabled by Omaha.";
-  } else if (update_state.is_interactive) {
+  } else if (update_state.interactive) {
     LOG(INFO) << "No backoff for interactive updates.";
   } else if (update_state.is_delta_payload) {
     LOG(INFO) << "No backoff for delta payloads.";
@@ -594,11 +629,11 @@
   bool http_allowed = true;
   if (is_official_build) {
     DevicePolicyProvider* const dp_provider = state->device_policy_provider();
-    const bool* device_policy_is_loaded_p = ec->GetValue(
-        dp_provider->var_device_policy_is_loaded());
+    const bool* device_policy_is_loaded_p =
+        ec->GetValue(dp_provider->var_device_policy_is_loaded());
     if (device_policy_is_loaded_p && *device_policy_is_loaded_p) {
-      const bool* policy_http_downloads_enabled_p = ec->GetValue(
-          dp_provider->var_http_downloads_enabled());
+      const bool* policy_http_downloads_enabled_p =
+          ec->GetValue(dp_provider->var_http_downloads_enabled());
       http_allowed = (!policy_http_downloads_enabled_p ||
                       *policy_http_downloads_enabled_p);
     }
@@ -720,8 +755,8 @@
     const uint64_t* seed = ec->GetValue(state->random_provider()->var_seed());
     POLICY_CHECK_VALUE_AND_FAIL(seed, error);
     PRNG prng(*seed);
-    int exp = min(update_state.num_failures,
-                       static_cast<int>(sizeof(int)) * 8 - 2);
+    int exp =
+        min(update_state.num_failures, static_cast<int>(sizeof(int)) * 8 - 2);
     TimeDelta backoff_interval = TimeDelta::FromDays(min(
         1 << exp,
         kNextUpdateCheckPolicyConstants.attempt_backoff_max_interval_in_days));
@@ -761,14 +796,14 @@
   DevicePolicyProvider* const dp_provider = state->device_policy_provider();
 
   // Ensure that a device policy is loaded.
-  const bool* device_policy_is_loaded_p = ec->GetValue(
-      dp_provider->var_device_policy_is_loaded());
+  const bool* device_policy_is_loaded_p =
+      ec->GetValue(dp_provider->var_device_policy_is_loaded());
   if (!(device_policy_is_loaded_p && *device_policy_is_loaded_p))
     return EvalStatus::kSucceeded;
 
   // Is scattering enabled by policy?
-  const TimeDelta* scatter_factor_p = ec->GetValue(
-      dp_provider->var_scatter_factor());
+  const TimeDelta* scatter_factor_p =
+      ec->GetValue(dp_provider->var_scatter_factor());
   if (!scatter_factor_p || *scatter_factor_p == kZeroInterval)
     return EvalStatus::kSucceeded;
 
@@ -800,9 +835,8 @@
   // one.
   int check_threshold = update_state.scatter_check_threshold;
   if (check_threshold == 0) {
-    check_threshold = prng.RandMinMax(
-        update_state.scatter_check_threshold_min,
-        update_state.scatter_check_threshold_max);
+    check_threshold = prng.RandMinMax(update_state.scatter_check_threshold_min,
+                                      update_state.scatter_check_threshold_max);
   }
 
   // If the update check threshold is not within allowed range then nullify it.
diff --git a/update_manager/chromeos_policy.h b/update_manager/chromeos_policy.h
index 67c0d15..ded5164 100644
--- a/update_manager/chromeos_policy.h
+++ b/update_manager/chromeos_policy.h
@@ -54,9 +54,10 @@
   ~ChromeOSPolicy() override {}
 
   // Policy overrides.
-  EvalStatus UpdateCheckAllowed(
-      EvaluationContext* ec, State* state, std::string* error,
-      UpdateCheckParams* result) const override;
+  EvalStatus UpdateCheckAllowed(EvaluationContext* ec,
+                                State* state,
+                                std::string* error,
+                                UpdateCheckParams* result) const override;
 
   EvalStatus UpdateCanBeApplied(
       EvaluationContext* ec,
@@ -65,31 +66,27 @@
       chromeos_update_engine::ErrorCode* result,
       chromeos_update_engine::InstallPlan* install_plan) const override;
 
-  EvalStatus UpdateCanStart(
-      EvaluationContext* ec,
-      State* state,
-      std::string* error,
-      UpdateDownloadParams* result,
-      UpdateState update_state) const override;
+  EvalStatus UpdateCanStart(EvaluationContext* ec,
+                            State* state,
+                            std::string* error,
+                            UpdateDownloadParams* result,
+                            UpdateState update_state) const override;
 
-  EvalStatus UpdateDownloadAllowed(
-      EvaluationContext* ec,
-      State* state,
-      std::string* error,
-      bool* result) const override;
+  EvalStatus UpdateDownloadAllowed(EvaluationContext* ec,
+                                   State* state,
+                                   std::string* error,
+                                   bool* result) const override;
 
-  EvalStatus P2PEnabled(
-      EvaluationContext* ec,
-      State* state,
-      std::string* error,
-      bool* result) const override;
+  EvalStatus P2PEnabled(EvaluationContext* ec,
+                        State* state,
+                        std::string* error,
+                        bool* result) const override;
 
-  EvalStatus P2PEnabledChanged(
-      EvaluationContext* ec,
-      State* state,
-      std::string* error,
-      bool* result,
-      bool prev_result) const override;
+  EvalStatus P2PEnabledChanged(EvaluationContext* ec,
+                               State* state,
+                               std::string* error,
+                               bool* result,
+                               bool prev_result) const override;
 
  protected:
   // Policy override.
@@ -114,6 +111,8 @@
               UpdateCanStartAllowedP2PDownloadingBlockedDueToNumAttempts);
   FRIEND_TEST(UmChromeOSPolicyTest,
               UpdateCanStartAllowedP2PDownloadingBlockedDueToAttemptsPeriod);
+  FRIEND_TEST(UmChromeOSPolicyTest,
+              UpdateCheckAllowedNextUpdateCheckOutsideDisallowedInterval);
 
   // Auxiliary constant (zero by default).
   const base::TimeDelta kZeroInterval;
@@ -149,7 +148,9 @@
   // In any of these cases, returns |EvalStatus::kSucceeded|. If an error
   // occurred, returns |EvalStatus::kFailed|.
   EvalStatus UpdateBackoffAndDownloadUrl(
-      EvaluationContext* ec, State* state, std::string* error,
+      EvaluationContext* ec,
+      State* state,
+      std::string* error,
       UpdateBackoffAndDownloadUrlResult* result,
       const UpdateState& update_state) const;
 
@@ -165,7 +166,8 @@
   // threshold, or zero if no such threshold is known. If not scattering, or if
   // any of the scattering values has changed, returns |EvalStatus::kSucceeded|;
   // otherwise, |EvalStatus::kAskMeAgainLater|.
-  EvalStatus UpdateScattering(EvaluationContext* ec, State* state,
+  EvalStatus UpdateScattering(EvaluationContext* ec,
+                              State* state,
                               std::string* error,
                               UpdateScatteringResult* result,
                               const UpdateState& update_state) const;
diff --git a/update_manager/chromeos_policy_unittest.cc b/update_manager/chromeos_policy_unittest.cc
index df29e8c..5341ebb 100644
--- a/update_manager/chromeos_policy_unittest.cc
+++ b/update_manager/chromeos_policy_unittest.cc
@@ -21,12 +21,14 @@
 
 #include "update_engine/update_manager/next_update_check_policy_impl.h"
 #include "update_engine/update_manager/policy_test_utils.h"
+#include "update_engine/update_manager/weekly_time.h"
 
 using base::Time;
 using base::TimeDelta;
 using chromeos_update_engine::ConnectionTethering;
 using chromeos_update_engine::ConnectionType;
 using chromeos_update_engine::ErrorCode;
+using chromeos_update_engine::InstallPlan;
 using std::set;
 using std::string;
 
@@ -47,8 +49,7 @@
     UmPolicyTestBase::SetUpDefaultState();
 
     // OOBE is enabled by default.
-    fake_state_.config_provider()->var_is_oobe_enabled()->reset(
-        new bool(true));
+    fake_state_.config_provider()->var_is_oobe_enabled()->reset(new bool(true));
 
     // For the purpose of the tests, this is an official build and OOBE was
     // completed.
@@ -60,10 +61,10 @@
     fake_state_.system_provider()->var_num_slots()->reset(new unsigned int(2));
 
     // Connection is wifi, untethered.
-    fake_state_.shill_provider()->var_conn_type()->
-        reset(new ConnectionType(ConnectionType::kWifi));
-    fake_state_.shill_provider()->var_conn_tethering()->
-        reset(new ConnectionTethering(ConnectionTethering::kNotDetected));
+    fake_state_.shill_provider()->var_conn_type()->reset(
+        new ConnectionType(ConnectionType::kWifi));
+    fake_state_.shill_provider()->var_conn_tethering()->reset(
+        new ConnectionTethering(ConnectionTethering::kNotDetected));
   }
 
   // Sets up a default device policy that does not impose any restrictions
@@ -73,16 +74,21 @@
         new bool(true));
     fake_state_.device_policy_provider()->var_update_disabled()->reset(
         new bool(false));
-    fake_state_.device_policy_provider()->
-        var_allowed_connection_types_for_update()->reset(nullptr);
+    fake_state_.device_policy_provider()
+        ->var_allowed_connection_types_for_update()
+        ->reset(nullptr);
     fake_state_.device_policy_provider()->var_scatter_factor()->reset(
         new TimeDelta());
     fake_state_.device_policy_provider()->var_http_downloads_enabled()->reset(
         new bool(true));
     fake_state_.device_policy_provider()->var_au_p2p_enabled()->reset(
         new bool(false));
-    fake_state_.device_policy_provider()->var_release_channel_delegated()->
-        reset(new bool(true));
+    fake_state_.device_policy_provider()
+        ->var_release_channel_delegated()
+        ->reset(new bool(true));
+    fake_state_.device_policy_provider()
+        ->var_disallowed_time_intervals()
+        ->reset(new WeeklyTimeIntervalVector());
   }
 
   // Configures the policy to return a desired value from UpdateCheckAllowed by
@@ -105,6 +111,67 @@
       curr_time -= TimeDelta::FromSeconds(1);
     fake_clock_.SetWallclockTime(curr_time);
   }
+
+  // Sets the policies required for a kiosk app to control Chrome OS version:
+  // - AllowKioskAppControlChromeVersion = True
+  // - UpdateDisabled = True
+  // In the kiosk app manifest:
+  // - RequiredPlatformVersion = 1234.
+  void SetKioskAppControlsChromeOsVersion() {
+    fake_state_.device_policy_provider()
+        ->var_allow_kiosk_app_control_chrome_version()
+        ->reset(new bool(true));
+    fake_state_.device_policy_provider()->var_update_disabled()->reset(
+        new bool(true));
+    fake_state_.system_provider()->var_kiosk_required_platform_version()->reset(
+        new string("1234."));
+  }
+
+  // Sets up a test with the value of RollbackToTargetVersion policy (and
+  // whether it's set), and returns the value of
+  // UpdateCheckParams.rollback_allowed.
+  bool TestRollbackAllowed(bool set_policy,
+                           RollbackToTargetVersion rollback_to_target_version) {
+    // Update check is allowed, response includes attributes for use in the
+    // request.
+    SetUpdateCheckAllowed(true);
+
+    if (set_policy) {
+      // Override RollbackToTargetVersion device policy attribute.
+      fake_state_.device_policy_provider()
+          ->var_rollback_to_target_version()
+          ->reset(new RollbackToTargetVersion(rollback_to_target_version));
+    }
+
+    UpdateCheckParams result;
+    ExpectPolicyStatus(
+        EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
+    return result.rollback_allowed;
+  }
+
+  // Sets up a test with the given intervals and the current fake wallclock
+  // time.
+  void TestDisallowedTimeIntervals(const WeeklyTimeIntervalVector& intervals,
+                                   const ErrorCode& expected_error_code,
+                                   bool kiosk) {
+    SetUpDefaultTimeProvider();
+    if (kiosk)
+      fake_state_.device_policy_provider()
+          ->var_auto_launched_kiosk_app_id()
+          ->reset(new string("myapp"));
+    fake_state_.device_policy_provider()
+        ->var_disallowed_time_intervals()
+        ->reset(new WeeklyTimeIntervalVector(intervals));
+
+    // Check that UpdateCanBeApplied returns |expected_error_code|.
+    ErrorCode result;
+    InstallPlan install_plan;
+    ExpectPolicyStatus(EvalStatus::kSucceeded,
+                       &Policy::UpdateCanBeApplied,
+                       &result,
+                       &install_plan);
+    EXPECT_EQ(result, expected_error_code);
+  }
 };
 
 TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedWaitsForTheTimeout) {
@@ -129,18 +196,18 @@
   fake_state_.updater_provider()->var_last_checked_time()->reset(
       new Time(last_checked_time));
   fake_clock_.SetWallclockTime(next_update_check - TimeDelta::FromSeconds(1));
-  ExpectPolicyStatus(EvalStatus::kAskMeAgainLater,
-                     &Policy::UpdateCheckAllowed, &result);
+  ExpectPolicyStatus(
+      EvalStatus::kAskMeAgainLater, &Policy::UpdateCheckAllowed, &result);
 
   SetUpDefaultClock();
   SetUpDefaultState();
   fake_state_.updater_provider()->var_last_checked_time()->reset(
       new Time(last_checked_time));
   fake_clock_.SetWallclockTime(next_update_check + TimeDelta::FromSeconds(1));
-  ExpectPolicyStatus(EvalStatus::kSucceeded,
-                     &Policy::UpdateCheckAllowed, &result);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
-  EXPECT_FALSE(result.is_interactive);
+  EXPECT_FALSE(result.interactive);
 }
 
 TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedWaitsForOOBE) {
@@ -162,12 +229,11 @@
   fake_state_.updater_provider()->var_last_checked_time()->reset(
       new Time(last_checked_time));
   fake_clock_.SetWallclockTime(next_update_check + TimeDelta::FromSeconds(1));
-  fake_state_.system_provider()->var_is_oobe_complete()->reset(
-      new bool(false));
+  fake_state_.system_provider()->var_is_oobe_complete()->reset(new bool(false));
 
   UpdateCheckParams result;
-  ExpectPolicyStatus(EvalStatus::kAskMeAgainLater,
-                     &Policy::UpdateCheckAllowed, &result);
+  ExpectPolicyStatus(
+      EvalStatus::kAskMeAgainLater, &Policy::UpdateCheckAllowed, &result);
 
   // Now check that it is allowed if OOBE is completed.
   SetUpDefaultClock();
@@ -175,10 +241,10 @@
   fake_state_.updater_provider()->var_last_checked_time()->reset(
       new Time(last_checked_time));
   fake_clock_.SetWallclockTime(next_update_check + TimeDelta::FromSeconds(1));
-  ExpectPolicyStatus(EvalStatus::kSucceeded,
-                     &Policy::UpdateCheckAllowed, &result);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
-  EXPECT_FALSE(result.is_interactive);
+  EXPECT_FALSE(result.interactive);
 }
 
 TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedWithAttributes) {
@@ -187,20 +253,82 @@
   SetUpdateCheckAllowed(true);
 
   // Override specific device policy attributes.
-  fake_state_.device_policy_provider()->var_target_version_prefix()->
-      reset(new string("1.2"));
-  fake_state_.device_policy_provider()->var_release_channel_delegated()->
-      reset(new bool(false));
-  fake_state_.device_policy_provider()->var_release_channel()->
-      reset(new string("foo-channel"));
+  fake_state_.device_policy_provider()->var_target_version_prefix()->reset(
+      new string("1.2"));
+  fake_state_.device_policy_provider()
+      ->var_rollback_allowed_milestones()
+      ->reset(new int(5));
+  fake_state_.device_policy_provider()->var_release_channel_delegated()->reset(
+      new bool(false));
+  fake_state_.device_policy_provider()->var_release_channel()->reset(
+      new string("foo-channel"));
 
   UpdateCheckParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded,
-                     &Policy::UpdateCheckAllowed, &result);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
   EXPECT_EQ("1.2", result.target_version_prefix);
+  EXPECT_EQ(5, result.rollback_allowed_milestones);
   EXPECT_EQ("foo-channel", result.target_channel);
-  EXPECT_FALSE(result.is_interactive);
+  EXPECT_FALSE(result.interactive);
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedRollbackAndPowerwash) {
+  EXPECT_TRUE(TestRollbackAllowed(
+      true, RollbackToTargetVersion::kRollbackAndPowerwash));
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedRollbackAndRestoreIfPossible) {
+  // We're doing rollback even if we don't support data save and restore.
+  EXPECT_TRUE(TestRollbackAllowed(
+      true, RollbackToTargetVersion::kRollbackAndRestoreIfPossible));
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedRollbackOnlyIfRestorePossible) {
+  // We're not allowed to do rollback until we support data save and restore.
+  EXPECT_FALSE(TestRollbackAllowed(
+      true, RollbackToTargetVersion::kRollbackOnlyIfRestorePossible));
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedRollbackDisabled) {
+  EXPECT_FALSE(TestRollbackAllowed(true, RollbackToTargetVersion::kDisabled));
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedRollbackUnspecified) {
+  EXPECT_FALSE(
+      TestRollbackAllowed(true, RollbackToTargetVersion::kUnspecified));
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedRollbackNotSet) {
+  EXPECT_FALSE(
+      TestRollbackAllowed(false, RollbackToTargetVersion::kUnspecified));
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedKioskRollbackAllowed) {
+  SetKioskAppControlsChromeOsVersion();
+
+  EXPECT_TRUE(TestRollbackAllowed(
+      true, RollbackToTargetVersion::kRollbackAndPowerwash));
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedKioskRollbackDisabled) {
+  SetKioskAppControlsChromeOsVersion();
+
+  EXPECT_FALSE(TestRollbackAllowed(true, RollbackToTargetVersion::kDisabled));
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedKioskRollbackUnspecified) {
+  SetKioskAppControlsChromeOsVersion();
+
+  EXPECT_FALSE(
+      TestRollbackAllowed(true, RollbackToTargetVersion::kUnspecified));
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedKioskRollbackNotSet) {
+  SetKioskAppControlsChromeOsVersion();
+
+  EXPECT_FALSE(
+      TestRollbackAllowed(false, RollbackToTargetVersion::kUnspecified));
 }
 
 TEST_F(UmChromeOSPolicyTest,
@@ -212,8 +340,8 @@
       new bool(false));
 
   UpdateCheckParams result;
-  ExpectPolicyStatus(EvalStatus::kAskMeAgainLater,
-                     &Policy::UpdateCheckAllowed, &result);
+  ExpectPolicyStatus(
+      EvalStatus::kAskMeAgainLater, &Policy::UpdateCheckAllowed, &result);
 }
 
 TEST_F(UmChromeOSPolicyTest,
@@ -225,8 +353,8 @@
   fake_state_.system_provider()->var_num_slots()->reset(new unsigned int(1));
 
   UpdateCheckParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded,
-                     &Policy::UpdateCheckAllowed, &result);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
   EXPECT_FALSE(result.updates_enabled);
 }
 
@@ -239,8 +367,8 @@
       new bool(true));
 
   UpdateCheckParams result;
-  ExpectPolicyStatus(EvalStatus::kAskMeAgainLater,
-                     &Policy::UpdateCheckAllowed, &result);
+  ExpectPolicyStatus(
+      EvalStatus::kAskMeAgainLater, &Policy::UpdateCheckAllowed, &result);
 }
 
 TEST_F(UmChromeOSPolicyTest,
@@ -253,10 +381,10 @@
       new UpdateRequestStatus(UpdateRequestStatus::kInteractive));
 
   UpdateCheckParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded,
-                     &Policy::UpdateCheckAllowed, &result);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
-  EXPECT_TRUE(result.is_interactive);
+  EXPECT_TRUE(result.interactive);
 }
 
 TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedForcedUpdateRequestedPeriodic) {
@@ -268,32 +396,24 @@
       new UpdateRequestStatus(UpdateRequestStatus::kPeriodic));
 
   UpdateCheckParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded,
-                     &Policy::UpdateCheckAllowed, &result);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
-  EXPECT_FALSE(result.is_interactive);
+  EXPECT_FALSE(result.interactive);
 }
 
 TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedKioskPin) {
   // Update check is allowed.
   SetUpdateCheckAllowed(true);
 
-  // A typical setup for kiosk pin policy: AU disabled, allow kiosk to pin
-  // and there is a kiosk required platform version.
-  fake_state_.device_policy_provider()->var_update_disabled()->reset(
-      new bool(true));
-  fake_state_.device_policy_provider()
-      ->var_allow_kiosk_app_control_chrome_version()
-      ->reset(new bool(true));
-  fake_state_.system_provider()->var_kiosk_required_platform_version()->reset(
-      new string("1234.0.0"));
+  SetKioskAppControlsChromeOsVersion();
 
   UpdateCheckParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded,
-                     &Policy::UpdateCheckAllowed, &result);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
-  EXPECT_EQ("1234.0.0", result.target_version_prefix);
-  EXPECT_FALSE(result.is_interactive);
+  EXPECT_EQ("1234.", result.target_version_prefix);
+  EXPECT_FALSE(result.interactive);
 }
 
 TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedDisabledWhenNoKioskPin) {
@@ -309,8 +429,8 @@
       ->reset(new bool(false));
 
   UpdateCheckParams result;
-  ExpectPolicyStatus(EvalStatus::kAskMeAgainLater,
-                     &Policy::UpdateCheckAllowed, &result);
+  ExpectPolicyStatus(
+      EvalStatus::kAskMeAgainLater, &Policy::UpdateCheckAllowed, &result);
 }
 
 TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedKioskPinWithNoRequiredVersion) {
@@ -329,11 +449,11 @@
       new string());
 
   UpdateCheckParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded,
-                     &Policy::UpdateCheckAllowed, &result);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
   EXPECT_TRUE(result.target_version_prefix.empty());
-  EXPECT_FALSE(result.is_interactive);
+  EXPECT_FALSE(result.interactive);
 }
 
 TEST_F(UmChromeOSPolicyTest,
@@ -349,8 +469,8 @@
       nullptr);
 
   UpdateCheckParams result;
-  ExpectPolicyStatus(EvalStatus::kAskMeAgainLater,
-                     &Policy::UpdateCheckAllowed, &result);
+  ExpectPolicyStatus(
+      EvalStatus::kAskMeAgainLater, &Policy::UpdateCheckAllowed, &result);
 }
 
 TEST_F(UmChromeOSPolicyTest, UpdateCanStartFailsCheckAllowedError) {
@@ -363,8 +483,8 @@
   // Check that the UpdateCanStart fails.
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromMinutes(10));
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kFailed,
-                     &Policy::UpdateCanStart, &result, update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kFailed, &Policy::UpdateCanStart, &result, update_state);
 }
 
 TEST_F(UmChromeOSPolicyTest, UpdateCanStartNotAllowedCheckDue) {
@@ -376,8 +496,8 @@
   // Check that the UpdateCanStart returns false.
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromMinutes(10));
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded,
-                     &Policy::UpdateCanStart, &result, update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_FALSE(result.update_can_start);
   EXPECT_EQ(UpdateCannotStartReason::kCheckDue, result.cannot_start_reason);
   EXPECT_EQ(0, result.download_url_idx);
@@ -394,8 +514,8 @@
   // Check that the UpdateCanStart returns true with no further attributes.
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromMinutes(10));
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded,
-                     &Policy::UpdateCanStart, &result, update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_FALSE(result.p2p_downloading_allowed);
   EXPECT_FALSE(result.p2p_sharing_allowed);
@@ -414,8 +534,8 @@
   // Check that the UpdateCanStart returns true.
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromMinutes(10));
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded,
-                     &Policy::UpdateCanStart, &result, update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_FALSE(result.p2p_downloading_allowed);
   EXPECT_FALSE(result.p2p_sharing_allowed);
@@ -436,17 +556,19 @@
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromSeconds(10));
   update_state.download_errors_max = 1;
   update_state.download_errors.emplace_back(
-      0, ErrorCode::kDownloadTransferError,
+      0,
+      ErrorCode::kDownloadTransferError,
       curr_time - TimeDelta::FromSeconds(8));
   update_state.download_errors.emplace_back(
-      0, ErrorCode::kDownloadTransferError,
+      0,
+      ErrorCode::kDownloadTransferError,
       curr_time - TimeDelta::FromSeconds(2));
 
   // Check that UpdateCanStart returns false and a new backoff expiry is
   // generated.
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_FALSE(result.update_can_start);
   EXPECT_EQ(UpdateCannotStartReason::kBackoff, result.cannot_start_reason);
   EXPECT_TRUE(result.do_increment_failures);
@@ -464,10 +586,12 @@
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromSeconds(10));
   update_state.download_errors_max = 1;
   update_state.download_errors.emplace_back(
-      0, ErrorCode::kDownloadTransferError,
+      0,
+      ErrorCode::kDownloadTransferError,
       curr_time - TimeDelta::FromSeconds(8));
   update_state.download_errors.emplace_back(
-      0, ErrorCode::kDownloadTransferError,
+      0,
+      ErrorCode::kDownloadTransferError,
       curr_time - TimeDelta::FromSeconds(2));
   update_state.failures_last_updated = curr_time;
   update_state.backoff_expiry = curr_time + TimeDelta::FromMinutes(3);
@@ -475,8 +599,10 @@
   // Check that UpdateCanStart returns false and a new backoff expiry is
   // generated.
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kAskMeAgainLater, &Policy::UpdateCanStart,
-                     &result, update_state);
+  ExpectPolicyStatus(EvalStatus::kAskMeAgainLater,
+                     &Policy::UpdateCanStart,
+                     &result,
+                     update_state);
   EXPECT_FALSE(result.update_can_start);
   EXPECT_EQ(UpdateCannotStartReason::kBackoff, result.cannot_start_reason);
   EXPECT_FALSE(result.do_increment_failures);
@@ -493,10 +619,12 @@
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromSeconds(10));
   update_state.download_errors_max = 1;
   update_state.download_errors.emplace_back(
-      0, ErrorCode::kDownloadTransferError,
+      0,
+      ErrorCode::kDownloadTransferError,
       curr_time - TimeDelta::FromSeconds(8));
   update_state.download_errors.emplace_back(
-      0, ErrorCode::kDownloadTransferError,
+      0,
+      ErrorCode::kDownloadTransferError,
       curr_time - TimeDelta::FromSeconds(2));
   update_state.failures_last_updated = curr_time - TimeDelta::FromSeconds(1);
   update_state.backoff_expiry = curr_time - TimeDelta::FromSeconds(1);
@@ -504,8 +632,8 @@
   // Check that UpdateCanStart returns false and a new backoff expiry is
   // generated.
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart,
-                     &result, update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_EQ(UpdateCannotStartReason::kUndefined, result.cannot_start_reason);
   EXPECT_EQ(0, result.download_url_idx);
@@ -525,18 +653,20 @@
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromSeconds(10));
   update_state.download_errors_max = 1;
   update_state.download_errors.emplace_back(
-      0, ErrorCode::kDownloadTransferError,
+      0,
+      ErrorCode::kDownloadTransferError,
       curr_time - TimeDelta::FromSeconds(8));
   update_state.download_errors.emplace_back(
-      0, ErrorCode::kDownloadTransferError,
+      0,
+      ErrorCode::kDownloadTransferError,
       curr_time - TimeDelta::FromSeconds(2));
   update_state.is_backoff_disabled = true;
 
   // Check that UpdateCanStart returns false and a new backoff expiry is
   // generated.
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_EQ(UpdateCannotStartReason::kUndefined, result.cannot_start_reason);
   EXPECT_EQ(0, result.download_url_idx);
@@ -556,18 +686,20 @@
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromSeconds(10));
   update_state.download_errors_max = 1;
   update_state.download_errors.emplace_back(
-      0, ErrorCode::kDownloadTransferError,
+      0,
+      ErrorCode::kDownloadTransferError,
       curr_time - TimeDelta::FromSeconds(8));
   update_state.download_errors.emplace_back(
-      0, ErrorCode::kDownloadTransferError,
+      0,
+      ErrorCode::kDownloadTransferError,
       curr_time - TimeDelta::FromSeconds(2));
-  update_state.is_interactive = true;
+  update_state.interactive = true;
 
   // Check that UpdateCanStart returns false and a new backoff expiry is
   // generated.
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_EQ(UpdateCannotStartReason::kUndefined, result.cannot_start_reason);
   EXPECT_EQ(0, result.download_url_idx);
@@ -587,18 +719,20 @@
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromSeconds(10));
   update_state.download_errors_max = 1;
   update_state.download_errors.emplace_back(
-      0, ErrorCode::kDownloadTransferError,
+      0,
+      ErrorCode::kDownloadTransferError,
       curr_time - TimeDelta::FromSeconds(8));
   update_state.download_errors.emplace_back(
-      0, ErrorCode::kDownloadTransferError,
+      0,
+      ErrorCode::kDownloadTransferError,
       curr_time - TimeDelta::FromSeconds(2));
   update_state.is_delta_payload = true;
 
   // Check that UpdateCanStart returns false and a new backoff expiry is
   // generated.
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_EQ(UpdateCannotStartReason::kUndefined, result.cannot_start_reason);
   EXPECT_EQ(0, result.download_url_idx);
@@ -618,20 +752,22 @@
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromSeconds(10));
   update_state.download_errors_max = 1;
   update_state.download_errors.emplace_back(
-      0, ErrorCode::kDownloadTransferError,
+      0,
+      ErrorCode::kDownloadTransferError,
       curr_time - TimeDelta::FromSeconds(8));
   update_state.download_errors.emplace_back(
-      0, ErrorCode::kDownloadTransferError,
+      0,
+      ErrorCode::kDownloadTransferError,
       curr_time - TimeDelta::FromSeconds(2));
 
-  fake_state_.system_provider()->var_is_official_build()->
-      reset(new bool(false));
+  fake_state_.system_provider()->var_is_official_build()->reset(
+      new bool(false));
 
   // Check that UpdateCanStart returns false and a new backoff expiry is
   // generated.
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_EQ(UpdateCannotStartReason::kUndefined, result.cannot_start_reason);
   EXPECT_EQ(0, result.download_url_idx);
@@ -657,8 +793,8 @@
   // Check that the UpdateCanStart fails.
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromSeconds(1));
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kFailed,
-                     &Policy::UpdateCanStart, &result, update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kFailed, &Policy::UpdateCanStart, &result, update_state);
 }
 
 TEST_F(UmChromeOSPolicyTest,
@@ -671,14 +807,13 @@
   fake_state_.device_policy_provider()->var_scatter_factor()->reset(
       new TimeDelta(TimeDelta::FromMinutes(2)));
 
-
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromSeconds(1));
 
   // Check that the UpdateCanStart returns false and a new wait period
   // generated.
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_FALSE(result.update_can_start);
   EXPECT_EQ(UpdateCannotStartReason::kScattering, result.cannot_start_reason);
   EXPECT_LT(TimeDelta(), result.scatter_wait_period);
@@ -701,8 +836,10 @@
   // Check that the UpdateCanStart returns false and a new wait period
   // generated.
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kAskMeAgainLater, &Policy::UpdateCanStart,
-                     &result, update_state);
+  ExpectPolicyStatus(EvalStatus::kAskMeAgainLater,
+                     &Policy::UpdateCanStart,
+                     &result,
+                     update_state);
   EXPECT_FALSE(result.update_can_start);
   EXPECT_EQ(UpdateCannotStartReason::kScattering, result.cannot_start_reason);
   EXPECT_EQ(TimeDelta::FromSeconds(35), result.scatter_wait_period);
@@ -727,8 +864,8 @@
 
   // Check that the UpdateCanStart returns false.
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_FALSE(result.update_can_start);
   EXPECT_EQ(UpdateCannotStartReason::kScattering, result.cannot_start_reason);
   EXPECT_LE(2, result.scatter_check_threshold);
@@ -751,8 +888,8 @@
 
   // Check that the UpdateCanStart returns false.
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_FALSE(result.update_can_start);
   EXPECT_EQ(UpdateCannotStartReason::kScattering, result.cannot_start_reason);
   EXPECT_EQ(3, result.scatter_check_threshold);
@@ -776,8 +913,8 @@
 
   // Check that the UpdateCanStart returns true.
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_EQ(TimeDelta(), result.scatter_wait_period);
   EXPECT_EQ(0, result.scatter_check_threshold);
@@ -798,15 +935,15 @@
       new TimeDelta(TimeDelta::FromSeconds(1)));
 
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromSeconds(1));
-  update_state.is_interactive = true;
+  update_state.interactive = true;
   update_state.scatter_check_threshold = 0;
   update_state.scatter_check_threshold_min = 2;
   update_state.scatter_check_threshold_max = 5;
 
   // Check that the UpdateCanStart returns true.
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_EQ(TimeDelta(), result.scatter_wait_period);
   EXPECT_EQ(0, result.scatter_check_threshold);
@@ -816,8 +953,7 @@
   EXPECT_FALSE(result.do_increment_failures);
 }
 
-TEST_F(UmChromeOSPolicyTest,
-       UpdateCanStartAllowedOobePreventsScattering) {
+TEST_F(UmChromeOSPolicyTest, UpdateCanStartAllowedOobePreventsScattering) {
   // The UpdateCanStart policy returns true; device policy is loaded and
   // scattering would have applied, except that OOBE was not completed and so it
   // is suppressed.
@@ -828,15 +964,15 @@
   fake_state_.system_provider()->var_is_oobe_complete()->reset(new bool(false));
 
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromSeconds(1));
-  update_state.is_interactive = true;
+  update_state.interactive = true;
   update_state.scatter_check_threshold = 0;
   update_state.scatter_check_threshold_min = 2;
   update_state.scatter_check_threshold_max = 5;
 
   // Check that the UpdateCanStart returns true.
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_EQ(TimeDelta(), result.scatter_wait_period);
   EXPECT_EQ(0, result.scatter_check_threshold);
@@ -861,8 +997,8 @@
   // Check that the UpdateCanStart returns true.
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromMinutes(10));
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_TRUE(result.p2p_downloading_allowed);
   EXPECT_TRUE(result.p2p_sharing_allowed);
@@ -885,8 +1021,8 @@
   // Check that the UpdateCanStart returns true.
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromMinutes(10));
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_TRUE(result.p2p_downloading_allowed);
   EXPECT_TRUE(result.p2p_sharing_allowed);
@@ -914,15 +1050,14 @@
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromMinutes(10));
   update_state.p2p_downloading_disabled = true;
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_FALSE(result.p2p_downloading_allowed);
   EXPECT_TRUE(result.p2p_sharing_allowed);
 }
 
-TEST_F(UmChromeOSPolicyTest,
-       UpdateCanStartAllowedP2PSharingBlockedDueToOmaha) {
+TEST_F(UmChromeOSPolicyTest, UpdateCanStartAllowedP2PSharingBlockedDueToOmaha) {
   // The UpdateCanStart policy returns true; device policy permits HTTP, but
   // policy blocks P2P sharing because Omaha forbids it.  P2P downloading is
   // still permitted.
@@ -939,8 +1074,8 @@
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromMinutes(10));
   update_state.p2p_sharing_disabled = true;
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_TRUE(result.p2p_downloading_allowed);
   EXPECT_FALSE(result.p2p_sharing_allowed);
@@ -964,8 +1099,8 @@
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromMinutes(10));
   update_state.p2p_num_attempts = ChromeOSPolicy::kMaxP2PAttempts;
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_FALSE(result.p2p_downloading_allowed);
   EXPECT_TRUE(result.p2p_sharing_allowed);
@@ -990,11 +1125,11 @@
   update_state.p2p_num_attempts = 1;
   update_state.p2p_first_attempted =
       fake_clock_.GetWallclockTime() -
-      TimeDelta::FromSeconds(
-          ChromeOSPolicy::kMaxP2PAttemptsPeriodInSeconds + 1);
+      TimeDelta::FromSeconds(ChromeOSPolicy::kMaxP2PAttemptsPeriodInSeconds +
+                             1);
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_FALSE(result.p2p_downloading_allowed);
   EXPECT_TRUE(result.p2p_sharing_allowed);
@@ -1011,14 +1146,14 @@
   // Override specific device policy attributes.
   fake_state_.device_policy_provider()->var_http_downloads_enabled()->reset(
       new bool(false));
-  fake_state_.system_provider()->var_is_official_build()->
-      reset(new bool(false));
+  fake_state_.system_provider()->var_is_official_build()->reset(
+      new bool(false));
 
   // Check that the UpdateCanStart returns true.
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromMinutes(10));
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_EQ(0, result.download_url_idx);
   EXPECT_TRUE(result.download_url_allowed);
@@ -1042,8 +1177,8 @@
 
   // Check that the UpdateCanStart returns true.
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_EQ(1, result.download_url_idx);
   EXPECT_TRUE(result.download_url_allowed);
@@ -1072,8 +1207,8 @@
 
   // Check that the UpdateCanStart returns true.
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_EQ(0, result.download_url_idx);
   EXPECT_TRUE(result.download_url_allowed);
@@ -1101,8 +1236,8 @@
 
   // Check that the UpdateCanStart returns true.
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_EQ(1, result.download_url_idx);
   EXPECT_TRUE(result.download_url_allowed);
@@ -1122,13 +1257,14 @@
   update_state.num_checks = 10;
   update_state.download_urls.emplace_back("http://another/fake/url/");
   update_state.download_errors.emplace_back(
-      0, ErrorCode::kPayloadHashMismatchError,
+      0,
+      ErrorCode::kPayloadHashMismatchError,
       fake_clock_.GetWallclockTime() - TimeDelta::FromSeconds(1));
 
   // Check that the UpdateCanStart returns true.
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_EQ(1, result.download_url_idx);
   EXPECT_TRUE(result.download_url_allowed);
@@ -1150,13 +1286,14 @@
   update_state.is_backoff_disabled = true;
   update_state.download_urls.emplace_back("http://another/fake/url/");
   update_state.download_errors.emplace_back(
-      1, ErrorCode::kPayloadHashMismatchError,
+      1,
+      ErrorCode::kPayloadHashMismatchError,
       fake_clock_.GetWallclockTime() - TimeDelta::FromSeconds(1));
 
   // Check that the UpdateCanStart returns true.
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_EQ(0, result.download_url_idx);
   EXPECT_TRUE(result.download_url_allowed);
@@ -1182,8 +1319,8 @@
   // Check that the UpdateCanStart returns false.
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromMinutes(10));
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_FALSE(result.update_can_start);
   EXPECT_EQ(UpdateCannotStartReason::kCannotDownload,
             result.cannot_start_reason);
@@ -1209,8 +1346,8 @@
   // Check that the UpdateCanStart returns true.
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromMinutes(10));
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_TRUE(result.p2p_downloading_allowed);
   EXPECT_TRUE(result.p2p_sharing_allowed);
@@ -1241,8 +1378,8 @@
   // Check that the UpdateCanStart returns true.
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromMinutes(10));
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_TRUE(result.p2p_downloading_allowed);
   EXPECT_TRUE(result.p2p_sharing_allowed);
@@ -1255,24 +1392,24 @@
 TEST_F(UmChromeOSPolicyTest, UpdateDownloadAllowedEthernetDefault) {
   // Ethernet is always allowed.
 
-  fake_state_.shill_provider()->var_conn_type()->
-      reset(new ConnectionType(ConnectionType::kEthernet));
+  fake_state_.shill_provider()->var_conn_type()->reset(
+      new ConnectionType(ConnectionType::kEthernet));
 
   bool result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded,
-                     &Policy::UpdateDownloadAllowed, &result);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateDownloadAllowed, &result);
   EXPECT_TRUE(result);
 }
 
 TEST_F(UmChromeOSPolicyTest, UpdateDownloadAllowedWifiDefault) {
   // Wifi is allowed if not tethered.
 
-  fake_state_.shill_provider()->var_conn_type()->
-      reset(new ConnectionType(ConnectionType::kWifi));
+  fake_state_.shill_provider()->var_conn_type()->reset(
+      new ConnectionType(ConnectionType::kWifi));
 
   bool result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded,
-                     &Policy::UpdateDownloadAllowed, &result);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateDownloadAllowed, &result);
   EXPECT_TRUE(result);
 }
 
@@ -1280,45 +1417,44 @@
        UpdateCurrentConnectionNotAllowedWifiTetheredDefault) {
   // Tethered wifi is not allowed by default.
 
-  fake_state_.shill_provider()->var_conn_type()->
-      reset(new ConnectionType(ConnectionType::kWifi));
-  fake_state_.shill_provider()->var_conn_tethering()->
-      reset(new ConnectionTethering(ConnectionTethering::kConfirmed));
+  fake_state_.shill_provider()->var_conn_type()->reset(
+      new ConnectionType(ConnectionType::kWifi));
+  fake_state_.shill_provider()->var_conn_tethering()->reset(
+      new ConnectionTethering(ConnectionTethering::kConfirmed));
 
   bool result;
-  ExpectPolicyStatus(EvalStatus::kAskMeAgainLater,
-                     &Policy::UpdateDownloadAllowed, &result);
+  ExpectPolicyStatus(
+      EvalStatus::kAskMeAgainLater, &Policy::UpdateDownloadAllowed, &result);
 }
 
-TEST_F(UmChromeOSPolicyTest,
-       UpdateDownloadAllowedWifiTetheredPolicyOverride) {
+TEST_F(UmChromeOSPolicyTest, UpdateDownloadAllowedWifiTetheredPolicyOverride) {
   // Tethered wifi can be allowed by policy.
 
-  fake_state_.shill_provider()->var_conn_type()->
-      reset(new ConnectionType(ConnectionType::kWifi));
-  fake_state_.shill_provider()->var_conn_tethering()->
-      reset(new ConnectionTethering(ConnectionTethering::kConfirmed));
+  fake_state_.shill_provider()->var_conn_type()->reset(
+      new ConnectionType(ConnectionType::kWifi));
+  fake_state_.shill_provider()->var_conn_tethering()->reset(
+      new ConnectionTethering(ConnectionTethering::kConfirmed));
   set<ConnectionType> allowed_connections;
   allowed_connections.insert(ConnectionType::kCellular);
-  fake_state_.device_policy_provider()->
-      var_allowed_connection_types_for_update()->
-      reset(new set<ConnectionType>(allowed_connections));
+  fake_state_.device_policy_provider()
+      ->var_allowed_connection_types_for_update()
+      ->reset(new set<ConnectionType>(allowed_connections));
 
   bool result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded,
-                     &Policy::UpdateDownloadAllowed, &result);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateDownloadAllowed, &result);
   EXPECT_TRUE(result);
 }
 
 TEST_F(UmChromeOSPolicyTest, UpdateDownloadAllowedWimaxDefault) {
   // Wimax is always allowed.
 
-  fake_state_.shill_provider()->var_conn_type()->
-      reset(new ConnectionType(ConnectionType::kWifi));
+  fake_state_.shill_provider()->var_conn_type()->reset(
+      new ConnectionType(ConnectionType::kWifi));
 
   bool result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded,
-                     &Policy::UpdateDownloadAllowed, &result);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateDownloadAllowed, &result);
   EXPECT_TRUE(result);
 }
 
@@ -1326,80 +1462,76 @@
        UpdateCurrentConnectionNotAllowedBluetoothDefault) {
   // Bluetooth is never allowed.
 
-  fake_state_.shill_provider()->var_conn_type()->
-      reset(new ConnectionType(ConnectionType::kBluetooth));
+  fake_state_.shill_provider()->var_conn_type()->reset(
+      new ConnectionType(ConnectionType::kBluetooth));
 
   bool result;
-  ExpectPolicyStatus(EvalStatus::kAskMeAgainLater,
-                     &Policy::UpdateDownloadAllowed, &result);
+  ExpectPolicyStatus(
+      EvalStatus::kAskMeAgainLater, &Policy::UpdateDownloadAllowed, &result);
 }
 
 TEST_F(UmChromeOSPolicyTest,
        UpdateCurrentConnectionNotAllowedBluetoothPolicyCannotOverride) {
   // Bluetooth cannot be allowed even by policy.
 
-  fake_state_.shill_provider()->var_conn_type()->
-      reset(new ConnectionType(ConnectionType::kBluetooth));
+  fake_state_.shill_provider()->var_conn_type()->reset(
+      new ConnectionType(ConnectionType::kBluetooth));
   set<ConnectionType> allowed_connections;
   allowed_connections.insert(ConnectionType::kBluetooth);
-  fake_state_.device_policy_provider()->
-      var_allowed_connection_types_for_update()->
-      reset(new set<ConnectionType>(allowed_connections));
+  fake_state_.device_policy_provider()
+      ->var_allowed_connection_types_for_update()
+      ->reset(new set<ConnectionType>(allowed_connections));
 
   bool result;
-  ExpectPolicyStatus(EvalStatus::kAskMeAgainLater,
-                     &Policy::UpdateDownloadAllowed, &result);
+  ExpectPolicyStatus(
+      EvalStatus::kAskMeAgainLater, &Policy::UpdateDownloadAllowed, &result);
 }
 
 TEST_F(UmChromeOSPolicyTest, UpdateCurrentConnectionNotAllowedCellularDefault) {
   // Cellular is not allowed by default.
 
-  fake_state_.shill_provider()->var_conn_type()->
-      reset(new ConnectionType(ConnectionType::kCellular));
+  fake_state_.shill_provider()->var_conn_type()->reset(
+      new ConnectionType(ConnectionType::kCellular));
 
   bool result;
-  ExpectPolicyStatus(EvalStatus::kAskMeAgainLater,
-                     &Policy::UpdateDownloadAllowed, &result);
+  ExpectPolicyStatus(
+      EvalStatus::kAskMeAgainLater, &Policy::UpdateDownloadAllowed, &result);
 }
 
-TEST_F(UmChromeOSPolicyTest,
-       UpdateDownloadAllowedCellularPolicyOverride) {
+TEST_F(UmChromeOSPolicyTest, UpdateDownloadAllowedCellularPolicyOverride) {
   // Update over cellular can be enabled by policy.
 
-  fake_state_.shill_provider()->var_conn_type()->
-      reset(new ConnectionType(ConnectionType::kCellular));
+  fake_state_.shill_provider()->var_conn_type()->reset(
+      new ConnectionType(ConnectionType::kCellular));
   set<ConnectionType> allowed_connections;
   allowed_connections.insert(ConnectionType::kCellular);
-  fake_state_.device_policy_provider()->
-      var_allowed_connection_types_for_update()->
-      reset(new set<ConnectionType>(allowed_connections));
+  fake_state_.device_policy_provider()
+      ->var_allowed_connection_types_for_update()
+      ->reset(new set<ConnectionType>(allowed_connections));
 
   bool result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded,
-                     &Policy::UpdateDownloadAllowed, &result);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateDownloadAllowed, &result);
   EXPECT_TRUE(result);
 }
 
-TEST_F(UmChromeOSPolicyTest,
-       UpdateDownloadAllowedCellularUserOverride) {
+TEST_F(UmChromeOSPolicyTest, UpdateDownloadAllowedCellularUserOverride) {
   // Update over cellular can be enabled by user settings, but only if policy
   // is present and does not determine allowed connections.
 
-  fake_state_.shill_provider()->var_conn_type()->
-      reset(new ConnectionType(ConnectionType::kCellular));
+  fake_state_.shill_provider()->var_conn_type()->reset(
+      new ConnectionType(ConnectionType::kCellular));
   set<ConnectionType> allowed_connections;
   allowed_connections.insert(ConnectionType::kCellular);
-  fake_state_.updater_provider()->var_cellular_enabled()->
-      reset(new bool(true));
+  fake_state_.updater_provider()->var_cellular_enabled()->reset(new bool(true));
 
   bool result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded,
-                     &Policy::UpdateDownloadAllowed, &result);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateDownloadAllowed, &result);
   EXPECT_TRUE(result);
 }
 
-TEST_F(UmChromeOSPolicyTest,
-       UpdateCanStartAllowedScatteringSupressedDueToP2P) {
+TEST_F(UmChromeOSPolicyTest, UpdateCanStartAllowedScatteringSupressedDueToP2P) {
   // The UpdateCanStart policy returns true; scattering should have applied, but
   // P2P download is allowed. Scattering values are nonetheless returned, and so
   // are download URL values, albeit the latter are not allowed to be used.
@@ -1413,8 +1545,8 @@
   update_state.scatter_wait_period = TimeDelta::FromSeconds(35);
 
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart,
-                     &result, update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_EQ(0, result.download_url_idx);
   EXPECT_FALSE(result.download_url_allowed);
@@ -1426,8 +1558,7 @@
   EXPECT_EQ(0, result.scatter_check_threshold);
 }
 
-TEST_F(UmChromeOSPolicyTest,
-       UpdateCanStartAllowedBackoffSupressedDueToP2P) {
+TEST_F(UmChromeOSPolicyTest, UpdateCanStartAllowedBackoffSupressedDueToP2P) {
   // The UpdateCanStart policy returns true; backoff should have applied, but
   // P2P download is allowed. Backoff values are nonetheless returned, and so
   // are download URL values, albeit the latter are not allowed to be used.
@@ -1438,16 +1569,18 @@
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromSeconds(10));
   update_state.download_errors_max = 1;
   update_state.download_errors.emplace_back(
-      0, ErrorCode::kDownloadTransferError,
+      0,
+      ErrorCode::kDownloadTransferError,
       curr_time - TimeDelta::FromSeconds(8));
   update_state.download_errors.emplace_back(
-      0, ErrorCode::kDownloadTransferError,
+      0,
+      ErrorCode::kDownloadTransferError,
       curr_time - TimeDelta::FromSeconds(2));
   fake_state_.updater_provider()->var_p2p_enabled()->reset(new bool(true));
 
   UpdateDownloadParams result;
-  ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result,
-                     update_state);
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCanStart, &result, update_state);
   EXPECT_TRUE(result.update_can_start);
   EXPECT_EQ(0, result.download_url_idx);
   EXPECT_FALSE(result.download_url_allowed);
@@ -1492,8 +1625,52 @@
 
 TEST_F(UmChromeOSPolicyTest, P2PEnabledChangedBlocks) {
   bool result;
-  ExpectPolicyStatus(EvalStatus::kAskMeAgainLater, &Policy::P2PEnabledChanged,
-                     &result, false);
+  ExpectPolicyStatus(
+      EvalStatus::kAskMeAgainLater, &Policy::P2PEnabledChanged, &result, false);
+}
+
+TEST_F(UmChromeOSPolicyTest,
+       UpdateCanBeAppliedForcedUpdatesDisablesTimeRestrictions) {
+  Time curr_time = fake_clock_.GetWallclockTime();
+  fake_state_.updater_provider()->var_forced_update_requested()->reset(
+      new UpdateRequestStatus(UpdateRequestStatus::kInteractive));
+  // Should return kAskMeAgainLater when updates are not forced.
+  TestDisallowedTimeIntervals(
+      {WeeklyTimeInterval(
+          WeeklyTime::FromTime(curr_time),
+          WeeklyTime::FromTime(curr_time + TimeDelta::FromMinutes(1)))},
+      ErrorCode::kSuccess,
+      /* kiosk = */ true);
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCanBeAppliedFailsInDisallowedTime) {
+  Time curr_time = fake_clock_.GetWallclockTime();
+  TestDisallowedTimeIntervals(
+      {WeeklyTimeInterval(
+          WeeklyTime::FromTime(curr_time),
+          WeeklyTime::FromTime(curr_time + TimeDelta::FromMinutes(1)))},
+      ErrorCode::kOmahaUpdateDeferredPerPolicy,
+      /* kiosk = */ true);
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCanBeAppliedOutsideDisallowedTime) {
+  Time curr_time = fake_clock_.GetWallclockTime();
+  TestDisallowedTimeIntervals(
+      {WeeklyTimeInterval(
+          WeeklyTime::FromTime(curr_time - TimeDelta::FromHours(3)),
+          WeeklyTime::FromTime(curr_time))},
+      ErrorCode::kSuccess,
+      /* kiosk = */ true);
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCanBeAppliedPassesOnNonKiosk) {
+  Time curr_time = fake_clock_.GetWallclockTime();
+  TestDisallowedTimeIntervals(
+      {WeeklyTimeInterval(
+          WeeklyTime::FromTime(curr_time),
+          WeeklyTime::FromTime(curr_time + TimeDelta::FromMinutes(1)))},
+      ErrorCode::kSuccess,
+      /* kiosk = */ false);
 }
 
 }  // namespace chromeos_update_manager
diff --git a/update_manager/default_policy.cc b/update_manager/default_policy.cc
index 5da1520..81ab795 100644
--- a/update_manager/default_policy.cc
+++ b/update_manager/default_policy.cc
@@ -34,13 +34,16 @@
 DefaultPolicy::DefaultPolicy(chromeos_update_engine::ClockInterface* clock)
     : clock_(clock), aux_state_(new DefaultPolicyState()) {}
 
-EvalStatus DefaultPolicy::UpdateCheckAllowed(
-    EvaluationContext* ec, State* state, std::string* error,
-    UpdateCheckParams* result) const {
+EvalStatus DefaultPolicy::UpdateCheckAllowed(EvaluationContext* ec,
+                                             State* state,
+                                             std::string* error,
+                                             UpdateCheckParams* result) const {
   result->updates_enabled = true;
   result->target_channel.clear();
   result->target_version_prefix.clear();
-  result->is_interactive = false;
+  result->rollback_allowed = false;
+  result->rollback_allowed_milestones = -1;  // No version rolls should happen.
+  result->interactive = false;
 
   // Ensure that the minimum interval is set. If there's no clock, this defaults
   // to always allowing the update.
@@ -65,12 +68,11 @@
   return EvalStatus::kSucceeded;
 }
 
-EvalStatus DefaultPolicy::UpdateCanStart(
-    EvaluationContext* ec,
-    State* state,
-    std::string* error,
-    UpdateDownloadParams* result,
-    const UpdateState update_state) const {
+EvalStatus DefaultPolicy::UpdateCanStart(EvaluationContext* ec,
+                                         State* state,
+                                         std::string* error,
+                                         UpdateDownloadParams* result,
+                                         const UpdateState update_state) const {
   result->update_can_start = true;
   result->cannot_start_reason = UpdateCannotStartReason::kUndefined;
   result->download_url_idx = 0;
@@ -85,30 +87,27 @@
   return EvalStatus::kSucceeded;
 }
 
-EvalStatus DefaultPolicy::UpdateDownloadAllowed(
-    EvaluationContext* ec,
-    State* state,
-    std::string* error,
-    bool* result) const {
+EvalStatus DefaultPolicy::UpdateDownloadAllowed(EvaluationContext* ec,
+                                                State* state,
+                                                std::string* error,
+                                                bool* result) const {
   *result = true;
   return EvalStatus::kSucceeded;
 }
 
-EvalStatus DefaultPolicy::P2PEnabled(
-    EvaluationContext* ec,
-    State* state,
-    std::string* error,
-    bool* result) const {
+EvalStatus DefaultPolicy::P2PEnabled(EvaluationContext* ec,
+                                     State* state,
+                                     std::string* error,
+                                     bool* result) const {
   *result = false;
   return EvalStatus::kSucceeded;
 }
 
-EvalStatus DefaultPolicy::P2PEnabledChanged(
-    EvaluationContext* ec,
-    State* state,
-    std::string* error,
-    bool* result,
-    bool prev_result) const {
+EvalStatus DefaultPolicy::P2PEnabledChanged(EvaluationContext* ec,
+                                            State* state,
+                                            std::string* error,
+                                            bool* result,
+                                            bool prev_result) const {
   // This policy will always prohibit P2P, so this is signaling to the caller
   // that the decision is final (because the current value is the same as the
   // previous one) and there's no need to issue another call.
diff --git a/update_manager/default_policy.h b/update_manager/default_policy.h
index 136ca35..1b284f4 100644
--- a/update_manager/default_policy.h
+++ b/update_manager/default_policy.h
@@ -65,9 +65,10 @@
   ~DefaultPolicy() override {}
 
   // Policy overrides.
-  EvalStatus UpdateCheckAllowed(
-      EvaluationContext* ec, State* state, std::string* error,
-      UpdateCheckParams* result) const override;
+  EvalStatus UpdateCheckAllowed(EvaluationContext* ec,
+                                State* state,
+                                std::string* error,
+                                UpdateCheckParams* result) const override;
 
   EvalStatus UpdateCanBeApplied(
       EvaluationContext* ec,
@@ -76,22 +77,27 @@
       chromeos_update_engine::ErrorCode* result,
       chromeos_update_engine::InstallPlan* install_plan) const override;
 
-  EvalStatus UpdateCanStart(
-      EvaluationContext* ec, State* state, std::string* error,
-      UpdateDownloadParams* result,
-      UpdateState update_state) const override;
+  EvalStatus UpdateCanStart(EvaluationContext* ec,
+                            State* state,
+                            std::string* error,
+                            UpdateDownloadParams* result,
+                            UpdateState update_state) const override;
 
-  EvalStatus UpdateDownloadAllowed(
-      EvaluationContext* ec, State* state, std::string* error,
-      bool* result) const override;
+  EvalStatus UpdateDownloadAllowed(EvaluationContext* ec,
+                                   State* state,
+                                   std::string* error,
+                                   bool* result) const override;
 
-  EvalStatus P2PEnabled(
-      EvaluationContext* ec, State* state, std::string* error,
-      bool* result) const override;
+  EvalStatus P2PEnabled(EvaluationContext* ec,
+                        State* state,
+                        std::string* error,
+                        bool* result) const override;
 
-  EvalStatus P2PEnabledChanged(
-      EvaluationContext* ec, State* state, std::string* error,
-      bool* result, bool prev_result) const override;
+  EvalStatus P2PEnabledChanged(EvaluationContext* ec,
+                               State* state,
+                               std::string* error,
+                               bool* result,
+                               bool prev_result) const override;
 
  protected:
   // Policy override.
diff --git a/update_manager/device_policy_provider.h b/update_manager/device_policy_provider.h
index 3537d13..873282e 100644
--- a/update_manager/device_policy_provider.h
+++ b/update_manager/device_policy_provider.h
@@ -24,8 +24,10 @@
 #include <policy/libpolicy.h>
 
 #include "update_engine/update_manager/provider.h"
+#include "update_engine/update_manager/rollback_prefs.h"
 #include "update_engine/update_manager/shill_provider.h"
 #include "update_engine/update_manager/variable.h"
+#include "update_engine/update_manager/weekly_time.h"
 
 namespace chromeos_update_manager {
 
@@ -46,6 +48,15 @@
 
   virtual Variable<std::string>* var_target_version_prefix() = 0;
 
+  // Variable returning what should happen if the target_version_prefix is
+  // earlier than the current Chrome OS version.
+  virtual Variable<RollbackToTargetVersion>*
+  var_rollback_to_target_version() = 0;
+
+  // Variable returning the number of Chrome milestones for which rollback
+  // is allowed. Rollback protection will be postponed by this many versions.
+  virtual Variable<int>* var_rollback_allowed_milestones() = 0;
+
   // Returns a non-negative scatter interval used for updates.
   virtual Variable<base::TimeDelta>* var_scatter_factor() = 0;
 
@@ -53,7 +64,7 @@
   // identifiers returned are consistent with the ones returned by the
   // ShillProvider.
   virtual Variable<std::set<chromeos_update_engine::ConnectionType>>*
-      var_allowed_connection_types_for_update() = 0;
+  var_allowed_connection_types_for_update() = 0;
 
   // Variable stating the name of the device owner. For enterprise enrolled
   // devices, this will be an empty string.
@@ -65,6 +76,15 @@
 
   virtual Variable<bool>* var_allow_kiosk_app_control_chrome_version() = 0;
 
+  // Variable that contains the app that is to be run when launched in kiosk
+  // mode. If the device is not in kiosk-mode this should be empty.
+  virtual Variable<std::string>* var_auto_launched_kiosk_app_id() = 0;
+
+  // Variable that contains the time intervals during the week for which update
+  // checks are disallowed.
+  virtual Variable<WeeklyTimeIntervalVector>*
+  var_disallowed_time_intervals() = 0;
+
  protected:
   DevicePolicyProvider() {}
 
diff --git a/update_manager/enterprise_device_policy_impl.cc b/update_manager/enterprise_device_policy_impl.cc
index 94518a1..a3430ef 100644
--- a/update_manager/enterprise_device_policy_impl.cc
+++ b/update_manager/enterprise_device_policy_impl.cc
@@ -55,6 +55,7 @@
       }
     }
 
+    // By default, result->rollback_allowed is false.
     if (kiosk_app_control_chrome_version) {
       // Get the required platform version from Chrome.
       const string* kiosk_required_platform_version_p =
@@ -66,11 +67,10 @@
       }
 
       result->target_version_prefix = *kiosk_required_platform_version_p;
-      LOG(INFO) << "Allow kiosk app to control Chrome version policy is set,"
-                << ", target version is "
-                << (kiosk_required_platform_version_p
-                        ? *kiosk_required_platform_version_p
-                        : std::string("latest"));
+      LOG(INFO) << "Allow kiosk app to control Chrome version policy is set, "
+                << "target version is " << result->target_version_prefix;
+      // TODO(hunyadym): Add support for allowing rollback using the manifest
+      // (if policy doesn't specify otherwise).
     } else {
       // Determine whether a target version prefix is dictated by policy.
       const string* target_version_prefix_p =
@@ -79,6 +79,48 @@
         result->target_version_prefix = *target_version_prefix_p;
     }
 
+    // Policy always overwrites whether rollback is allowed by the kiosk app
+    // manifest.
+    const RollbackToTargetVersion* rollback_to_target_version_p =
+        ec->GetValue(dp_provider->var_rollback_to_target_version());
+    if (rollback_to_target_version_p) {
+      switch (*rollback_to_target_version_p) {
+        case RollbackToTargetVersion::kUnspecified:
+          // We leave the default or the one specified by the kiosk app.
+          break;
+        case RollbackToTargetVersion::kDisabled:
+          LOG(INFO) << "Policy disables rollbacks.";
+          result->rollback_allowed = false;
+          break;
+        case RollbackToTargetVersion::kRollbackAndPowerwash:
+          LOG(INFO) << "Policy allows rollbacks with powerwash.";
+          result->rollback_allowed = true;
+          break;
+        case RollbackToTargetVersion::kRollbackAndRestoreIfPossible:
+          LOG(INFO)
+              << "Policy allows rollbacks, also tries to restore if possible.";
+          // We don't support restore yet, but policy still allows rollback.
+          result->rollback_allowed = true;
+          break;
+        case RollbackToTargetVersion::kRollbackOnlyIfRestorePossible:
+          LOG(INFO) << "Policy only allows rollbacks if restore is possible.";
+          // We don't support restore yet, so policy doesn't allow rollback in
+          // this case.
+          result->rollback_allowed = false;
+          break;
+        case RollbackToTargetVersion::kMaxValue:
+          NOTREACHED();
+          // Don't add a default case to let the compiler warn about newly
+          // added enum values which should be added here.
+      }
+    }
+
+    // Determine allowed milestones for rollback.
+    const int* rollback_allowed_milestones_p =
+        ec->GetValue(dp_provider->var_rollback_allowed_milestones());
+    if (rollback_allowed_milestones_p)
+      result->rollback_allowed_milestones = *rollback_allowed_milestones_p;
+
     // Determine whether a target channel is dictated by policy.
     const bool* release_channel_delegated_p =
         ec->GetValue(dp_provider->var_release_channel_delegated());
diff --git a/update_manager/evaluation_context-inl.h b/update_manager/evaluation_context-inl.h
index 937adf4..59d85da 100644
--- a/update_manager/evaluation_context-inl.h
+++ b/update_manager/evaluation_context-inl.h
@@ -23,7 +23,7 @@
 
 namespace chromeos_update_manager {
 
-template<typename T>
+template <typename T>
 const T* EvaluationContext::GetValue(Variable<T>* var) {
   if (var == nullptr) {
     LOG(ERROR) << "GetValue received an uninitialized variable.";
@@ -37,16 +37,15 @@
 
   // Get the value from the variable if not found on the cache.
   std::string errmsg;
-  const T* result = var->GetValue(RemainingTime(evaluation_monotonic_deadline_),
-                                  &errmsg);
+  const T* result =
+      var->GetValue(RemainingTime(evaluation_monotonic_deadline_), &errmsg);
   if (result == nullptr) {
     LOG(WARNING) << "Error reading Variable " << var->GetName() << ": \""
-        << errmsg << "\"";
+                 << errmsg << "\"";
   }
   // Cache the value for the next time. The map of CachedValues keeps the
   // ownership of the pointer until the map is destroyed.
-  value_cache_.emplace(
-    static_cast<BaseVariable*>(var), BoxedValue(result));
+  value_cache_.emplace(static_cast<BaseVariable*>(var), BoxedValue(result));
   return result;
 }
 
diff --git a/update_manager/evaluation_context.cc b/update_manager/evaluation_context.cc
index b6c7b91..e796fec 100644
--- a/update_manager/evaluation_context.cc
+++ b/update_manager/evaluation_context.cc
@@ -43,8 +43,7 @@
 // Returns whether |curr_time| surpassed |ref_time|; if not, also checks whether
 // |ref_time| is sooner than the current value of |*reeval_time|, in which case
 // the latter is updated to the former.
-bool IsTimeGreaterThanHelper(Time ref_time, Time curr_time,
-                             Time* reeval_time) {
+bool IsTimeGreaterThanHelper(Time ref_time, Time curr_time, Time* reeval_time) {
   if (curr_time > ref_time)
     return true;
   // Remember the nearest reference we've checked against in this evaluation.
@@ -104,8 +103,8 @@
 }
 
 Time EvaluationContext::MonotonicDeadline(TimeDelta timeout) {
-  return (timeout.is_max() ? Time::Max() :
-          clock_->GetMonotonicTime() + timeout);
+  return (timeout.is_max() ? Time::Max()
+                           : clock_->GetMonotonicTime() + timeout);
 }
 
 void EvaluationContext::ValueChanged(BaseVariable* var) {
@@ -130,13 +129,13 @@
 }
 
 bool EvaluationContext::IsWallclockTimeGreaterThan(Time timestamp) {
-  return IsTimeGreaterThanHelper(timestamp, evaluation_start_wallclock_,
-                                 &reevaluation_time_wallclock_);
+  return IsTimeGreaterThanHelper(
+      timestamp, evaluation_start_wallclock_, &reevaluation_time_wallclock_);
 }
 
 bool EvaluationContext::IsMonotonicTimeGreaterThan(Time timestamp) {
-  return IsTimeGreaterThanHelper(timestamp, evaluation_start_monotonic_,
-                                 &reevaluation_time_monotonic_);
+  return IsTimeGreaterThanHelper(
+      timestamp, evaluation_start_monotonic_, &reevaluation_time_monotonic_);
 }
 
 void EvaluationContext::ResetEvaluation() {
@@ -147,7 +146,7 @@
   evaluation_monotonic_deadline_ = MonotonicDeadline(evaluation_timeout_);
 
   // Remove the cached values of non-const variables
-  for (auto it = value_cache_.begin(); it != value_cache_.end(); ) {
+  for (auto it = value_cache_.begin(); it != value_cache_.end();) {
     if (it->first->GetMode() == kVariableModeConst) {
       ++it;
     } else {
diff --git a/update_manager/evaluation_context.h b/update_manager/evaluation_context.h
index df5816a..c68c430 100644
--- a/update_manager/evaluation_context.h
+++ b/update_manager/evaluation_context.h
@@ -73,8 +73,10 @@
   EvaluationContext(chromeos_update_engine::ClockInterface* clock,
                     base::TimeDelta evaluation_timeout)
       : EvaluationContext(
-          clock, evaluation_timeout, base::TimeDelta::Max(),
-          std::unique_ptr<base::Callback<void(EvaluationContext*)>>()) {}
+            clock,
+            evaluation_timeout,
+            base::TimeDelta::Max(),
+            std::unique_ptr<base::Callback<void(EvaluationContext*)>>()) {}
   ~EvaluationContext();
 
   // Returns a pointer to the value returned by the passed variable |var|. The
@@ -83,7 +85,7 @@
   // passed Variable changes it.
   //
   // In case of error, a null value is returned.
-  template<typename T>
+  template <typename T>
   const T* GetValue(Variable<T>* var);
 
   // Returns whether the evaluation time has surpassed |timestamp|, on either
@@ -114,7 +116,7 @@
   // there's no cached variable, this method returns false.
   //
   // Right before the passed closure is called the EvaluationContext is
-  // reseted, removing all the non-const cached values.
+  // reset, removing all the non-const cached values.
   bool RunOnValueChangeOrTimeout(base::Closure callback);
 
   // Returns a textual representation of the evaluation context,
@@ -163,8 +165,7 @@
 
   // The TaskId returned by the message loop identifying the timeout callback.
   // Used for canceling the timeout callback.
-  brillo::MessageLoop::TaskId timeout_event_ =
-      brillo::MessageLoop::kTaskIdNull;
+  brillo::MessageLoop::TaskId timeout_event_ = brillo::MessageLoop::kTaskIdNull;
 
   // Whether a timeout event firing marks the expiration of the evaluation
   // context.
diff --git a/update_manager/evaluation_context_unittest.cc b/update_manager/evaluation_context_unittest.cc
index 1e61db7..eb42eb7 100644
--- a/update_manager/evaluation_context_unittest.cc
+++ b/update_manager/evaluation_context_unittest.cc
@@ -20,6 +20,7 @@
 #include <string>
 
 #include <base/bind.h>
+#include <base/bind_helpers.h>
 #include <brillo/message_loops/fake_message_loop.h>
 #include <brillo/message_loops/message_loop_utils.h>
 #include <gtest/gtest.h>
@@ -40,16 +41,14 @@
 using chromeos_update_engine::FakeClock;
 using std::string;
 using std::unique_ptr;
+using testing::_;
 using testing::Return;
 using testing::StrictMock;
-using testing::_;
 
 namespace chromeos_update_manager {
 
 namespace {
 
-void DoNothing() {}
-
 // Sets the value of the passed pointer to true.
 void SetTrue(bool* value) {
   *value = true;
@@ -59,15 +58,17 @@
   return *value;
 }
 
-template<typename T>
+template <typename T>
 void ReadVar(scoped_refptr<EvaluationContext> ec, Variable<T>* var) {
   ec->GetValue(var);
 }
 
 // Runs |evaluation|; if the value pointed by |count_p| is greater than zero,
 // decrement it and schedule a reevaluation; otherwise, writes true to |done_p|.
-void EvaluateRepeatedly(Closure evaluation, scoped_refptr<EvaluationContext> ec,
-                        int* count_p, bool* done_p) {
+void EvaluateRepeatedly(Closure evaluation,
+                        scoped_refptr<EvaluationContext> ec,
+                        int* count_p,
+                        bool* done_p) {
   evaluation.Run();
 
   // Schedule reevaluation if needed.
@@ -92,7 +93,9 @@
     // Mar 2, 2006 1:23:45 UTC.
     fake_clock_.SetWallclockTime(Time::FromTimeT(1141262625));
     eval_ctx_ = new EvaluationContext(
-        &fake_clock_, default_timeout_, default_timeout_,
+        &fake_clock_,
+        default_timeout_,
+        default_timeout_,
         unique_ptr<base::Callback<void(EvaluationContext*)>>(nullptr));
   }
 
@@ -134,10 +137,10 @@
   FakeVariable<string> fake_const_var_ = {"fake_const", kVariableModeConst};
   FakeVariable<string> fake_poll_var_ = {"fake_poll",
                                          TimeDelta::FromSeconds(1)};
-  StrictMock<MockVariable<string>> mock_var_async_ {
-    "mock_var_async", kVariableModeAsync};
-  StrictMock<MockVariable<string>> mock_var_poll_ {
-    "mock_var_poll", kVariableModePoll};
+  StrictMock<MockVariable<string>> mock_var_async_{"mock_var_async",
+                                                   kVariableModeAsync};
+  StrictMock<MockVariable<string>> mock_var_poll_{"mock_var_poll",
+                                                  kVariableModePoll};
 };
 
 TEST_F(UmEvaluationContextTest, GetValueFails) {
@@ -207,7 +210,13 @@
   fake_const_var_.reset(new string("Hello world!"));
   EXPECT_EQ(*eval_ctx_->GetValue(&fake_const_var_), "Hello world!");
 
-  EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&DoNothing)));
+  EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(
+#if BASE_VER < 576279
+      Bind(&base::DoNothing)
+#else
+      base::DoNothing()
+#endif
+          ));
 }
 
 // Test that reevaluation occurs when an async variable it depends on changes.
@@ -277,11 +286,23 @@
   EXPECT_TRUE(value);
 
   // Ensure that we cannot reschedule an evaluation.
-  EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&DoNothing)));
+  EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(
+#if BASE_VER < 576279
+      Bind(&base::DoNothing)
+#else
+      base::DoNothing()
+#endif
+          ));
 
   // Ensure that we can reschedule an evaluation after resetting expiration.
   eval_ctx_->ResetExpiration();
-  EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&DoNothing)));
+  EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(
+#if BASE_VER < 576279
+      Bind(&base::DoNothing)
+#else
+      base::DoNothing()
+#endif
+          ));
 }
 
 // Test that we clear the events when destroying the EvaluationContext.
@@ -312,8 +333,8 @@
   evaluation.Run();
 
   // Schedule repeated reevaluations.
-  Closure closure = Bind(EvaluateRepeatedly, evaluation, eval_ctx_,
-                         &num_reevaluations, &done);
+  Closure closure = Bind(
+      EvaluateRepeatedly, evaluation, eval_ctx_, &num_reevaluations, &done);
   ASSERT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(closure));
   MessageLoopRunUntil(MessageLoop::current(),
                       TimeDelta::FromSeconds(10),
@@ -327,7 +348,13 @@
   fake_poll_var_.reset(new string("Polled value"));
   eval_ctx_->GetValue(&fake_async_var_);
   eval_ctx_->GetValue(&fake_poll_var_);
-  EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&DoNothing)));
+  EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(
+#if BASE_VER < 576279
+      Bind(&base::DoNothing)
+#else
+      base::DoNothing()
+#endif
+          ));
   // TearDown() checks for leaked observers on this async_variable, which means
   // that our object is still alive after removing its reference.
 }
@@ -361,13 +388,12 @@
 }
 
 TEST_F(UmEvaluationContextTest, TimeoutUpdatesWithMonotonicTime) {
-  fake_clock_.SetMonotonicTime(
-      fake_clock_.GetMonotonicTime() + TimeDelta::FromSeconds(1));
+  fake_clock_.SetMonotonicTime(fake_clock_.GetMonotonicTime() +
+                               TimeDelta::FromSeconds(1));
 
   TimeDelta timeout = default_timeout_ - TimeDelta::FromSeconds(1);
 
-  EXPECT_CALL(mock_var_async_, GetValue(timeout, _))
-      .WillOnce(Return(nullptr));
+  EXPECT_CALL(mock_var_async_, GetValue(timeout, _)).WillOnce(Return(nullptr));
   EXPECT_EQ(nullptr, eval_ctx_->GetValue(&mock_var_async_));
 }
 
@@ -376,20 +402,20 @@
   // Advance the time on the clock but don't call ResetEvaluation yet.
   fake_clock_.SetWallclockTime(cur_time + TimeDelta::FromSeconds(4));
 
-  EXPECT_TRUE(eval_ctx_->IsWallclockTimeGreaterThan(
-          cur_time - TimeDelta::FromSeconds(1)));
+  EXPECT_TRUE(eval_ctx_->IsWallclockTimeGreaterThan(cur_time -
+                                                    TimeDelta::FromSeconds(1)));
   EXPECT_FALSE(eval_ctx_->IsWallclockTimeGreaterThan(cur_time));
   EXPECT_FALSE(eval_ctx_->IsWallclockTimeGreaterThan(
-          cur_time + TimeDelta::FromSeconds(1)));
+      cur_time + TimeDelta::FromSeconds(1)));
   // Call ResetEvaluation now, which should use the new evaluation time.
   eval_ctx_->ResetEvaluation();
 
   cur_time = fake_clock_.GetWallclockTime();
-  EXPECT_TRUE(eval_ctx_->IsWallclockTimeGreaterThan(
-          cur_time - TimeDelta::FromSeconds(1)));
+  EXPECT_TRUE(eval_ctx_->IsWallclockTimeGreaterThan(cur_time -
+                                                    TimeDelta::FromSeconds(1)));
   EXPECT_FALSE(eval_ctx_->IsWallclockTimeGreaterThan(cur_time));
   EXPECT_FALSE(eval_ctx_->IsWallclockTimeGreaterThan(
-          cur_time + TimeDelta::FromSeconds(1)));
+      cur_time + TimeDelta::FromSeconds(1)));
 }
 
 TEST_F(UmEvaluationContextTest, ResetEvaluationResetsTimesMonotonic) {
@@ -397,20 +423,20 @@
   // Advance the time on the clock but don't call ResetEvaluation yet.
   fake_clock_.SetMonotonicTime(cur_time + TimeDelta::FromSeconds(4));
 
-  EXPECT_TRUE(eval_ctx_->IsMonotonicTimeGreaterThan(
-          cur_time - TimeDelta::FromSeconds(1)));
+  EXPECT_TRUE(eval_ctx_->IsMonotonicTimeGreaterThan(cur_time -
+                                                    TimeDelta::FromSeconds(1)));
   EXPECT_FALSE(eval_ctx_->IsMonotonicTimeGreaterThan(cur_time));
   EXPECT_FALSE(eval_ctx_->IsMonotonicTimeGreaterThan(
-          cur_time + TimeDelta::FromSeconds(1)));
+      cur_time + TimeDelta::FromSeconds(1)));
   // Call ResetEvaluation now, which should use the new evaluation time.
   eval_ctx_->ResetEvaluation();
 
   cur_time = fake_clock_.GetMonotonicTime();
-  EXPECT_TRUE(eval_ctx_->IsMonotonicTimeGreaterThan(
-          cur_time - TimeDelta::FromSeconds(1)));
+  EXPECT_TRUE(eval_ctx_->IsMonotonicTimeGreaterThan(cur_time -
+                                                    TimeDelta::FromSeconds(1)));
   EXPECT_FALSE(eval_ctx_->IsMonotonicTimeGreaterThan(cur_time));
   EXPECT_FALSE(eval_ctx_->IsMonotonicTimeGreaterThan(
-          cur_time + TimeDelta::FromSeconds(1)));
+      cur_time + TimeDelta::FromSeconds(1)));
 }
 
 TEST_F(UmEvaluationContextTest,
@@ -420,7 +446,13 @@
 
   // The "false" from IsWallclockTimeGreaterThan means that's not that timestamp
   // yet, so this should schedule a callback for when that happens.
-  EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&DoNothing)));
+  EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(
+#if BASE_VER < 576279
+      Bind(&base::DoNothing)
+#else
+      base::DoNothing()
+#endif
+          ));
 }
 
 TEST_F(UmEvaluationContextTest,
@@ -430,7 +462,13 @@
 
   // The "false" from IsMonotonicTimeGreaterThan means that's not that timestamp
   // yet, so this should schedule a callback for when that happens.
-  EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&DoNothing)));
+  EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(
+#if BASE_VER < 576279
+      Bind(&base::DoNothing)
+#else
+      base::DoNothing()
+#endif
+          ));
 }
 
 TEST_F(UmEvaluationContextTest,
@@ -443,7 +481,13 @@
       fake_clock_.GetWallclockTime() - TimeDelta::FromSeconds(1)));
 
   // Callback should not be scheduled.
-  EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&DoNothing)));
+  EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(
+#if BASE_VER < 576279
+      Bind(&base::DoNothing)
+#else
+      base::DoNothing()
+#endif
+          ));
 }
 
 TEST_F(UmEvaluationContextTest,
@@ -456,7 +500,13 @@
       fake_clock_.GetMonotonicTime() - TimeDelta::FromSeconds(1)));
 
   // Callback should not be scheduled.
-  EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&DoNothing)));
+  EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(
+#if BASE_VER < 576279
+      Bind(&base::DoNothing)
+#else
+      base::DoNothing()
+#endif
+          ));
 }
 
 TEST_F(UmEvaluationContextTest, DumpContext) {
@@ -474,16 +524,17 @@
   // Note that the variables are printed in alphabetical order. Also
   // see UmEvaluationContextText::SetUp() where the values used for
   // |evaluation_start_{monotonic,wallclock| are set.
-  EXPECT_EQ("{\n"
-            "   \"evaluation_start_monotonic\": \"4/22/2009 19:25:00 GMT\",\n"
-            "   \"evaluation_start_wallclock\": \"3/2/2006 1:23:45 GMT\",\n"
-            "   \"variables\": {\n"
-            "      \"fail_var\": \"(no value)\",\n"
-            "      \"fake_int\": \"42\",\n"
-            "      \"fake_poll\": \"Hello \\\"world\\\"!\"\n"
-            "   }\n"
-            "}",
-            eval_ctx_->DumpContext());
+  EXPECT_EQ(
+      "{\n"
+      "   \"evaluation_start_monotonic\": \"4/22/2009 19:25:00 GMT\",\n"
+      "   \"evaluation_start_wallclock\": \"3/2/2006 1:23:45 GMT\",\n"
+      "   \"variables\": {\n"
+      "      \"fail_var\": \"(no value)\",\n"
+      "      \"fake_int\": \"42\",\n"
+      "      \"fake_poll\": \"Hello \\\"world\\\"!\"\n"
+      "   }\n"
+      "}",
+      eval_ctx_->DumpContext());
 }
 
 }  // namespace chromeos_update_manager
diff --git a/update_manager/fake_config_provider.h b/update_manager/fake_config_provider.h
index 6a324df..7e6c35b 100644
--- a/update_manager/fake_config_provider.h
+++ b/update_manager/fake_config_provider.h
@@ -32,8 +32,8 @@
   }
 
  private:
-  FakeVariable<bool> var_is_oobe_enabled_{  // NOLINT(whitespace/braces)
-      "is_oobe_enabled", kVariableModeConst};
+  FakeVariable<bool> var_is_oobe_enabled_{"is_oobe_enabled",
+                                          kVariableModeConst};
 
   DISALLOW_COPY_AND_ASSIGN(FakeConfigProvider);
 };
diff --git a/update_manager/fake_device_policy_provider.h b/update_manager/fake_device_policy_provider.h
index 9e4f5b7..7cd4d7b 100644
--- a/update_manager/fake_device_policy_provider.h
+++ b/update_manager/fake_device_policy_provider.h
@@ -50,18 +50,25 @@
     return &var_target_version_prefix_;
   }
 
+  FakeVariable<RollbackToTargetVersion>* var_rollback_to_target_version()
+      override {
+    return &var_rollback_to_target_version_;
+  }
+
+  FakeVariable<int>* var_rollback_allowed_milestones() override {
+    return &var_rollback_allowed_milestones_;
+  }
+
   FakeVariable<base::TimeDelta>* var_scatter_factor() override {
     return &var_scatter_factor_;
   }
 
   FakeVariable<std::set<chromeos_update_engine::ConnectionType>>*
-      var_allowed_connection_types_for_update() override {
+  var_allowed_connection_types_for_update() override {
     return &var_allowed_connection_types_for_update_;
   }
 
-  FakeVariable<std::string>* var_owner() override {
-    return &var_owner_;
-  }
+  FakeVariable<std::string>* var_owner() override { return &var_owner_; }
 
   FakeVariable<bool>* var_http_downloads_enabled() override {
     return &var_http_downloads_enabled_;
@@ -75,28 +82,44 @@
     return &var_allow_kiosk_app_control_chrome_version_;
   }
 
+  FakeVariable<std::string>* var_auto_launched_kiosk_app_id() override {
+    return &var_auto_launched_kiosk_app_id_;
+  }
+
+  FakeVariable<WeeklyTimeIntervalVector>* var_disallowed_time_intervals()
+      override {
+    return &var_disallowed_time_intervals_;
+  }
+
  private:
-  FakeVariable<bool> var_device_policy_is_loaded_{
-      "policy_is_loaded", kVariableModePoll};
-  FakeVariable<std::string> var_release_channel_{
-      "release_channel", kVariableModePoll};
-  FakeVariable<bool> var_release_channel_delegated_{
-      "release_channel_delegated", kVariableModePoll};
-  FakeVariable<bool> var_update_disabled_{
-      "update_disabled", kVariableModePoll};
-  FakeVariable<std::string> var_target_version_prefix_{
-      "target_version_prefix", kVariableModePoll};
-  FakeVariable<base::TimeDelta> var_scatter_factor_{
-      "scatter_factor", kVariableModePoll};
+  FakeVariable<bool> var_device_policy_is_loaded_{"policy_is_loaded",
+                                                  kVariableModePoll};
+  FakeVariable<std::string> var_release_channel_{"release_channel",
+                                                 kVariableModePoll};
+  FakeVariable<bool> var_release_channel_delegated_{"release_channel_delegated",
+                                                    kVariableModePoll};
+  FakeVariable<bool> var_update_disabled_{"update_disabled", kVariableModePoll};
+  FakeVariable<std::string> var_target_version_prefix_{"target_version_prefix",
+                                                       kVariableModePoll};
+  FakeVariable<RollbackToTargetVersion> var_rollback_to_target_version_{
+      "rollback_to_target_version", kVariableModePoll};
+  FakeVariable<int> var_rollback_allowed_milestones_{
+      "rollback_allowed_milestones", kVariableModePoll};
+  FakeVariable<base::TimeDelta> var_scatter_factor_{"scatter_factor",
+                                                    kVariableModePoll};
   FakeVariable<std::set<chromeos_update_engine::ConnectionType>>
       var_allowed_connection_types_for_update_{
           "allowed_connection_types_for_update", kVariableModePoll};
   FakeVariable<std::string> var_owner_{"owner", kVariableModePoll};
-  FakeVariable<bool> var_http_downloads_enabled_{
-      "http_downloads_enabled", kVariableModePoll};
+  FakeVariable<bool> var_http_downloads_enabled_{"http_downloads_enabled",
+                                                 kVariableModePoll};
   FakeVariable<bool> var_au_p2p_enabled_{"au_p2p_enabled", kVariableModePoll};
   FakeVariable<bool> var_allow_kiosk_app_control_chrome_version_{
       "allow_kiosk_app_control_chrome_version", kVariableModePoll};
+  FakeVariable<std::string> var_auto_launched_kiosk_app_id_{
+      "auto_launched_kiosk_app_id", kVariableModePoll};
+  FakeVariable<WeeklyTimeIntervalVector> var_disallowed_time_intervals_{
+      "disallowed_time_intervals", kVariableModePoll};
 
   DISALLOW_COPY_AND_ASSIGN(FakeDevicePolicyProvider);
 };
diff --git a/update_manager/fake_shill_provider.h b/update_manager/fake_shill_provider.h
index 7f1c8f5..7a23507 100644
--- a/update_manager/fake_shill_provider.h
+++ b/update_manager/fake_shill_provider.h
@@ -27,9 +27,7 @@
  public:
   FakeShillProvider() {}
 
-  FakeVariable<bool>* var_is_connected() override {
-    return &var_is_connected_;
-  }
+  FakeVariable<bool>* var_is_connected() override { return &var_is_connected_; }
 
   FakeVariable<chromeos_update_engine::ConnectionType>* var_conn_type()
       override {
@@ -37,7 +35,7 @@
   }
 
   FakeVariable<chromeos_update_engine::ConnectionTethering>*
-      var_conn_tethering() override {
+  var_conn_tethering() override {
     return &var_conn_tethering_;
   }
 
@@ -51,8 +49,8 @@
       "conn_type", kVariableModePoll};
   FakeVariable<chromeos_update_engine::ConnectionTethering> var_conn_tethering_{
       "conn_tethering", kVariableModePoll};
-  FakeVariable<base::Time> var_conn_last_changed_{
-      "conn_last_changed", kVariableModePoll};
+  FakeVariable<base::Time> var_conn_last_changed_{"conn_last_changed",
+                                                  kVariableModePoll};
 
   DISALLOW_COPY_AND_ASSIGN(FakeShillProvider);
 };
diff --git a/update_manager/fake_state.h b/update_manager/fake_state.h
index fd7a88c..0fd584f 100644
--- a/update_manager/fake_state.h
+++ b/update_manager/fake_state.h
@@ -46,29 +46,19 @@
   ~FakeState() override {}
 
   // Downcasted getters to access the fake instances during testing.
-  FakeConfigProvider* config_provider() override {
-    return &config_provider_;
-  }
+  FakeConfigProvider* config_provider() override { return &config_provider_; }
 
   FakeDevicePolicyProvider* device_policy_provider() override {
     return &device_policy_provider_;
   }
 
-  FakeRandomProvider* random_provider() override {
-    return &random_provider_;
-  }
+  FakeRandomProvider* random_provider() override { return &random_provider_; }
 
-  FakeShillProvider* shill_provider() override {
-    return &shill_provider_;
-  }
+  FakeShillProvider* shill_provider() override { return &shill_provider_; }
 
-  FakeSystemProvider* system_provider() override {
-    return &system_provider_;
-  }
+  FakeSystemProvider* system_provider() override { return &system_provider_; }
 
-  FakeTimeProvider* time_provider() override {
-    return &time_provider_;
-  }
+  FakeTimeProvider* time_provider() override { return &time_provider_; }
 
   FakeUpdaterProvider* updater_provider() override {
     return &updater_provider_;
diff --git a/update_manager/fake_system_provider.h b/update_manager/fake_system_provider.h
index 0f4dff4..f54951b 100644
--- a/update_manager/fake_system_provider.h
+++ b/update_manager/fake_system_provider.h
@@ -17,9 +17,12 @@
 #ifndef UPDATE_ENGINE_UPDATE_MANAGER_FAKE_SYSTEM_PROVIDER_H_
 #define UPDATE_ENGINE_UPDATE_MANAGER_FAKE_SYSTEM_PROVIDER_H_
 
-#include "update_engine/update_manager/fake_variable.h"
 #include "update_engine/update_manager/system_provider.h"
 
+#include <string>
+
+#include "update_engine/update_manager/fake_variable.h"
+
 namespace chromeos_update_manager {
 
 // Fake implementation of the SystemProvider base class.
@@ -48,12 +51,12 @@
   }
 
  private:
-  FakeVariable<bool> var_is_normal_boot_mode_{  // NOLINT(whitespace/braces)
-    "is_normal_boot_mode", kVariableModeConst};
-  FakeVariable<bool> var_is_official_build_{  // NOLINT(whitespace/braces)
-    "is_official_build", kVariableModeConst};
-  FakeVariable<bool> var_is_oobe_complete_{  // NOLINT(whitespace/braces)
-    "is_oobe_complete", kVariableModePoll};
+  FakeVariable<bool> var_is_normal_boot_mode_{"is_normal_boot_mode",
+                                              kVariableModeConst};
+  FakeVariable<bool> var_is_official_build_{"is_official_build",
+                                            kVariableModeConst};
+  FakeVariable<bool> var_is_oobe_complete_{"is_oobe_complete",
+                                           kVariableModePoll};
   FakeVariable<unsigned int> var_num_slots_{"num_slots", kVariableModePoll};
   FakeVariable<std::string> var_kiosk_required_platform_version_{
       "kiosk_required_platform_version", kVariableModePoll};
diff --git a/update_manager/fake_time_provider.h b/update_manager/fake_time_provider.h
index 2aea2e7..bd370d2 100644
--- a/update_manager/fake_time_provider.h
+++ b/update_manager/fake_time_provider.h
@@ -29,10 +29,12 @@
 
   FakeVariable<base::Time>* var_curr_date() override { return &var_curr_date_; }
   FakeVariable<int>* var_curr_hour() override { return &var_curr_hour_; }
+  FakeVariable<int>* var_curr_minute() override { return &var_curr_minute_; }
 
  private:
   FakeVariable<base::Time> var_curr_date_{"curr_date", kVariableModePoll};
   FakeVariable<int> var_curr_hour_{"curr_hour", kVariableModePoll};
+  FakeVariable<int> var_curr_minute_{"curr_minute", kVariableModePoll};
 
   DISALLOW_COPY_AND_ASSIGN(FakeTimeProvider);
 };
diff --git a/update_manager/fake_update_manager.h b/update_manager/fake_update_manager.h
index 2ea00b6..173b1a9 100644
--- a/update_manager/fake_update_manager.h
+++ b/update_manager/fake_update_manager.h
@@ -27,8 +27,10 @@
 class FakeUpdateManager : public UpdateManager {
  public:
   explicit FakeUpdateManager(chromeos_update_engine::ClockInterface* clock)
-      : UpdateManager(clock, base::TimeDelta::FromSeconds(5),
-                      base::TimeDelta::FromHours(1), new FakeState()) {
+      : UpdateManager(clock,
+                      base::TimeDelta::FromSeconds(5),
+                      base::TimeDelta::FromHours(1),
+                      new FakeState()) {
     // The FakeUpdateManager uses a DefaultPolicy.
     set_policy(new DefaultPolicy(clock));
   }
diff --git a/update_manager/fake_variable.h b/update_manager/fake_variable.h
index 2f8e079..ef5b4f3 100644
--- a/update_manager/fake_variable.h
+++ b/update_manager/fake_variable.h
@@ -26,7 +26,7 @@
 
 // A fake typed variable to use while testing policy implementations. The
 // variable can be instructed to return any object of its type.
-template<typename T>
+template <typename T>
 class FakeVariable : public Variable<T> {
  public:
   FakeVariable(const std::string& name, VariableMode mode)
@@ -39,14 +39,10 @@
   // returned by GetValue(), the pointer is released and has to be set again.
   // A value of null means that the GetValue() call will fail and return
   // null.
-  void reset(const T* p_value) {
-    ptr_.reset(p_value);
-  }
+  void reset(const T* p_value) { ptr_.reset(p_value); }
 
   // Make the NotifyValueChanged() public for FakeVariables.
-  void NotifyValueChanged() {
-    Variable<T>::NotifyValueChanged();
-  }
+  void NotifyValueChanged() { Variable<T>::NotifyValueChanged(); }
 
  protected:
   // Variable<T> overrides.
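A minimal sketch of how a test might drive the FakeVariable interface above: reset() hands ownership of the passed pointer to the variable, and resetting to null makes the next GetValue() call fail. The variable name below is illustrative only.

    FakeVariable<bool> var_update_disabled("update_disabled", kVariableModePoll);
    var_update_disabled.reset(new bool(true));  // next GetValue() yields true
    // ... exercise the code under test ...
    var_update_disabled.reset(nullptr);         // next GetValue() returns null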
diff --git a/update_manager/generic_variables.h b/update_manager/generic_variables.h
index f87a05e..afbdcbe 100644
--- a/update_manager/generic_variables.h
+++ b/update_manager/generic_variables.h
@@ -47,7 +47,7 @@
 //    private:
 //     MyType foo_;
 //   };
-template<typename T>
+template <typename T>
 class PollCopyVariable : public Variable<T> {
  public:
   // Creates the variable returning copies of the passed |ref|. The reference to
@@ -55,24 +55,35 @@
   // method is called. If |is_set_p| is not null, then this flag will be
   // consulted prior to returning the value, and an |errmsg| will be returned if
   // it is not set.
-  PollCopyVariable(const std::string& name, const T& ref, const bool* is_set_p,
+  PollCopyVariable(const std::string& name,
+                   const T& ref,
+                   const bool* is_set_p,
                    const std::string& errmsg)
-      : Variable<T>(name, kVariableModePoll), ref_(ref), is_set_p_(is_set_p),
+      : Variable<T>(name, kVariableModePoll),
+        ref_(ref),
+        is_set_p_(is_set_p),
         errmsg_(errmsg) {}
   PollCopyVariable(const std::string& name, const T& ref, const bool* is_set_p)
       : PollCopyVariable(name, ref, is_set_p, std::string()) {}
   PollCopyVariable(const std::string& name, const T& ref)
       : PollCopyVariable(name, ref, nullptr) {}
 
-  PollCopyVariable(const std::string& name, const base::TimeDelta poll_interval,
-                   const T& ref, const bool* is_set_p,
+  PollCopyVariable(const std::string& name,
+                   const base::TimeDelta poll_interval,
+                   const T& ref,
+                   const bool* is_set_p,
                    const std::string& errmsg)
-      : Variable<T>(name, poll_interval), ref_(ref), is_set_p_(is_set_p),
+      : Variable<T>(name, poll_interval),
+        ref_(ref),
+        is_set_p_(is_set_p),
         errmsg_(errmsg) {}
-  PollCopyVariable(const std::string& name, const base::TimeDelta poll_interval,
-                   const T& ref, const bool* is_set_p)
+  PollCopyVariable(const std::string& name,
+                   const base::TimeDelta poll_interval,
+                   const T& ref,
+                   const bool* is_set_p)
       : PollCopyVariable(name, poll_interval, ref, is_set_p, std::string()) {}
-  PollCopyVariable(const std::string& name, const base::TimeDelta poll_interval,
+  PollCopyVariable(const std::string& name,
+                   const base::TimeDelta poll_interval,
                    const T& ref)
       : PollCopyVariable(name, poll_interval, ref, nullptr) {}
 
@@ -109,7 +120,7 @@
 
 // Variable class returning a constant value that is cached on the variable when
 // it is created.
-template<typename T>
+template <typename T>
 class ConstCopyVariable : public Variable<T> {
  public:
   // Creates the variable returning copies of the passed |obj|. The value passed
@@ -132,7 +143,7 @@
 
 // Variable class returning a copy of a value returned by a given function. The
 // function is called every time the variable is being polled.
-template<typename T>
+template <typename T>
 class CallCopyVariable : public Variable<T> {
  public:
   CallCopyVariable(const std::string& name, base::Callback<T(void)> func)
@@ -160,7 +171,6 @@
   DISALLOW_COPY_AND_ASSIGN(CallCopyVariable);
 };
 
-
 // A Variable class to implement simple Async variables. It provides two methods
 // SetValue and UnsetValue to modify the current value of the variable and
 // notify the registered observers whenever the value changed.
@@ -168,7 +178,7 @@
 // The type T needs to be copy-constructible, default-constructible and have an
 // operator== (to determine if the value changed), which makes this class
 // suitable for basic types.
-template<typename T>
+template <typename T>
 class AsyncCopyVariable : public Variable<T> {
  public:
   explicit AsyncCopyVariable(const std::string& name)
@@ -176,7 +186,8 @@
 
   AsyncCopyVariable(const std::string& name, const T value)
       : Variable<T>(name, kVariableModeAsync),
-        has_value_(true), value_(value) {}
+        has_value_(true),
+        value_(value) {}
 
   void SetValue(const T& new_value) {
     bool should_notify = !(has_value_ && new_value == value_);
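A rough sketch of the AsyncCopyVariable contract described above, with invented names (var_feature_enabled, OnPolicyReloaded): SetValue() stores the value and notifies observers when it actually changes, while UnsetValue() clears it so the variable reads as not set until a new value arrives.

    AsyncCopyVariable<bool> var_feature_enabled{"feature_enabled"};

    void OnPolicyReloaded(bool policy_loaded, bool enabled) {
      if (policy_loaded)
        var_feature_enabled.SetValue(enabled);  // observers fire iff value changed
      else
        var_feature_enabled.UnsetValue();       // subsequent GetValue() returns null
    }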
diff --git a/update_manager/generic_variables_unittest.cc b/update_manager/generic_variables_unittest.cc
index cb0c48f..0ff97e3 100644
--- a/update_manager/generic_variables_unittest.cc
+++ b/update_manager/generic_variables_unittest.cc
@@ -34,15 +34,14 @@
 
 class UmPollCopyVariableTest : public ::testing::Test {};
 
-
 TEST_F(UmPollCopyVariableTest, SimpleTest) {
   // Tests that copies are generated as intended.
   int source = 5;
   PollCopyVariable<int> var("var", source);
 
   // Generate and validate a copy.
-  unique_ptr<const int> copy_1(var.GetValue(
-          UmTestUtils::DefaultTimeout(), nullptr));
+  unique_ptr<const int> copy_1(
+      var.GetValue(UmTestUtils::DefaultTimeout(), nullptr));
   ASSERT_NE(nullptr, copy_1.get());
   EXPECT_EQ(5, *copy_1);
 
@@ -70,7 +69,6 @@
   UmTestUtils::ExpectVariableHasValue(5, &var);
 }
 
-
 class CopyConstructorTestClass {
  public:
   CopyConstructorTestClass(void) : copied_(false) {}
@@ -84,7 +82,6 @@
   int val_ = 0;
 };
 
-
 TEST_F(UmPollCopyVariableTest, UseCopyConstructorTest) {
   // Ensures that CopyVariables indeed uses the copy constructor.
   const CopyConstructorTestClass source;
@@ -97,7 +94,6 @@
   EXPECT_TRUE(copy->copied_);
 }
 
-
 class UmConstCopyVariableTest : public ::testing::Test {};
 
 TEST_F(UmConstCopyVariableTest, SimpleTest) {
@@ -110,7 +106,6 @@
   UmTestUtils::ExpectVariableHasValue(5, &var);
 }
 
-
 class UmCallCopyVariableTest : public ::testing::Test {};
 
 CopyConstructorTestClass test_func(CopyConstructorTestClass* obj) {
@@ -126,8 +121,8 @@
   ASSERT_FALSE(test_obj.copied_);
   test_obj.val_ = 5;
 
-  base::Callback<CopyConstructorTestClass(void)> cb = base::Bind(
-      test_func, &test_obj);
+  base::Callback<CopyConstructorTestClass(void)> cb =
+      base::Bind(test_func, &test_obj);
   CallCopyVariable<CopyConstructorTestClass> var("var", cb);
 
   unique_ptr<const CopyConstructorTestClass> copy(
@@ -148,16 +143,13 @@
 
 class UmAsyncCopyVariableTest : public ::testing::Test {
  protected:
-  void SetUp() override {
-    loop_.SetAsCurrent();
-  }
+  void SetUp() override { loop_.SetAsCurrent(); }
 
   void TearDown() override {
     // No remaining event on the main loop.
     EXPECT_FALSE(loop_.PendingTasks());
   }
 
-
   brillo::FakeMessageLoop loop_{nullptr};
 };
 
@@ -185,9 +177,7 @@
 
 class CallCounterObserver : public BaseVariable::ObserverInterface {
  public:
-  void ValueChanged(BaseVariable* variable) {
-    calls_count_++;
-  }
+  void ValueChanged(BaseVariable* variable) { calls_count_++; }
 
   int calls_count_ = 0;
 };
diff --git a/update_manager/interactive_update_policy_impl.cc b/update_manager/interactive_update_policy_impl.cc
index df7f17b..872dc5d 100644
--- a/update_manager/interactive_update_policy_impl.cc
+++ b/update_manager/interactive_update_policy_impl.cc
@@ -16,6 +16,9 @@
 
 #include "update_engine/update_manager/interactive_update_policy_impl.h"
 
+using chromeos_update_engine::ErrorCode;
+using chromeos_update_engine::InstallPlan;
+
 namespace chromeos_update_manager {
 
 // Check to see if an interactive update was requested.
@@ -24,21 +27,51 @@
     State* state,
     std::string* error,
     UpdateCheckParams* result) const {
-  UpdaterProvider* const updater_provider = state->updater_provider();
+  bool interactive;
+  if (CheckInteractiveUpdateRequested(
+          ec, state->updater_provider(), &interactive)) {
+    result->interactive = interactive;
+    LOG(INFO) << "Forced update signaled ("
+              << (interactive ? "interactive" : "periodic")
+              << "), allowing update check.";
+    return EvalStatus::kSucceeded;
+  }
+  return EvalStatus::kContinue;
+}
 
+EvalStatus InteractiveUpdatePolicyImpl::UpdateCanBeApplied(
+    EvaluationContext* ec,
+    State* state,
+    std::string* error,
+    ErrorCode* result,
+    InstallPlan* install_plan) const {
+  bool interactive;
+  if (CheckInteractiveUpdateRequested(
+          ec, state->updater_provider(), &interactive)) {
+    LOG(INFO) << "Forced update signaled ("
+              << (interactive ? "interactive" : "periodic")
+              << "), allowing update to be applied.";
+    *result = ErrorCode::kSuccess;
+    return EvalStatus::kSucceeded;
+  }
+  return EvalStatus::kContinue;
+}
+
+bool InteractiveUpdatePolicyImpl::CheckInteractiveUpdateRequested(
+    EvaluationContext* ec,
+    UpdaterProvider* const updater_provider,
+    bool* interactive_out) const {
   // First, check to see if an interactive update was requested.
   const UpdateRequestStatus* forced_update_requested_p =
       ec->GetValue(updater_provider->var_forced_update_requested());
   if (forced_update_requested_p != nullptr &&
       *forced_update_requested_p != UpdateRequestStatus::kNone) {
-    result->is_interactive =
-        (*forced_update_requested_p == UpdateRequestStatus::kInteractive);
-    LOG(INFO) << "Forced update signaled ("
-              << (result->is_interactive ? "interactive" : "periodic")
-              << "), allowing update check.";
-    return EvalStatus::kSucceeded;
+    if (interactive_out)
+      *interactive_out =
+          (*forced_update_requested_p == UpdateRequestStatus::kInteractive);
+    return true;
   }
-  return EvalStatus::kContinue;
+  return false;
 }
 
 }  // namespace chromeos_update_manager
diff --git a/update_manager/interactive_update_policy_impl.h b/update_manager/interactive_update_policy_impl.h
index a431456..3690cfb 100644
--- a/update_manager/interactive_update_policy_impl.h
+++ b/update_manager/interactive_update_policy_impl.h
@@ -19,6 +19,8 @@
 
 #include <string>
 
+#include "update_engine/common/error_code.h"
+#include "update_engine/payload_consumer/install_plan.h"
 #include "update_engine/update_manager/policy_utils.h"
 
 namespace chromeos_update_manager {
@@ -35,15 +37,30 @@
                                 std::string* error,
                                 UpdateCheckParams* result) const override;
 
+  EvalStatus UpdateCanBeApplied(
+      EvaluationContext* ec,
+      State* state,
+      std::string* error,
+      chromeos_update_engine::ErrorCode* result,
+      chromeos_update_engine::InstallPlan* install_plan) const override;
+
  protected:
   std::string PolicyName() const override {
     return "InteractiveUpdatePolicyImpl";
   }
 
  private:
+  // Checks whether a forced update was requested. If there is a forced update,
+  // return true and set |interactive_out| to true if the forced update is
+  // interactive, and false otherwise. If there are no forced updates, return
+  // false and don't modify |interactive_out|.
+  bool CheckInteractiveUpdateRequested(EvaluationContext* ec,
+                                       UpdaterProvider* const updater_provider,
+                                       bool* interactive_out) const;
+
   DISALLOW_COPY_AND_ASSIGN(InteractiveUpdatePolicyImpl);
 };
 
 }  // namespace chromeos_update_manager
 
-#endif  // UPDATE_ENGINE_UPDATE_MANAGER_OFFICIAL_BUILD_CHECK_POLICY_IMPL_H_
+#endif  // UPDATE_ENGINE_UPDATE_MANAGER_INTERACTIVE_UPDATE_POLICY_IMPL_H_
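Both entry points above defer to CheckInteractiveUpdateRequested() and return EvalStatus::kContinue when no forced update is pending. A hypothetical driver loop, shown only to illustrate the kContinue/kSucceeded chaining convention; the real composition of policy implementations is outside this patch.

    EvalStatus EvaluateCheckChain(const std::vector<const Policy*>& impls,
                                  EvaluationContext* ec,
                                  State* state,
                                  std::string* error,
                                  UpdateCheckParams* result) {
      for (const Policy* impl : impls) {
        EvalStatus status = impl->UpdateCheckAllowed(ec, state, error, result);
        if (status != EvalStatus::kContinue)
          return status;  // kSucceeded, kFailed or kAskMeAgainLater ends the chain
      }
      return EvalStatus::kContinue;  // no implementation made a decision
    }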
diff --git a/update_manager/mock_policy.h b/update_manager/mock_policy.h
index 8060bf8..46b6c78 100644
--- a/update_manager/mock_policy.h
+++ b/update_manager/mock_policy.h
@@ -32,39 +32,42 @@
   explicit MockPolicy(chromeos_update_engine::ClockInterface* clock)
       : default_policy_(clock) {
     // We defer to the corresponding DefaultPolicy methods, by default.
-    ON_CALL(*this, UpdateCheckAllowed(testing::_, testing::_, testing::_,
-                                      testing::_))
-        .WillByDefault(testing::Invoke(
-                &default_policy_, &DefaultPolicy::UpdateCheckAllowed));
+    ON_CALL(*this,
+            UpdateCheckAllowed(testing::_, testing::_, testing::_, testing::_))
+        .WillByDefault(testing::Invoke(&default_policy_,
+                                       &DefaultPolicy::UpdateCheckAllowed));
     ON_CALL(*this,
             UpdateCanBeApplied(
                 testing::_, testing::_, testing::_, testing::_, testing::_))
         .WillByDefault(testing::Invoke(&default_policy_,
                                        &DefaultPolicy::UpdateCanBeApplied));
-    ON_CALL(*this, UpdateCanStart(testing::_, testing::_, testing::_,
-                                  testing::_, testing::_))
-        .WillByDefault(testing::Invoke(
-                &default_policy_, &DefaultPolicy::UpdateCanStart));
-    ON_CALL(*this, UpdateDownloadAllowed(testing::_, testing::_, testing::_,
-                                         testing::_))
-        .WillByDefault(testing::Invoke(
-                &default_policy_, &DefaultPolicy::UpdateDownloadAllowed));
+    ON_CALL(*this,
+            UpdateCanStart(
+                testing::_, testing::_, testing::_, testing::_, testing::_))
+        .WillByDefault(
+            testing::Invoke(&default_policy_, &DefaultPolicy::UpdateCanStart));
+    ON_CALL(
+        *this,
+        UpdateDownloadAllowed(testing::_, testing::_, testing::_, testing::_))
+        .WillByDefault(testing::Invoke(&default_policy_,
+                                       &DefaultPolicy::UpdateDownloadAllowed));
     ON_CALL(*this, P2PEnabled(testing::_, testing::_, testing::_, testing::_))
-        .WillByDefault(testing::Invoke(
-                &default_policy_, &DefaultPolicy::P2PEnabled));
-    ON_CALL(*this, P2PEnabledChanged(testing::_, testing::_, testing::_,
-                                     testing::_, testing::_))
-        .WillByDefault(testing::Invoke(
-                &default_policy_, &DefaultPolicy::P2PEnabledChanged));
+        .WillByDefault(
+            testing::Invoke(&default_policy_, &DefaultPolicy::P2PEnabled));
+    ON_CALL(*this,
+            P2PEnabledChanged(
+                testing::_, testing::_, testing::_, testing::_, testing::_))
+        .WillByDefault(testing::Invoke(&default_policy_,
+                                       &DefaultPolicy::P2PEnabledChanged));
   }
 
   MockPolicy() : MockPolicy(nullptr) {}
   ~MockPolicy() override {}
 
   // Policy overrides.
-  MOCK_CONST_METHOD4(UpdateCheckAllowed,
-                     EvalStatus(EvaluationContext*, State*, std::string*,
-                                UpdateCheckParams*));
+  MOCK_CONST_METHOD4(
+      UpdateCheckAllowed,
+      EvalStatus(EvaluationContext*, State*, std::string*, UpdateCheckParams*));
 
   MOCK_CONST_METHOD5(UpdateCanBeApplied,
                      EvalStatus(EvaluationContext*,
@@ -74,20 +77,22 @@
                                 chromeos_update_engine::InstallPlan*));
 
   MOCK_CONST_METHOD5(UpdateCanStart,
-                     EvalStatus(EvaluationContext*, State*, std::string*,
-                                UpdateDownloadParams*, UpdateState));
+                     EvalStatus(EvaluationContext*,
+                                State*,
+                                std::string*,
+                                UpdateDownloadParams*,
+                                UpdateState));
 
-  MOCK_CONST_METHOD4(UpdateDownloadAllowed,
-                     EvalStatus(EvaluationContext*, State*, std::string*,
-                                bool*));
+  MOCK_CONST_METHOD4(
+      UpdateDownloadAllowed,
+      EvalStatus(EvaluationContext*, State*, std::string*, bool*));
 
-  MOCK_CONST_METHOD4(P2PEnabled,
-                     EvalStatus(EvaluationContext*, State*, std::string*,
-                                bool*));
+  MOCK_CONST_METHOD4(
+      P2PEnabled, EvalStatus(EvaluationContext*, State*, std::string*, bool*));
 
-  MOCK_CONST_METHOD5(P2PEnabledChanged,
-                     EvalStatus(EvaluationContext*, State*, std::string*,
-                                bool*, bool));
+  MOCK_CONST_METHOD5(
+      P2PEnabledChanged,
+      EvalStatus(EvaluationContext*, State*, std::string*, bool*, bool));
 
  protected:
   // Policy override.
diff --git a/update_manager/mock_variable.h b/update_manager/mock_variable.h
index 1493491..8b6c276 100644
--- a/update_manager/mock_variable.h
+++ b/update_manager/mock_variable.h
@@ -26,7 +26,7 @@
 namespace chromeos_update_manager {
 
 // This is a generic mock of the Variable class.
-template<typename T>
+template <typename T>
 class MockVariable : public Variable<T> {
  public:
   using Variable<T>::Variable;
diff --git a/update_manager/policy.h b/update_manager/policy.h
index b60c4da..5d65d9a 100644
--- a/update_manager/policy.h
+++ b/update_manager/policy.h
@@ -24,6 +24,7 @@
 #include "update_engine/common/error_code.h"
 #include "update_engine/payload_consumer/install_plan.h"
 #include "update_engine/update_manager/evaluation_context.h"
+#include "update_engine/update_manager/rollback_prefs.h"
 #include "update_engine/update_manager/state.h"
 
 namespace chromeos_update_manager {
@@ -47,11 +48,18 @@
   //
   // A target version prefix, if imposed by policy; otherwise, an empty string.
   std::string target_version_prefix;
+  // Specifies whether rollback images are allowed by device policy.
+  bool rollback_allowed;
+  // Specifies the number of Chrome milestones rollback should be allowed,
+  // starting from the stable version at any time. Value is -1 if unspecified
+  // (e.g. no device policy is available yet), in this case no version
+  // roll-forward should happen.
+  int rollback_allowed_milestones;
   // A target channel, if so imposed by policy; otherwise, an empty string.
   std::string target_channel;
 
   // Whether the allowed update is interactive (user-initiated) or periodic.
-  bool is_interactive;
+  bool interactive;
 };
 
 // Input arguments to UpdateCanStart.
@@ -64,7 +72,7 @@
   //
   // Whether the current update check is an interactive one. The caller should
   // feed the value returned by the preceding call to UpdateCheckAllowed().
-  bool is_interactive;
+  bool interactive;
   // Whether it is a delta payload.
   bool is_delta_payload;
   // Wallclock time when payload was first (consecutively) offered by Omaha.
@@ -118,7 +126,7 @@
 
   // Information pertaining to update scattering.
   //
-  // The currently knwon (persisted) scattering wallclock-based wait period and
+  // The currently known (persisted) scattering wallclock-based wait period and
   // update check threshold; zero if none.
   base::TimeDelta scatter_wait_period;
   int scatter_check_threshold;
@@ -196,37 +204,34 @@
   // Returns the name of a public policy request.
   // IMPORTANT: Be sure to add a conditional for each new public policy that is
   // being added to this class in the future.
-  template<typename R, typename... Args>
-  std::string PolicyRequestName(
-      EvalStatus (Policy::*policy_method)(EvaluationContext*, State*,
-                                          std::string*, R*,
-                                          Args...) const) const {
+  template <typename R, typename... Args>
+  std::string PolicyRequestName(EvalStatus (Policy::*policy_method)(
+      EvaluationContext*, State*, std::string*, R*, Args...) const) const {
     std::string class_name = PolicyName() + "::";
 
-    if (reinterpret_cast<typeof(&Policy::UpdateCheckAllowed)>(
-            policy_method) == &Policy::UpdateCheckAllowed)
+    if (reinterpret_cast<typeof(&Policy::UpdateCheckAllowed)>(policy_method) ==
+        &Policy::UpdateCheckAllowed)
       return class_name + "UpdateCheckAllowed";
     if (reinterpret_cast<typeof(&Policy::UpdateCanBeApplied)>(policy_method) ==
         &Policy::UpdateCanBeApplied)
       return class_name + "UpdateCanBeApplied";
-    if (reinterpret_cast<typeof(&Policy::UpdateCanStart)>(
-            policy_method) == &Policy::UpdateCanStart)
+    if (reinterpret_cast<typeof(&Policy::UpdateCanStart)>(policy_method) ==
+        &Policy::UpdateCanStart)
       return class_name + "UpdateCanStart";
     if (reinterpret_cast<typeof(&Policy::UpdateDownloadAllowed)>(
             policy_method) == &Policy::UpdateDownloadAllowed)
       return class_name + "UpdateDownloadAllowed";
-    if (reinterpret_cast<typeof(&Policy::P2PEnabled)>(
-            policy_method) == &Policy::P2PEnabled)
+    if (reinterpret_cast<typeof(&Policy::P2PEnabled)>(policy_method) ==
+        &Policy::P2PEnabled)
       return class_name + "P2PEnabled";
-    if (reinterpret_cast<typeof(&Policy::P2PEnabledChanged)>(
-            policy_method) == &Policy::P2PEnabledChanged)
+    if (reinterpret_cast<typeof(&Policy::P2PEnabledChanged)>(policy_method) ==
+        &Policy::P2PEnabledChanged)
       return class_name + "P2PEnabledChanged";
 
     NOTREACHED();
     return class_name + "(unknown)";
   }
 
-
   // List of policy requests. A policy request takes an EvaluationContext as the
   // first argument, a State instance, a returned error message, a returned
   // value and optionally followed by one or more arbitrary constant arguments.
@@ -236,9 +241,10 @@
 
   // UpdateCheckAllowed returns whether it is allowed to request an update check
   // to Omaha.
-  virtual EvalStatus UpdateCheckAllowed(
-      EvaluationContext* ec, State* state, std::string* error,
-      UpdateCheckParams* result) const = 0;
+  virtual EvalStatus UpdateCheckAllowed(EvaluationContext* ec,
+                                        State* state,
+                                        std::string* error,
+                                        UpdateCheckParams* result) const = 0;
 
   // UpdateCanBeApplied returns whether the given |install_plan| can be acted
   // on at this time.  The reason for not applying is returned in |result|.
@@ -257,12 +263,11 @@
   // that need to be persisted has changed, returns
   // EvalStatus::kAskMeAgainLater. Arguments include an |update_state| that
   // encapsulates data pertaining to the current ongoing update process.
-  virtual EvalStatus UpdateCanStart(
-      EvaluationContext* ec,
-      State* state,
-      std::string* error,
-      UpdateDownloadParams* result,
-      UpdateState update_state) const = 0;
+  virtual EvalStatus UpdateCanStart(EvaluationContext* ec,
+                                    State* state,
+                                    std::string* error,
+                                    UpdateDownloadParams* result,
+                                    UpdateState update_state) const = 0;
 
   // Checks whether downloading of an update is allowed; currently, this checks
   // whether the network connection type is suitable for updating over.  May
@@ -270,26 +275,28 @@
   // Returns |EvalStatus::kSucceeded|, setting |result| according to whether or
   // not the current connection can be used; on error, returns
   // |EvalStatus::kFailed| and sets |error| accordingly.
-  virtual EvalStatus UpdateDownloadAllowed(
-      EvaluationContext* ec,
-      State* state,
-      std::string* error,
-      bool* result) const = 0;
+  virtual EvalStatus UpdateDownloadAllowed(EvaluationContext* ec,
+                                           State* state,
+                                           std::string* error,
+                                           bool* result) const = 0;
 
   // Checks whether P2P is enabled. This may consult device policy and other
   // global settings.
-  virtual EvalStatus P2PEnabled(
-      EvaluationContext* ec, State* state, std::string* error,
-      bool* result) const = 0;
+  virtual EvalStatus P2PEnabled(EvaluationContext* ec,
+                                State* state,
+                                std::string* error,
+                                bool* result) const = 0;
 
   // Checks whether P2P is enabled, but blocks (returns
   // |EvalStatus::kAskMeAgainLater|) until it is different from |prev_result|.
   // If the P2P enabled status is not expected to change, will return
   // immediately with |EvalStatus::kSucceeded|. This internally uses the
   // P2PEnabled() policy above.
-  virtual EvalStatus P2PEnabledChanged(
-      EvaluationContext* ec, State* state, std::string* error,
-      bool* result, bool prev_result) const = 0;
+  virtual EvalStatus P2PEnabledChanged(EvaluationContext* ec,
+                                       State* state,
+                                       std::string* error,
+                                       bool* result,
+                                       bool prev_result) const = 0;
 
  protected:
   Policy() {}
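For the new UpdateCheckParams fields, a sketch of how a policy implementation might fill them from the device policy provider. The defaults follow the field comments above; the mapping of kRollbackAndPowerwash onto rollback_allowed is an assumption made for illustration, not something this patch defines.

    // Sketch only; runs inside some Policy method with |ec|, |state|, |result|.
    result->rollback_allowed = false;
    result->rollback_allowed_milestones = -1;  // -1 = no device policy available
    const RollbackToTargetVersion* rollback = ec->GetValue(
        state->device_policy_provider()->var_rollback_to_target_version());
    if (rollback != nullptr) {
      result->rollback_allowed =  // assumed mapping, for illustration only
          *rollback == RollbackToTargetVersion::kRollbackAndPowerwash;
    }
    const int* milestones = ec->GetValue(
        state->device_policy_provider()->var_rollback_allowed_milestones());
    if (milestones != nullptr)
      result->rollback_allowed_milestones = *milestones;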
diff --git a/update_manager/policy_test_utils.cc b/update_manager/policy_test_utils.cc
index fbfcb82..5491e00 100644
--- a/update_manager/policy_test_utils.cc
+++ b/update_manager/policy_test_utils.cc
@@ -48,6 +48,17 @@
   fake_clock_.SetWallclockTime(Time::FromInternalValue(12345678901234L));
 }
 
+void UmPolicyTestBase::SetUpDefaultTimeProvider() {
+  Time current_time = fake_clock_.GetWallclockTime();
+  base::Time::Exploded exploded;
+  current_time.LocalExplode(&exploded);
+  fake_state_.time_provider()->var_curr_hour()->reset(new int(exploded.hour));
+  fake_state_.time_provider()->var_curr_minute()->reset(
+      new int(exploded.minute));
+  fake_state_.time_provider()->var_curr_date()->reset(
+      new Time(current_time.LocalMidnight()));
+}
+
 void UmPolicyTestBase::SetUpDefaultState() {
   fake_state_.updater_provider()->var_updater_started_time()->reset(
       new Time(fake_clock_.GetWallclockTime()));
@@ -73,7 +84,7 @@
   // This is a non-interactive check returning a delta payload, seen for the
   // first time (|first_seen_period| ago). Clearly, there were no failed
   // attempts so far.
-  update_state.is_interactive = false;
+  update_state.interactive = false;
   update_state.is_delta_payload = false;
   update_state.first_seen = first_seen_time;
   update_state.num_checks = 1;
diff --git a/update_manager/policy_test_utils.h b/update_manager/policy_test_utils.h
index 5b93f7b..eb5758f 100644
--- a/update_manager/policy_test_utils.h
+++ b/update_manager/policy_test_utils.h
@@ -42,6 +42,9 @@
   // Sets the clock to fixed values.
   virtual void SetUpDefaultClock();
 
+  // Sets the fake time provider to the time given by the fake clock.
+  virtual void SetUpDefaultTimeProvider();
+
   // Sets up the default state in fake_state_.  override to add Policy-specific
   // items, but only after calling this class's implementation.
   virtual void SetUpDefaultState();
diff --git a/update_manager/policy_utils.h b/update_manager/policy_utils.h
index eaf9ee9..3204780 100644
--- a/update_manager/policy_utils.h
+++ b/update_manager/policy_utils.h
@@ -30,13 +30,13 @@
 //   const int* my_value = ec->GetValue(state->my_provider()->var_my_value());
 //   POLICY_CHECK_VALUE_AND_FAIL(my_value, error);
 //
-#define POLICY_CHECK_VALUE_AND_FAIL(ptr, error) \
-    do { \
-      if ((ptr) == nullptr) { \
-        *(error) = #ptr " is required but is null."; \
-        return EvalStatus::kFailed; \
-      } \
-    } while (false)
+#define POLICY_CHECK_VALUE_AND_FAIL(ptr, error)    \
+  do {                                             \
+    if ((ptr) == nullptr) {                        \
+      *(error) = #ptr " is required but is null."; \
+      return EvalStatus::kFailed;                  \
+    }                                              \
+  } while (false)
 
 namespace chromeos_update_manager {
 
diff --git a/update_manager/prng_unittest.cc b/update_manager/prng_unittest.cc
index 2a3f689..cb35f0a 100644
--- a/update_manager/prng_unittest.cc
+++ b/update_manager/prng_unittest.cc
@@ -63,7 +63,7 @@
 
 TEST(UmPRNGTest, RandCoversRange) {
   PRNG a(42);
-  int hits[11] = { 0 };
+  int hits[11] = {0};
 
   for (int i = 0; i < 1000; i++) {
     int r = a.RandMinMax(0, 10);
diff --git a/update_manager/real_device_policy_provider.cc b/update_manager/real_device_policy_provider.cc
index d9880c3..586ee3e 100644
--- a/update_manager/real_device_policy_provider.cc
+++ b/update_manager/real_device_policy_provider.cc
@@ -18,6 +18,8 @@
 
 #include <stdint.h>
 
+#include <vector>
+
 #include <base/location.h>
 #include <base/logging.h>
 #include <base/time/time.h>
@@ -33,6 +35,7 @@
 using policy::DevicePolicy;
 using std::set;
 using std::string;
+using std::vector;
 
 namespace {
 
@@ -100,10 +103,9 @@
       TimeDelta::FromMinutes(kDevicePolicyRefreshRateInMinutes));
 }
 
-template<typename T>
+template <typename T>
 void RealDevicePolicyProvider::UpdateVariable(
-    AsyncCopyVariable<T>* var,
-    bool (DevicePolicy::*getter_method)(T*) const) {
+    AsyncCopyVariable<T>* var, bool (DevicePolicy::*getter_method)(T*) const) {
   T new_value;
   if (policy_provider_->device_policy_is_loaded() &&
       (policy_provider_->GetDevicePolicy().*getter_method)(&new_value)) {
@@ -113,7 +115,7 @@
   }
 }
 
-template<typename T>
+template <typename T>
 void RealDevicePolicyProvider::UpdateVariable(
     AsyncCopyVariable<T>* var,
     bool (RealDevicePolicyProvider::*getter_method)(T*) const) {
@@ -126,11 +128,28 @@
   }
 }
 
+bool RealDevicePolicyProvider::ConvertRollbackToTargetVersion(
+    RollbackToTargetVersion* rollback_to_target_version) const {
+  int rollback_to_target_version_int;
+  if (!policy_provider_->GetDevicePolicy().GetRollbackToTargetVersion(
+          &rollback_to_target_version_int)) {
+    return false;
+  }
+  if (rollback_to_target_version_int < 0 ||
+      rollback_to_target_version_int >=
+          static_cast<int>(RollbackToTargetVersion::kMaxValue)) {
+    return false;
+  }
+  *rollback_to_target_version =
+      static_cast<RollbackToTargetVersion>(rollback_to_target_version_int);
+  return true;
+}
+
 bool RealDevicePolicyProvider::ConvertAllowedConnectionTypesForUpdate(
-      set<ConnectionType>* allowed_types) const {
+    set<ConnectionType>* allowed_types) const {
   set<string> allowed_types_str;
-  if (!policy_provider_->GetDevicePolicy()
-      .GetAllowedConnectionTypesForUpdate(&allowed_types_str)) {
+  if (!policy_provider_->GetDevicePolicy().GetAllowedConnectionTypesForUpdate(
+          &allowed_types_str)) {
     return false;
   }
   allowed_types->clear();
@@ -150,7 +169,7 @@
     TimeDelta* scatter_factor) const {
   int64_t scatter_factor_in_seconds;
   if (!policy_provider_->GetDevicePolicy().GetScatterFactorInSeconds(
-      &scatter_factor_in_seconds)) {
+          &scatter_factor_in_seconds)) {
     return false;
   }
   if (scatter_factor_in_seconds < 0) {
@@ -162,6 +181,23 @@
   return true;
 }
 
+bool RealDevicePolicyProvider::ConvertDisallowedTimeIntervals(
+    WeeklyTimeIntervalVector* disallowed_intervals_out) const {
+  vector<DevicePolicy::WeeklyTimeInterval> parsed_intervals;
+  if (!policy_provider_->GetDevicePolicy().GetDisallowedTimeIntervals(
+          &parsed_intervals)) {
+    return false;
+  }
+
+  disallowed_intervals_out->clear();
+  for (const auto& interval : parsed_intervals) {
+    disallowed_intervals_out->emplace_back(
+        WeeklyTime(interval.start_day_of_week, interval.start_time),
+        WeeklyTime(interval.end_day_of_week, interval.end_time));
+  }
+  return true;
+}
+
 void RealDevicePolicyProvider::RefreshDevicePolicy() {
   if (!policy_provider_->Reload()) {
     LOG(INFO) << "No device policies/settings present.";
@@ -176,6 +212,14 @@
   UpdateVariable(&var_update_disabled_, &DevicePolicy::GetUpdateDisabled);
   UpdateVariable(&var_target_version_prefix_,
                  &DevicePolicy::GetTargetVersionPrefix);
+  UpdateVariable(&var_rollback_to_target_version_,
+                 &RealDevicePolicyProvider::ConvertRollbackToTargetVersion);
+  UpdateVariable(&var_rollback_allowed_milestones_,
+                 &DevicePolicy::GetRollbackAllowedMilestones);
+  if (policy_provider_->IsConsumerDevice()) {
+    // For consumer devices (which won't ever have policy), set value to 0.
+    var_rollback_allowed_milestones_.SetValue(0);
+  }
   UpdateVariable(&var_scatter_factor_,
                  &RealDevicePolicyProvider::ConvertScatterFactor);
   UpdateVariable(
@@ -187,6 +231,10 @@
   UpdateVariable(&var_au_p2p_enabled_, &DevicePolicy::GetAuP2PEnabled);
   UpdateVariable(&var_allow_kiosk_app_control_chrome_version_,
                  &DevicePolicy::GetAllowKioskAppControlChromeVersion);
+  UpdateVariable(&var_auto_launched_kiosk_app_id_,
+                 &DevicePolicy::GetAutoLaunchedKioskAppId);
+  UpdateVariable(&var_disallowed_time_intervals_,
+                 &RealDevicePolicyProvider::ConvertDisallowedTimeIntervals);
 }
 
 }  // namespace chromeos_update_manager
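The file above establishes the pattern for exposing a device-policy value: one AsyncCopyVariable member plus one UpdateVariable() call in RefreshDevicePolicy(), either against a DevicePolicy getter directly or through a Convert*() wrapper when the raw value needs validation or translation. A sketch of wiring a hypothetical new value; GetSomeNewFlag is invented for the example and is not a real DevicePolicy method.

    // Hypothetical member for real_device_policy_provider.h:
    AsyncCopyVariable<bool> var_some_new_flag_{"some_new_flag"};

    // Hypothetical wiring inside RealDevicePolicyProvider::RefreshDevicePolicy():
    UpdateVariable(&var_some_new_flag_, &DevicePolicy::GetSomeNewFlag);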
diff --git a/update_manager/real_device_policy_provider.h b/update_manager/real_device_policy_provider.h
index 5b5ee58..bda4cff 100644
--- a/update_manager/real_device_policy_provider.h
+++ b/update_manager/real_device_policy_provider.h
@@ -20,6 +20,7 @@
 #include <memory>
 #include <set>
 #include <string>
+#include <utility>
 
 #include <brillo/message_loops/message_loop.h>
 #include <gtest/gtest_prod.h>  // for FRIEND_TEST
@@ -71,31 +72,43 @@
     return &var_target_version_prefix_;
   }
 
+  Variable<RollbackToTargetVersion>* var_rollback_to_target_version() override {
+    return &var_rollback_to_target_version_;
+  }
+
+  Variable<int>* var_rollback_allowed_milestones() override {
+    return &var_rollback_allowed_milestones_;
+  }
+
   Variable<base::TimeDelta>* var_scatter_factor() override {
     return &var_scatter_factor_;
   }
 
   Variable<std::set<chromeos_update_engine::ConnectionType>>*
-      var_allowed_connection_types_for_update() override {
+  var_allowed_connection_types_for_update() override {
     return &var_allowed_connection_types_for_update_;
   }
 
-  Variable<std::string>* var_owner() override {
-    return &var_owner_;
-  }
+  Variable<std::string>* var_owner() override { return &var_owner_; }
 
   Variable<bool>* var_http_downloads_enabled() override {
     return &var_http_downloads_enabled_;
   }
 
-  Variable<bool>* var_au_p2p_enabled() override {
-    return &var_au_p2p_enabled_;
-  }
+  Variable<bool>* var_au_p2p_enabled() override { return &var_au_p2p_enabled_; }
 
   Variable<bool>* var_allow_kiosk_app_control_chrome_version() override {
     return &var_allow_kiosk_app_control_chrome_version_;
   }
 
+  Variable<std::string>* var_auto_launched_kiosk_app_id() override {
+    return &var_auto_launched_kiosk_app_id_;
+  }
+
+  Variable<WeeklyTimeIntervalVector>* var_disallowed_time_intervals() override {
+    return &var_disallowed_time_intervals_;
+  }
+
  private:
   FRIEND_TEST(UmRealDevicePolicyProviderTest, RefreshScheduledTest);
   FRIEND_TEST(UmRealDevicePolicyProviderTest, NonExistentDevicePolicyReloaded);
@@ -119,16 +132,21 @@
 
   // Updates the async variable |var| based on the result value of the method
   // passed, which is a DevicePolicy getter method.
-  template<typename T>
+  template <typename T>
   void UpdateVariable(AsyncCopyVariable<T>* var,
                       bool (policy::DevicePolicy::*getter_method)(T*) const);
 
   // Updates the async variable |var| based on the result value of the getter
   // method passed, which is a wrapper getter on this class.
-  template<typename T>
-  void UpdateVariable(
-      AsyncCopyVariable<T>* var,
-      bool (RealDevicePolicyProvider::*getter_method)(T*) const);
+  template <typename T>
+  void UpdateVariable(AsyncCopyVariable<T>* var,
+                      bool (RealDevicePolicyProvider::*getter_method)(T*)
+                          const);
+
+  // Wrapper for DevicePolicy::GetRollbackToTargetVersion() that converts the
+  // result to RollbackToTargetVersion.
+  bool ConvertRollbackToTargetVersion(
+      RollbackToTargetVersion* rollback_to_target_version) const;
 
   // Wrapper for DevicePolicy::GetScatterFactorInSeconds() that converts the
   // result to a base::TimeDelta. It returns the same value as
@@ -140,6 +158,12 @@
   bool ConvertAllowedConnectionTypesForUpdate(
       std::set<chromeos_update_engine::ConnectionType>* allowed_types) const;
 
+  // Wrapper for DevicePolicy::GetDisallowedTimeIntervals() that converts
+  // the DevicePolicy::WeeklyTimeInterval structs to WeeklyTimeInterval objects,
+  // which offer more functionality.
+  bool ConvertDisallowedTimeIntervals(
+      WeeklyTimeIntervalVector* disallowed_intervals_out) const;
+
   // Used for fetching information about the device policy.
   policy::PolicyProvider* policy_provider_;
 
@@ -154,8 +178,8 @@
 #endif  // USE_DBUS
 
   // Variable exposing whether the policy is loaded.
-  AsyncCopyVariable<bool> var_device_policy_is_loaded_{
-      "policy_is_loaded", false};
+  AsyncCopyVariable<bool> var_device_policy_is_loaded_{"policy_is_loaded",
+                                                       false};
 
   // Variables mapping the exposed methods from the policy::DevicePolicy.
   AsyncCopyVariable<std::string> var_release_channel_{"release_channel"};
@@ -164,6 +188,10 @@
   AsyncCopyVariable<bool> var_update_disabled_{"update_disabled"};
   AsyncCopyVariable<std::string> var_target_version_prefix_{
       "target_version_prefix"};
+  AsyncCopyVariable<RollbackToTargetVersion> var_rollback_to_target_version_{
+      "rollback_to_target_version"};
+  AsyncCopyVariable<int> var_rollback_allowed_milestones_{
+      "rollback_allowed_milestones"};
   AsyncCopyVariable<base::TimeDelta> var_scatter_factor_{"scatter_factor"};
   AsyncCopyVariable<std::set<chromeos_update_engine::ConnectionType>>
       var_allowed_connection_types_for_update_{
@@ -173,6 +201,10 @@
   AsyncCopyVariable<bool> var_au_p2p_enabled_{"au_p2p_enabled"};
   AsyncCopyVariable<bool> var_allow_kiosk_app_control_chrome_version_{
       "allow_kiosk_app_control_chrome_version"};
+  AsyncCopyVariable<WeeklyTimeIntervalVector> var_disallowed_time_intervals_{
+      "update_time_restrictions"};
+  AsyncCopyVariable<std::string> var_auto_launched_kiosk_app_id_{
+      "auto_launched_kiosk_app_id"};
 
   DISALLOW_COPY_AND_ASSIGN(RealDevicePolicyProvider);
 };
diff --git a/update_manager/real_device_policy_provider_unittest.cc b/update_manager/real_device_policy_provider_unittest.cc
index 167cbd9..0d7b0d0 100644
--- a/update_manager/real_device_policy_provider_unittest.cc
+++ b/update_manager/real_device_policy_provider_unittest.cc
@@ -17,6 +17,7 @@
 #include "update_engine/update_manager/real_device_policy_provider.h"
 
 #include <memory>
+#include <vector>
 
 #include <base/memory/ptr_util.h>
 #include <brillo/message_loops/fake_message_loop.h>
@@ -40,18 +41,20 @@
 using base::TimeDelta;
 using brillo::MessageLoop;
 using chromeos_update_engine::ConnectionType;
+using policy::DevicePolicy;
 #if USE_DBUS
 using chromeos_update_engine::dbus_test_utils::MockSignalHandler;
 #endif  // USE_DBUS
 using std::set;
 using std::string;
 using std::unique_ptr;
+using std::vector;
+using testing::_;
 using testing::DoAll;
 using testing::Mock;
 using testing::Return;
 using testing::ReturnRef;
 using testing::SetArgPointee;
-using testing::_;
 
 namespace chromeos_update_manager {
 
@@ -87,8 +90,7 @@
   }
 
   void SetUpNonExistentDevicePolicy() {
-    ON_CALL(mock_policy_provider_, Reload())
-        .WillByDefault(Return(false));
+    ON_CALL(mock_policy_provider_, Reload()).WillByDefault(Return(false));
     ON_CALL(mock_policy_provider_, device_policy_is_loaded())
         .WillByDefault(Return(false));
     EXPECT_CALL(mock_policy_provider_, GetDevicePolicy()).Times(0);
@@ -96,8 +98,7 @@
 
   void SetUpExistentDevicePolicy() {
     // Setup the default behavior of the mocked PolicyProvider.
-    ON_CALL(mock_policy_provider_, Reload())
-        .WillByDefault(Return(true));
+    ON_CALL(mock_policy_provider_, Reload()).WillByDefault(Return(true));
     ON_CALL(mock_policy_provider_, device_policy_is_loaded())
         .WillByDefault(Return(true));
     ON_CALL(mock_policy_provider_, GetDevicePolicy())
@@ -178,6 +179,10 @@
   UmTestUtils::ExpectVariableNotSet(provider_->var_release_channel_delegated());
   UmTestUtils::ExpectVariableNotSet(provider_->var_update_disabled());
   UmTestUtils::ExpectVariableNotSet(provider_->var_target_version_prefix());
+  UmTestUtils::ExpectVariableNotSet(
+      provider_->var_rollback_to_target_version());
+  UmTestUtils::ExpectVariableNotSet(
+      provider_->var_rollback_allowed_milestones());
   UmTestUtils::ExpectVariableNotSet(provider_->var_scatter_factor());
   UmTestUtils::ExpectVariableNotSet(
       provider_->var_allowed_connection_types_for_update());
@@ -186,6 +191,9 @@
   UmTestUtils::ExpectVariableNotSet(provider_->var_au_p2p_enabled());
   UmTestUtils::ExpectVariableNotSet(
       provider_->var_allow_kiosk_app_control_chrome_version());
+  UmTestUtils::ExpectVariableNotSet(
+      provider_->var_auto_launched_kiosk_app_id());
+  UmTestUtils::ExpectVariableNotSet(provider_->var_disallowed_time_intervals());
 }
 
 TEST_F(UmRealDevicePolicyProviderTest, ValuesUpdated) {
@@ -203,6 +211,8 @@
       .WillOnce(Return(false));
   EXPECT_CALL(mock_device_policy_, GetAllowKioskAppControlChromeVersion(_))
       .WillOnce(DoAll(SetArgPointee<0>(true), Return(true)));
+  EXPECT_CALL(mock_device_policy_, GetAutoLaunchedKioskAppId(_))
+      .WillOnce(DoAll(SetArgPointee<0>(string("myapp")), Return(true)));
 
   provider_->RefreshDevicePolicy();
 
@@ -216,6 +226,63 @@
       provider_->var_allowed_connection_types_for_update());
   UmTestUtils::ExpectVariableHasValue(
       true, provider_->var_allow_kiosk_app_control_chrome_version());
+  UmTestUtils::ExpectVariableHasValue(
+      string("myapp"), provider_->var_auto_launched_kiosk_app_id());
+}
+
+TEST_F(UmRealDevicePolicyProviderTest, RollbackToTargetVersionConverted) {
+  SetUpExistentDevicePolicy();
+  EXPECT_CALL(mock_device_policy_, GetRollbackToTargetVersion(_))
+#if USE_DBUS
+      .Times(2)
+#else
+      .Times(1)
+#endif  // USE_DBUS
+      .WillRepeatedly(DoAll(SetArgPointee<0>(2), Return(true)));
+  EXPECT_TRUE(provider_->Init());
+  loop_.RunOnce(false);
+
+  UmTestUtils::ExpectVariableHasValue(
+      RollbackToTargetVersion::kRollbackAndPowerwash,
+      provider_->var_rollback_to_target_version());
+}
+
+TEST_F(UmRealDevicePolicyProviderTest, RollbackAllowedMilestonesOobe) {
+  SetUpNonExistentDevicePolicy();
+  EXPECT_CALL(mock_device_policy_, GetRollbackAllowedMilestones(_)).Times(0);
+  ON_CALL(mock_policy_provider_, IsConsumerDevice())
+      .WillByDefault(Return(false));
+  EXPECT_TRUE(provider_->Init());
+  loop_.RunOnce(false);
+
+  UmTestUtils::ExpectVariableNotSet(
+      provider_->var_rollback_allowed_milestones());
+}
+
+TEST_F(UmRealDevicePolicyProviderTest, RollbackAllowedMilestonesConsumer) {
+  SetUpNonExistentDevicePolicy();
+  EXPECT_CALL(mock_device_policy_, GetRollbackAllowedMilestones(_)).Times(0);
+  ON_CALL(mock_policy_provider_, IsConsumerDevice())
+      .WillByDefault(Return(true));
+  EXPECT_TRUE(provider_->Init());
+  loop_.RunOnce(false);
+
+  UmTestUtils::ExpectVariableHasValue(
+      0, provider_->var_rollback_allowed_milestones());
+}
+
+TEST_F(UmRealDevicePolicyProviderTest,
+       RollbackAllowedMilestonesEnterprisePolicySet) {
+  SetUpExistentDevicePolicy();
+  ON_CALL(mock_device_policy_, GetRollbackAllowedMilestones(_))
+      .WillByDefault(DoAll(SetArgPointee<0>(2), Return(true)));
+  ON_CALL(mock_policy_provider_, IsConsumerDevice())
+      .WillByDefault(Return(false));
+  EXPECT_TRUE(provider_->Init());
+  loop_.RunOnce(false);
+
+  UmTestUtils::ExpectVariableHasValue(
+      2, provider_->var_rollback_allowed_milestones());
 }
 
 TEST_F(UmRealDevicePolicyProviderTest, ScatterFactorConverted) {
@@ -268,4 +335,25 @@
       provider_->var_allowed_connection_types_for_update());
 }
 
+TEST_F(UmRealDevicePolicyProviderTest, DisallowedIntervalsConverted) {
+  SetUpExistentDevicePolicy();
+
+  vector<DevicePolicy::WeeklyTimeInterval> intervals = {
+      {5, TimeDelta::FromHours(5), 6, TimeDelta::FromHours(8)},
+      {1, TimeDelta::FromHours(1), 3, TimeDelta::FromHours(10)}};
+
+  EXPECT_CALL(mock_device_policy_, GetDisallowedTimeIntervals(_))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(intervals), Return(true)));
+  EXPECT_TRUE(provider_->Init());
+  loop_.RunOnce(false);
+
+  UmTestUtils::ExpectVariableHasValue(
+      WeeklyTimeIntervalVector{
+          WeeklyTimeInterval(WeeklyTime(5, TimeDelta::FromHours(5)),
+                             WeeklyTime(6, TimeDelta::FromHours(8))),
+          WeeklyTimeInterval(WeeklyTime(1, TimeDelta::FromHours(1)),
+                             WeeklyTime(3, TimeDelta::FromHours(10)))},
+      provider_->var_disallowed_time_intervals());
+}
+
 }  // namespace chromeos_update_manager
diff --git a/update_manager/real_random_provider_unittest.cc b/update_manager/real_random_provider_unittest.cc
index ca67da6..1b22063 100644
--- a/update_manager/real_random_provider_unittest.cc
+++ b/update_manager/real_random_provider_unittest.cc
@@ -55,9 +55,8 @@
   // by design, once every 2^320 runs.
   bool is_same_value = true;
   for (int i = 0; i < 5; i++) {
-    unique_ptr<const uint64_t> other_value(
-        provider_->var_seed()->GetValue(UmTestUtils::DefaultTimeout(),
-                                        nullptr));
+    unique_ptr<const uint64_t> other_value(provider_->var_seed()->GetValue(
+        UmTestUtils::DefaultTimeout(), nullptr));
     ASSERT_NE(nullptr, other_value.get());
     is_same_value = is_same_value && *other_value == *value;
   }
diff --git a/update_manager/real_shill_provider.cc b/update_manager/real_shill_provider.cc
index 2c58a7e..0144603 100644
--- a/update_manager/real_shill_provider.cc
+++ b/update_manager/real_shill_provider.cc
@@ -94,8 +94,8 @@
 
   // Update the connection status.
   default_service_path_ = default_service_path;
-  bool is_connected = (default_service_path_.IsValid() &&
-                       default_service_path_.value() != "/");
+  bool is_connected =
+      (default_service_path_.IsValid() && default_service_path_.value() != "/");
   var_is_connected_.SetValue(is_connected);
   var_conn_last_changed_.SetValue(clock_->GetWallclockTime());
 
diff --git a/update_manager/real_shill_provider.h b/update_manager/real_shill_provider.h
index e7708c8..ec5c570 100644
--- a/update_manager/real_shill_provider.h
+++ b/update_manager/real_shill_provider.h
@@ -46,15 +46,14 @@
   // Initializes the provider and returns whether it succeeded.
   bool Init();
 
-  Variable<bool>* var_is_connected() override {
-    return &var_is_connected_;
-  }
+  Variable<bool>* var_is_connected() override { return &var_is_connected_; }
 
   Variable<chromeos_update_engine::ConnectionType>* var_conn_type() override {
     return &var_conn_type_;
   }
 
-  Variable<chromeos_update_engine::ConnectionTethering>* var_conn_tethering() override {
+  Variable<chromeos_update_engine::ConnectionTethering>* var_conn_tethering()
+      override {
     return &var_conn_tethering_;
   }
 
diff --git a/update_manager/real_shill_provider_unittest.cc b/update_manager/real_shill_provider_unittest.cc
index 6506923..dcc729a 100644
--- a/update_manager/real_shill_provider_unittest.cc
+++ b/update_manager/real_shill_provider_unittest.cc
@@ -41,10 +41,10 @@
 using org::chromium::flimflam::ManagerProxyMock;
 using org::chromium::flimflam::ServiceProxyMock;
 using std::unique_ptr;
+using testing::_;
 using testing::Mock;
 using testing::Return;
 using testing::SetArgPointee;
-using testing::_;
 
 namespace {
 
@@ -99,9 +99,7 @@
     return time;
   }
 
-  Time ConnChangedTime() {
-    return InitTime() + TimeDelta::FromSeconds(10);
-  }
+  Time ConnChangedTime() { return InitTime() + TimeDelta::FromSeconds(10); }
 
   // Sets the default_service object path in the response from the
   // ManagerProxyMock instance.
@@ -210,8 +208,9 @@
   chromeos_update_engine::FakeShillProxy* fake_shill_proxy_;
 
   // The registered signal handler for the signal Manager.PropertyChanged.
-  chromeos_update_engine::dbus_test_utils::MockSignalHandler<
-      void(const std::string&, const brillo::Any&)> manager_property_changed_;
+  chromeos_update_engine::dbus_test_utils::MockSignalHandler<void(
+      const std::string&, const brillo::Any&)>
+      manager_property_changed_;
 
   unique_ptr<RealShillProvider> provider_;
 };
@@ -269,7 +268,6 @@
   return service_proxy_mock;
 }
 
-
 // Query the connection status, type and time last changed, as they were set
 // during initialization (no signals).
 TEST_F(UmRealShillProviderTest, ReadBaseValues) {
@@ -315,17 +313,15 @@
 // Test that Wifi connection is identified correctly.
 TEST_F(UmRealShillProviderTest, ReadConnTypeWifi) {
   InitWithDefaultService("/");
-  SetupConnectionAndTestType(kFakeWifiServicePath,
-                             shill::kTypeWifi,
-                             ConnectionType::kWifi);
+  SetupConnectionAndTestType(
+      kFakeWifiServicePath, shill::kTypeWifi, ConnectionType::kWifi);
 }
 
 // Test that Wimax connection is identified correctly.
 TEST_F(UmRealShillProviderTest, ReadConnTypeWimax) {
   InitWithDefaultService("/");
-  SetupConnectionAndTestType(kFakeWimaxServicePath,
-                             shill::kTypeWimax,
-                             ConnectionType::kWimax);
+  SetupConnectionAndTestType(
+      kFakeWimaxServicePath, shill::kTypeWimax, ConnectionType::kWimax);
 }
 
 // Test that Bluetooth connection is identified correctly.
@@ -347,9 +343,8 @@
 // Test that an unknown connection is identified as such.
 TEST_F(UmRealShillProviderTest, ReadConnTypeUnknown) {
   InitWithDefaultService("/");
-  SetupConnectionAndTestType(kFakeUnknownServicePath,
-                             "FooConnectionType",
-                             ConnectionType::kUnknown);
+  SetupConnectionAndTestType(
+      kFakeUnknownServicePath, "FooConnectionType", ConnectionType::kUnknown);
 }
 
 // Tests that VPN connection is identified correctly.
@@ -406,9 +401,8 @@
                              shill::kTypeEthernet,
                              ConnectionType::kEthernet);
 
-  SetupConnectionAndTestType(kFakeWifiServicePath,
-                             shill::kTypeWifi,
-                             ConnectionType::kWifi);
+  SetupConnectionAndTestType(
+      kFakeWifiServicePath, shill::kTypeWifi, ConnectionType::kWifi);
 }
 
 // Test that a non-tethering mode is identified correctly.
@@ -438,9 +432,8 @@
 // Test that an unknown tethering mode is identified as such.
 TEST_F(UmRealShillProviderTest, ReadConnTetheringUnknown) {
   InitWithDefaultService("/");
-  SetupConnectionAndTestTethering(kFakeWifiServicePath,
-                                  "FooConnTethering",
-                                  ConnectionTethering::kUnknown);
+  SetupConnectionAndTestTethering(
+      kFakeWifiServicePath, "FooConnTethering", ConnectionTethering::kUnknown);
 }
 
 // Ensure that the connection tethering mode is properly cached in the provider.
diff --git a/update_manager/real_state.h b/update_manager/real_state.h
index e83c49d..056d46d 100644
--- a/update_manager/real_state.h
+++ b/update_manager/real_state.h
@@ -34,34 +34,24 @@
             ShillProvider* shill_provider,
             SystemProvider* system_provider,
             TimeProvider* time_provider,
-            UpdaterProvider* updater_provider) :
-      config_provider_(config_provider),
-      device_policy_provider_(device_policy_provider),
-      random_provider_(random_provider),
-      shill_provider_(shill_provider),
-      system_provider_(system_provider),
-      time_provider_(time_provider),
-      updater_provider_(updater_provider) {}
+            UpdaterProvider* updater_provider)
+      : config_provider_(config_provider),
+        device_policy_provider_(device_policy_provider),
+        random_provider_(random_provider),
+        shill_provider_(shill_provider),
+        system_provider_(system_provider),
+        time_provider_(time_provider),
+        updater_provider_(updater_provider) {}
 
   // These methods return the given provider.
-  ConfigProvider* config_provider() override {
-    return config_provider_.get();
-  }
+  ConfigProvider* config_provider() override { return config_provider_.get(); }
   DevicePolicyProvider* device_policy_provider() override {
     return device_policy_provider_.get();
   }
-  RandomProvider* random_provider() override {
-    return random_provider_.get();
-  }
-  ShillProvider* shill_provider() override {
-    return shill_provider_.get();
-  }
-  SystemProvider* system_provider() override {
-    return system_provider_.get();
-  }
-  TimeProvider* time_provider() override {
-    return time_provider_.get();
-  }
+  RandomProvider* random_provider() override { return random_provider_.get(); }
+  ShillProvider* shill_provider() override { return shill_provider_.get(); }
+  SystemProvider* system_provider() override { return system_provider_.get(); }
+  TimeProvider* time_provider() override { return time_provider_.get(); }
   UpdaterProvider* updater_provider() override {
     return updater_provider_.get();
   }
diff --git a/update_manager/real_system_provider.cc b/update_manager/real_system_provider.cc
index fdf7e86..a900071 100644
--- a/update_manager/real_system_provider.cc
+++ b/update_manager/real_system_provider.cc
@@ -21,7 +21,7 @@
 #include <base/logging.h>
 #include <base/time/time.h>
 #if USE_CHROME_KIOSK_APP
-#include <libcros/dbus-proxies.h>
+#include <kiosk-app/dbus-proxies.h>
 #endif  // USE_CHROME_KIOSK_APP
 
 #include "update_engine/common/utils.h"
@@ -95,23 +95,20 @@
 }  // namespace
 
 bool RealSystemProvider::Init() {
-  var_is_normal_boot_mode_.reset(
-      new ConstCopyVariable<bool>("is_normal_boot_mode",
-                                  hardware_->IsNormalBootMode()));
+  var_is_normal_boot_mode_.reset(new ConstCopyVariable<bool>(
+      "is_normal_boot_mode", hardware_->IsNormalBootMode()));
 
-  var_is_official_build_.reset(
-      new ConstCopyVariable<bool>("is_official_build",
-                                  hardware_->IsOfficialBuild()));
+  var_is_official_build_.reset(new ConstCopyVariable<bool>(
+      "is_official_build", hardware_->IsOfficialBuild()));
 
-  var_is_oobe_complete_.reset(
-      new CallCopyVariable<bool>(
-          "is_oobe_complete",
-          base::Bind(&chromeos_update_engine::HardwareInterface::IsOOBEComplete,
-                     base::Unretained(hardware_), nullptr)));
+  var_is_oobe_complete_.reset(new CallCopyVariable<bool>(
+      "is_oobe_complete",
+      base::Bind(&chromeos_update_engine::HardwareInterface::IsOOBEComplete,
+                 base::Unretained(hardware_),
+                 nullptr)));
 
-  var_num_slots_.reset(
-      new ConstCopyVariable<unsigned int>(
-          "num_slots", boot_control_->GetNumSlots()));
+  var_num_slots_.reset(new ConstCopyVariable<unsigned int>(
+      "num_slots", boot_control_->GetNumSlots()));
 
   var_kiosk_required_platform_version_.reset(new RetryPollVariable<string>(
       "kiosk_required_platform_version",
@@ -126,8 +123,8 @@
     string* required_platform_version) {
 #if USE_CHROME_KIOSK_APP
   brillo::ErrorPtr error;
-  if (!libcros_proxy_->GetKioskAppRequiredPlatformVersion(
-          required_platform_version, &error)) {
+  if (!kiosk_app_proxy_->GetRequiredPlatformVersion(required_platform_version,
+                                                    &error)) {
     LOG(WARNING) << "Failed to get kiosk required platform version";
     required_platform_version->clear();
     return false;
diff --git a/update_manager/real_system_provider.h b/update_manager/real_system_provider.h
index 80a8615..114c6ea 100644
--- a/update_manager/real_system_provider.h
+++ b/update_manager/real_system_provider.h
@@ -26,7 +26,7 @@
 
 namespace org {
 namespace chromium {
-class LibCrosServiceInterfaceProxyInterface;
+class KioskAppServiceInterfaceProxyInterface;
 }  // namespace chromium
 }  // namespace org
 
@@ -38,13 +38,15 @@
   RealSystemProvider(
       chromeos_update_engine::HardwareInterface* hardware,
       chromeos_update_engine::BootControlInterface* boot_control,
-      org::chromium::LibCrosServiceInterfaceProxyInterface* libcros_proxy)
+      org::chromium::KioskAppServiceInterfaceProxyInterface* kiosk_app_proxy)
       : hardware_(hardware),
 #if USE_CHROME_KIOSK_APP
         boot_control_(boot_control),
-        libcros_proxy_(libcros_proxy) {}
+        kiosk_app_proxy_(kiosk_app_proxy) {
+  }
 #else
-        boot_control_(boot_control) {}
+        boot_control_(boot_control) {
+  }
 #endif  // USE_CHROME_KIOSK_APP
 
   // Initializes the provider and returns whether it succeeded.
@@ -83,7 +85,7 @@
   chromeos_update_engine::HardwareInterface* const hardware_;
   chromeos_update_engine::BootControlInterface* const boot_control_;
 #if USE_CHROME_KIOSK_APP
-  org::chromium::LibCrosServiceInterfaceProxyInterface* const libcros_proxy_;
+  org::chromium::KioskAppServiceInterfaceProxyInterface* const kiosk_app_proxy_;
 #endif  // USE_CHROME_KIOSK_APP
 
   DISALLOW_COPY_AND_ASSIGN(RealSystemProvider);
diff --git a/update_manager/real_system_provider_unittest.cc b/update_manager/real_system_provider_unittest.cc
index 103a35f..f654f7a 100644
--- a/update_manager/real_system_provider_unittest.cc
+++ b/update_manager/real_system_provider_unittest.cc
@@ -26,10 +26,10 @@
 #include "update_engine/common/fake_hardware.h"
 #include "update_engine/update_manager/umtest_utils.h"
 #if USE_CHROME_KIOSK_APP
-#include "libcros/dbus-proxies.h"
-#include "libcros/dbus-proxy-mocks.h"
+#include "kiosk-app/dbus-proxies.h"
+#include "kiosk-app/dbus-proxy-mocks.h"
 
-using org::chromium::LibCrosServiceInterfaceProxyMock;
+using org::chromium::KioskAppServiceInterfaceProxyMock;
 #endif  // USE_CHROME_KIOSK_APP
 using std::unique_ptr;
 using testing::_;
@@ -39,7 +39,7 @@
 
 #if USE_CHROME_KIOSK_APP
 namespace {
-const char kRequiredPlatformVersion[] ="1234.0.0";
+const char kRequiredPlatformVersion[] = "1234.0.0";
 }  // namespace
 #endif  // USE_CHROME_KIOSK_APP
 
@@ -49,14 +49,13 @@
  protected:
   void SetUp() override {
 #if USE_CHROME_KIOSK_APP
-    libcros_proxy_mock_.reset(new LibCrosServiceInterfaceProxyMock());
-    ON_CALL(*libcros_proxy_mock_,
-            GetKioskAppRequiredPlatformVersion(_, _, _))
+    kiosk_app_proxy_mock_.reset(new KioskAppServiceInterfaceProxyMock());
+    ON_CALL(*kiosk_app_proxy_mock_, GetRequiredPlatformVersion(_, _, _))
         .WillByDefault(
             DoAll(SetArgPointee<0>(kRequiredPlatformVersion), Return(true)));
 
     provider_.reset(new RealSystemProvider(
-        &fake_hardware_, &fake_boot_control_, libcros_proxy_mock_.get()));
+        &fake_hardware_, &fake_boot_control_, kiosk_app_proxy_mock_.get()));
 #else
     provider_.reset(
         new RealSystemProvider(&fake_hardware_, &fake_boot_control_, nullptr));
@@ -69,7 +68,7 @@
   unique_ptr<RealSystemProvider> provider_;
 
 #if USE_CHROME_KIOSK_APP
-  unique_ptr<LibCrosServiceInterfaceProxyMock> libcros_proxy_mock_;
+  unique_ptr<KioskAppServiceInterfaceProxyMock> kiosk_app_proxy_mock_;
 #endif  // USE_CHROME_KIOSK_APP
 };
 
@@ -98,8 +97,7 @@
 }
 
 TEST_F(UmRealSystemProviderTest, KioskRequiredPlatformVersionFailure) {
-  EXPECT_CALL(*libcros_proxy_mock_,
-              GetKioskAppRequiredPlatformVersion(_, _, _))
+  EXPECT_CALL(*kiosk_app_proxy_mock_, GetRequiredPlatformVersion(_, _, _))
       .WillOnce(Return(false));
 
   UmTestUtils::ExpectVariableNotSet(
@@ -108,15 +106,13 @@
 
 TEST_F(UmRealSystemProviderTest,
        KioskRequiredPlatformVersionRecoveryFromFailure) {
-  EXPECT_CALL(*libcros_proxy_mock_,
-              GetKioskAppRequiredPlatformVersion(_, _, _))
+  EXPECT_CALL(*kiosk_app_proxy_mock_, GetRequiredPlatformVersion(_, _, _))
       .WillOnce(Return(false));
   UmTestUtils::ExpectVariableNotSet(
       provider_->var_kiosk_required_platform_version());
-  testing::Mock::VerifyAndClearExpectations(libcros_proxy_mock_.get());
+  testing::Mock::VerifyAndClearExpectations(kiosk_app_proxy_mock_.get());
 
-  EXPECT_CALL(*libcros_proxy_mock_,
-              GetKioskAppRequiredPlatformVersion(_, _, _))
+  EXPECT_CALL(*kiosk_app_proxy_mock_, GetRequiredPlatformVersion(_, _, _))
       .WillOnce(
           DoAll(SetArgPointee<0>(kRequiredPlatformVersion), Return(true)));
   UmTestUtils::ExpectVariableHasValue(
diff --git a/update_manager/real_time_provider.cc b/update_manager/real_time_provider.cc
index db26816..efd1747 100644
--- a/update_manager/real_time_provider.cc
+++ b/update_manager/real_time_provider.cc
@@ -38,8 +38,7 @@
       : Variable<Time>(name, TimeDelta::FromHours(1)), clock_(clock) {}
 
  protected:
-  virtual const Time* GetValue(TimeDelta /* timeout */,
-                               string* /* errmsg */) {
+  virtual const Time* GetValue(TimeDelta /* timeout */, string* /* errmsg */) {
     Time::Exploded now_exp;
     clock_->GetWallclockTime().LocalExplode(&now_exp);
     now_exp.hour = now_exp.minute = now_exp.second = now_exp.millisecond = 0;
@@ -64,8 +63,7 @@
       : Variable<int>(name, TimeDelta::FromMinutes(5)), clock_(clock) {}
 
  protected:
-  virtual const int* GetValue(TimeDelta /* timeout */,
-                              string* /* errmsg */) {
+  virtual const int* GetValue(TimeDelta /* timeout */, string* /* errmsg */) {
     Time::Exploded exploded;
     clock_->GetWallclockTime().LocalExplode(&exploded);
     return new int(exploded.hour);
@@ -77,9 +75,28 @@
   DISALLOW_COPY_AND_ASSIGN(CurrHourVariable);
 };
 
+class CurrMinuteVariable : public Variable<int> {
+ public:
+  CurrMinuteVariable(const string& name, ClockInterface* clock)
+      : Variable<int>(name, TimeDelta::FromSeconds(15)), clock_(clock) {}
+
+ protected:
+  virtual const int* GetValue(TimeDelta /* timeout */, string* /* errmsg */) {
+    Time::Exploded exploded;
+    clock_->GetWallclockTime().LocalExplode(&exploded);
+    return new int(exploded.minute);
+  }
+
+ private:
+  ClockInterface* clock_;
+
+  DISALLOW_COPY_AND_ASSIGN(CurrMinuteVariable);
+};
+
 bool RealTimeProvider::Init() {
   var_curr_date_.reset(new CurrDateVariable("curr_date", clock_));
   var_curr_hour_.reset(new CurrHourVariable("curr_hour", clock_));
+  var_curr_minute_.reset(new CurrMinuteVariable("curr_minute", clock_));
   return true;
 }
 
diff --git a/update_manager/real_time_provider.h b/update_manager/real_time_provider.h
index e7cae94..40dab36 100644
--- a/update_manager/real_time_provider.h
+++ b/update_manager/real_time_provider.h
@@ -39,9 +39,9 @@
     return var_curr_date_.get();
   }
 
-  Variable<int>* var_curr_hour() override {
-    return var_curr_hour_.get();
-  }
+  Variable<int>* var_curr_hour() override { return var_curr_hour_.get(); }
+
+  Variable<int>* var_curr_minute() override { return var_curr_minute_.get(); }
 
  private:
   // A clock abstraction (fakeable).
@@ -49,6 +49,7 @@
 
   std::unique_ptr<Variable<base::Time>> var_curr_date_;
   std::unique_ptr<Variable<int>> var_curr_hour_;
+  std::unique_ptr<Variable<int>> var_curr_minute_;
 
   DISALLOW_COPY_AND_ASSIGN(RealTimeProvider);
 };
diff --git a/update_manager/real_time_provider_unittest.cc b/update_manager/real_time_provider_unittest.cc
index f8db30b..ce2a718 100644
--- a/update_manager/real_time_provider_unittest.cc
+++ b/update_manager/real_time_provider_unittest.cc
@@ -84,4 +84,13 @@
                                       provider_->var_curr_hour());
 }
 
+TEST_F(UmRealTimeProviderTest, CurrMinuteValid) {
+  const Time now = CurrTime();
+  Time::Exploded expected;
+  now.LocalExplode(&expected);
+  fake_clock_.SetWallclockTime(now);
+  UmTestUtils::ExpectVariableHasValue(expected.minute,
+                                      provider_->var_curr_minute());
+}
+
 }  // namespace chromeos_update_manager
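For context on the new curr_minute variable: policies in this tree read time variables through the EvaluationContext, so a consumer would look roughly like the sketch below. It mirrors the existing var_curr_hour() usage; the surrounding names (|ec|, |state|) and the LOG line are illustrative, not part of this change.

  // Sketch only: reading the new minute variable from inside a policy method,
  // assuming |ec| is the EvaluationContext and |state| the State passed to it.
  const int* minute = ec->GetValue(state->time_provider()->var_curr_minute());
  if (minute)  // GetValue() returns null when the value cannot be obtained.
    LOG(INFO) << "Current wall-clock minute: " << *minute;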
diff --git a/update_manager/real_updater_provider.cc b/update_manager/real_updater_provider.cc
index 050bd42..134db69 100644
--- a/update_manager/real_updater_provider.cc
+++ b/update_manager/real_updater_provider.cc
@@ -45,10 +45,11 @@
 
 // A templated base class for all update related variables. Provides uniform
 // construction and a system state handle.
-template<typename T>
+template <typename T>
 class UpdaterVariableBase : public Variable<T> {
  public:
-  UpdaterVariableBase(const string& name, VariableMode mode,
+  UpdaterVariableBase(const string& name,
+                      VariableMode mode,
                       SystemState* system_state)
       : Variable<T>(name, mode), system_state_(system_state) {}
 
@@ -125,8 +126,8 @@
 
     if (raw.progress() < 0.0 || raw.progress() > 1.0) {
       if (errmsg) {
-        *errmsg = StringPrintf("Invalid progress value received: %f",
-                               raw.progress());
+        *errmsg =
+            StringPrintf("Invalid progress value received: %f", raw.progress());
       }
       return nullptr;
     }
@@ -157,22 +158,20 @@
 };
 
 const StageVariable::CurrOpStrToStage StageVariable::curr_op_str_to_stage[] = {
-  {update_engine::kUpdateStatusIdle, Stage::kIdle},
-  {update_engine::kUpdateStatusCheckingForUpdate, Stage::kCheckingForUpdate},
-  {update_engine::kUpdateStatusUpdateAvailable, Stage::kUpdateAvailable},
-  {update_engine::kUpdateStatusDownloading, Stage::kDownloading},
-  {update_engine::kUpdateStatusVerifying, Stage::kVerifying},
-  {update_engine::kUpdateStatusFinalizing, Stage::kFinalizing},
-  {update_engine::kUpdateStatusUpdatedNeedReboot, Stage::kUpdatedNeedReboot},
-  {  // NOLINT(whitespace/braces)
-    update_engine::kUpdateStatusReportingErrorEvent,
-    Stage::kReportingErrorEvent
-  },
-  {update_engine::kUpdateStatusAttemptingRollback, Stage::kAttemptingRollback},
+    {update_engine::kUpdateStatusIdle, Stage::kIdle},
+    {update_engine::kUpdateStatusCheckingForUpdate, Stage::kCheckingForUpdate},
+    {update_engine::kUpdateStatusUpdateAvailable, Stage::kUpdateAvailable},
+    {update_engine::kUpdateStatusDownloading, Stage::kDownloading},
+    {update_engine::kUpdateStatusVerifying, Stage::kVerifying},
+    {update_engine::kUpdateStatusFinalizing, Stage::kFinalizing},
+    {update_engine::kUpdateStatusUpdatedNeedReboot, Stage::kUpdatedNeedReboot},
+    {update_engine::kUpdateStatusReportingErrorEvent,
+     Stage::kReportingErrorEvent},
+    {update_engine::kUpdateStatusAttemptingRollback,
+     Stage::kAttemptingRollback},
 };
 
-const Stage* StageVariable::GetValue(TimeDelta /* timeout */,
-                                     string* errmsg) {
+const Stage* StageVariable::GetValue(TimeDelta /* timeout */, string* errmsg) {
   GetStatusHelper raw(system_state(), errmsg);
   if (!raw.is_success())
     return nullptr;
@@ -316,9 +315,7 @@
     prefs->AddObserver(key, this);
     OnPrefSet(key);
   }
-  ~BooleanPrefVariable() {
-    prefs_->RemoveObserver(key_, this);
-  }
+  ~BooleanPrefVariable() { prefs_->RemoveObserver(key_, this); }
 
  private:
   // Reads the actual value from the Prefs instance and updates the Variable
@@ -331,9 +328,7 @@
     SetValue(result);
   }
 
-  void OnPrefDeleted(const string& key) override {
-    SetValue(default_val_);
-  }
+  void OnPrefDeleted(const string& key) override { SetValue(default_val_); }
 
   chromeos_update_engine::PrefsInterface* prefs_;
 
@@ -350,8 +345,8 @@
  public:
   ConsecutiveFailedUpdateChecksVariable(const string& name,
                                         SystemState* system_state)
-      : UpdaterVariableBase<unsigned int>(name, kVariableModePoll,
-                                          system_state) {}
+      : UpdaterVariableBase<unsigned int>(
+            name, kVariableModePoll, system_state) {}
 
  private:
   const unsigned int* GetValue(TimeDelta /* timeout */,
@@ -369,8 +364,8 @@
  public:
   ServerDictatedPollIntervalVariable(const string& name,
                                      SystemState* system_state)
-      : UpdaterVariableBase<unsigned int>(name, kVariableModePoll,
-                                          system_state) {}
+      : UpdaterVariableBase<unsigned int>(
+            name, kVariableModePoll, system_state) {}
 
  private:
   const unsigned int* GetValue(TimeDelta /* timeout */,
@@ -388,7 +383,7 @@
  public:
   ForcedUpdateRequestedVariable(const string& name, SystemState* system_state)
       : UpdaterVariableBase<UpdateRequestStatus>::UpdaterVariableBase(
-          name, kVariableModeAsync, system_state) {
+            name, kVariableModeAsync, system_state) {
     system_state->update_attempter()->set_forced_update_pending_callback(
         new base::Callback<void(bool, bool)>(  // NOLINT(readability/function)
             base::Bind(&ForcedUpdateRequestedVariable::Reset,
@@ -401,11 +396,11 @@
     return new UpdateRequestStatus(update_request_status_);
   }
 
-  void Reset(bool forced_update_requested, bool is_interactive) {
+  void Reset(bool forced_update_requested, bool interactive) {
     UpdateRequestStatus new_value = UpdateRequestStatus::kNone;
     if (forced_update_requested)
-      new_value = (is_interactive ? UpdateRequestStatus::kInteractive :
-                   UpdateRequestStatus::kPeriodic);
+      new_value = (interactive ? UpdateRequestStatus::kInteractive
+                               : UpdateRequestStatus::kPeriodic);
     if (update_request_status_ != new_value) {
       update_request_status_ = new_value;
       NotifyValueChanged();
diff --git a/update_manager/real_updater_provider.h b/update_manager/real_updater_provider.h
index 5e3e27b..1b46895 100644
--- a/update_manager/real_updater_provider.h
+++ b/update_manager/real_updater_provider.h
@@ -52,13 +52,9 @@
     return var_update_completed_time_.get();
   }
 
-  Variable<double>* var_progress() override {
-    return var_progress_.get();
-  }
+  Variable<double>* var_progress() override { return var_progress_.get(); }
 
-  Variable<Stage>* var_stage() override {
-    return var_stage_.get();
-  }
+  Variable<Stage>* var_stage() override { return var_stage_.get(); }
 
   Variable<std::string>* var_new_version() override {
     return var_new_version_.get();
@@ -76,9 +72,7 @@
     return var_new_channel_.get();
   }
 
-  Variable<bool>* var_p2p_enabled() override {
-    return var_p2p_enabled_.get();
-  }
+  Variable<bool>* var_p2p_enabled() override { return var_p2p_enabled_.get(); }
 
   Variable<bool>* var_cellular_enabled() override {
     return var_cellular_enabled_.get();
diff --git a/update_manager/real_updater_provider_unittest.cc b/update_manager/real_updater_provider_unittest.cc
index b653885..fb7a763 100644
--- a/update_manager/real_updater_provider_unittest.cc
+++ b/update_manager/real_updater_provider_unittest.cc
@@ -114,9 +114,8 @@
   Time SetupUpdateCompletedTime(bool valid) {
     const TimeDelta kDurationSinceUpdate = TimeDelta::FromMinutes(7);
     const Time kUpdateBootTime = Time() + kDurationSinceUpdate * 2;
-    const Time kCurrBootTime = (valid ?
-                                kUpdateBootTime + kDurationSinceUpdate :
-                                kUpdateBootTime - kDurationSinceUpdate);
+    const Time kCurrBootTime = (valid ? kUpdateBootTime + kDurationSinceUpdate
+                                      : kUpdateBootTime - kDurationSinceUpdate);
     const Time kCurrWallclockTime = FixedTime();
     EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
                 GetBootTimeAtUpdate(_))
diff --git a/update_manager/rollback_prefs.h b/update_manager/rollback_prefs.h
new file mode 100644
index 0000000..11d09d6
--- /dev/null
+++ b/update_manager/rollback_prefs.h
@@ -0,0 +1,41 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_UPDATE_MANAGER_ROLLBACK_PREFS_H_
+#define UPDATE_ENGINE_UPDATE_MANAGER_ROLLBACK_PREFS_H_
+
+namespace chromeos_update_manager {
+
+// Value used to represent that kernel key versions can always roll-forward.
+// This is the maximum value of a kernel key version.
+constexpr int kRollforwardInfinity = 0xfffffffe;
+
+// Whether the device should roll back to the target version, and if so, which
+// type of rollback it should do. Matches chrome_device_policy.proto's
+// AutoUpdateSettingsProto::RollbackToTargetVersion.
+enum class RollbackToTargetVersion {
+  kUnspecified = 0,
+  kDisabled = 1,
+  kRollbackAndPowerwash = 2,
+  kRollbackAndRestoreIfPossible = 3,
+  kRollbackOnlyIfRestorePossible = 4,
+  // This value must be the last entry.
+  kMaxValue = 5
+};
+
+}  // namespace chromeos_update_manager
+
+#endif  // UPDATE_ENGINE_UPDATE_MANAGER_ROLLBACK_PREFS_H_
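Because RollbackToTargetVersion mirrors an integer proto field, a caller converting the raw policy value would normally range-check it against kMaxValue before casting. A minimal sketch; ToRollbackToTargetVersion is an illustrative helper, not part of this change.

// Sketch only: map an untrusted int (e.g. from device policy) onto the enum.
RollbackToTargetVersion ToRollbackToTargetVersion(int value) {
  if (value < 0 ||
      value >= static_cast<int>(RollbackToTargetVersion::kMaxValue)) {
    return RollbackToTargetVersion::kUnspecified;
  }
  return static_cast<RollbackToTargetVersion>(value);
}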
diff --git a/update_manager/shill_provider.h b/update_manager/shill_provider.h
index e6f4628..c7bb2e2 100644
--- a/update_manager/shill_provider.h
+++ b/update_manager/shill_provider.h
@@ -40,7 +40,7 @@
   // A variable returning the tethering mode of a network connection. Unknown if
   // not connected.
   virtual Variable<chromeos_update_engine::ConnectionTethering>*
-      var_conn_tethering() = 0;
+  var_conn_tethering() = 0;
 
   // A variable returning the time when network connection last changed.
   // Initialized to current time.
diff --git a/update_manager/staging_utils.cc b/update_manager/staging_utils.cc
new file mode 100644
index 0000000..4835ab2
--- /dev/null
+++ b/update_manager/staging_utils.cc
@@ -0,0 +1,142 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_manager/staging_utils.h"
+
+#include <utility>
+#include <vector>
+
+#include <base/logging.h>
+#include <base/rand_util.h>
+#include <base/time/time.h>
+#include <policy/device_policy.h>
+
+#include "update_engine/common/constants.h"
+#include "update_engine/common/hardware_interface.h"
+#include "update_engine/common/prefs_interface.h"
+#include "update_engine/system_state.h"
+
+using base::TimeDelta;
+using chromeos_update_engine::kPrefsWallClockStagingWaitPeriod;
+using chromeos_update_engine::PrefsInterface;
+using chromeos_update_engine::SystemState;
+using policy::DevicePolicy;
+
+namespace chromeos_update_manager {
+
+int GetStagingSchedule(const DevicePolicy* device_policy,
+                       StagingSchedule* staging_schedule_out) {
+  StagingSchedule staging_schedule;
+  if (!device_policy->GetDeviceUpdateStagingSchedule(&staging_schedule) ||
+      staging_schedule.empty()) {
+    return 0;
+  }
+
+  // Last percentage of the schedule should be 100.
+  if (staging_schedule.back().percentage != 100) {
+    LOG(ERROR) << "Last percentage of the schedule is not 100, it's: "
+               << staging_schedule.back().percentage;
+    return 0;
+  }
+
+  int previous_days = 0;
+  int previous_percentage = -1;
+  // Ensure that the schedule has a monotonically increasing set of percentages
+  // and that days are also monotonically increasing.
+  for (const auto& staging_pair : staging_schedule) {
+    int days = staging_pair.days;
+    if (previous_days >= days) {
+      LOG(ERROR) << "Days in staging schedule are not monotonically "
+                 << "increasing. Previous value: " << previous_days
+                 << " Current value: " << days;
+      return 0;
+    }
+    previous_days = days;
+    int percentage = staging_pair.percentage;
+    if (previous_percentage >= percentage) {
+      LOG(ERROR) << "Percentages in staging schedule are not monotonically "
+                 << "increasing. Previous value: " << previous_percentage
+                 << " Current value: " << percentage;
+      return 0;
+    }
+    previous_percentage = percentage;
+  }
+  // Modify staging schedule only if the schedule in the device policy is valid.
+  if (staging_schedule_out)
+    *staging_schedule_out = std::move(staging_schedule);
+
+  return previous_days;
+}
+
+int CalculateWaitTimeInDaysFromSchedule(
+    const StagingSchedule& staging_schedule) {
+  int prev_days = 0;
+  int percentage_position = base::RandInt(1, 100);
+  for (const auto& staging_pair : staging_schedule) {
+    int days = staging_pair.days;
+    if (percentage_position <= staging_pair.percentage) {
+      // Scatter between the start of the range and the end.
+      return prev_days + base::RandInt(1, days - prev_days);
+    }
+    prev_days = days;
+  }
+  // Something went wrong.
+  NOTREACHED();
+  return 0;
+}
+
+StagingCase CalculateStagingCase(const DevicePolicy* device_policy,
+                                 PrefsInterface* prefs,
+                                 TimeDelta* staging_wait_time,
+                                 StagingSchedule* staging_schedule) {
+  // Check that the schedule in the device policy is correct.
+  StagingSchedule new_staging_schedule;
+  int max_days = GetStagingSchedule(device_policy, &new_staging_schedule);
+  if (max_days == 0)
+    return StagingCase::kOff;
+
+  // Calculate the new wait time.
+  TimeDelta new_staging_wait_time = TimeDelta::FromDays(
+      CalculateWaitTimeInDaysFromSchedule(new_staging_schedule));
+  DCHECK_GT(new_staging_wait_time.InSeconds(), 0);
+  if (staging_wait_time->InSeconds() > 0) {
+    // If there haven't been any changes to the schedule and there is a value
+    // set, don't change the waiting time.
+    if (new_staging_schedule == *staging_schedule) {
+      return StagingCase::kNoAction;
+    }
+    // Otherwise, update the schedule and wait time.
+    *staging_wait_time = new_staging_wait_time;
+    *staging_schedule = std::move(new_staging_schedule);
+    return StagingCase::kNoSavedValue;
+  }
+  // Getting here means the schedule changed; update the old schedule.
+  *staging_schedule = std::move(new_staging_schedule);
+
+  int64_t wait_period_in_days;
+  // Use the persisted value if it is valid, that is, positive and no larger
+  // than the maximum number of staging days in the schedule.
+  if (prefs->GetInt64(kPrefsWallClockStagingWaitPeriod, &wait_period_in_days) &&
+      wait_period_in_days > 0 && wait_period_in_days <= max_days) {
+    *staging_wait_time = TimeDelta::FromDays(wait_period_in_days);
+    return StagingCase::kSetStagingFromPref;
+  }
+
+  *staging_wait_time = new_staging_wait_time;
+  return StagingCase::kNoSavedValue;
+}
+
+}  // namespace chromeos_update_manager
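To make the scattering arithmetic in CalculateWaitTimeInDaysFromSchedule() concrete, a short worked example in comment form; the two-entry schedule and the fixed draw of 60 are assumptions for illustration only.

// Worked example for CalculateWaitTimeInDaysFromSchedule():
//   staging_schedule = {{7, 50}, {14, 100}}, pretend base::RandInt(1, 100)
//   returned 60.
//   - First pair: 60 > 50, so it is skipped and prev_days becomes 7.
//   - Second pair: 60 <= 100, so the result is 7 + base::RandInt(1, 14 - 7),
//     i.e. a wait time scattered uniformly over days 8 through 14.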
diff --git a/update_manager/staging_utils.h b/update_manager/staging_utils.h
new file mode 100644
index 0000000..e91bfeb
--- /dev/null
+++ b/update_manager/staging_utils.h
@@ -0,0 +1,71 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_UPDATE_MANAGER_STAGING_UTILS_H_
+#define UPDATE_ENGINE_UPDATE_MANAGER_STAGING_UTILS_H_
+
+#include <utility>
+#include <vector>
+
+#include <base/time/time.h>
+#include <policy/device_policy.h>
+
+#include "update_engine/common/prefs_interface.h"
+
+namespace chromeos_update_manager {
+
+using StagingSchedule = std::vector<policy::DevicePolicy::DayPercentagePair>;
+
+// Possible cases that staging might run into based on the inputs.
+enum class StagingCase {
+  // Staging is off; remove the persisted value.
+  kOff,
+  // Staging is enabled, but there is no valid persisted or saved value, or
+  // the schedule has changed.
+  kNoSavedValue,
+  // Staging is enabled, and there is a valid persisted value.
+  kSetStagingFromPref,
+  // Staging is enabled, and there have been no changes to the schedule.
+  kNoAction
+};
+
+// Calculate how many days the device should wait, based on the bucket it
+// falls into in the given staging schedule. |staging_schedule| is assumed to
+// have already been validated.
+int CalculateWaitTimeInDaysFromSchedule(
+    const StagingSchedule& staging_schedule);
+
+// Verifies that |device_policy| contains a valid staging schedule. If
+// |device_policy| contains a valid staging schedule, move it into
+// |staging_schedule_out| and return the total number of days spanned by the
+// schedule. Otherwise, don't modify |staging_schedule_out| and return 0 (which
+// is an invalid value for the length of a schedule).
+int GetStagingSchedule(const policy::DevicePolicy* device_policy,
+                       StagingSchedule* staging_schedule_out);
+
+// Uses the given arguments to check whether staging is on and whether the
+// state should be updated with a new wait time. |staging_wait_time| should
+// contain the old wait time; it will be replaced with the newly calculated
+// value if staging is on. |staging_schedule| should contain the previous
+// staging schedule; if a new schedule is found, it will be replaced with the
+// new one.
+StagingCase CalculateStagingCase(const policy::DevicePolicy* device_policy,
+                                 chromeos_update_engine::PrefsInterface* prefs,
+                                 base::TimeDelta* staging_wait_time,
+                                 StagingSchedule* staging_schedule);
+
+}  // namespace chromeos_update_manager
+
+#endif  // UPDATE_ENGINE_UPDATE_MANAGER_STAGING_UTILS_H_
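A hedged usage sketch of CalculateStagingCase(), showing how a caller reacts to each StagingCase; |device_policy| and |prefs| are assumed to be valid pointers, and the persistence comments describe intent rather than code added by this change.

// Sketch only: typical call pattern for the helpers declared above.
base::TimeDelta wait_time;   // previously computed wait time, may be zero
StagingSchedule schedule;    // previously seen schedule, may be empty
switch (CalculateStagingCase(device_policy, prefs, &wait_time, &schedule)) {
  case StagingCase::kOff:
    // Staging disabled: a caller would clear the persisted wait period here.
    break;
  case StagingCase::kNoSavedValue:
    // A new wait time was drawn: a caller would persist wait_time.InDays().
    break;
  case StagingCase::kSetStagingFromPref:
  case StagingCase::kNoAction:
    // wait_time already holds the value to honor; nothing new to persist.
    break;
}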
diff --git a/update_manager/staging_utils_unittest.cc b/update_manager/staging_utils_unittest.cc
new file mode 100644
index 0000000..8d75acd
--- /dev/null
+++ b/update_manager/staging_utils_unittest.cc
@@ -0,0 +1,175 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_manager/staging_utils.h"
+
+#include <memory>
+#include <utility>
+
+#include <base/time/time.h>
+#include <gtest/gtest.h>
+#include <policy/mock_device_policy.h>
+
+#include "update_engine/common/constants.h"
+#include "update_engine/common/fake_prefs.h"
+
+using base::TimeDelta;
+using chromeos_update_engine::FakePrefs;
+using chromeos_update_engine::kPrefsWallClockStagingWaitPeriod;
+using testing::_;
+using testing::DoAll;
+using testing::Return;
+using testing::SetArgPointee;
+
+namespace chromeos_update_manager {
+
+constexpr TimeDelta kDay = TimeDelta::FromDays(1);
+constexpr int kMaxDays = 28;
+constexpr int kValidDaySum = 14;
+const StagingSchedule valid_schedule = {{2, 0}, {7, 50}, {9, 80}, {14, 100}};
+
+class StagingUtilsScheduleTest : public testing::Test {
+ protected:
+  void SetUp() override {
+    test_wait_time_ = TimeDelta();
+    test_staging_schedule_ = StagingSchedule();
+  }
+
+  void SetStagingSchedule(const StagingSchedule& staging_schedule) {
+    EXPECT_CALL(device_policy_, GetDeviceUpdateStagingSchedule(_))
+        .WillRepeatedly(
+            DoAll(SetArgPointee<0>(staging_schedule), Return(true)));
+  }
+
+  void SetPersistedStagingVal(int64_t wait_time) {
+    EXPECT_TRUE(
+        fake_prefs_.SetInt64(kPrefsWallClockStagingWaitPeriod, wait_time));
+  }
+
+  void TestStagingCase(const StagingCase& expected) {
+    EXPECT_EQ(expected,
+              CalculateStagingCase(&device_policy_,
+                                   &fake_prefs_,
+                                   &test_wait_time_,
+                                   &test_staging_schedule_));
+  }
+
+  void ExpectNoChanges() {
+    EXPECT_EQ(TimeDelta(), test_wait_time_);
+    EXPECT_EQ(StagingSchedule(), test_staging_schedule_);
+  }
+
+  policy::MockDevicePolicy device_policy_;
+  TimeDelta test_wait_time_;
+  StagingSchedule test_staging_schedule_;
+  FakePrefs fake_prefs_;
+};
+
+// The last element's percentage should be 100; otherwise the schedule is
+// invalid and GetStagingSchedule() returns 0.
+TEST_F(StagingUtilsScheduleTest, GetStagingScheduleInvalidLastElem) {
+  SetStagingSchedule(StagingSchedule{{2, 10}, {4, 20}, {5, 40}});
+  EXPECT_EQ(0, GetStagingSchedule(&device_policy_, &test_staging_schedule_));
+  ExpectNoChanges();
+}
+
+// Percentages should be monotonically increasing.
+TEST_F(StagingUtilsScheduleTest, GetStagingScheduleNonMonotonic) {
+  SetStagingSchedule(StagingSchedule{{2, 10}, {6, 20}, {11, 20}, {12, 100}});
+  EXPECT_EQ(0, GetStagingSchedule(&device_policy_, &test_staging_schedule_));
+  ExpectNoChanges();
+}
+
+// The days should be monotonically increasing.
+TEST_F(StagingUtilsScheduleTest, GetStagingScheduleOverMaxDays) {
+  SetStagingSchedule(StagingSchedule{{2, 10}, {4, 20}, {15, 30}, {10, 100}});
+  EXPECT_EQ(0, GetStagingSchedule(&device_policy_, &test_staging_schedule_));
+  ExpectNoChanges();
+}
+
+TEST_F(StagingUtilsScheduleTest, GetStagingScheduleValid) {
+  SetStagingSchedule(valid_schedule);
+  EXPECT_EQ(kValidDaySum,
+            GetStagingSchedule(&device_policy_, &test_staging_schedule_));
+  EXPECT_EQ(test_staging_schedule_, valid_schedule);
+}
+
+TEST_F(StagingUtilsScheduleTest, StagingOffNoSchedule) {
+  // If the function returns false, the schedule shouldn't get used.
+  EXPECT_CALL(device_policy_, GetDeviceUpdateStagingSchedule(_))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(valid_schedule), Return(false)));
+  TestStagingCase(StagingCase::kOff);
+  ExpectNoChanges();
+}
+
+TEST_F(StagingUtilsScheduleTest, StagingOffEmptySchedule) {
+  SetStagingSchedule(StagingSchedule());
+  TestStagingCase(StagingCase::kOff);
+  ExpectNoChanges();
+}
+
+TEST_F(StagingUtilsScheduleTest, StagingOffInvalidSchedule) {
+  // Any invalid schedule should return |StagingCase::kOff|.
+  SetStagingSchedule(StagingSchedule{{3, 30}, {6, 40}});
+  TestStagingCase(StagingCase::kOff);
+  ExpectNoChanges();
+}
+
+TEST_F(StagingUtilsScheduleTest, StagingOnNoAction) {
+  test_wait_time_ = kDay;
+  // Same contents as valid_schedule, constructed as a separate object.
+  StagingSchedule valid_schedule_pairs = {{2, 0}, {7, 50}, {9, 80}, {14, 100}};
+  test_staging_schedule_ = valid_schedule_pairs;
+  SetStagingSchedule(valid_schedule);
+  TestStagingCase(StagingCase::kNoAction);
+  // Vars should not be changed.
+  EXPECT_EQ(kDay, test_wait_time_);
+  EXPECT_EQ(test_staging_schedule_, valid_schedule_pairs);
+}
+
+TEST_F(StagingUtilsScheduleTest, StagingNoSavedValueChangePolicy) {
+  test_wait_time_ = kDay;
+  SetStagingSchedule(valid_schedule);
+  TestStagingCase(StagingCase::kNoSavedValue);
+  // Vars should change: with valid_schedule a wait shorter than 2 days is
+  // not possible.
+  EXPECT_NE(kDay, test_wait_time_);
+  EXPECT_EQ(test_staging_schedule_, valid_schedule);
+  EXPECT_LE(test_wait_time_, kDay * kMaxDays);
+}
+
+// Tests the case where there was a reboot and there is no persisted value.
+TEST_F(StagingUtilsScheduleTest, StagingNoSavedValueNoPersisted) {
+  SetStagingSchedule(valid_schedule);
+  TestStagingCase(StagingCase::kNoSavedValue);
+  // Vars should change since there are no preset values and there is a new
+  // staging schedule.
+  EXPECT_NE(TimeDelta(), test_wait_time_);
+  EXPECT_EQ(test_staging_schedule_, valid_schedule);
+  EXPECT_LE(test_wait_time_, kDay * kMaxDays);
+}
+
+// If there is a pref set and its value does not exceed the schedule's day
+// count, use that pref.
+TEST_F(StagingUtilsScheduleTest, StagingSetFromPref) {
+  SetStagingSchedule(valid_schedule);
+  SetPersistedStagingVal(5);
+  TestStagingCase(StagingCase::kSetStagingFromPref);
+  // Vars should change.
+  EXPECT_EQ(kDay * 5, test_wait_time_);
+  EXPECT_EQ(test_staging_schedule_, valid_schedule);
+}
+
+}  // namespace chromeos_update_manager
diff --git a/update_manager/state_factory.cc b/update_manager/state_factory.cc
index 208ed51..78cec6a 100644
--- a/update_manager/state_factory.cc
+++ b/update_manager/state_factory.cc
@@ -46,7 +46,7 @@
 
 State* DefaultStateFactory(
     policy::PolicyProvider* policy_provider,
-    org::chromium::LibCrosServiceInterfaceProxyInterface* libcros_proxy,
+    org::chromium::KioskAppServiceInterfaceProxyInterface* kiosk_app_proxy,
     chromeos_update_engine::SystemState* system_state) {
   chromeos_update_engine::ClockInterface* const clock = system_state->clock();
   unique_ptr<RealConfigProvider> config_provider(
@@ -70,20 +70,18 @@
 #endif  // USE_SHILL
   unique_ptr<RealRandomProvider> random_provider(new RealRandomProvider());
   unique_ptr<RealSystemProvider> system_provider(new RealSystemProvider(
-      system_state->hardware(), system_state->boot_control(), libcros_proxy));
+      system_state->hardware(), system_state->boot_control(), kiosk_app_proxy));
 
   unique_ptr<RealTimeProvider> time_provider(new RealTimeProvider(clock));
   unique_ptr<RealUpdaterProvider> updater_provider(
       new RealUpdaterProvider(system_state));
 
-  if (!(config_provider->Init() &&
-        device_policy_provider->Init() &&
+  if (!(config_provider->Init() && device_policy_provider->Init() &&
         random_provider->Init() &&
 #if USE_SHILL
         shill_provider->Init() &&
 #endif  // USE_SHILL
-        system_provider->Init() &&
-        time_provider->Init() &&
+        system_provider->Init() && time_provider->Init() &&
         updater_provider->Init())) {
     LOG(ERROR) << "Error initializing providers";
     return nullptr;
diff --git a/update_manager/state_factory.h b/update_manager/state_factory.h
index 689684a..1c1c1d9 100644
--- a/update_manager/state_factory.h
+++ b/update_manager/state_factory.h
@@ -22,7 +22,7 @@
 
 namespace org {
 namespace chromium {
-class LibCrosServiceInterfaceProxyInterface;
+class KioskAppServiceInterfaceProxyInterface;
 }  // namespace chromium
 }  // namespace org
 
@@ -35,7 +35,7 @@
 // to initialize.
 State* DefaultStateFactory(
     policy::PolicyProvider* policy_provider,
-    org::chromium::LibCrosServiceInterfaceProxyInterface* libcros_proxy,
+    org::chromium::KioskAppServiceInterfaceProxyInterface* kiosk_app_proxy,
     chromeos_update_engine::SystemState* system_state);
 
 }  // namespace chromeos_update_manager
diff --git a/update_manager/time_provider.h b/update_manager/time_provider.h
index 663ec2c..94f4a8f 100644
--- a/update_manager/time_provider.h
+++ b/update_manager/time_provider.h
@@ -36,6 +36,9 @@
   // consistent with base::Time.
   virtual Variable<int>* var_curr_hour() = 0;
 
+  // Returns the current minute (0 to 59) in local time.
+  virtual Variable<int>* var_curr_minute() = 0;
+
  protected:
   TimeProvider() {}
 
diff --git a/update_manager/umtest_utils.h b/update_manager/umtest_utils.h
index 80693db..576f53c 100644
--- a/update_manager/umtest_utils.h
+++ b/update_manager/umtest_utils.h
@@ -37,7 +37,7 @@
   }
 
   // Calls GetValue on |variable| and expects its result to be |expected|.
-  template<typename T>
+  template <typename T>
   static void ExpectVariableHasValue(const T& expected, Variable<T>* variable) {
     ASSERT_NE(nullptr, variable);
     std::unique_ptr<const T> value(
@@ -47,7 +47,7 @@
   }
 
   // Calls GetValue on |variable| and expects its result to be null.
-  template<typename T>
+  template <typename T>
   static void ExpectVariableNotSet(Variable<T>* variable) {
     ASSERT_NE(nullptr, variable);
     std::unique_ptr<const T> value(
diff --git a/update_manager/update_manager-inl.h b/update_manager/update_manager-inl.h
index 77224cf..e9dee3f 100644
--- a/update_manager/update_manager-inl.h
+++ b/update_manager/update_manager-inl.h
@@ -28,13 +28,13 @@
 
 namespace chromeos_update_manager {
 
-template<typename R, typename... Args>
+template <typename R, typename... Args>
 EvalStatus UpdateManager::EvaluatePolicy(
     EvaluationContext* ec,
-    EvalStatus (Policy::*policy_method)(EvaluationContext*, State*,
-                                        std::string*, R*,
-                                        Args...) const,
-    R* result, Args... args) {
+    EvalStatus (Policy::*policy_method)(
+        EvaluationContext*, State*, std::string*, R*, Args...) const,
+    R* result,
+    Args... args) {
   // If expiration timeout fired, dump the context and reset expiration.
   // IMPORTANT: We must still proceed with evaluation of the policy in this
   // case, so that the evaluation time (and corresponding reevaluation timeouts)
@@ -53,15 +53,15 @@
 
   // First try calling the actual policy.
   std::string error;
-  EvalStatus status = (policy_.get()->*policy_method)(ec, state_.get(), &error,
-                                                      result, args...);
+  EvalStatus status = (policy_.get()->*policy_method)(
+      ec, state_.get(), &error, result, args...);
   // If evaluating the main policy failed, defer to the default policy.
   if (status == EvalStatus::kFailed) {
     LOG(WARNING) << "Evaluating policy failed: " << error
                  << "\nEvaluation context: " << ec->DumpContext();
     error.clear();
-    status = (default_policy_.*policy_method)(ec, state_.get(), &error, result,
-                                              args...);
+    status = (default_policy_.*policy_method)(
+        ec, state_.get(), &error, result, args...);
     if (status == EvalStatus::kFailed) {
       LOG(WARNING) << "Evaluating default policy failed: " << error;
     } else if (status == EvalStatus::kAskMeAgainLater) {
@@ -76,13 +76,12 @@
   return status;
 }
 
-template<typename R, typename... Args>
+template <typename R, typename... Args>
 void UpdateManager::OnPolicyReadyToEvaluate(
     scoped_refptr<EvaluationContext> ec,
     base::Callback<void(EvalStatus status, const R& result)> callback,
-    EvalStatus (Policy::*policy_method)(EvaluationContext*, State*,
-                                        std::string*, R*,
-                                        Args...) const,
+    EvalStatus (Policy::*policy_method)(
+        EvaluationContext*, State*, std::string*, R*, Args...) const,
     Args... args) {
   // Evaluate the policy.
   R result;
@@ -95,10 +94,13 @@
   }
 
   // Re-schedule the policy request based on used variables.
-  base::Closure reeval_callback = base::Bind(
-      &UpdateManager::OnPolicyReadyToEvaluate<R, Args...>,
-      base::Unretained(this), ec, callback,
-      policy_method, args...);
+  base::Closure reeval_callback =
+      base::Bind(&UpdateManager::OnPolicyReadyToEvaluate<R, Args...>,
+                 base::Unretained(this),
+                 ec,
+                 callback,
+                 policy_method,
+                 args...);
   if (ec->RunOnValueChangeOrTimeout(reeval_callback))
     return;  // Reevaluation scheduled successfully.
 
@@ -111,12 +113,12 @@
   callback.Run(status, result);
 }
 
-template<typename R, typename... ActualArgs, typename... ExpectedArgs>
+template <typename R, typename... ActualArgs, typename... ExpectedArgs>
 EvalStatus UpdateManager::PolicyRequest(
-    EvalStatus (Policy::*policy_method)(EvaluationContext*, State*,
-                                        std::string*, R*,
-                                        ExpectedArgs...) const,
-    R* result, ActualArgs... args) {
+    EvalStatus (Policy::*policy_method)(
+        EvaluationContext*, State*, std::string*, R*, ExpectedArgs...) const,
+    R* result,
+    ActualArgs... args) {
   scoped_refptr<EvaluationContext> ec(
       new EvaluationContext(clock_, evaluation_timeout_));
   // A PolicyRequest always consists of a single evaluation on a new
@@ -124,8 +126,8 @@
   // IMPORTANT: To ensure that ActualArgs can be converted to ExpectedArgs, we
   // explicitly instantiate EvaluatePolicy with the latter in lieu of the
   // former.
-  EvalStatus ret = EvaluatePolicy<R, ExpectedArgs...>(ec.get(), policy_method,
-                                                      result, args...);
+  EvalStatus ret = EvaluatePolicy<R, ExpectedArgs...>(
+      ec.get(), policy_method, result, args...);
   // Sync policy requests must not block, if they do then this is an error.
   DCHECK(EvalStatus::kAskMeAgainLater != ret);
   LOG_IF(WARNING, EvalStatus::kAskMeAgainLater == ret)
@@ -133,20 +135,20 @@
   return ret;
 }
 
-template<typename R, typename... ActualArgs, typename... ExpectedArgs>
+template <typename R, typename... ActualArgs, typename... ExpectedArgs>
 void UpdateManager::AsyncPolicyRequest(
     base::Callback<void(EvalStatus, const R& result)> callback,
-    EvalStatus (Policy::*policy_method)(EvaluationContext*, State*,
-                                        std::string*, R*,
-                                        ExpectedArgs...) const,
+    EvalStatus (Policy::*policy_method)(
+        EvaluationContext*, State*, std::string*, R*, ExpectedArgs...) const,
     ActualArgs... args) {
-  scoped_refptr<EvaluationContext> ec =
-      new EvaluationContext(
-          clock_, evaluation_timeout_, expiration_timeout_,
-          std::unique_ptr<base::Callback<void(EvaluationContext*)>>(
-              new base::Callback<void(EvaluationContext*)>(
-                  base::Bind(&UpdateManager::UnregisterEvalContext,
-                             weak_ptr_factory_.GetWeakPtr()))));
+  scoped_refptr<EvaluationContext> ec = new EvaluationContext(
+      clock_,
+      evaluation_timeout_,
+      expiration_timeout_,
+      std::unique_ptr<base::Callback<void(EvaluationContext*)>>(
+          new base::Callback<void(EvaluationContext*)>(
+              base::Bind(&UpdateManager::UnregisterEvalContext,
+                         weak_ptr_factory_.GetWeakPtr()))));
   if (!ec_repo_.insert(ec.get()).second) {
     LOG(ERROR) << "Failed to register evaluation context; this is a bug.";
   }
@@ -154,9 +156,13 @@
   // IMPORTANT: To ensure that ActualArgs can be converted to ExpectedArgs, we
   // explicitly instantiate UpdateManager::OnPolicyReadyToEvaluate with the
   // latter in lieu of the former.
-  base::Closure eval_callback = base::Bind(
-      &UpdateManager::OnPolicyReadyToEvaluate<R, ExpectedArgs...>,
-      base::Unretained(this), ec, callback, policy_method, args...);
+  base::Closure eval_callback =
+      base::Bind(&UpdateManager::OnPolicyReadyToEvaluate<R, ExpectedArgs...>,
+                 base::Unretained(this),
+                 ec,
+                 callback,
+                 policy_method,
+                 args...);
   brillo::MessageLoop::current()->PostTask(FROM_HERE, eval_callback);
 }
 
diff --git a/update_manager/update_manager.cc b/update_manager/update_manager.cc
index 25f3216..5dfc09c 100644
--- a/update_manager/update_manager.cc
+++ b/update_manager/update_manager.cc
@@ -27,11 +27,14 @@
 
 UpdateManager::UpdateManager(chromeos_update_engine::ClockInterface* clock,
                              base::TimeDelta evaluation_timeout,
-                             base::TimeDelta expiration_timeout, State* state)
-      : default_policy_(clock), state_(state), clock_(clock),
-        evaluation_timeout_(evaluation_timeout),
-        expiration_timeout_(expiration_timeout),
-        weak_ptr_factory_(this) {
+                             base::TimeDelta expiration_timeout,
+                             State* state)
+    : default_policy_(clock),
+      state_(state),
+      clock_(clock),
+      evaluation_timeout_(evaluation_timeout),
+      expiration_timeout_(expiration_timeout),
+      weak_ptr_factory_(this) {
 #ifdef __ANDROID__
   policy_.reset(new AndroidThingsPolicy());
 #else
diff --git a/update_manager/update_manager.h b/update_manager/update_manager.h
index a2f35df..b0fd97f 100644
--- a/update_manager/update_manager.h
+++ b/update_manager/update_manager.h
@@ -34,7 +34,7 @@
 namespace chromeos_update_manager {
 
 // Comparator for scoped_refptr objects.
-template<typename T>
+template <typename T>
 struct ScopedRefPtrLess {
   bool operator()(const scoped_refptr<T>& first,
                   const scoped_refptr<T>& second) const {
@@ -49,7 +49,8 @@
   // |state|.
   UpdateManager(chromeos_update_engine::ClockInterface* clock,
                 base::TimeDelta evaluation_timeout,
-                base::TimeDelta expiration_timeout, State* state);
+                base::TimeDelta expiration_timeout,
+                State* state);
 
   virtual ~UpdateManager();
 
@@ -68,12 +69,12 @@
   //
   // An example call to this method is:
   //   um.PolicyRequest(&Policy::SomePolicyMethod, &bool_result, arg1, arg2);
-  template<typename R, typename... ActualArgs, typename... ExpectedArgs>
+  template <typename R, typename... ActualArgs, typename... ExpectedArgs>
   EvalStatus PolicyRequest(
-      EvalStatus (Policy::*policy_method)(EvaluationContext*, State*,
-                                          std::string*, R*,
-                                          ExpectedArgs...) const,
-      R* result, ActualArgs...);
+      EvalStatus (Policy::*policy_method)(
+          EvaluationContext*, State*, std::string*, R*, ExpectedArgs...) const,
+      R* result,
+      ActualArgs...);
 
   // Evaluates the given |policy_method| policy with the provided |args|
   // arguments and calls the |callback| callback with the result when done.
@@ -83,19 +84,16 @@
   // policy until another status is returned. If the policy implementation based
   // its return value solely on const variables, the callback will be called
   // with the EvalStatus::kAskMeAgainLater status (which indicates an error).
-  template<typename R, typename... ActualArgs, typename... ExpectedArgs>
+  template <typename R, typename... ActualArgs, typename... ExpectedArgs>
   void AsyncPolicyRequest(
       base::Callback<void(EvalStatus, const R& result)> callback,
-      EvalStatus (Policy::*policy_method)(EvaluationContext*, State*,
-                                          std::string*, R*,
-                                          ExpectedArgs...) const,
+      EvalStatus (Policy::*policy_method)(
+          EvaluationContext*, State*, std::string*, R*, ExpectedArgs...) const,
       ActualArgs... args);
 
  protected:
   // The UpdateManager receives ownership of the passed Policy instance.
-  void set_policy(const Policy* policy) {
-    policy_.reset(policy);
-  }
+  void set_policy(const Policy* policy) { policy_.reset(policy); }
 
   // State getter used for testing.
   State* state() { return state_.get(); }
@@ -111,13 +109,13 @@
   // EvaluatePolicy() evaluates the passed |policy_method| method on the current
   // policy with the given |args| arguments. If the method fails, the default
   // policy is used instead.
-  template<typename R, typename... Args>
+  template <typename R, typename... Args>
   EvalStatus EvaluatePolicy(
       EvaluationContext* ec,
-      EvalStatus (Policy::*policy_method)(EvaluationContext*, State*,
-                                          std::string*, R*,
-                                          Args...) const,
-      R* result, Args... args);
+      EvalStatus (Policy::*policy_method)(
+          EvaluationContext*, State*, std::string*, R*, Args...) const,
+      R* result,
+      Args... args);
 
   // OnPolicyReadyToEvaluate() is called by the main loop when the evaluation
   // of the given |policy_method| should be executed. If the evaluation finishes
@@ -125,13 +123,12 @@
   // returned by the policy. If the evaluation returns an
   // EvalStatus::kAskMeAgainLater state, the |callback| will NOT be called and
   // the evaluation will be re-scheduled to be called later.
-  template<typename R, typename... Args>
+  template <typename R, typename... Args>
   void OnPolicyReadyToEvaluate(
       scoped_refptr<EvaluationContext> ec,
       base::Callback<void(EvalStatus status, const R& result)> callback,
-      EvalStatus (Policy::*policy_method)(EvaluationContext*, State*,
-                                          std::string*, R*,
-                                          Args...) const,
+      EvalStatus (Policy::*policy_method)(
+          EvaluationContext*, State*, std::string*, R*, Args...) const,
       Args... args);
 
   // Unregisters (removes from repo) a previously created EvaluationContext.
@@ -163,7 +160,8 @@
   // will remove all pending events associated with all outstanding contexts
   // (which should, in turn, trigger their destruction).
   std::set<scoped_refptr<EvaluationContext>,
-           ScopedRefPtrLess<EvaluationContext>> ec_repo_;
+           ScopedRefPtrLess<EvaluationContext>>
+      ec_repo_;
 
   base::WeakPtrFactory<UpdateManager> weak_ptr_factory_;
 
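For symmetry with the synchronous PolicyRequest example quoted in the comment above, a hedged sketch of the asynchronous path; OnUpdateCheckDecided and RequestCheck are illustrative names, not part of this change.

// Sketch only: asynchronous counterpart of the PolicyRequest example.
void OnUpdateCheckDecided(EvalStatus status, const UpdateCheckParams& result) {
  LOG(INFO) << "updates_enabled=" << result.updates_enabled;
}

void RequestCheck(UpdateManager* um) {
  um->AsyncPolicyRequest(base::Bind(&OnUpdateCheckDecided),
                         &Policy::UpdateCheckAllowed);
}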
diff --git a/update_manager/update_manager_unittest.cc b/update_manager/update_manager_unittest.cc
index c2766ea..f1a8d17 100644
--- a/update_manager/update_manager_unittest.cc
+++ b/update_manager/update_manager_unittest.cc
@@ -81,13 +81,13 @@
   void SetUp() override {
     loop_.SetAsCurrent();
     fake_state_ = new FakeState();
-    umut_.reset(new UpdateManager(&fake_clock_, TimeDelta::FromSeconds(5),
-                                  TimeDelta::FromSeconds(1), fake_state_));
+    umut_.reset(new UpdateManager(&fake_clock_,
+                                  TimeDelta::FromSeconds(5),
+                                  TimeDelta::FromSeconds(1),
+                                  fake_state_));
   }
 
-  void TearDown() override {
-    EXPECT_FALSE(loop_.PendingTasks());
-  }
+  void TearDown() override { EXPECT_FALSE(loop_.PendingTasks()); }
 
   base::SimpleTestClock test_clock_;
   brillo::FakeMessageLoop loop_{&test_clock_};
@@ -103,7 +103,8 @@
  public:
   explicit FailingPolicy(int* num_called_p) : num_called_p_(num_called_p) {}
   FailingPolicy() : FailingPolicy(nullptr) {}
-  EvalStatus UpdateCheckAllowed(EvaluationContext* ec, State* state,
+  EvalStatus UpdateCheckAllowed(EvaluationContext* ec,
+                                State* state,
                                 string* error,
                                 UpdateCheckParams* result) const override {
     if (num_called_p_)
@@ -121,7 +122,8 @@
 
 // The LazyPolicy always returns EvalStatus::kAskMeAgainLater.
 class LazyPolicy : public DefaultPolicy {
-  EvalStatus UpdateCheckAllowed(EvaluationContext* ec, State* state,
+  EvalStatus UpdateCheckAllowed(EvaluationContext* ec,
+                                State* state,
                                 string* error,
                                 UpdateCheckParams* result) const override {
     return EvalStatus::kAskMeAgainLater;
@@ -139,9 +141,11 @@
 class DelayPolicy : public DefaultPolicy {
  public:
   DelayPolicy(int sleep_secs, Time time_threshold, int* num_called_p)
-      : sleep_secs_(sleep_secs), time_threshold_(time_threshold),
+      : sleep_secs_(sleep_secs),
+        time_threshold_(time_threshold),
         num_called_p_(num_called_p) {}
-  EvalStatus UpdateCheckAllowed(EvaluationContext* ec, State* state,
+  EvalStatus UpdateCheckAllowed(EvaluationContext* ec,
+                                State* state,
                                 string* error,
                                 UpdateCheckParams* result) const override {
     if (num_called_p_)
@@ -173,9 +177,10 @@
 // of EvalStatus and T instances. This allows creating a callback that keeps
 // track of when it is called and the arguments passed to it, to be used with
 // the UpdateManager::AsyncPolicyRequest().
-template<typename T>
+template <typename T>
 static void AccumulateCallsCallback(vector<pair<EvalStatus, T>>* acc,
-                                    EvalStatus status, const T& result) {
+                                    EvalStatus status,
+                                    const T& result) {
   acc->push_back(std::make_pair(status, result));
 }
 
@@ -183,13 +188,13 @@
 // these tests cover all policy requests as defined in Policy.
 TEST_F(UmUpdateManagerTest, PolicyRequestCallUpdateCheckAllowed) {
   UpdateCheckParams result;
-  EXPECT_EQ(EvalStatus::kSucceeded, umut_->PolicyRequest(
-      &Policy::UpdateCheckAllowed, &result));
+  EXPECT_EQ(EvalStatus::kSucceeded,
+            umut_->PolicyRequest(&Policy::UpdateCheckAllowed, &result));
 }
 
 TEST_F(UmUpdateManagerTest, PolicyRequestCallUpdateCanStart) {
   UpdateState update_state = UpdateState();
-  update_state.is_interactive = true;
+  update_state.interactive = true;
   update_state.is_delta_payload = false;
   update_state.first_seen = FixedTime();
   update_state.num_checks = 1;
@@ -213,9 +218,9 @@
   update_state.scatter_check_threshold_max = 8;
 
   UpdateDownloadParams result;
-  EXPECT_EQ(EvalStatus::kSucceeded,
-            umut_->PolicyRequest(&Policy::UpdateCanStart, &result,
-                                 update_state));
+  EXPECT_EQ(
+      EvalStatus::kSucceeded,
+      umut_->PolicyRequest(&Policy::UpdateCanStart, &result, update_state));
 }
 
 TEST_F(UmUpdateManagerTest, PolicyRequestCallsDefaultOnError) {
@@ -225,8 +230,8 @@
   // which will set this as true.
   UpdateCheckParams result;
   result.updates_enabled = false;
-  EvalStatus status = umut_->PolicyRequest(
-      &Policy::UpdateCheckAllowed, &result);
+  EvalStatus status =
+      umut_->PolicyRequest(&Policy::UpdateCheckAllowed, &result);
   EXPECT_EQ(EvalStatus::kSucceeded, status);
   EXPECT_TRUE(result.updates_enabled);
 }
@@ -250,8 +255,8 @@
   umut_->set_policy(new FailingPolicy());
 
   vector<pair<EvalStatus, UpdateCheckParams>> calls;
-  Callback<void(EvalStatus, const UpdateCheckParams&)> callback = Bind(
-      AccumulateCallsCallback<UpdateCheckParams>, &calls);
+  Callback<void(EvalStatus, const UpdateCheckParams&)> callback =
+      Bind(AccumulateCallsCallback<UpdateCheckParams>, &calls);
 
   umut_->AsyncPolicyRequest(callback, &Policy::UpdateCheckAllowed);
   // The callback should wait until we run the main loop for it to be executed.
@@ -291,8 +296,9 @@
   // reattempted.
   int num_called = 0;
   umut_->set_policy(new DelayPolicy(
-          0, fake_clock_.GetWallclockTime() + TimeDelta::FromSeconds(3),
-          &num_called));
+      0,
+      fake_clock_.GetWallclockTime() + TimeDelta::FromSeconds(3),
+      &num_called));
 
   vector<pair<EvalStatus, UpdateCheckParams>> calls;
   Callback<void(EvalStatus, const UpdateCheckParams&)> callback =
diff --git a/update_manager/update_time_restrictions_policy_impl.cc b/update_manager/update_time_restrictions_policy_impl.cc
new file mode 100644
index 0000000..f9b83de
--- /dev/null
+++ b/update_manager/update_time_restrictions_policy_impl.cc
@@ -0,0 +1,74 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "update_engine/update_manager/update_time_restrictions_policy_impl.h"
+
+#include <memory>
+
+#include <base/time/time.h>
+
+#include "update_engine/update_manager/device_policy_provider.h"
+#include "update_engine/update_manager/system_provider.h"
+#include "update_engine/update_manager/weekly_time.h"
+
+using base::Time;
+using base::TimeDelta;
+
+using chromeos_update_engine::ErrorCode;
+using chromeos_update_engine::InstallPlan;
+
+namespace chromeos_update_manager {
+
+EvalStatus UpdateTimeRestrictionsPolicyImpl::UpdateCanBeApplied(
+    EvaluationContext* ec,
+    State* state,
+    std::string* error,
+    ErrorCode* result,
+    InstallPlan* install_plan) const {
+  DevicePolicyProvider* const dp_provider = state->device_policy_provider();
+  TimeProvider* const time_provider = state->time_provider();
+
+  // If kiosk mode is not enabled, don't restrict updates.
+  if (!ec->GetValue(dp_provider->var_auto_launched_kiosk_app_id()))
+    return EvalStatus::kContinue;
+
+  const Time* curr_date = ec->GetValue(time_provider->var_curr_date());
+  const int* curr_hour = ec->GetValue(time_provider->var_curr_hour());
+  const int* curr_minute = ec->GetValue(time_provider->var_curr_minute());
+  if (!curr_date || !curr_hour || !curr_minute) {
+    LOG(WARNING) << "Unable to access local time.";
+    return EvalStatus::kContinue;
+  }
+
+  WeeklyTime now = WeeklyTime::FromTime(*curr_date);
+  now.AddTime(TimeDelta::FromHours(*curr_hour) +
+              TimeDelta::FromMinutes(*curr_minute));
+
+  const WeeklyTimeIntervalVector* intervals =
+      ec->GetValue(dp_provider->var_disallowed_time_intervals());
+  if (!intervals) {
+    return EvalStatus::kContinue;
+  }
+  for (const auto& interval : *intervals) {
+    if (interval.InRange(now)) {
+      *result = ErrorCode::kOmahaUpdateDeferredPerPolicy;
+      return EvalStatus::kSucceeded;
+    }
+  }
+
+  return EvalStatus::kContinue;
+}
+
+}  // namespace chromeos_update_manager
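
The new UpdateCanBeApplied implementation above reduces to: map the local time to a position within the week and defer the update if that position falls in any disallowed interval. Below is a minimal standalone sketch of that check using a plain minutes-since-start-of-week representation; the names and types (IllustrativeInterval, ShouldDeferUpdate) are illustrative and not part of this change, which instead uses the WeeklyTime/WeeklyTimeInterval classes added later in this patch. Intervals are treated as inclusive of their start and exclusive of their end, matching the unit tests added below.

#include <vector>

constexpr int kMinutesPerWeek = 7 * 24 * 60;

// An interval in minutes since Sunday 00:00. |end_minutes| may be smaller than
// |start_minutes|, meaning the interval wraps around the end of the week.
struct IllustrativeInterval {
  int start_minutes;  // Inclusive.
  int end_minutes;    // Exclusive.
};

// Returns true if |now_minutes| falls inside |interval|. Degenerate intervals
// (start == end) are ignored here for simplicity.
bool InInterval(int now_minutes, const IllustrativeInterval& interval) {
  if (interval.start_minutes <= interval.end_minutes) {
    return now_minutes >= interval.start_minutes &&
           now_minutes < interval.end_minutes;
  }
  // Wraparound interval, e.g. Friday -> Tuesday.
  return now_minutes >= interval.start_minutes ||
         now_minutes < interval.end_minutes;
}

// Mirrors the policy decision above: defer the update when the current time
// lands in any disallowed interval, otherwise let the next policy decide.
bool ShouldDeferUpdate(int now_minutes,
                       const std::vector<IllustrativeInterval>& disallowed) {
  for (const auto& interval : disallowed) {
    if (InInterval(now_minutes % kMinutesPerWeek, interval))
      return true;
  }
  return false;
}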
diff --git a/update_manager/update_time_restrictions_policy_impl.h b/update_manager/update_time_restrictions_policy_impl.h
new file mode 100644
index 0000000..11cbceb
--- /dev/null
+++ b/update_manager/update_time_restrictions_policy_impl.h
@@ -0,0 +1,61 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef UPDATE_ENGINE_UPDATE_MANAGER_UPDATE_TIME_RESTRICTIONS_POLICY_IMPL_H_
+#define UPDATE_ENGINE_UPDATE_MANAGER_UPDATE_TIME_RESTRICTIONS_POLICY_IMPL_H_
+
+#include <string>
+
+#include <base/time/time.h>
+
+#include "update_engine/common/error_code.h"
+#include "update_engine/payload_consumer/install_plan.h"
+#include "update_engine/update_manager/policy_utils.h"
+
+namespace chromeos_update_manager {
+
+// Policy that allows administrators to set time intervals during which
+// automatic update checks are disallowed. This implementation then checks if
+// the current time falls in the range spanned by the time intervals. If the
+// current time falls in one of the intervals then the update check is
+// blocked by this policy.
+class UpdateTimeRestrictionsPolicyImpl : public PolicyImplBase {
+ public:
+  UpdateTimeRestrictionsPolicyImpl() = default;
+  ~UpdateTimeRestrictionsPolicyImpl() override = default;
+
+  // When the current time is inside one of the intervals, returns
+  // kSucceeded and sets |result| to kOmahaUpdateDeferredPerPolicy. If the
+  // current time is not inside any interval, returns kContinue. In case of
+  // errors, e.g. intervals or time cannot be accessed, returns kContinue.
+  EvalStatus UpdateCanBeApplied(
+      EvaluationContext* ec,
+      State* state,
+      std::string* error,
+      chromeos_update_engine::ErrorCode* result,
+      chromeos_update_engine::InstallPlan* install_plan) const override;
+
+ protected:
+  std::string PolicyName() const override {
+    return "UpdateTimeRestrictionsPolicyImpl";
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(UpdateTimeRestrictionsPolicyImpl);
+};
+
+}  // namespace chromeos_update_manager
+
+#endif  // UPDATE_ENGINE_UPDATE_MANAGER_UPDATE_TIME_RESTRICTIONS_POLICY_IMPL_H_
diff --git a/update_manager/update_time_restrictions_policy_impl_unittest.cc b/update_manager/update_time_restrictions_policy_impl_unittest.cc
new file mode 100644
index 0000000..74e7f3c
--- /dev/null
+++ b/update_manager/update_time_restrictions_policy_impl_unittest.cc
@@ -0,0 +1,120 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_manager/update_time_restrictions_policy_impl.h"
+
+#include <memory>
+
+#include <base/time/time.h>
+
+#include "update_engine/update_manager/policy_test_utils.h"
+#include "update_engine/update_manager/weekly_time.h"
+
+using base::Time;
+using base::TimeDelta;
+using chromeos_update_engine::ErrorCode;
+using chromeos_update_engine::InstallPlan;
+using std::string;
+
+namespace chromeos_update_manager {
+
+constexpr TimeDelta kHour = TimeDelta::FromHours(1);
+constexpr TimeDelta kMinute = TimeDelta::FromMinutes(1);
+
+const WeeklyTimeIntervalVector kTestIntervals{
+    // Monday 10:15 AM to Monday 3:30 PM.
+    WeeklyTimeInterval(WeeklyTime(1, kHour * 10 + kMinute * 15),
+                       WeeklyTime(1, kHour * 15 + kMinute * 30)),
+    // Wednesday 8:30 PM to Thursday 8:40 AM.
+    WeeklyTimeInterval(WeeklyTime(3, kHour * 20 + kMinute * 30),
+                       WeeklyTime(4, kHour * 8 + kMinute * 40)),
+};
+
+class UmUpdateTimeRestrictionsPolicyImplTest : public UmPolicyTestBase {
+ protected:
+  UmUpdateTimeRestrictionsPolicyImplTest() {
+    policy_ = std::make_unique<UpdateTimeRestrictionsPolicyImpl>();
+  }
+
+  void TestPolicy(const Time::Exploded& exploded,
+                  const WeeklyTimeIntervalVector& test_intervals,
+                  const EvalStatus& expected_value,
+                  bool kiosk) {
+    if (kiosk)
+      fake_state_.device_policy_provider()
+          ->var_auto_launched_kiosk_app_id()
+          ->reset(new string("myapp"));
+
+    Time time;
+    EXPECT_TRUE(Time::FromLocalExploded(exploded, &time));
+    fake_clock_.SetWallclockTime(time);
+    SetUpDefaultTimeProvider();
+    fake_state_.device_policy_provider()
+        ->var_disallowed_time_intervals()
+        ->reset(new WeeklyTimeIntervalVector(test_intervals));
+    ErrorCode result;
+    InstallPlan install_plan;
+    ExpectPolicyStatus(
+        expected_value, &Policy::UpdateCanBeApplied, &result, &install_plan);
+    if (expected_value == EvalStatus::kSucceeded)
+      EXPECT_EQ(result, ErrorCode::kOmahaUpdateDeferredPerPolicy);
+  }
+};
+
+// If there are no intervals, then the check should always return kContinue.
+TEST_F(UmUpdateTimeRestrictionsPolicyImplTest, NoIntervalsSetTest) {
+  Time::Exploded random_time{2018, 7, 1, 9, 12, 30, 0, 0};
+  TestPolicy(random_time,
+             WeeklyTimeIntervalVector(),
+             EvalStatus::kContinue,
+             /* kiosk = */ true);
+}
+
+// Check that all intervals are checked.
+TEST_F(UmUpdateTimeRestrictionsPolicyImplTest, TimeInRange) {
+  // Monday, July 9th 2018 12:30 PM.
+  Time::Exploded first_interval_time{2018, 7, 1, 9, 12, 30, 0, 0};
+  TestPolicy(first_interval_time,
+             kTestIntervals,
+             EvalStatus::kSucceeded,
+             /* kiosk = */ true);
+
+  // Check second interval.
+  // Thursday, July 12th 2018 4:30 AM.
+  Time::Exploded second_interval_time{2018, 7, 4, 12, 4, 30, 0, 0};
+  TestPolicy(second_interval_time,
+             kTestIntervals,
+             EvalStatus::kSucceeded,
+             /* kiosk = */ true);
+}
+
+TEST_F(UmUpdateTimeRestrictionsPolicyImplTest, TimeOutOfRange) {
+  // Monday, July 9th 2018 6:30 PM.
+  Time::Exploded out_of_range_time{2018, 7, 1, 9, 18, 30, 0, 0};
+  TestPolicy(out_of_range_time,
+             kTestIntervals,
+             EvalStatus::kContinue,
+             /* kiosk = */ true);
+}
+
+TEST_F(UmUpdateTimeRestrictionsPolicyImplTest, NoKioskDisablesPolicy) {
+  Time::Exploded in_range_time{2018, 7, 1, 9, 12, 30, 0, 0};
+  TestPolicy(in_range_time,
+             kTestIntervals,
+             EvalStatus::kContinue,
+             /* kiosk = */ false);
+}
+}  // namespace chromeos_update_manager
diff --git a/update_manager/variable.h b/update_manager/variable.h
index 7109692..6c7d350 100644
--- a/update_manager/variable.h
+++ b/update_manager/variable.h
@@ -74,20 +74,14 @@
   }
 
   // Returns the variable name as a string.
-  const std::string& GetName() const {
-    return name_;
-  }
+  const std::string& GetName() const { return name_; }
 
   // Returns the variable mode.
-  VariableMode GetMode() const {
-    return mode_;
-  }
+  VariableMode GetMode() const { return mode_; }
 
   // For VariableModePoll variables, it returns the polling interval of this
   // variable. In other case, it returns 0.
-  base::TimeDelta GetPollInterval() const {
-    return poll_interval_;
-  }
+  base::TimeDelta GetPollInterval() const { return poll_interval_; }
 
   // Adds and removes observers for value changes on the variable. This only
   // works for kVariableAsync variables since the other modes don't track value
@@ -106,8 +100,8 @@
  protected:
   // Creates a BaseVariable using the default polling interval (5 minutes).
   BaseVariable(const std::string& name, VariableMode mode)
-      : BaseVariable(name, mode,
-                     base::TimeDelta::FromMinutes(kDefaultPollMinutes)) {}
+      : BaseVariable(
+            name, mode, base::TimeDelta::FromMinutes(kDefaultPollMinutes)) {}
 
   // Creates a BaseVariable with mode kVariableModePoll and the provided
   // polling interval.
@@ -116,8 +110,8 @@
 
   // Reset the poll interval on a polling variable to the given one.
   void SetPollInterval(base::TimeDelta poll_interval) {
-    DCHECK_EQ(kVariableModePoll, mode_) << "Can't set the poll_interval on a "
-                                        << mode_ << " variable";
+    DCHECK_EQ(kVariableModePoll, mode_)
+        << "Can't set the poll_interval on a " << mode_ << " variable";
     poll_interval_ = poll_interval;
   }
 
@@ -140,11 +134,13 @@
   FRIEND_TEST(UmBaseVariableTest, NotifyValueChangedTest);
   FRIEND_TEST(UmBaseVariableTest, NotifyValueRemovesObserversTest);
 
-  BaseVariable(const std::string& name, VariableMode mode,
+  BaseVariable(const std::string& name,
+               VariableMode mode,
                base::TimeDelta poll_interval)
-    : name_(name), mode_(mode),
-      poll_interval_(mode == kVariableModePoll ?
-                     poll_interval : base::TimeDelta()) {}
+      : name_(name),
+        mode_(mode),
+        poll_interval_(mode == kVariableModePoll ? poll_interval
+                                                 : base::TimeDelta()) {}
 
   void OnValueChangedNotification() {
     // A ValueChanged() method can change the list of observers, for example
@@ -184,7 +180,7 @@
 // Interface to an Update Manager variable of a given type. Implementation
 // internals are hidden as protected members, since policies should not be
 // using them directly.
-template<typename T>
+template <typename T>
 class Variable : public BaseVariable {
  public:
   ~Variable() override {}
diff --git a/update_manager/variable_unittest.cc b/update_manager/variable_unittest.cc
index 144002a..fe40ce5 100644
--- a/update_manager/variable_unittest.cc
+++ b/update_manager/variable_unittest.cc
@@ -42,8 +42,7 @@
   ~DefaultVariable() override {}
 
  protected:
-  const T* GetValue(TimeDelta /* timeout */,
-                    string* /* errmsg */) override {
+  const T* GetValue(TimeDelta /* timeout */, string* /* errmsg */) override {
     return new T();
   }
 
@@ -53,9 +52,7 @@
 
 class UmBaseVariableTest : public ::testing::Test {
  protected:
-  void SetUp() override {
-    loop_.SetAsCurrent();
-  }
+  void SetUp() override { loop_.SetAsCurrent(); }
 
   brillo::FakeMessageLoop loop_{nullptr};
 };
@@ -87,9 +84,7 @@
 
 class BaseVariableObserver : public BaseVariable::ObserverInterface {
  public:
-  void ValueChanged(BaseVariable* variable) {
-    calls_.push_back(variable);
-  }
+  void ValueChanged(BaseVariable* variable) { calls_.push_back(variable); }
 
   // List of called functions.
   vector<BaseVariable*> calls_;
diff --git a/update_manager/weekly_time.cc b/update_manager/weekly_time.cc
new file mode 100644
index 0000000..e478f9f
--- /dev/null
+++ b/update_manager/weekly_time.cc
@@ -0,0 +1,75 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "update_engine/update_manager/weekly_time.h"
+
+#include <base/strings/string_number_conversions.h>
+#include <base/strings/stringprintf.h>
+#include <base/time/time.h>
+
+using base::Time;
+using base::TimeDelta;
+using std::string;
+
+namespace {
+const int kDaysInWeek = 7;
+}
+
+namespace chromeos_update_manager {
+
+TimeDelta WeeklyTime::GetDurationTo(const WeeklyTime& other) const {
+  if (other.TimeFromStartOfWeek() < TimeFromStartOfWeek()) {
+    return other.TimeFromStartOfWeek() +
+           (TimeDelta::FromDays(kDaysInWeek) - TimeFromStartOfWeek());
+  }
+  return other.TimeFromStartOfWeek() - TimeFromStartOfWeek();
+}
+
+TimeDelta WeeklyTime::TimeFromStartOfWeek() const {
+  return TimeDelta::FromDays(day_of_week_) + time_;
+}
+
+void WeeklyTime::AddTime(const TimeDelta& offset) {
+  time_ += offset;
+  int days_over = time_.InDays();
+  time_ -= TimeDelta::FromDays(days_over);
+  day_of_week_ = (day_of_week_ + days_over - 1) % kDaysInWeek + 1;
+}
+
+// static
+WeeklyTime WeeklyTime::FromTime(const Time& time) {
+  Time::Exploded exploded;
+  time.LocalExplode(&exploded);
+  return WeeklyTime(exploded.day_of_week,
+                    TimeDelta::FromHours(exploded.hour) +
+                        TimeDelta::FromMinutes(exploded.minute));
+}
+
+bool WeeklyTimeInterval::InRange(const WeeklyTime& time) const {
+  return time == start_ ||
+         (time.GetDurationTo(start_) >= time.GetDurationTo(end_) &&
+          time != end_);
+}
+
+string WeeklyTimeInterval::ToString() const {
+  return base::StringPrintf(
+      "Start: day_of_week=%d time=%d\nEnd: day_of_week=%d time=%d",
+      start_.day_of_week(),
+      start_.time().InMinutes(),
+      end_.day_of_week(),
+      end_.time().InMinutes());
+}
+
+}  // namespace chromeos_update_manager
diff --git a/update_manager/weekly_time.h b/update_manager/weekly_time.h
new file mode 100644
index 0000000..9e3a039
--- /dev/null
+++ b/update_manager/weekly_time.h
@@ -0,0 +1,97 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef UPDATE_ENGINE_UPDATE_MANAGER_WEEKLY_TIME_H_
+#define UPDATE_ENGINE_UPDATE_MANAGER_WEEKLY_TIME_H_
+
+#include <string>
+#include <vector>
+
+#include <base/time/time.h>
+
+namespace chromeos_update_manager {
+
+// Represents a day of the week and the time since it started.
+class WeeklyTime {
+ public:
+  // Day of week (Sunday = 0 and so on) and time since start of the day (12 AM).
+  WeeklyTime(const int& day_of_week, const base::TimeDelta& time)
+      : day_of_week_(day_of_week), time_(time) {}
+
+  // Create a weekly time from a time object.
+  static WeeklyTime FromTime(const base::Time& time);
+
+  bool operator==(const WeeklyTime& other) const {
+    return time_ == other.time() && day_of_week_ == other.day_of_week();
+  }
+
+  bool operator!=(const WeeklyTime& other) const { return !(*this == other); }
+
+  // Returns the duration between this WeeklyTime and |other|. |other| is
+  // always considered to be after this WeeklyTime, e.g. calling this
+  // function on [Friday 12:00, Monday 12:00] would return 3 days.
+  base::TimeDelta GetDurationTo(const WeeklyTime& other) const;
+
+  // Gets the weekly time represented as a time delta.
+  base::TimeDelta TimeFromStartOfWeek() const;
+
+  // Adds the given |offset| to the time with proper wraparound (e.g. Sunday + 1
+  // day = Monday).
+  void AddTime(const base::TimeDelta& offset);
+
+  int day_of_week() const { return day_of_week_; }
+
+  base::TimeDelta time() const { return time_; }
+
+ private:
+  int day_of_week_;
+  base::TimeDelta time_;
+};
+
+// Represents an interval of time during a week, delimited by two WeeklyTime
+// objects. This interval can span at most 7 days. |end| is always considered
+// to be after |start|; this is possible since times of the week are cyclic.
+// For example, the interval [Thursday 12:00, Monday 12:00) will span the time
+// between Thursday and Monday.
+class WeeklyTimeInterval {
+ public:
+  WeeklyTimeInterval(const WeeklyTime& start, const WeeklyTime& end)
+      : start_(start), end_(end) {}
+
+  // Determines if |time| is in this interval.
+  bool InRange(const WeeklyTime& time) const;
+
+  WeeklyTime start() const { return start_; }
+
+  WeeklyTime end() const { return end_; }
+
+  bool operator==(const WeeklyTimeInterval& other) const {
+    return start_ == other.start() && end_ == other.end();
+  }
+
+  // Converts the interval to a string. Used for the BoxedValue ToString
+  // function.
+  std::string ToString() const;
+
+ private:
+  WeeklyTime start_;
+  WeeklyTime end_;
+};
+
+using WeeklyTimeIntervalVector = std::vector<WeeklyTimeInterval>;
+
+}  // namespace chromeos_update_manager
+
+#endif  // UPDATE_ENGINE_UPDATE_MANAGER_WEEKLY_TIME_H_
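
A short usage sketch of the API declared above, with day/time values mirroring the unit tests added below (days are plain ints with Sunday = 0). The ExampleUsage function is illustrative and not part of this change.

#include <base/time/time.h>

#include "update_engine/update_manager/weekly_time.h"

using base::TimeDelta;
using chromeos_update_manager::WeeklyTime;
using chromeos_update_manager::WeeklyTimeInterval;

bool ExampleUsage() {
  // Friday 00:10 to Tuesday 00:30: an interval that wraps around the end of
  // the week.
  WeeklyTimeInterval wraparound(WeeklyTime(5, TimeDelta::FromMinutes(10)),
                                WeeklyTime(2, TimeDelta::FromMinutes(30)));

  // Friday 12:00 to Monday 12:00 is 3 days, since |other| is always treated
  // as coming after |this|.
  TimeDelta duration =
      WeeklyTime(5, TimeDelta::FromHours(12))
          .GetDurationTo(WeeklyTime(1, TimeDelta::FromHours(12)));

  // Saturday 00:10 falls inside the wraparound interval.
  return wraparound.InRange(WeeklyTime(6, TimeDelta::FromMinutes(10))) &&
         duration == TimeDelta::FromDays(3);
}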
diff --git a/update_manager/weekly_time_unittest.cc b/update_manager/weekly_time_unittest.cc
new file mode 100644
index 0000000..52c5425
--- /dev/null
+++ b/update_manager/weekly_time_unittest.cc
@@ -0,0 +1,212 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "update_engine/update_manager/weekly_time.h"
+
+#include <tuple>
+
+#include <base/time/time.h>
+#include <gtest/gtest.h>
+
+using base::TimeDelta;
+using std::tuple;
+
+namespace chromeos_update_manager {
+
+namespace {
+
+enum {
+  kSunday = 0,
+  kMonday,
+  kTuesday,
+  kWednesday,
+  kThursday,
+  kFriday,
+  kSaturday
+};
+
+}  // namespace
+
+class WeeklyTimeDurationTest
+    : public testing::TestWithParam<tuple<int /* start_day_of_week */,
+                                          TimeDelta /* start_time */,
+                                          int /* end_day_of_week */,
+                                          TimeDelta /* end_time */,
+                                          TimeDelta /* expected result */>> {
+ protected:
+  int start_day_of_week() { return std::get<0>(GetParam()); }
+  TimeDelta start_time() { return std::get<1>(GetParam()); }
+  int end_day_of_week() { return std::get<2>(GetParam()); }
+  TimeDelta end_time() { return std::get<3>(GetParam()); }
+  TimeDelta result() { return std::get<4>(GetParam()); }
+};
+
+TEST_P(WeeklyTimeDurationTest, GetDurationTo) {
+  WeeklyTime start = WeeklyTime(start_day_of_week(), start_time());
+  WeeklyTime end = WeeklyTime(end_day_of_week(), end_time());
+
+  EXPECT_EQ(result(), start.GetDurationTo(end));
+}
+
+INSTANTIATE_TEST_CASE_P(
+    SameMinutes,
+    WeeklyTimeDurationTest,
+    testing::Values(std::make_tuple(kThursday,
+                                    TimeDelta::FromMinutes(30),
+                                    kSaturday,
+                                    TimeDelta::FromMinutes(30),
+                                    TimeDelta::FromDays(2))));
+
+INSTANTIATE_TEST_CASE_P(
+    DifferentMinutes,
+    WeeklyTimeDurationTest,
+    testing::Values(std::make_tuple(kMonday,
+                                    TimeDelta::FromMinutes(10),
+                                    kWednesday,
+                                    TimeDelta::FromMinutes(30),
+                                    TimeDelta::FromDays(2) +
+                                        TimeDelta::FromMinutes(20))));
+
+INSTANTIATE_TEST_CASE_P(
+    EndLessThanStartSameMinutes,
+    WeeklyTimeDurationTest,
+    testing::Values(std::make_tuple(kSaturday,
+                                    TimeDelta::FromMinutes(100),
+                                    kTuesday,
+                                    TimeDelta::FromMinutes(100),
+                                    TimeDelta::FromDays(3))));
+
+INSTANTIATE_TEST_CASE_P(
+    EndLessThanStartDifferentMinutes,
+    WeeklyTimeDurationTest,
+    testing::Values(std::make_tuple(kSaturday,
+                                    TimeDelta::FromMinutes(150),
+                                    kMonday,
+                                    TimeDelta::FromMinutes(10),
+                                    TimeDelta::FromDays(2) -
+                                        TimeDelta::FromMinutes(140))));
+
+class WeeklyTimeOffsetTest
+    : public testing::TestWithParam<tuple<int /* day_of_week */,
+                                          TimeDelta /* time */,
+                                          TimeDelta /* offset */,
+                                          WeeklyTime /* expected result */>> {
+ protected:
+  int day_of_week() { return std::get<0>(GetParam()); }
+  TimeDelta time() { return std::get<1>(GetParam()); }
+  TimeDelta offset() { return std::get<2>(GetParam()); }
+  WeeklyTime result() { return std::get<3>(GetParam()); }
+};
+
+TEST_P(WeeklyTimeOffsetTest, WeekTimeAddTime) {
+  WeeklyTime test_time = WeeklyTime(day_of_week(), time());
+  test_time.AddTime(offset());
+
+  EXPECT_EQ(result(), test_time);
+}
+
+INSTANTIATE_TEST_CASE_P(
+    SameDayTest,
+    WeeklyTimeOffsetTest,
+    testing::Values(std::make_tuple(kTuesday,
+                                    TimeDelta::FromMinutes(200),
+                                    TimeDelta::FromMinutes(400),
+                                    WeeklyTime(kTuesday,
+                                               TimeDelta::FromMinutes(600)))));
+
+INSTANTIATE_TEST_CASE_P(DayChangeTest,
+                        WeeklyTimeOffsetTest,
+                        testing::Values(std::make_tuple(
+                            kThursday,
+                            TimeDelta::FromHours(23),
+                            TimeDelta::FromHours(2),
+                            WeeklyTime(kFriday, TimeDelta::FromHours(1)))));
+
+INSTANTIATE_TEST_CASE_P(DayChangeTestOver7,
+                        WeeklyTimeOffsetTest,
+                        testing::Values(std::make_tuple(
+                            kSunday,
+                            TimeDelta::FromHours(20),
+                            TimeDelta::FromDays(3),
+                            WeeklyTime(kWednesday, TimeDelta::FromHours(20)))));
+
+class WeeklyTimeIntervalRangeTest
+    : public testing::TestWithParam<tuple<int /* test_day_of_week */,
+                                          int /* test_time */,
+                                          bool /* in regular interval */,
+                                          bool /* in short interval */,
+                                          bool /* in wraparound interval */>> {
+ protected:
+  int day_of_week() { return std::get<0>(GetParam()); }
+  int minutes() { return std::get<1>(GetParam()); }
+  bool regular_result() { return std::get<2>(GetParam()); }
+  bool short_result() { return std::get<3>(GetParam()); }
+  bool wraparound_result() { return std::get<4>(GetParam()); }
+};
+
+TEST_P(WeeklyTimeIntervalRangeTest, InRange) {
+  WeeklyTime test =
+      WeeklyTime(day_of_week(), TimeDelta::FromMinutes(minutes()));
+  WeeklyTimeInterval interval_regular =
+      WeeklyTimeInterval(WeeklyTime(kMonday, TimeDelta::FromMinutes(10)),
+                         WeeklyTime(kWednesday, TimeDelta::FromMinutes(30)));
+  WeeklyTimeInterval interval_short =
+      WeeklyTimeInterval(WeeklyTime(kThursday, TimeDelta::FromMinutes(10)),
+                         WeeklyTime(kThursday, TimeDelta::FromMinutes(11)));
+
+  WeeklyTimeInterval interval_wraparound =
+      WeeklyTimeInterval(WeeklyTime(kFriday, TimeDelta::FromMinutes(10)),
+                         WeeklyTime(kTuesday, TimeDelta::FromMinutes(30)));
+
+  EXPECT_EQ(regular_result(), interval_regular.InRange(test));
+  EXPECT_EQ(short_result(), interval_short.InRange(test));
+  EXPECT_EQ(wraparound_result(), interval_wraparound.InRange(test));
+}
+
+// Test the left side of the range being inclusive.
+INSTANTIATE_TEST_CASE_P(
+    InclusiveSuccessLeft,
+    WeeklyTimeIntervalRangeTest,
+    testing::Values(std::make_tuple(kThursday, 10, false, true, false)));
+
+// Test the right side of the range being exclusive.
+INSTANTIATE_TEST_CASE_P(
+    ExclusiveSuccessRight,
+    WeeklyTimeIntervalRangeTest,
+    testing::Values(std::make_tuple(kThursday, 11, false, false, false)));
+
+// Test falling out of the interval by a small amount.
+INSTANTIATE_TEST_CASE_P(
+    FailOutsideRangeSmall,
+    WeeklyTimeIntervalRangeTest,
+    testing::Values(std::make_tuple(kThursday, 12, false, false, false)));
+
+// These test cases check that intervals wrap around properly.
+INSTANTIATE_TEST_CASE_P(
+    WraparoundOutside,
+    WeeklyTimeIntervalRangeTest,
+    testing::Values(std::make_tuple(kWednesday, 10, true, false, false)));
+
+INSTANTIATE_TEST_CASE_P(
+    WraparoundInsideRight,
+    WeeklyTimeIntervalRangeTest,
+    testing::Values(std::make_tuple(kSaturday, 10, false, false, true)));
+
+INSTANTIATE_TEST_CASE_P(
+    WraparoundInsideLeft,
+    WeeklyTimeIntervalRangeTest,
+    testing::Values(std::make_tuple(kMonday, 0, false, false, true)));
+
+}  // namespace chromeos_update_manager
diff --git a/update_metadata.proto b/update_metadata.proto
index 99b7422..7e8e7d4 100644
--- a/update_metadata.proto
+++ b/update_metadata.proto
@@ -14,24 +14,26 @@
 // limitations under the License.
 //
 
-// Update file format: A delta update file contains all the deltas needed
-// to update a system from one specific version to another specific
-// version. The update format is represented by this struct pseudocode:
+// Update file format: An update file contains all the operations needed
+// to update a system to a specific version. It can be a full payload which
+// can update from any version, or a delta payload which can only update
+// from a specific version.
+// The update format is represented by this struct pseudocode:
 // struct delta_update_file {
 //   char magic[4] = "CrAU";
-//   uint64 file_format_version;
+//   uint64 file_format_version;  // payload major version
 //   uint64 manifest_size;  // Size of protobuf DeltaArchiveManifest
 //
-//   // Only present if format_version > 1:
+//   // Only present if format_version >= 2:
 //   uint32 metadata_signature_size;
 //
-//   // The Bzip2 compressed DeltaArchiveManifest
-//   char manifest[];
+//   // The DeltaArchiveManifest protobuf serialized, not compressed.
+//   char manifest[manifest_size];
 //
 //   // The signature of the metadata (from the beginning of the payload up to
 //   // this location, not including the signature itself). This is a serialized
 //   // Signatures message.
-//   char medatada_signature_message[metadata_signature_size];
+//   char metadata_signature_message[metadata_signature_size];
 //
 //   // Data blobs for files, no specific format. The specific offset
 //   // and length of each data blob is recorded in the DeltaArchiveManifest.
@@ -39,9 +41,12 @@
 //     char data[];
 //   } blobs[];
 //
-//   // These two are not signed:
+//   // The signature of the entire payload, everything up to this location,
+//   // except that metadata_signature_message is skipped to simplify the
+//   // signing process. These two are not signed:
 //   uint64 payload_signatures_message_size;
-//   char payload_signatures_message[];
+//   // This is a serialized Signatures message.
+//   char payload_signatures_message[payload_signatures_message_size];
 //
 // };
 
@@ -61,13 +66,13 @@
 //   dst_extents on the drive, zero padding to block size.
 // - MOVE: Copy the data in src_extents to dst_extents. Extents may overlap,
 //   so it may be desirable to read all src_extents data into memory before
-//   writing it out.
+//   writing it out. (deprecated)
 // - SOURCE_COPY: Copy the data in src_extents in the old partition to
 //   dst_extents in the new partition. There's no overlapping of data because
 //   the extents are in different partitions.
 // - BSDIFF: Read src_length bytes from src_extents into memory, perform
 //   bspatch with attached data, write new data to dst_extents, zero padding
-//   to block size.
+//   to block size. (deprecated)
 // - SOURCE_BSDIFF: Read the data in src_extents in the old partition, perform
 //   bspatch with the attached data and write the new data to dst_extents in the
 //   new partition.
@@ -82,7 +87,7 @@
 //   the new partition.
 //
 // The operations allowed in the payload (supported by the client) depend on the
-// major and minor version. See InstallOperation.Type bellow for details.
+// major and minor version. See InstallOperation.Type below for details.
 
 syntax = "proto2";
 
@@ -102,6 +107,11 @@
 // A sentinel value (kuint64max) as the start block denotes a sparse-hole
 // in a file whose block-length is specified by num_blocks.
 
+message Extent {
+  optional uint64 start_block = 1;
+  optional uint64 num_blocks = 2;
+}
+
 // Signatures: Updates may be signed by the OS vendor. The client verifies
 // an update's signature by hashing the entire download. The section of the
 // download that contains the signature is at the end of the file, so when
@@ -114,11 +124,6 @@
 // to verify the download. The public key is expected to be part of the
 // client.
 
-message Extent {
-  optional uint64 start_block = 1;
-  optional uint64 num_blocks = 2;
-}
-
 message Signatures {
   message Signature {
     optional uint32 version = 1;
@@ -155,8 +160,8 @@
   enum Type {
     REPLACE = 0;  // Replace destination extents w/ attached data
     REPLACE_BZ = 1;  // Replace destination extents w/ attached bzipped data
-    MOVE = 2;  // Move source extents to destination extents
-    BSDIFF = 3;  // The data is a bsdiff binary diff
+    MOVE = 2 [deprecated = true];  // Move source extents to destination extents
+    BSDIFF = 3 [deprecated = true];  // The data is a bsdiff binary diff
 
     // On minor version 2 or newer, these operations are supported:
     SOURCE_COPY = 4; // Copy from source to target partition
@@ -175,11 +180,14 @@
     PUFFDIFF = 9;  // The data is in puffdiff format.
   }
   required Type type = 1;
+
+  // Only minor version 6 or newer supports 64-bit |data_offset| and
+  // |data_length|; older clients will read them as uint32.
   // The offset into the delta file (after the protobuf)
   // where the data (if any) is stored
-  optional uint32 data_offset = 2;
+  optional uint64 data_offset = 2;
   // The length of the data in the delta file
-  optional uint32 data_length = 3;
+  optional uint64 data_length = 3;
 
   // Ordered list of extents that are read from (if any) and written to.
   repeated Extent src_extents = 4;
@@ -248,6 +256,52 @@
   // Whether a failure in the postinstall step for this partition should be
   // ignored.
   optional bool postinstall_optional = 9;
+
+  // On minor version 6 or newer, these fields are supported:
+
+  // The extent for data covered by verity hash tree.
+  optional Extent hash_tree_data_extent = 10;
+
+  // The extent to store verity hash tree.
+  optional Extent hash_tree_extent = 11;
+
+  // The hash algorithm used in verity hash tree.
+  optional string hash_tree_algorithm = 12;
+
+  // The salt used for verity hash tree.
+  optional bytes hash_tree_salt = 13;
+
+  // The extent for data covered by FEC.
+  optional Extent fec_data_extent = 14;
+
+  // The extent to store FEC.
+  optional Extent fec_extent = 15;
+
+  // The number of FEC roots.
+  optional uint32 fec_roots = 16 [default = 2];
+}
+
+message DynamicPartitionGroup {
+  // Name of the group.
+  required string name = 1;
+
+  // Maximum size of the group. The sum of sizes of all partitions in the group
+  // must not exceed the maximum size of the group.
+  optional uint64 size = 2;
+
+  // A list of partitions that belong to the group.
+  repeated string partition_names = 3;
+}
+
+// Metadata related to all dynamic partitions.
+message DynamicPartitionMetadata {
+  // All updatable groups present in |partitions| of this DeltaArchiveManifest.
+  // - If an updatable group is on the device but not in the manifest, it is
+  //   not updated. Hence, the group will not be resized, and partitions cannot
+  //   be added to or removed from the group.
+  // - If an updatable group is in the manifest but not on the device, the group
+  //   is added to the device.
+  repeated DynamicPartitionGroup groups = 1;
 }
 
 message DeltaArchiveManifest {
@@ -281,6 +335,7 @@
   optional ImageInfo new_image_info = 11;
 
   // The minor version, also referred as "delta version", of the payload.
+  // Minor version 0 is full payload, everything else is delta payload.
   optional uint32 minor_version = 12 [default = 0];
 
   // Only present in major version >= 2. List of partitions that will be
@@ -294,4 +349,7 @@
   // The maximum timestamp of the OS allowed to apply this payload.
   // Can be used to prevent downgrading the OS.
   optional int64 max_timestamp = 14;
+
+  // Metadata related to all dynamic partitions.
+  optional DynamicPartitionMetadata dynamic_partition_metadata = 15;
 }
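
The struct pseudocode at the top of this file documents a fixed payload header (magic "CrAU", format version, manifest size, and, for major version >= 2, a metadata signature size) followed by the manifest, the metadata signature, the data blobs, and the payload signature. The sketch below shows one way to parse that fixed header; it is illustrative, not part of this change, and it assumes the integer fields are stored big-endian, which the pseudocode above does not spell out.

#include <cstdint>
#include <string>

struct PayloadHeader {
  uint64_t file_format_version;      // Payload major version.
  uint64_t manifest_size;            // Size of DeltaArchiveManifest.
  uint32_t metadata_signature_size;  // Only present if major version >= 2.
};

static uint64_t ReadBigEndian(const uint8_t* data, int num_bytes) {
  uint64_t value = 0;
  for (int i = 0; i < num_bytes; ++i)
    value = (value << 8) | data[i];
  return value;
}

// Returns false if |payload| is too short or does not start with "CrAU".
bool ParsePayloadHeader(const std::string& payload, PayloadHeader* header) {
  const size_t kBaseHeaderSize = 4 + 8 + 8;  // magic + version + manifest_size
  if (payload.size() < kBaseHeaderSize || payload.compare(0, 4, "CrAU") != 0)
    return false;
  const uint8_t* data = reinterpret_cast<const uint8_t*>(payload.data());
  header->file_format_version = ReadBigEndian(data + 4, 8);
  header->manifest_size = ReadBigEndian(data + 12, 8);
  header->metadata_signature_size = 0;
  if (header->file_format_version >= 2) {
    if (payload.size() < kBaseHeaderSize + 4)
      return false;
    header->metadata_signature_size =
        static_cast<uint32_t>(ReadBigEndian(data + kBaseHeaderSize, 4));
  }
  return true;
}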
diff --git a/update_status_utils.cc b/update_status_utils.cc
index ff039b8..cbc4f14 100644
--- a/update_status_utils.cc
+++ b/update_status_utils.cc
@@ -30,6 +30,8 @@
       return update_engine::kUpdateStatusCheckingForUpdate;
     case UpdateStatus::UPDATE_AVAILABLE:
       return update_engine::kUpdateStatusUpdateAvailable;
+    case UpdateStatus::NEED_PERMISSION_TO_UPDATE:
+      return update_engine::kUpdateStatusNeedPermissionToUpdate;
     case UpdateStatus::DOWNLOADING:
       return update_engine::kUpdateStatusDownloading;
     case UpdateStatus::VERIFYING:
@@ -50,8 +52,7 @@
   return nullptr;
 }
 
-bool StringToUpdateStatus(const std::string& s,
-                          UpdateStatus* status) {
+bool StringToUpdateStatus(const std::string& s, UpdateStatus* status) {
   if (s == update_engine::kUpdateStatusIdle) {
     *status = UpdateStatus::IDLE;
     return true;
@@ -61,6 +62,9 @@
   } else if (s == update_engine::kUpdateStatusUpdateAvailable) {
     *status = UpdateStatus::UPDATE_AVAILABLE;
     return true;
+  } else if (s == update_engine::kUpdateStatusNeedPermissionToUpdate) {
+    *status = UpdateStatus::NEED_PERMISSION_TO_UPDATE;
+    return true;
   } else if (s == update_engine::kUpdateStatusDownloading) {
     *status = UpdateStatus::DOWNLOADING;
     return true;
diff --git a/utils_android.cc b/utils_android.cc
deleted file mode 100644
index 393e65a..0000000
--- a/utils_android.cc
+++ /dev/null
@@ -1,65 +0,0 @@
-//
-// Copyright (C) 2016 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/utils_android.h"
-
-#include <fs_mgr.h>
-
-using std::string;
-
-namespace chromeos_update_engine {
-
-namespace {
-
-// Open the appropriate fstab file and fallback to /fstab.device if
-// that's what's being used.
-static struct fstab* OpenFSTab() {
-  struct fstab* fstab = fs_mgr_read_fstab_default();
-  if (fstab != nullptr)
-    return fstab;
-
-  fstab = fs_mgr_read_fstab("/fstab.device");
-  return fstab;
-}
-
-}  // namespace
-
-namespace utils {
-
-bool DeviceForMountPoint(const string& mount_point, base::FilePath* device) {
-  struct fstab* fstab;
-  struct fstab_rec* record;
-
-  fstab = OpenFSTab();
-  if (fstab == nullptr) {
-    LOG(ERROR) << "Error opening fstab file.";
-    return false;
-  }
-  record = fs_mgr_get_entry_for_mount_point(fstab, mount_point.c_str());
-  if (record == nullptr) {
-    LOG(ERROR) << "Error finding " << mount_point << " entry in fstab file.";
-    fs_mgr_free_fstab(fstab);
-    return false;
-  }
-
-  *device = base::FilePath(record->blk_device);
-  fs_mgr_free_fstab(fstab);
-  return true;
-}
-
-}  // namespace utils
-
-}  // namespace chromeos_update_engine
diff --git a/utils_android.h b/utils_android.h
deleted file mode 100644
index 18dd8ab..0000000
--- a/utils_android.h
+++ /dev/null
@@ -1,37 +0,0 @@
-//
-// Copyright (C) 2016 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_UTILS_ANDROID_H_
-#define UPDATE_ENGINE_UTILS_ANDROID_H_
-
-#include <string>
-
-#include <base/files/file_util.h>
-
-namespace chromeos_update_engine {
-
-namespace utils {
-
-// Find the block device that should be mounted in the |mount_point| path and
-// store it in |device|. Returns whether a device was found on the fstab.
-bool DeviceForMountPoint(const std::string& mount_point,
-                         base::FilePath* device);
-
-}  // namespace utils
-
-}  // namespace chromeos_update_engine
-
-#endif  // UPDATE_ENGINE_UTILS_ANDROID_H_