Snap for 7478028 from 02fce0ac17fa8089c154bced05b12b42e737d0af to mainline-documentsui-release

Change-Id: I281fa2d437650f40fc4b40438b84bf429af8f155
diff --git a/.clang-format b/.clang-format
index 3b6a627..3044f59 100644
--- a/.clang-format
+++ b/.clang-format
@@ -37,3 +37,7 @@
 IncludeBlocks: Preserve
 PointerAlignment: Left
 TabWidth: 2
+
+# cpplint.py does smarter #include sorting than clang-format (the former ignores
+# case and changes '-' to '_').
+SortIncludes: false
diff --git a/Android.bp b/Android.bp
index 3287b7b..d74e78f 100644
--- a/Android.bp
+++ b/Android.bp
@@ -15,6 +15,23 @@
 //
 
 // AIDL interface between libupdate_engine and framework.jar
+package {
+    default_applicable_licenses: ["system_update_engine_license"],
+}
+
+// Added automatically by a large-scale-change
+// See: http://go/android-license-faq
+license {
+    name: "system_update_engine_license",
+    visibility: [":__subpackages__"],
+    license_kinds: [
+        "SPDX-license-identifier-Apache-2.0",
+    ],
+    license_text: [
+        "NOTICE",
+    ],
+}
+
 filegroup {
     name: "libupdate_engine_aidl",
     srcs: [
@@ -29,12 +46,7 @@
 
     cflags: [
         "-DBASE_VER=576279",
-        "-DUSE_BINDER=1",
-        "-DUSE_CHROME_NETWORK_PROXY=0",
-        "-DUSE_CHROME_KIOSK_APP=0",
         "-DUSE_HWID_OVERRIDE=0",
-        "-DUSE_MTD=0",
-        "-DUSE_OMAHA=0",
         "-D_FILE_OFFSET_BITS=64",
         "-D_POSIX_C_SOURCE=199309L",
         "-Wa,--noexecstack",
@@ -54,7 +66,7 @@
     ],
     include_dirs: ["system"],
     local_include_dirs: ["client_library/include"],
-    static_libs: ["libgtest_prod"],
+    header_libs: ["libgtest_prod_headers"],
     shared_libs: [
         "libbrillo-stream",
         "libbrillo",
@@ -85,6 +97,44 @@
     },
 }
 
+// libpayload_extent_utils, libcow_operation_convert (type: libraries)
+// ========================================================
+cc_library_static {
+    name: "libpayload_extent_utils",
+    defaults: [
+        "ue_defaults",
+    ],
+    host_supported: true,
+    recovery_available: true,
+    srcs: [
+        "payload_generator/extent_utils.cc",
+    ],
+    static_libs: [
+        "update_metadata-protos",
+    ],
+}
+
+cc_library {
+    name: "libcow_operation_convert",
+    host_supported: true,
+    recovery_available: true,
+    defaults: [
+        "ue_defaults",
+        "update_metadata-protos_exports",
+    ],
+    srcs: [
+        "common/cow_operation_convert.cc",
+    ],
+    static_libs: [
+        "libsnapshot_cow",
+        "update_metadata-protos",
+        "libpayload_extent_ranges",
+        "libpayload_extent_utils",
+        "libbrotli",
+        "libz",
+    ],
+}
+
 // update_metadata-protos (type: static_library)
 // ========================================================
 // Protobufs.
@@ -123,15 +173,22 @@
         "libbz",
         "libbspatch",
         "libbrotli",
+        "libc++fs",
         "libfec_rs",
         "libpuffpatch",
         "libverity_tree",
+        "libsnapshot_cow",
+        "libbrotli",
+        "libz",
+        "libpayload_extent_ranges",
+        "libpayload_extent_utils",
+        "libcow_operation_convert",
     ],
     shared_libs: [
-        "libziparchive",
         "libbase",
         "libcrypto",
         "libfec",
+        "libziparchive",
     ],
 }
 
@@ -145,6 +202,7 @@
     recovery_available: true,
 
     srcs: [
+        "aosp/platform_constants_android.cc",
         "common/action_processor.cc",
         "common/boot_control_stub.cc",
         "common/clock.cc",
@@ -158,7 +216,6 @@
         "common/http_fetcher.cc",
         "common/hwid_override.cc",
         "common/multi_range_http_fetcher.cc",
-        "common/platform_constants_android.cc",
         "common/prefs.cc",
         "common/proxy_resolver.cc",
         "common/subprocess.cc",
@@ -167,8 +224,8 @@
         "payload_consumer/bzip_extent_writer.cc",
         "payload_consumer/cached_file_descriptor.cc",
         "payload_consumer/certificate_parser_android.cc",
+        "payload_consumer/cow_writer_file_descriptor.cc",
         "payload_consumer/delta_performer.cc",
-        "payload_consumer/download_action.cc",
         "payload_consumer/extent_reader.cc",
         "payload_consumer/extent_writer.cc",
         "payload_consumer/file_descriptor.cc",
@@ -180,10 +237,15 @@
         "payload_consumer/payload_constants.cc",
         "payload_consumer/payload_metadata.cc",
         "payload_consumer/payload_verifier.cc",
+        "payload_consumer/partition_writer.cc",
+        "payload_consumer/partition_writer_factory_android.cc",
+        "payload_consumer/vabc_partition_writer.cc",
+        "payload_consumer/snapshot_extent_writer.cc",
         "payload_consumer/postinstall_runner_action.cc",
         "payload_consumer/verity_writer_android.cc",
         "payload_consumer/xz_extent_writer.cc",
         "payload_consumer/fec_file_descriptor.cc",
+        "payload_consumer/partition_update_generator_android.cc",
     ],
 }
 
@@ -200,6 +262,8 @@
         "libgsi",
         "libpayload_consumer",
         "libsnapshot",
+        "libsnapshot_cow",
+        "libz",
         "update_metadata-protos",
     ],
     shared_libs: [
@@ -241,10 +305,10 @@
     recovery_available: true,
 
     srcs: [
-        "boot_control_android.cc",
-        "cleanup_previous_update_action.cc",
-        "dynamic_partition_control_android.cc",
-        "dynamic_partition_utils.cc",
+        "aosp/boot_control_android.cc",
+        "aosp/cleanup_previous_update_action.cc",
+        "aosp/dynamic_partition_control_android.cc",
+        "aosp/dynamic_partition_utils.cc",
     ],
 }
 
@@ -261,10 +325,15 @@
     ],
 
     static_libs: [
+        "libavb",
+        "libavb_user",
+        "gkiprops",
         "libpayload_consumer",
         "libupdate_engine_boot_control",
+        "PlatformProperties",
     ],
     shared_libs: [
+        "apex_aidl_interface-cpp",
         "libandroid_net",
         "libbase",
         "libbinder",
@@ -273,11 +342,15 @@
         "libbrillo-binder",
         "libcurl",
         "libcutils",
+        "libupdate_engine_stable-V1-cpp",
         "liblog",
         "libssl",
         "libstatslog",
         "libutils",
     ],
+    whole_static_libs: [
+        "com.android.sysprop.apex",
+    ],
 }
 
 cc_library_static {
@@ -298,17 +371,20 @@
 
     srcs: [
         ":libupdate_engine_aidl",
-        "binder_service_android.cc",
+        "common/system_state.cc",
+        "aosp/apex_handler_android.cc",
+        "aosp/binder_service_android.cc",
+        "aosp/binder_service_stable_android.cc",
+        "aosp/daemon_android.cc",
+        "aosp/daemon_state_android.cc",
+        "aosp/hardware_android.cc",
+        "aosp/logging_android.cc",
+        "aosp/network_selector_android.cc",
+        "aosp/update_attempter_android.cc",
         "certificate_checker.cc",
-        "daemon.cc",
-        "daemon_state_android.cc",
-        "hardware_android.cc",
+        "download_action.cc",
         "libcurl_http_fetcher.cc",
-        "logging_android.cc",
-        "metrics_reporter_android.cc",
         "metrics_utils.cc",
-        "network_selector_android.cc",
-        "update_attempter_android.cc",
         "update_boot_flags_action.cc",
         "update_status_utils.cc",
     ],
@@ -330,7 +406,7 @@
         "otacerts",
     ],
 
-    srcs: ["main.cc"],
+    srcs: ["main.cc", "aosp/metrics_reporter_android.cc"],
     init_rc: ["update_engine.rc"],
 }
 
@@ -353,15 +429,18 @@
     // TODO(deymo): Remove external/cros/system_api/dbus once the strings are moved
     // out of the DBus interface.
     include_dirs: ["external/cros/system_api/dbus"],
+    header_libs: ["libgtest_prod_headers"],
 
     srcs: [
-        "hardware_android.cc",
-        "logging_android.cc",
-        "metrics_reporter_stub.cc",
+        "aosp/hardware_android.cc",
+        "aosp/logging_android.cc",
+        "aosp/sideload_main.cc",
+        "aosp/update_attempter_android.cc",
+        "common/metrics_reporter_stub.cc",
+        "common/network_selector_stub.cc",
+        "common/system_state.cc",
+        "download_action.cc",
         "metrics_utils.cc",
-        "network_selector_stub.cc",
-        "sideload_main.cc",
-        "update_attempter_android.cc",
         "update_boot_flags_action.cc",
         "update_status_utils.cc",
     ],
@@ -382,9 +461,9 @@
         // We add the static versions of the shared libraries that are not installed to
         // recovery image due to size concerns. Need to include all the static library
         // dependencies of these static libraries.
+        "gkiprops",
         "libevent",
         "libmodpb64",
-        "libgtest_prod",
         "libprotobuf-cpp-lite",
         "libbrillo-stream",
         "libbrillo",
@@ -406,54 +485,6 @@
     ],
 }
 
-// libupdate_engine_client (type: shared_library)
-// ========================================================
-cc_library_shared {
-    name: "libupdate_engine_client",
-
-    cflags: [
-        "-Wall",
-        "-Werror",
-        "-Wno-unused-parameter",
-        "-DUSE_BINDER=1",
-    ],
-    export_include_dirs: ["client_library/include"],
-    include_dirs: [
-        // TODO(deymo): Remove "external/cros/system_api/dbus" when dbus is not used.
-        "external/cros/system_api/dbus",
-        "system",
-    ],
-
-    aidl: {
-        local_include_dirs: ["binder_bindings"],
-    },
-
-    shared_libs: [
-        "libchrome",
-        "libbrillo",
-        "libbinder",
-        "libbrillo-binder",
-        "libutils",
-    ],
-
-    srcs: [
-        ":libupdate_engine_client_aidl",
-        "client_library/client.cc",
-        "client_library/client_binder.cc",
-        "parcelable_update_engine_status.cc",
-        "update_status_utils.cc",
-    ],
-}
-
-filegroup {
-    name: "libupdate_engine_client_aidl",
-    srcs: [
-        "binder_bindings/android/brillo/IUpdateEngine.aidl",
-        "binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl",
-    ],
-    path: "binder_bindings",
-}
-
 // update_engine_client (type: executable)
 // ========================================================
 // update_engine console client.
@@ -478,8 +509,8 @@
 
     srcs: [
         ":libupdate_engine_aidl",
+        "aosp/update_engine_client_android.cc",
         "common/error_code_utils.cc",
-        "update_engine_client_android.cc",
         "update_status_utils.cc",
     ],
 }
@@ -509,6 +540,8 @@
         "libpuffdiff",
         "libverity_tree",
         "update_metadata-protos",
+        "libpayload_extent_utils",
+        "libcow_size_estimator",
     ],
     shared_libs: [
         "libbase",
@@ -517,6 +550,40 @@
 }
 
 cc_library_static {
+    name: "libpayload_extent_ranges",
+    defaults: [
+        "ue_defaults",
+    ],
+    host_supported: true,
+    recovery_available: true,
+    srcs: [
+        "payload_generator/extent_ranges.cc",
+    ],
+    static_libs: [
+        "update_metadata-protos",
+    ],
+}
+
+cc_library_static {
+    name: "libcow_size_estimator",
+    defaults: [
+        "ue_defaults",
+        "update_metadata-protos_exports"
+    ],
+    host_supported: true,
+    recovery_available: true,
+    srcs: [
+        "payload_generator/cow_size_estimator.cc",
+    ],
+    static_libs: [
+        "update_metadata-protos",
+        "libbase",
+        "libsnapshot_cow",
+        "libcow_operation_convert",
+    ],
+}
+
+cc_library_static {
     name: "libpayload_generator",
     defaults: [
         "ue_defaults",
@@ -525,32 +592,29 @@
     host_supported: true,
 
     srcs: [
+        "common/system_state.cc",
+        "download_action.cc",
         "payload_generator/ab_generator.cc",
         "payload_generator/annotated_operation.cc",
         "payload_generator/blob_file_writer.cc",
         "payload_generator/block_mapping.cc",
         "payload_generator/boot_img_filesystem.cc",
         "payload_generator/bzip.cc",
-        "payload_generator/cycle_breaker.cc",
         "payload_generator/deflate_utils.cc",
         "payload_generator/delta_diff_generator.cc",
         "payload_generator/delta_diff_utils.cc",
         "payload_generator/ext2_filesystem.cc",
         "payload_generator/extent_ranges.cc",
-        "payload_generator/extent_utils.cc",
         "payload_generator/full_update_generator.cc",
-        "payload_generator/graph_types.cc",
-        "payload_generator/graph_utils.cc",
-        "payload_generator/inplace_generator.cc",
         "payload_generator/mapfile_filesystem.cc",
+        "payload_generator/merge_sequence_generator.cc",
         "payload_generator/payload_file.cc",
         "payload_generator/payload_generation_config_android.cc",
         "payload_generator/payload_generation_config.cc",
+        "payload_generator/payload_properties.cc",
         "payload_generator/payload_signer.cc",
         "payload_generator/raw_filesystem.cc",
         "payload_generator/squashfs_filesystem.cc",
-        "payload_generator/tarjan.cc",
-        "payload_generator/topological_sort.cc",
         "payload_generator/xz_android.cc",
     ],
 }
@@ -701,16 +765,22 @@
     test_suites: ["device-tests"],
 
     srcs: [
+        "aosp/apex_handler_android_unittest.cc",
+        "aosp/cleanup_previous_update_action_unittest.cc",
+        "aosp/dynamic_partition_control_android_unittest.cc",
+        "aosp/update_attempter_android_unittest.cc",
         "certificate_checker_unittest.cc",
         "common/action_pipe_unittest.cc",
         "common/action_processor_unittest.cc",
         "common/action_unittest.cc",
+        "common/cow_operation_convert_unittest.cc",
         "common/cpu_limiter_unittest.cc",
         "common/fake_prefs.cc",
         "common/file_fetcher_unittest.cc",
         "common/hash_calculator_unittest.cc",
         "common/http_fetcher_unittest.cc",
         "common/hwid_override_unittest.cc",
+        "common/metrics_reporter_stub.cc",
         "common/mock_http_fetcher.cc",
         "common/prefs_unittest.cc",
         "common/proxy_resolver_unittest.cc",
@@ -718,18 +788,24 @@
         "common/terminator_unittest.cc",
         "common/test_utils.cc",
         "common/utils_unittest.cc",
-        "dynamic_partition_control_android_unittest.cc",
+        "download_action_android_unittest.cc",
+        "libcurl_http_fetcher_unittest.cc",
         "payload_consumer/bzip_extent_writer_unittest.cc",
         "payload_consumer/cached_file_descriptor_unittest.cc",
+        "payload_consumer/cow_writer_file_descriptor_unittest.cc",
         "payload_consumer/certificate_parser_android_unittest.cc",
         "payload_consumer/delta_performer_integration_test.cc",
         "payload_consumer/delta_performer_unittest.cc",
+        "payload_consumer/partition_writer_unittest.cc",
         "payload_consumer/extent_reader_unittest.cc",
         "payload_consumer/extent_writer_unittest.cc",
+        "payload_consumer/snapshot_extent_writer_unittest.cc",
         "payload_consumer/fake_file_descriptor.cc",
         "payload_consumer/file_descriptor_utils_unittest.cc",
         "payload_consumer/file_writer_unittest.cc",
         "payload_consumer/filesystem_verifier_action_unittest.cc",
+        "payload_consumer/install_plan_unittest.cc",
+        "payload_consumer/partition_update_generator_android_unittest.cc",
         "payload_consumer/postinstall_runner_action_unittest.cc",
         "payload_consumer/verity_writer_android_unittest.cc",
         "payload_consumer/xz_extent_writer_unittest.cc",
@@ -737,7 +813,6 @@
         "payload_generator/blob_file_writer_unittest.cc",
         "payload_generator/block_mapping_unittest.cc",
         "payload_generator/boot_img_filesystem_unittest.cc",
-        "payload_generator/cycle_breaker_unittest.cc",
         "payload_generator/deflate_utils_unittest.cc",
         "payload_generator/delta_diff_utils_unittest.cc",
         "payload_generator/ext2_filesystem_unittest.cc",
@@ -745,30 +820,28 @@
         "payload_generator/extent_utils_unittest.cc",
         "payload_generator/fake_filesystem.cc",
         "payload_generator/full_update_generator_unittest.cc",
-        "payload_generator/graph_utils_unittest.cc",
-        "payload_generator/inplace_generator_unittest.cc",
         "payload_generator/mapfile_filesystem_unittest.cc",
+        "payload_generator/merge_sequence_generator_unittest.cc",
         "payload_generator/payload_file_unittest.cc",
         "payload_generator/payload_generation_config_android_unittest.cc",
         "payload_generator/payload_generation_config_unittest.cc",
+        "payload_generator/payload_properties_unittest.cc",
         "payload_generator/payload_signer_unittest.cc",
         "payload_generator/squashfs_filesystem_unittest.cc",
-        "payload_generator/tarjan_unittest.cc",
-        "payload_generator/topological_sort_unittest.cc",
         "payload_generator/zip_unittest.cc",
         "testrunner.cc",
-        "update_attempter_android_unittest.cc",
+        "update_status_utils_unittest.cc",
     ],
 }
 
 // Brillo update payload generation script
 // ========================================================
-cc_prebuilt_binary {
+sh_binary {
     name: "brillo_update_payload",
     device_supported: false,
     host_supported: true,
 
-    srcs: ["scripts/brillo_update_payload"],
+    src: "scripts/brillo_update_payload",
     required: [
         "delta_generator",
         "shflags",
@@ -782,11 +855,49 @@
     },
 }
 
-// AIDL interface between libupdate_engine and the Things jar.
-filegroup {
-    name: "things_update_engine_aidl",
+// update_engine header library
+cc_library_headers {
+    name: "libupdate_engine_headers",
+
+    // This header library is available to core and product modules.
+    product_available: true,
+
+    export_include_dirs: ["."],
+    apex_available: [
+        "com.android.gki.*",
+        "//apex_available:platform",
+    ],
+    host_supported: true,
+    recovery_available: true,
+    ramdisk_available: true,
+
+    target: {
+        darwin: {
+            enabled: false,
+        },
+    }
+}
+
+cc_binary_host {
+    name: "cow_converter",
+    defaults: [
+        "ue_defaults",
+        "libpayload_consumer_exports",
+    ],
     srcs: [
-        "binder_bindings/android/brillo/IUpdateEngine.aidl",
-        "binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl",
+        "aosp/cow_converter.cc",
+    ],
+    static_libs: [
+        "liblog",
+        "libbrotli",
+        "libbase",
+        "libcow_operation_convert",
+        "libcow_size_estimator",
+        "libpayload_consumer",
+        "libpayload_extent_ranges",
+        "libpayload_extent_utils",
+        "libsnapshot_cow",
+        "libz",
+        "update_metadata-protos",
     ],
 }
diff --git a/BUILD.gn b/BUILD.gn
new file mode 100644
index 0000000..e60d33b
--- /dev/null
+++ b/BUILD.gn
@@ -0,0 +1,609 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Stop linter from complaining XXX_unittest.cc naming.
+# gnlint: disable=GnLintSourceFileNames
+
+import("//common-mk/generate-dbus-adaptors.gni")
+import("//common-mk/generate-dbus-proxies.gni")
+import("//common-mk/openssl_pem.gni")
+import("//common-mk/pkg_config.gni")
+import("//common-mk/proto_library.gni")
+import("//update_engine/tar_bunzip2.gni")
+
+group("all") {
+  deps = [
+    ":delta_generator",
+    ":libpayload_consumer",
+    ":libpayload_generator",
+    ":libupdate_engine",
+    ":libupdate_engine_client",
+    ":update_engine",
+    ":update_engine-dbus-adaptor",
+    ":update_engine-dbus-kiosk-app-client",
+    ":update_engine_client",
+    ":update_metadata-protos",
+  ]
+
+  if (use.test) {
+    deps += [
+      ":test_http_server",
+      ":test_subprocess",
+      ":update_engine-test_images",
+      ":update_engine-testkeys",
+      ":update_engine-testkeys-ec",
+      ":update_engine_test_libs",
+      ":update_engine_unittests",
+    ]
+  }
+
+  if (use.fuzzer) {
+    deps += [
+      ":update_engine_delta_performer_fuzzer",
+      ":update_engine_omaha_request_action_fuzzer",
+    ]
+  }
+}
+
+pkg_config("target_defaults") {
+  cflags_cc = [
+    "-fno-strict-aliasing",
+    "-Wnon-virtual-dtor",
+  ]
+  cflags = [ "-ffunction-sections" ]
+  ldflags = [ "-Wl,--gc-sections" ]
+  defines = [
+    "__CHROMEOS__",
+    "_FILE_OFFSET_BITS=64",
+    "_POSIX_C_SOURCE=199309L",
+    "USE_CFM=${use.cfm}",
+    "USE_DBUS=${use.dbus}",
+    "USE_FEC=0",
+    "USE_HWID_OVERRIDE=${use.hwid_override}",
+  ]
+  include_dirs = [
+    # We need this include dir because we include all the local code as
+    # "update_engine/...".
+    "${platform2_root}",
+    "${platform2_root}/update_engine/client_library/include",
+  ]
+
+  # NOSORT
+  pkg_deps = [
+    "libbrillo",
+    "libchrome",
+
+    # system_api depends on protobuf (or protobuf-lite). It must appear
+    # before protobuf here or the linker flags won't be in the right
+    # order.
+    "system_api",
+  ]
+  if (use.fuzzer) {
+    pkg_deps += [ "protobuf" ]
+  } else {
+    pkg_deps += [ "protobuf-lite" ]
+  }
+}
+
+# Protobufs.
+proto_library("update_metadata-protos") {
+  proto_in_dir = "."
+  proto_out_dir = "include/update_engine"
+  sources = [ "update_metadata.proto" ]
+}
+
+# Chrome D-Bus bindings.
+generate_dbus_adaptors("update_engine-dbus-adaptor") {
+  dbus_adaptors_out_dir = "include/dbus_bindings"
+  sources = [ "dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml" ]
+}
+
+generate_dbus_proxies("update_engine-dbus-kiosk-app-client") {
+  mock_output_file = "include/kiosk-app/dbus-proxy-mocks.h"
+  proxy_output_file = "include/kiosk-app/dbus-proxies.h"
+  sources = [ "dbus_bindings/org.chromium.KioskAppService.dbus-xml" ]
+}
+
+# The payload application component and common dependencies.
+static_library("libpayload_consumer") {
+  sources = [
+    "common/action_processor.cc",
+    "common/boot_control_stub.cc",
+    "common/clock.cc",
+    "common/constants.cc",
+    "common/cpu_limiter.cc",
+    "common/dynamic_partition_control_stub.cc",
+    "common/error_code_utils.cc",
+    "common/hash_calculator.cc",
+    "common/http_common.cc",
+    "common/http_fetcher.cc",
+    "common/hwid_override.cc",
+    "common/multi_range_http_fetcher.cc",
+    "common/prefs.cc",
+    "common/proxy_resolver.cc",
+    "common/subprocess.cc",
+    "common/terminator.cc",
+    "common/utils.cc",
+    "cros/platform_constants_chromeos.cc",
+    "payload_consumer/bzip_extent_writer.cc",
+    "payload_consumer/cached_file_descriptor.cc",
+    "payload_consumer/certificate_parser_stub.cc",
+    "payload_consumer/delta_performer.cc",
+    "payload_consumer/extent_reader.cc",
+    "payload_consumer/extent_writer.cc",
+    "payload_consumer/file_descriptor.cc",
+    "payload_consumer/file_descriptor_utils.cc",
+    "payload_consumer/file_writer.cc",
+    "payload_consumer/filesystem_verifier_action.cc",
+    "payload_consumer/install_plan.cc",
+    "payload_consumer/mount_history.cc",
+    "payload_consumer/partition_update_generator_stub.cc",
+    "payload_consumer/partition_writer_factory_chromeos.cc",
+    "payload_consumer/partition_writer.cc",
+    "payload_consumer/payload_constants.cc",
+    "payload_consumer/payload_metadata.cc",
+    "payload_consumer/payload_verifier.cc",
+    "payload_consumer/postinstall_runner_action.cc",
+    "payload_consumer/verity_writer_stub.cc",
+    "payload_consumer/xz_extent_writer.cc",
+  ]
+  configs += [ ":target_defaults" ]
+  libs = [
+    "bz2",
+    "rt",
+  ]
+
+  # TODO(crbug.com/1082873): Remove after fixing usage of deprecated
+  # declarations.
+  cflags_cc = [ "-Wno-error=deprecated-declarations" ]
+
+  # TODO(deymo): Remove unused dependencies once we stop including files
+  # from the root directory.
+  all_dependent_pkg_deps = [
+    "libbspatch",
+    "libcrypto",
+    "libpuffpatch",
+    "xz-embedded",
+  ]
+  public_deps = [ ":update_metadata-protos" ]
+}
+
+# The main daemon static_library with all the code used to check for updates
+# with Omaha and expose a DBus daemon.
+static_library("libupdate_engine") {
+  sources = [
+    "certificate_checker.cc",
+    "common/connection_utils.cc",
+    "common/system_state.cc",
+    "cros/boot_control_chromeos.cc",
+    "cros/common_service.cc",
+    "cros/connection_manager.cc",
+    "cros/daemon_chromeos.cc",
+    "cros/dbus_connection.cc",
+    "cros/dbus_service.cc",
+    "cros/hardware_chromeos.cc",
+    "cros/image_properties_chromeos.cc",
+    "cros/logging.cc",
+    "cros/metrics_reporter_omaha.cc",
+    "cros/omaha_request_action.cc",
+    "cros/omaha_request_builder_xml.cc",
+    "cros/omaha_request_params.cc",
+    "cros/omaha_response_handler_action.cc",
+    "cros/omaha_utils.cc",
+    "cros/p2p_manager.cc",
+    "cros/payload_state.cc",
+    "cros/power_manager_chromeos.cc",
+    "cros/real_system_state.cc",
+    "cros/requisition_util.cc",
+    "cros/shill_proxy.cc",
+    "cros/update_attempter.cc",
+    "cros/download_action_chromeos.cc",
+    "libcurl_http_fetcher.cc",
+    "metrics_utils.cc",
+    "update_boot_flags_action.cc",
+    "update_manager/boxed_value.cc",
+    "update_manager/chromeos_policy.cc",
+    "update_manager/default_policy.cc",
+    "update_manager/enough_slots_ab_updates_policy_impl.cc",
+    "update_manager/enterprise_device_policy_impl.cc",
+    "update_manager/enterprise_rollback_policy_impl.cc",
+    "update_manager/evaluation_context.cc",
+    "update_manager/interactive_update_policy_impl.cc",
+    "update_manager/minimum_version_policy_impl.cc",
+    "update_manager/next_update_check_policy_impl.cc",
+    "update_manager/official_build_check_policy_impl.cc",
+    "update_manager/out_of_box_experience_policy_impl.cc",
+    "update_manager/policy.cc",
+    "update_manager/policy_test_utils.cc",
+    "update_manager/real_config_provider.cc",
+    "update_manager/real_device_policy_provider.cc",
+    "update_manager/real_random_provider.cc",
+    "update_manager/real_shill_provider.cc",
+    "update_manager/real_system_provider.cc",
+    "update_manager/real_time_provider.cc",
+    "update_manager/real_updater_provider.cc",
+    "update_manager/staging_utils.cc",
+    "update_manager/state_factory.cc",
+    "update_manager/update_manager.cc",
+    "update_manager/update_time_restrictions_monitor.cc",
+    "update_manager/update_time_restrictions_policy_impl.cc",
+    "update_manager/weekly_time.cc",
+    "update_status_utils.cc",
+  ]
+  configs += [ ":target_defaults" ]
+  libs = [
+    "bz2",
+    "policy",
+    "rootdev",
+    "rt",
+  ]
+  all_dependent_pkg_deps = [
+    "dbus-1",
+    "expat",
+    "libcurl",
+    "libdebugd-client",
+    "libmetrics",
+    "libpower_manager-client",
+    "libsession_manager-client",
+    "libshill-client",
+    "libssl",
+    "libupdate_engine-client",
+    "vboot_host",
+  ]
+  deps = [
+    ":libpayload_consumer",
+    ":update_engine-dbus-adaptor",
+    ":update_engine-dbus-kiosk-app-client",
+    ":update_metadata-protos",
+  ]
+
+  if (use.dlc) {
+    all_dependent_pkg_deps += [ "libdlcservice-client" ]
+  }
+
+  if (use.chrome_network_proxy) {
+    sources += [ "cros/chrome_browser_proxy_resolver.cc" ]
+  }
+
+  if (use.dlc) {
+    sources += [
+      "cros/dlcservice_chromeos.cc",
+      "cros/excluder_chromeos.cc",
+    ]
+  } else {
+    sources += [
+      "common/dlcservice_stub.cc",
+      "common/excluder_stub.cc",
+    ]
+  }
+}
+
+# update_engine daemon.
+executable("update_engine") {
+  sources = [ "main.cc" ]
+  configs += [ ":target_defaults" ]
+  deps = [ ":libupdate_engine" ]
+}
+
+# update_engine client library.
+static_library("libupdate_engine_client") {
+  sources = [
+    "client_library/client_dbus.cc",
+    "update_status_utils.cc",
+  ]
+  include_dirs = [ "client_library/include" ]
+  configs += [ ":target_defaults" ]
+  pkg_deps = [
+    "dbus-1",
+    "libupdate_engine-client",
+  ]
+}
+
+# update_engine console client.
+executable("update_engine_client") {
+  sources = [
+    "common/error_code_utils.cc",
+    "cros/omaha_utils.cc",
+    "cros/update_engine_client.cc",
+  ]
+  configs += [ ":target_defaults" ]
+  deps = [ ":libupdate_engine_client" ]
+}
+
+# server-side code. This is used for delta_generator and unittests but not
+# for any client code.
+static_library("libpayload_generator") {
+  sources = [
+    "common/file_fetcher.cc",
+    "common/system_state.cc",
+    "cros/real_system_state.cc",
+    "download_action.cc",
+    "payload_generator/ab_generator.cc",
+    "payload_generator/annotated_operation.cc",
+    "payload_generator/blob_file_writer.cc",
+    "payload_generator/block_mapping.cc",
+    "payload_generator/boot_img_filesystem_stub.cc",
+    "payload_generator/bzip.cc",
+    "payload_generator/cow_size_estimator_stub.cc",
+    "payload_generator/deflate_utils.cc",
+    "payload_generator/delta_diff_generator.cc",
+    "payload_generator/delta_diff_utils.cc",
+    "payload_generator/ext2_filesystem.cc",
+    "payload_generator/extent_ranges.cc",
+    "payload_generator/extent_utils.cc",
+    "payload_generator/full_update_generator.cc",
+    "payload_generator/mapfile_filesystem.cc",
+    "payload_generator/merge_sequence_generator.cc",
+    "payload_generator/payload_file.cc",
+    "payload_generator/payload_generation_config.cc",
+    "payload_generator/payload_generation_config_chromeos.cc",
+    "payload_generator/payload_properties.cc",
+    "payload_generator/payload_signer.cc",
+    "payload_generator/raw_filesystem.cc",
+    "payload_generator/squashfs_filesystem.cc",
+    "payload_generator/xz_chromeos.cc",
+  ]
+  configs += [ ":target_defaults" ]
+  all_dependent_pkg_deps = [
+    "ext2fs",
+    "libbsdiff",
+    "liblzma",
+    "libpuffdiff",
+  ]
+  deps = [
+    ":libpayload_consumer",
+    ":update_metadata-protos",
+  ]
+
+  # TODO(crbug.com/1082873): Remove after fixing usage of deprecated
+  # declarations.
+  cflags_cc = [ "-Wno-error=deprecated-declarations" ]
+}
+
+# server-side delta generator.
+executable("delta_generator") {
+  sources = [ "payload_generator/generate_delta_main.cc" ]
+  configs += [ ":target_defaults" ]
+  configs -= [ "//common-mk:pie" ]
+  deps = [
+    ":libpayload_consumer",
+    ":libpayload_generator",
+  ]
+}
+
+if (use.test || use.fuzzer) {
+  static_library("update_engine_test_libs") {
+    sources = [
+      "common/fake_prefs.cc",
+      "common/mock_http_fetcher.cc",
+      "common/test_utils.cc",
+      "cros/fake_shill_proxy.cc",
+      "cros/fake_system_state.cc",
+      "payload_consumer/fake_file_descriptor.cc",
+      "payload_generator/fake_filesystem.cc",
+      "update_manager/umtest_utils.cc",
+    ]
+
+    # TODO(crbug.com/887845): After library ordering issue is fixed,
+    # //common-mk:test can be moved in all_dependent_configs and
+    # //common-mk:test in each test configs can be removed.
+    configs += [
+      "//common-mk:test",
+      ":target_defaults",
+    ]
+    pkg_deps = [ "libshill-client-test" ]
+    deps = [ ":libupdate_engine" ]
+  }
+}
+
+if (use.test) {
+  # Public keys used for unit testing.
+  genopenssl_key("update_engine-testkeys") {
+    openssl_pem_in_dir = "."
+    openssl_pem_out_dir = "include/update_engine"
+    sources = [
+      "unittest_key.pem",
+      "unittest_key2.pem",
+      "unittest_key_RSA4096.pem",
+    ]
+  }
+
+  genopenssl_key("update_engine-testkeys-ec") {
+    openssl_pem_in_dir = "."
+    openssl_pem_out_dir = "include/update_engine"
+    openssl_pem_algorithm = "ec"
+    sources = [ "unittest_key_EC.pem" ]
+  }
+
+  # Unpacks sample images used for testing.
+  tar_bunzip2("update_engine-test_images") {
+    image_out_dir = "."
+    sources = [ "sample_images/sample_images.tar.bz2" ]
+  }
+
+  # Test HTTP Server.
+  executable("test_http_server") {
+    sources = [
+      "common/http_common.cc",
+      "test_http_server.cc",
+    ]
+
+    # //common-mk:test should be on the top.
+    # TODO(crbug.com/887845): Remove this after library ordering issue is fixed.
+    configs += [
+      "//common-mk:test",
+      ":target_defaults",
+    ]
+  }
+
+  # Test subprocess helper.
+  executable("test_subprocess") {
+    sources = [ "test_subprocess.cc" ]
+
+    # //common-mk:test should be on the top.
+    # TODO(crbug.com/887845): Remove this after library ordering issue is fixed.
+    configs += [
+      "//common-mk:test",
+      ":target_defaults",
+    ]
+  }
+
+  # Main unittest file.
+  executable("update_engine_unittests") {
+    sources = [
+      "certificate_checker_unittest.cc",
+      "common/action_pipe_unittest.cc",
+      "common/action_processor_unittest.cc",
+      "common/action_unittest.cc",
+      "common/cpu_limiter_unittest.cc",
+      "common/hash_calculator_unittest.cc",
+      "common/http_fetcher_unittest.cc",
+      "common/hwid_override_unittest.cc",
+      "common/prefs_unittest.cc",
+      "common/proxy_resolver_unittest.cc",
+      "common/subprocess_unittest.cc",
+      "common/terminator_unittest.cc",
+      "common/utils_unittest.cc",
+      "cros/boot_control_chromeos_unittest.cc",
+      "cros/common_service_unittest.cc",
+      "cros/connection_manager_unittest.cc",
+      "cros/hardware_chromeos_unittest.cc",
+      "cros/image_properties_chromeos_unittest.cc",
+      "cros/metrics_reporter_omaha_unittest.cc",
+      "cros/omaha_request_action_unittest.cc",
+      "cros/omaha_request_builder_xml_unittest.cc",
+      "cros/omaha_request_params_unittest.cc",
+      "cros/omaha_response_handler_action_unittest.cc",
+      "cros/omaha_utils_unittest.cc",
+      "cros/p2p_manager_unittest.cc",
+      "cros/payload_state_unittest.cc",
+      "cros/requisition_util_unittest.cc",
+      "cros/update_attempter_unittest.cc",
+      "cros/download_action_chromeos_unittest.cc",
+      "libcurl_http_fetcher_unittest.cc",
+      "metrics_utils_unittest.cc",
+      "payload_consumer/bzip_extent_writer_unittest.cc",
+      "payload_consumer/cached_file_descriptor_unittest.cc",
+      "payload_consumer/delta_performer_integration_test.cc",
+      "payload_consumer/delta_performer_unittest.cc",
+      "payload_consumer/extent_reader_unittest.cc",
+      "payload_consumer/extent_writer_unittest.cc",
+      "payload_consumer/file_descriptor_utils_unittest.cc",
+      "payload_consumer/file_writer_unittest.cc",
+      "payload_consumer/filesystem_verifier_action_unittest.cc",
+      "payload_consumer/install_plan_unittest.cc",
+      "payload_consumer/postinstall_runner_action_unittest.cc",
+      "payload_consumer/xz_extent_writer_unittest.cc",
+      "payload_generator/ab_generator_unittest.cc",
+      "payload_generator/blob_file_writer_unittest.cc",
+      "payload_generator/block_mapping_unittest.cc",
+      "payload_generator/deflate_utils_unittest.cc",
+      "payload_generator/delta_diff_utils_unittest.cc",
+      "payload_generator/ext2_filesystem_unittest.cc",
+      "payload_generator/extent_ranges_unittest.cc",
+      "payload_generator/extent_utils_unittest.cc",
+      "payload_generator/full_update_generator_unittest.cc",
+      "payload_generator/mapfile_filesystem_unittest.cc",
+      "payload_generator/merge_sequence_generator_unittest.cc",
+      "payload_generator/payload_file_unittest.cc",
+      "payload_generator/payload_generation_config_unittest.cc",
+      "payload_generator/payload_properties_unittest.cc",
+      "payload_generator/payload_signer_unittest.cc",
+      "payload_generator/squashfs_filesystem_unittest.cc",
+      "payload_generator/zip_unittest.cc",
+      "testrunner.cc",
+      "update_boot_flags_action_unittest.cc",
+      "update_manager/boxed_value_unittest.cc",
+      "update_manager/chromeos_policy_unittest.cc",
+      "update_manager/enterprise_device_policy_impl_unittest.cc",
+      "update_manager/enterprise_rollback_policy_impl_unittest.cc",
+      "update_manager/evaluation_context_unittest.cc",
+      "update_manager/generic_variables_unittest.cc",
+      "update_manager/minimum_version_policy_impl_unittest.cc",
+      "update_manager/prng_unittest.cc",
+      "update_manager/real_device_policy_provider_unittest.cc",
+      "update_manager/real_random_provider_unittest.cc",
+      "update_manager/real_shill_provider_unittest.cc",
+      "update_manager/real_system_provider_unittest.cc",
+      "update_manager/real_time_provider_unittest.cc",
+      "update_manager/real_updater_provider_unittest.cc",
+      "update_manager/staging_utils_unittest.cc",
+      "update_manager/update_manager_unittest.cc",
+      "update_manager/update_time_restrictions_monitor_unittest.cc",
+      "update_manager/update_time_restrictions_policy_impl_unittest.cc",
+      "update_manager/variable_unittest.cc",
+      "update_manager/weekly_time_unittest.cc",
+      "update_status_utils_unittest.cc",
+    ]
+    if (use.dlc) {
+      sources += [ "cros/excluder_chromeos_unittest.cc" ]
+    }
+
+    # //common-mk:test should be on the top.
+    # TODO(crbug.com/887845): Remove this after library ordering issue is fixed.
+    configs += [
+      "//common-mk:test",
+      ":target_defaults",
+    ]
+    pkg_deps = [
+      "libbrillo-test",
+      "libchrome-test",
+      "libdebugd-client-test",
+      "libpower_manager-client-test",
+      "libsession_manager-client-test",
+      "libshill-client-test",
+    ]
+    deps = [
+      ":libpayload_generator",
+      ":libupdate_engine",
+      ":update_engine_test_libs",
+    ]
+  }
+}
+
+# Fuzzer target.
+if (use.fuzzer) {
+  executable("update_engine_delta_performer_fuzzer") {
+    sources = [ "payload_consumer/delta_performer_fuzzer.cc" ]
+    configs += [
+      "//common-mk/common_fuzzer",
+      ":target_defaults",
+    ]
+    pkg_deps = [
+      "libbrillo-test",
+      "libchrome-test",
+    ]
+    deps = [
+      ":libupdate_engine",
+      ":update_engine_test_libs",
+    ]
+  }
+  executable("update_engine_omaha_request_action_fuzzer") {
+    sources = [ "cros/omaha_request_action_fuzzer.cc" ]
+    configs += [
+      "//common-mk/common_fuzzer",
+      ":target_defaults",
+    ]
+    pkg_deps = [
+      "libbrillo-test",
+      "libchrome-test",
+    ]
+    deps = [
+      ":libupdate_engine",
+      ":update_engine_test_libs",
+    ]
+  }
+}
diff --git a/COMMIT-QUEUE.ini b/COMMIT-QUEUE.ini
deleted file mode 100644
index ed99b9f..0000000
--- a/COMMIT-QUEUE.ini
+++ /dev/null
@@ -1,11 +0,0 @@
-# Copyright 2017 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# Per-project Commit Queue settings.
-# Documentation: http://goo.gl/5J7oND
-
-[GENERAL]
-
-# Moblab testing is needed because of the udpate_payloads ebuild.
-pre-cq-configs: default guado_moblab-no-vmtest-pre-cq
diff --git a/Doxyfile b/Doxyfile
new file mode 100644
index 0000000..db31f86
--- /dev/null
+++ b/Doxyfile
@@ -0,0 +1,9 @@
+CLANG_DATABASE_PATH=../../
+HAVE_DOT=YES
+CALL_GRAPH=YES
+CALLER_GRAPH=YES
+GENERATE_HTML=YES
+GENERATE_LATEX=NO
+INPUT=.
+RECURSIVE=YES
+
diff --git a/METADATA b/METADATA
new file mode 100644
index 0000000..d97975c
--- /dev/null
+++ b/METADATA
@@ -0,0 +1,3 @@
+third_party {
+  license_type: NOTICE
+}
diff --git a/OWNERS b/OWNERS
index 07ee38e..938752f 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1,10 +1,14 @@
 set noparent
 
-# Current general maintainers:
+# Android et al. maintainers:
 deymo@google.com
+elsk@google.com
 senj@google.com
+xunchang@google.com
 
 # Chromium OS maintainers:
-benchan@google.com
 ahassani@google.com
-xiaochu@google.com
+kimjae@google.com
+
+# Chromium OS only:
+# COMPONENT: Internals>Installer
diff --git a/PRESUBMIT.cfg b/PRESUBMIT.cfg
index f2c7831..42156b3 100644
--- a/PRESUBMIT.cfg
+++ b/PRESUBMIT.cfg
@@ -1,6 +1,6 @@
 [Hook Scripts]
-hook0=../../../../chromite/bin/cros lint ${PRESUBMIT_FILES}
-hook1=../../../platform2/common-mk/gyplint.py ${PRESUBMIT_FILES}
+cros lint = cros lint ${PRESUBMIT_FILES}
+gnlint = ../../../platform2/common-mk/gnlint.py ${PRESUBMIT_FILES}
 
 [Hook Overrides]
 clang_format_check: true
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
index 40ddcd1..85fd5ec 100644
--- a/PREUPLOAD.cfg
+++ b/PREUPLOAD.cfg
@@ -2,3 +2,6 @@
 clang_format = true
 cpplint = true
 pylint = true
+
+[Hook Scripts]
+protobuflint = ./protobuflint.py ${PREUPLOAD_COMMIT} ${PREUPLOAD_FILES}
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..71f271b
--- /dev/null
+++ b/README.md
@@ -0,0 +1,642 @@
+# Chrome OS Update Process
+
+[TOC]
+
+System updates in more modern operating systems like Chrome OS and Android are
+called A/B updates, over-the-air ([OTA]) updates, seamless updates, or simply
+auto updates. In contrast to more traditional system updates (as on Windows or
+macOS), where the system is booted into a special mode to overwrite the system
+partitions with the new update and the process may take several minutes or
+hours, A/B updates have several advantages, including but not limited to:
+
+*   Updates maintain a workable system that remains on the disk during and after
+    an update, reducing the likelihood of corrupting a device into a non-usable
+    state and the need for flashing devices manually or at repair and warranty
+    centers, etc.
+*   Updates can happen while the system is running (normally with minimal
+    overhead) without interrupting the user. The only downside for users is a
+    required reboot (or, in Chrome OS, a sign out, which automatically causes a
+    reboot if an update was performed; the reboot takes about 10 seconds and is
+    no different from a normal reboot).
+*   The user does not need to request an update (although they can). Update
+    checks happen periodically in the background.
+*   If the update fails to apply, the user is not affected. The user will
+    continue on the old version of the system and the system will attempt to
+    apply the update again at a later time.
+*   If the update applies correctly but fails to boot, the system will rollback
+    to the old partition and the user can still use the system as usual.
+*   The user does not need to reserve enough space for the update. The system
+    has already reserved enough space in terms of two copies (A and B) of a
+    partition. The system doesn’t even need any cache space on the disk;
+    everything happens seamlessly from network to memory to the inactive
+    partitions.
+
+## Life of an A/B Update
+
+In A/B update capable systems, each partition, such as the kernel or root (or
+other artifacts like [DLC]), has two copies. We call these two copies active (A)
+and inactive (B). The system is booted into the active partition (depending on
+which copy has the higher priority at boot time) and when a new update is
+available, it is written into the inactive partition. After a successful reboot,
+the previously inactive partition becomes active and the old active partition
+becomes inactive.
+
+But everything starts with generating update payloads in (Google) servers for
+each new system image. Once the update payloads are generated, they are signed
+with specific keys and stored in a location known to an update server (Omaha).
+
+When the updater client initiates an update (either periodically or user
+initiated), it first consults different device policies to see if the update
+check is allowed. For example, device policies can prevent an update check
+during certain times of the day, or require the update check time to be
+scattered randomly throughout the day, etc.
+
+Once policies allow for the update check, the updater client sends a request to
+the update server (all this communication happens over HTTPS) and identifies its
+parameters like its Application ID, hardware ID, version, board, etc. Then if
+the update server decides to serve an update payload, it will respond with all
+the parameters needed to perform an update like the URLs to download the
+payloads, the metadata signatures, the payload size and hash, etc. The updater
+client continues communicating with the update server after different state
+changes, like reporting that it started to download the payload, that it
+finished the update, or that the update failed with specific error codes, etc.
+
+Each payload consists of two main sections: metadata and extra data. The
+metadata is basically a list of operations that should be performed for an
+update. The extra data contains the data blobs needed by some or all of these
+operations. The updater client first downloads the metadata and
+cryptographically verifies it using the provided signatures from the update
+server’s response. Once the metadata is verified as valid, the rest of the
+payload can easily be verified cryptographically (mostly through SHA256 hashes).
+
+Next, the updater client marks the inactive partition as unbootable (because it
+needs to write the new updates into it). At this point the system cannot
+roll back to the inactive partition anymore.
+
+Then, the updater client performs the operations defined in the metadata (in the
+order they appear in the metadata) and the rest of the payload is gradually
+downloaded when these operations require their data. Once an operation is
+finished, its data is discarded. This eliminates the need for caching the entire
+payload before applying it. During this process the updater client periodically
+checkpoints the last operation performed so that, in the event of a failure or
+system shutdown, it can continue from where it left off without redoing all
+operations from the beginning.
+
+During the download, the updater client hashes the downloaded bytes and when the
+download finishes, it checks the payload signature (located at the end of the
+payload). If the signature cannot be verified, the update is rejected.
+
+After the inactive partition is updated, the entire partition is re-read, hashed
+and compared to a hash value passed in the metadata to make sure the update was
+successfully written into the partition.
+
+In the next step, the [Postinstall] process (if any) is called. The postinstall
+reconstructs the dm-verity tree hash of the ROOT partition and writes it at the
+end of the partition (after the last block of the file system). The postinstall
+can also perform any necessary board-specific or firmware update tasks. If
+postinstall fails, the entire update is considered failed.
+
+Then the updater client goes into a state that indicates the update has
+completed and the user needs to reboot the system. At this point, until the user
+reboots (or signs out), the updater client will not do any more system updates
+even if newer updates are available. However, it does continue to perform
+periodic update checks so we can have statistics on the number of active devices
+in the field.
+
+After the update has been applied successfully, the inactive partition is marked
+to have a higher priority (on a boot, the partition with the higher priority is
+booted first). Once the user reboots the system, it will boot into the updated
+partition, which is then marked as active. At this point, after the reboot, the
+updater client calls into the [`chromeos-setgoodkernel`] program. The program
+verifies the integrity of the system partitions using dm-verity and marks
+the active partition as healthy. At this point the system is basically updated
+successfully.
+
+## Update Engine Daemon
+
+`update_engine` is a single-threaded daemon process that runs all the
+time. This process is the heart of auto updates. It runs at a lower
+priority in the background and is one of the last processes to start after a
+system boot. Different clients (like Chrome or other services) can send requests
+for update checks to the update engine. The details of how requests are passed
+to the update engine are system dependent, but in Chrome OS it is D-Bus. Look at
+the [D-Bus interface] for a list of all available methods.
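+
+For example, on a Chrome OS device the daemon can be exercised from a shell with
+the `update_engine_client` tool, which talks to the daemon over this interface
+(a minimal sketch; the `--status` flag is an assumption here, while
+`--check_for_update` is used later in this document):
+
+```bash
+# Query the daemon for its current operation and progress.
+update_engine_client --status
+
+# Ask the daemon to perform an update check.
+update_engine_client --check_for_update
+```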
+
+There are many resiliency features embedded in the update engine that make auto
+updates robust, including but not limited to:
+
+*   If the update engine crashes, it will restart automatically.
+*   During an active update it periodically checkpoints the state of the update
+    and if it fails to continue the update or crashes in the middle, it will
+    continue from the last checkpoint.
+*   It retries failed network communication.
+*   If it fails to apply a delta payload (due to bit changes on the active
+    partition) a few times, it switches to a full payload.
+
+The updater client writes its active preferences in
+`/var/lib/update_engine/prefs`. These preferences help with tracking changes
+during the lifetime of the updater client and allow the update process to
+continue properly after failed attempts or crashes.
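+
+These preferences are plain files and can be inspected directly on a test
+device, for example:
+
+```bash
+# List the persisted updater state; the exact file names vary between versions.
+ls /var/lib/update_engine/prefs
+```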
+
+The core update engine code base in a Chromium OS checkout is located in
+`src/aosp/system/update_engine`, which is fetched from [this repository].
+
+### Policy Management
+
+In Chrome OS, devices are allowed to accept different policies from their
+managing organizations. Some of these policies affect how/when updates should be
+performed. For example, an organization may want to scatter the update checks
+during certain times of the day so as not to interfere with normal
+business. Within the update engine daemon, [UpdateManager] has the
+responsibility of loading such policies and making different decisions based on
+them. For example, some policies may allow the act of checking for updates to
+happen while preventing the download of the update payload, or they may not
+allow update checks within certain time frames, etc. Anything that
+relates to the Chrome OS update policies should be contained within the
+[update_manager] directory in the source code.
+
+### Rollback vs. Enterprise Rollback
+
+Chrome OS defines a concept of Rollback: whenever a newly updated system does
+not work as intended, under certain circumstances the device can be rolled
+back to a previously working version. There are two types of rollback supported
+in Chrome OS: A (legacy, original) rollback and an enterprise rollback (I know,
+naming is confusing).
+
+A normal rollback, which has existed for as long as Chrome OS has had an auto
+updater, is performed by switching the currently inactive partition into the
+active partition and rebooting into it. It is as simple as running a successful
+postinstall on the inactive partition and rebooting the device. It is a feature
+used by Chrome that happens under certain circumstances. Of course rollback
+can’t happen if the inactive partition has been tampered with or has been nuked
+by the updater client to install an even newer update. Normally a rollback is
+followed by a Powerwash, which clobbers the stateful partition.
+
+Enterprise rollback is a new feature added to allow enterprise users to
+downgrade the installed image to an older version. It is very similar to a
+normal system update, except that an older update payload is downloaded and
+installed. There is no direct API for entering into the enterprise rollback. It
+is managed by the enterprise device policies only.
+
+Developers should be careful when touching any rollback related feature and make
+sure they know exactly which of these two features they are trying to adapt.
+
+### Interactive vs. Non-Interactive vs. Forced Updates
+
+Non-interactive updates are updates that are scheduled periodically by the
+update engine and happen in the background. Interactive updates, on the other
+hand, happen when a user specifically requests an update check (e.g. by clicking
+the “Check For Update” button in Chrome OS’s About page). Depending on the update
+server's policies, interactive updates have a higher priority than non-interactive
+updates (they carry marker hints). The server may decide not to provide an update
+if it is under heavy load, etc. There are other internal differences between
+these two types of updates too. For example, interactive updates try to install
+the update faster.
+
+Forced updates are similar to interactive updates (initiated by some kind of
+user action), but they can also be configured to act as non-interactive. Since
+non-interactive updates happen periodically, a forced-non-interactive update
+causes a non-interactive update at the moment of the request, not at a later
+time. We can trigger a forced non-interactive update with:
+
+```bash
+update_engine_client --interactive=false --check_for_update
+```
+
+### P2P Updates
+
+Many organizations might not have the external bandwidth that system updates
+require for all their devices. To help with this, Chrome OS can act
+as a payload server to other client devices in the same network subnet. This is
+basically a peer-to-peer update system that allows the devices to download the
+update payloads from other devices in the network. This has to be enabled
+explicitly in the organization through device policies and specific network
+configurations in order for P2P updates to work. Regardless of the location
+of update payloads, all update requests go through the update servers over HTTPS.
+
+Check out the [P2P update related code] for both the server and the client side.
+
+### Network
+
+The updater client has the capability to download the payloads using Ethernet,
+WiFi, or Cellular networks depending on which one the device is connected
+to. Downloading over Cellular networks will prompt the user for permission, as
+it can consume a considerable amount of data.
+
+### Logs
+
+In Chrome OS the `update_engine` logs are located in the `/var/log/update_engine`
+directory. Whenever `update_engine` starts, it starts a new log file whose name
+includes the current date and time
+(`update_engine.log-DATE-TIME`). Many log files can be seen in
+`/var/log/update_engine` after a few restarts of the update engine or after the
+system reboots. The latest active log is symlinked to
+`/var/log/update_engine.log`.
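+
+For example, the active log can be followed through the symlink mentioned above:
+
+```bash
+# Follow the currently active update_engine log.
+tail -f /var/log/update_engine.log
+
+# List all rotated logs, oldest first.
+ls -ltr /var/log/update_engine/
+```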
+
+## Update Payload Generation
+
+Update payload generation is the process of converting a set of
+partitions/files into a format that is both understandable by the updater client
+(especially if it's a much older version) and securely verifiable. This
+process involves breaking the input partitions into smaller components and
+compressing them in order to reduce the network bandwidth needed to download the
+payloads.
+
+For each generated payload, there is a corresponding properties file which
+contains the metadata information of the payload in JSON format. Normally the
+file is located in the same location as the generated payload and its file name
+is the same as the payload file name plus a `.json`
+suffix, e.g. `/path/to/payload.bin` and `/path/to/payload.bin.json`. This
+properties file is necessary in order to do any kind of auto update in [`cros
+flash`], AU autotests, etc. Similarly, the update server uses this file to
+dispatch the payload properties to the updater clients.
+
+Once update payloads are generated, their original images cannot be changed
+anymore; otherwise, the update payloads may fail to apply.
+
+`delta_generator` is a tool with a wide range of options for generating
+different types of update payloads. Its code is located in
+`update_engine/payload_generator`. This directory contains all the source code
+related to the mechanics of generating an update payload. None of the files in
+this directory should be included or used in any other library/executable other
+than `delta_generator`, which means this directory does not get compiled into the
+rest of the update engine tools.
+
+However, it is not recommended to use `delta_generator` directly. For easier
+manual payload generation, [`cros_generate_update_payloads`] should be used. Most
+of the higher level policies and tools for generating payloads reside as a
+library in [`chromite/lib/paygen`]. Whenever calls to the update payload
+generation API are needed, this library should be used instead.
+
+### Update Payload File Specification
+
+Each update payload file has a specific structure defined in the table below:
+
+|Field|Size (bytes)|Type|Description|
+|-----|------------|----|-----------|
+|Magic Number|4|char[4]|Magic string "CrAU" identifying this is an update payload.|
+|Major Version|8|uint64|Payload major version number.|
+|Manifest Size|8|uint64|Manifest size in bytes.|
+|Manifest Signature Size|4|uint32|Manifest signature blob size in bytes (only in major version 2).|
+|Manifest|Varies|[DeltaArchiveManifest]|The list of operations to be performed.|
+|Manifest Signature|Varies|[Signatures]|The signature of the first five fields. There could be multiple signatures if the key has changed.|
+|Payload Data|Varies|List of raw or compressed data blobs|The list of binary blobs used by operations in the metadata.|
+|Payload Signature Size|Varies|uint64|The size of the payload signature.|
+|Payload Signature|Varies|[Signatures]|The signature of the entire payload except the metadata signature. There could be multiple signatures if the key has changed.|
+
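+As an illustration of the layout above, the fixed-size header fields can be
+read with a short Python sketch like the following. The field order and sizes
+come from the table; the big-endian (network order) integer encoding is an
+assumption of this sketch:
+
+```python
+#!/usr/bin/env python3
+# Sketch: read the fixed-size fields at the start of an update payload,
+# following the table above. Assumes big-endian (network order) integers.
+import struct
+import sys
+
+def read_payload_header(path: str) -> dict:
+    with open(path, 'rb') as f:
+        magic = f.read(4)                                # "CrAU"
+        major_version, = struct.unpack('>Q', f.read(8))  # uint64
+        manifest_size, = struct.unpack('>Q', f.read(8))  # uint64
+        manifest_signature_size = 0
+        if major_version == 2:
+            # The manifest signature size field only exists in major version 2.
+            manifest_signature_size, = struct.unpack('>I', f.read(4))
+    return {
+        'magic': magic,
+        'major_version': major_version,
+        'manifest_size': manifest_size,
+        'manifest_signature_size': manifest_signature_size,
+    }
+
+if __name__ == '__main__':
+    print(read_payload_header(sys.argv[1]))
+```
+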
+### Delta vs. Full Update Payloads
+
+There are two types of payload: full and delta. A full payload is generated
+solely from the target image (the image we want to update to) and has all the
+data necessary to update the inactive partition. Hence, full payloads can be
+quite large. A delta payload, on the other hand, is a differential update
+generated by comparing the source image (the active partitions) with the
+target image and producing the diffs between the two, similar in spirit to
+tools like `diff` or `bsdiff`. Hence, applying a delta payload requires the
+system to read parts of the active partition in order to update the inactive
+partition (or reconstruct the target partition). Delta payloads are
+significantly smaller than full payloads. The structure of the payload is the
+same for both types.
+
+Payload generation is quite resource intensive and its tools are implemented
+with high parallelism.
+
+#### Generating Full Payloads
+
+A full payload is generated by breaking the partition into 2MiB (configurable)
+chunks and either compressing them using the bzip2 or XZ algorithms or keeping
+them as raw data, depending on which produces the smaller result. Full
+payloads are much larger than delta payloads and hence require a longer
+download time if the network bandwidth is limited. On the other hand, full
+payloads are a bit faster to apply because the system doesn’t need to read
+data from the source partition.
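+
+A simplified sketch of that per-chunk decision is shown below, with Python's
+standard `bz2` and `lzma` modules standing in for the real bzip2/XZ encoders;
+the `REPLACE`, `REPLACE_BZ`, and `REPLACE_XZ` names are the full-payload
+operations mentioned later in this document:
+
+```python
+#!/usr/bin/env python3
+# Sketch: split a partition image into 2 MiB chunks and keep whichever of
+# raw/bzip2/XZ is smallest, as described above.
+import bz2
+import lzma
+
+CHUNK_SIZE = 2 * 1024 * 1024  # 2 MiB; configurable in the real tool.
+
+def best_encoding(chunk: bytes):
+    """Return (operation_name, blob) for the smallest representation."""
+    candidates = {
+        'REPLACE': chunk,                    # raw data
+        'REPLACE_BZ': bz2.compress(chunk),   # bzip2
+        'REPLACE_XZ': lzma.compress(chunk),  # XZ
+    }
+    return min(candidates.items(), key=lambda kv: len(kv[1]))
+
+def chunk_partition(image_path: str):
+    with open(image_path, 'rb') as f:
+        while chunk := f.read(CHUNK_SIZE):
+            yield best_encoding(chunk)
+```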
+
+#### Generating Delta Payloads
+
+Delta payloads are generated by looking at both the source and target images'
+data on a file and metadata basis (more precisely, at the file system level of
+each appropriate partition). The reason we can generate delta payloads is that
+Chrome OS partitions are read-only. So, with high certainty, we can assume the
+active partitions on the client’s device are bit-by-bit equal to the original
+partitions generated in the image generation/signing phase. The process for
+generating a delta payload is roughly as follows:
+
+1.  Find all the zero-filled blocks on the target partition and produce a
+    `ZERO` operation for them. The `ZERO` operation basically discards the
+    associated blocks (depending on the implementation).
+2.  Find all the blocks that have not changed between the source and target
+    partitions by directly comparing source and target blocks one-to-one, and
+    produce a `SOURCE_COPY` operation for them.
+3.  List all the files (and their associated blocks) in the source and target
+    partitions and remove the blocks (and files) for which operations were
+    already generated in the previous two steps. Treat the remaining metadata
+    (inodes, etc.) of each partition as a file.
+4.  If a file is new, generate a `REPLACE`, `REPLACE_XZ`, or `REPLACE_BZ`
+    operation for its data blocks depending on which one generates a smaller
+    data blob.
+5.  For each remaining file, compare the source and target blocks and produce
+    a `SOURCE_BSDIFF` or `PUFFDIFF` operation depending on which one generates
+    a smaller data blob. These two operations produce binary diffs between a
+    source and target data blob. (Look at [bsdiff] and [puffin] for details of
+    such binary differential programs!)
+6.  Sort the operations based on their target partitions’ block offset.
+7.  Optionally merge same or similar operations next to each other into larger
+    operations for better efficiency and potentially smaller payloads.
+
+Full payloads can only contain `REPLACE`, `REPLACE_BZ`, and `REPLACE_XZ`
+operations. Delta payloads can contain any operations.
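+
+The first two steps above (classifying zero-filled and unchanged blocks) can
+be sketched in Python as follows. The operation names and overall flow follow
+the list above; the 4 KiB block size and everything else in this snippet are
+simplifying assumptions:
+
+```python
+#!/usr/bin/env python3
+# Sketch of steps 1 and 2: scan the target partition for zero-filled blocks
+# (ZERO) and for blocks identical to the source (SOURCE_COPY). The file-level
+# diffing of steps 3-5 is omitted for brevity.
+BLOCK_SIZE = 4096  # Assumed filesystem block size for this sketch.
+
+def classify_blocks(src: bytes, dst: bytes):
+    zero_block = b'\x00' * BLOCK_SIZE
+    ops = []
+    for i in range(0, len(dst), BLOCK_SIZE):
+        block_index = i // BLOCK_SIZE
+        dst_block = dst[i:i + BLOCK_SIZE]
+        src_block = src[i:i + BLOCK_SIZE]
+        if dst_block == zero_block:
+            ops.append(('ZERO', block_index))
+        elif dst_block == src_block:
+            ops.append(('SOURCE_COPY', block_index))
+        else:
+            # Remaining blocks go through the REPLACE_*/SOURCE_BSDIFF/PUFFDIFF
+            # path described in steps 3-5 above.
+            ops.append(('NEEDS_DIFF', block_index))
+    return ops
+```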
+
+### Major and Minor Versions
+
+The major and minor versions specify the update payload file format and the
+capability of the updater client to accept certain types of update payloads
+respectively. These numbers are [hard coded] in the updater client.
+
+The major version is basically the update payload file version specified in
+the [update payload file specification] above (second field). Each updater
+client supports a range of major versions. Currently, there are only two major
+versions, 1 and 2, and both Chrome OS and Android are on major version 2
+(major version 1 is being deprecated). Whenever there are new additions that
+cannot fit in the [Manifest protobuf], we need to uprev the major
+version. Uprevving the major version should be done with utmost care because
+older clients do not know how to handle the newer version. Any major version
+uprev in Chrome OS should be associated with a GoldenEye stepping stone.
+
+The minor version defines the capability of the updater client to accept
+certain operations or perform certain actions. Each updater client supports a
+range of minor versions. For example, an updater client with minor version 4
+(or less) does not know how to handle a `PUFFDIFF` operation, so when
+generating a delta payload for an image whose updater client has minor
+version 4 (or less) we cannot produce `PUFFDIFF` operations for it. The
+payload generation process looks at the source image’s minor version to decide
+which types of operations it supports and only produces a payload that
+conforms to those restrictions. Similarly, if there is a bug in a client with
+a specific minor version, an uprev in the minor version helps avoid generating
+payloads that cause that bug to manifest. However, uprevving minor versions is
+also quite expensive in terms of maintainability and can be error prone, so
+one should exercise caution when making such a change.
+
+Minor versions are irrelevant in full payloads. Full payloads should always be
+applicable by very old clients. The reason is that updater clients may not
+send their current version, so if we had different types of full payloads, we
+would not know which version to serve to the client.
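+
+For instance, a sketch of how payload generation might gate operation types on
+the source image's minor version is shown below. It encodes the "minor
+version 4 or less cannot handle `PUFFDIFF`" example from above; the exact
+threshold constant and the function shape are assumptions of this sketch, not
+the real implementation:
+
+```python
+#!/usr/bin/env python3
+# Sketch: choose the diff operations allowed in a delta payload based on the
+# source image's updater client minor version, per the PUFFDIFF example above.
+MIN_MINOR_VERSION_FOR_PUFFDIFF = 5  # Clients at minor version <= 4 can't apply it.
+
+def allowed_diff_operations(source_minor_version: int) -> set:
+    ops = {'REPLACE', 'REPLACE_BZ', 'REPLACE_XZ', 'SOURCE_COPY', 'SOURCE_BSDIFF'}
+    if source_minor_version >= MIN_MINOR_VERSION_FOR_PUFFDIFF:
+        ops.add('PUFFDIFF')
+    return ops
+```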
+
+### Signed vs Unsigned Payloads
+
+Update payloads can be signed (with private/public key pairs) for use in
+production or be kept unsigned for use in testing. Tools like `delta_generator`
+help with generating metadata and payload hashes or signing the payloads given
+private keys.
+
+## update_payload Scripts
+
+[update_payload] contains a set of Python scripts used mostly to validate
+payload generation and application. We normally test update payloads using an
+actual device (live tests). The [`brillo_update_payload`] script can be used
+to generate a payload and test applying it on a host machine. These tests can
+be viewed as dynamic tests without the need for an actual device. Other
+`update_payload` scripts (like [`check_update_payload`]) can be used to
+statically check that a payload is in the correct state and that it applies
+correctly. These scripts actually apply the payload statically, without
+running the code in `payload_consumer`.
+
+## Postinstall
+
+[Postinstall] is a process called after the updater client writes the new
+image artifacts to the inactive partitions. One of postinstall's main
+responsibilities is to recreate the dm-verity tree hash at the end of the root
+partition. Among other things, it also installs new firmware updates and runs
+any board-specific processes. Postinstall runs in a separate chroot inside the
+newly installed partition, so it is quite separated from the rest of the
+active running system. Anything that needs to be done after an update and
+before the device is rebooted should be implemented inside postinstall.
+
+## Building Update Engine
+
+You can build `update_engine` the same way as other platform applications:
+
+```bash
+(chroot) $ emerge-${BOARD} update_engine
+```
+or to build without the source copy:
+
+```bash
+(chroot) $ cros_workon_make --board=${BOARD} update_engine
+```
+
+After a change in the `update_engine` daemon, either build an image and
+install it on the device using `cros flash`, etc., or use `cros deploy` to
+install only the `update_engine` service on the device:
+
+```bash
+(chroot) $ cros deploy update_engine
+```
+
+You need to restart the `update_engine` daemon for the changes to take
+effect:
+
+```bash
+# SSH into the device.
+restart update-engine # with a dash not underscore.
+```
+
+Other payload generation tools like `delta_generator` are board agnostic and
+only available in the SDK. So, in order to make any changes to
+`delta_generator`, you should build the SDK:
+
+```bash
+# Do it only once to start building the 9999 ebuild from ToT.
+(chroot) $ cros_workon --host start update_engine
+
+(chroot) $ sudo emerge update_engine
+```
+
+If you make any changes to the D-Bus interface, make sure the `system_api`,
+`update_engine-client`, and `update_engine` packages are marked to build from
+the 9999 ebuild and then build them in that order:
+
+```bash
+(chroot) $ emerge-${BOARD} system_api update_engine-client update_engine
+```
+
+If you make any changes to [`update_engine` protobufs] in the `system_api`,
+build the `system_api` package first.
+
+## Running Unit Tests
+
+[Running unit tests similar to other platforms]:
+
+```bash
+(chroot) $ FEATURES=test emerge-<board> update_engine
+```
+
+or
+
+```bash
+(chroot) $ cros_workon_make --board=<board> --test update_engine
+```
+
+or
+
+```bash
+(chroot) $ cros_run_unit_tests --board ${BOARD} --packages update_engine
+```
+
+The above commands run all the unit tests, but the `update_engine` package is
+quite large and running all of its unit tests takes a long time. To run all
+unit tests in a single test class, run:
+
+```bash
+(chroot) $ FEATURES=test \
+    P2_TEST_FILTER="*OmahaRequestActionTest.*-*RunAsRoot*" \
+    emerge-amd64-generic update_engine
+```
+
+To run one exact unit test fixture (e.g. `MultiAppUpdateTest`), run:
+
+```bash
+(chroot) $ FEATURES=test \
+    P2_TEST_FILTER="*OmahaRequestActionTest.MultiAppUpdateTest-*RunAsRoot*" \
+    emerge-amd64-generic update_engine
+```
+
+To run the `update_payload` unit tests, enter the `update_engine/scripts`
+directory and run the desired `unittest.py` files.
+
+## Initiating a Configured Update
+
+There are different methods to initiate an update:
+
+*   Click on the “Check For Update” button in the Settings’ About page. There
+    is no way to configure this type of update check.
+*   Use the [`update_engine_client`] program. It offers a few configuration
+    options.
+*   Call `autest` in crosh. This is mainly used by the QA team and is not
+    intended for use by other teams.
+*   Use [`cros flash`]. It internally uses `update_engine` to flash a device
+    with a given image.
+*   Run one of many auto update autotests.
+*   Start a [Dev Server] on your host machine and send a specific HTTP request
+    to it (look at the `cros_au` API in the Dev Server code) containing
+    information such as the IP address of your Chromebook and the location of
+    the update payloads in order to start an update on your device
+    (**Warning:** complicated to do, not recommended).
+
+`update_engine_client` is a client application that can help initiate an
+update or get more information about the status of the updater client. It has
+several options, like initiating an interactive vs. non-interactive update,
+changing channels, getting the current status of the update process, doing a
+rollback, changing the Omaha URL to download the payload from (the most
+important one), etc.
+
+The `update_engine` daemon reads the `/etc/lsb-release` file on the device to
+identify different update parameters, like the updater server (Omaha) URL, the
+current channel, etc. To override any of these parameters, create the file
+`/mnt/stateful_partition/etc/lsb-release` with the desired customized
+parameters. For example, this can be used to point to a developer version of
+the update server and allow `update_engine` to schedule a periodic update from
+that specific server.
+
+If you have changes in the protocol that communicates with Omaha but do not
+have those changes in the update server, or you have specific payloads that do
+not exist on the production update server, you can use [Nebraska] to help with
+performing an update.
+
+## Note to Developers and Maintainers
+
+When changing the update engine source code, be extra careful about these
+things:
+
+### Do NOT Break Backward Compatibility
+
+At each release cycle we should be able to generate full and delta payloads
+that can correctly be applied to older devices running older versions of the
+update engine client. So, for example, removing or not passing arguments in
+the metadata proto file might break older clients, and passing operations that
+are not understood by older clients will break them. Whenever changing
+anything in the payload generation process, ask yourself: would it work on
+older clients? If not, do I need to control it with minor versions or by other
+means?
+
+Especially regarding enterprise rollback, a newer updater client should be able
+to accept an older update payload. Normally this happens using a full payload,
+but care should be taken in order to not break this compatibility.
+
+### Think About The Future
+
+When creating a change in the update engine, think about 5 years from now:
+
+*   How can the change be implemented so that older clients five years from
+    now don’t break?
+*   How is it going to be maintained five years from now?
+*   How can it make future changes easier without breaking older clients or
+    incurring heavy maintenance costs?
+
+### Prefer Not To Implement Your Feature In The Updater Client
+
+If a feature can be implemented on the server side, do NOT implement it in the
+updater client, because the updater client can be fragile at points and small
+mistakes can have catastrophic consequences. For example, if a bug is
+introduced in the updater client that causes it to crash right before checking
+for an update and we don't catch this bug early in the release process, then
+production devices that have already moved to the new, buggy system may no
+longer receive automatic updates. So, always ask yourself: can the feature
+being implemented be done from the server side (with potentially minimal
+changes to the updater client)? Or can the feature be moved to another service
+with a minimal interface to the updater client? Answering these questions will
+pay off greatly in the future.
+
+### Be Respectful Of Other Code Bases
+
+The current update engine code base is used in several projects, including
+Android. We sync the code base between these projects frequently. Try not to
+break Android or other systems that share the update engine code. Whenever
+landing a change, always think about whether Android needs that change:
+
+*   How will it affect Android?
+*   Can the change be moved behind an interface with stub implementations so
+    as not to affect Android?
+*   Can Chrome OS or Android specific code be guarded by macros?
+
+As a basic measure, if adding/removing/renaming code, make sure to change both
+`build.gn` and `Android.bp`. Do not bring Chrome OS specific code (for
+example, libraries that live in `system_api` or `dlcservice`) into the common
+code of `update_engine`. Try to separate these concerns using best software
+engineering practices.
+
+### Merging from Android (or other code bases)
+
+Chrome OS tracks the Android code as an [upstream branch]. To merge the
+Android code into Chrome OS (or vice versa), just do a `git merge` of that
+branch into Chrome OS, test it using whatever means necessary, and upload a
+merge commit.
+
+```bash
+repo start merge-aosp
+git merge --no-ff --strategy=recursive -X patience cros/upstream
+repo upload --cbr --no-verify .
+```
+
+[Postinstall]: #postinstall
+[update payload file specification]: #update-payload-file-specification
+[OTA]: https://source.android.com/devices/tech/ota
+[DLC]: https://chromium.googlesource.com/chromiumos/platform2/+/master/dlcservice
+[`chromeos-setgoodkernel`]: https://chromium.googlesource.com/chromiumos/platform2/+/master/installer/chromeos-setgoodkernel
+[D-Bus interface]: /dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml
+[this repository]: /
+[UpdateManager]: /update_manager/update_manager.cc
+[update_manager]: /update_manager/
+[P2P update related code]: https://chromium.googlesource.com/chromiumos/platform2/+/master/p2p/
+[`cros_generate_update_payload`]: https://chromium.googlesource.com/chromiumos/chromite/+/master/scripts/cros_generate_update_payload.py
+[`chromite/lib/paygen`]: https://chromium.googlesource.com/chromiumos/chromite/+/master/lib/paygen/
+[DeltaArchiveManifest]: /update_metadata.proto#302
+[Signatures]: /update_metadata.proto#122
+[hard coded]: /update_engine.conf
+[Manifest protobuf]: /update_metadata.proto
+[update_payload]: /scripts/
+[Postinstall]: https://chromium.googlesource.com/chromiumos/platform2/+/master/installer/chromeos-postinst
+[`update_engine` protobufs]: https://chromium.googlesource.com/chromiumos/platform2/+/master/system_api/dbus/update_engine/
+[Running unit tests similar to other platforms]: https://chromium.googlesource.com/chromiumos/docs/+/master/testing/running_unit_tests.md
+[Nebraska]: https://chromium.googlesource.com/chromiumos/platform/dev-util/+/master/nebraska/
+[upstream branch]: https://chromium.googlesource.com/aosp/platform/system/update_engine/+/upstream
+[`cros flash`]: https://chromium.googlesource.com/chromiumos/docs/+/master/cros_flash.md
+[bsdiff]: https://android.googlesource.com/platform/external/bsdiff/+/master
+[puffin]: https://android.googlesource.com/platform/external/puffin/+/master
+[`update_engine_client`]: /update_engine_client.cc
+[`brillo_update_payload`]: /scripts/brillo_update_payload
+[`check_update_payload`]: /scripts/paycheck.py
+[Dev Server]: https://chromium.googlesource.com/chromiumos/chromite/+/master/docs/devserver.md
diff --git a/UpdateEngine.conf b/UpdateEngine.conf
index 9490096..f9a66dc 100644
--- a/UpdateEngine.conf
+++ b/UpdateEngine.conf
@@ -1,5 +1,20 @@
 <!DOCTYPE busconfig PUBLIC "-//freedesktop//DTD D-BUS Bus Configuration 1.0//EN"
   "http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
+<!--
+  Copyright (C) 2019 The Android Open Source Project
+
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+!-->
 <busconfig>
   <policy user="root">
     <allow own="org.chromium.UpdateEngine" />
@@ -26,7 +41,7 @@
            send_member="ResetStatus"/>
     <allow send_destination="org.chromium.UpdateEngine"
            send_interface="org.chromium.UpdateEngineInterface"
-           send_member="GetStatus"/>
+           send_member="GetStatusAdvanced"/>
     <allow send_destination="org.chromium.UpdateEngine"
            send_interface="org.chromium.UpdateEngineInterface"
            send_member="RebootIfNeeded"/>
@@ -66,15 +81,12 @@
     <allow send_destination="org.chromium.UpdateEngine"
            send_interface="org.chromium.UpdateEngineInterface"
            send_member="GetLastAttemptError"/>
-    <allow send_destination="org.chromium.UpdateEngine"
-           send_interface="org.chromium.UpdateEngineInterface"
-           send_member="GetEolStatus"/>
     <allow send_interface="org.chromium.UpdateEngineLibcrosProxyResolvedInterface" />
   </policy>
   <policy user="power">
     <allow send_destination="org.chromium.UpdateEngine"
            send_interface="org.chromium.UpdateEngineInterface"
-           send_member="GetStatus"/>
+           send_member="GetStatusAdvanced"/>
   </policy>
   <policy user="dlcservice">
     <allow send_destination="org.chromium.UpdateEngine"
@@ -82,6 +94,12 @@
            send_member="GetStatus"/>
     <allow send_destination="org.chromium.UpdateEngine"
            send_interface="org.chromium.UpdateEngineInterface"
+           send_member="GetStatusAdvanced"/>
+    <allow send_destination="org.chromium.UpdateEngine"
+           send_interface="org.chromium.UpdateEngineInterface"
            send_member="AttemptInstall"/>
+    <allow send_destination="org.chromium.UpdateEngine"
+           send_interface="org.chromium.UpdateEngineInterface"
+           send_member="SetDlcActiveValue"/>
   </policy>
 </busconfig>
diff --git a/aosp/apex_handler_android.cc b/aosp/apex_handler_android.cc
new file mode 100644
index 0000000..8beef96
--- /dev/null
+++ b/aosp/apex_handler_android.cc
@@ -0,0 +1,110 @@
+//
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <memory>
+#include <utility>
+
+#include <base/files/file_util.h>
+
+#include <ApexProperties.sysprop.h>
+
+#include "update_engine/aosp/apex_handler_android.h"
+#include "update_engine/common/utils.h"
+
+namespace chromeos_update_engine {
+
+namespace {
+
+android::apex::CompressedApexInfoList CreateCompressedApexInfoList(
+    const std::vector<ApexInfo>& apex_infos) {
+  android::apex::CompressedApexInfoList compressed_apex_info_list;
+  for (const auto& apex_info : apex_infos) {
+    if (!apex_info.is_compressed()) {
+      continue;
+    }
+    android::apex::CompressedApexInfo compressed_apex_info;
+    compressed_apex_info.moduleName = apex_info.package_name();
+    compressed_apex_info.versionCode = apex_info.version();
+    compressed_apex_info.decompressedSize = apex_info.decompressed_size();
+    compressed_apex_info_list.apexInfos.emplace_back(
+        std::move(compressed_apex_info));
+  }
+  return compressed_apex_info_list;
+}
+
+}  // namespace
+
+std::unique_ptr<ApexHandlerInterface> CreateApexHandler() {
+  if (android::sysprop::ApexProperties::updatable().value_or(false)) {
+    return std::make_unique<ApexHandlerAndroid>();
+  } else {
+    return std::make_unique<FlattenedApexHandlerAndroid>();
+  }
+}
+
+android::base::Result<uint64_t> ApexHandlerAndroid::CalculateSize(
+    const std::vector<ApexInfo>& apex_infos) const {
+  // We might not need to decompress every APEX. Communicate with apexd to get
+  // accurate requirement.
+  auto apex_service = GetApexService();
+  if (apex_service == nullptr) {
+    return android::base::Error() << "Failed to get hold of apexservice";
+  }
+
+  auto compressed_apex_info_list = CreateCompressedApexInfoList(apex_infos);
+  int64_t size_from_apexd;
+  auto result = apex_service->calculateSizeForCompressedApex(
+      compressed_apex_info_list, &size_from_apexd);
+  if (!result.isOk()) {
+    return android::base::Error()
+           << "Failed to get size required from apexservice";
+  }
+  return size_from_apexd;
+}
+
+bool ApexHandlerAndroid::AllocateSpace(
+    const std::vector<ApexInfo>& apex_infos) const {
+  auto apex_service = GetApexService();
+  if (apex_service == nullptr) {
+    return false;
+  }
+  auto compressed_apex_info_list = CreateCompressedApexInfoList(apex_infos);
+  auto result =
+      apex_service->reserveSpaceForCompressedApex(compressed_apex_info_list);
+  return result.isOk();
+}
+
+android::sp<android::apex::IApexService> ApexHandlerAndroid::GetApexService()
+    const {
+  auto binder = android::defaultServiceManager()->waitForService(
+      android::String16("apexservice"));
+  if (binder == nullptr) {
+    return nullptr;
+  }
+  return android::interface_cast<android::apex::IApexService>(binder);
+}
+
+android::base::Result<uint64_t> FlattenedApexHandlerAndroid::CalculateSize(
+    const std::vector<ApexInfo>& apex_infos) const {
+  return 0;
+}
+
+bool FlattenedApexHandlerAndroid::AllocateSpace(
+    const std::vector<ApexInfo>& apex_infos) const {
+  return true;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/aosp/apex_handler_android.h b/aosp/apex_handler_android.h
new file mode 100644
index 0000000..767f561
--- /dev/null
+++ b/aosp/apex_handler_android.h
@@ -0,0 +1,53 @@
+//
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef SYSTEM_UPDATE_ENGINE_AOSP_APEX_HANDLER_ANDROID_H_
+#define SYSTEM_UPDATE_ENGINE_AOSP_APEX_HANDLER_ANDROID_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <android/apex/IApexService.h>
+#include <binder/IServiceManager.h>
+
+#include "update_engine/aosp/apex_handler_interface.h"
+#include "update_engine/update_metadata.pb.h"
+
+namespace chromeos_update_engine {
+
+std::unique_ptr<ApexHandlerInterface> CreateApexHandler();
+
+class ApexHandlerAndroid : virtual public ApexHandlerInterface {
+ public:
+  android::base::Result<uint64_t> CalculateSize(
+      const std::vector<ApexInfo>& apex_infos) const;
+  bool AllocateSpace(const std::vector<ApexInfo>& apex_infos) const;
+
+ private:
+  android::sp<android::apex::IApexService> GetApexService() const;
+};
+
+class FlattenedApexHandlerAndroid : virtual public ApexHandlerInterface {
+ public:
+  android::base::Result<uint64_t> CalculateSize(
+      const std::vector<ApexInfo>& apex_infos) const;
+  bool AllocateSpace(const std::vector<ApexInfo>& apex_infos) const;
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // SYSTEM_UPDATE_ENGINE_AOSP_APEX_HANDLER_ANDROID_H_
diff --git a/aosp/apex_handler_android_unittest.cc b/aosp/apex_handler_android_unittest.cc
new file mode 100644
index 0000000..847ccaa
--- /dev/null
+++ b/aosp/apex_handler_android_unittest.cc
@@ -0,0 +1,102 @@
+//
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <utility>
+#include <filesystem>
+
+#include "update_engine/aosp/apex_handler_android.h"
+
+#include <android-base/file.h>
+#include <android-base/strings.h>
+#include <gtest/gtest.h>
+
+using android::base::EndsWith;
+
+namespace chromeos_update_engine {
+
+namespace fs = std::filesystem;
+
+ApexInfo CreateApexInfo(const std::string& package_name,
+                        int version,
+                        bool is_compressed,
+                        int decompressed_size) {
+  ApexInfo result;
+  result.set_package_name(package_name);
+  result.set_version(version);
+  result.set_is_compressed(is_compressed);
+  result.set_decompressed_size(decompressed_size);
+  return std::move(result);
+}
+
+TEST(ApexHandlerAndroidTest, CalculateSizeUpdatableApex) {
+  ApexHandlerAndroid apex_handler;
+  std::vector<ApexInfo> apex_infos;
+  ApexInfo compressed_apex_1 = CreateApexInfo("sample1", 1, true, 1);
+  ApexInfo compressed_apex_2 = CreateApexInfo("sample2", 2, true, 2);
+  ApexInfo uncompressed_apex = CreateApexInfo("uncompressed", 1, false, 4);
+  apex_infos.push_back(compressed_apex_1);
+  apex_infos.push_back(compressed_apex_2);
+  apex_infos.push_back(uncompressed_apex);
+  auto result = apex_handler.CalculateSize(apex_infos);
+  ASSERT_TRUE(result.ok());
+  ASSERT_EQ(*result, 3u);
+}
+
+TEST(ApexHandlerAndroidTest, AllocateSpaceUpdatableApex) {
+  ApexHandlerAndroid apex_handler;
+  std::vector<ApexInfo> apex_infos;
+  ApexInfo compressed_apex_1 = CreateApexInfo("sample1", 1, true, 1);
+  ApexInfo compressed_apex_2 = CreateApexInfo("sample2", 2, true, 2);
+  ApexInfo uncompressed_apex = CreateApexInfo("uncompressed", 1, false, 4);
+  apex_infos.push_back(compressed_apex_1);
+  apex_infos.push_back(compressed_apex_2);
+  apex_infos.push_back(uncompressed_apex);
+  ASSERT_TRUE(apex_handler.AllocateSpace(apex_infos));
+
+  // Should be able to pass empty list
+  ASSERT_TRUE(apex_handler.AllocateSpace({}));
+}
+
+TEST(ApexHandlerAndroidTest, CalculateSizeFlattenedApex) {
+  FlattenedApexHandlerAndroid apex_handler;
+  std::vector<ApexInfo> apex_infos;
+  ApexInfo compressed_apex_1 = CreateApexInfo("sample1", 1, true, 1);
+  ApexInfo compressed_apex_2 = CreateApexInfo("sample2", 2, true, 2);
+  ApexInfo uncompressed_apex = CreateApexInfo("uncompressed", 1, false, 4);
+  apex_infos.push_back(compressed_apex_1);
+  apex_infos.push_back(compressed_apex_2);
+  apex_infos.push_back(uncompressed_apex);
+  auto result = apex_handler.CalculateSize(apex_infos);
+  ASSERT_TRUE(result.ok());
+  ASSERT_EQ(*result, 0u);
+}
+
+TEST(ApexHandlerAndroidTest, AllocateSpaceFlattenedApex) {
+  FlattenedApexHandlerAndroid apex_handler;
+  std::vector<ApexInfo> apex_infos;
+  ApexInfo compressed_apex_1 = CreateApexInfo("sample1", 1, true, 1);
+  ApexInfo compressed_apex_2 = CreateApexInfo("sample2", 2, true, 2);
+  ApexInfo uncompressed_apex = CreateApexInfo("uncompressed", 1, false, 4);
+  apex_infos.push_back(compressed_apex_1);
+  apex_infos.push_back(compressed_apex_2);
+  apex_infos.push_back(uncompressed_apex);
+  ASSERT_TRUE(apex_handler.AllocateSpace(apex_infos));
+
+  // Should be able to pass empty list
+  ASSERT_TRUE(apex_handler.AllocateSpace({}));
+}
+
+}  // namespace chromeos_update_engine
diff --git a/aosp/apex_handler_interface.h b/aosp/apex_handler_interface.h
new file mode 100644
index 0000000..b9b6c96
--- /dev/null
+++ b/aosp/apex_handler_interface.h
@@ -0,0 +1,38 @@
+//
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef SYSTEM_UPDATE_ENGINE_AOSP_APEX_HANDLER_INTERFACE_H_
+#define SYSTEM_UPDATE_ENGINE_AOSP_APEX_HANDLER_INTERFACE_H_
+
+#include <vector>
+
+#include <android-base/result.h>
+
+#include "update_engine/update_metadata.pb.h"
+
+namespace chromeos_update_engine {
+
+class ApexHandlerInterface {
+ public:
+  virtual ~ApexHandlerInterface() = default;
+  virtual android::base::Result<uint64_t> CalculateSize(
+      const std::vector<ApexInfo>& apex_infos) const = 0;
+  virtual bool AllocateSpace(const std::vector<ApexInfo>& apex_infos) const = 0;
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // SYSTEM_UPDATE_ENGINE_AOSP_APEX_HANDLER_INTERFACE_H_
diff --git a/binder_service_android.cc b/aosp/binder_service_android.cc
similarity index 94%
rename from binder_service_android.cc
rename to aosp/binder_service_android.cc
index 6b8a552..ed76c4a 100644
--- a/binder_service_android.cc
+++ b/aosp/binder_service_android.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/binder_service_android.h"
+#include "update_engine/aosp/binder_service_android.h"
 
 #include <memory>
 
@@ -24,6 +24,8 @@
 #include <brillo/errors/error.h>
 #include <utils/String8.h>
 
+#include "update_engine/aosp/binder_service_android_common.h"
+
 using android::binder::Status;
 using android::os::IUpdateEngineCallback;
 using android::os::ParcelFileDescriptor;
@@ -31,23 +33,6 @@
 using std::vector;
 using update_engine::UpdateEngineStatus;
 
-namespace {
-Status ErrorPtrToStatus(const brillo::ErrorPtr& error) {
-  return Status::fromServiceSpecificError(
-      1, android::String8{error->GetMessage().c_str()});
-}
-
-vector<string> ToVecString(const vector<android::String16>& inp) {
-  vector<string> out;
-  out.reserve(inp.size());
-  for (const auto& e : inp) {
-    out.emplace_back(android::String8{e}.string());
-  }
-  return out;
-}
-
-}  // namespace
-
 namespace chromeos_update_engine {
 
 BinderUpdateEngineAndroidService::BinderUpdateEngineAndroidService(
diff --git a/binder_service_android.h b/aosp/binder_service_android.h
similarity index 92%
rename from binder_service_android.h
rename to aosp/binder_service_android.h
index 5f28225..f41fbdf 100644
--- a/binder_service_android.h
+++ b/aosp/binder_service_android.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_BINDER_SERVICE_ANDROID_H_
-#define UPDATE_ENGINE_BINDER_SERVICE_ANDROID_H_
+#ifndef UPDATE_ENGINE_AOSP_BINDER_SERVICE_ANDROID_H_
+#define UPDATE_ENGINE_AOSP_BINDER_SERVICE_ANDROID_H_
 
 #include <stdint.h>
 
@@ -28,8 +28,8 @@
 
 #include "android/os/BnUpdateEngine.h"
 #include "android/os/IUpdateEngineCallback.h"
-#include "update_engine/service_delegate_android_interface.h"
-#include "update_engine/service_observer_interface.h"
+#include "update_engine/aosp/service_delegate_android_interface.h"
+#include "update_engine/common/service_observer_interface.h"
 
 namespace chromeos_update_engine {
 
@@ -96,4 +96,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_BINDER_SERVICE_ANDROID_H_
+#endif  // UPDATE_ENGINE_AOSP_BINDER_SERVICE_ANDROID_H_
diff --git a/aosp/binder_service_android_common.h b/aosp/binder_service_android_common.h
new file mode 100644
index 0000000..223b32e
--- /dev/null
+++ b/aosp/binder_service_android_common.h
@@ -0,0 +1,45 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_AOSP_BINDER_SERVICE_ANDROID_COMMON_H_
+#define UPDATE_ENGINE_AOSP_BINDER_SERVICE_ANDROID_COMMON_H_
+
+#include <string>
+#include <vector>
+
+#include <binder/Status.h>
+
+namespace chromeos_update_engine {
+
+static inline android::binder::Status ErrorPtrToStatus(
+    const brillo::ErrorPtr& error) {
+  return android::binder::Status::fromServiceSpecificError(
+      1, android::String8{error->GetMessage().c_str()});
+}
+
+static inline std::vector<std::string> ToVecString(
+    const std::vector<android::String16>& inp) {
+  std::vector<std::string> out;
+  out.reserve(inp.size());
+  for (const auto& e : inp) {
+    out.emplace_back(android::String8{e}.string());
+  }
+  return out;
+}
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_AOSP_BINDER_SERVICE_ANDROID_COMMON_H_
diff --git a/aosp/binder_service_stable_android.cc b/aosp/binder_service_stable_android.cc
new file mode 100644
index 0000000..17b35ee
--- /dev/null
+++ b/aosp/binder_service_stable_android.cc
@@ -0,0 +1,132 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/aosp/binder_service_stable_android.h"
+
+#include <memory>
+
+#include <base/bind.h>
+#include <base/logging.h>
+#include <binderwrapper/binder_wrapper.h>
+#include <brillo/errors/error.h>
+#include <utils/String8.h>
+
+#include "update_engine/aosp/binder_service_android_common.h"
+
+using android::binder::Status;
+using android::os::IUpdateEngineStableCallback;
+using android::os::ParcelFileDescriptor;
+using std::string;
+using std::vector;
+using update_engine::UpdateEngineStatus;
+
+namespace chromeos_update_engine {
+
+BinderUpdateEngineAndroidStableService::BinderUpdateEngineAndroidStableService(
+    ServiceDelegateAndroidInterface* service_delegate)
+    : service_delegate_(service_delegate) {}
+
+void BinderUpdateEngineAndroidStableService::SendStatusUpdate(
+    const UpdateEngineStatus& update_engine_status) {
+  last_status_ = static_cast<int>(update_engine_status.status);
+  last_progress_ = update_engine_status.progress;
+  if (callback_) {
+    callback_->onStatusUpdate(last_status_, last_progress_);
+  }
+}
+
+void BinderUpdateEngineAndroidStableService::SendPayloadApplicationComplete(
+    ErrorCode error_code) {
+  if (callback_) {
+    callback_->onPayloadApplicationComplete(static_cast<int>(error_code));
+  }
+}
+
+Status BinderUpdateEngineAndroidStableService::bind(
+    const android::sp<IUpdateEngineStableCallback>& callback,
+    bool* return_value) {
+  // Reject binding if another callback is already bound.
+  if (callback_ != nullptr) {
+    LOG(ERROR) << "Another callback is already bound. Can't bind new callback.";
+    *return_value = false;
+    return Status::ok();
+  }
+
+  // See BinderUpdateEngineAndroidService::bind.
+  if (last_status_ != -1) {
+    auto status = callback->onStatusUpdate(last_status_, last_progress_);
+    if (!status.isOk()) {
+      LOG(ERROR) << "Failed to call onStatusUpdate() from callback: "
+                 << status.toString8();
+      *return_value = false;
+      return Status::ok();
+    }
+  }
+
+  callback_ = callback;
+
+  const android::sp<IBinder>& callback_binder =
+      IUpdateEngineStableCallback::asBinder(callback);
+  auto binder_wrapper = android::BinderWrapper::Get();
+  binder_wrapper->RegisterForDeathNotifications(
+      callback_binder,
+      base::Bind(base::IgnoreResult(
+                     &BinderUpdateEngineAndroidStableService::UnbindCallback),
+                 base::Unretained(this),
+                 base::Unretained(callback_binder.get())));
+
+  *return_value = true;
+  return Status::ok();
+}
+
+Status BinderUpdateEngineAndroidStableService::unbind(
+    const android::sp<IUpdateEngineStableCallback>& callback,
+    bool* return_value) {
+  const android::sp<IBinder>& callback_binder =
+      IUpdateEngineStableCallback::asBinder(callback);
+  auto binder_wrapper = android::BinderWrapper::Get();
+  binder_wrapper->UnregisterForDeathNotifications(callback_binder);
+
+  *return_value = UnbindCallback(callback_binder.get());
+  return Status::ok();
+}
+
+Status BinderUpdateEngineAndroidStableService::applyPayloadFd(
+    const ParcelFileDescriptor& pfd,
+    int64_t payload_offset,
+    int64_t payload_size,
+    const vector<android::String16>& header_kv_pairs) {
+  vector<string> str_headers = ToVecString(header_kv_pairs);
+
+  brillo::ErrorPtr error;
+  if (!service_delegate_->ApplyPayload(
+          pfd.get(), payload_offset, payload_size, str_headers, &error)) {
+    return ErrorPtrToStatus(error);
+  }
+  return Status::ok();
+}
+
+bool BinderUpdateEngineAndroidStableService::UnbindCallback(
+    const IBinder* callback) {
+  if (IUpdateEngineStableCallback::asBinder(callback_).get() != callback) {
+    LOG(ERROR) << "Unable to unbind unknown callback.";
+    return false;
+  }
+  callback_ = nullptr;
+  return true;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/aosp/binder_service_stable_android.h b/aosp/binder_service_stable_android.h
new file mode 100644
index 0000000..212afaa
--- /dev/null
+++ b/aosp/binder_service_stable_android.h
@@ -0,0 +1,85 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_AOSP_BINDER_SERVICE_STABLE_ANDROID_H_
+#define UPDATE_ENGINE_AOSP_BINDER_SERVICE_STABLE_ANDROID_H_
+
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include <utils/Errors.h>
+#include <utils/String16.h>
+#include <utils/StrongPointer.h>
+
+#include "android/os/BnUpdateEngineStable.h"
+#include "android/os/IUpdateEngineStableCallback.h"
+#include "update_engine/aosp/service_delegate_android_interface.h"
+#include "update_engine/common/service_observer_interface.h"
+
+namespace chromeos_update_engine {
+
+class BinderUpdateEngineAndroidStableService
+    : public android::os::BnUpdateEngineStable,
+      public ServiceObserverInterface {
+ public:
+  explicit BinderUpdateEngineAndroidStableService(
+      ServiceDelegateAndroidInterface* service_delegate);
+  ~BinderUpdateEngineAndroidStableService() override = default;
+
+  const char* ServiceName() const {
+    return "android.os.UpdateEngineStableService";
+  }
+
+  // ServiceObserverInterface overrides.
+  void SendStatusUpdate(
+      const update_engine::UpdateEngineStatus& update_engine_status) override;
+  void SendPayloadApplicationComplete(ErrorCode error_code) override;
+
+  // android::os::BnUpdateEngineStable overrides.
+  android::binder::Status applyPayloadFd(
+      const ::android::os::ParcelFileDescriptor& pfd,
+      int64_t payload_offset,
+      int64_t payload_size,
+      const std::vector<android::String16>& header_kv_pairs) override;
+  android::binder::Status bind(
+      const android::sp<android::os::IUpdateEngineStableCallback>& callback,
+      bool* return_value) override;
+  android::binder::Status unbind(
+      const android::sp<android::os::IUpdateEngineStableCallback>& callback,
+      bool* return_value) override;
+
+ private:
+  // Remove the passed |callback| from the list of registered callbacks. Called
+  // on unbind() or whenever the callback object is destroyed.
+  // Returns true on success.
+  bool UnbindCallback(const IBinder* callback);
+
+  // Bound callback. The stable interface only supports one callback at a time.
+  android::sp<android::os::IUpdateEngineStableCallback> callback_;
+
+  // Cached copy of the last status update sent. Used to send an initial
+  // notification when bind() is called from the client.
+  int last_status_{-1};
+  double last_progress_{0.0};
+
+  ServiceDelegateAndroidInterface* service_delegate_;
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_AOSP_BINDER_SERVICE_STABLE_ANDROID_H_
diff --git a/boot_control_android.cc b/aosp/boot_control_android.cc
similarity index 78%
rename from boot_control_android.cc
rename to aosp/boot_control_android.cc
index ec2ca0f..c1ac0d4 100644
--- a/boot_control_android.cc
+++ b/aosp/boot_control_android.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/boot_control_android.h"
+#include "update_engine/aosp/boot_control_android.h"
 
 #include <memory>
 #include <utility>
@@ -25,13 +25,11 @@
 #include <bootloader_message/bootloader_message.h>
 #include <brillo/message_loops/message_loop.h>
 
+#include "update_engine/aosp/dynamic_partition_control_android.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/dynamic_partition_control_android.h"
 
 using std::string;
 
-using android::dm::DmDeviceState;
-using android::hardware::hidl_string;
 using android::hardware::Return;
 using android::hardware::boot::V1_0::BoolResult;
 using android::hardware::boot::V1_0::CommandResult;
@@ -69,7 +67,8 @@
 
   LOG(INFO) << "Loaded boot control hidl hal.";
 
-  dynamic_control_ = std::make_unique<DynamicPartitionControlAndroid>();
+  dynamic_control_ =
+      std::make_unique<DynamicPartitionControlAndroid>(GetCurrentSlot());
 
   return true;
 }
@@ -82,12 +81,24 @@
   return module_->getCurrentSlot();
 }
 
+bool BootControlAndroid::GetPartitionDevice(const std::string& partition_name,
+                                            BootControlInterface::Slot slot,
+                                            bool not_in_payload,
+                                            std::string* device,
+                                            bool* is_dynamic) const {
+  return dynamic_control_->GetPartitionDevice(partition_name,
+                                              slot,
+                                              GetCurrentSlot(),
+                                              not_in_payload,
+                                              device,
+                                              is_dynamic);
+}
 
 bool BootControlAndroid::GetPartitionDevice(const string& partition_name,
-                                            Slot slot,
+                                            BootControlInterface::Slot slot,
                                             string* device) const {
-  return dynamic_control_->GetPartitionDevice(
-      partition_name, slot, GetCurrentSlot(), device);
+  return GetPartitionDevice(
+      partition_name, slot, false /* not_in_payload */, device, nullptr);
 }
 
 bool BootControlAndroid::IsSlotBootable(Slot slot) const {
@@ -171,4 +182,12 @@
   return dynamic_control_.get();
 }
 
+std::optional<PartitionDevice> BootControlAndroid::GetPartitionDevice(
+    const std::string& partition_name,
+    uint32_t slot,
+    uint32_t current_slot,
+    bool not_in_payload) const {
+  return dynamic_control_->GetPartitionDevice(
+      partition_name, slot, current_slot, not_in_payload);
+}
 }  // namespace chromeos_update_engine
diff --git a/boot_control_android.h b/aosp/boot_control_android.h
similarity index 77%
rename from boot_control_android.h
rename to aosp/boot_control_android.h
index 0b042e3..926023a 100644
--- a/boot_control_android.h
+++ b/aosp/boot_control_android.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_BOOT_CONTROL_ANDROID_H_
-#define UPDATE_ENGINE_BOOT_CONTROL_ANDROID_H_
+#ifndef UPDATE_ENGINE_AOSP_BOOT_CONTROL_ANDROID_H_
+#define UPDATE_ENGINE_AOSP_BOOT_CONTROL_ANDROID_H_
 
 #include <map>
 #include <memory>
@@ -24,9 +24,9 @@
 #include <android/hardware/boot/1.0/IBootControl.h>
 #include <liblp/builder.h>
 
+#include "update_engine/aosp/dynamic_partition_control_android.h"
 #include "update_engine/common/boot_control.h"
 #include "update_engine/common/dynamic_partition_control_interface.h"
-#include "update_engine/dynamic_partition_control_android.h"
 
 namespace chromeos_update_engine {
 
@@ -44,6 +44,16 @@
   // BootControlInterface overrides.
   unsigned int GetNumSlots() const override;
   BootControlInterface::Slot GetCurrentSlot() const override;
+  std::optional<PartitionDevice> GetPartitionDevice(
+      const std::string& partition_name,
+      uint32_t slot,
+      uint32_t current_slot,
+      bool not_in_payload = false) const override;
+  bool GetPartitionDevice(const std::string& partition_name,
+                          BootControlInterface::Slot slot,
+                          bool not_in_payload,
+                          std::string* device,
+                          bool* is_dynamic) const override;
   bool GetPartitionDevice(const std::string& partition_name,
                           BootControlInterface::Slot slot,
                           std::string* device) const override;
@@ -65,4 +75,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_BOOT_CONTROL_ANDROID_H_
+#endif  // UPDATE_ENGINE_AOSP_BOOT_CONTROL_ANDROID_H_
diff --git a/cleanup_previous_update_action.cc b/aosp/cleanup_previous_update_action.cc
similarity index 75%
rename from cleanup_previous_update_action.cc
rename to aosp/cleanup_previous_update_action.cc
index 88dbc57..dde6b89 100644
--- a/cleanup_previous_update_action.cc
+++ b/aosp/cleanup_previous_update_action.cc
@@ -13,13 +13,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 //
-#include "update_engine/cleanup_previous_update_action.h"
+#include "update_engine/aosp/cleanup_previous_update_action.h"
 
 #include <chrono>  // NOLINT(build/c++11) -- for merge times
 #include <functional>
 #include <string>
 #include <type_traits>
 
+#include <android-base/chrono_utils.h>
 #include <android-base/properties.h>
 #include <base/bind.h>
 
@@ -31,7 +32,7 @@
 #include "update_engine/payload_consumer/delta_performer.h"
 
 using android::base::GetBoolProperty;
-using android::snapshot::SnapshotManager;
+using android::snapshot::ISnapshotManager;
 using android::snapshot::SnapshotMergeStats;
 using android::snapshot::UpdateState;
 using brillo::MessageLoop;
@@ -56,7 +57,7 @@
 CleanupPreviousUpdateAction::CleanupPreviousUpdateAction(
     PrefsInterface* prefs,
     BootControlInterface* boot_control,
-    android::snapshot::SnapshotManager* snapshot,
+    android::snapshot::ISnapshotManager* snapshot,
     CleanupPreviousUpdateActionDelegateInterface* delegate)
     : prefs_(prefs),
       boot_control_(boot_control),
@@ -65,32 +66,30 @@
       running_(false),
       cancel_failed_(false),
       last_percentage_(0),
-      merge_stats_(SnapshotMergeStats::GetInstance(*snapshot)) {}
+      merge_stats_(nullptr) {}
+
+CleanupPreviousUpdateAction::~CleanupPreviousUpdateAction() {
+  StopActionInternal();
+}
 
 void CleanupPreviousUpdateAction::PerformAction() {
-  ResumeAction();
+  StartActionInternal();
 }
 
 void CleanupPreviousUpdateAction::TerminateProcessing() {
-  SuspendAction();
+  StopActionInternal();
 }
 
 void CleanupPreviousUpdateAction::ResumeAction() {
-  CHECK(prefs_);
-  CHECK(boot_control_);
-
-  LOG(INFO) << "Starting/resuming CleanupPreviousUpdateAction";
-  running_ = true;
   StartActionInternal();
 }
 
 void CleanupPreviousUpdateAction::SuspendAction() {
-  LOG(INFO) << "Stopping/suspending CleanupPreviousUpdateAction";
-  running_ = false;
+  StopActionInternal();
 }
 
 void CleanupPreviousUpdateAction::ActionCompleted(ErrorCode error_code) {
-  running_ = false;
+  StopActionInternal();
   ReportMergeStats();
   metadata_device_ = nullptr;
 }
@@ -103,7 +102,52 @@
   return "CleanupPreviousUpdateAction";
 }
 
+// This function is called at the beginning of all delayed functions. By
+// resetting |scheduled_task_|, the delayed function acknowledges that the task
+// has already been executed, therefore there's no need to cancel it in the
+// future. This avoids StopActionInternal() from resetting task IDs in an
+// unexpected way because task IDs could be reused.
+void CleanupPreviousUpdateAction::AcknowledgeTaskExecuted() {
+  if (scheduled_task_ != MessageLoop::kTaskIdNull) {
+    LOG(INFO) << "Executing task " << scheduled_task_;
+  }
+  scheduled_task_ = MessageLoop::kTaskIdNull;
+}
+
+// Check that scheduled_task_ is a valid task ID. Otherwise, terminate the
+// action.
+void CleanupPreviousUpdateAction::CheckTaskScheduled(std::string_view name) {
+  if (scheduled_task_ == MessageLoop::kTaskIdNull) {
+    LOG(ERROR) << "Unable to schedule " << name;
+    processor_->ActionComplete(this, ErrorCode::kError);
+  } else {
+    LOG(INFO) << "CleanupPreviousUpdateAction scheduled task ID "
+              << scheduled_task_ << " for " << name;
+  }
+}
+
+void CleanupPreviousUpdateAction::StopActionInternal() {
+  LOG(INFO) << "Stopping/suspending/completing CleanupPreviousUpdateAction";
+  running_ = false;
+
+  if (scheduled_task_ != MessageLoop::kTaskIdNull) {
+    if (MessageLoop::current()->CancelTask(scheduled_task_)) {
+      LOG(INFO) << "CleanupPreviousUpdateAction cancelled pending task ID "
+                << scheduled_task_;
+    } else {
+      LOG(ERROR) << "CleanupPreviousUpdateAction unable to cancel task ID "
+                 << scheduled_task_;
+    }
+  }
+  scheduled_task_ = MessageLoop::kTaskIdNull;
+}
+
 void CleanupPreviousUpdateAction::StartActionInternal() {
+  CHECK(prefs_);
+  CHECK(boot_control_);
+
+  LOG(INFO) << "Starting/resuming CleanupPreviousUpdateAction";
+  running_ = true;
   // Do nothing on non-VAB device.
   if (!boot_control_->GetDynamicPartitionControl()
            ->GetVirtualAbFeatureFlag()
@@ -111,21 +155,25 @@
     processor_->ActionComplete(this, ErrorCode::kSuccess);
     return;
   }
-  // SnapshotManager is only available on VAB devices.
-  CHECK(snapshot_);
+  // SnapshotManager must be available on VAB devices.
+  CHECK(snapshot_ != nullptr);
+  merge_stats_ = snapshot_->GetSnapshotMergeStatsInstance();
+  CHECK(merge_stats_ != nullptr);
   WaitBootCompletedOrSchedule();
 }
 
 void CleanupPreviousUpdateAction::ScheduleWaitBootCompleted() {
   TEST_AND_RETURN(running_);
-  MessageLoop::current()->PostDelayedTask(
+  scheduled_task_ = MessageLoop::current()->PostDelayedTask(
       FROM_HERE,
       base::Bind(&CleanupPreviousUpdateAction::WaitBootCompletedOrSchedule,
                  base::Unretained(this)),
       kCheckBootCompletedInterval);
+  CheckTaskScheduled("WaitBootCompleted");
 }
 
 void CleanupPreviousUpdateAction::WaitBootCompletedOrSchedule() {
+  AcknowledgeTaskExecuted();
   TEST_AND_RETURN(running_);
   if (!kIsRecovery &&
       !android::base::GetBoolProperty(kBootCompletedProp, false)) {
@@ -134,25 +182,32 @@
     return;
   }
 
+  auto boot_time = std::chrono::duration_cast<std::chrono::milliseconds>(
+      android::base::boot_clock::now().time_since_epoch());
+  merge_stats_->set_boot_complete_time_ms(boot_time.count());
+
   LOG(INFO) << "Boot completed, waiting on markBootSuccessful()";
   CheckSlotMarkedSuccessfulOrSchedule();
 }
 
 void CleanupPreviousUpdateAction::ScheduleWaitMarkBootSuccessful() {
   TEST_AND_RETURN(running_);
-  MessageLoop::current()->PostDelayedTask(
+  scheduled_task_ = MessageLoop::current()->PostDelayedTask(
       FROM_HERE,
       base::Bind(
           &CleanupPreviousUpdateAction::CheckSlotMarkedSuccessfulOrSchedule,
           base::Unretained(this)),
       kCheckSlotMarkedSuccessfulInterval);
+  CheckTaskScheduled("WaitMarkBootSuccessful");
 }
 
 void CleanupPreviousUpdateAction::CheckSlotMarkedSuccessfulOrSchedule() {
+  AcknowledgeTaskExecuted();
   TEST_AND_RETURN(running_);
   if (!kIsRecovery &&
       !boot_control_->IsSlotMarkedSuccessful(boot_control_->GetCurrentSlot())) {
     ScheduleWaitMarkBootSuccessful();
+    return;
   }
 
   if (metadata_device_ == nullptr) {
@@ -210,19 +265,22 @@
 
 void CleanupPreviousUpdateAction::ScheduleWaitForMerge() {
   TEST_AND_RETURN(running_);
-  MessageLoop::current()->PostDelayedTask(
+  scheduled_task_ = MessageLoop::current()->PostDelayedTask(
       FROM_HERE,
       base::Bind(&CleanupPreviousUpdateAction::WaitForMergeOrSchedule,
                  base::Unretained(this)),
       kWaitForMergeInterval);
+  CheckTaskScheduled("WaitForMerge");
 }
 
 void CleanupPreviousUpdateAction::WaitForMergeOrSchedule() {
+  AcknowledgeTaskExecuted();
   TEST_AND_RETURN(running_);
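+  // Whether this update uses compression is recorded with the merge state.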
+  auto update_uses_compression = snapshot_->UpdateUsesCompression();
   auto state = snapshot_->ProcessUpdateState(
       std::bind(&CleanupPreviousUpdateAction::OnMergePercentageUpdate, this),
       std::bind(&CleanupPreviousUpdateAction::BeforeCancel, this));
-  merge_stats_->set_state(state);
+  merge_stats_->set_state(state, update_uses_compression);
 
   switch (state) {
     case UpdateState::None: {
@@ -266,6 +324,7 @@
 
     case UpdateState::MergeFailed: {
       LOG(ERROR) << "Merge failed. Device may be corrupted.";
+      merge_stats_->set_merge_failure_code(snapshot_->ReadMergeFailureCode());
       processor_->ActionComplete(this, ErrorCode::kDeviceCorrupted);
       return;
     }
@@ -343,16 +402,21 @@
     return;
   }
 
-  uint64_t cow_file_size;
-  if (snapshot_->InitiateMerge(&cow_file_size)) {
-    merge_stats_->set_cow_file_size(cow_file_size);
+  snapshot_->UpdateCowStats(merge_stats_);
+
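+  // Record the time between boot-complete and merge start for merge stats.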
+  auto merge_start_time = std::chrono::duration_cast<std::chrono::milliseconds>(
+      android::base::boot_clock::now().time_since_epoch());
+  merge_stats_->set_boot_complete_to_merge_start_time_ms(
+      merge_start_time.count() - merge_stats_->boot_complete_time_ms());
+
+  if (snapshot_->InitiateMerge()) {
     WaitForMergeOrSchedule();
     return;
   }
 
   LOG(WARNING) << "InitiateMerge failed.";
   auto state = snapshot_->GetUpdateState();
-  merge_stats_->set_state(state);
+  merge_stats_->set_state(state, snapshot_->UpdateUsesCompression());
   if (state == UpdateState::Unverified) {
     // We are stuck at unverified state. This can happen if the update has
     // been applied, but it has not even been attempted yet (in libsnapshot,
@@ -405,6 +469,13 @@
   bool vab_retrofit = boot_control_->GetDynamicPartitionControl()
                           ->GetVirtualAbFeatureFlag()
                           .IsRetrofit();
+  bool vab_compression_enabled = boot_control_->GetDynamicPartitionControl()
+                                     ->GetVirtualAbCompressionFeatureFlag()
+                                     .IsEnabled();
+  // The snapshot has been merged, so we can no longer call
+  // DynamicPartitionControlInterface::UpdateUsesSnapshotCompression.
+  // However, we have saved the flag in the snapshot report.
+  bool vab_compression_used = report.compression_enabled();
 
   LOG(INFO) << "Reporting merge stats: "
             << android::snapshot::UpdateState_Name(report.state()) << " in "
@@ -416,7 +487,14 @@
                              static_cast<int64_t>(passed_ms.count()),
                              static_cast<int32_t>(report.resume_count()),
                              vab_retrofit,
-                             static_cast<int64_t>(report.cow_file_size()));
+                             static_cast<int64_t>(report.cow_file_size()),
+                             vab_compression_enabled,
+                             vab_compression_used,
+                             report.total_cow_size_bytes(),
+                             report.estimated_cow_size_bytes(),
+                             report.boot_complete_time_ms(),
+                             report.boot_complete_to_merge_start_time_ms(),
+                             static_cast<int32_t>(report.merge_failure_code()));
 #endif
 }
 
diff --git a/cleanup_previous_update_action.h b/aosp/cleanup_previous_update_action.h
similarity index 81%
rename from cleanup_previous_update_action.h
rename to aosp/cleanup_previous_update_action.h
index 91e08b0..b93c557 100644
--- a/cleanup_previous_update_action.h
+++ b/aosp/cleanup_previous_update_action.h
@@ -14,12 +14,13 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_CLEANUP_PREVIOUS_UPDATE_ACTION_H_
-#define UPDATE_ENGINE_CLEANUP_PREVIOUS_UPDATE_ACTION_H_
+#ifndef UPDATE_ENGINE_AOSP_CLEANUP_PREVIOUS_UPDATE_ACTION_H_
+#define UPDATE_ENGINE_AOSP_CLEANUP_PREVIOUS_UPDATE_ACTION_H_
 
 #include <chrono>  // NOLINT(build/c++11) -- for merge times
 #include <memory>
 #include <string>
+#include <string_view>
 
 #include <brillo/message_loops/message_loop.h>
 #include <libsnapshot/snapshot.h>
@@ -49,8 +50,9 @@
   CleanupPreviousUpdateAction(
       PrefsInterface* prefs,
       BootControlInterface* boot_control,
-      android::snapshot::SnapshotManager* snapshot,
+      android::snapshot::ISnapshotManager* snapshot,
       CleanupPreviousUpdateActionDelegateInterface* delegate);
+  ~CleanupPreviousUpdateAction();
 
   void PerformAction() override;
   void SuspendAction() override;
@@ -67,14 +69,20 @@
  private:
   PrefsInterface* prefs_;
   BootControlInterface* boot_control_;
-  android::snapshot::SnapshotManager* snapshot_;
+  android::snapshot::ISnapshotManager* snapshot_;
   CleanupPreviousUpdateActionDelegateInterface* delegate_;
   std::unique_ptr<android::snapshot::AutoDevice> metadata_device_;
   bool running_{false};
   bool cancel_failed_{false};
   unsigned int last_percentage_{0};
-  android::snapshot::SnapshotMergeStats* merge_stats_;
+  android::snapshot::ISnapshotMergeStats* merge_stats_;
+  brillo::MessageLoop::TaskId scheduled_task_{brillo::MessageLoop::kTaskIdNull};
 
+  // Helpers for task management.
+  void AcknowledgeTaskExecuted();
+  void CheckTaskScheduled(std::string_view name);
+
+  void StopActionInternal();
   void StartActionInternal();
   void ScheduleWaitBootCompleted();
   void WaitBootCompletedOrSchedule();
@@ -92,4 +100,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_CLEANUP_PREVIOUS_UPDATE_ACTION_H_
+#endif  // UPDATE_ENGINE_AOSP_CLEANUP_PREVIOUS_UPDATE_ACTION_H_
diff --git a/aosp/cleanup_previous_update_action_unittest.cc b/aosp/cleanup_previous_update_action_unittest.cc
new file mode 100644
index 0000000..0d2b4e6
--- /dev/null
+++ b/aosp/cleanup_previous_update_action_unittest.cc
@@ -0,0 +1,174 @@
+//
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <algorithm>
+
+#include <brillo/message_loops/fake_message_loop.h>
+#include <gtest/gtest.h>
+#include <libsnapshot/snapshot.h>
+#include <libsnapshot/mock_snapshot.h>
+#include <libsnapshot/mock_snapshot_merge_stats.h>
+
+#include "update_engine/aosp/cleanup_previous_update_action.h"
+#include "update_engine/common/mock_boot_control.h"
+#include "update_engine/common/mock_dynamic_partition_control.h"
+#include "update_engine/common/mock_prefs.h"
+
+namespace chromeos_update_engine {
+
+using android::snapshot::AutoDevice;
+using android::snapshot::MockSnapshotManager;
+using android::snapshot::MockSnapshotMergeStats;
+using android::snapshot::UpdateState;
+using testing::_;
+using testing::AtLeast;
+using testing::Return;
+
+class MockCleanupPreviousUpdateActionDelegate final
+    : public CleanupPreviousUpdateActionDelegateInterface {
+  MOCK_METHOD(void, OnCleanupProgressUpdate, (double), (override));
+};
+
+class MockActionProcessor : public ActionProcessor {
+ public:
+  MOCK_METHOD(void, ActionComplete, (AbstractAction*, ErrorCode), (override));
+};
+
+class MockAutoDevice : public AutoDevice {
+ public:
+  explicit MockAutoDevice(std::string name) : AutoDevice(name) {}
+  ~MockAutoDevice() = default;
+};
+
+class CleanupPreviousUpdateActionTest : public ::testing::Test {
+ public:
+  void SetUp() override {
+    ON_CALL(boot_control_, GetDynamicPartitionControl())
+        .WillByDefault(Return(&dynamic_control_));
+    ON_CALL(boot_control_, GetCurrentSlot()).WillByDefault(Return(0));
+    ON_CALL(mock_snapshot_, GetSnapshotMergeStatsInstance())
+        .WillByDefault(Return(&mock_stats_));
+    action_.SetProcessor(&mock_processor_);
+    loop_.SetAsCurrent();
+  }
+
+  constexpr static FeatureFlag LAUNCH{FeatureFlag::Value::LAUNCH};
+  constexpr static FeatureFlag NONE{FeatureFlag::Value::NONE};
+  MockSnapshotManager mock_snapshot_;
+  MockPrefs mock_prefs_;
+  MockBootControl boot_control_;
+  MockDynamicPartitionControl dynamic_control_{};
+  MockCleanupPreviousUpdateActionDelegate mock_delegate_;
+  MockSnapshotMergeStats mock_stats_;
+  MockActionProcessor mock_processor_;
+  brillo::FakeMessageLoop loop_{nullptr};
+  CleanupPreviousUpdateAction action_{
+      &mock_prefs_, &boot_control_, &mock_snapshot_, &mock_delegate_};
+};
+
+TEST_F(CleanupPreviousUpdateActionTest, NonVabTest) {
+  // Since VAB isn't even enabled, |GetSnapshotMergeStatsInstance| shouldn't be
+  // called at all.
+  EXPECT_CALL(mock_snapshot_, GetSnapshotMergeStatsInstance()).Times(0);
+  EXPECT_CALL(dynamic_control_, GetVirtualAbFeatureFlag())
+      .Times(AtLeast(1))
+      .WillRepeatedly(Return(NONE));
+  action_.PerformAction();
+}
+
+TEST_F(CleanupPreviousUpdateActionTest, VABSlotSuccessful) {
+  // Expectation: if VAB is enabled, the cleanup action should call
+  // |SnapshotMergeStats::Start()| to start the merge and wait for it to finish.
+  EXPECT_CALL(mock_snapshot_, GetSnapshotMergeStatsInstance())
+      .Times(AtLeast(1));
+  EXPECT_CALL(mock_snapshot_, EnsureMetadataMounted())
+      .Times(AtLeast(1))
+      .WillRepeatedly(
+          []() { return std::make_unique<MockAutoDevice>("mock_device"); });
+  EXPECT_CALL(dynamic_control_, GetVirtualAbFeatureFlag())
+      .Times(AtLeast(1))
+      .WillRepeatedly(Return(LAUNCH));
+  // CleanupPreviousUpdateAction should use whatever slot is returned by
+  // |GetCurrentSlot()|.
+  EXPECT_CALL(boot_control_, GetCurrentSlot())
+      .Times(AtLeast(1))
+      .WillRepeatedly(Return(1));
+  EXPECT_CALL(boot_control_, IsSlotMarkedSuccessful(1))
+      .Times(AtLeast(1))
+      .WillRepeatedly(Return(true));
+  EXPECT_CALL(mock_snapshot_, ProcessUpdateState(_, _))
+      .Times(AtLeast(2))
+      .WillOnce(Return(UpdateState::Merging))
+      .WillRepeatedly(Return(UpdateState::MergeCompleted));
+  EXPECT_CALL(mock_stats_, Start())
+      .Times(AtLeast(1))
+      .WillRepeatedly(Return(true));
+  EXPECT_CALL(mock_processor_, ActionComplete(&action_, ErrorCode::kSuccess))
+      .Times(1);
+  action_.PerformAction();
+  while (loop_.PendingTasks()) {
+    ASSERT_TRUE(loop_.RunOnce(true));
+  }
+}
+
+TEST_F(CleanupPreviousUpdateActionTest, VabSlotNotReady) {
+  // The cleanup action should repeatedly query boot control until the slot is
+  // marked successful.
+  static constexpr auto MAX_TIMEPOINT =
+      std::chrono::steady_clock::time_point::max();
+  EXPECT_CALL(mock_snapshot_, GetSnapshotMergeStatsInstance())
+      .Times(AtLeast(1));
+  EXPECT_CALL(mock_snapshot_, EnsureMetadataMounted())
+      .Times(AtLeast(1))
+      .WillRepeatedly(
+          []() { return std::make_unique<MockAutoDevice>("mock_device"); });
+  EXPECT_CALL(dynamic_control_, GetVirtualAbFeatureFlag())
+      .Times(AtLeast(1))
+      .WillRepeatedly(Return(LAUNCH));
+  auto slot_success_time = MAX_TIMEPOINT;
+  auto merge_start_time = MAX_TIMEPOINT;
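+  // Record when the slot first reports success and when the merge starts,
+  // to verify ordering below.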
+  EXPECT_CALL(boot_control_, IsSlotMarkedSuccessful(_))
+      .Times(AtLeast(3))
+      .WillOnce(Return(false))
+      .WillOnce(Return(false))
+      .WillOnce([&slot_success_time]() {
+        slot_success_time =
+            std::min(slot_success_time, std::chrono::steady_clock::now());
+        return true;
+      });
+
+  EXPECT_CALL(mock_stats_, Start())
+      .Times(1)
+      .WillRepeatedly([&merge_start_time]() {
+        merge_start_time =
+            std::min(merge_start_time, std::chrono::steady_clock::now());
+        return true;
+      });
+
+  EXPECT_CALL(mock_snapshot_, ProcessUpdateState(_, _))
+      .Times(AtLeast(1))
+      .WillRepeatedly(Return(UpdateState::MergeCompleted));
+  EXPECT_CALL(mock_processor_, ActionComplete(&action_, ErrorCode::kSuccess))
+      .Times(1);
+  action_.PerformAction();
+  while (loop_.PendingTasks()) {
+    ASSERT_TRUE(loop_.RunOnce(true));
+  }
+  ASSERT_LT(slot_success_time, merge_start_time)
+      << "Merge should not be started until the slot is marked successful";
+}
+
+}  // namespace chromeos_update_engine
diff --git a/aosp/cow_converter.cc b/aosp/cow_converter.cc
new file mode 100644
index 0000000..8c641b8
--- /dev/null
+++ b/aosp/cow_converter.cc
@@ -0,0 +1,131 @@
+//
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <stdio.h>
+#include <string.h>
+
+#include <cstdint>
+#include <cstdio>
+#include <memory>
+
+#include <sys/fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+
+#include <base/files/file_path.h>
+#include <libsnapshot/cow_writer.h>
+
+#include "update_engine/common/cow_operation_convert.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/file_descriptor.h"
+#include "update_engine/payload_consumer/payload_metadata.h"
+#include "update_engine/payload_generator/cow_size_estimator.h"
+#include "update_engine/update_metadata.pb.h"
+
+using android::snapshot::CowWriter;
+
+namespace chromeos_update_engine {
+
+bool ProcessPartition(const chromeos_update_engine::PartitionUpdate& partition,
+                      const char* image_dir,
+                      size_t block_size) {
+  base::FilePath img_dir{image_dir};
+  auto target_img = img_dir.Append(partition.partition_name() + ".img");
+  auto output_cow = img_dir.Append(partition.partition_name() + ".cow");
+  FileDescriptorPtr target_img_fd = std::make_shared<EintrSafeFileDescriptor>();
+  if (!target_img_fd->Open(target_img.value().c_str(), O_RDONLY)) {
+    PLOG(ERROR) << "Failed to open " << target_img.value();
+    return false;
+  }
+  android::base::unique_fd output_fd{
+      open(output_cow.value().c_str(), O_RDWR | O_CREAT, 0744)};
+  if (output_fd < 0) {
+    PLOG(ERROR) << "Failed to open " << output_cow.value();
+    return false;
+  }
+
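+  // Feed the partition's install and merge operations through a
+  // gz-compressed CowWriter via CowDryRun to produce the COW image.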
+  android::snapshot::CowWriter cow_writer{
+      {.block_size = static_cast<uint32_t>(block_size), .compression = "gz"}};
+  TEST_AND_RETURN_FALSE(cow_writer.Initialize(output_fd));
+  TEST_AND_RETURN_FALSE(CowDryRun(target_img_fd,
+                                  partition.operations(),
+                                  partition.merge_operations(),
+                                  block_size,
+                                  &cow_writer));
+  TEST_AND_RETURN_FALSE(cow_writer.Finalize());
+  return true;
+}
+
+}  // namespace chromeos_update_engine
+
+using chromeos_update_engine::MetadataParseResult;
+using chromeos_update_engine::PayloadMetadata;
+
+int main(int argc, const char* argv[]) {
+  if (argc != 3) {
+    printf("Usage: %s <payload.bin> <extracted target_file>\n", argv[0]);
+    return -1;
+  }
+  const char* payload_path = argv[1];
+  const char* images_dir = argv[2];
+  int payload_fd = open(payload_path, O_RDONLY);
+  if (payload_fd < 0) {
+    PLOG(ERROR) << "Failed to open payload file:";
+    return 1;
+  }
+  chromeos_update_engine::ScopedFdCloser closer{&payload_fd};
+  auto payload_size = chromeos_update_engine::utils::FileSize(payload_fd);
+  if (payload_size <= 0) {
+    PLOG(ERROR)
+        << "Couldn't determine size of payload file, or payload file is empty";
+    return 2;
+  }
+
+  PayloadMetadata payload_metadata;
+  auto payload = static_cast<unsigned char*>(
+      mmap(nullptr, payload_size, PROT_READ, MAP_PRIVATE, payload_fd, 0));
+
+  // Ensure that |payload| is munmap()ed when |munmapper| goes out of scope at
+  // the end of main().
+  auto munmap_deleter = [payload_size](auto payload) {
+    munmap(payload, payload_size);
+  };
+  std::unique_ptr<unsigned char, decltype(munmap_deleter)> munmapper{
+      payload, munmap_deleter};
+
+  if (payload == MAP_FAILED) {
+    PLOG(ERROR) << "Failed to mmap() payload file";
+    return 3;
+  }
+  if (payload_metadata.ParsePayloadHeader(payload, payload_size, nullptr) !=
+      chromeos_update_engine::MetadataParseResult::kSuccess) {
+    LOG(ERROR) << "Payload header parse failed!";
+    return 4;
+  }
+  chromeos_update_engine::DeltaArchiveManifest manifest;
+  if (!payload_metadata.GetManifest(payload, payload_size, &manifest)) {
+    LOG(ERROR) << "Failed to parse manifest!";
+    return 5;
+  }
+
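+  // For each partition in the manifest, write <name>.cow next to the
+  // extracted <name>.img.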
+  for (const auto& partition : manifest.partitions()) {
+    LOG(INFO) << partition.partition_name();
+    if (!ProcessPartition(partition, images_dir, manifest.block_size())) {
+      return 6;
+    }
+  }
+  return 0;
+}
diff --git a/aosp/daemon_android.cc b/aosp/daemon_android.cc
new file mode 100644
index 0000000..c102e3b
--- /dev/null
+++ b/aosp/daemon_android.cc
@@ -0,0 +1,74 @@
+//
+// Copyright (C) 2015 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/aosp/daemon_android.h"
+
+#include <sysexits.h>
+
+#include <binderwrapper/binder_wrapper.h>
+
+#include "update_engine/aosp/daemon_state_android.h"
+
+using std::unique_ptr;
+
+namespace chromeos_update_engine {
+
+unique_ptr<DaemonBase> DaemonBase::CreateInstance() {
+  return std::make_unique<DaemonAndroid>();
+}
+
+int DaemonAndroid::OnInit() {
+  // Register the |subprocess_| singleton with this Daemon as the signal
+  // handler.
+  subprocess_.Init(this);
+
+  int exit_code = brillo::Daemon::OnInit();
+  if (exit_code != EX_OK)
+    return exit_code;
+
+  android::BinderWrapper::Create();
+  binder_watcher_.Init();
+
+  DaemonStateAndroid* daemon_state_android = new DaemonStateAndroid();
+  daemon_state_.reset(daemon_state_android);
+  LOG_IF(ERROR, !daemon_state_android->Initialize())
+      << "Failed to initialize system state.";
+
+  auto binder_wrapper = android::BinderWrapper::Get();
+
+  // Create the Binder Service.
+  binder_service_ = new BinderUpdateEngineAndroidService{
+      daemon_state_android->service_delegate()};
+  if (!binder_wrapper->RegisterService(binder_service_->ServiceName(),
+                                       binder_service_)) {
+    LOG(ERROR) << "Failed to register binder service.";
+  }
+  daemon_state_->AddObserver(binder_service_.get());
+
+  // Create the stable binder service.
+  stable_binder_service_ = new BinderUpdateEngineAndroidStableService{
+      daemon_state_android->service_delegate()};
+  if (!binder_wrapper->RegisterService(stable_binder_service_->ServiceName(),
+                                       stable_binder_service_)) {
+    LOG(ERROR) << "Failed to register stable binder service.";
+  }
+  daemon_state_->AddObserver(stable_binder_service_.get());
+
+  daemon_state_->StartUpdater();
+  return EX_OK;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/aosp/daemon_android.h b/aosp/daemon_android.h
new file mode 100644
index 0000000..38a8689
--- /dev/null
+++ b/aosp/daemon_android.h
@@ -0,0 +1,58 @@
+//
+// Copyright (C) 2015 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_AOSP_DAEMON_ANDROID_H_
+#define UPDATE_ENGINE_AOSP_DAEMON_ANDROID_H_
+
+#include <memory>
+
+#include <brillo/binder_watcher.h>
+
+#include "update_engine/aosp/binder_service_android.h"
+#include "update_engine/aosp/binder_service_stable_android.h"
+#include "update_engine/common/daemon_base.h"
+#include "update_engine/common/daemon_state_interface.h"
+#include "update_engine/common/subprocess.h"
+
+namespace chromeos_update_engine {
+
+class DaemonAndroid : public DaemonBase {
+ public:
+  DaemonAndroid() = default;
+
+ protected:
+  int OnInit() override;
+
+ private:
+  // The Subprocess singleton class requires a |brillo::MessageLoop| in the
+  // current thread, so we need to initialize it from this class instead of
+  // the main() function.
+  Subprocess subprocess_;
+
+  brillo::BinderWatcher binder_watcher_;
+  android::sp<BinderUpdateEngineAndroidService> binder_service_;
+  android::sp<BinderUpdateEngineAndroidStableService> stable_binder_service_;
+
+  // The daemon state with all the required daemon classes for the configured
+  // platform.
+  std::unique_ptr<DaemonStateInterface> daemon_state_;
+
+  DISALLOW_COPY_AND_ASSIGN(DaemonAndroid);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_AOSP_DAEMON_ANDROID_H_
diff --git a/daemon_state_android.cc b/aosp/daemon_state_android.cc
similarity index 74%
rename from daemon_state_android.cc
rename to aosp/daemon_state_android.cc
index c9c09b8..da49080 100644
--- a/daemon_state_android.cc
+++ b/aosp/daemon_state_android.cc
@@ -14,15 +14,16 @@
 // limitations under the License.
 //
 
-#include "update_engine/daemon_state_android.h"
+#include "update_engine/aosp/daemon_state_android.h"
 
 #include <base/logging.h>
 
+#include "update_engine/aosp/apex_handler_android.h"
+#include "update_engine/aosp/update_attempter_android.h"
 #include "update_engine/common/boot_control.h"
 #include "update_engine/common/boot_control_stub.h"
 #include "update_engine/common/hardware.h"
 #include "update_engine/common/prefs.h"
-#include "update_engine/update_attempter_android.h"
 
 namespace chromeos_update_engine {
 
@@ -45,17 +46,17 @@
 
   // Initialize prefs.
   base::FilePath non_volatile_path;
-  // TODO(deymo): Fall back to in-memory prefs if there's no physical directory
-  // available.
   if (!hardware_->GetNonVolatileDirectory(&non_volatile_path)) {
-    LOG(ERROR) << "Failed to get a non-volatile directory.";
-    return false;
-  }
-  Prefs* prefs = new Prefs();
-  prefs_.reset(prefs);
-  if (!prefs->Init(non_volatile_path.Append(kPrefsSubDirectory))) {
-    LOG(ERROR) << "Failed to initialize preferences.";
-    return false;
+    prefs_.reset(new MemoryPrefs());
+    LOG(WARNING) << "Could not get a non-volatile directory, "
+                    "falling back to memory prefs";
+  } else {
+    Prefs* prefs = new Prefs();
+    prefs_.reset(prefs);
+    if (!prefs->Init(non_volatile_path.Append(kPrefsSubDirectory))) {
+      LOG(ERROR) << "Failed to initialize preferences.";
+      return false;
+    }
   }
 
   // The CertificateChecker singleton is used by the update attempter.
@@ -64,8 +65,11 @@
   certificate_checker_->Init();
 
   // Initialize the UpdateAttempter before the UpdateManager.
-  update_attempter_.reset(new UpdateAttempterAndroid(
-      this, prefs_.get(), boot_control_.get(), hardware_.get()));
+  update_attempter_.reset(new UpdateAttempterAndroid(this,
+                                                     prefs_.get(),
+                                                     boot_control_.get(),
+                                                     hardware_.get(),
+                                                     CreateApexHandler()));
 
   return true;
 }
diff --git a/daemon_state_android.h b/aosp/daemon_state_android.h
similarity index 84%
rename from daemon_state_android.h
rename to aosp/daemon_state_android.h
index 928a14e..dea3a23 100644
--- a/daemon_state_android.h
+++ b/aosp/daemon_state_android.h
@@ -14,20 +14,20 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_DAEMON_STATE_ANDROID_H_
-#define UPDATE_ENGINE_DAEMON_STATE_ANDROID_H_
+#ifndef UPDATE_ENGINE_AOSP_DAEMON_STATE_ANDROID_H_
+#define UPDATE_ENGINE_AOSP_DAEMON_STATE_ANDROID_H_
 
 #include <memory>
 #include <set>
 
+#include "update_engine/aosp/service_delegate_android_interface.h"
+#include "update_engine/aosp/update_attempter_android.h"
 #include "update_engine/certificate_checker.h"
 #include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/daemon_state_interface.h"
 #include "update_engine/common/hardware_interface.h"
 #include "update_engine/common/prefs_interface.h"
-#include "update_engine/daemon_state_interface.h"
-#include "update_engine/service_delegate_android_interface.h"
-#include "update_engine/service_observer_interface.h"
-#include "update_engine/update_attempter_android.h"
+#include "update_engine/common/service_observer_interface.h"
 
 namespace chromeos_update_engine {
 
@@ -73,4 +73,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_DAEMON_STATE_ANDROID_H_
+#endif  // UPDATE_ENGINE_AOSP_DAEMON_STATE_ANDROID_H_
diff --git a/dynamic_partition_control_android.cc b/aosp/dynamic_partition_control_android.cc
similarity index 67%
rename from dynamic_partition_control_android.cc
rename to aosp/dynamic_partition_control_android.cc
index ecd6252..538b57c 100644
--- a/dynamic_partition_control_android.cc
+++ b/aosp/dynamic_partition_control_android.cc
@@ -14,13 +14,17 @@
 // limitations under the License.
 //
 
-#include "update_engine/dynamic_partition_control_android.h"
+#include "update_engine/aosp/dynamic_partition_control_android.h"
 
+#include <algorithm>
 #include <chrono>  // NOLINT(build/c++11) - using libsnapshot / liblp API
+#include <cstdint>
 #include <map>
 #include <memory>
 #include <set>
 #include <string>
+#include <string_view>
+#include <utility>
 #include <vector>
 
 #include <android-base/properties.h>
@@ -28,18 +32,25 @@
 #include <base/files/file_util.h>
 #include <base/logging.h>
 #include <base/strings/string_util.h>
+#include <base/strings/stringprintf.h>
 #include <bootloader_message/bootloader_message.h>
 #include <fs_mgr.h>
 #include <fs_mgr_dm_linear.h>
 #include <fs_mgr_overlayfs.h>
 #include <libavb/libavb.h>
 #include <libdm/dm.h>
+#include <liblp/liblp.h>
+#include <libsnapshot/cow_writer.h>
 #include <libsnapshot/snapshot.h>
+#include <libsnapshot/snapshot_stub.h>
 
-#include "update_engine/cleanup_previous_update_action.h"
+#include "update_engine/aosp/cleanup_previous_update_action.h"
+#include "update_engine/aosp/dynamic_partition_utils.h"
 #include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/dynamic_partition_control_interface.h"
+#include "update_engine/common/platform_constants.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/dynamic_partition_utils.h"
+#include "update_engine/payload_consumer/cow_writer_file_descriptor.h"
 #include "update_engine/payload_consumer/delta_performer.h"
 
 using android::base::GetBoolProperty;
@@ -58,7 +69,9 @@
 using android::snapshot::OptimizeSourceCopyOperation;
 using android::snapshot::Return;
 using android::snapshot::SnapshotManager;
+using android::snapshot::SnapshotManagerStub;
 using android::snapshot::UpdateState;
+using base::StringPrintf;
 
 namespace chromeos_update_engine {
 
@@ -67,6 +80,14 @@
     "ro.boot.dynamic_partitions_retrofit";
 constexpr char kVirtualAbEnabled[] = "ro.virtual_ab.enabled";
 constexpr char kVirtualAbRetrofit[] = "ro.virtual_ab.retrofit";
+constexpr char kVirtualAbCompressionEnabled[] =
+    "ro.virtual_ab.compression.enabled";
+
+// Currently, Android doesn't have a retrofit prop for VAB compression.
+// However, struct FeatureFlag forces us to determine whether a feature is
+// 'retrofit', so this empty value is here just to simplify the code. Replace
+// it with the real retrofit prop name once one exists.
+constexpr char kVirtualAbCompressionRetrofit[] = "";
 constexpr char kPostinstallFstabPrefix[] = "ro.postinstall.fstab.prefix";
 // Map timeout for dynamic partitions.
 constexpr std::chrono::milliseconds kMapTimeout{1000};
@@ -74,19 +95,15 @@
 // needs to be mapped, this timeout is longer than |kMapTimeout|.
 constexpr std::chrono::milliseconds kMapSnapshotTimeout{5000};
 
-#ifdef __ANDROID_RECOVERY__
-constexpr bool kIsRecovery = true;
-#else
-constexpr bool kIsRecovery = false;
-#endif
-
 DynamicPartitionControlAndroid::~DynamicPartitionControlAndroid() {
   Cleanup();
 }
 
 static FeatureFlag GetFeatureFlag(const char* enable_prop,
                                   const char* retrofit_prop) {
-  bool retrofit = GetBoolProperty(retrofit_prop, false);
+  // Default retrofit to false if retrofit_prop is empty.
+  bool retrofit = retrofit_prop && retrofit_prop[0] != '\0' &&
+                  GetBoolProperty(retrofit_prop, false);
   bool enabled = GetBoolProperty(enable_prop, false);
   if (retrofit && !enabled) {
     LOG(ERROR) << retrofit_prop << " is true but " << enable_prop
@@ -102,14 +119,20 @@
   return FeatureFlag(FeatureFlag::Value::NONE);
 }
 
-DynamicPartitionControlAndroid::DynamicPartitionControlAndroid()
+DynamicPartitionControlAndroid::DynamicPartitionControlAndroid(
+    uint32_t source_slot)
     : dynamic_partitions_(
           GetFeatureFlag(kUseDynamicPartitions, kRetrfoitDynamicPartitions)),
-      virtual_ab_(GetFeatureFlag(kVirtualAbEnabled, kVirtualAbRetrofit)) {
+      virtual_ab_(GetFeatureFlag(kVirtualAbEnabled, kVirtualAbRetrofit)),
+      virtual_ab_compression_(GetFeatureFlag(kVirtualAbCompressionEnabled,
+                                             kVirtualAbCompressionRetrofit)),
+      source_slot_(source_slot) {
   if (GetVirtualAbFeatureFlag().IsEnabled()) {
     snapshot_ = SnapshotManager::New();
-    CHECK(snapshot_ != nullptr) << "Cannot initialize SnapshotManager.";
+  } else {
+    snapshot_ = SnapshotManagerStub::New();
   }
+  CHECK(snapshot_ != nullptr) << "Cannot initialize SnapshotManager.";
 }
 
 FeatureFlag DynamicPartitionControlAndroid::GetDynamicPartitionsFeatureFlag() {
@@ -120,6 +143,15 @@
   return virtual_ab_;
 }
 
+FeatureFlag
+DynamicPartitionControlAndroid::GetVirtualAbCompressionFeatureFlag() {
+  if constexpr (constants::kIsRecovery) {
+    // Don't attempt VABC in recovery
+    return FeatureFlag(FeatureFlag::Value::NONE);
+  }
+  return virtual_ab_compression_;
+}
+
 bool DynamicPartitionControlAndroid::OptimizeOperation(
     const std::string& partition_name,
     const InstallOperation& operation,
@@ -253,9 +285,10 @@
   return true;
 }
 
-void DynamicPartitionControlAndroid::UnmapAllPartitions() {
+bool DynamicPartitionControlAndroid::UnmapAllPartitions() {
+  snapshot_->UnmapAllSnapshots();
   if (mapped_devices_.empty()) {
-    return;
+    return false;
   }
   // UnmapPartitionOnDeviceMapper removes objects from mapped_devices_, hence
   // a copy is needed for the loop.
@@ -264,6 +297,7 @@
   for (const auto& partition_name : mapped) {
     ignore_result(UnmapPartitionOnDeviceMapper(partition_name));
   }
+  return true;
 }
 
 void DynamicPartitionControlAndroid::Cleanup() {
@@ -287,9 +321,16 @@
 
 std::unique_ptr<MetadataBuilder>
 DynamicPartitionControlAndroid::LoadMetadataBuilder(
-    const std::string& super_device, uint32_t source_slot) {
-  return LoadMetadataBuilder(
-      super_device, source_slot, BootControlInterface::kInvalidSlot);
+    const std::string& super_device, uint32_t slot) {
+  auto builder = MetadataBuilder::New(PartitionOpener(), super_device, slot);
+  if (builder == nullptr) {
+    LOG(WARNING) << "No metadata slot " << BootControlInterface::SlotName(slot)
+                 << " in " << super_device;
+    return nullptr;
+  }
+  LOG(INFO) << "Loaded metadata from slot "
+            << BootControlInterface::SlotName(slot) << " in " << super_device;
+  return builder;
 }
 
 std::unique_ptr<MetadataBuilder>
@@ -297,26 +338,19 @@
     const std::string& super_device,
     uint32_t source_slot,
     uint32_t target_slot) {
-  std::unique_ptr<MetadataBuilder> builder;
-  if (target_slot == BootControlInterface::kInvalidSlot) {
-    builder =
-        MetadataBuilder::New(PartitionOpener(), super_device, source_slot);
-  } else {
-    bool always_keep_source_slot = !target_supports_snapshot_;
-    builder = MetadataBuilder::NewForUpdate(PartitionOpener(),
-                                            super_device,
-                                            source_slot,
-                                            target_slot,
-                                            always_keep_source_slot);
-  }
-
+  bool always_keep_source_slot = !target_supports_snapshot_;
+  auto builder = MetadataBuilder::NewForUpdate(PartitionOpener(),
+                                               super_device,
+                                               source_slot,
+                                               target_slot,
+                                               always_keep_source_slot);
   if (builder == nullptr) {
     LOG(WARNING) << "No metadata slot "
                  << BootControlInterface::SlotName(source_slot) << " in "
                  << super_device;
     return nullptr;
   }
-  LOG(INFO) << "Loaded metadata from slot "
+  LOG(INFO) << "Created metadata for new update from slot "
             << BootControlInterface::SlotName(source_slot) << " in "
             << super_device;
   return builder;
@@ -428,17 +462,17 @@
     return false;
   }
 
+  if (!SetTargetBuildVars(manifest)) {
+    return false;
+  }
+
   // Although the current build supports dynamic partitions, the given payload
   // doesn't use it for target partitions. This could happen when applying a
   // retrofit update. Skip updating the partition metadata for the target slot.
-  is_target_dynamic_ = !manifest.dynamic_partition_metadata().groups().empty();
   if (!is_target_dynamic_) {
     return true;
   }
 
-  target_supports_snapshot_ =
-      manifest.dynamic_partition_metadata().snapshot_enabled();
-
   if (!update)
     return true;
 
@@ -489,8 +523,60 @@
     }
   }
 
-  return PrepareDynamicPartitionsForUpdate(
-      source_slot, target_slot, manifest, delete_source);
+  // TODO(xunchang) support partial update on non VAB enabled devices.
+  TEST_AND_RETURN_FALSE(PrepareDynamicPartitionsForUpdate(
+      source_slot, target_slot, manifest, delete_source));
+
+  if (required_size != nullptr) {
+    *required_size = 0;
+  }
+  return true;
+}
+
+bool DynamicPartitionControlAndroid::SetTargetBuildVars(
+    const DeltaArchiveManifest& manifest) {
+  // Precondition: current build supports dynamic partition.
+  CHECK(GetDynamicPartitionsFeatureFlag().IsEnabled());
+
+  bool is_target_dynamic =
+      !manifest.dynamic_partition_metadata().groups().empty();
+  bool target_supports_snapshot =
+      manifest.dynamic_partition_metadata().snapshot_enabled();
+
+  if (manifest.partial_update()) {
+    // Partial updates require DAP. For partial updates that do not involve
+    // dynamic partitions, groups() can be empty, so assume is_target_dynamic
+    // in this case as well. This assumption should be safe because we also
+    // check target_supports_snapshot below, which presumably implies that the
+    // target build supports dynamic partitions.
+    if (!is_target_dynamic) {
+      LOG(INFO) << "Assuming target build supports dynamic partitions for "
+                   "partial updates.";
+      is_target_dynamic = true;
+    }
+
+    // Partial updates require Virtual A/B. Double-check that both the current
+    // build and the target build support Virtual A/B.
+    if (!GetVirtualAbFeatureFlag().IsEnabled()) {
+      LOG(ERROR) << "Partial update cannot be applied on a device that does "
+                    "not support snapshots.";
+      return false;
+    }
+    if (!target_supports_snapshot) {
+      LOG(ERROR) << "Cannot apply partial update to a build that does not "
+                    "support snapshots.";
+      return false;
+    }
+  }
+
+  // Store the flags.
+  is_target_dynamic_ = is_target_dynamic;
+  // If !is_target_dynamic_, leave target_supports_snapshot_ unset because
+  // snapshots would not work without dynamic partitions.
+  if (is_target_dynamic_) {
+    target_supports_snapshot_ = target_supports_snapshot;
+  }
+  return true;
 }
 
 namespace {
@@ -721,9 +807,7 @@
   }
 
   std::string device_dir_str;
-  if (!GetDeviceDir(&device_dir_str)) {
-    return false;
-  }
+  TEST_AND_RETURN_FALSE(GetDeviceDir(&device_dir_str));
   base::FilePath device_dir(device_dir_str);
   auto source_device =
       device_dir.Append(GetSuperPartitionName(source_slot)).value();
@@ -740,21 +824,125 @@
         DeleteSourcePartitions(builder.get(), source_slot, manifest));
   }
 
-  if (!UpdatePartitionMetadata(builder.get(), target_slot, manifest)) {
-    return false;
-  }
+  TEST_AND_RETURN_FALSE(
+      UpdatePartitionMetadata(builder.get(), target_slot, manifest));
 
   auto target_device =
       device_dir.Append(GetSuperPartitionName(target_slot)).value();
   return StoreMetadata(target_device, builder.get(), target_slot);
 }
 
+DynamicPartitionControlAndroid::SpaceLimit
+DynamicPartitionControlAndroid::GetSpaceLimit(bool use_snapshot) {
+  // On a device retrofitting dynamic partitions, allocatable_space = "super",
+  // where "super" is the sum of all block devices for that slot. Since block
+  // devices are dedicated to the corresponding slot, there's no need to halve
+  // the allocatable space.
+  if (GetDynamicPartitionsFeatureFlag().IsRetrofit())
+    return SpaceLimit::ERROR_IF_EXCEEDED_SUPER;
+
+  // On a device launching dynamic partitions without VAB, regardless of
+  // recovery sideload, the super partition must be big enough to hold both the
+  // A and B slots of groups. Hence,
+  // allocatable_space = super / 2
+  if (!GetVirtualAbFeatureFlag().IsEnabled())
+    return SpaceLimit::ERROR_IF_EXCEEDED_HALF_OF_SUPER;
+
+  // The source build supports VAB. The super partition must be big enough to
+  // hold one slot of groups (ERROR_IF_EXCEEDED_SUPER). However, there are
+  // cases where additional warning messages need to be written.
+
+  // If using snapshot updates, implying that target build also uses VAB,
+  // allocatable_space = super
+  if (use_snapshot)
+    return SpaceLimit::ERROR_IF_EXCEEDED_SUPER;
+
+  // The source build supports VAB but is not using snapshot updates. There are
+  // several cases, as listed below.
+  // Sideloading: allocatable_space = super.
+  if (IsRecovery())
+    return SpaceLimit::ERROR_IF_EXCEEDED_SUPER;
+
+  // On launch VAB device, this implies secondary payload.
+  // Technically, we don't have to check anything, but sum(groups) < super
+  // still applies.
+  if (!GetVirtualAbFeatureFlag().IsRetrofit())
+    return SpaceLimit::ERROR_IF_EXCEEDED_SUPER;
+
+  // On retrofit VAB device, either of the following:
+  // - downgrading: allocatable_space = super / 2
+  // - secondary payload: don't check anything
+  // These two cases are indistinguishable,
+  // hence emit warning if sum(groups) > super / 2
+  return SpaceLimit::WARN_IF_EXCEEDED_HALF_OF_SUPER;
+}
+
+bool DynamicPartitionControlAndroid::CheckSuperPartitionAllocatableSpace(
+    android::fs_mgr::MetadataBuilder* builder,
+    const DeltaArchiveManifest& manifest,
+    bool use_snapshot) {
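+  // Sum the maximum sizes declared for all dynamic partition groups.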
+  uint64_t sum_groups = 0;
+  for (const auto& group : manifest.dynamic_partition_metadata().groups()) {
+    sum_groups += group.size();
+  }
+
+  uint64_t full_space = builder->AllocatableSpace();
+  uint64_t half_space = full_space / 2;
+  constexpr const char* fmt =
+      "The maximum size of all groups for the target slot (%" PRIu64
+      ") has exceeded %sallocatable space for dynamic partitions %" PRIu64 ".";
+  switch (GetSpaceLimit(use_snapshot)) {
+    case SpaceLimit::ERROR_IF_EXCEEDED_HALF_OF_SUPER: {
+      if (sum_groups > half_space) {
+        LOG(ERROR) << StringPrintf(fmt, sum_groups, "HALF OF ", half_space);
+        return false;
+      }
+      // If this check passes, the following two checks also pass.
+      break;
+    }
+    case SpaceLimit::WARN_IF_EXCEEDED_HALF_OF_SUPER: {
+      if (sum_groups > half_space) {
+        LOG(WARNING) << StringPrintf(fmt, sum_groups, "HALF OF ", half_space)
+                     << " This is allowed for downgrade or secondary OTA on "
+                        "a retrofit VAB device.";
+      }
+      // still check sum(groups) < super
+      [[fallthrough]];
+    }
+    case SpaceLimit::ERROR_IF_EXCEEDED_SUPER: {
+      if (sum_groups > full_space) {
+        LOG(ERROR) << base::StringPrintf(fmt, sum_groups, "", full_space);
+        return false;
+      }
+      break;
+    }
+  }
+
+  return true;
+}
+
 bool DynamicPartitionControlAndroid::PrepareSnapshotPartitionsForUpdate(
     uint32_t source_slot,
     uint32_t target_slot,
     const DeltaArchiveManifest& manifest,
     uint64_t* required_size) {
   TEST_AND_RETURN_FALSE(ExpectMetadataMounted());
+
+  std::string device_dir_str;
+  TEST_AND_RETURN_FALSE(GetDeviceDir(&device_dir_str));
+  base::FilePath device_dir(device_dir_str);
+  auto super_device =
+      device_dir.Append(GetSuperPartitionName(source_slot)).value();
+  auto builder = LoadMetadataBuilder(super_device, source_slot);
+  if (builder == nullptr) {
+    LOG(ERROR) << "No metadata at "
+               << BootControlInterface::SlotName(source_slot);
+    return false;
+  }
+
+  TEST_AND_RETURN_FALSE(
+      CheckSuperPartitionAllocatableSpace(builder.get(), manifest, true));
+
   if (!snapshot_->BeginUpdate()) {
     LOG(ERROR) << "Cannot begin new update.";
     return false;
@@ -780,6 +968,18 @@
     MetadataBuilder* builder,
     uint32_t target_slot,
     const DeltaArchiveManifest& manifest) {
+  // Check preconditions.
+  if (GetVirtualAbFeatureFlag().IsEnabled()) {
+    CHECK(!target_supports_snapshot_ || IsRecovery())
+        << "Must use snapshot on VAB device when target build supports VAB and "
+           "not sideloading.";
+    LOG_IF(INFO, !target_supports_snapshot_)
+        << "Not using snapshot on VAB device because target build does not "
+           "support snapshot. Secondary or downgrade OTA?";
+    LOG_IF(INFO, IsRecovery())
+        << "Not using snapshot on VAB device because sideloading.";
+  }
+
   // If applying downgrade from Virtual A/B to non-Virtual A/B, the left-over
   // COW group needs to be deleted to ensure there are enough space to create
   // target partitions.
@@ -788,24 +988,8 @@
   const std::string target_suffix = SlotSuffixForSlotNumber(target_slot);
   DeleteGroupsWithSuffix(builder, target_suffix);
 
-  uint64_t total_size = 0;
-  for (const auto& group : manifest.dynamic_partition_metadata().groups()) {
-    total_size += group.size();
-  }
-
-  std::string expr;
-  uint64_t allocatable_space = builder->AllocatableSpace();
-  if (!GetDynamicPartitionsFeatureFlag().IsRetrofit()) {
-    allocatable_space /= 2;
-    expr = "half of ";
-  }
-  if (total_size > allocatable_space) {
-    LOG(ERROR) << "The maximum size of all groups with suffix " << target_suffix
-               << " (" << total_size << ") has exceeded " << expr
-               << "allocatable space for dynamic partitions "
-               << allocatable_space << ".";
-    return false;
-  }
+  TEST_AND_RETURN_FALSE(
+      CheckSuperPartitionAllocatableSpace(builder, manifest, false));
 
   // name of partition(e.g. "system") -> size in bytes
   std::map<std::string, uint64_t> partition_sizes;
@@ -849,6 +1033,12 @@
                    << " to size " << partition_size << ". Not enough space?";
         return false;
       }
+      if (p->size() < partition_size) {
+        LOG(ERROR) << "Partition " << partition_name_suffix
+                   << " was expected to have size " << partition_size
+                   << ", but instead has size " << p->size();
+        return false;
+      }
       LOG(INFO) << "Added partition " << partition_name_suffix << " to group "
                 << group_name_suffix << " with size " << partition_size;
     }
@@ -874,38 +1064,99 @@
     const std::string& partition_name,
     uint32_t slot,
     uint32_t current_slot,
+    bool not_in_payload,
+    std::string* device,
+    bool* is_dynamic) {
+  auto partition_dev =
+      GetPartitionDevice(partition_name, slot, current_slot, not_in_payload);
+  if (!partition_dev.has_value()) {
+    return false;
+  }
+  if (device) {
+    *device = std::move(partition_dev->rw_device_path);
+  }
+  if (is_dynamic) {
+    *is_dynamic = partition_dev->is_dynamic;
+  }
+  return true;
+}
+
+bool DynamicPartitionControlAndroid::GetPartitionDevice(
+    const std::string& partition_name,
+    uint32_t slot,
+    uint32_t current_slot,
     std::string* device) {
+  return GetPartitionDevice(
+      partition_name, slot, current_slot, false, device, nullptr);
+}
+
+static std::string GetStaticDevicePath(
+    const base::FilePath& device_dir,
+    const std::string& partition_name_suffixed) {
+  base::FilePath path = device_dir.Append(partition_name_suffixed);
+  return path.value();
+}
+
+std::optional<PartitionDevice>
+DynamicPartitionControlAndroid::GetPartitionDevice(
+    const std::string& partition_name,
+    uint32_t slot,
+    uint32_t current_slot,
+    bool not_in_payload) {
+  std::string device_dir_str;
+  if (!GetDeviceDir(&device_dir_str)) {
+    LOG(ERROR) << "Failed to GetDeviceDir()";
+    return {};
+  }
+  const base::FilePath device_dir(device_dir_str);
+  // When VABC is enabled, we can't get the device path for dynamic partitions
+  // in the target slot.
   const auto& partition_name_suffix =
       partition_name + SlotSuffixForSlotNumber(slot);
-  std::string device_dir_str;
-  TEST_AND_RETURN_FALSE(GetDeviceDir(&device_dir_str));
-  base::FilePath device_dir(device_dir_str);
+  if (UpdateUsesSnapshotCompression() && slot != current_slot &&
+      IsDynamicPartition(partition_name, slot)) {
+    return {
+        {.readonly_device_path = base::FilePath{std::string{VABC_DEVICE_DIR}}
+                                     .Append(partition_name_suffix)
+                                     .value(),
+         .is_dynamic = true}};
+  }
 
   // When looking up target partition devices, treat them as static if the
   // current payload doesn't encode them as dynamic partitions. This may happen
   // when applying a retrofit update on top of a dynamic-partitions-enabled
   // build.
+  std::string device;
   if (GetDynamicPartitionsFeatureFlag().IsEnabled() &&
       (slot == current_slot || is_target_dynamic_)) {
-    switch (GetDynamicPartitionDevice(
-        device_dir, partition_name_suffix, slot, current_slot, device)) {
+    switch (GetDynamicPartitionDevice(device_dir,
+                                      partition_name_suffix,
+                                      slot,
+                                      current_slot,
+                                      not_in_payload,
+                                      &device)) {
       case DynamicPartitionDeviceStatus::SUCCESS:
-        return true;
+        return {{.rw_device_path = device,
+                 .readonly_device_path = device,
+                 .is_dynamic = true}};
+
       case DynamicPartitionDeviceStatus::TRY_STATIC:
         break;
       case DynamicPartitionDeviceStatus::ERROR:  // fallthrough
       default:
-        return false;
+        return {};
     }
   }
-  base::FilePath path = device_dir.Append(partition_name_suffix);
-  if (!DeviceExists(path.value())) {
-    LOG(ERROR) << "Device file " << path.value() << " does not exist.";
-    return false;
+  // Try static partitions.
+  auto static_path = GetStaticDevicePath(device_dir, partition_name_suffix);
+  if (!DeviceExists(static_path)) {
+    LOG(ERROR) << "Device file " << static_path << " does not exist.";
+    return {};
   }
 
-  *device = path.value();
-  return true;
+  return {{.rw_device_path = static_path,
+           .readonly_device_path = static_path,
+           .is_dynamic = false}};
 }
 
 bool DynamicPartitionControlAndroid::IsSuperBlockDevice(
@@ -924,6 +1175,7 @@
     const std::string& partition_name_suffix,
     uint32_t slot,
     uint32_t current_slot,
+    bool not_in_payload,
     std::string* device) {
   std::string super_device =
       device_dir.Append(GetSuperPartitionName(slot)).value();
@@ -963,7 +1215,7 @@
     }
   }
 
-  bool force_writable = slot != current_slot;
+  bool force_writable = (slot != current_slot) && !not_in_payload;
   if (MapPartitionOnDeviceMapper(
           super_device, partition_name_suffix, slot, force_writable, device)) {
     return DynamicPartitionDeviceStatus::SUCCESS;
@@ -977,7 +1229,7 @@
 }
 
 bool DynamicPartitionControlAndroid::IsRecovery() {
-  return kIsRecovery;
+  return constants::kIsRecovery;
 }
 
 static bool IsIncrementalUpdate(const DeltaArchiveManifest& manifest) {
@@ -1005,7 +1257,7 @@
 
   LOG(INFO) << "Will overwrite existing partitions. Slot "
             << BootControlInterface::SlotName(source_slot)
-            << "may be unbootable until update finishes!";
+            << " may be unbootable until update finishes!";
   const std::string source_suffix = SlotSuffixForSlotNumber(source_slot);
   DeleteGroupsWithSuffix(builder, source_suffix);
 
@@ -1049,6 +1301,76 @@
   return true;
 }
 
+bool DynamicPartitionControlAndroid::ListDynamicPartitionsForSlot(
+    uint32_t slot,
+    uint32_t current_slot,
+    std::vector<std::string>* partitions) {
+  CHECK(slot == source_slot_ || target_slot_ != UINT32_MAX)
+      << " source slot: " << source_slot_ << " target slot: " << target_slot_
+      << " slot: " << slot
+      << " attempting to query dynamic partition metadata for target slot "
+         "before PreparePartitionForUpdate() is called. The "
+         "metadata in target slot isn't valid until "
+         "PreparePartitionForUpdate() is called, contining execution would "
+         "likely cause problems.";
+  bool slot_enables_dynamic_partitions =
+      GetDynamicPartitionsFeatureFlag().IsEnabled();
+  // Check if the target slot has dynamic partitions, this may happen when
+  // applying a retrofit package.
+  if (slot != current_slot) {
+    slot_enables_dynamic_partitions =
+        slot_enables_dynamic_partitions && is_target_dynamic_;
+  }
+
+  if (!slot_enables_dynamic_partitions) {
+    LOG(INFO) << "Dynamic partition is not enabled for slot " << slot;
+    return true;
+  }
+
+  std::string device_dir_str;
+  TEST_AND_RETURN_FALSE(GetDeviceDir(&device_dir_str));
+  base::FilePath device_dir(device_dir_str);
+  auto super_device = device_dir.Append(GetSuperPartitionName(slot)).value();
+  auto builder = LoadMetadataBuilder(super_device, slot);
+  TEST_AND_RETURN_FALSE(builder != nullptr);
+
+  std::vector<std::string> result;
+  auto suffix = SlotSuffixForSlotNumber(slot);
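+  // Collect partitions carrying this slot's suffix; return unsuffixed names.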
+  for (const auto& group : builder->ListGroups()) {
+    for (const auto& partition : builder->ListPartitionsInGroup(group)) {
+      std::string_view partition_name = partition->name();
+      if (!android::base::ConsumeSuffix(&partition_name, suffix)) {
+        continue;
+      }
+      result.emplace_back(partition_name);
+    }
+  }
+  *partitions = std::move(result);
+  return true;
+}
+
+bool DynamicPartitionControlAndroid::VerifyExtentsForUntouchedPartitions(
+    uint32_t source_slot,
+    uint32_t target_slot,
+    const std::vector<std::string>& partitions) {
+  std::string device_dir_str;
+  TEST_AND_RETURN_FALSE(GetDeviceDir(&device_dir_str));
+  base::FilePath device_dir(device_dir_str);
+
+  auto source_super_device =
+      device_dir.Append(GetSuperPartitionName(source_slot)).value();
+  auto source_builder = LoadMetadataBuilder(source_super_device, source_slot);
+  TEST_AND_RETURN_FALSE(source_builder != nullptr);
+
+  auto target_super_device =
+      device_dir.Append(GetSuperPartitionName(target_slot)).value();
+  auto target_builder = LoadMetadataBuilder(target_super_device, target_slot);
+  TEST_AND_RETURN_FALSE(target_builder != nullptr);
+
+  return MetadataBuilder::VerifyExtentsAgainstSourceMetadata(
+      *source_builder, source_slot, *target_builder, target_slot, partitions);
+}
+
 bool DynamicPartitionControlAndroid::ExpectMetadataMounted() {
   // No need to mount metadata for non-Virtual A/B devices.
   if (!GetVirtualAbFeatureFlag().IsEnabled()) {
@@ -1076,4 +1398,80 @@
   return metadata_device_ != nullptr;
 }
 
+std::unique_ptr<android::snapshot::ISnapshotWriter>
+DynamicPartitionControlAndroid::OpenCowWriter(
+    const std::string& partition_name,
+    const std::optional<std::string>& source_path,
+    bool is_append) {
+  auto suffix = SlotSuffixForSlotNumber(target_slot_);
+
+  auto super_device = GetSuperDevice();
+  if (!super_device.has_value()) {
+    return nullptr;
+  }
+  CreateLogicalPartitionParams params = {
+      .block_device = super_device->value(),
+      .metadata_slot = target_slot_,
+      .partition_name = partition_name + suffix,
+      .force_writable = true,
+      .timeout_ms = kMapSnapshotTimeout};
+  // TODO(zhangkelvin) Open an APPEND mode CowWriter once there's an API to do
+  // it.
+  return snapshot_->OpenSnapshotWriter(params, std::move(source_path));
+}
+
+FileDescriptorPtr DynamicPartitionControlAndroid::OpenCowFd(
+    const std::string& unsuffixed_partition_name,
+    const std::optional<std::string>& source_path,
+    bool is_append) {
+  auto cow_writer =
+      OpenCowWriter(unsuffixed_partition_name, source_path, is_append);
+  if (cow_writer == nullptr) {
+    return nullptr;
+  }
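+  // Initialize the writer in append mode at the end-of-install label.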
+  if (!cow_writer->InitializeAppend(kEndOfInstallLabel)) {
+    return nullptr;
+  }
+  return std::make_shared<CowWriterFileDescriptor>(std::move(cow_writer));
+}
+
+std::optional<base::FilePath> DynamicPartitionControlAndroid::GetSuperDevice() {
+  std::string device_dir_str;
+  if (!GetDeviceDir(&device_dir_str)) {
+    LOG(ERROR) << "Failed to get device dir!";
+    return {};
+  }
+  base::FilePath device_dir(device_dir_str);
+  auto super_device = device_dir.Append(GetSuperPartitionName(target_slot_));
+  return super_device;
+}
+
+bool DynamicPartitionControlAndroid::MapAllPartitions() {
+  return snapshot_->MapAllSnapshots(kMapSnapshotTimeout);
+}
+
+bool DynamicPartitionControlAndroid::IsDynamicPartition(
+    const std::string& partition_name, uint32_t slot) {
+  if (slot >= dynamic_partition_list_.size()) {
+    LOG(ERROR) << "Seeing unexpected slot # " << slot << " currently assuming "
+               << dynamic_partition_list_.size() << " slots";
+    return false;
+  }
+  auto& dynamic_partition_list = dynamic_partition_list_[slot];
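+  // Lazily populate the cached dynamic partition list for this slot.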
+  if (dynamic_partition_list.empty() &&
+      GetDynamicPartitionsFeatureFlag().IsEnabled()) {
+    // Use the DAP config of the target slot.
+    CHECK(ListDynamicPartitionsForSlot(
+        slot, source_slot_, &dynamic_partition_list));
+  }
+  return std::find(dynamic_partition_list.begin(),
+                   dynamic_partition_list.end(),
+                   partition_name) != dynamic_partition_list.end();
+}
+
+bool DynamicPartitionControlAndroid::UpdateUsesSnapshotCompression() {
+  return GetVirtualAbFeatureFlag().IsEnabled() &&
+         snapshot_->UpdateUsesCompression();
+}
+
 }  // namespace chromeos_update_engine
diff --git a/dynamic_partition_control_android.h b/aosp/dynamic_partition_control_android.h
similarity index 68%
rename from dynamic_partition_control_android.h
rename to aosp/dynamic_partition_control_android.h
index 8ad7593..df91401 100644
--- a/dynamic_partition_control_android.h
+++ b/aosp/dynamic_partition_control_android.h
@@ -14,16 +14,19 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_ANDROID_H_
-#define UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_ANDROID_H_
+#ifndef UPDATE_ENGINE_AOSP_DYNAMIC_PARTITION_CONTROL_ANDROID_H_
+#define UPDATE_ENGINE_AOSP_DYNAMIC_PARTITION_CONTROL_ANDROID_H_
 
 #include <memory>
 #include <set>
 #include <string>
+#include <string_view>
+#include <vector>
 
 #include <base/files/file_util.h>
 #include <libsnapshot/auto_device.h>
 #include <libsnapshot/snapshot.h>
+#include <libsnapshot/snapshot_writer.h>
 
 #include "update_engine/common/dynamic_partition_control_interface.h"
 
@@ -31,10 +34,16 @@
 
 class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface {
  public:
-  DynamicPartitionControlAndroid();
+  // The directory where all partitions mapped by VABC are expected to be
+  // found. Per earlier discussion with the VAB team, this directory is
+  // unlikely to change, so we declare it as a constant here.
+  static constexpr std::string_view VABC_DEVICE_DIR = "/dev/block/mapper/";
+  explicit DynamicPartitionControlAndroid(uint32_t source_slot);
   ~DynamicPartitionControlAndroid();
+
   FeatureFlag GetDynamicPartitionsFeatureFlag() override;
   FeatureFlag GetVirtualAbFeatureFlag() override;
+  FeatureFlag GetVirtualAbCompressionFeatureFlag() override;
   bool OptimizeOperation(const std::string& partition_name,
                          const InstallOperation& operation,
                          InstallOperation* optimized) override;
@@ -53,16 +62,58 @@
 
   bool ResetUpdate(PrefsInterface* prefs) override;
 
+  bool ListDynamicPartitionsForSlot(
+      uint32_t slot,
+      uint32_t current_slot,
+      std::vector<std::string>* partitions) override;
+
+  bool VerifyExtentsForUntouchedPartitions(
+      uint32_t source_slot,
+      uint32_t target_slot,
+      const std::vector<std::string>& partitions) override;
+
+  bool GetDeviceDir(std::string* path) override;
+
   // Return the device for partition |partition_name| at slot |slot|.
   // |current_slot| should be set to the current active slot.
   // Note: this function is only used by BootControl*::GetPartitionDevice.
   // Other callers should prefer BootControl*::GetPartitionDevice over
   // BootControl*::GetDynamicPartitionControl()->GetPartitionDevice().
+  std::optional<PartitionDevice> GetPartitionDevice(
+      const std::string& partition_name,
+      uint32_t slot,
+      uint32_t current_slot,
+      bool not_in_payload);
+  // Deprecated, please use GetPartitionDevice(string, uint32_t, uint32_t);
+  // TODO(zhangkelvin) Remove below deprecated APIs.
+  bool GetPartitionDevice(const std::string& partition_name,
+                          uint32_t slot,
+                          uint32_t current_slot,
+                          bool not_in_payload,
+                          std::string* device,
+                          bool* is_dynamic);
+
   bool GetPartitionDevice(const std::string& partition_name,
                           uint32_t slot,
                           uint32_t current_slot,
                           std::string* device);
 
+  // The partition name is expected to be unsuffixed, e.g. system, vendor.
+  // Returns an interface for writing to a snapshotted partition.
+  std::unique_ptr<android::snapshot::ISnapshotWriter> OpenCowWriter(
+      const std::string& unsuffixed_partition_name,
+      const std::optional<std::string>& source_path,
+      bool is_append) override;
+  FileDescriptorPtr OpenCowFd(const std::string& unsuffixed_partition_name,
+                              const std::optional<std::string>&,
+                              bool is_append = false) override;
+
+  bool UnmapAllPartitions() override;
+
+  bool IsDynamicPartition(const std::string& part_name, uint32_t slot) override;
+
+  bool UpdateUsesSnapshotCompression() override;
+
  protected:
   // These functions are exposed for testing.
 
@@ -72,16 +123,14 @@
   virtual bool UnmapPartitionOnDeviceMapper(
       const std::string& target_partition_name);
 
-  // Retrieve metadata from |super_device| at slot |source_slot|.
-  //
-  // If |target_slot| != kInvalidSlot, before returning the metadata, this
-  // function modifies the metadata so that during updates, the metadata can be
-  // written to |target_slot|. In particular, on retrofit devices, the returned
+  // Retrieves metadata from |super_device| at slot |slot|.
+  virtual std::unique_ptr<android::fs_mgr::MetadataBuilder> LoadMetadataBuilder(
+      const std::string& super_device, uint32_t slot);
+
+  // Retrieves metadata from |super_device| at slot |source_slot|, and
+  // modifies the metadata so that during updates, the metadata can be written
+  // to |target_slot|. In particular, on retrofit devices, the returned
   // metadata automatically includes block devices at |target_slot|.
-  //
-  // If |target_slot| == kInvalidSlot, this function returns metadata at
-  // |source_slot| without modifying it. This is the same as
-  // LoadMetadataBuilder(const std::string&, uint32_t).
   virtual std::unique_ptr<android::fs_mgr::MetadataBuilder> LoadMetadataBuilder(
       const std::string& super_device,
       uint32_t source_slot,
@@ -120,13 +169,6 @@
   virtual bool GetDmDevicePathByName(const std::string& name,
                                      std::string* path);
 
-  // Retrieve metadata from |super_device| at slot |source_slot|.
-  virtual std::unique_ptr<android::fs_mgr::MetadataBuilder> LoadMetadataBuilder(
-      const std::string& super_device, uint32_t source_slot);
-
-  // Return a possible location for devices listed by name.
-  virtual bool GetDeviceDir(std::string* path);
-
   // Return the name of the super partition (which stores super partition
   // metadata) for a given slot.
   virtual std::string GetSuperPartitionName(uint32_t slot);
@@ -173,38 +215,68 @@
   virtual bool EraseSystemOtherAvbFooter(uint32_t source_slot,
                                          uint32_t target_slot);
 
+  // Helper for PreparePartitionsForUpdate. Used for devices with dynamic
+  // partitions updating without snapshots.
+  // If |delete_source| is set, source partitions are deleted before resizing
+  // target partitions (using DeleteSourcePartitions).
+  virtual bool PrepareDynamicPartitionsForUpdate(
+      uint32_t source_slot,
+      uint32_t target_slot,
+      const DeltaArchiveManifest& manifest,
+      bool delete_source);
+
+  bool MapAllPartitions() override;
+
+  void SetSourceSlot(uint32_t slot) { source_slot_ = slot; }
+  void SetTargetSlot(uint32_t slot) { target_slot_ = slot; }
+
  private:
   friend class DynamicPartitionControlAndroidTest;
+  friend class SnapshotPartitionTestP;
 
-  void UnmapAllPartitions();
+  std::optional<base::FilePath> GetSuperDevice();
+
   bool MapPartitionInternal(const std::string& super_device,
                             const std::string& target_partition_name,
                             uint32_t slot,
                             bool force_writable,
                             std::string* path);
 
-  // Update |builder| according to |partition_metadata|, assuming the device
-  // does not have Virtual A/B.
+  // Update |builder| according to |partition_metadata|.
+  // - In Android mode, this is only called when the device
+  //   does not have Virtual A/B.
+  // - When sideloading, this may be called as a fallback path if CoW cannot
+  //   be created.
   bool UpdatePartitionMetadata(android::fs_mgr::MetadataBuilder* builder,
                                uint32_t target_slot,
                                const DeltaArchiveManifest& manifest);
 
-  // Helper for PreparePartitionsForUpdate. Used for devices with dynamic
-  // partitions updating without snapshots.
-  // If |delete_source| is set, source partitions are deleted before resizing
-  // target partitions (using DeleteSourcePartitions).
-  bool PrepareDynamicPartitionsForUpdate(uint32_t source_slot,
-                                         uint32_t target_slot,
-                                         const DeltaArchiveManifest& manifest,
-                                         bool delete_source);
-
-  // Helper for PreparePartitionsForUpdate. Used for snapshotted partitions for
-  // Virtual A/B update.
+  // Helper for PreparePartitionsForUpdate. Used for snapshotted partitions
+  // for Virtual A/B update.
   bool PrepareSnapshotPartitionsForUpdate(uint32_t source_slot,
                                           uint32_t target_slot,
                                           const DeltaArchiveManifest& manifest,
                                           uint64_t* required_size);
 
+  enum SpaceLimit {
+    // Most restricted: if sum(groups) > super / 2, error
+    ERROR_IF_EXCEEDED_HALF_OF_SUPER,
+    // Implies ERROR_IF_EXCEEDED_SUPER; then, if sum(groups) > super / 2, warn
+    WARN_IF_EXCEEDED_HALF_OF_SUPER,
+    // Least restricted: if sum(groups) > super, error
+    ERROR_IF_EXCEEDED_SUPER,
+  };
+  // Helper for CheckSuperPartitionAllocatableSpace. Determines the limit for
+  // groups and partitions.
+  SpaceLimit GetSpaceLimit(bool use_snapshot);
+
+  // Returns true if the allocatable space in the super partition is larger
+  // than the total size of the dynamic partition groups in the manifest.
+  bool CheckSuperPartitionAllocatableSpace(
+      android::fs_mgr::MetadataBuilder* builder,
+      const DeltaArchiveManifest& manifest,
+      bool use_snapshot);
+
   enum class DynamicPartitionDeviceStatus {
     SUCCESS,
     ERROR,
@@ -220,6 +292,7 @@
       const std::string& partition_name_suffix,
       uint32_t slot,
       uint32_t current_slot,
+      bool not_in_payload,
       std::string* device);
 
   // Return true if |partition_name_suffix| is a block device of
@@ -236,17 +309,20 @@
   // Returns true if metadata is expected to be mounted, false otherwise.
   // Note that it returns false on non-Virtual A/B devices.
   //
-  // Almost all functions of SnapshotManager depends on metadata being mounted.
+  // Almost all functions of SnapshotManager depend on metadata being
+  // mounted.
   // - In Android mode for Virtual A/B devices, assume it is mounted. If not,
   //   let the caller fail when calling into SnapshotManager.
-  // - In recovery for Virtual A/B devices, it is possible that metadata is not
+  // - In recovery for Virtual A/B devices, it is possible that metadata is
+  //   not
   //   formatted, hence it cannot be mounted. Caller should not call into
   //   SnapshotManager.
-  // - On non-Virtual A/B devices, updates do not depend on metadata partition.
+  // - On non-Virtual A/B devices, updates do not depend on metadata
+  //   partition.
   //   Caller should not call into SnapshotManager.
   //
-  // This function does NOT mount metadata partition. Use EnsureMetadataMounted
-  // to mount metadata partition.
+  // This function does NOT mount metadata partition. Use
+  // EnsureMetadataMounted to mount metadata partition.
   bool ExpectMetadataMounted();
 
   // Ensure /metadata is mounted. Returns true if successful, false otherwise.
@@ -255,21 +331,28 @@
   // doing anything.
   bool EnsureMetadataMounted();
 
+  // Set boolean flags related to target build. This includes flags like
+  // target_supports_snapshot_ and is_target_dynamic_.
+  bool SetTargetBuildVars(const DeltaArchiveManifest& manifest);
+
   std::set<std::string> mapped_devices_;
   const FeatureFlag dynamic_partitions_;
   const FeatureFlag virtual_ab_;
-  std::unique_ptr<android::snapshot::SnapshotManager> snapshot_;
+  const FeatureFlag virtual_ab_compression_;
+  std::unique_ptr<android::snapshot::ISnapshotManager> snapshot_;
   std::unique_ptr<android::snapshot::AutoDevice> metadata_device_;
   bool target_supports_snapshot_ = false;
   // Whether the target partitions should be loaded as dynamic partitions. Set
   // by PreparePartitionsForUpdate() per each update.
   bool is_target_dynamic_ = false;
+
   uint32_t source_slot_ = UINT32_MAX;
   uint32_t target_slot_ = UINT32_MAX;
+  std::vector<std::vector<std::string>> dynamic_partition_list_{2UL};
 
   DISALLOW_COPY_AND_ASSIGN(DynamicPartitionControlAndroid);
 };
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_ANDROID_H_
+#endif  // UPDATE_ENGINE_AOSP_DYNAMIC_PARTITION_CONTROL_ANDROID_H_
diff --git a/dynamic_partition_control_android_unittest.cc b/aosp/dynamic_partition_control_android_unittest.cc
similarity index 75%
rename from dynamic_partition_control_android_unittest.cc
rename to aosp/dynamic_partition_control_android_unittest.cc
index 2081918..6f1d4ef 100644
--- a/dynamic_partition_control_android_unittest.cc
+++ b/aosp/dynamic_partition_control_android_unittest.cc
@@ -14,8 +14,9 @@
 // limitations under the License.
 //
 
-#include "update_engine/dynamic_partition_control_android.h"
+#include "update_engine/aosp/dynamic_partition_control_android.h"
 
+#include <algorithm>
 #include <set>
 #include <vector>
 
@@ -24,19 +25,21 @@
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
 #include <libavb/libavb.h>
+#include <libsnapshot/mock_snapshot.h>
 
+#include "update_engine/aosp/dynamic_partition_test_utils.h"
+#include "update_engine/aosp/mock_dynamic_partition_control_android.h"
 #include "update_engine/common/mock_prefs.h"
 #include "update_engine/common/test_utils.h"
-#include "update_engine/dynamic_partition_test_utils.h"
-#include "update_engine/mock_dynamic_partition_control.h"
 
 using android::dm::DmDeviceState;
+using android::snapshot::MockSnapshotManager;
 using chromeos_update_engine::test_utils::ScopedLoopbackDeviceBinder;
-using chromeos_update_engine::test_utils::ScopedTempFile;
 using std::string;
 using testing::_;
 using testing::AnyNumber;
 using testing::AnyOf;
+using testing::AtLeast;
 using testing::Invoke;
 using testing::NiceMock;
 using testing::Not;
@@ -54,7 +57,10 @@
         .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::LAUNCH)));
     ON_CALL(dynamicControl(), GetVirtualAbFeatureFlag())
         .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::NONE)));
-
+    ON_CALL(dynamicControl(), GetVirtualAbCompressionFeatureFlag())
+        .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::NONE)));
+    ON_CALL(dynamicControl(), UpdateUsesSnapshotCompression())
+        .WillByDefault(Return(false));
     ON_CALL(dynamicControl(), GetDeviceDir(_))
         .WillByDefault(Invoke([](auto path) {
           *path = kFakeDevicePath;
@@ -72,6 +78,17 @@
 
     ON_CALL(dynamicControl(), EraseSystemOtherAvbFooter(_, _))
         .WillByDefault(Return(true));
+
+    ON_CALL(dynamicControl(), IsRecovery()).WillByDefault(Return(false));
+
+    ON_CALL(dynamicControl(), PrepareDynamicPartitionsForUpdate(_, _, _, _))
+        .WillByDefault(Invoke([&](uint32_t source_slot,
+                                  uint32_t target_slot,
+                                  const DeltaArchiveManifest& manifest,
+                                  bool delete_source) {
+          return dynamicControl().RealPrepareDynamicPartitionsForUpdate(
+              source_slot, target_slot, manifest, delete_source);
+        }));
   }
 
   // Return the mocked DynamicPartitionControlInterface.
@@ -100,13 +117,24 @@
   // |slot|.
   void SetMetadata(uint32_t slot,
                    const PartitionSuffixSizes& sizes,
-                   uint32_t partition_attr = 0) {
+                   uint32_t partition_attr = 0,
+                   uint64_t super_size = kDefaultSuperSize) {
+    EXPECT_CALL(dynamicControl(),
+                LoadMetadataBuilder(GetSuperDevice(slot), slot))
+        .Times(AnyNumber())
+        .WillRepeatedly(Invoke([=](auto, auto) {
+          return NewFakeMetadata(PartitionSuffixSizesToManifest(sizes),
+                                 partition_attr,
+                                 super_size);
+        }));
+
     EXPECT_CALL(dynamicControl(),
                 LoadMetadataBuilder(GetSuperDevice(slot), slot, _))
         .Times(AnyNumber())
-        .WillRepeatedly(Invoke([sizes, partition_attr](auto, auto, auto) {
+        .WillRepeatedly(Invoke([=](auto, auto, auto) {
           return NewFakeMetadata(PartitionSuffixSizesToManifest(sizes),
-                                 partition_attr);
+                                 partition_attr,
+                                 super_size);
         }));
   }
 
@@ -194,6 +222,8 @@
   void SetUp() override {
     DynamicPartitionControlAndroidTest::SetUp();
     SetSlots(GetParam());
+    dynamicControl().SetSourceSlot(source());
+    dynamicControl().SetTargetSlot(target());
   }
 };
 
@@ -363,6 +393,92 @@
   EXPECT_EQ(GetDevice(T("bar")), bar_device);
 }
 
+TEST_P(DynamicPartitionControlAndroidTestP, GetMountableDevicePath) {
+  ON_CALL(dynamicControl(), GetDynamicPartitionsFeatureFlag())
+      .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::LAUNCH)));
+  ON_CALL(dynamicControl(), GetVirtualAbFeatureFlag())
+      .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::LAUNCH)));
+  ON_CALL(dynamicControl(), GetVirtualAbCompressionFeatureFlag())
+      .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::NONE)));
+  ON_CALL(dynamicControl(), UpdateUsesSnapshotCompression())
+      .WillByDefault(Return(false));
+  ON_CALL(dynamicControl(), IsDynamicPartition(_, _))
+      .WillByDefault(Return(true));
+
+  EXPECT_CALL(dynamicControl(),
+              DeviceExists(AnyOf(GetDevice(S("vendor")),
+                                 GetDevice(T("vendor")),
+                                 GetDevice(S("system")),
+                                 GetDevice(T("system")))))
+      .WillRepeatedly(Return(true));
+  EXPECT_CALL(
+      dynamicControl(),
+      GetState(AnyOf(S("vendor"), T("vendor"), S("system"), T("system"))))
+      .WillRepeatedly(Return(DmDeviceState::ACTIVE));
+
+  SetMetadata(source(), {{S("system"), 2_GiB}, {S("vendor"), 1_GiB}});
+  SetMetadata(target(), {{T("system"), 2_GiB}, {T("vendor"), 1_GiB}});
+  std::string device;
+  ASSERT_TRUE(dynamicControl().GetPartitionDevice(
+      "system", source(), source(), &device));
+  ASSERT_EQ(GetDmDevice(S("system")), device);
+
+  ASSERT_TRUE(dynamicControl().GetPartitionDevice(
+      "system", target(), source(), &device));
+  ASSERT_EQ(GetDevice(T("system")), device);
+
+  // If VABC is disabled, the mountable device path should be the same as the
+  // device path.
+  auto device_info =
+      dynamicControl().GetPartitionDevice("system", target(), source(), false);
+  ASSERT_TRUE(device_info.has_value());
+  ASSERT_EQ(device_info->readonly_device_path, device);
+}
+
+TEST_P(DynamicPartitionControlAndroidTestP, GetMountableDevicePathVABC) {
+  ON_CALL(dynamicControl(), GetDynamicPartitionsFeatureFlag())
+      .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::LAUNCH)));
+  ON_CALL(dynamicControl(), GetVirtualAbFeatureFlag())
+      .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::LAUNCH)));
+  ON_CALL(dynamicControl(), GetVirtualAbCompressionFeatureFlag())
+      .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::LAUNCH)));
+  ON_CALL(dynamicControl(), UpdateUsesSnapshotCompression())
+      .WillByDefault(Return(true));
+  EXPECT_CALL(dynamicControl(), IsDynamicPartition(_, _))
+      .Times(AtLeast(1))
+      .WillRepeatedly(Return(true));
+
+  EXPECT_CALL(dynamicControl(),
+              DeviceExists(AnyOf(GetDevice(S("vendor")),
+                                 GetDevice(T("vendor")),
+                                 GetDevice(S("system")),
+                                 GetDevice(T("system")))))
+      .WillRepeatedly(Return(true));
+  EXPECT_CALL(
+      dynamicControl(),
+      GetState(AnyOf(S("vendor"), T("vendor"), S("system"), T("system"))))
+      .WillRepeatedly(Return(DmDeviceState::ACTIVE));
+
+  SetMetadata(source(), {{S("system"), 2_GiB}, {S("vendor"), 1_GiB}});
+  SetMetadata(target(), {{T("system"), 2_GiB}, {T("vendor"), 1_GiB}});
+
+  std::string device;
+  ASSERT_TRUE(dynamicControl().GetPartitionDevice(
+      "system", source(), source(), &device));
+  ASSERT_EQ(GetDmDevice(S("system")), device);
+
+  ASSERT_TRUE(dynamicControl().GetPartitionDevice(
+      "system", target(), source(), &device));
+  ASSERT_EQ("", device);
+
+  auto device_info =
+      dynamicControl().GetPartitionDevice("system", target(), source(), false);
+  ASSERT_TRUE(device_info.has_value());
+  base::FilePath vabc_device_dir{
+      std::string{DynamicPartitionControlAndroid::VABC_DEVICE_DIR}};
+  ASSERT_EQ(device_info->readonly_device_path,
+            vabc_device_dir.Append(T("system")).value());
+}
+
 TEST_P(DynamicPartitionControlAndroidTestP,
        GetPartitionDeviceWhenResumingUpdate) {
   // Static partition bar_{a,b} exists.
@@ -769,11 +885,11 @@
 }
 
 TEST_F(DynamicPartitionControlAndroidTest, IsAvbNotEnabledInFstab) {
-  // clang-format off
   std::string fstab_content =
-      "system /postinstall ext4 ro,nosuid,nodev,noexec slotselect_other,logical\n"  // NOLINT(whitespace/line_length)
-      "/dev/block/by-name/system /postinstall ext4 ro,nosuid,nodev,noexec slotselect_other\n";  // NOLINT(whitespace/line_length)
-  // clang-format on
+      "system /postinstall ext4 ro,nosuid,nodev,noexec "
+      "slotselect_other,logical\n"
+      "/dev/block/by-name/system /postinstall ext4 "
+      "ro,nosuid,nodev,noexec slotselect_other\n";
   ScopedTempFile fstab;
   ASSERT_TRUE(test_utils::WriteFileString(fstab.path(), fstab_content));
   ASSERT_THAT(dynamicControl().RealIsAvbEnabledInFstab(fstab.path()),
@@ -781,10 +897,9 @@
 }
 
 TEST_F(DynamicPartitionControlAndroidTest, IsAvbEnabledInFstab) {
-  // clang-format off
   std::string fstab_content =
-      "system /postinstall ext4 ro,nosuid,nodev,noexec slotselect_other,logical,avb_keys=/foo\n";  // NOLINT(whitespace/line_length)
-  // clang-format on
+      "system /postinstall ext4 ro,nosuid,nodev,noexec "
+      "slotselect_other,logical,avb_keys=/foo\n";
   ScopedTempFile fstab;
   ASSERT_TRUE(test_utils::WriteFileString(fstab.path(), fstab_content));
   ASSERT_THAT(dynamicControl().RealIsAvbEnabledInFstab(fstab.path()),
@@ -892,4 +1007,123 @@
   ASSERT_EQ(new_expected, device_content);
 }
 
+class FakeAutoDevice : public android::snapshot::AutoDevice {
+ public:
+  FakeAutoDevice() : AutoDevice("") {}
+};
+
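+// Fixture that injects a NiceMock<MockSnapshotManager> into the dynamic
+// partition controller and prepares a default manifest containing a 3 GiB
+// system partition and a 1 GiB vendor partition.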
+class SnapshotPartitionTestP : public DynamicPartitionControlAndroidTestP {
+ public:
+  void SetUp() override {
+    DynamicPartitionControlAndroidTestP::SetUp();
+    ON_CALL(dynamicControl(), GetVirtualAbFeatureFlag())
+        .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::LAUNCH)));
+
+    snapshot_ = new NiceMock<MockSnapshotManager>();
+    dynamicControl().snapshot_.reset(snapshot_);  // takes ownership
+    EXPECT_CALL(*snapshot_, BeginUpdate()).WillOnce(Return(true));
+    EXPECT_CALL(*snapshot_, EnsureMetadataMounted())
+        .WillRepeatedly(
+            Invoke([]() { return std::make_unique<FakeAutoDevice>(); }));
+
+    manifest_ =
+        PartitionSizesToManifest({{"system", 3_GiB}, {"vendor", 1_GiB}});
+  }
+  void ExpectCreateUpdateSnapshots(android::snapshot::Return val) {
+    manifest_.mutable_dynamic_partition_metadata()->set_snapshot_enabled(true);
+    EXPECT_CALL(*snapshot_, CreateUpdateSnapshots(_))
+        .WillRepeatedly(Invoke([&, val](const auto& manifest) {
+          // Deep comparison requires full protobuf library. Comparing the
+          // pointers are sufficient.
+          EXPECT_EQ(&manifest_, &manifest);
+          LOG(WARNING) << "CreateUpdateSnapshots returning " << val.string();
+          return val;
+        }));
+  }
+  bool PreparePartitionsForUpdate(uint64_t* required_size) {
+    return dynamicControl().PreparePartitionsForUpdate(
+        source(), target(), manifest_, true /* update */, required_size);
+  }
+  MockSnapshotManager* snapshot_ = nullptr;
+  DeltaArchiveManifest manifest_;
+};
+
+// Test happy path of PreparePartitionsForUpdate on a Virtual A/B device.
+TEST_P(SnapshotPartitionTestP, PreparePartitions) {
+  ExpectCreateUpdateSnapshots(android::snapshot::Return::Ok());
+  SetMetadata(source(), {});
+  uint64_t required_size = 0;
+  EXPECT_TRUE(PreparePartitionsForUpdate(&required_size));
+  EXPECT_EQ(0u, required_size);
+}
+
+// Test that when there is not enough space, the required size returned by
+// SnapshotManager is passed up.
+TEST_P(SnapshotPartitionTestP, PreparePartitionsNoSpace) {
+  ExpectCreateUpdateSnapshots(android::snapshot::Return::NoSpace(1_GiB));
+  uint64_t required_size = 0;
+
+  SetMetadata(source(), {});
+  EXPECT_FALSE(PreparePartitionsForUpdate(&required_size));
+  EXPECT_EQ(1_GiB, required_size);
+}
+
+// Test that in recovery, empty space in the super partition is used for a
+// snapshot update first.
+TEST_P(SnapshotPartitionTestP, RecoveryUseSuperEmpty) {
+  ExpectCreateUpdateSnapshots(android::snapshot::Return::Ok());
+  EXPECT_CALL(dynamicControl(), IsRecovery()).WillRepeatedly(Return(true));
+
+  // Metadata is needed to perform super partition size check.
+  SetMetadata(source(), {});
+
+  // Must not call PrepareDynamicPartitionsForUpdate if
+  // PrepareSnapshotPartitionsForUpdate succeeds.
+  EXPECT_CALL(dynamicControl(), PrepareDynamicPartitionsForUpdate(_, _, _, _))
+      .Times(0);
+  uint64_t required_size = 0;
+  EXPECT_TRUE(PreparePartitionsForUpdate(&required_size));
+  EXPECT_EQ(0u, required_size);
+}
+
+// Test that in recovery, if CreateUpdateSnapshots returns an error, the
+// flashing path for full updates is tried.
+TEST_P(SnapshotPartitionTestP, RecoveryErrorShouldDeleteSource) {
+  // Expectation on PreparePartitionsForUpdate
+  ExpectCreateUpdateSnapshots(android::snapshot::Return::NoSpace(1_GiB));
+  EXPECT_CALL(dynamicControl(), IsRecovery()).WillRepeatedly(Return(true));
+  EXPECT_CALL(*snapshot_, CancelUpdate()).WillOnce(Return(true));
+  EXPECT_CALL(dynamicControl(), PrepareDynamicPartitionsForUpdate(_, _, _, _))
+      .WillRepeatedly(Invoke([&](auto source_slot,
+                                 auto target_slot,
+                                 const auto& manifest,
+                                 auto delete_source) {
+        EXPECT_EQ(source(), source_slot);
+        EXPECT_EQ(target(), target_slot);
+        // Deep comparison requires the full protobuf library. Comparing the
+        // pointers is sufficient.
+        EXPECT_EQ(&manifest_, &manifest);
+        EXPECT_TRUE(delete_source);
+        return dynamicControl().RealPrepareDynamicPartitionsForUpdate(
+            source_slot, target_slot, manifest, delete_source);
+      }));
+  // Only one slot of space in super
+  uint64_t super_size = kDefaultGroupSize + 1_MiB;
+  // Expectation on PrepareDynamicPartitionsForUpdate
+  SetMetadata(
+      source(), {{S("system"), 2_GiB}, {S("vendor"), 1_GiB}}, 0, super_size);
+  ExpectUnmap({T("system"), T("vendor")});
+  // Expect that the source partitions aren't present in target super
+  // metadata.
+  ExpectStoreMetadata({{T("system"), 3_GiB}, {T("vendor"), 1_GiB}});
+
+  uint64_t required_size = 0;
+  EXPECT_TRUE(PreparePartitionsForUpdate(&required_size));
+  EXPECT_EQ(0u, required_size);
+}
+
+INSTANTIATE_TEST_CASE_P(DynamicPartitionControlAndroidTest,
+                        SnapshotPartitionTestP,
+                        testing::Values(TestParam{0, 1}, TestParam{1, 0}));
+
 }  // namespace chromeos_update_engine
diff --git a/dynamic_partition_test_utils.h b/aosp/dynamic_partition_test_utils.h
similarity index 94%
rename from dynamic_partition_test_utils.h
rename to aosp/dynamic_partition_test_utils.h
index 70a176b..c518382 100644
--- a/dynamic_partition_test_utils.h
+++ b/aosp/dynamic_partition_test_utils.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_DYNAMIC_PARTITION_TEST_UTILS_H_
-#define UPDATE_ENGINE_DYNAMIC_PARTITION_TEST_UTILS_H_
+#ifndef UPDATE_ENGINE_AOSP_DYNAMIC_PARTITION_TEST_UTILS_H_
+#define UPDATE_ENGINE_AOSP_DYNAMIC_PARTITION_TEST_UTILS_H_
 
 #include <stdint.h>
 
@@ -47,7 +47,7 @@
 
 constexpr const uint32_t kMaxNumSlots = 2;
 constexpr const char* kSlotSuffixes[kMaxNumSlots] = {"_a", "_b"};
-constexpr const char* kFakeDevicePath = "/fake/dev/path/";
+constexpr std::string_view kFakeDevicePath = "/fake/dev/path/";
 constexpr const char* kFakeDmDevicePath = "/fake/dm/dev/path/";
 constexpr const uint32_t kFakeMetadataSize = 65536;
 constexpr const char* kDefaultGroup = "foo";
@@ -112,7 +112,7 @@
 }
 
 inline std::string GetDevice(const std::string& name) {
-  return kFakeDevicePath + name;
+  return std::string(kFakeDevicePath) + name;
 }
 
 inline std::string GetDmDevice(const std::string& name) {
@@ -175,9 +175,11 @@
 }
 
 inline std::unique_ptr<MetadataBuilder> NewFakeMetadata(
-    const DeltaArchiveManifest& manifest, uint32_t partition_attr = 0) {
+    const DeltaArchiveManifest& manifest,
+    uint32_t partition_attr = 0,
+    uint64_t super_size = kDefaultSuperSize) {
   auto builder =
-      MetadataBuilder::New(kDefaultSuperSize, kFakeMetadataSize, kMaxNumSlots);
+      MetadataBuilder::New(super_size, kFakeMetadataSize, kMaxNumSlots);
   for (const auto& group : manifest.dynamic_partition_metadata().groups()) {
     EXPECT_TRUE(builder->AddGroup(group.name(), group.size()));
     for (const auto& partition_name : group.partition_names()) {
@@ -283,4 +285,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_DYNAMIC_PARTITION_TEST_UTILS_H_
+#endif  // UPDATE_ENGINE_AOSP_DYNAMIC_PARTITION_TEST_UTILS_H_
diff --git a/dynamic_partition_utils.cc b/aosp/dynamic_partition_utils.cc
similarity index 95%
rename from dynamic_partition_utils.cc
rename to aosp/dynamic_partition_utils.cc
index f9bd886..6b77a45 100644
--- a/dynamic_partition_utils.cc
+++ b/aosp/dynamic_partition_utils.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/dynamic_partition_utils.h"
+#include "update_engine/aosp/dynamic_partition_utils.h"
 
 #include <vector>
 
diff --git a/dynamic_partition_utils.h b/aosp/dynamic_partition_utils.h
similarity index 85%
rename from dynamic_partition_utils.h
rename to aosp/dynamic_partition_utils.h
index 09fce00..5a51d5e 100644
--- a/dynamic_partition_utils.h
+++ b/aosp/dynamic_partition_utils.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_DYNAMIC_PARTITION_UTILS_H_
-#define UPDATE_ENGINE_DYNAMIC_PARTITION_UTILS_H_
+#ifndef UPDATE_ENGINE_AOSP_DYNAMIC_PARTITION_UTILS_H_
+#define UPDATE_ENGINE_AOSP_DYNAMIC_PARTITION_UTILS_H_
 
 #include <string>
 
@@ -30,4 +30,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_DYNAMIC_PARTITION_UTILS_H_
+#endif  // UPDATE_ENGINE_AOSP_DYNAMIC_PARTITION_UTILS_H_
diff --git a/aosp/hardware_android.cc b/aosp/hardware_android.cc
new file mode 100644
index 0000000..624cfc9
--- /dev/null
+++ b/aosp/hardware_android.cc
@@ -0,0 +1,375 @@
+//
+// Copyright (C) 2015 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/aosp/hardware_android.h"
+
+#include <sys/types.h>
+
+#include <memory>
+#include <string>
+#include <string_view>
+
+#include <android/sysprop/GkiProperties.sysprop.h>
+#include <android-base/properties.h>
+#include <base/files/file_util.h>
+#include <base/strings/string_number_conversions.h>
+#include <base/strings/string_util.h>
+#include <bootloader_message/bootloader_message.h>
+#include <fstab/fstab.h>
+#include <libavb/libavb.h>
+#include <libavb_user/avb_ops_user.h>
+
+#include "update_engine/common/error_code_utils.h"
+#include "update_engine/common/hardware.h"
+#include "update_engine/common/platform_constants.h"
+#include "update_engine/common/utils.h"
+
+#ifndef __ANDROID_RECOVERY__
+#include <android/sysprop/OtaProperties.sysprop.h>
+#endif
+
+using android::base::GetBoolProperty;
+using android::base::GetIntProperty;
+using android::base::GetProperty;
+using std::string;
+
+namespace chromeos_update_engine {
+
+namespace {
+
+// Android properties that identify the hardware and potentially non-updatable
+// parts of the bootloader (such as the bootloader version and the baseband
+// version).
+const char kPropProductManufacturer[] = "ro.product.manufacturer";
+const char kPropBootHardwareSKU[] = "ro.boot.hardware.sku";
+const char kPropBootRevision[] = "ro.boot.revision";
+const char kPropBuildDateUTC[] = "ro.build.date.utc";
+
+string GetPartitionBuildDate(const string& partition_name) {
+  return android::base::GetProperty("ro." + partition_name + ".build.date.utc",
+                                    "");
+}
+
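+// Wrapper around utils::IsTimestampNewer() that logs a warning with the
+// partition name and both timestamps when the check does not return success.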
+ErrorCode IsTimestampNewerLogged(const std::string& partition_name,
+                                 const std::string& old_version,
+                                 const std::string& new_version) {
+  auto error_code = utils::IsTimestampNewer(old_version, new_version);
+  if (error_code != ErrorCode::kSuccess) {
+    LOG(WARNING) << "Timestamp check failed with "
+                 << utils::ErrorCodeToString(error_code) << ": "
+                 << partition_name << " Partition timestamp: " << old_version
+                 << " Update timestamp: " << new_version;
+  }
+  return error_code;
+}
+
+void SetVbmetaDigestProp(const std::string& value) {
+#ifndef __ANDROID_RECOVERY__
+  if (!android::sysprop::OtaProperties::other_vbmeta_digest(value)) {
+    LOG(WARNING) << "Failed to set other vbmeta digest to " << value;
+  }
+#endif
+}
+
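+// Runs libavb verification against the inactive slot and returns its
+// lowercase, hex-encoded SHA-256 vbmeta digest, or an empty string if
+// verification fails.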
+std::string CalculateVbmetaDigestForInactiveSlot() {
+  AvbSlotVerifyData* avb_slot_data;
+
+  auto suffix = fs_mgr_get_other_slot_suffix();
+  const char* requested_partitions[] = {nullptr};
+  auto avb_ops = avb_ops_user_new();
+  auto verify_result = avb_slot_verify(avb_ops,
+                                       requested_partitions,
+                                       suffix.c_str(),
+                                       AVB_SLOT_VERIFY_FLAGS_NONE,
+                                       AVB_HASHTREE_ERROR_MODE_EIO,
+                                       &avb_slot_data);
+  if (verify_result != AVB_SLOT_VERIFY_RESULT_OK) {
+    LOG(WARNING) << "Failed to verify avb slot data: " << verify_result;
+    return "";
+  }
+
+  uint8_t vbmeta_digest[AVB_SHA256_DIGEST_SIZE];
+  avb_slot_verify_data_calculate_vbmeta_digest(
+      avb_slot_data, AVB_DIGEST_TYPE_SHA256, vbmeta_digest);
+
+  std::string encoded_digest =
+      base::HexEncode(vbmeta_digest, AVB_SHA256_DIGEST_SIZE);
+  return base::ToLowerASCII(encoded_digest);
+}
+
+}  // namespace
+
+namespace hardware {
+
+// Factory defined in hardware.h.
+std::unique_ptr<HardwareInterface> CreateHardware() {
+  return std::make_unique<HardwareAndroid>();
+}
+
+}  // namespace hardware
+
+// In Android there are normally three kinds of builds: eng, userdebug and user.
+// These builds target respectively a developer build, a debuggable version of
+// the final product and the pristine final product the end user will run.
+// Apart from the ro.build.type property name, they differ in the following
+// properties that characterize the builds:
+// * eng builds: ro.secure=0 and ro.debuggable=1
+// * userdebug builds: ro.secure=1 and ro.debuggable=1
+// * user builds: ro.secure=1 and ro.debuggable=0
+//
+// See IsOfficialBuild() and IsNormalMode() for the meaning of these options in
+// Android.
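+//
+// For example, on a typical userdebug build one would expect:
+//   $ adb shell getprop ro.secure       # "1"
+//   $ adb shell getprop ro.debuggable   # "1"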
+
+bool HardwareAndroid::IsOfficialBuild() const {
+  // We run an official build iff ro.secure == 1, because we expect the build to
+  // behave like the end user product and check for updates. Note that while
+  // developers are able to build "official builds" by just running "make user",
+  // that will only result in a more restrictive environment. The important part
+  // is that we don't produce and push "non-official" builds to the end user.
+  //
+  // In case of a non-bool value, we take the most restrictive option and
+  // assume we are in an official build.
+  return GetBoolProperty("ro.secure", true);
+}
+
+bool HardwareAndroid::IsNormalBootMode() const {
+  // We are running in "dev-mode" iff ro.debuggable == 1. In dev-mode the
+  // update_engine will allow extra developer options, such as providing a
+  // different update URL. In case of error, we assume the build is in
+  // normal mode.
+  return !GetBoolProperty("ro.debuggable", false);
+}
+
+bool HardwareAndroid::AreDevFeaturesEnabled() const {
+  return !IsNormalBootMode();
+}
+
+bool HardwareAndroid::IsOOBEEnabled() const {
+  // No OOBE flow blocking updates for Android-based boards.
+  return false;
+}
+
+bool HardwareAndroid::IsOOBEComplete(base::Time* out_time_of_oobe) const {
+  LOG(WARNING) << "OOBE is not enabled but IsOOBEComplete() called.";
+  if (out_time_of_oobe)
+    *out_time_of_oobe = base::Time();
+  return true;
+}
+
+string HardwareAndroid::GetHardwareClass() const {
+  auto manufacturer = GetProperty(kPropProductManufacturer, "");
+  auto sku = GetProperty(kPropBootHardwareSKU, "");
+  auto revision = GetProperty(kPropBootRevision, "");
+
+  return manufacturer + ":" + sku + ":" + revision;
+}
+
+string HardwareAndroid::GetDeviceRequisition() const {
+  LOG(WARNING) << "STUB: Getting requisition is not supported.";
+  return "";
+}
+
+int HardwareAndroid::GetMinKernelKeyVersion() const {
+  LOG(WARNING) << "STUB: No Kernel key version is available.";
+  return -1;
+}
+
+int HardwareAndroid::GetMinFirmwareKeyVersion() const {
+  LOG(WARNING) << "STUB: No Firmware key version is available.";
+  return -1;
+}
+
+int HardwareAndroid::GetMaxFirmwareKeyRollforward() const {
+  LOG(WARNING) << "STUB: Getting firmware_max_rollforward is not supported.";
+  return -1;
+}
+
+bool HardwareAndroid::SetMaxFirmwareKeyRollforward(
+    int firmware_max_rollforward) {
+  LOG(WARNING) << "STUB: Setting firmware_max_rollforward is not supported.";
+  return false;
+}
+
+bool HardwareAndroid::SetMaxKernelKeyRollforward(int kernel_max_rollforward) {
+  LOG(WARNING) << "STUB: Setting kernel_max_rollforward is not supported.";
+  return false;
+}
+
+int HardwareAndroid::GetPowerwashCount() const {
+  LOG(WARNING) << "STUB: Assuming no factory reset was performed.";
+  return 0;
+}
+
+bool HardwareAndroid::SchedulePowerwash(bool save_rollback_data) {
+  LOG(INFO) << "Scheduling a powerwash to BCB.";
+  LOG_IF(WARNING, save_rollback_data) << "save_rollback_data was true but "
+                                      << "isn't supported.";
+  string err;
+  if (!update_bootloader_message({"--wipe_data", "--reason=wipe_data_from_ota"},
+                                 &err)) {
+    LOG(ERROR) << "Failed to update bootloader message: " << err;
+    return false;
+  }
+  return true;
+}
+
+bool HardwareAndroid::CancelPowerwash() {
+  string err;
+  if (!clear_bootloader_message(&err)) {
+    LOG(ERROR) << "Failed to clear bootloader message: " << err;
+    return false;
+  }
+  return true;
+}
+
+bool HardwareAndroid::GetNonVolatileDirectory(base::FilePath* path) const {
+  base::FilePath local_path(constants::kNonVolatileDirectory);
+  if (!base::DirectoryExists(local_path)) {
+    LOG(ERROR) << "Non-volatile directory not found: " << local_path.value();
+    return false;
+  }
+  *path = local_path;
+  return true;
+}
+
+bool HardwareAndroid::GetPowerwashSafeDirectory(base::FilePath* path) const {
+  // On Android, we don't have a directory persisted across powerwash.
+  return false;
+}
+
+int64_t HardwareAndroid::GetBuildTimestamp() const {
+  return GetIntProperty<int64_t>(kPropBuildDateUTC, 0);
+}
+
+// Returns true if the device runs a userdebug build and explicitly allows OTA
+// downgrade.
+bool HardwareAndroid::AllowDowngrade() const {
+  return GetBoolProperty("ro.ota.allow_downgrade", false) &&
+         GetBoolProperty("ro.debuggable", false);
+}
+
+bool HardwareAndroid::GetFirstActiveOmahaPingSent() const {
+  LOG(WARNING) << "STUB: Assuming first active omaha was never set.";
+  return false;
+}
+
+bool HardwareAndroid::SetFirstActiveOmahaPingSent() {
+  LOG(WARNING) << "STUB: Assuming first active omaha is set.";
+  // We will set it to true so that its failure doesn't cause escalation.
+  return true;
+}
+
+void HardwareAndroid::SetWarmReset(bool warm_reset) {
+  if constexpr (!constants::kIsRecovery) {
+    constexpr char warm_reset_prop[] = "ota.warm_reset";
+    if (!android::base::SetProperty(warm_reset_prop, warm_reset ? "1" : "0")) {
+      LOG(WARNING) << "Failed to set prop " << warm_reset_prop;
+    }
+  }
+}
+
+void HardwareAndroid::SetVbmetaDigestForInactiveSlot(bool reset) {
+  if constexpr (constants::kIsRecovery) {
+    return;
+  }
+
+  if (android::base::GetProperty("ro.boot.avb_version", "").empty() &&
+      android::base::GetProperty("ro.boot.vbmeta.avb_version", "").empty()) {
+    LOG(INFO) << "Device doesn't use avb, skipping setting vbmeta digest";
+    return;
+  }
+
+  if (reset) {
+    SetVbmetaDigestProp("");
+    return;
+  }
+
+  std::string digest = CalculateVbmetaDigestForInactiveSlot();
+  if (digest.empty()) {
+    LOG(WARNING) << "Failed to calculate the vbmeta digest for the other slot";
+    return;
+  }
+  SetVbmetaDigestProp(digest);
+}
+
+string HardwareAndroid::GetVersionForLogging(
+    const string& partition_name) const {
+  if (partition_name == "boot") {
+    // ro.bootimage.build.date.utc
+    return GetPartitionBuildDate("bootimage");
+  }
+  return GetPartitionBuildDate(partition_name);
+}
+
+ErrorCode HardwareAndroid::IsPartitionUpdateValid(
+    const string& partition_name, const string& new_version) const {
+  if (partition_name == "boot") {
+    const auto old_version = GetPartitionBuildDate("bootimage");
+    auto error_code =
+        IsTimestampNewerLogged(partition_name, old_version, new_version);
+    if (error_code == ErrorCode::kPayloadTimestampError) {
+      bool prevent_downgrade =
+          android::sysprop::GkiProperties::prevent_downgrade_version().value_or(
+              false);
+      if (!prevent_downgrade) {
+        LOG(WARNING) << "Downgrade of boot image is detected, but permitting "
+                        "update because device does not prevent boot image "
+                        "downgrade";
+        // If prevent_downgrade_version sysprop is not explicitly set, permit
+        // downgrade in boot image version.
+        // Even though error_code is overridden here, always call
+        // IsTimestampNewerLogged to produce log messages.
+        error_code = ErrorCode::kSuccess;
+      }
+    }
+    return error_code;
+  }
+
+  const auto old_version = GetPartitionBuildDate(partition_name);
+  // TODO(zhangkelvin)  for some partitions, missing a current timestamp should
+  // be an error, e.g. system, vendor, product etc.
+  auto error_code =
+      IsTimestampNewerLogged(partition_name, old_version, new_version);
+  return error_code;
+}
+
+// Mount options for non-system partitions. This option causes selinux to
+// treat every file in the mounted filesystem as having the 'postinstall_file'
+// context, regardless of what the filesystem itself records. See "SELinux
+// User's and Administrator's Guide" for more information on this option.
+constexpr const char* kDefaultPostinstallMountOptions =
+    "context=u:object_r:postinstall_file:s0";
+
+// Mount options for system partitions. This option causes selinux to use the
+// 'postinstall_file' context as a fallback if there are no other selinux
+// contexts associated with the file in the mounted partition. See "SELinux
+// User's and Administrator's Guide" for more information on this option.
+constexpr const char* kSystemPostinstallMountOptions =
+    "defcontext=u:object_r:postinstall_file:s0";
+
+// Name of the system partition.
+constexpr std::string_view kSystemPartitionName = "system";
+
+const char* HardwareAndroid::GetPartitionMountOptions(
+    const std::string& partition_name) const {
+  if (partition_name == kSystemPartitionName) {
+    return kSystemPostinstallMountOptions;
+  } else {
+    return kDefaultPostinstallMountOptions;
+  }
+}
+
+}  // namespace chromeos_update_engine
diff --git a/hardware_android.h b/aosp/hardware_android.h
similarity index 72%
rename from hardware_android.h
rename to aosp/hardware_android.h
index 145a936..d20e8df 100644
--- a/hardware_android.h
+++ b/aosp/hardware_android.h
@@ -14,21 +14,23 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_HARDWARE_ANDROID_H_
-#define UPDATE_ENGINE_HARDWARE_ANDROID_H_
+#ifndef UPDATE_ENGINE_AOSP_HARDWARE_ANDROID_H_
+#define UPDATE_ENGINE_AOSP_HARDWARE_ANDROID_H_
 
 #include <string>
+#include <string_view>
 
 #include <base/macros.h>
 #include <base/time/time.h>
 
+#include "update_engine/common/error_code.h"
 #include "update_engine/common/hardware.h"
 #include "update_engine/common/hardware_interface.h"
 
 namespace chromeos_update_engine {
 
 // Implements the real interface with the hardware in the Android platform.
-class HardwareAndroid final : public HardwareInterface {
+class HardwareAndroid : public HardwareInterface {
  public:
   HardwareAndroid() = default;
   ~HardwareAndroid() override = default;
@@ -40,15 +42,14 @@
   bool IsOOBEEnabled() const override;
   bool IsOOBEComplete(base::Time* out_time_of_oobe) const override;
   std::string GetHardwareClass() const override;
-  std::string GetFirmwareVersion() const override;
-  std::string GetECVersion() const override;
+  std::string GetDeviceRequisition() const override;
   int GetMinKernelKeyVersion() const override;
   int GetMinFirmwareKeyVersion() const override;
   int GetMaxFirmwareKeyRollforward() const override;
   bool SetMaxFirmwareKeyRollforward(int firmware_max_rollforward) override;
   bool SetMaxKernelKeyRollforward(int kernel_max_rollforward) override;
   int GetPowerwashCount() const override;
-  bool SchedulePowerwash(bool is_rollback) override;
+  bool SchedulePowerwash(bool save_rollback_data) override;
   bool CancelPowerwash() override;
   bool GetNonVolatileDirectory(base::FilePath* path) const override;
   bool GetPowerwashSafeDirectory(base::FilePath* path) const override;
@@ -57,6 +58,14 @@
   bool GetFirstActiveOmahaPingSent() const override;
   bool SetFirstActiveOmahaPingSent() override;
   void SetWarmReset(bool warm_reset) override;
+  void SetVbmetaDigestForInactiveSlot(bool reset) override;
+  [[nodiscard]] std::string GetVersionForLogging(
+      const std::string& partition_name) const override;
+  [[nodiscard]] ErrorCode IsPartitionUpdateValid(
+      const std::string& partition_name,
+      const std::string& new_version) const override;
+  [[nodiscard]] const char* GetPartitionMountOptions(
+      const std::string& partition_name) const override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(HardwareAndroid);
@@ -64,4 +73,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_HARDWARE_ANDROID_H_
+#endif  // UPDATE_ENGINE_AOSP_HARDWARE_ANDROID_H_
diff --git a/logging_android.cc b/aosp/logging_android.cc
similarity index 94%
rename from logging_android.cc
rename to aosp/logging_android.cc
index 0219075..5ccf7bc 100644
--- a/logging_android.cc
+++ b/aosp/logging_android.cc
@@ -240,9 +240,16 @@
   std::string_view sv = str_newline;
   ignore_result(android::base::ConsumeSuffix(&sv, "\n"));
   std::string str(sv.data(), sv.size());
-  // This will eventually be redirected to CombinedLogger.
-  // Use nullptr as tag so that liblog infers log tag from getprogname().
-  __android_log_write(priority, nullptr /* tag */, str.c_str());
+
+  if (priority == ANDROID_LOG_FATAL) {
+    // Abort the program for priority FATAL. __android_log_assert will log the
+    // message to stderr and CombinedLogger.
+    __android_log_assert(nullptr, nullptr, "%s", str.c_str());
+  } else {
+    // This will eventually be redirected to CombinedLogger.
+    // Use nullptr as tag so that liblog infers log tag from getprogname().
+    __android_log_write(priority, nullptr /* tag */, str.c_str());
+  }
   return true;
 }
 
diff --git a/metrics_reporter_android.cc b/aosp/metrics_reporter_android.cc
similarity index 80%
rename from metrics_reporter_android.cc
rename to aosp/metrics_reporter_android.cc
index d8fa6e5..a324fab 100644
--- a/metrics_reporter_android.cc
+++ b/aosp/metrics_reporter_android.cc
@@ -14,10 +14,12 @@
 // limitations under the License.
 //
 
-#include "update_engine/metrics_reporter_android.h"
+#include "update_engine/aosp/metrics_reporter_android.h"
 
 #include <stdint.h>
 
+#include <algorithm>
+#include <any>
 #include <memory>
 #include <string>
 
@@ -30,6 +32,7 @@
 #include <statslog.h>
 
 #include "update_engine/common/constants.h"
+#include "update_engine/payload_consumer/install_plan.h"
 
 using android::fs_mgr::GetPartitionGroupName;
 using android::fs_mgr::LpMetadata;
@@ -48,20 +51,38 @@
 int32_t GetStatsdEnumValue(int32_t value) {
   return kMetricsReporterEnumOffset + value;
 }
+
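+// Hash tree / FEC are considered enabled when any partition in the install
+// plan has a non-zero hash_tree_size / fec_size, respectively.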
+bool IsHashTreeEnabled(
+    const chromeos_update_engine::InstallPlan* install_plan) {
+  return std::any_of(
+      install_plan->partitions.begin(),
+      install_plan->partitions.end(),
+      [](const auto& partition) { return partition.hash_tree_size > 0; });
+}
+
+bool IsFECEnabled(const chromeos_update_engine::InstallPlan* install_plan) {
+  return std::any_of(
+      install_plan->partitions.begin(),
+      install_plan->partitions.end(),
+      [](const auto& partition) { return partition.fec_size > 0; });
+}
+
 }  // namespace
 
 namespace chromeos_update_engine {
 
 namespace metrics {
 
-std::unique_ptr<MetricsReporterInterface> CreateMetricsReporter() {
-  return std::make_unique<MetricsReporterAndroid>();
+std::unique_ptr<MetricsReporterInterface> CreateMetricsReporter(
+    DynamicPartitionControlInterface* dynamic_partition_control,
+    const InstallPlan* install_plan) {
+  return std::make_unique<MetricsReporterAndroid>(dynamic_partition_control,
+                                                  install_plan);
 }
 
 }  // namespace metrics
 
 void MetricsReporterAndroid::ReportUpdateAttemptMetrics(
-    SystemState* /* system_state */,
     int attempt_number,
     PayloadType payload_type,
     base::TimeDelta duration,
@@ -106,6 +127,11 @@
     }
   }
 
+  bool vab_compression_enabled = android::base::GetBoolProperty(
+      "ro.virtual_ab.compression.enabled", false);
+  bool vab_compression_used =
+      dynamic_partition_control_->UpdateUsesSnapshotCompression();
+
   android::util::stats_write(
       android::util::UPDATE_ENGINE_UPDATE_ATTEMPT_REPORTED,
       attempt_number,
@@ -118,7 +144,9 @@
       android::base::GetProperty("ro.build.fingerprint", "").c_str(),
       super_partition_size_bytes,
       slot_size_bytes,
-      super_free_space);
+      super_free_space,
+      vab_compression_enabled,
+      vab_compression_used);
 }
 
 void MetricsReporterAndroid::ReportUpdateAttemptDownloadMetrics(
@@ -157,7 +185,9 @@
       static_cast<int32_t>(total_bytes_downloaded),
       static_cast<int32_t>(download_overhead_percentage),
       static_cast<int32_t>(total_duration.InMinutes()),
-      static_cast<int32_t>(reboot_count));
+      static_cast<int32_t>(reboot_count),
+      IsHashTreeEnabled(install_plan_),
+      IsFECEnabled(install_plan_));
 }
 
 void MetricsReporterAndroid::ReportAbnormallyTerminatedUpdateAttemptMetrics() {
diff --git a/metrics_reporter_android.h b/aosp/metrics_reporter_android.h
similarity index 81%
rename from metrics_reporter_android.h
rename to aosp/metrics_reporter_android.h
index e320c12..aeb579a 100644
--- a/metrics_reporter_android.h
+++ b/aosp/metrics_reporter_android.h
@@ -14,25 +14,28 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_METRICS_REPORTER_ANDROID_H_
-#define UPDATE_ENGINE_METRICS_REPORTER_ANDROID_H_
+#ifndef UPDATE_ENGINE_AOSP_METRICS_REPORTER_ANDROID_H_
+#define UPDATE_ENGINE_AOSP_METRICS_REPORTER_ANDROID_H_
 
 #include <string>
 
 #include "update_engine/common/error_code.h"
-#include "update_engine/metrics_constants.h"
-#include "update_engine/metrics_reporter_interface.h"
+#include "update_engine/common/metrics_constants.h"
+#include "update_engine/common/metrics_reporter_interface.h"
+#include "update_engine/payload_consumer/install_plan.h"
 
 namespace chromeos_update_engine {
 
 class MetricsReporterAndroid : public MetricsReporterInterface {
  public:
-  MetricsReporterAndroid() = default;
+  explicit MetricsReporterAndroid(
+      DynamicPartitionControlInterface* dynamic_partition_control,
+      const InstallPlan* install_plan)
+      : dynamic_partition_control_(dynamic_partition_control),
+        install_plan_(install_plan) {}
 
   ~MetricsReporterAndroid() override = default;
 
-  void Initialize() override {}
-
   void ReportRollbackMetrics(metrics::RollbackResult result) override {}
 
   void ReportEnterpriseRollbackMetrics(
@@ -41,13 +44,11 @@
   void ReportDailyMetrics(base::TimeDelta os_age) override {}
 
   void ReportUpdateCheckMetrics(
-      SystemState* system_state,
       metrics::CheckResult result,
       metrics::CheckReaction reaction,
       metrics::DownloadErrorCode download_error_code) override {}
 
-  void ReportUpdateAttemptMetrics(SystemState* system_state,
-                                  int attempt_number,
+  void ReportUpdateAttemptMetrics(int attempt_number,
                                   PayloadType payload_type,
                                   base::TimeDelta duration,
                                   base::TimeDelta duration_uptime,
@@ -95,9 +96,12 @@
       bool has_time_restriction_policy, int time_to_update_days) override {}
 
  private:
+  DynamicPartitionControlInterface* dynamic_partition_control_{};
+  const InstallPlan* install_plan_{};
+
   DISALLOW_COPY_AND_ASSIGN(MetricsReporterAndroid);
 };
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_METRICS_REPORTER_ANDROID_H_
+#endif  // UPDATE_ENGINE_AOSP_METRICS_REPORTER_ANDROID_H_
diff --git a/aosp/mock_dynamic_partition_control_android.h b/aosp/mock_dynamic_partition_control_android.h
new file mode 100644
index 0000000..428b6c7
--- /dev/null
+++ b/aosp/mock_dynamic_partition_control_android.h
@@ -0,0 +1,146 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <stdint.h>
+
+#include <memory>
+#include <set>
+#include <string>
+
+#include <gmock/gmock.h>
+
+#include <libsnapshot/cow_writer.h>
+#include <libsnapshot/snapshot_writer.h>
+
+#include "payload_consumer/file_descriptor.h"
+#include "update_engine/aosp/dynamic_partition_control_android.h"
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/dynamic_partition_control_interface.h"
+
+namespace chromeos_update_engine {
+
+class MockDynamicPartitionControlAndroid
+    : public DynamicPartitionControlAndroid {
+ public:
+  MockDynamicPartitionControlAndroid()
+      : DynamicPartitionControlAndroid(0 /*source slot*/) {}
+  MOCK_METHOD(
+      bool,
+      MapPartitionOnDeviceMapper,
+      (const std::string&, const std::string&, uint32_t, bool, std::string*),
+      (override));
+  MOCK_METHOD(bool,
+              UnmapPartitionOnDeviceMapper,
+              (const std::string&),
+              (override));
+  MOCK_METHOD(void, Cleanup, (), (override));
+  MOCK_METHOD(bool, DeviceExists, (const std::string&), (override));
+  MOCK_METHOD(::android::dm::DmDeviceState,
+              GetState,
+              (const std::string&),
+              (override));
+  MOCK_METHOD(bool,
+              GetDmDevicePathByName,
+              (const std::string&, std::string*),
+              (override));
+  MOCK_METHOD(std::unique_ptr<::android::fs_mgr::MetadataBuilder>,
+              LoadMetadataBuilder,
+              (const std::string&, uint32_t),
+              (override));
+  MOCK_METHOD(std::unique_ptr<::android::fs_mgr::MetadataBuilder>,
+              LoadMetadataBuilder,
+              (const std::string&, uint32_t, uint32_t),
+              (override));
+  MOCK_METHOD(bool,
+              StoreMetadata,
+              (const std::string&, android::fs_mgr::MetadataBuilder*, uint32_t),
+              (override));
+  MOCK_METHOD(bool, GetDeviceDir, (std::string*), (override));
+  MOCK_METHOD(FeatureFlag, GetDynamicPartitionsFeatureFlag, (), (override));
+  MOCK_METHOD(std::string, GetSuperPartitionName, (uint32_t), (override));
+  MOCK_METHOD(FeatureFlag, GetVirtualAbFeatureFlag, (), (override));
+  MOCK_METHOD(FeatureFlag, GetVirtualAbCompressionFeatureFlag, (), (override));
+  MOCK_METHOD(bool, FinishUpdate, (bool), (override));
+  MOCK_METHOD(bool,
+              GetSystemOtherPath,
+              (uint32_t, uint32_t, const std::string&, std::string*, bool*),
+              (override));
+  MOCK_METHOD(bool,
+              EraseSystemOtherAvbFooter,
+              (uint32_t, uint32_t),
+              (override));
+  MOCK_METHOD(std::optional<bool>, IsAvbEnabledOnSystemOther, (), (override));
+  MOCK_METHOD(bool, IsRecovery, (), (override));
+  MOCK_METHOD(bool,
+              PrepareDynamicPartitionsForUpdate,
+              (uint32_t, uint32_t, const DeltaArchiveManifest&, bool),
+              (override));
+  MOCK_METHOD(std::unique_ptr<android::snapshot::ISnapshotWriter>,
+              OpenCowWriter,
+              (const std::string& unsuffixed_partition_name,
+               const std::optional<std::string>& source_path,
+               bool is_append),
+              (override));
+  MOCK_METHOD(FileDescriptorPtr,
+              OpenCowFd,
+              (const std::string& unsuffixed_partition_name,
+               const std::optional<std::string>& source_path,
+               bool is_append),
+              (override));
+  MOCK_METHOD(bool, MapAllPartitions, (), (override));
+  MOCK_METHOD(bool, UnmapAllPartitions, (), (override));
+  MOCK_METHOD(bool,
+              IsDynamicPartition,
+              (const std::string&, uint32_t slot),
+              (override));
+  MOCK_METHOD(bool, UpdateUsesSnapshotCompression, (), (override));
+
+  void set_fake_mapped_devices(const std::set<std::string>& fake) override {
+    DynamicPartitionControlAndroid::set_fake_mapped_devices(fake);
+  }
+
+  bool RealGetSystemOtherPath(uint32_t source_slot,
+                              uint32_t target_slot,
+                              const std::string& partition_name_suffix,
+                              std::string* path,
+                              bool* should_unmap) {
+    return DynamicPartitionControlAndroid::GetSystemOtherPath(
+        source_slot, target_slot, partition_name_suffix, path, should_unmap);
+  }
+
+  bool RealEraseSystemOtherAvbFooter(uint32_t source_slot,
+                                     uint32_t target_slot) {
+    return DynamicPartitionControlAndroid::EraseSystemOtherAvbFooter(
+        source_slot, target_slot);
+  }
+
+  std::optional<bool> RealIsAvbEnabledInFstab(const std::string& path) {
+    return DynamicPartitionControlAndroid::IsAvbEnabledInFstab(path);
+  }
+
+  bool RealPrepareDynamicPartitionsForUpdate(
+      uint32_t source_slot,
+      uint32_t target_slot,
+      const DeltaArchiveManifest& manifest,
+      bool delete_source) {
+    return DynamicPartitionControlAndroid::PrepareDynamicPartitionsForUpdate(
+        source_slot, target_slot, manifest, delete_source);
+  }
+  using DynamicPartitionControlAndroid::SetSourceSlot;
+  using DynamicPartitionControlAndroid::SetTargetSlot;
+};
+
+}  // namespace chromeos_update_engine
diff --git a/network_selector_android.cc b/aosp/network_selector_android.cc
similarity index 88%
rename from network_selector_android.cc
rename to aosp/network_selector_android.cc
index 55ba799..a7db415 100644
--- a/network_selector_android.cc
+++ b/aosp/network_selector_android.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/network_selector_android.h"
+#include "update_engine/aosp/network_selector_android.h"
 
 #include <memory>
 
@@ -25,14 +25,14 @@
 
 namespace network {
 
-// Factory defined in network_selector.h.
+// Factory defined in common/network_selector.h.
 std::unique_ptr<NetworkSelectorInterface> CreateNetworkSelector() {
   return std::make_unique<NetworkSelectorAndroid>();
 }
 
 }  // namespace network
 
-// Defined in network_selector_interface.h.
+// Defined in common/network_selector_interface.h.
 const NetworkId kDefaultNetworkId = NETWORK_UNSPECIFIED;
 
 bool NetworkSelectorAndroid::SetProcessNetwork(NetworkId network_id) {
diff --git a/network_selector_android.h b/aosp/network_selector_android.h
similarity index 81%
rename from network_selector_android.h
rename to aosp/network_selector_android.h
index 135536c..b79d1b3 100644
--- a/network_selector_android.h
+++ b/aosp/network_selector_android.h
@@ -14,12 +14,12 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_NETWORK_SELECTOR_ANDROID_H_
-#define UPDATE_ENGINE_NETWORK_SELECTOR_ANDROID_H_
+#ifndef UPDATE_ENGINE_AOSP_NETWORK_SELECTOR_ANDROID_H_
+#define UPDATE_ENGINE_AOSP_NETWORK_SELECTOR_ANDROID_H_
 
 #include <base/macros.h>
 
-#include "update_engine/network_selector_interface.h"
+#include "update_engine/common/network_selector_interface.h"
 
 namespace chromeos_update_engine {
 
@@ -37,4 +37,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_NETWORK_SELECTOR_ANDROID_H_
+#endif  // UPDATE_ENGINE_AOSP_NETWORK_SELECTOR_ANDROID_H_
diff --git a/common/platform_constants_android.cc b/aosp/platform_constants_android.cc
similarity index 93%
rename from common/platform_constants_android.cc
rename to aosp/platform_constants_android.cc
index f468c3b..a0a2a5e 100644
--- a/common/platform_constants_android.cc
+++ b/aosp/platform_constants_android.cc
@@ -31,8 +31,6 @@
 // No deadline file API support on Android.
 const char kOmahaResponseDeadlineFile[] = "";
 const char kNonVolatileDirectory[] = "/data/misc/update_engine";
-const char kPostinstallMountOptions[] =
-    "context=u:object_r:postinstall_file:s0";
 
 }  // namespace constants
 }  // namespace chromeos_update_engine
diff --git a/service_delegate_android_interface.h b/aosp/service_delegate_android_interface.h
similarity index 95%
rename from service_delegate_android_interface.h
rename to aosp/service_delegate_android_interface.h
index 34a9712..3c28794 100644
--- a/service_delegate_android_interface.h
+++ b/aosp/service_delegate_android_interface.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_SERVICE_DELEGATE_ANDROID_INTERFACE_H_
-#define UPDATE_ENGINE_SERVICE_DELEGATE_ANDROID_INTERFACE_H_
+#ifndef UPDATE_ENGINE_AOSP_SERVICE_DELEGATE_ANDROID_INTERFACE_H_
+#define UPDATE_ENGINE_AOSP_SERVICE_DELEGATE_ANDROID_INTERFACE_H_
 
 #include <inttypes.h>
 
@@ -124,4 +124,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_SERVICE_DELEGATE_ANDROID_INTERFACE_H_
+#endif  // UPDATE_ENGINE_AOSP_SERVICE_DELEGATE_ANDROID_INTERFACE_H_
diff --git a/sideload_main.cc b/aosp/sideload_main.cc
similarity index 94%
rename from sideload_main.cc
rename to aosp/sideload_main.cc
index 27967cd..bf015c9 100644
--- a/sideload_main.cc
+++ b/aosp/sideload_main.cc
@@ -28,15 +28,15 @@
 #include <brillo/streams/file_stream.h>
 #include <brillo/streams/stream.h>
 
+#include "update_engine/aosp/update_attempter_android.h"
 #include "update_engine/common/boot_control.h"
 #include "update_engine/common/error_code_utils.h"
 #include "update_engine/common/hardware.h"
+#include "update_engine/common/logging.h"
 #include "update_engine/common/prefs.h"
 #include "update_engine/common/subprocess.h"
 #include "update_engine/common/terminator.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/logging.h"
-#include "update_engine/update_attempter_android.h"
 
 using std::string;
 using std::vector;
@@ -154,8 +154,11 @@
     return false;
   }
 
-  UpdateAttempterAndroid update_attempter(
-      &sideload_daemon_state, &prefs, boot_control.get(), hardware.get());
+  UpdateAttempterAndroid update_attempter(&sideload_daemon_state,
+                                          &prefs,
+                                          boot_control.get(),
+                                          hardware.get(),
+                                          nullptr);
   update_attempter.Init();
 
   TEST_AND_RETURN_FALSE(update_attempter.ApplyPayload(
diff --git a/update_attempter_android.cc b/aosp/update_attempter_android.cc
similarity index 90%
rename from update_attempter_android.cc
rename to aosp/update_attempter_android.cc
index b7d119f..4636c43 100644
--- a/update_attempter_android.cc
+++ b/aosp/update_attempter_android.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/update_attempter_android.h"
+#include "update_engine/aosp/update_attempter_android.h"
 
 #include <algorithm>
 #include <map>
@@ -31,18 +31,18 @@
 #include <brillo/strings/string_utils.h>
 #include <log/log_safetynet.h>
 
-#include "update_engine/cleanup_previous_update_action.h"
+#include "update_engine/aosp/cleanup_previous_update_action.h"
 #include "update_engine/common/constants.h"
+#include "update_engine/common/daemon_state_interface.h"
+#include "update_engine/common/download_action.h"
 #include "update_engine/common/error_code_utils.h"
 #include "update_engine/common/file_fetcher.h"
+#include "update_engine/common/metrics_reporter_interface.h"
+#include "update_engine/common/network_selector.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/daemon_state_interface.h"
-#include "update_engine/metrics_reporter_interface.h"
 #include "update_engine/metrics_utils.h"
-#include "update_engine/network_selector.h"
 #include "update_engine/payload_consumer/certificate_parser_interface.h"
 #include "update_engine/payload_consumer/delta_performer.h"
-#include "update_engine/payload_consumer/download_action.h"
 #include "update_engine/payload_consumer/file_descriptor.h"
 #include "update_engine/payload_consumer/file_descriptor_utils.h"
 #include "update_engine/payload_consumer/filesystem_verifier_action.h"
@@ -64,7 +64,6 @@
 using base::Time;
 using base::TimeDelta;
 using base::TimeTicks;
-using std::shared_ptr;
 using std::string;
 using std::vector;
 using update_engine::UpdateEngineStatus;
@@ -133,14 +132,17 @@
     DaemonStateInterface* daemon_state,
     PrefsInterface* prefs,
     BootControlInterface* boot_control,
-    HardwareInterface* hardware)
+    HardwareInterface* hardware,
+    std::unique_ptr<ApexHandlerInterface> apex_handler)
     : daemon_state_(daemon_state),
       prefs_(prefs),
       boot_control_(boot_control),
       hardware_(hardware),
+      apex_handler_android_(std::move(apex_handler)),
       processor_(new ActionProcessor()),
       clock_(new Clock()) {
-  metrics_reporter_ = metrics::CreateMetricsReporter();
+  metrics_reporter_ = metrics::CreateMetricsReporter(
+      boot_control_->GetDynamicPartitionControl(), &install_plan_);
   network_selector_ = network::CreateNetworkSelector();
 }
 
@@ -181,7 +183,7 @@
     return LogAndSetError(
         error, FROM_HERE, "Already processing an update, cancel it first.");
   }
-  DCHECK(status_ == UpdateStatus::IDLE);
+  DCHECK_EQ(status_, UpdateStatus::IDLE);
 
   std::map<string, string> headers;
   if (!ParseKeyValuePairHeaders(key_value_pair_headers, &headers, error)) {
@@ -317,6 +319,19 @@
     int64_t payload_size,
     const vector<string>& key_value_pair_headers,
     brillo::ErrorPtr* error) {
+  // update_engine state must be checked before modifying payload_fd_; otherwise
+  // an already running update will be terminated (its existing file descriptor
+  // will be closed).
+  if (status_ == UpdateStatus::UPDATED_NEED_REBOOT) {
+    return LogAndSetError(
+        error, FROM_HERE, "An update already applied, waiting for reboot");
+  }
+  if (processor_->IsRunning()) {
+    return LogAndSetError(
+        error, FROM_HERE, "Already processing an update, cancel it first.");
+  }
+  DCHECK_EQ(status_, UpdateStatus::IDLE);
+
   payload_fd_.reset(dup(fd));
   const string payload_url = "fd://" + std::to_string(payload_fd_.get());
 
@@ -349,6 +364,12 @@
   LOG(INFO) << "Attempting to reset state from "
             << UpdateStatusToString(status_) << " to UpdateStatus::IDLE";
 
+  if (apex_handler_android_ != nullptr) {
+    LOG(INFO) << "Cleaning up reserved space for compressed APEX (if any)";
+    std::vector<ApexInfo> apex_infos_blank;
+    apex_handler_android_->AllocateSpace(apex_infos_blank);
+  }
+
   switch (status_) {
     case UpdateStatus::IDLE: {
       if (!boot_control_->GetDynamicPartitionControl()->ResetUpdate(prefs_)) {
@@ -374,6 +395,9 @@
       // Resets the warm reset property since we won't switch the slot.
       hardware_->SetWarmReset(false);
 
+      // Resets the vbmeta digest.
+      hardware_->SetVbmetaDigestForInactiveSlot(true /* reset */);
+
       // Remove update progress for DeltaPerformer and remove snapshots.
       if (!boot_control_->GetDynamicPartitionControl()->ResetUpdate(prefs_))
         ret_value = false;
@@ -507,7 +531,7 @@
         return LogAndSetError(
             error, FROM_HERE, "Failed to hash " + partition_path);
       }
-      if (!DeltaPerformer::ValidateSourceHash(
+      if (!PartitionWriter::ValidateSourceHash(
               source_hash, operation, fd, &errorcode)) {
         return false;
       }
@@ -576,9 +600,9 @@
     cleanup_previous_update_code_ = code;
     NotifyCleanupPreviousUpdateCallbacksAndClear();
   }
-  if (type == DownloadAction::StaticType()) {
-    download_progress_ = 0;
-  }
+  // download_progress_ is also used by other actions, such as
+  // filesystem_verify_action. Therefore, we always clear it here.
+  download_progress_ = 0;
   if (type == PostinstallRunnerAction::StaticType()) {
     bool succeeded =
         code == ErrorCode::kSuccess || code == ErrorCode::kUpdatedButNotActive;
@@ -592,8 +616,11 @@
     SetStatusAndNotify(UpdateStatus::CLEANUP_PREVIOUS_UPDATE);
   }
   if (type == DownloadAction::StaticType()) {
-    SetStatusAndNotify(UpdateStatus::FINALIZING);
+    auto download_action = static_cast<DownloadAction*>(action);
+    install_plan_ = *download_action->install_plan();
+    SetStatusAndNotify(UpdateStatus::VERIFYING);
   } else if (type == FilesystemVerifierAction::StaticType()) {
+    SetStatusAndNotify(UpdateStatus::FINALIZING);
     prefs_->SetBoolean(kPrefsVerityWritten, true);
   }
 }
@@ -644,6 +671,11 @@
   }
 }
 
+void UpdateAttempterAndroid::OnVerifyProgressUpdate(double progress) {
+  assert(status_ == UpdateStatus::VERIFYING);
+  ProgressUpdate(progress);
+}
+
 void UpdateAttempterAndroid::ScheduleProcessingStart() {
   LOG(INFO) << "Scheduling an action processor start.";
   brillo::MessageLoop::current()->PostTask(
@@ -725,15 +757,15 @@
       std::make_unique<DownloadAction>(prefs_,
                                        boot_control_,
                                        hardware_,
-                                       nullptr,  // system_state, not used.
                                        fetcher,  // passes ownership
                                        true /* interactive */);
   download_action->set_delegate(this);
   download_action->set_base_offset(base_offset_);
-  auto filesystem_verifier_action =
-      std::make_unique<FilesystemVerifierAction>();
+  auto filesystem_verifier_action = std::make_unique<FilesystemVerifierAction>(
+      boot_control_->GetDynamicPartitionControl());
   auto postinstall_runner_action =
       std::make_unique<PostinstallRunnerAction>(boot_control_, hardware_);
+  filesystem_verifier_action->set_delegate(this);
   postinstall_runner_action->set_delegate(this);
 
   // Bond them together. We have to use the leaf-types when calling
@@ -795,7 +827,6 @@
   TimeDelta duration_uptime = clock_->GetMonotonicTime() - monotonic_time_start;
 
   metrics_reporter_->ReportUpdateAttemptMetrics(
-      nullptr,  // system_state
       static_cast<int>(attempt_number),
       payload_type,
       duration,
@@ -859,20 +890,25 @@
   string current_version =
       android::base::GetProperty("ro.build.version.incremental", "");
   TEST_AND_RETURN(!current_version.empty());
+  const auto current_slot = boot_control_->GetCurrentSlot();
 
   // If there's no record of previous version (e.g. due to a data wipe), we
   // save the info of current boot and skip the metrics report.
   if (!prefs_->Exists(kPrefsPreviousVersion)) {
     prefs_->SetString(kPrefsBootId, current_boot_id);
     prefs_->SetString(kPrefsPreviousVersion, current_version);
+    prefs_->SetInt64(std::string{kPrefsPreviousSlot},
+                     boot_control_->GetCurrentSlot());
     ClearMetricsPrefs();
     return;
   }
+  int64_t previous_slot = -1;
+  prefs_->GetInt64(kPrefsPreviousSlot, &previous_slot);
   string previous_version;
-  // update_engine restarted under the same build.
+  // update_engine restarted under the same build and same slot.
   // TODO(xunchang) identify and report rollback by checking UpdateMarker.
   if (prefs_->GetString(kPrefsPreviousVersion, &previous_version) &&
-      previous_version == current_version) {
+      previous_version == current_version && previous_slot == current_slot) {
     string last_boot_id;
     bool is_reboot = prefs_->Exists(kPrefsBootId) &&
                      (prefs_->GetString(kPrefsBootId, &last_boot_id) &&
@@ -892,6 +928,8 @@
   // TODO(xunchang) check the build version is larger than the previous one.
   prefs_->SetString(kPrefsBootId, current_boot_id);
   prefs_->SetString(kPrefsPreviousVersion, current_version);
+  prefs_->SetInt64(std::string{kPrefsPreviousSlot},
+                   boot_control_->GetCurrentSlot());
 
   bool previous_attempt_exists = prefs_->Exists(kPrefsPayloadAttemptNumber);
   // |kPrefsPayloadAttemptNumber| should be cleared upon successful update.
@@ -954,6 +992,20 @@
     return 0;
   }
 
+  std::vector<ApexInfo> apex_infos(manifest.apex_info().begin(),
+                                   manifest.apex_info().end());
+  uint64_t apex_size_required = 0;
+  if (apex_handler_android_ != nullptr) {
+    auto result = apex_handler_android_->CalculateSize(apex_infos);
+    if (!result.ok()) {
+      LogAndSetError(error,
+                     FROM_HERE,
+                     "Failed to calculate size required for compressed APEX");
+      return 0;
+    }
+    apex_size_required = *result;
+  }
+
   string payload_id = GetPayloadId(headers);
   uint64_t required_size = 0;
   if (!DeltaPerformer::PreparePartitionsForUpdate(prefs_,
@@ -967,11 +1019,19 @@
       return 0;
     } else {
       LOG(ERROR) << "Insufficient space for payload: " << required_size
+                 << " bytes, apex decompression: " << apex_size_required
                  << " bytes";
-      return required_size;
+      return required_size + apex_size_required;
     }
   }
 
+  if (apex_size_required > 0 && apex_handler_android_ != nullptr &&
+      !apex_handler_android_->AllocateSpace(apex_infos)) {
+    LOG(ERROR) << "Insufficient space for apex decompression: "
+               << apex_size_required << " bytes";
+    return apex_size_required;
+  }
+
   LOG(INFO) << "Successfully allocated space for payload.";
   return 0;
 }
diff --git a/update_attempter_android.h b/aosp/update_attempter_android.h
similarity index 89%
rename from update_attempter_android.h
rename to aosp/update_attempter_android.h
index f8c78de..70938bc 100644
--- a/update_attempter_android.h
+++ b/aosp/update_attempter_android.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_UPDATE_ATTEMPTER_ANDROID_H_
-#define UPDATE_ENGINE_UPDATE_ATTEMPTER_ANDROID_H_
+#ifndef UPDATE_ENGINE_AOSP_UPDATE_ATTEMPTER_ANDROID_H_
+#define UPDATE_ENGINE_AOSP_UPDATE_ATTEMPTER_ANDROID_H_
 
 #include <stdint.h>
 
@@ -26,20 +26,22 @@
 #include <android-base/unique_fd.h>
 #include <base/time/time.h>
 
+#include "update_engine/aosp/apex_handler_interface.h"
+#include "update_engine/aosp/service_delegate_android_interface.h"
 #include "update_engine/client_library/include/update_engine/update_status.h"
 #include "update_engine/common/action_processor.h"
 #include "update_engine/common/boot_control_interface.h"
 #include "update_engine/common/clock.h"
+#include "update_engine/common/daemon_state_interface.h"
+#include "update_engine/common/download_action.h"
 #include "update_engine/common/hardware_interface.h"
+#include "update_engine/common/metrics_reporter_interface.h"
+#include "update_engine/common/network_selector_interface.h"
 #include "update_engine/common/prefs_interface.h"
-#include "update_engine/daemon_state_interface.h"
-#include "update_engine/metrics_reporter_interface.h"
+#include "update_engine/common/service_observer_interface.h"
 #include "update_engine/metrics_utils.h"
-#include "update_engine/network_selector_interface.h"
-#include "update_engine/payload_consumer/download_action.h"
+#include "update_engine/payload_consumer/filesystem_verifier_action.h"
 #include "update_engine/payload_consumer/postinstall_runner_action.h"
-#include "update_engine/service_delegate_android_interface.h"
-#include "update_engine/service_observer_interface.h"
 
 namespace chromeos_update_engine {
 
@@ -47,6 +49,7 @@
     : public ServiceDelegateAndroidInterface,
       public ActionProcessorDelegate,
       public DownloadActionDelegate,
+      public FilesystemVerifyDelegate,
       public PostinstallRunnerAction::DelegateInterface,
       public CleanupPreviousUpdateActionDelegateInterface {
  public:
@@ -55,7 +58,8 @@
   UpdateAttempterAndroid(DaemonStateInterface* daemon_state,
                          PrefsInterface* prefs,
                          BootControlInterface* boot_control_,
-                         HardwareInterface* hardware_);
+                         HardwareInterface* hardware_,
+                         std::unique_ptr<ApexHandlerInterface> apex_handler);
   ~UpdateAttempterAndroid() override;
 
   // Further initialization to be done post construction.
@@ -101,6 +105,9 @@
   bool ShouldCancel(ErrorCode* cancel_reason) override;
   void DownloadComplete() override;
 
+  // FilesystemVerifyDelegate overrides
+  void OnVerifyProgressUpdate(double progress) override;
+
   // PostinstallRunnerAction::DelegateInterface
   void ProgressUpdate(double progress) override;
 
@@ -200,6 +207,8 @@
   BootControlInterface* boot_control_;
   HardwareInterface* hardware_;
 
+  std::unique_ptr<ApexHandlerInterface> apex_handler_android_;
+
   // Last status notification timestamp used for throttling. Use monotonic
   // TimeTicks to ensure that notifications are sent even if the system clock is
   // set back in the middle of an update.
@@ -241,4 +250,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_UPDATE_ATTEMPTER_ANDROID_H_
+#endif  // UPDATE_ENGINE_AOSP_UPDATE_ATTEMPTER_ANDROID_H_
diff --git a/update_attempter_android_unittest.cc b/aosp/update_attempter_android_unittest.cc
similarity index 91%
rename from update_attempter_android_unittest.cc
rename to aosp/update_attempter_android_unittest.cc
index 721b735..f73df16 100644
--- a/update_attempter_android_unittest.cc
+++ b/aosp/update_attempter_android_unittest.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/update_attempter_android.h"
+#include "update_engine/aosp/update_attempter_android.h"
 
 #include <memory>
 #include <string>
@@ -24,15 +24,16 @@
 #include <base/time/time.h>
 #include <gtest/gtest.h>
 
+#include "common/constants.h"
+#include "update_engine/aosp/daemon_state_android.h"
 #include "update_engine/common/fake_boot_control.h"
 #include "update_engine/common/fake_clock.h"
 #include "update_engine/common/fake_hardware.h"
 #include "update_engine/common/fake_prefs.h"
 #include "update_engine/common/mock_action_processor.h"
+#include "update_engine/common/mock_metrics_reporter.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/daemon_state_android.h"
-#include "update_engine/mock_metrics_reporter.h"
 
 using base::Time;
 using base::TimeDelta;
@@ -63,14 +64,14 @@
         std::move(payload));
   }
 
-  UpdateAttempterAndroid update_attempter_android_{
-      &daemon_state_, &prefs_, &boot_control_, &hardware_};
-
   DaemonStateAndroid daemon_state_;
   FakePrefs prefs_;
   FakeBootControl boot_control_;
   FakeHardware hardware_;
 
+  UpdateAttempterAndroid update_attempter_android_{
+      &daemon_state_, &prefs_, &boot_control_, &hardware_, nullptr};
+
   FakeClock* clock_;
   testing::NiceMock<MockMetricsReporter>* metrics_reporter_;
 };
@@ -81,6 +82,8 @@
   prefs_.SetString(kPrefsPreviousVersion, build_version);
   prefs_.SetString(kPrefsBootId, "oldboot");
   prefs_.SetInt64(kPrefsNumReboots, 1);
+  prefs_.SetInt64(kPrefsPreviousSlot, 1);
+  boot_control_.SetCurrentSlot(1);
 
   EXPECT_CALL(*metrics_reporter_, ReportTimeToReboot(_)).Times(0);
   update_attempter_android_.Init();
@@ -88,15 +91,15 @@
   // Check that the boot_id and reboot_count are updated.
   std::string boot_id;
   utils::GetBootId(&boot_id);
-  EXPECT_TRUE(prefs_.Exists(kPrefsBootId));
+  ASSERT_TRUE(prefs_.Exists(kPrefsBootId));
   std::string prefs_boot_id;
-  EXPECT_TRUE(prefs_.GetString(kPrefsBootId, &prefs_boot_id));
-  EXPECT_EQ(boot_id, prefs_boot_id);
+  ASSERT_TRUE(prefs_.GetString(kPrefsBootId, &prefs_boot_id));
+  ASSERT_EQ(boot_id, prefs_boot_id);
 
-  EXPECT_TRUE(prefs_.Exists(kPrefsNumReboots));
+  ASSERT_TRUE(prefs_.Exists(kPrefsNumReboots));
   int64_t reboot_count;
-  EXPECT_TRUE(prefs_.GetInt64(kPrefsNumReboots, &reboot_count));
-  EXPECT_EQ(2, reboot_count);
+  ASSERT_TRUE(prefs_.GetInt64(kPrefsNumReboots, &reboot_count));
+  ASSERT_EQ(2, reboot_count);
 }
 
 TEST_F(UpdateAttempterAndroidTest, UpdatePrefsBuildVersionChangeOnInit) {
@@ -138,8 +141,7 @@
   TimeDelta duration_uptime = up_time - Time::FromInternalValue(12345);
   EXPECT_CALL(
       *metrics_reporter_,
-      ReportUpdateAttemptMetrics(_,
-                                 2,
+      ReportUpdateAttemptMetrics(2,
                                  _,
                                  duration,
                                  duration_uptime,
diff --git a/update_engine_client_android.cc b/aosp/update_engine_client_android.cc
similarity index 100%
rename from update_engine_client_android.cc
rename to aosp/update_engine_client_android.cc
diff --git a/binder_bindings/android/brillo/IUpdateEngine.aidl b/binder_bindings/android/brillo/IUpdateEngine.aidl
deleted file mode 100644
index 56e1524..0000000
--- a/binder_bindings/android/brillo/IUpdateEngine.aidl
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.brillo;
-
-import android.brillo.IUpdateEngineStatusCallback;
-import android.brillo.ParcelableUpdateEngineStatus;
-
-interface IUpdateEngine {
-  void SetUpdateAttemptFlags(in int flags);
-  boolean AttemptUpdate(in String app_version, in String omaha_url, in int flags);
-  void AttemptRollback(in boolean powerwash);
-  boolean CanRollback();
-  void ResetStatus();
-  ParcelableUpdateEngineStatus GetStatus();
-  void RebootIfNeeded();
-  void SetChannel(in String target_channel, in boolean powewash);
-  String GetChannel(in boolean get_current_channel);
-  void SetCohortHint(in String cohort_hint);
-  String GetCohortHint();
-  void SetP2PUpdatePermission(in boolean enabled);
-  boolean GetP2PUpdatePermission();
-  void SetUpdateOverCellularPermission(in boolean enabled);
-  void SetUpdateOverCellularTarget(in String target_version,
-                                   in long target_size);
-  boolean GetUpdateOverCellularPermission();
-  long GetDurationSinceUpdate();
-  String GetPrevVersion();
-  String GetRollbackPartition();
-  void RegisterStatusCallback(in IUpdateEngineStatusCallback callback);
-  int GetLastAttemptError();
-  int GetEolStatus();
-}
diff --git a/binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl b/binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl
deleted file mode 100644
index 837d44d..0000000
--- a/binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.brillo;
-
-import android.brillo.ParcelableUpdateEngineStatus;
-
-interface IUpdateEngineStatusCallback {
-  oneway
-  void HandleStatusUpdate(in ParcelableUpdateEngineStatus status);
-}
diff --git a/binder_bindings/android/brillo/ParcelableUpdateEngineStatus.aidl b/binder_bindings/android/brillo/ParcelableUpdateEngineStatus.aidl
deleted file mode 100644
index fc10505..0000000
--- a/binder_bindings/android/brillo/ParcelableUpdateEngineStatus.aidl
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.brillo;
-
-parcelable ParcelableUpdateEngineStatus cpp_header
-    "update_engine/parcelable_update_engine_status.h";
diff --git a/binder_service_brillo.cc b/binder_service_brillo.cc
deleted file mode 100644
index cc74763..0000000
--- a/binder_service_brillo.cc
+++ /dev/null
@@ -1,247 +0,0 @@
-//
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/binder_service_brillo.h"
-
-#include <base/bind.h>
-
-#include <binderwrapper/binder_wrapper.h>
-
-#include <utils/String16.h>
-#include <utils/StrongPointer.h>
-
-#include "update_engine/update_status_utils.h"
-
-using android::sp;
-using android::String16;
-using android::String8;
-using android::binder::Status;
-using android::brillo::IUpdateEngineStatusCallback;
-using android::brillo::ParcelableUpdateEngineStatus;
-using brillo::ErrorPtr;
-using std::string;
-using update_engine::UpdateEngineStatus;
-
-namespace chromeos_update_engine {
-
-namespace {
-string NormalString(const String16& in) {
-  return string{String8{in}.string()};
-}
-
-Status ToStatus(ErrorPtr* error) {
-  return Status::fromServiceSpecificError(
-      1, String8{error->get()->GetMessage().c_str()});
-}
-}  // namespace
-
-template <typename... Parameters, typename... Arguments>
-Status BinderUpdateEngineBrilloService::CallCommonHandler(
-    bool (UpdateEngineService::*Handler)(ErrorPtr*, Parameters...),
-    Arguments... arguments) {
-  ErrorPtr error;
-  if (((common_.get())->*Handler)(&error, arguments...))
-    return Status::ok();
-  return ToStatus(&error);
-}
-
-Status BinderUpdateEngineBrilloService::SetUpdateAttemptFlags(int flags) {
-  return CallCommonHandler(&UpdateEngineService::SetUpdateAttemptFlags, flags);
-}
-
-Status BinderUpdateEngineBrilloService::AttemptUpdate(
-    const String16& app_version,
-    const String16& omaha_url,
-    int flags,
-    bool* out_result) {
-  return CallCommonHandler(&UpdateEngineService::AttemptUpdate,
-                           NormalString(app_version),
-                           NormalString(omaha_url),
-                           flags,
-                           out_result);
-}
-
-Status BinderUpdateEngineBrilloService::AttemptRollback(bool powerwash) {
-  return CallCommonHandler(&UpdateEngineService::AttemptRollback, powerwash);
-}
-
-Status BinderUpdateEngineBrilloService::CanRollback(bool* out_can_rollback) {
-  return CallCommonHandler(&UpdateEngineService::CanRollback, out_can_rollback);
-}
-
-Status BinderUpdateEngineBrilloService::ResetStatus() {
-  return CallCommonHandler(&UpdateEngineService::ResetStatus);
-}
-
-Status BinderUpdateEngineBrilloService::GetStatus(
-    ParcelableUpdateEngineStatus* status) {
-  UpdateEngineStatus update_engine_status;
-  auto ret =
-      CallCommonHandler(&UpdateEngineService::GetStatus, &update_engine_status);
-
-  if (ret.isOk()) {
-    *status = ParcelableUpdateEngineStatus(update_engine_status);
-  }
-
-  return ret;
-}
-
-Status BinderUpdateEngineBrilloService::RebootIfNeeded() {
-  return CallCommonHandler(&UpdateEngineService::RebootIfNeeded);
-}
-
-Status BinderUpdateEngineBrilloService::SetChannel(
-    const String16& target_channel, bool powerwash) {
-  return CallCommonHandler(&UpdateEngineService::SetChannel,
-                           NormalString(target_channel),
-                           powerwash);
-}
-
-Status BinderUpdateEngineBrilloService::GetChannel(bool get_current_channel,
-                                                   String16* out_channel) {
-  string channel_string;
-  auto ret = CallCommonHandler(
-      &UpdateEngineService::GetChannel, get_current_channel, &channel_string);
-
-  *out_channel = String16(channel_string.c_str());
-  return ret;
-}
-
-Status BinderUpdateEngineBrilloService::SetCohortHint(
-    const String16& in_cohort_hint) {
-  return CallCommonHandler(&UpdateEngineService::SetCohortHint,
-                           NormalString(in_cohort_hint));
-}
-
-Status BinderUpdateEngineBrilloService::GetCohortHint(
-    String16* out_cohort_hint) {
-  string cohort_hint;
-  auto ret =
-      CallCommonHandler(&UpdateEngineService::GetCohortHint, &cohort_hint);
-
-  *out_cohort_hint = String16(cohort_hint.c_str());
-  return ret;
-}
-
-Status BinderUpdateEngineBrilloService::SetP2PUpdatePermission(bool enabled) {
-  return CallCommonHandler(&UpdateEngineService::SetP2PUpdatePermission,
-                           enabled);
-}
-
-Status BinderUpdateEngineBrilloService::GetP2PUpdatePermission(
-    bool* out_p2p_permission) {
-  return CallCommonHandler(&UpdateEngineService::GetP2PUpdatePermission,
-                           out_p2p_permission);
-}
-
-Status BinderUpdateEngineBrilloService::SetUpdateOverCellularPermission(
-    bool enabled) {
-  return CallCommonHandler(
-      &UpdateEngineService::SetUpdateOverCellularPermission, enabled);
-}
-
-Status BinderUpdateEngineBrilloService::SetUpdateOverCellularTarget(
-    const String16& target_version, int64_t target_size) {
-  return CallCommonHandler(&UpdateEngineService::SetUpdateOverCellularTarget,
-                           NormalString(target_version),
-                           target_size);
-}
-
-Status BinderUpdateEngineBrilloService::GetUpdateOverCellularPermission(
-    bool* out_cellular_permission) {
-  return CallCommonHandler(
-      &UpdateEngineService::GetUpdateOverCellularPermission,
-      out_cellular_permission);
-}
-
-Status BinderUpdateEngineBrilloService::GetDurationSinceUpdate(
-    int64_t* out_duration) {
-  return CallCommonHandler(&UpdateEngineService::GetDurationSinceUpdate,
-                           out_duration);
-}
-
-Status BinderUpdateEngineBrilloService::GetPrevVersion(
-    String16* out_prev_version) {
-  string version_string;
-  auto ret =
-      CallCommonHandler(&UpdateEngineService::GetPrevVersion, &version_string);
-
-  *out_prev_version = String16(version_string.c_str());
-  return ret;
-}
-
-Status BinderUpdateEngineBrilloService::GetRollbackPartition(
-    String16* out_rollback_partition) {
-  string partition_string;
-  auto ret = CallCommonHandler(&UpdateEngineService::GetRollbackPartition,
-                               &partition_string);
-
-  if (ret.isOk()) {
-    *out_rollback_partition = String16(partition_string.c_str());
-  }
-
-  return ret;
-}
-
-Status BinderUpdateEngineBrilloService::RegisterStatusCallback(
-    const sp<IUpdateEngineStatusCallback>& callback) {
-  callbacks_.emplace_back(callback);
-
-  auto binder_wrapper = android::BinderWrapper::Get();
-
-  binder_wrapper->RegisterForDeathNotifications(
-      IUpdateEngineStatusCallback::asBinder(callback),
-      base::Bind(&BinderUpdateEngineBrilloService::UnregisterStatusCallback,
-                 base::Unretained(this),
-                 base::Unretained(callback.get())));
-
-  return Status::ok();
-}
-
-Status BinderUpdateEngineBrilloService::GetLastAttemptError(
-    int* out_last_attempt_error) {
-  return CallCommonHandler(&UpdateEngineService::GetLastAttemptError,
-                           out_last_attempt_error);
-}
-
-Status BinderUpdateEngineBrilloService::GetEolStatus(int* out_eol_status) {
-  return CallCommonHandler(&UpdateEngineService::GetEolStatus, out_eol_status);
-}
-
-void BinderUpdateEngineBrilloService::UnregisterStatusCallback(
-    IUpdateEngineStatusCallback* callback) {
-  auto it = callbacks_.begin();
-  while (it != callbacks_.end() && it->get() != callback)
-    it++;
-
-  if (it == callbacks_.end()) {
-    LOG(ERROR) << "Got death notification for unknown callback.";
-    return;
-  }
-
-  LOG(INFO) << "Erasing orphan callback";
-  callbacks_.erase(it);
-}
-
-void BinderUpdateEngineBrilloService::SendStatusUpdate(
-    const UpdateEngineStatus& update_engine_status) {
-  ParcelableUpdateEngineStatus parcelable_status(update_engine_status);
-  for (auto& callback : callbacks_) {
-    callback->HandleStatusUpdate(parcelable_status);
-  }
-}
-
-}  // namespace chromeos_update_engine
diff --git a/binder_service_brillo.h b/binder_service_brillo.h
deleted file mode 100644
index d0d0dc9..0000000
--- a/binder_service_brillo.h
+++ /dev/null
@@ -1,114 +0,0 @@
-//
-// Copyright (C) 2016 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_BINDER_SERVICE_BRILLO_H_
-#define UPDATE_ENGINE_BINDER_SERVICE_BRILLO_H_
-
-#include <utils/Errors.h>
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include <utils/RefBase.h>
-
-#include "update_engine/common_service.h"
-#include "update_engine/parcelable_update_engine_status.h"
-#include "update_engine/service_observer_interface.h"
-
-#include "android/brillo/BnUpdateEngine.h"
-#include "android/brillo/IUpdateEngineStatusCallback.h"
-
-namespace chromeos_update_engine {
-
-class BinderUpdateEngineBrilloService : public android::brillo::BnUpdateEngine,
-                                        public ServiceObserverInterface {
- public:
-  explicit BinderUpdateEngineBrilloService(SystemState* system_state)
-      : common_(new UpdateEngineService(system_state)) {}
-  virtual ~BinderUpdateEngineBrilloService() = default;
-
-  const char* ServiceName() const {
-    return "android.brillo.UpdateEngineService";
-  }
-
-  // ServiceObserverInterface overrides.
-  void SendStatusUpdate(
-      const update_engine::UpdateEngineStatus& update_engine_status) override;
-  void SendPayloadApplicationComplete(ErrorCode error_code) override {}
-
-  // android::brillo::BnUpdateEngine overrides.
-  android::binder::Status SetUpdateAttemptFlags(int flags) override;
-  android::binder::Status AttemptUpdate(const android::String16& app_version,
-                                        const android::String16& omaha_url,
-                                        int flags,
-                                        bool* out_result) override;
-  android::binder::Status AttemptRollback(bool powerwash) override;
-  android::binder::Status CanRollback(bool* out_can_rollback) override;
-  android::binder::Status ResetStatus() override;
-  android::binder::Status GetStatus(
-      android::brillo::ParcelableUpdateEngineStatus* status);
-  android::binder::Status RebootIfNeeded() override;
-  android::binder::Status SetChannel(const android::String16& target_channel,
-                                     bool powerwash) override;
-  android::binder::Status GetChannel(bool get_current_channel,
-                                     android::String16* out_channel) override;
-  android::binder::Status SetCohortHint(
-      const android::String16& cohort_hint) override;
-  android::binder::Status GetCohortHint(
-      android::String16* out_cohort_hint) override;
-  android::binder::Status SetP2PUpdatePermission(bool enabled) override;
-  android::binder::Status GetP2PUpdatePermission(
-      bool* out_p2p_permission) override;
-  android::binder::Status SetUpdateOverCellularPermission(
-      bool enabled) override;
-  android::binder::Status SetUpdateOverCellularTarget(
-      const android::String16& target_version, int64_t target_size) override;
-  android::binder::Status GetUpdateOverCellularPermission(
-      bool* out_cellular_permission) override;
-  android::binder::Status GetDurationSinceUpdate(
-      int64_t* out_duration) override;
-  android::binder::Status GetPrevVersion(
-      android::String16* out_prev_version) override;
-  android::binder::Status GetRollbackPartition(
-      android::String16* out_rollback_partition) override;
-  android::binder::Status RegisterStatusCallback(
-      const android::sp<android::brillo::IUpdateEngineStatusCallback>& callback)
-      override;
-  android::binder::Status GetLastAttemptError(
-      int* out_last_attempt_error) override;
-  android::binder::Status GetEolStatus(int* out_eol_status) override;
-
- private:
-  // Generic function for dispatching to the common service.
-  template <typename... Parameters, typename... Arguments>
-  android::binder::Status CallCommonHandler(
-      bool (UpdateEngineService::*Handler)(brillo::ErrorPtr*, Parameters...),
-      Arguments... arguments);
-
-  // To be used as a death notification handler only.
-  void UnregisterStatusCallback(
-      android::brillo::IUpdateEngineStatusCallback* callback);
-
-  std::unique_ptr<UpdateEngineService> common_;
-
-  std::vector<android::sp<android::brillo::IUpdateEngineStatusCallback>>
-      callbacks_;
-};
-
-}  // namespace chromeos_update_engine
-
-#endif  // UPDATE_ENGINE_BINDER_SERVICE_BRILLO_H_
diff --git a/client-headers/BUILD.gn b/client-headers/BUILD.gn
new file mode 100644
index 0000000..8c1a17e
--- /dev/null
+++ b/client-headers/BUILD.gn
@@ -0,0 +1,31 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import("//common-mk/generate-dbus-proxies.gni")
+
+group("all") {
+  deps = [ ":libupdate_engine-client-headers" ]
+}
+
+# update_engine client library generated headers. Used by other daemons and
+# by the update_engine_client console program to interact with update_engine.
+generate_dbus_proxies("libupdate_engine-client-headers") {
+  sources = [ "../dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml" ]
+  dbus_service_config = "../dbus_bindings/dbus-service-config.json"
+  mock_output_file = "include/update_engine/dbus-proxy-mocks.h"
+  proxy_output_file = "include/update_engine/dbus-proxies.h"
+  proxy_path_in_mocks = "update_engine/dbus-proxies.h"
+}
diff --git a/libupdate_engine-client-test.pc.in b/client-headers/libupdate_engine-client-test.pc.in
similarity index 100%
rename from libupdate_engine-client-test.pc.in
rename to client-headers/libupdate_engine-client-test.pc.in
diff --git a/libupdate_engine-client.pc.in b/client-headers/libupdate_engine-client.pc.in
similarity index 100%
rename from libupdate_engine-client.pc.in
rename to client-headers/libupdate_engine-client.pc.in
diff --git a/client_library/client.cc b/client_library/client.cc
deleted file mode 100644
index b05df90..0000000
--- a/client_library/client.cc
+++ /dev/null
@@ -1,46 +0,0 @@
-//
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/client_library/include/update_engine/client.h"
-
-#include <memory>
-
-#if USE_BINDER
-#include "update_engine/client_library/client_binder.h"
-#else  // !USE_BINDER
-#include "update_engine/client_library/client_dbus.h"
-#endif  // USE_BINDER
-
-using std::unique_ptr;
-
-namespace update_engine {
-
-unique_ptr<UpdateEngineClient> UpdateEngineClient::CreateInstance() {
-#if USE_BINDER
-  auto update_engine_client_impl = new internal::BinderUpdateEngineClient{};
-#else   // !USE_BINDER
-  auto update_engine_client_impl = new internal::DBusUpdateEngineClient{};
-#endif  // USE_BINDER
-  auto ret = unique_ptr<UpdateEngineClient>{update_engine_client_impl};
-
-  if (!update_engine_client_impl->Init()) {
-    ret.reset();
-  }
-
-  return ret;
-}
-
-}  // namespace update_engine
diff --git a/client_library/client_binder.cc b/client_library/client_binder.cc
deleted file mode 100644
index 588bc64..0000000
--- a/client_library/client_binder.cc
+++ /dev/null
@@ -1,264 +0,0 @@
-//
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/client_library/client_binder.h"
-
-#include <binder/IServiceManager.h>
-
-#include <base/message_loop/message_loop.h>
-#include <utils/String8.h>
-
-#include "update_engine/common_service.h"
-#include "update_engine/parcelable_update_engine_status.h"
-#include "update_engine/update_status_utils.h"
-
-using android::getService;
-using android::OK;
-using android::String16;
-using android::String8;
-using android::binder::Status;
-using android::brillo::ParcelableUpdateEngineStatus;
-using chromeos_update_engine::StringToUpdateStatus;
-using std::string;
-using update_engine::UpdateAttemptFlags;
-
-namespace update_engine {
-namespace internal {
-
-bool BinderUpdateEngineClient::Init() {
-  if (!binder_watcher_.Init())
-    return false;
-
-  return getService(String16{"android.brillo.UpdateEngineService"},
-                    &service_) == OK;
-}
-
-bool BinderUpdateEngineClient::AttemptUpdate(const string& in_app_version,
-                                             const string& in_omaha_url,
-                                             bool at_user_request) {
-  bool started;
-  return service_
-      ->AttemptUpdate(
-          String16{in_app_version.c_str()},
-          String16{in_omaha_url.c_str()},
-          at_user_request ? 0 : UpdateAttemptFlags::kFlagNonInteractive,
-          &started)
-      .isOk();
-}
-
-bool BinderUpdateEngineClient::AttemptInstall(
-    const string& omaha_url, const std::vector<string>& dlc_module_ids) {
-  return false;
-}
-
-bool BinderUpdateEngineClient::GetStatus(int64_t* out_last_checked_time,
-                                         double* out_progress,
-                                         UpdateStatus* out_update_status,
-                                         string* out_new_version,
-                                         int64_t* out_new_size) const {
-  ParcelableUpdateEngineStatus status;
-
-  if (!service_->GetStatus(&status).isOk())
-    return false;
-
-  *out_last_checked_time = status.last_checked_time_;
-  *out_progress = status.progress_;
-  StringToUpdateStatus(String8{status.current_operation_}.string(),
-                       out_update_status);
-  *out_new_version = String8{status.new_version_}.string();
-  *out_new_size = status.new_size_;
-  return true;
-}
-
-bool BinderUpdateEngineClient::SetCohortHint(const string& in_cohort_hint) {
-  return service_->SetCohortHint(String16{in_cohort_hint.c_str()}).isOk();
-}
-
-bool BinderUpdateEngineClient::GetCohortHint(string* out_cohort_hint) const {
-  String16 out_as_string16;
-
-  if (!service_->GetCohortHint(&out_as_string16).isOk())
-    return false;
-
-  *out_cohort_hint = String8{out_as_string16}.string();
-  return true;
-}
-
-bool BinderUpdateEngineClient::SetUpdateOverCellularPermission(bool allowed) {
-  return service_->SetUpdateOverCellularPermission(allowed).isOk();
-}
-
-bool BinderUpdateEngineClient::GetUpdateOverCellularPermission(
-    bool* allowed) const {
-  return service_->GetUpdateOverCellularPermission(allowed).isOk();
-}
-
-bool BinderUpdateEngineClient::SetP2PUpdatePermission(bool enabled) {
-  return service_->SetP2PUpdatePermission(enabled).isOk();
-}
-
-bool BinderUpdateEngineClient::GetP2PUpdatePermission(bool* enabled) const {
-  return service_->GetP2PUpdatePermission(enabled).isOk();
-}
-
-bool BinderUpdateEngineClient::Rollback(bool powerwash) {
-  return service_->AttemptRollback(powerwash).isOk();
-}
-
-bool BinderUpdateEngineClient::GetRollbackPartition(
-    string* rollback_partition) const {
-  String16 out_as_string16;
-
-  if (!service_->GetRollbackPartition(&out_as_string16).isOk())
-    return false;
-
-  *rollback_partition = String8{out_as_string16}.string();
-  return true;
-}
-
-bool BinderUpdateEngineClient::GetPrevVersion(string* prev_version) const {
-  String16 out_as_string16;
-
-  if (!service_->GetPrevVersion(&out_as_string16).isOk())
-    return false;
-
-  *prev_version = String8{out_as_string16}.string();
-  return true;
-}
-
-void BinderUpdateEngineClient::RebootIfNeeded() {
-  if (!service_->RebootIfNeeded().isOk()) {
-    // Reboot error code doesn't necessarily mean that a reboot
-    // failed. For example, D-Bus may be shutdown before we receive the
-    // result.
-    LOG(INFO) << "RebootIfNeeded() failure ignored.";
-  }
-}
-
-bool BinderUpdateEngineClient::ResetStatus() {
-  return service_->ResetStatus().isOk();
-}
-
-Status BinderUpdateEngineClient::StatusUpdateCallback::HandleStatusUpdate(
-    const ParcelableUpdateEngineStatus& status) {
-  UpdateStatus update_status;
-
-  StringToUpdateStatus(String8{status.current_operation_}.string(),
-                       &update_status);
-
-  for (auto& handler : client_->handlers_) {
-    handler->HandleStatusUpdate(status.last_checked_time_,
-                                status.progress_,
-                                update_status,
-                                String8{status.new_version_}.string(),
-                                status.new_size_);
-  }
-
-  return Status::ok();
-}
-
-bool BinderUpdateEngineClient::RegisterStatusUpdateHandler(
-    StatusUpdateHandler* handler) {
-  if (!status_callback_.get()) {
-    status_callback_ = new BinderUpdateEngineClient::StatusUpdateCallback(this);
-    if (!service_->RegisterStatusCallback(status_callback_).isOk()) {
-      return false;
-    }
-  }
-
-  handlers_.push_back(handler);
-
-  int64_t last_checked_time;
-  double progress;
-  UpdateStatus update_status;
-  string new_version;
-  int64_t new_size;
-
-  if (!GetStatus(&last_checked_time,
-                 &progress,
-                 &update_status,
-                 &new_version,
-                 &new_size)) {
-    handler->IPCError("Could not get status from binder service");
-  }
-
-  handler->HandleStatusUpdate(
-      last_checked_time, progress, update_status, new_version, new_size);
-
-  return true;
-}
-
-bool BinderUpdateEngineClient::UnregisterStatusUpdateHandler(
-    StatusUpdateHandler* handler) {
-  auto it = std::find(handlers_.begin(), handlers_.end(), handler);
-  if (it != handlers_.end()) {
-    handlers_.erase(it);
-    return true;
-  }
-
-  return false;
-}
-
-bool BinderUpdateEngineClient::SetTargetChannel(const string& in_target_channel,
-                                                bool allow_powerwash) {
-  return service_
-      ->SetChannel(String16{in_target_channel.c_str()}, allow_powerwash)
-      .isOk();
-}
-
-bool BinderUpdateEngineClient::GetTargetChannel(string* out_channel) const {
-  String16 out_as_string16;
-
-  if (!service_->GetChannel(false, &out_as_string16).isOk())
-    return false;
-
-  *out_channel = String8{out_as_string16}.string();
-  return true;
-}
-
-bool BinderUpdateEngineClient::GetChannel(string* out_channel) const {
-  String16 out_as_string16;
-
-  if (!service_->GetChannel(true, &out_as_string16).isOk())
-    return false;
-
-  *out_channel = String8{out_as_string16}.string();
-  return true;
-}
-
-bool BinderUpdateEngineClient::GetLastAttemptError(
-    int32_t* last_attempt_error) const {
-  int out_as_int;
-
-  if (!service_->GetLastAttemptError(&out_as_int).isOk())
-    return false;
-
-  *last_attempt_error = out_as_int;
-  return true;
-}
-
-bool BinderUpdateEngineClient::GetEolStatus(int32_t* eol_status) const {
-  int out_as_int;
-
-  if (!service_->GetEolStatus(&out_as_int).isOk())
-    return false;
-
-  *eol_status = out_as_int;
-  return true;
-}
-
-}  // namespace internal
-}  // namespace update_engine
diff --git a/client_library/client_binder.h b/client_library/client_binder.h
deleted file mode 100644
index f3e4102..0000000
--- a/client_library/client_binder.h
+++ /dev/null
@@ -1,117 +0,0 @@
-//
-// Copyright (C) 2016 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_CLIENT_LIBRARY_CLIENT_BINDER_H_
-#define UPDATE_ENGINE_CLIENT_LIBRARY_CLIENT_BINDER_H_
-
-#include <cstdint>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include <base/macros.h>
-#include <utils/String16.h>
-#include <utils/StrongPointer.h>
-
-#include <brillo/binder_watcher.h>
-
-#include "android/brillo/BnUpdateEngineStatusCallback.h"
-#include "android/brillo/IUpdateEngine.h"
-
-#include "update_engine/client_library/include/update_engine/client.h"
-
-namespace update_engine {
-namespace internal {
-
-class BinderUpdateEngineClient : public UpdateEngineClient {
- public:
-  BinderUpdateEngineClient() = default;
-  bool Init();
-
-  virtual ~BinderUpdateEngineClient() = default;
-
-  bool AttemptUpdate(const std::string& app_version,
-                     const std::string& omaha_url,
-                     bool at_user_request) override;
-
-  bool AttemptInstall(const std::string& omaha_url,
-                      const std::vector<std::string>& dlc_module_ids) override;
-
-  bool GetStatus(int64_t* out_last_checked_time,
-                 double* out_progress,
-                 UpdateStatus* out_update_status,
-                 std::string* out_new_version,
-                 int64_t* out_new_size) const override;
-
-  bool SetCohortHint(const std::string& in_cohort_hint) override;
-  bool GetCohortHint(std::string* out_cohort_hint) const override;
-
-  bool SetUpdateOverCellularPermission(bool allowed) override;
-  bool GetUpdateOverCellularPermission(bool* allowed) const override;
-
-  bool SetP2PUpdatePermission(bool enabled) override;
-  bool GetP2PUpdatePermission(bool* enabled) const override;
-
-  bool Rollback(bool powerwash) override;
-
-  bool GetRollbackPartition(std::string* rollback_partition) const override;
-
-  void RebootIfNeeded() override;
-
-  bool GetPrevVersion(std::string* prev_version) const override;
-
-  bool ResetStatus() override;
-
-  bool SetTargetChannel(const std::string& target_channel,
-                        bool allow_powerwash) override;
-
-  bool GetTargetChannel(std::string* out_channel) const override;
-
-  bool GetChannel(std::string* out_channel) const override;
-
-  bool RegisterStatusUpdateHandler(StatusUpdateHandler* handler) override;
-  bool UnregisterStatusUpdateHandler(StatusUpdateHandler* handler) override;
-
-  bool GetLastAttemptError(int32_t* last_attempt_error) const override;
-
-  bool GetEolStatus(int32_t* eol_status) const override;
-
- private:
-  class StatusUpdateCallback
-      : public android::brillo::BnUpdateEngineStatusCallback {
-   public:
-    explicit StatusUpdateCallback(BinderUpdateEngineClient* client)
-        : client_(client) {}
-
-    android::binder::Status HandleStatusUpdate(
-        const android::brillo::ParcelableUpdateEngineStatus& status) override;
-
-   private:
-    BinderUpdateEngineClient* client_;
-  };
-
-  android::sp<android::brillo::IUpdateEngine> service_;
-  android::sp<android::brillo::IUpdateEngineStatusCallback> status_callback_;
-  std::vector<update_engine::StatusUpdateHandler*> handlers_;
-  brillo::BinderWatcher binder_watcher_;
-
-  DISALLOW_COPY_AND_ASSIGN(BinderUpdateEngineClient);
-};  // class BinderUpdateEngineClient
-
-}  // namespace internal
-}  // namespace update_engine
-
-#endif  // UPDATE_ENGINE_CLIENT_LIBRARY_CLIENT_BINDER_H_
diff --git a/client_library/client_dbus.cc b/client_library/client_dbus.cc
index 7ca6307..30ad78c 100644
--- a/client_library/client_dbus.cc
+++ b/client_library/client_dbus.cc
@@ -16,23 +16,51 @@
 
 #include "update_engine/client_library/client_dbus.h"
 
-#include <base/message_loop/message_loop.h>
+#include <base/message_loop/message_loop_current.h>
+
+#include <memory>
 
 #include <dbus/bus.h>
 #include <update_engine/dbus-constants.h>
-#include <update_engine/proto_bindings/update_engine.pb.h>
 
 #include "update_engine/update_status_utils.h"
 
-using chromeos_update_engine::StringToUpdateStatus;
 using dbus::Bus;
 using org::chromium::UpdateEngineInterfaceProxy;
 using std::string;
+using std::unique_ptr;
 using std::vector;
 
 namespace update_engine {
+
+unique_ptr<UpdateEngineClient> UpdateEngineClient::CreateInstance() {
+  auto ret = std::make_unique<internal::DBusUpdateEngineClient>();
+  if (!ret->Init()) {
+    ret.reset();
+  }
+  return ret;
+}
+
 namespace internal {
 
+namespace {
+// This converts the status from the protobuf |StatusResult| to the internal
+// |UpdateEngineStatus| struct.
+void ConvertToUpdateEngineStatus(const StatusResult& status,
+                                 UpdateEngineStatus* out_status) {
+  out_status->last_checked_time = status.last_checked_time();
+  out_status->progress = status.progress();
+  out_status->new_version = status.new_version();
+  out_status->new_size_bytes = status.new_size();
+  out_status->status = static_cast<UpdateStatus>(status.current_operation());
+  out_status->is_enterprise_rollback = status.is_enterprise_rollback();
+  out_status->is_install = status.is_install();
+  out_status->eol_date = status.eol_date();
+  out_status->will_powerwash_after_reboot =
+      status.will_powerwash_after_reboot();
+}
+}  // namespace
+
 bool DBusUpdateEngineClient::Init() {
   Bus::Options options;
   options.bus_type = Bus::SYSTEM;
@@ -57,41 +85,24 @@
       nullptr);
 }
 
-bool DBusUpdateEngineClient::AttemptInstall(
-    const string& omaha_url, const vector<string>& dlc_module_ids) {
-  // Convert parameters into protobuf.
-  chromeos_update_engine::DlcParameters dlc_parameters;
-  dlc_parameters.set_omaha_url(omaha_url);
-  for (const auto& dlc_module_id : dlc_module_ids) {
-    chromeos_update_engine::DlcInfo* dlc_info = dlc_parameters.add_dlc_infos();
-    dlc_info->set_dlc_id(dlc_module_id);
-  }
-  string dlc_request;
-  if (dlc_parameters.SerializeToString(&dlc_request)) {
-    return proxy_->AttemptInstall(dlc_request, nullptr /* brillo::ErrorPtr* */);
-  } else {
-    LOG(ERROR) << "Fail to serialize a protobuf to a string.";
-    return false;
-  }
+bool DBusUpdateEngineClient::AttemptInstall(const string& omaha_url,
+                                            const vector<string>& dlc_ids) {
+  return proxy_->AttemptInstall(omaha_url, dlc_ids, nullptr);
 }
 
-bool DBusUpdateEngineClient::GetStatus(int64_t* out_last_checked_time,
-                                       double* out_progress,
-                                       UpdateStatus* out_update_status,
-                                       string* out_new_version,
-                                       int64_t* out_new_size) const {
-  string status_as_string;
-  const bool success = proxy_->GetStatus(out_last_checked_time,
-                                         out_progress,
-                                         &status_as_string,
-                                         out_new_version,
-                                         out_new_size,
-                                         nullptr);
-  if (!success) {
+bool DBusUpdateEngineClient::SetDlcActiveValue(bool is_active,
+                                               const std::string& dlc_id) {
+  return proxy_->SetDlcActiveValue(is_active, dlc_id, /*error=*/nullptr);
+}
+
+bool DBusUpdateEngineClient::GetStatus(UpdateEngineStatus* out_status) const {
+  StatusResult status;
+  if (!proxy_->GetStatusAdvanced(&status, nullptr)) {
     return false;
   }
 
-  return StringToUpdateStatus(status_as_string, out_update_status);
+  ConvertToUpdateEngineStatus(status, out_status);
+  return true;
 }
 
 bool DBusUpdateEngineClient::SetCohortHint(const string& cohort_hint) {
@@ -160,40 +171,25 @@
 
 void DBusUpdateEngineClient::StatusUpdateHandlersRegistered(
     StatusUpdateHandler* handler) const {
-  int64_t last_checked_time;
-  double progress;
-  UpdateStatus update_status;
-  string new_version;
-  int64_t new_size;
-
-  if (!GetStatus(&last_checked_time,
-                 &progress,
-                 &update_status,
-                 &new_version,
-                 &new_size)) {
+  UpdateEngineStatus status;
+  if (!GetStatus(&status)) {
     handler->IPCError("Could not query current status");
     return;
   }
 
   std::vector<update_engine::StatusUpdateHandler*> just_handler = {handler};
   for (auto h : handler ? just_handler : handlers_) {
-    h->HandleStatusUpdate(
-        last_checked_time, progress, update_status, new_version, new_size);
+    h->HandleStatusUpdate(status);
   }
 }
 
 void DBusUpdateEngineClient::RunStatusUpdateHandlers(
-    int64_t last_checked_time,
-    double progress,
-    const string& current_operation,
-    const string& new_version,
-    int64_t new_size) {
-  UpdateStatus status;
-  StringToUpdateStatus(current_operation, &status);
+    const StatusResult& status) {
+  UpdateEngineStatus ue_status;
+  ConvertToUpdateEngineStatus(status, &ue_status);
 
   for (auto handler : handlers_) {
-    handler->HandleStatusUpdate(
-        last_checked_time, progress, status, new_version, new_size);
+    handler->HandleStatusUpdate(ue_status);
   }
 }
 
@@ -210,7 +206,7 @@
 
 bool DBusUpdateEngineClient::RegisterStatusUpdateHandler(
     StatusUpdateHandler* handler) {
-  if (!base::MessageLoopForIO::current()) {
+  if (!base::MessageLoopCurrent::IsSet()) {
     LOG(FATAL) << "Cannot get UpdateEngineClient outside of message loop.";
     return false;
   }
@@ -222,7 +218,7 @@
     return true;
   }
 
-  proxy_->RegisterStatusUpdateSignalHandler(
+  proxy_->RegisterStatusUpdateAdvancedSignalHandler(
       base::Bind(&DBusUpdateEngineClient::RunStatusUpdateHandlers,
                  base::Unretained(this)),
       base::Bind(&DBusUpdateEngineClient::DBusStatusHandlersRegistered,
@@ -255,9 +251,5 @@
   return proxy_->GetLastAttemptError(last_attempt_error, nullptr);
 }
 
-bool DBusUpdateEngineClient::GetEolStatus(int32_t* eol_status) const {
-  return proxy_->GetEolStatus(eol_status, nullptr);
-}
-
 }  // namespace internal
 }  // namespace update_engine
diff --git a/client_library/client_dbus.h b/client_library/client_dbus.h
index a186d45..f19555f 100644
--- a/client_library/client_dbus.h
+++ b/client_library/client_dbus.h
@@ -23,6 +23,7 @@
 #include <vector>
 
 #include <base/macros.h>
+#include <update_engine/proto_bindings/update_engine.pb.h>
 
 #include "update_engine/client_library/include/update_engine/client.h"
 #include "update_engine/dbus-proxies.h"
@@ -42,13 +43,11 @@
                      bool at_user_request) override;
 
   bool AttemptInstall(const std::string& omaha_url,
-                      const std::vector<std::string>& dlc_module_ids) override;
+                      const std::vector<std::string>& dlc_ids) override;
 
-  bool GetStatus(int64_t* out_last_checked_time,
-                 double* out_progress,
-                 UpdateStatus* out_update_status,
-                 std::string* out_new_version,
-                 int64_t* out_new_size) const override;
+  bool SetDlcActiveValue(bool is_active, const std::string& dlc_id) override;
+
+  bool GetStatus(UpdateEngineStatus* out_status) const override;
 
   bool SetCohortHint(const std::string& cohort_hint) override;
   bool GetCohortHint(std::string* cohort_hint) const override;
@@ -81,8 +80,6 @@
 
   bool GetLastAttemptError(int32_t* last_attempt_error) const override;
 
-  bool GetEolStatus(int32_t* eol_status) const override;
-
  private:
   void DBusStatusHandlersRegistered(const std::string& interface,
                                     const std::string& signal_name,
@@ -93,11 +90,7 @@
   // registered handlers receive the event.
   void StatusUpdateHandlersRegistered(StatusUpdateHandler* handler) const;
 
-  void RunStatusUpdateHandlers(int64_t last_checked_time,
-                               double progress,
-                               const std::string& current_operation,
-                               const std::string& new_version,
-                               int64_t new_size);
+  void RunStatusUpdateHandlers(const StatusResult& status);
 
   std::unique_ptr<org::chromium::UpdateEngineInterfaceProxy> proxy_;
   std::vector<update_engine::StatusUpdateHandler*> handlers_;
diff --git a/client_library/include/update_engine/client.h b/client_library/include/update_engine/client.h
index 1bc6111..f734733 100644
--- a/client_library/include/update_engine/client.h
+++ b/client_library/include/update_engine/client.h
@@ -54,31 +54,18 @@
   //     empty indicates update_engine should use its default value. Note that
   //     update_engine will ignore this parameter in production mode to avoid
   //     pulling untrusted updates.
-  // |dlc_module_ids|
+  // |dlc_ids|
   //     A list of DLC module IDs.
-  virtual bool AttemptInstall(
-      const std::string& omaha_url,
-      const std::vector<std::string>& dlc_module_ids) = 0;
+  virtual bool AttemptInstall(const std::string& omaha_url,
+                              const std::vector<std::string>& dlc_ids) = 0;
 
-  // Returns the current status of the Update Engine.
-  //
-  // |out_last_checked_time|
-  //     the last time the update engine checked for an update in seconds since
-  //     the epoc.
-  // |out_progress|
-  //     when downloading an update, this is calculated as
-  //     (number of bytes received) / (total bytes).
-  // |out_update_status|
-  //     See update_status.h.
-  // |out_new_version|
-  //     string version of the new system image.
-  // |out_new_size|
-  //     number of bytes in the new system image.
-  virtual bool GetStatus(int64_t* out_last_checked_time,
-                         double* out_progress,
-                         UpdateStatus* out_update_status,
-                         std::string* out_new_version,
-                         int64_t* out_new_size) const = 0;
+  // Returns the current status of update_engine in the |out_status| struct.
+  virtual bool GetStatus(UpdateEngineStatus* out_status) const = 0;
+
+  // Sets the DLC as active or inactive. When set to active, the ping metadata
+  // for the DLC is updated accordingly. When set to inactive, the metadata
+  // for the DLC is deleted.
+  virtual bool SetDlcActiveValue(bool is_active, const std::string& dlc_id) = 0;
 
   // Getter and setter for the cohort hint.
   virtual bool SetCohortHint(const std::string& cohort_hint) = 0;
@@ -132,9 +119,6 @@
   // Get the last UpdateAttempt error code.
   virtual bool GetLastAttemptError(int32_t* last_attempt_error) const = 0;
 
-  // Get the current end-of-life status code. See EolStatus enum for details.
-  virtual bool GetEolStatus(int32_t* eol_status) const = 0;
-
  protected:
   // Use CreateInstance().
   UpdateEngineClient() = default;
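
A minimal sketch of how a caller might use the struct-based GetStatus() and the new SetDlcActiveValue() declared above. The include paths, the "dummy-dlc" id, and the logging calls are illustrative assumptions; CreateInstance(), GetStatus(), SetDlcActiveValue(), and the UpdateEngineStatus field names follow the declarations in this change.

#include <memory>

#include <base/logging.h>

#include <update_engine/client.h>
#include <update_engine/update_status.h>

// Sketch only: include paths and the DLC id are assumptions; the calls follow
// the UpdateEngineClient interface declared in client.h.
void QueryStatusOnce() {
  std::unique_ptr<update_engine::UpdateEngineClient> client =
      update_engine::UpdateEngineClient::CreateInstance();
  if (!client)
    return;  // The D-Bus connection could not be initialized.

  update_engine::UpdateEngineStatus status;
  if (!client->GetStatus(&status)) {
    LOG(ERROR) << "GetStatus failed";
    return;
  }
  LOG(INFO) << "progress=" << status.progress
            << " new_version=" << status.new_version
            << " will_powerwash=" << status.will_powerwash_after_reboot;

  // Marking a (hypothetical) DLC active refreshes its ping metadata.
  client->SetDlcActiveValue(/*is_active=*/true, "dummy-dlc");
}
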
diff --git a/client_library/include/update_engine/status_update_handler.h b/client_library/include/update_engine/status_update_handler.h
index d2fad34..238f6bd 100644
--- a/client_library/include/update_engine/status_update_handler.h
+++ b/client_library/include/update_engine/status_update_handler.h
@@ -14,7 +14,9 @@
 // limitations under the License.
 //
 
+// NOLINTNEXTLINE(whitespace/line_length)
 #ifndef UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_UPDATE_ENGINE_STATUS_UPDATE_HANDLER_H_
+// NOLINTNEXTLINE(whitespace/line_length)
 #define UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_UPDATE_ENGINE_STATUS_UPDATE_HANDLER_H_
 
 #include <string>
@@ -35,13 +37,10 @@
   virtual void IPCError(const std::string& error) = 0;
 
   // Runs every time update_engine reports a status change.
-  virtual void HandleStatusUpdate(int64_t last_checked_time,
-                                  double progress,
-                                  UpdateStatus current_operation,
-                                  const std::string& new_version,
-                                  int64_t new_size) = 0;
+  virtual void HandleStatusUpdate(const UpdateEngineStatus& status) = 0;
 };
 
 }  // namespace update_engine
 
+// NOLINTNEXTLINE(whitespace/line_length)
 #endif  // UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_UPDATE_ENGINE_STATUS_UPDATE_HANDLER_H_
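
The handler interface now receives the whole status struct in one call. A hedged sketch of a trivial implementation follows; the class name and the logging style are illustrative, while the two overridden methods mirror the StatusUpdateHandler declaration above. Note that registering such a handler still requires a running message loop, since the D-Bus client checks base::MessageLoopCurrent::IsSet().

#include <string>

#include <base/logging.h>

#include <update_engine/status_update_handler.h>
#include <update_engine/update_status.h>

// Sketch only: a handler that logs every status change it receives.
class LoggingStatusHandler : public update_engine::StatusUpdateHandler {
 public:
  void IPCError(const std::string& error) override {
    LOG(ERROR) << "update_engine IPC error: " << error;
  }

  void HandleStatusUpdate(
      const update_engine::UpdateEngineStatus& status) override {
    LOG(INFO) << "operation=" << static_cast<int>(status.status)
              << " progress=" << status.progress
              << " new_version=" << status.new_version;
  }
};
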
diff --git a/client_library/include/update_engine/update_status.h b/client_library/include/update_engine/update_status.h
index 6490e27..043a36e 100644
--- a/client_library/include/update_engine/update_status.h
+++ b/client_library/include/update_engine/update_status.h
@@ -21,12 +21,19 @@
 
 #include <brillo/enum_flags.h>
 
+// NOTE: Keep this file in sync with
+// platform2/system_api/dbus/update_engine/update_engine.proto especially:
+// - |UpdateStatus| <-> |Operation|
+// - |UpdateEngineStatus| <-> |StatusResult|
+
 namespace update_engine {
 
-// ATTENTION: When adding a new enum value here, always append at the end and
-// make sure to make proper adjustments in UpdateAttempter:ActionCompleted(). If
-// any enum memeber is deprecated, the assigned value of other members should
-// not change. See b/62842358.
+// ATTENTION:
+// When adding a new enum value:
+// - always append at the end with proper adjustments in |ActionCompleted()|.
+// - always update |kNonIdleUpdateStatues| in update_attempter_unittest.cc.
+// When deprecating an old enum value:
+// - other enum values should not change their old values. See b/62842358.
 enum class UpdateStatus {
   IDLE = 0,
   CHECKING_FOR_UPDATE = 1,
@@ -42,6 +49,13 @@
   // allow updates, e.g. over cellular network.
   NEED_PERMISSION_TO_UPDATE = 10,
   CLEANUP_PREVIOUS_UPDATE = 11,
+
+  // This value is exclusively used in Chrome. DO NOT define nor use it.
+  // TODO(crbug.com/977320): Remove this value from chrome by refactoring the
+  // Chrome code and eventually from here. This is not really an operation or
+  // state that the update_engine stays on. This is the result of an internal
+  // failure and should be reflected differently.
+  // ERROR = -1,
 };
 
 // Enum of bit-wise flags for controlling how updates are attempted.
@@ -58,23 +72,27 @@
 DECLARE_FLAGS_ENUM(UpdateAttemptFlags);
 
 struct UpdateEngineStatus {
-  // When the update_engine last checked for updates (time_t: seconds from unix
-  // epoch)
+  // Time of the last update check (time_t: seconds from the Unix epoch).
   int64_t last_checked_time;
-  // the current status/operation of the update_engine
+  // Current status/operation of the update_engine.
   UpdateStatus status;
-  // the current product version (oem bundle id)
+  // Current product version (oem bundle id).
   std::string current_version;
-  // the current system version
-  std::string current_system_version;
-  // The current progress (0.0f-1.0f).
+  // Current progress (0.0f-1.0f).
   double progress;
-  // the size of the update (bytes)
+  // Size of the update in bytes.
   uint64_t new_size_bytes;
-  // the new product version
+  // New product version.
   std::string new_version;
-  // the new system version, if there is one (empty, otherwise)
-  std::string new_system_version;
+  // Whether the update is an enterprise rollback. The value is valid only if
+  // the current operation is past CHECKING_FOR_UPDATE.
+  bool is_enterprise_rollback;
+  // Indication of install for DLC(s).
+  bool is_install;
+  // The end-of-life date of the device in the number of days since Unix Epoch.
+  int64_t eol_date;
+  // The system will powerwash once the update is applied.
+  bool will_powerwash_after_reboot;
 };
 
 }  // namespace update_engine
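
Since |eol_date| is now expressed as days since the Unix epoch (replacing the old EolStatus code), a small hedged sketch of turning it into a wall-clock time for display; base::Time and base::TimeDelta from libchrome are assumed to be available, as elsewhere in this codebase.

#include <base/time/time.h>

#include <update_engine/update_status.h>

// Sketch only: interprets |eol_date| (days since the Unix epoch) as a
// base::Time. A value of 0 maps to 1970-01-01.
base::Time EolDateToTime(const update_engine::UpdateEngineStatus& status) {
  return base::Time::UnixEpoch() +
         base::TimeDelta::FromDays(static_cast<int>(status.eol_date));
}
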
diff --git a/common/action_pipe.h b/common/action_pipe.h
index 0c98ee1..4c56812 100644
--- a/common/action_pipe.h
+++ b/common/action_pipe.h
@@ -79,6 +79,8 @@
 
  private:
   ObjectType contents_;
+  // Give unit test access
+  friend class DownloadActionTest;
 
   // The ctor is private. This is because this class should construct itself
   // via the static Bond() method.
diff --git a/common/action_processor.h b/common/action_processor.h
index 735a106..ad98cc9 100644
--- a/common/action_processor.h
+++ b/common/action_processor.h
@@ -89,7 +89,7 @@
   // But this call deletes the action if there no other object has a reference
   // to it, so in that case, the caller should not try to access any of its
   // member variables after this call.
-  void ActionComplete(AbstractAction* actionptr, ErrorCode code);
+  virtual void ActionComplete(AbstractAction* actionptr, ErrorCode code);
 
  private:
   FRIEND_TEST(ActionProcessorTest, ChainActionsTest);
diff --git a/common/boot_control_interface.h b/common/boot_control_interface.h
index 3906e2f..321174e 100644
--- a/common/boot_control_interface.h
+++ b/common/boot_control_interface.h
@@ -59,12 +59,27 @@
   // every slot. In order to access the dynamic partitions in the target slot,
   // GetDynamicPartitionControl()->PreparePartitionsForUpdate() must be called
   // (with |update| == true for the first time for a payload, and |false| for
-  // for the rest of the times) prior to calling this function. On success,
-  // returns true and stores the block device in |device|.
+  // for the rest of the times) prior to calling this function.
+  // The handling may be different based on whether the partition is included
+  // in the update payload. On success, returns true, stores the block device
+  // in |device|, and stores whether the partition is dynamic in |is_dynamic|.
+  virtual bool GetPartitionDevice(const std::string& partition_name,
+                                  Slot slot,
+                                  bool not_in_payload,
+                                  std::string* device,
+                                  bool* is_dynamic) const = 0;
+
+  // Overload of the above function. We assume the partition is always included
+  // in the payload.
   virtual bool GetPartitionDevice(const std::string& partition_name,
                                   Slot slot,
                                   std::string* device) const = 0;
 
+  virtual std::optional<PartitionDevice> GetPartitionDevice(
+      const std::string& partition_name,
+      uint32_t slot,
+      uint32_t current_slot,
+      bool not_in_payload = false) const = 0;
   // Returns whether the passed |slot| is marked as bootable. Returns false if
   // the slot is invalid.
   virtual bool IsSlotBootable(Slot slot) const = 0;
@@ -88,7 +103,7 @@
   // Check if |slot| is marked boot successfully.
   virtual bool IsSlotMarkedSuccessful(Slot slot) const = 0;
 
-  // Return the dynamic partition control interface.
+  // Return the dynamic partition control interface. Never null.
   virtual DynamicPartitionControlInterface* GetDynamicPartitionControl() = 0;
 
   // Return a human-readable slot name used for logging.
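
A hedged sketch of how a caller could use the extended GetPartitionDevice() overload introduced above; the "system" partition name and the logging are illustrative, and the BootControlInterface pointer is assumed to come from the platform's boot-control factory.

#include <string>

#include <base/logging.h>

#include "update_engine/common/boot_control_interface.h"

// Sketch only: resolves the block device backing the "system" partition in
// the target slot and reports whether it is a dynamic partition.
bool LogTargetSystemDevice(
    chromeos_update_engine::BootControlInterface* boot_control,
    chromeos_update_engine::BootControlInterface::Slot target_slot) {
  std::string device;
  bool is_dynamic = false;
  if (!boot_control->GetPartitionDevice("system",
                                        target_slot,
                                        /*not_in_payload=*/false,
                                        &device,
                                        &is_dynamic)) {
    LOG(ERROR) << "No block device for system in slot " << target_slot;
    return false;
  }
  LOG(INFO) << "system -> " << device << (is_dynamic ? " (dynamic)" : "");
  return true;
}
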
diff --git a/common/boot_control_stub.cc b/common/boot_control_stub.cc
index 2eb9211..a1cc055 100644
--- a/common/boot_control_stub.cc
+++ b/common/boot_control_stub.cc
@@ -35,6 +35,24 @@
   return 0;
 }
 
+bool BootControlStub::GetPartitionDevice(const std::string& partition_name,
+                                         BootControlInterface::Slot slot,
+                                         bool not_in_payload,
+                                         std::string* device,
+                                         bool* is_dynamic) const {
+  LOG(ERROR) << __FUNCTION__ << " should never be called.";
+  return false;
+}
+
+std::optional<PartitionDevice> BootControlStub::GetPartitionDevice(
+    const std::string& partition_name,
+    uint32_t slot,
+    uint32_t current_slot,
+    bool not_in_payload) const {
+  LOG(ERROR) << __FUNCTION__ << " should never be called.";
+  return {};
+}
+
 bool BootControlStub::GetPartitionDevice(const string& partition_name,
                                          Slot slot,
                                          string* device) const {
diff --git a/common/boot_control_stub.h b/common/boot_control_stub.h
index cc16190..dcddbae 100644
--- a/common/boot_control_stub.h
+++ b/common/boot_control_stub.h
@@ -41,8 +41,18 @@
   unsigned int GetNumSlots() const override;
   BootControlInterface::Slot GetCurrentSlot() const override;
   bool GetPartitionDevice(const std::string& partition_name,
+                          Slot slot,
+                          bool not_in_payload,
+                          std::string* device,
+                          bool* is_dynamic) const override;
+  bool GetPartitionDevice(const std::string& partition_name,
                           BootControlInterface::Slot slot,
                           std::string* device) const override;
+  std::optional<PartitionDevice> GetPartitionDevice(
+      const std::string& partition_name,
+      uint32_t slot,
+      uint32_t current_slot,
+      bool not_in_payload = false) const override;
   bool IsSlotBootable(BootControlInterface::Slot slot) const override;
   bool MarkSlotUnbootable(BootControlInterface::Slot slot) override;
   bool SetActiveBootSlot(BootControlInterface::Slot slot) override;
diff --git a/common/cleanup_previous_update_action_delegate.h b/common/cleanup_previous_update_action_delegate.h
index 7dad9c5..8daf860 100644
--- a/common/cleanup_previous_update_action_delegate.h
+++ b/common/cleanup_previous_update_action_delegate.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_CLEANUP_PREVIOUS_UPDATE_ACTION_DELEGETE_H_
-#define UPDATE_ENGINE_CLEANUP_PREVIOUS_UPDATE_ACTION_DELEGETE_H_
+#ifndef UPDATE_ENGINE_COMMON_CLEANUP_PREVIOUS_UPDATE_ACTION_DELEGETE_H_
+#define UPDATE_ENGINE_COMMON_CLEANUP_PREVIOUS_UPDATE_ACTION_DELEGETE_H_
 
 namespace chromeos_update_engine {
 
@@ -29,4 +29,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_CLEANUP_PREVIOUS_UPDATE_ACTION_DELEGETE_H_
+#endif  // UPDATE_ENGINE_COMMON_CLEANUP_PREVIOUS_UPDATE_ACTION_DELEGETE_H_
diff --git a/common/clock.cc b/common/clock.cc
index 05c495c..0821a56 100644
--- a/common/clock.cc
+++ b/common/clock.cc
@@ -20,11 +20,11 @@
 
 namespace chromeos_update_engine {
 
-base::Time Clock::GetWallclockTime() {
+base::Time Clock::GetWallclockTime() const {
   return base::Time::Now();
 }
 
-base::Time Clock::GetMonotonicTime() {
+base::Time Clock::GetMonotonicTime() const {
   struct timespec now_ts;
   if (clock_gettime(CLOCK_MONOTONIC_RAW, &now_ts) != 0) {
     // Avoid logging this as an error as call-sites may call this very
@@ -40,7 +40,7 @@
   return base::Time::FromTimeVal(now_tv);
 }
 
-base::Time Clock::GetBootTime() {
+base::Time Clock::GetBootTime() const {
   struct timespec now_ts;
   if (clock_gettime(CLOCK_BOOTTIME, &now_ts) != 0) {
     // Avoid logging this as an error as call-sites may call this very
diff --git a/common/clock.h b/common/clock.h
index 2f373a7..4021fa1 100644
--- a/common/clock.h
+++ b/common/clock.h
@@ -24,13 +24,11 @@
 // Implements a clock.
 class Clock : public ClockInterface {
  public:
-  Clock() {}
+  Clock() = default;
 
-  base::Time GetWallclockTime() override;
-
-  base::Time GetMonotonicTime() override;
-
-  base::Time GetBootTime() override;
+  base::Time GetWallclockTime() const override;
+  base::Time GetMonotonicTime() const override;
+  base::Time GetBootTime() const override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(Clock);
diff --git a/common/clock_interface.h b/common/clock_interface.h
index 2228983..176505d 100644
--- a/common/clock_interface.h
+++ b/common/clock_interface.h
@@ -32,21 +32,21 @@
   virtual ~ClockInterface() = default;
 
   // Gets the current time e.g. similar to base::Time::Now().
-  virtual base::Time GetWallclockTime() = 0;
+  virtual base::Time GetWallclockTime() const = 0;
 
   // Returns monotonic time since some unspecified starting point. It
   // is not increased when the system is sleeping nor is it affected
   // by NTP or the user changing the time.
   //
   // (This is a simple wrapper around clock_gettime(2) / CLOCK_MONOTONIC_RAW.)
-  virtual base::Time GetMonotonicTime() = 0;
+  virtual base::Time GetMonotonicTime() const = 0;
 
   // Returns monotonic time since some unspecified starting point. It
   // is increased when the system is sleeping but it's not affected
   // by NTP or the user changing the time.
   //
   // (This is a simple wrapper around clock_gettime(2) / CLOCK_BOOTTIME.)
-  virtual base::Time GetBootTime() = 0;
+  virtual base::Time GetBootTime() const = 0;
 };
 
 }  // namespace chromeos_update_engine
diff --git a/connection_utils.cc b/common/connection_utils.cc
similarity index 86%
rename from connection_utils.cc
rename to common/connection_utils.cc
index aeb0163..44e5128 100644
--- a/connection_utils.cc
+++ b/common/connection_utils.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/connection_utils.h"
+#include "update_engine/common/connection_utils.h"
 
 #include <shill/dbus-constants.h>
 
@@ -32,10 +32,6 @@
     return ConnectionType::kEthernet;
   } else if (type_str == shill::kTypeWifi) {
     return ConnectionType::kWifi;
-  } else if (type_str == shill::kTypeWimax) {
-    return ConnectionType::kWimax;
-  } else if (type_str == shill::kTypeBluetooth) {
-    return ConnectionType::kBluetooth;
   } else if (type_str == shill::kTypeCellular) {
     return ConnectionType::kCellular;
   } else if (type_str == kTypeDisconnected) {
@@ -61,10 +57,6 @@
       return shill::kTypeEthernet;
     case ConnectionType::kWifi:
       return shill::kTypeWifi;
-    case ConnectionType::kWimax:
-      return shill::kTypeWimax;
-    case ConnectionType::kBluetooth:
-      return shill::kTypeBluetooth;
     case ConnectionType::kCellular:
       return shill::kTypeCellular;
     case ConnectionType::kDisconnected:
diff --git a/connection_utils.h b/common/connection_utils.h
similarity index 89%
rename from connection_utils.h
rename to common/connection_utils.h
index d5133a1..5d63fb2 100644
--- a/connection_utils.h
+++ b/common/connection_utils.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_CONNECTION_UTILS_H_
-#define UPDATE_ENGINE_CONNECTION_UTILS_H_
+#ifndef UPDATE_ENGINE_COMMON_CONNECTION_UTILS_H_
+#define UPDATE_ENGINE_COMMON_CONNECTION_UTILS_H_
 
 #include <string>
 
@@ -25,8 +25,6 @@
   kDisconnected,
   kEthernet,
   kWifi,
-  kWimax,
-  kBluetooth,
   kCellular,
   kUnknown
 };
@@ -49,4 +47,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_CONNECTION_UTILS_H_
+#endif  // UPDATE_ENGINE_COMMON_CONNECTION_UTILS_H_
diff --git a/common/constants.cc b/common/constants.cc
index 5bfb2b6..0677e66 100644
--- a/common/constants.cc
+++ b/common/constants.cc
@@ -18,6 +18,10 @@
 
 namespace chromeos_update_engine {
 
+const char kExclusionPrefsSubDir[] = "exclusion";
+
+const char kDlcPrefsSubDir[] = "dlc";
+
 const char kPowerwashSafePrefsSubDirectory[] = "update_engine/prefs";
 
 const char kPrefsSubDirectory[] = "prefs";
@@ -55,11 +59,19 @@
 const char kPrefsOmahaCohort[] = "omaha-cohort";
 const char kPrefsOmahaCohortHint[] = "omaha-cohort-hint";
 const char kPrefsOmahaCohortName[] = "omaha-cohort-name";
-const char kPrefsOmahaEolStatus[] = "omaha-eol-status";
+const char kPrefsOmahaEolDate[] = "omaha-eol-date";
 const char kPrefsP2PEnabled[] = "p2p-enabled";
 const char kPrefsP2PFirstAttemptTimestamp[] = "p2p-first-attempt-timestamp";
 const char kPrefsP2PNumAttempts[] = "p2p-num-attempts";
 const char kPrefsPayloadAttemptNumber[] = "payload-attempt-number";
+const char kPrefsTestUpdateCheckIntervalTimeout[] =
+    "test-update-check-interval-timeout";
+// Keep |kPrefsPingActive| in sync with |kDlcMetadataFilePingActive| in
+// dlcservice.
+const char kPrefsPingActive[] = "active";
+const char kPrefsPingLastActive[] = "date_last_active";
+const char kPrefsPingLastRollcall[] = "date_last_rollcall";
+const char kPrefsLastFp[] = "last-fp";
 const char kPrefsPostInstallSucceeded[] = "post-install-succeeded";
 const char kPrefsPreviousVersion[] = "previous-version";
 const char kPrefsResumedUpdateFailures[] = "resumed-update-failures";
@@ -99,6 +111,8 @@
 const char kPrefsWallClockScatteringWaitPeriod[] = "wall-clock-wait-period";
 const char kPrefsWallClockStagingWaitPeriod[] =
     "wall-clock-staging-wait-period";
+const char kPrefsManifestBytes[] = "manifest-bytes";
+const char kPrefsPreviousSlot[] = "previous-slot";
 
 // These four fields are generated by scripts/brillo_update_payload.
 const char kPayloadPropertyFileSize[] = "FILE_SIZE";
@@ -123,4 +137,12 @@
 // The default is 1 (always run post install).
 const char kPayloadPropertyRunPostInstall[] = "RUN_POST_INSTALL";
 
+const char kOmahaUpdaterVersion[] = "0.1.0.0";
+
+// X-Goog-Update headers.
+const char kXGoogleUpdateInteractivity[] = "X-Goog-Update-Interactivity";
+const char kXGoogleUpdateAppId[] = "X-Goog-Update-AppId";
+const char kXGoogleUpdateUpdater[] = "X-Goog-Update-Updater";
+const char kXGoogleUpdateSessionId[] = "X-Goog-SessionId";
+
 }  // namespace chromeos_update_engine
diff --git a/common/constants.h b/common/constants.h
index af1c0ab..68f720d 100644
--- a/common/constants.h
+++ b/common/constants.h
@@ -17,8 +17,16 @@
 #ifndef UPDATE_ENGINE_COMMON_CONSTANTS_H_
 #define UPDATE_ENGINE_COMMON_CONSTANTS_H_
 
+#include <cstdint>
+
 namespace chromeos_update_engine {
 
+// The root path of all exclusion prefs.
+extern const char kExclusionPrefsSubDir[];
+
+// The root path of all DLC metadata.
+extern const char kDlcPrefsSubDir[];
+
 // Directory for AU prefs that are preserved across powerwash.
 extern const char kPowerwashSafePrefsSubDirectory[];
 
@@ -56,13 +64,19 @@
 extern const char kPrefsOmahaCohort[];
 extern const char kPrefsOmahaCohortHint[];
 extern const char kPrefsOmahaCohortName[];
-extern const char kPrefsOmahaEolStatus[];
+extern const char kPrefsOmahaEolDate[];
 extern const char kPrefsP2PEnabled[];
 extern const char kPrefsP2PFirstAttemptTimestamp[];
 extern const char kPrefsP2PNumAttempts[];
 extern const char kPrefsPayloadAttemptNumber[];
+extern const char kPrefsTestUpdateCheckIntervalTimeout[];
+extern const char kPrefsPingActive[];
+extern const char kPrefsPingLastActive[];
+extern const char kPrefsPingLastRollcall[];
+extern const char kPrefsLastFp[];
 extern const char kPrefsPostInstallSucceeded[];
 extern const char kPrefsPreviousVersion[];
+extern const char kPrefsPreviousSlot[];
 extern const char kPrefsResumedUpdateFailures[];
 extern const char kPrefsRollbackHappened[];
 extern const char kPrefsRollbackVersion[];
@@ -95,6 +109,7 @@
 extern const char kPrefsVerityWritten[];
 extern const char kPrefsWallClockScatteringWaitPeriod[];
 extern const char kPrefsWallClockStagingWaitPeriod[];
+extern const char kPrefsManifestBytes[];
 
 // Keys used when storing and loading payload properties.
 extern const char kPayloadPropertyFileSize[];
@@ -108,6 +123,14 @@
 extern const char kPayloadPropertySwitchSlotOnReboot[];
 extern const char kPayloadPropertyRunPostInstall[];
 
+extern const char kOmahaUpdaterVersion[];
+
+// X-Goog-Update headers.
+extern const char kXGoogleUpdateInteractivity[];
+extern const char kXGoogleUpdateAppId[];
+extern const char kXGoogleUpdateUpdater[];
+extern const char kXGoogleUpdateSessionId[];
+
 // A download source is any combination of protocol and server (that's of
 // interest to us when looking at UMA metrics) using which we may download
 // the payload.
@@ -134,30 +157,30 @@
 } PayloadType;
 
 // Maximum number of times we'll allow using p2p for the same update payload.
-const int kMaxP2PAttempts = 10;
+constexpr int kMaxP2PAttempts = 10;
 
 // Maximum wallclock time we allow attempting to update using p2p for
 // the same update payload - five days.
-const int kMaxP2PAttemptTimeSeconds = 5 * 24 * 60 * 60;
+constexpr int kMaxP2PAttemptTimeSeconds = 5 * 24 * 60 * 60;
 
 // The maximum amount of time to spend waiting for p2p-client(1) to
 // return while waiting in line to use the LAN - six hours.
-const int kMaxP2PNetworkWaitTimeSeconds = 6 * 60 * 60;
+constexpr int kMaxP2PNetworkWaitTimeSeconds = 6 * 60 * 60;
 
 // The maximum number of payload files to keep in /var/cache/p2p.
-const int kMaxP2PFilesToKeep = 3;
+constexpr int kMaxP2PFilesToKeep = 3;
 
 // The maximum number of days to keep a p2p file;
-const int kMaxP2PFileAgeDays = 5;
+constexpr int kMaxP2PFileAgeDays = 5;
 
 // The default number of UMA buckets for metrics.
-const int kNumDefaultUmaBuckets = 50;
+constexpr int kNumDefaultUmaBuckets = 50;
 
-// General constants
-const int kNumBytesInOneMiB = 1024 * 1024;
+// General constants
+constexpr int kNumBytesInOneMiB = 1024 * 1024;
 
 // Number of redirects allowed when downloading.
-const int kDownloadMaxRedirects = 10;
+constexpr int kDownloadMaxRedirects = 10;
 
 // The minimum average speed that downloads must sustain...
 //
@@ -165,8 +188,8 @@
 // connectivity and we want to make as much forward progress as
 // possible. For p2p this is high (25 kB/second) since we can assume
 // high bandwidth (same LAN) and we want to fail fast.
-const int kDownloadLowSpeedLimitBps = 1;
-const int kDownloadP2PLowSpeedLimitBps = 25 * 1000;
+constexpr int kDownloadLowSpeedLimitBps = 1;
+constexpr int kDownloadP2PLowSpeedLimitBps = 25 * 1000;
 
 // ... measured over this period.
 //
@@ -175,18 +198,18 @@
 // for the workstation to generate the payload. For normal operation
 // and p2p, make this relatively low since we want to fail fast in
 // those cases.
-const int kDownloadLowSpeedTimeSeconds = 30;
-const int kDownloadDevModeLowSpeedTimeSeconds = 180;
-const int kDownloadP2PLowSpeedTimeSeconds = 60;
+constexpr int kDownloadLowSpeedTimeSeconds = 30;
+constexpr int kDownloadDevModeLowSpeedTimeSeconds = 180;
+constexpr int kDownloadP2PLowSpeedTimeSeconds = 60;
 
 // The maximum amount of HTTP server reconnect attempts.
 //
 // This is set high in order to maximize the attempt's chance of
 // succeeding. When using p2p, this is low in order to fail fast.
-const int kDownloadMaxRetryCount = 20;
-const int kDownloadMaxRetryCountOobeNotComplete = 3;
-const int kDownloadMaxRetryCountInteractive = 3;
-const int kDownloadP2PMaxRetryCount = 5;
+constexpr int kDownloadMaxRetryCount = 20;
+constexpr int kDownloadMaxRetryCountOobeNotComplete = 3;
+constexpr int kDownloadMaxRetryCountInteractive = 3;
+constexpr int kDownloadP2PMaxRetryCount = 5;
 
 // The connect timeout, in seconds.
 //
@@ -194,11 +217,19 @@
 // connectivity and we may be using HTTPS which involves complicated
 // multi-roundtrip setup. For p2p, this is set low because we can
 // assume the server is on the same LAN and we want to fail fast.
-const int kDownloadConnectTimeoutSeconds = 30;
-const int kDownloadP2PConnectTimeoutSeconds = 5;
+constexpr int kDownloadConnectTimeoutSeconds = 30;
+constexpr int kDownloadP2PConnectTimeoutSeconds = 5;
 
 // Size in bytes of SHA256 hash.
-const int kSHA256Size = 32;
+constexpr int kSHA256Size = 32;
+
+// A hardcoded label to mark the end of all InstallOps.
+// This number must be greater than the number of install ops, which is
+// bounded by the number of blocks on any partition. Currently the block size
+// is 4096, so a |kEndOfInstallLabel| of 2^48 allows partitions of up to
+// 2^48 * 4096 = 2^60 bytes (1 EiB); partitions on Android aren't getting
+// that big any time soon.
+constexpr uint64_t kEndOfInstallLabel = (1ULL << 48);
 
 }  // namespace chromeos_update_engine
 
diff --git a/common/cow_operation_convert.cc b/common/cow_operation_convert.cc
new file mode 100644
index 0000000..2564abf
--- /dev/null
+++ b/common/cow_operation_convert.cc
@@ -0,0 +1,82 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/common/cow_operation_convert.h"
+
+#include <base/logging.h>
+
+#include "update_engine/payload_generator/extent_ranges.h"
+#include "update_engine/payload_generator/extent_utils.h"
+#include "update_engine/update_metadata.pb.h"
+
+namespace chromeos_update_engine {
+
+std::vector<CowOperation> ConvertToCowOperations(
+    const ::google::protobuf::RepeatedPtrField<
+        ::chromeos_update_engine::InstallOperation>& operations,
+    const ::google::protobuf::RepeatedPtrField<CowMergeOperation>&
+        merge_operations) {
+  ExtentRanges merge_extents;
+  std::vector<CowOperation> converted;
+  ExtentRanges modified_extents;
+
+  // We want all CowCopy ops to be done first, before any COW_REPLACE happens.
+  // Therefore we add these ops in 2 separate loops. This is because during
+  // merge, a CowReplace might modify a block needed by CowCopy, so we always
+  // perform CowCopy first.
+
+  // This loop handles CowCopy blocks within SOURCE_COPY, and the next loop
+  // converts the leftover blocks to CowReplace.
+  for (const auto& merge_op : merge_operations) {
+    if (merge_op.type() != CowMergeOperation::COW_COPY) {
+      continue;
+    }
+    merge_extents.AddExtent(merge_op.dst_extent());
+    const auto& src_extent = merge_op.src_extent();
+    const auto& dst_extent = merge_op.dst_extent();
+    // Add blocks in reverse order, because snapuserd specifically prefers this
+    // ordering. Since we already eliminated all self-overlapping SOURCE_COPY
+    // during delta generation, this should be safe to do.
+    for (uint64_t i = src_extent.num_blocks(); i > 0; i--) {
+      auto src_block = src_extent.start_block() + i - 1;
+      auto dst_block = dst_extent.start_block() + i - 1;
+      converted.push_back({CowOperation::CowCopy, src_block, dst_block});
+      modified_extents.AddBlock(dst_block);
+    }
+  }
+  // COW_REPLACE are added after COW_COPY, because replace might modify blocks
+  // needed by COW_COPY. Please don't merge this loop with the previous one.
+  for (const auto& operation : operations) {
+    if (operation.type() != InstallOperation::SOURCE_COPY) {
+      continue;
+    }
+    const auto& src_extents = operation.src_extents();
+    const auto& dst_extents = operation.dst_extents();
+    BlockIterator it1{src_extents};
+    BlockIterator it2{dst_extents};
+    while (!it1.is_end() && !it2.is_end()) {
+      auto src_block = *it1;
+      auto dst_block = *it2;
+      if (!merge_extents.ContainsBlock(dst_block)) {
+        converted.push_back({CowOperation::CowReplace, src_block, dst_block});
+      }
+      ++it1;
+      ++it2;
+    }
+  }
+  return converted;
+}
+}  // namespace chromeos_update_engine
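
To make the ordering guarantee concrete, a hedged sketch that feeds a single self-overlapping SOURCE_COPY (blocks 20-29 copied onto 25-34, with a matching COW_COPY merge op) through ConvertToCowOperations(); ExtentForRange() is assumed from payload_generator/extent_ranges.h, as in the unit tests below. The result is ten CowCopy entries emitted in reverse block order, so no destination block is written before it has been read.

#include <iostream>

#include "update_engine/common/cow_operation_convert.h"
#include "update_engine/payload_generator/extent_ranges.h"
#include "update_engine/update_metadata.pb.h"

using chromeos_update_engine::ConvertToCowOperations;
using chromeos_update_engine::CowMergeOperation;
using chromeos_update_engine::CowOperation;
using chromeos_update_engine::ExtentForRange;
using chromeos_update_engine::InstallOperation;

// Sketch only: exercises the reverse-ordering behavior described above.
int main() {
  google::protobuf::RepeatedPtrField<InstallOperation> operations;
  auto* op = operations.Add();
  op->set_type(InstallOperation::SOURCE_COPY);
  *op->add_src_extents() = ExtentForRange(20, 10);
  *op->add_dst_extents() = ExtentForRange(25, 10);

  google::protobuf::RepeatedPtrField<CowMergeOperation> merge_operations;
  auto* merge = merge_operations.Add();
  merge->set_type(CowMergeOperation::COW_COPY);
  *merge->mutable_src_extent() = ExtentForRange(20, 10);
  *merge->mutable_dst_extent() = ExtentForRange(25, 10);

  // Prints ten CowCopy pairs, highest destination block first:
  // 29 -> 34, 28 -> 33, ..., 20 -> 25.
  for (const CowOperation& c :
       ConvertToCowOperations(operations, merge_operations)) {
    std::cout << c.src_block << " -> " << c.dst_block << '\n';
  }
  return 0;
}
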
diff --git a/common/cow_operation_convert.h b/common/cow_operation_convert.h
new file mode 100644
index 0000000..c0543f7
--- /dev/null
+++ b/common/cow_operation_convert.h
@@ -0,0 +1,56 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef __COW_OPERATION_CONVERT_H
+#define __COW_OPERATION_CONVERT_H
+
+#include <vector>
+
+#include <libsnapshot/cow_format.h>
+
+#include "update_engine/update_metadata.pb.h"
+
+namespace chromeos_update_engine {
+struct CowOperation {
+  enum Type {
+    CowCopy = android::snapshot::kCowCopyOp,
+    CowReplace = android::snapshot::kCowReplaceOp,
+  };
+  Type op;
+  uint64_t src_block;
+  uint64_t dst_block;
+};
+
+// Converts SOURCE_COPY operations in the `operations` list to a list of
+// CowOperations according to the merge sequence. This function only converts
+// SOURCE_COPY, other operations are ignored. If there's a merge conflict in
+// SOURCE_COPY operations, some blocks may be converted to COW_REPLACE instead
+// of COW_COPY.
+
+// The list returned does not necessarily preserve the order of
+// SOURCE_COPY in `operations`. The only guarantee about ordering in the
+// returned list is that if operations are applied in such order, there would be
+// no merge conflicts.
+
+// This function is intended to be used by delta_performer to perform
+// SOURCE_COPY operations on Virtual AB Compression devices.
+std::vector<CowOperation> ConvertToCowOperations(
+    const ::google::protobuf::RepeatedPtrField<
+        ::chromeos_update_engine::InstallOperation>& operations,
+    const ::google::protobuf::RepeatedPtrField<CowMergeOperation>&
+        merge_operations);
+}  // namespace chromeos_update_engine
+#endif
diff --git a/common/cow_operation_convert_unittest.cc b/common/cow_operation_convert_unittest.cc
new file mode 100644
index 0000000..93173fe
--- /dev/null
+++ b/common/cow_operation_convert_unittest.cc
@@ -0,0 +1,236 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <algorithm>
+#include <array>
+#include <initializer_list>
+
+#include <gtest/gtest.h>
+
+#include "update_engine/common/cow_operation_convert.h"
+#include "update_engine/payload_generator/extent_ranges.h"
+#include "update_engine/update_metadata.pb.h"
+
+namespace chromeos_update_engine {
+using OperationList = ::google::protobuf::RepeatedPtrField<
+    ::chromeos_update_engine::InstallOperation>;
+using MergeOplist = ::google::protobuf::RepeatedPtrField<
+    ::chromeos_update_engine::CowMergeOperation>;
+
+std::ostream& operator<<(std::ostream& out, CowOperation::Type op) {
+  switch (op) {
+    case CowOperation::Type::CowCopy:
+      out << "CowCopy";
+      break;
+    case CowOperation::Type::CowReplace:
+      out << "CowReplace";
+      break;
+    default:
+      out << op;
+      break;
+  }
+  return out;
+}
+
+std::ostream& operator<<(std::ostream& out, const CowOperation& c) {
+  out << "{" << c.op << ", " << c.src_block << ", " << c.dst_block << "}";
+  return out;
+}
+
+class CowOperationConvertTest : public testing::Test {
+ public:
+  void VerifyCowMergeOp(const std::vector<CowOperation>& cow_ops) {
+    // Build a set of all extents covered by InstallOps.
+    ExtentRanges src_extent_set;
+    ExtentRanges dst_extent_set;
+    for (auto&& op : operations_) {
+      src_extent_set.AddRepeatedExtents(op.src_extents());
+      dst_extent_set.AddRepeatedExtents(op.dst_extents());
+    }
+    ExtentRanges modified_extents;
+    for (auto&& cow_op : cow_ops) {
+      if (cow_op.op == CowOperation::CowCopy) {
+        EXPECT_TRUE(src_extent_set.ContainsBlock(cow_op.src_block));
+        // converted operations should be conflict free.
+        EXPECT_FALSE(modified_extents.ContainsBlock(cow_op.src_block))
+            << "SOURCE_COPY operation " << cow_op
+            << " read from a modified block";
+      }
+      EXPECT_TRUE(dst_extent_set.ContainsBlock(cow_op.dst_block));
+      dst_extent_set.SubtractExtent(ExtentForRange(cow_op.dst_block, 1));
+      modified_extents.AddBlock(cow_op.dst_block);
+    }
+    // The generated CowOps should cover all extents in InstallOps.
+    EXPECT_EQ(dst_extent_set.blocks(), 0UL);
+    // It's possible that src_extent_set is non-empty, because some operations
+    // will be converted to CowReplace, and we don't count the source extent for
+    // those.
+  }
+  OperationList operations_;
+  MergeOplist merge_operations_;
+};
+
+void AddOperation(OperationList* operations,
+                  ::chromeos_update_engine::InstallOperation_Type op_type,
+                  std::initializer_list<std::array<int, 2>> src_extents,
+                  std::initializer_list<std::array<int, 2>> dst_extents) {
+  auto&& op = operations->Add();
+  op->set_type(op_type);
+  for (const auto& extent : src_extents) {
+    *op->add_src_extents() = ExtentForRange(extent[0], extent[1]);
+  }
+  for (const auto& extent : dst_extents) {
+    *op->add_dst_extents() = ExtentForRange(extent[0], extent[1]);
+  }
+}
+
+void AddMergeOperation(MergeOplist* operations,
+                       ::chromeos_update_engine::CowMergeOperation_Type op_type,
+                       std::array<int, 2> src_extent,
+                       std::array<int, 2> dst_extent) {
+  auto&& op = operations->Add();
+  op->set_type(op_type);
+  *op->mutable_src_extent() = ExtentForRange(src_extent[0], src_extent[1]);
+  *op->mutable_dst_extent() = ExtentForRange(dst_extent[0], dst_extent[1]);
+}
+
+TEST_F(CowOperationConvertTest, NoConflict) {
+  AddOperation(
+      &operations_, InstallOperation::SOURCE_COPY, {{20, 1}}, {{30, 1}});
+  AddOperation(
+      &operations_, InstallOperation::SOURCE_COPY, {{10, 1}}, {{20, 1}});
+  AddOperation(
+      &operations_, InstallOperation::SOURCE_COPY, {{0, 1}}, {{10, 1}});
+
+  AddMergeOperation(
+      &merge_operations_, CowMergeOperation::COW_COPY, {20, 1}, {30, 1});
+  AddMergeOperation(
+      &merge_operations_, CowMergeOperation::COW_COPY, {10, 1}, {20, 1});
+  AddMergeOperation(
+      &merge_operations_, CowMergeOperation::COW_COPY, {0, 1}, {10, 1});
+
+  auto cow_ops = ConvertToCowOperations(operations_, merge_operations_);
+  ASSERT_EQ(cow_ops.size(), 3UL);
+  ASSERT_TRUE(std::all_of(cow_ops.begin(), cow_ops.end(), [](auto&& cow_op) {
+    return cow_op.op == CowOperation::CowCopy;
+  }));
+  VerifyCowMergeOp(cow_ops);
+}
+
+TEST_F(CowOperationConvertTest, CowReplace) {
+  AddOperation(
+      &operations_, InstallOperation::SOURCE_COPY, {{30, 1}}, {{0, 1}});
+  AddOperation(
+      &operations_, InstallOperation::SOURCE_COPY, {{20, 1}}, {{30, 1}});
+  AddOperation(
+      &operations_, InstallOperation::SOURCE_COPY, {{10, 1}}, {{20, 1}});
+  AddOperation(
+      &operations_, InstallOperation::SOURCE_COPY, {{0, 1}}, {{10, 1}});
+
+  AddMergeOperation(
+      &merge_operations_, CowMergeOperation::COW_COPY, {20, 1}, {30, 1});
+  AddMergeOperation(
+      &merge_operations_, CowMergeOperation::COW_COPY, {10, 1}, {20, 1});
+  AddMergeOperation(
+      &merge_operations_, CowMergeOperation::COW_COPY, {0, 1}, {10, 1});
+
+  auto cow_ops = ConvertToCowOperations(operations_, merge_operations_);
+  ASSERT_EQ(cow_ops.size(), 4UL);
+  // Expect 3 COW_COPY and 1 COW_REPLACE
+  ASSERT_EQ(std::count_if(cow_ops.begin(),
+                          cow_ops.end(),
+                          [](auto&& cow_op) {
+                            return cow_op.op == CowOperation::CowCopy;
+                          }),
+            3);
+  ASSERT_EQ(std::count_if(cow_ops.begin(),
+                          cow_ops.end(),
+                          [](auto&& cow_op) {
+                            return cow_op.op == CowOperation::CowReplace;
+                          }),
+            1);
+  VerifyCowMergeOp(cow_ops);
+}
+
+TEST_F(CowOperationConvertTest, ReOrderSourceCopy) {
+  AddOperation(
+      &operations_, InstallOperation::SOURCE_COPY, {{30, 1}}, {{20, 1}});
+  AddOperation(
+      &operations_, InstallOperation::SOURCE_COPY, {{20, 1}}, {{10, 1}});
+  AddOperation(
+      &operations_, InstallOperation::SOURCE_COPY, {{10, 1}}, {{0, 1}});
+
+  AddMergeOperation(
+      &merge_operations_, CowMergeOperation::COW_COPY, {10, 1}, {0, 1});
+  AddMergeOperation(
+      &merge_operations_, CowMergeOperation::COW_COPY, {20, 1}, {10, 1});
+  AddMergeOperation(
+      &merge_operations_, CowMergeOperation::COW_COPY, {30, 1}, {20, 1});
+
+  auto cow_ops = ConvertToCowOperations(operations_, merge_operations_);
+  ASSERT_EQ(cow_ops.size(), 3UL);
+  // Expect 3 COW_COPY
+  ASSERT_TRUE(std::all_of(cow_ops.begin(), cow_ops.end(), [](auto&& cow_op) {
+    return cow_op.op == CowOperation::CowCopy;
+  }));
+  VerifyCowMergeOp(cow_ops);
+}
+
+TEST_F(CowOperationConvertTest, InterleavingSrcExtent) {
+  AddOperation(&operations_,
+               InstallOperation::SOURCE_COPY,
+               {{30, 5}, {35, 5}},
+               {{20, 10}});
+  AddOperation(
+      &operations_, InstallOperation::SOURCE_COPY, {{20, 1}}, {{10, 1}});
+  AddOperation(
+      &operations_, InstallOperation::SOURCE_COPY, {{10, 1}}, {{0, 1}});
+
+  AddMergeOperation(
+      &merge_operations_, CowMergeOperation::COW_COPY, {10, 1}, {0, 1});
+  AddMergeOperation(
+      &merge_operations_, CowMergeOperation::COW_COPY, {20, 1}, {10, 1});
+  AddMergeOperation(
+      &merge_operations_, CowMergeOperation::COW_COPY, {30, 5}, {20, 5});
+  AddMergeOperation(
+      &merge_operations_, CowMergeOperation::COW_COPY, {35, 5}, {25, 5});
+
+  auto cow_ops = ConvertToCowOperations(operations_, merge_operations_);
+  // Expect 12 CowCopy operations, covering the four COW_COPY merge ops.
+  ASSERT_EQ(cow_ops.size(), 12UL);
+  ASSERT_TRUE(std::all_of(cow_ops.begin(), cow_ops.end(), [](auto&& cow_op) {
+    return cow_op.op == CowOperation::CowCopy;
+  }));
+  VerifyCowMergeOp(cow_ops);
+}
+
+TEST_F(CowOperationConvertTest, SelfOverlappingOperation) {
+  AddOperation(
+      &operations_, InstallOperation::SOURCE_COPY, {{20, 10}}, {{25, 10}});
+
+  AddMergeOperation(
+      &merge_operations_, CowMergeOperation::COW_COPY, {20, 10}, {25, 10});
+
+  auto cow_ops = ConvertToCowOperations(operations_, merge_operations_);
+  // Expect 10 COW_COPY
+  ASSERT_EQ(cow_ops.size(), 10UL);
+  ASSERT_TRUE(std::all_of(cow_ops.begin(), cow_ops.end(), [](auto&& cow_op) {
+    return cow_op.op == CowOperation::CowCopy;
+  }));
+  VerifyCowMergeOp(cow_ops);
+}
+
+}  // namespace chromeos_update_engine
diff --git a/common/cpu_limiter.cc b/common/cpu_limiter.cc
index 1d14764..5f1ae6f 100644
--- a/common/cpu_limiter.cc
+++ b/common/cpu_limiter.cc
@@ -67,7 +67,7 @@
   if (shares_ == shares)
     return true;
 
-  std::string string_shares = base::IntToString(static_cast<int>(shares));
+  std::string string_shares = base::NumberToString(static_cast<int>(shares));
   LOG(INFO) << "Setting cgroup cpu shares to  " << string_shares;
   if (!utils::WriteFile(
           kCGroupSharesPath, string_shares.c_str(), string_shares.size())) {
diff --git a/common/cpu_limiter.h b/common/cpu_limiter.h
index c7add89..e6d7331 100644
--- a/common/cpu_limiter.h
+++ b/common/cpu_limiter.h
@@ -30,10 +30,6 @@
   kLow = 2,
 };
 
-// Sets the current process shares to |shares|. Returns true on
-// success, false otherwise.
-bool SetCpuShares(CpuShares shares);
-
 class CPULimiter {
  public:
   CPULimiter() = default;
diff --git a/dynamic_partition_utils.h b/common/daemon_base.h
similarity index 60%
copy from dynamic_partition_utils.h
copy to common/daemon_base.h
index 09fce00..4bc5ef7 100644
--- a/dynamic_partition_utils.h
+++ b/common/daemon_base.h
@@ -14,20 +14,27 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_DYNAMIC_PARTITION_UTILS_H_
-#define UPDATE_ENGINE_DYNAMIC_PARTITION_UTILS_H_
+#ifndef UPDATE_ENGINE_COMMON_DAEMON_BASE_H_
+#define UPDATE_ENGINE_COMMON_DAEMON_BASE_H_
 
-#include <string>
+#include <memory>
 
-#include <liblp/builder.h>
+#include <brillo/daemons/daemon.h>
 
 namespace chromeos_update_engine {
 
-// Delete all groups (and their partitions) in |builder| that have names
-// ending with |suffix|.
-void DeleteGroupsWithSuffix(android::fs_mgr::MetadataBuilder* builder,
-                            const std::string& suffix);
+class DaemonBase : public brillo::Daemon {
+ public:
+  DaemonBase() = default;
+  virtual ~DaemonBase() = default;
+
+  // Creates an instance of the daemon.
+  static std::unique_ptr<DaemonBase> CreateInstance();
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(DaemonBase);
+};
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_DYNAMIC_PARTITION_UTILS_H_
+#endif  // UPDATE_ENGINE_COMMON_DAEMON_BASE_H_
diff --git a/daemon_state_interface.h b/common/daemon_state_interface.h
similarity index 82%
rename from daemon_state_interface.h
rename to common/daemon_state_interface.h
index 2356816..831e38b 100644
--- a/daemon_state_interface.h
+++ b/common/daemon_state_interface.h
@@ -14,12 +14,11 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_DAEMON_STATE_INTERFACE_H_
-#define UPDATE_ENGINE_DAEMON_STATE_INTERFACE_H_
+#ifndef UPDATE_ENGINE_COMMON_DAEMON_STATE_INTERFACE_H_
+#define UPDATE_ENGINE_COMMON_DAEMON_STATE_INTERFACE_H_
 
-#include "update_engine/service_observer_interface.h"
+#include "update_engine/common/service_observer_interface.h"
 
-#include <memory>
 #include <set>
 
 namespace chromeos_update_engine {
@@ -42,8 +41,10 @@
 
  protected:
   DaemonStateInterface() = default;
+
+  DISALLOW_COPY_AND_ASSIGN(DaemonStateInterface);
 };
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_DAEMON_STATE_INTERFACE_H_
+#endif  // UPDATE_ENGINE_COMMON_DAEMON_STATE_INTERFACE_H_
diff --git a/common/dlcservice.h b/common/dlcservice.h
deleted file mode 100644
index 9dae560..0000000
--- a/common/dlcservice.h
+++ /dev/null
@@ -1,32 +0,0 @@
-//
-// Copyright (C) 2018 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_COMMON_DLCSERVICE_H_
-#define UPDATE_ENGINE_COMMON_DLCSERVICE_H_
-
-#include <memory>
-
-#include "update_engine/common/dlcservice_interface.h"
-
-namespace chromeos_update_engine {
-
-// This factory function creates a new DlcServiceInterface instance for the
-// current platform.
-std::unique_ptr<DlcServiceInterface> CreateDlcService();
-
-}  // namespace chromeos_update_engine
-
-#endif  // UPDATE_ENGINE_COMMON_DLCSERVICE_H_
diff --git a/common/dlcservice_interface.h b/common/dlcservice_interface.h
index aa24105..7b57710 100644
--- a/common/dlcservice_interface.h
+++ b/common/dlcservice_interface.h
@@ -17,6 +17,7 @@
 #ifndef UPDATE_ENGINE_COMMON_DLCSERVICE_INTERFACE_H_
 #define UPDATE_ENGINE_COMMON_DLCSERVICE_INTERFACE_H_
 
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -30,9 +31,17 @@
  public:
   virtual ~DlcServiceInterface() = default;
 
-  // Returns true and a list of installed DLC module ids in |dlc_module_ids|.
+  // Returns true and a list of installed DLC ids in |dlc_ids|.
   // On failure it returns false.
-  virtual bool GetInstalled(std::vector<std::string>* dlc_module_ids) = 0;
+  virtual bool GetDlcsToUpdate(std::vector<std::string>* dlc_ids) = 0;
+
+  // Returns true if dlcservice successfully handled the install completion
+  // method call, otherwise false.
+  virtual bool InstallCompleted(const std::vector<std::string>& dlc_ids) = 0;
+
+  // Returns true if dlcservice successfully handled the update completion
+  // method call, otherwise false.
+  virtual bool UpdateCompleted(const std::vector<std::string>& dlc_ids) = 0;
 
  protected:
   DlcServiceInterface() = default;
@@ -41,6 +50,10 @@
   DISALLOW_COPY_AND_ASSIGN(DlcServiceInterface);
 };
 
+// This factory function creates a new DlcServiceInterface instance for the
+// current platform.
+std::unique_ptr<DlcServiceInterface> CreateDlcService();
+
 }  // namespace chromeos_update_engine
 
 #endif  // UPDATE_ENGINE_COMMON_DLCSERVICE_INTERFACE_H_
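
As a reading aid, here is a hedged sketch of how a caller uses the renamed DLC hooks above: query the DLCs that should ride along with the update, then acknowledge completion. UpdateDlcsExample is an illustrative function name, not update_engine code.

#include <string>
#include <vector>

#include "update_engine/common/dlcservice_interface.h"

namespace chromeos_update_engine {

bool UpdateDlcsExample() {
  auto dlc_service = CreateDlcService();
  std::vector<std::string> dlc_ids;
  if (!dlc_service->GetDlcsToUpdate(&dlc_ids))
    return false;
  // ... download and apply the payloads for |dlc_ids| here ...
  return dlc_service->UpdateCompleted(dlc_ids);
}

}  // namespace chromeos_update_engine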
diff --git a/common/dlcservice_stub.cc b/common/dlcservice_stub.cc
index c5f9306..2447147 100644
--- a/common/dlcservice_stub.cc
+++ b/common/dlcservice_stub.cc
@@ -27,9 +27,16 @@
   return std::make_unique<DlcServiceStub>();
 }
 
-bool DlcServiceStub::GetInstalled(std::vector<std::string>* dlc_module_ids) {
-  if (dlc_module_ids)
-    dlc_module_ids->clear();
+bool DlcServiceStub::GetDlcsToUpdate(vector<string>* dlc_ids) {
+  if (dlc_ids)
+    dlc_ids->clear();
+  return true;
+}
+
+bool DlcServiceStub::InstallCompleted(const vector<string>& dlc_ids) {
+  return true;
+}
+bool DlcServiceStub::UpdateCompleted(const vector<string>& dlc_ids) {
   return true;
 }
 
diff --git a/common/dlcservice_stub.h b/common/dlcservice_stub.h
index 4e12c11..bc803e8 100644
--- a/common/dlcservice_stub.h
+++ b/common/dlcservice_stub.h
@@ -31,7 +31,9 @@
   ~DlcServiceStub() = default;
 
   // BootControlInterface overrides.
-  bool GetInstalled(std::vector<std::string>* dlc_module_ids) override;
+  bool GetDlcsToUpdate(std::vector<std::string>* dlc_ids) override;
+  bool InstallCompleted(const std::vector<std::string>& dlc_ids) override;
+  bool UpdateCompleted(const std::vector<std::string>& dlc_ids) override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DlcServiceStub);
diff --git a/payload_consumer/download_action.h b/common/download_action.h
similarity index 72%
rename from payload_consumer/download_action.h
rename to common/download_action.h
index 1777e22..7b496b1 100644
--- a/payload_consumer/download_action.h
+++ b/common/download_action.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_DOWNLOAD_ACTION_H_
-#define UPDATE_ENGINE_PAYLOAD_CONSUMER_DOWNLOAD_ACTION_H_
+#ifndef UPDATE_ENGINE_COMMON_DOWNLOAD_ACTION_H_
+#define UPDATE_ENGINE_COMMON_DOWNLOAD_ACTION_H_
 
 #include <fcntl.h>
 #include <sys/stat.h>
@@ -23,6 +23,7 @@
 
 #include <memory>
 #include <string>
+#include <utility>
 
 #include "update_engine/common/action.h"
 #include "update_engine/common/boot_control_interface.h"
@@ -30,7 +31,6 @@
 #include "update_engine/common/multi_range_http_fetcher.h"
 #include "update_engine/payload_consumer/delta_performer.h"
 #include "update_engine/payload_consumer/install_plan.h"
-#include "update_engine/system_state.h"
 
 // The Download Action downloads a specified url to disk. The url should point
 // to an update in a delta payload format. The payload will be piped into a
@@ -71,12 +71,11 @@
 
   // Takes ownership of the passed in HttpFetcher. Useful for testing.
   // A good calling pattern is:
-  // DownloadAction(prefs, boot_contol, hardware, system_state,
+  // DownloadAction(prefs, boot_control, hardware,
   //                new WhateverHttpFetcher, false);
   DownloadAction(PrefsInterface* prefs,
                  BootControlInterface* boot_control,
                  HardwareInterface* hardware,
-                 SystemState* system_state,
                  HttpFetcher* http_fetcher,
                  bool interactive);
   ~DownloadAction() override;
@@ -89,7 +88,9 @@
   std::string Type() const override { return StaticType(); }
 
   // Testing
-  void SetTestFileWriter(FileWriter* writer) { writer_ = writer; }
+  void SetTestFileWriter(std::unique_ptr<DeltaPerformer> writer) {
+    delta_performer_ = std::move(writer);
+  }
 
   int GetHTTPResponseCode() { return http_fetcher_->http_response_code(); }
 
@@ -108,46 +109,22 @@
 
   HttpFetcher* http_fetcher() { return http_fetcher_.get(); }
 
-  // Returns the p2p file id for the file being written or the empty
-  // string if we're not writing to a p2p file.
-  std::string p2p_file_id() { return p2p_file_id_; }
-
  private:
-  // Closes the file descriptor for the p2p file being written and
-  // clears |p2p_file_id_| to indicate that we're no longer sharing
-  // the file. If |delete_p2p_file| is True, also deletes the file.
-  // If there is no p2p file descriptor, this method does nothing.
-  void CloseP2PSharingFd(bool delete_p2p_file);
-
-  // Starts sharing the p2p file. Must be called before
-  // WriteToP2PFile(). Returns True if this worked.
-  bool SetupP2PSharingFd();
-
-  // Writes |length| bytes of payload from |data| into |file_offset|
-  // of the p2p file. Also does sanity checks; for example ensures we
-  // don't end up with a file with holes in it.
-  //
-  // This method does nothing if SetupP2PSharingFd() hasn't been
-  // called or if CloseP2PSharingFd() has been called.
-  void WriteToP2PFile(const void* data, size_t length, off_t file_offset);
+  // Attempts to load cached manifest data from prefs.
+  // Returns true on success, false otherwise.
+  bool LoadCachedManifest(int64_t manifest_size);
 
   // Start downloading the current payload using delta_performer.
   void StartDownloading();
 
-  // The InstallPlan passed in
-  InstallPlan install_plan_;
-
   // Pointer to the current payload in install_plan_.payloads.
   InstallPlan::Payload* payload_{nullptr};
 
-  // SystemState required pointers.
+  // Required pointers.
   PrefsInterface* prefs_;
   BootControlInterface* boot_control_;
   HardwareInterface* hardware_;
 
-  // Global context for the system.
-  SystemState* system_state_;
-
   // Pointer to the MultiRangeHttpFetcher that does the http work.
   std::unique_ptr<MultiRangeHttpFetcher> http_fetcher_;
 
@@ -156,10 +133,6 @@
   // update.
   bool interactive_;
 
-  // The FileWriter that downloaded data should be written to. It will
-  // either point to *decompressing_file_writer_ or *delta_performer_.
-  FileWriter* writer_;
-
   std::unique_ptr<DeltaPerformer> delta_performer_;
 
   // Used by TransferTerminated to figure if this action terminated itself or
@@ -173,17 +146,6 @@
   uint64_t bytes_total_{0};
   bool download_active_{false};
 
-  // The file-id for the file we're sharing or the empty string
-  // if we're not using p2p to share.
-  std::string p2p_file_id_;
-
-  // The file descriptor for the p2p file used for caching the payload or -1
-  // if we're not using p2p to share.
-  int p2p_sharing_fd_;
-
-  // Set to |false| if p2p file is not visible.
-  bool p2p_visible_;
-
   // Loaded from prefs before downloading any payload.
   size_t resume_payload_index_{0};
 
@@ -199,4 +161,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_PAYLOAD_CONSUMER_DOWNLOAD_ACTION_H_
+#endif  // UPDATE_ENGINE_COMMON_DOWNLOAD_ACTION_H_
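
A hedged sketch of the new construction and test seam: the SystemState* parameter is gone, and the test writer is now handed over by ownership instead of through a raw FileWriter*. MakeDownloadActionForTest and all injected objects are assumptions for illustration only.

#include <memory>
#include <utility>

#include "update_engine/common/download_action.h"

namespace chromeos_update_engine {

std::unique_ptr<DownloadAction> MakeDownloadActionForTest(
    PrefsInterface* prefs,
    BootControlInterface* boot_control,
    HardwareInterface* hardware,
    HttpFetcher* fetcher,  // ownership passes to the action
    std::unique_ptr<DeltaPerformer> performer) {
  auto action = std::make_unique<DownloadAction>(
      prefs, boot_control, hardware, fetcher, /*interactive=*/false);
  // The action now owns the injected DeltaPerformer.
  action->SetTestFileWriter(std::move(performer));
  return action;
}

}  // namespace chromeos_update_engine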
diff --git a/common/dynamic_partition_control_interface.h b/common/dynamic_partition_control_interface.h
index 58ebfe4..a5be6e1 100644
--- a/common/dynamic_partition_control_interface.h
+++ b/common/dynamic_partition_control_interface.h
@@ -21,14 +21,28 @@
 
 #include <memory>
 #include <string>
+#include <vector>
 
 #include "update_engine/common/action.h"
 #include "update_engine/common/cleanup_previous_update_action_delegate.h"
 #include "update_engine/common/error_code.h"
+#include "update_engine/common/prefs_interface.h"
+#include "update_engine/payload_consumer/file_descriptor.h"
 #include "update_engine/update_metadata.pb.h"
 
+// Forward declare for libsnapshot/snapshot_writer.h
+namespace android::snapshot {
+class ISnapshotWriter;
+}
+
 namespace chromeos_update_engine {
 
+struct PartitionDevice {
+  std::string rw_device_path;
+  std::string readonly_device_path;
+  bool is_dynamic;
+};
+
 struct FeatureFlag {
   enum class Value { NONE = 0, RETROFIT, LAUNCH };
   constexpr explicit FeatureFlag(Value value) : value_(value) {}
@@ -41,7 +55,6 @@
 };
 
 class BootControlInterface;
-class PrefsInterface;
 
 class DynamicPartitionControlInterface {
  public:
@@ -55,6 +68,11 @@
 
   // Return the feature flags of Virtual A/B on this device.
   virtual FeatureFlag GetVirtualAbFeatureFlag() = 0;
+  // Return the feature flags of Virtual A/B Compression on this device.
+  // This function will tell you if the current device supports VABC. However,
+  // it DOES NOT tell you if VABC is used for the current OTA update. For that,
+  // use UpdateUsesSnapshotCompression.
+  virtual FeatureFlag GetVirtualAbCompressionFeatureFlag() = 0;
 
   // Attempt to optimize |operation|.
   // If successful, |optimized| contains an operation with extents that
@@ -118,6 +136,59 @@
   // progress, while ResetUpdate() forcefully free previously
   // allocated space for snapshot updates.
   virtual bool ResetUpdate(PrefsInterface* prefs) = 0;
+
+  // Reads the dynamic partitions metadata from the given slot, and puts the
+  // name of the dynamic partitions with the current suffix to |partitions|.
+  // Returns true on success.
+  virtual bool ListDynamicPartitionsForSlot(
+      uint32_t slot,
+      uint32_t current_slot,
+      std::vector<std::string>* partitions) = 0;
+
+  // Finds a possible location that lists all block devices by name, and puts
+  // the result in |path|. Returns true on success.
+  // Sample result: /dev/block/by-name/
+  virtual bool GetDeviceDir(std::string* path) = 0;
+
+  // Verifies that the untouched dynamic partitions in the target metadata have
+  // the same extents as the source metadata.
+  virtual bool VerifyExtentsForUntouchedPartitions(
+      uint32_t source_slot,
+      uint32_t target_slot,
+      const std::vector<std::string>& partitions) = 0;
+  // The partition name is expected to be unsuffixed, e.g. system, vendor.
+  // Returns an interface to write to a snapshotted partition.
+  // If `is_append` is false, then existing COW data will be overwritten.
+  // Otherwise the COW writer will be opened in APPEND mode and existing COW
+  // data is preserved.
+  virtual std::unique_ptr<android::snapshot::ISnapshotWriter> OpenCowWriter(
+      const std::string& unsuffixed_partition_name,
+      const std::optional<std::string>&,
+      bool is_append = false) = 0;
+  // Open a general purpose FD capable of reading and writing COW data. Note
+  // that writes must be block aligned.
+  virtual FileDescriptorPtr OpenCowFd(
+      const std::string& unsuffixed_partition_name,
+      const std::optional<std::string>&,
+      bool is_append = false) = 0;
+
+  virtual bool IsDynamicPartition(const std::string& part_name,
+                                  uint32_t slot) = 0;
+
+  // Create virtual block devices for all partitions.
+  virtual bool MapAllPartitions() = 0;
+  // Unmap virtual block devices for all partitions.
+  virtual bool UnmapAllPartitions() = 0;
+
+  // Return whether snapshot compression is enabled for this update.
+  // This function should only be called after preparing for an update
+  // (PreparePartitionsForUpdate), and before merging
+  // (see GetCleanupPreviousUpdateAction and CleanupPreviousUpdateAction) or
+  // resetting it (ResetUpdate).
+  //
+  // To know whether the device itself supports snapshot compression, use
+  // GetVirtualAbCompressionFeatureFlag.
+  virtual bool UpdateUsesSnapshotCompression() = 0;
 };
 
 }  // namespace chromeos_update_engine
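
The distinction drawn in the comments above (device support vs. per-update use of VABC) is easy to miss, so here is a hedged sketch of a caller that honours it. FeatureFlag::IsEnabled() and the libsnapshot include are assumed from unchanged parts of the tree; the "system" partition name is illustrative.

#include <memory>
#include <optional>
#include <string>

#include <libsnapshot/snapshot_writer.h>  // complete ISnapshotWriter type

#include "update_engine/common/dynamic_partition_control_interface.h"

namespace chromeos_update_engine {

std::unique_ptr<android::snapshot::ISnapshotWriter> MaybeOpenSystemCow(
    DynamicPartitionControlInterface* dynamic_control) {
  // Only open a COW writer when the device supports VABC *and* this update
  // actually uses snapshot compression.
  if (dynamic_control->GetVirtualAbCompressionFeatureFlag().IsEnabled() &&
      dynamic_control->UpdateUsesSnapshotCompression()) {
    return dynamic_control->OpenCowWriter(
        "system", std::nullopt, /*is_append=*/false);
  }
  return nullptr;
}

}  // namespace chromeos_update_engine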
diff --git a/common/dynamic_partition_control_stub.cc b/common/dynamic_partition_control_stub.cc
index 903b7ee..dd30a8b 100644
--- a/common/dynamic_partition_control_stub.cc
+++ b/common/dynamic_partition_control_stub.cc
@@ -20,6 +20,7 @@
 #include <string>
 
 #include <base/logging.h>
+#include <libsnapshot/cow_writer.h>
 
 #include "update_engine/common/dynamic_partition_control_stub.h"
 
@@ -33,6 +34,10 @@
   return FeatureFlag(FeatureFlag::Value::NONE);
 }
 
+FeatureFlag DynamicPartitionControlStub::GetVirtualAbCompressionFeatureFlag() {
+  return FeatureFlag(FeatureFlag::Value::NONE);
+}
+
 bool DynamicPartitionControlStub::OptimizeOperation(
     const std::string& partition_name,
     const InstallOperation& operation,
@@ -67,4 +72,47 @@
   return false;
 }
 
+bool DynamicPartitionControlStub::ListDynamicPartitionsForSlot(
+    uint32_t slot,
+    uint32_t current_slot,
+    std::vector<std::string>* partitions) {
+  return true;
+}
+
+bool DynamicPartitionControlStub::GetDeviceDir(std::string* path) {
+  return true;
+}
+
+bool DynamicPartitionControlStub::VerifyExtentsForUntouchedPartitions(
+    uint32_t source_slot,
+    uint32_t target_slot,
+    const std::vector<std::string>& partitions) {
+  return true;
+}
+
+std::unique_ptr<android::snapshot::ISnapshotWriter>
+DynamicPartitionControlStub::OpenCowWriter(
+    const std::string& /*unsuffixed_partition_name*/,
+    const std::optional<std::string>& /*source_path*/,
+    bool /*is_append*/) {
+  return nullptr;
+}
+
+bool DynamicPartitionControlStub::MapAllPartitions() {
+  return false;
+}
+
+bool DynamicPartitionControlStub::UnmapAllPartitions() {
+  return false;
+}
+
+bool DynamicPartitionControlStub::IsDynamicPartition(
+    const std::string& part_name, uint32_t slot) {
+  return false;
+}
+
+bool DynamicPartitionControlStub::UpdateUsesSnapshotCompression() {
+  return false;
+}
+
 }  // namespace chromeos_update_engine
diff --git a/common/dynamic_partition_control_stub.h b/common/dynamic_partition_control_stub.h
index d8e254e..515ec7c 100644
--- a/common/dynamic_partition_control_stub.h
+++ b/common/dynamic_partition_control_stub.h
@@ -21,6 +21,7 @@
 
 #include <memory>
 #include <string>
+#include <vector>
 
 #include "update_engine/common/dynamic_partition_control_interface.h"
 
@@ -30,6 +31,7 @@
  public:
   FeatureFlag GetDynamicPartitionsFeatureFlag() override;
   FeatureFlag GetVirtualAbFeatureFlag() override;
+  FeatureFlag GetVirtualAbCompressionFeatureFlag() override;
   bool OptimizeOperation(const std::string& partition_name,
                          const InstallOperation& operation,
                          InstallOperation* optimized) override;
@@ -46,8 +48,35 @@
       PrefsInterface* prefs,
       CleanupPreviousUpdateActionDelegateInterface* delegate) override;
   bool ResetUpdate(PrefsInterface* prefs) override;
-};
 
+  bool ListDynamicPartitionsForSlot(
+      uint32_t slot,
+      uint32_t current_slot,
+      std::vector<std::string>* partitions) override;
+  bool GetDeviceDir(std::string* path) override;
+
+  bool VerifyExtentsForUntouchedPartitions(
+      uint32_t source_slot,
+      uint32_t target_slot,
+      const std::vector<std::string>& partitions) override;
+
+  std::unique_ptr<android::snapshot::ISnapshotWriter> OpenCowWriter(
+      const std::string& unsuffixed_partition_name,
+      const std::optional<std::string>&,
+      bool is_append) override;
+
+  FileDescriptorPtr OpenCowFd(const std::string& unsuffixed_partition_name,
+                              const std::optional<std::string>&,
+                              bool is_append = false) override {
+    return nullptr;
+  }
+
+  bool MapAllPartitions() override;
+  bool UnmapAllPartitions() override;
+
+  bool IsDynamicPartition(const std::string& part_name, uint32_t slot) override;
+  bool UpdateUsesSnapshotCompression() override;
+};
 }  // namespace chromeos_update_engine
 
 #endif  // UPDATE_ENGINE_COMMON_DYNAMIC_PARTITION_CONTROL_STUB_H_
diff --git a/common/error_code.h b/common/error_code.h
index e473a05..a889888 100644
--- a/common/error_code.h
+++ b/common/error_code.h
@@ -85,6 +85,8 @@
   kUnresolvedHostRecovered = 59,
   kNotEnoughSpace = 60,
   kDeviceCorrupted = 61,
+  kPackageExcludedFromUpdate = 62,
+  kPostInstallMountError = 63,
 
   // VERY IMPORTANT! When adding new error codes:
   //
diff --git a/common/error_code_utils.cc b/common/error_code_utils.cc
index 3fbf0fe..421544a 100644
--- a/common/error_code_utils.cc
+++ b/common/error_code_utils.cc
@@ -171,11 +171,15 @@
       return "ErrorCode::kNotEnoughSpace";
     case ErrorCode::kDeviceCorrupted:
       return "ErrorCode::kDeviceCorrupted";
+    case ErrorCode::kPackageExcludedFromUpdate:
+      return "ErrorCode::kPackageExcludedFromUpdate";
+    case ErrorCode::kPostInstallMountError:
+      return "ErrorCode::kPostInstallMountError";
       // Don't add a default case to let the compiler warn about newly added
       // error codes which should be added here.
   }
 
-  return "Unknown error: " + base::UintToString(static_cast<unsigned>(code));
+  return "Unknown error: " + base::NumberToString(static_cast<unsigned>(code));
 }
 
 }  // namespace utils
diff --git a/common/excluder_interface.h b/common/excluder_interface.h
new file mode 100644
index 0000000..1dfd227
--- /dev/null
+++ b/common/excluder_interface.h
@@ -0,0 +1,62 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_COMMON_EXCLUDER_INTERFACE_H_
+#define UPDATE_ENGINE_COMMON_EXCLUDER_INTERFACE_H_
+
+#include <memory>
+#include <string>
+
+#include <base/macros.h>
+
+namespace chromeos_update_engine {
+
+class PrefsInterface;
+
+// TODO(b/171829801): Move this interface to the 'cros' directory. 'aosp' does
+// not use this at all, not even the stub implementation.
+class ExcluderInterface {
+ public:
+  virtual ~ExcluderInterface() = default;
+
+  // Returns true on successfully excluding |name|, otherwise false. On a
+  // successful |Exclude()| the passed in |name| will be considered excluded
+  // and calls to |IsExcluded()| will return true. The exclusions are persisted.
+  virtual bool Exclude(const std::string& name) = 0;
+
+  // Returns true if |name| reached the exclusion limit, otherwise false.
+  virtual bool IsExcluded(const std::string& name) = 0;
+
+  // Returns true on successfully resetting the entire exclusion state,
+  // otherwise false. On a successful |Reset()| there will be no excluded
+  // |name| in the exclusion state.
+  virtual bool Reset() = 0;
+
+  // Not copyable or movable
+  ExcluderInterface(const ExcluderInterface&) = delete;
+  ExcluderInterface& operator=(const ExcluderInterface&) = delete;
+  ExcluderInterface(ExcluderInterface&&) = delete;
+  ExcluderInterface& operator=(ExcluderInterface&&) = delete;
+
+ protected:
+  ExcluderInterface() = default;
+};
+
+std::unique_ptr<ExcluderInterface> CreateExcluder();
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_COMMON_EXCLUDER_INTERFACE_H_
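
A hedged usage sketch of the new excluder: a payload that keeps failing is excluded under some stable name, later checks consult that state, and Reset() wipes it. MarkPayloadExcluded and the name string are illustrative only.

#include <memory>
#include <string>

#include "update_engine/common/excluder_interface.h"

namespace chromeos_update_engine {

bool MarkPayloadExcluded(const std::string& payload_name) {
  std::unique_ptr<ExcluderInterface> excluder = CreateExcluder();
  if (!excluder->Exclude(payload_name))
    return false;
  // After a successful Exclude() the name is reported as excluded (the stub
  // implementation above is the exception: it never excludes anything).
  return excluder->IsExcluded(payload_name);
}

}  // namespace chromeos_update_engine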
diff --git a/power_manager_android.cc b/common/excluder_stub.cc
similarity index 60%
copy from power_manager_android.cc
copy to common/excluder_stub.cc
index 63a0351..2b987fd 100644
--- a/power_manager_android.cc
+++ b/common/excluder_stub.cc
@@ -1,5 +1,5 @@
 //
-// Copyright (C) 2016 The Android Open Source Project
+// Copyright (C) 2020 The Android Open Source Project
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -14,23 +14,30 @@
 // limitations under the License.
 //
 
-#include "update_engine/power_manager_android.h"
+#include "update_engine/common/excluder_stub.h"
 
 #include <memory>
 
-#include <base/logging.h>
+#include "update_engine/common/prefs_interface.h"
+
+using std::string;
 
 namespace chromeos_update_engine {
 
-namespace power_manager {
-std::unique_ptr<PowerManagerInterface> CreatePowerManager() {
-  return std::unique_ptr<PowerManagerInterface>(new PowerManagerAndroid());
+std::unique_ptr<ExcluderInterface> CreateExcluder() {
+  return std::make_unique<ExcluderStub>();
 }
-}  // namespace power_manager
 
-bool PowerManagerAndroid::RequestReboot() {
-  LOG(WARNING) << "PowerManager not implemented.";
+bool ExcluderStub::Exclude(const string& name) {
+  return true;
+}
+
+bool ExcluderStub::IsExcluded(const string& name) {
   return false;
 }
 
+bool ExcluderStub::Reset() {
+  return true;
+}
+
 }  // namespace chromeos_update_engine
diff --git a/common/excluder_stub.h b/common/excluder_stub.h
new file mode 100644
index 0000000..2d5372a
--- /dev/null
+++ b/common/excluder_stub.h
@@ -0,0 +1,46 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_COMMON_EXCLUDER_STUB_H_
+#define UPDATE_ENGINE_COMMON_EXCLUDER_STUB_H_
+
+#include <string>
+
+#include "update_engine/common/excluder_interface.h"
+
+namespace chromeos_update_engine {
+
+// An implementation of the |ExcluderInterface| that does nothing.
+class ExcluderStub : public ExcluderInterface {
+ public:
+  ExcluderStub() = default;
+  ~ExcluderStub() = default;
+
+  // |ExcluderInterface| overrides.
+  bool Exclude(const std::string& name) override;
+  bool IsExcluded(const std::string& name) override;
+  bool Reset() override;
+
+  // Not copyable or movable.
+  ExcluderStub(const ExcluderStub&) = delete;
+  ExcluderStub& operator=(const ExcluderStub&) = delete;
+  ExcluderStub(ExcluderStub&&) = delete;
+  ExcluderStub& operator=(ExcluderStub&&) = delete;
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_COMMON_EXCLUDER_STUB_H_
diff --git a/common/fake_boot_control.h b/common/fake_boot_control.h
index bd9d9ca..79e2139 100644
--- a/common/fake_boot_control.h
+++ b/common/fake_boot_control.h
@@ -48,16 +48,29 @@
 
   bool GetPartitionDevice(const std::string& partition_name,
                           BootControlInterface::Slot slot,
-                          std::string* device) const override {
-    if (slot >= num_slots_)
+                          bool not_in_payload,
+                          std::string* device,
+                          bool* is_dynamic) const override {
+    auto dev =
+        GetPartitionDevice(partition_name, slot, current_slot_, not_in_payload);
+    if (!dev.has_value()) {
       return false;
-    auto part_it = devices_[slot].find(partition_name);
-    if (part_it == devices_[slot].end())
-      return false;
-    *device = part_it->second;
+    }
+    if (is_dynamic) {
+      *is_dynamic = dev->is_dynamic;
+    }
+    if (device) {
+      *device = dev->rw_device_path;
+    }
     return true;
   }
 
+  bool GetPartitionDevice(const std::string& partition_name,
+                          BootControlInterface::Slot slot,
+                          std::string* device) const override {
+    return GetPartitionDevice(partition_name, slot, false, device, nullptr);
+  }
+
   bool IsSlotBootable(BootControlInterface::Slot slot) const override {
     return slot < num_slots_ && is_bootable_[slot];
   }
@@ -105,10 +118,29 @@
     is_bootable_[slot] = bootable;
   }
 
-  DynamicPartitionControlInterface* GetDynamicPartitionControl() {
+  DynamicPartitionControlInterface* GetDynamicPartitionControl() override {
     return dynamic_partition_control_.get();
   }
 
+  std::optional<PartitionDevice> GetPartitionDevice(
+      const std::string& partition_name,
+      uint32_t slot,
+      uint32_t current_slot,
+      bool not_in_payload = false) const override {
+    if (slot >= devices_.size()) {
+      return {};
+    }
+    auto device_path = devices_[slot].find(partition_name);
+    if (device_path == devices_[slot].end()) {
+      return {};
+    }
+    PartitionDevice device;
+    device.is_dynamic = false;
+    device.rw_device_path = device_path->second;
+    device.readonly_device_path = device.rw_device_path;
+    return device;
+  }
+
  private:
   BootControlInterface::Slot num_slots_{2};
   BootControlInterface::Slot current_slot_{0};
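
A small gtest sketch of the new std::optional<PartitionDevice>-returning overload on the fake. SetPartitionDevice() is assumed to exist on FakeBootControl (it is outside the hunk above); partition names and device paths are arbitrary.

#include <gtest/gtest.h>

#include "update_engine/common/fake_boot_control.h"

namespace chromeos_update_engine {

TEST(FakeBootControlExample, ReturnsStaticPartitionDevice) {
  FakeBootControl boot_control;
  // SetPartitionDevice() (assumed, not shown above) seeds devices_[slot].
  boot_control.SetPartitionDevice("system_a", /*slot=*/0, "/dev/block/sda1");

  auto device = boot_control.GetPartitionDevice("system_a", 0u, 0u);
  ASSERT_TRUE(device.has_value());
  EXPECT_EQ("/dev/block/sda1", device->rw_device_path);
  // The fake reports every configured device as static.
  EXPECT_FALSE(device->is_dynamic);
}

}  // namespace chromeos_update_engine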
diff --git a/common/fake_clock.h b/common/fake_clock.h
index 165ec4d..9c47b57 100644
--- a/common/fake_clock.h
+++ b/common/fake_clock.h
@@ -26,11 +26,11 @@
  public:
   FakeClock() {}
 
-  base::Time GetWallclockTime() override { return wallclock_time_; }
+  base::Time GetWallclockTime() const override { return wallclock_time_; }
 
-  base::Time GetMonotonicTime() override { return monotonic_time_; }
+  base::Time GetMonotonicTime() const override { return monotonic_time_; }
 
-  base::Time GetBootTime() override { return boot_time_; }
+  base::Time GetBootTime() const override { return boot_time_; }
 
   void SetWallclockTime(const base::Time& time) { wallclock_time_ = time; }
 
diff --git a/common/fake_hardware.h b/common/fake_hardware.h
index 0b232da..6c25183 100644
--- a/common/fake_hardware.h
+++ b/common/fake_hardware.h
@@ -19,10 +19,13 @@
 
 #include <map>
 #include <string>
+#include <utility>
 
 #include <base/time/time.h>
 
+#include "update_engine/common/error_code.h"
 #include "update_engine/common/hardware_interface.h"
+#include "update_engine/common/utils.h"
 
 namespace chromeos_update_engine {
 
@@ -73,9 +76,9 @@
 
   std::string GetHardwareClass() const override { return hardware_class_; }
 
-  std::string GetFirmwareVersion() const override { return firmware_version_; }
-
-  std::string GetECVersion() const override { return ec_version_; }
+  std::string GetDeviceRequisition() const override {
+    return device_requisition_;
+  }
 
   int GetMinKernelKeyVersion() const override {
     return min_kernel_key_version_;
@@ -104,15 +107,15 @@
 
   int GetPowerwashCount() const override { return powerwash_count_; }
 
-  bool SchedulePowerwash(bool is_rollback) override {
+  bool SchedulePowerwash(bool save_rollback_data) override {
     powerwash_scheduled_ = true;
-    is_rollback_powerwash_ = is_rollback;
+    save_rollback_data_ = save_rollback_data;
     return true;
   }
 
   bool CancelPowerwash() override {
     powerwash_scheduled_ = false;
-    is_rollback_powerwash_ = false;
+    save_rollback_data_ = false;
     return true;
   }
 
@@ -169,12 +172,10 @@
     hardware_class_ = hardware_class;
   }
 
-  void SetFirmwareVersion(const std::string& firmware_version) {
-    firmware_version_ = firmware_version;
+  void SetDeviceRequisition(const std::string& requisition) {
+    device_requisition_ = requisition;
   }
 
-  void SetECVersion(const std::string& ec_version) { ec_version_ = ec_version; }
-
   void SetMinKernelKeyVersion(int min_kernel_key_version) {
     min_kernel_key_version_ = min_kernel_key_version;
   }
@@ -191,13 +192,39 @@
     build_timestamp_ = build_timestamp;
   }
 
-  void SetWarmReset(bool warm_reset) { warm_reset_ = warm_reset; }
+  void SetWarmReset(bool warm_reset) override { warm_reset_ = warm_reset; }
+
+  void SetVbmetaDigestForInactiveSlot(bool reset) override {}
 
   // Getters to verify state.
   int GetMaxKernelKeyRollforward() const { return kernel_max_rollforward_; }
 
   bool GetIsRollbackPowerwashScheduled() const {
-    return powerwash_scheduled_ && is_rollback_powerwash_;
+    return powerwash_scheduled_ && save_rollback_data_;
+  }
+  std::string GetVersionForLogging(
+      const std::string& partition_name) const override {
+    return partition_timestamps_[partition_name];
+  }
+  void SetVersion(const std::string& partition_name, std::string timestamp) {
+    partition_timestamps_[partition_name] = std::move(timestamp);
+  }
+  ErrorCode IsPartitionUpdateValid(
+      const std::string& partition_name,
+      const std::string& new_version) const override {
+    const auto old_version = GetVersionForLogging(partition_name);
+    return utils::IsTimestampNewer(old_version, new_version);
+  }
+
+  const char* GetPartitionMountOptions(
+      const std::string& partition_name) const override {
+#ifdef __ANDROID__
+    // TODO(allight): This matches the declaration in hardware_android.cc but
+    // ideally shouldn't be duplicated.
+    return "defcontext=u:object_r:postinstall_file:s0";
+#else
+    return "";
+#endif
   }
 
  private:
@@ -209,18 +236,18 @@
   // Jan 20, 2007
   base::Time oobe_timestamp_{base::Time::FromTimeT(1169280000)};
   std::string hardware_class_{"Fake HWID BLAH-1234"};
-  std::string firmware_version_{"Fake Firmware v1.0.1"};
-  std::string ec_version_{"Fake EC v1.0a"};
+  std::string device_requisition_{"fake_requisition"};
   int min_kernel_key_version_{kMinKernelKeyVersion};
   int min_firmware_key_version_{kMinFirmwareKeyVersion};
   int kernel_max_rollforward_{kKernelMaxRollforward};
   int firmware_max_rollforward_{kFirmwareMaxRollforward};
   int powerwash_count_{kPowerwashCountNotSet};
   bool powerwash_scheduled_{false};
-  bool is_rollback_powerwash_{false};
+  bool save_rollback_data_{false};
   int64_t build_timestamp_{0};
   bool first_active_omaha_ping_sent_{false};
   bool warm_reset_{false};
+  mutable std::map<std::string, std::string> partition_timestamps_;
 
   DISALLOW_COPY_AND_ASSIGN(FakeHardware);
 };
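
A gtest sketch of the new per-partition timestamp plumbing: SetVersion() seeds a partition's build timestamp and IsPartitionUpdateValid() compares the incoming one against it via utils::IsTimestampNewer(). The values are arbitrary and the downgrade error code follows the hardware_interface.h comment below; treat the exact return values as assumptions.

#include <gtest/gtest.h>

#include "update_engine/common/error_code.h"
#include "update_engine/common/fake_hardware.h"

namespace chromeos_update_engine {

TEST(FakeHardwareExample, RejectsPartitionDowngrade) {
  FakeHardware hardware;
  // Timestamps are integer strings; the comparison is delegated to
  // utils::IsTimestampNewer() inside IsPartitionUpdateValid().
  hardware.SetVersion("system", "100");

  EXPECT_EQ(ErrorCode::kSuccess,
            hardware.IsPartitionUpdateValid("system", "101"));
  EXPECT_EQ(ErrorCode::kPayloadTimestampError,
            hardware.IsPartitionUpdateValid("system", "99"));
}

}  // namespace chromeos_update_engine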
diff --git a/common/fake_prefs.cc b/common/fake_prefs.cc
index c446e06..ea6ea60 100644
--- a/common/fake_prefs.cc
+++ b/common/fake_prefs.cc
@@ -17,10 +17,12 @@
 #include "update_engine/common/fake_prefs.h"
 
 #include <algorithm>
+#include <utility>
 
 #include <gtest/gtest.h>
 
 using std::string;
+using std::vector;
 
 using chromeos_update_engine::FakePrefs;
 
@@ -65,8 +67,8 @@
   return GetValue(key, value);
 }
 
-bool FakePrefs::SetString(const string& key, const string& value) {
-  SetValue(key, value);
+bool FakePrefs::SetString(const string& key, std::string_view value) {
+  SetValue(key, std::string(value));
   return true;
 }
 
@@ -105,6 +107,29 @@
   return true;
 }
 
+bool FakePrefs::Delete(const string& key, const vector<string>& nss) {
+  bool success = Delete(key);
+  for (const auto& ns : nss) {
+    vector<string> ns_keys;
+    success = GetSubKeys(ns, &ns_keys) && success;
+    for (const auto& sub_key : ns_keys) {
+      auto last_key_seperator = sub_key.find_last_of(kKeySeparator);
+      if (last_key_seperator != string::npos &&
+          key == sub_key.substr(last_key_seperator + 1)) {
+        success = Delete(sub_key) && success;
+      }
+    }
+  }
+  return success;
+}
+
+bool FakePrefs::GetSubKeys(const string& ns, vector<string>* keys) const {
+  for (const auto& pr : values_)
+    if (pr.first.compare(0, ns.length(), ns) == 0)
+      keys->push_back(pr.first);
+  return true;
+}
+
 string FakePrefs::GetTypeName(PrefType type) {
   switch (type) {
     case PrefType::kString:
@@ -125,10 +150,10 @@
 }
 
 template <typename T>
-void FakePrefs::SetValue(const string& key, const T& value) {
+void FakePrefs::SetValue(const string& key, T value) {
   CheckKeyType(key, PrefConsts<T>::type);
   values_[key].type = PrefConsts<T>::type;
-  values_[key].value.*(PrefConsts<T>::member) = value;
+  values_[key].value.*(PrefConsts<T>::member) = std::move(value);
   const auto observers_for_key = observers_.find(key);
   if (observers_for_key != observers_.end()) {
     std::vector<ObserverInterface*> copy_observers(observers_for_key->second);
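
A gtest sketch of the namespaced Delete() added to FakePrefs above: the plain key is removed, and so is any key under the listed namespaces whose last path segment matches. The key names are illustrative and '/' is assumed to be kKeySeparator.

#include <gtest/gtest.h>

#include "update_engine/common/fake_prefs.h"

namespace chromeos_update_engine {

TEST(FakePrefsExample, NamespacedDeleteClearsSubKeys) {
  FakePrefs prefs;
  // Illustrative keys; '/' is assumed to match kKeySeparator.
  ASSERT_TRUE(prefs.SetString("manifest-bytes", "abc"));
  ASSERT_TRUE(prefs.SetString("dlc/foo/manifest-bytes", "abc"));

  // Deletes the top-level key and any key under the "dlc" namespace whose
  // last path segment equals "manifest-bytes".
  EXPECT_TRUE(prefs.Delete("manifest-bytes", {"dlc"}));
  EXPECT_FALSE(prefs.Exists("manifest-bytes"));
  EXPECT_FALSE(prefs.Exists("dlc/foo/manifest-bytes"));
}

}  // namespace chromeos_update_engine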
diff --git a/common/fake_prefs.h b/common/fake_prefs.h
index b1c5b71..430c291 100644
--- a/common/fake_prefs.h
+++ b/common/fake_prefs.h
@@ -19,6 +19,7 @@
 
 #include <map>
 #include <string>
+#include <string_view>
 #include <vector>
 
 #include <base/macros.h>
@@ -40,7 +41,7 @@
 
   // PrefsInterface methods.
   bool GetString(const std::string& key, std::string* value) const override;
-  bool SetString(const std::string& key, const std::string& value) override;
+  bool SetString(const std::string& key, std::string_view value) override;
   bool GetInt64(const std::string& key, int64_t* value) const override;
   bool SetInt64(const std::string& key, const int64_t value) override;
   bool GetBoolean(const std::string& key, bool* value) const override;
@@ -48,6 +49,11 @@
 
   bool Exists(const std::string& key) const override;
   bool Delete(const std::string& key) override;
+  bool Delete(const std::string& key,
+              const std::vector<std::string>& nss) override;
+
+  bool GetSubKeys(const std::string& ns,
+                  std::vector<std::string>* keys) const override;
 
   void AddObserver(const std::string& key,
                    ObserverInterface* observer) override;
@@ -91,7 +97,7 @@
   // Helper function to set a value of the passed |key|. It sets the type based
   // on the template parameter T.
   template <typename T>
-  void SetValue(const std::string& key, const T& value);
+  void SetValue(const std::string& key, T value);
 
   // Helper function to get a value from the map checking for invalid calls.
   // The function fails the test if you attempt to read a value  defined as a
diff --git a/common/file_fetcher.h b/common/file_fetcher.h
index fbdfc32..bd39007 100644
--- a/common/file_fetcher.h
+++ b/common/file_fetcher.h
@@ -59,6 +59,12 @@
   void SetHeader(const std::string& header_name,
                  const std::string& header_value) override {}
 
+  bool GetHeader(const std::string& header_name,
+                 std::string* header_value) const override {
+    header_value->clear();
+    return false;
+  }
+
   // Suspend the asynchronous file read.
   void Pause() override;
 
diff --git a/common/hardware_interface.h b/common/hardware_interface.h
index d92a6fc..4e820f1 100644
--- a/common/hardware_interface.h
+++ b/common/hardware_interface.h
@@ -25,6 +25,8 @@
 #include <base/files/file_path.h>
 #include <base/time/time.h>
 
+#include "update_engine/common/error_code.h"
+
 namespace chromeos_update_engine {
 
 // The hardware interface allows access to the crossystem exposed properties,
@@ -62,13 +64,9 @@
   // Returns the HWID or an empty string on error.
   virtual std::string GetHardwareClass() const = 0;
 
-  // Returns the firmware version or an empty string if the system is
-  // not running chrome os firmware.
-  virtual std::string GetFirmwareVersion() const = 0;
-
-  // Returns the ec version or an empty string if the system is not
-  // running a custom chrome os ec.
-  virtual std::string GetECVersion() const = 0;
+  // Returns the OEM device requisition or an empty string if the system does
+  // not have a requisition, or if not running Chrome OS.
+  virtual std::string GetDeviceRequisition() const = 0;
 
   // Returns the minimum kernel key version that verified boot on Chrome OS
   // will allow to boot. This is the value of crossystem tpm_kernver. Returns
@@ -102,9 +100,9 @@
   virtual int GetPowerwashCount() const = 0;
 
   // Signals that a powerwash (stateful partition wipe) should be performed
-  // after reboot. If |is_rollback| is true additional state is preserved
-  // during shutdown that can be restored after the powerwash.
-  virtual bool SchedulePowerwash(bool is_rollback) = 0;
+  // after reboot. If |save_rollback_data| is true additional state is
+  // preserved during shutdown that can be restored after the powerwash.
+  virtual bool SchedulePowerwash(bool save_rollback_data) = 0;
 
   // Cancel the powerwash operation scheduled to be performed on next boot.
   virtual bool CancelPowerwash() = 0;
@@ -138,6 +136,33 @@
   // If |warm_reset| is true, sets the warm reset to indicate a warm reset is
   // needed on the next reboot. Otherwise, clears the flag.
   virtual void SetWarmReset(bool warm_reset) = 0;
+
+  // If not reset, sets the vbmeta digest of the inactive slot as a sysprop.
+  // Otherwise, clears the sysprop.
+  virtual void SetVbmetaDigestForInactiveSlot(bool reset) = 0;
+
+  // Return the version/timestamp for partition `partition_name`.
+  // Don't make any assumptions about the formatting of the returned string.
+  // Only used for logging/debugging purposes.
+  virtual std::string GetVersionForLogging(
+      const std::string& partition_name) const = 0;
+
+  // Check whether `new_version` is "newer" than the current version number of
+  // partition `partition_name`. The notion of "newer" is defined by this
+  // function; callers should not make any assumptions about the underlying
+  // logic.
+  // Return:
+  // - kSuccess if update is valid.
+  // - kPayloadTimestampError if downgrade is detected
+  // - kDownloadManifestParseError if |new_version| has an incorrect format
+  // - Other error values if the source of error is known, or kError for
+  //   a generic error on the device.
+  virtual ErrorCode IsPartitionUpdateValid(
+      const std::string& partition_name,
+      const std::string& new_version) const = 0;
+
+  virtual const char* GetPartitionMountOptions(
+      const std::string& partition_name) const = 0;
 };
 
 }  // namespace chromeos_update_engine
diff --git a/common/hash_calculator.cc b/common/hash_calculator.cc
index d010a53..60812d5 100644
--- a/common/hash_calculator.cc
+++ b/common/hash_calculator.cc
@@ -95,6 +95,11 @@
   return RawHashOfBytes(data.data(), data.size(), out_hash);
 }
 
+bool HashCalculator::RawHashOfFile(const string& name, brillo::Blob* out_hash) {
+  const auto file_size = utils::FileSize(name);
+  return RawHashOfFile(name, file_size, out_hash) == file_size;
+}
+
 off_t HashCalculator::RawHashOfFile(const string& name,
                                     off_t length,
                                     brillo::Blob* out_hash) {
diff --git a/common/hash_calculator.h b/common/hash_calculator.h
index b7e4d86..4426128 100644
--- a/common/hash_calculator.h
+++ b/common/hash_calculator.h
@@ -75,6 +75,7 @@
   static off_t RawHashOfFile(const std::string& name,
                              off_t length,
                              brillo::Blob* out_hash);
+  static bool RawHashOfFile(const std::string& name, brillo::Blob* out_hash);
 
  private:
   // If non-empty, the final raw hash. Will only be set to non-empty when
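
A hedged sketch of the new whole-file overload, which folds the length bookkeeping of the off_t variant into a single bool. FileMatchesHash is an illustrative helper, not update_engine code.

#include <string>

#include <brillo/secure_blob.h>

#include "update_engine/common/hash_calculator.h"

namespace chromeos_update_engine {

// Returns true iff |path| could be hashed and its raw hash equals |expected|.
bool FileMatchesHash(const std::string& path, const brillo::Blob& expected) {
  brillo::Blob actual;
  return HashCalculator::RawHashOfFile(path, &actual) && actual == expected;
}

}  // namespace chromeos_update_engine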
diff --git a/common/hash_calculator_unittest.cc b/common/hash_calculator_unittest.cc
index e8f73d5..fe7d543 100644
--- a/common/hash_calculator_unittest.cc
+++ b/common/hash_calculator_unittest.cc
@@ -104,7 +104,7 @@
 }
 
 TEST_F(HashCalculatorTest, UpdateFileSimpleTest) {
-  test_utils::ScopedTempFile data_file("data.XXXXXX");
+  ScopedTempFile data_file("data.XXXXXX");
   ASSERT_TRUE(test_utils::WriteFileString(data_file.path(), "hi"));
 
   for (const int length : {-1, 2, 10}) {
@@ -126,7 +126,7 @@
 }
 
 TEST_F(HashCalculatorTest, RawHashOfFileSimpleTest) {
-  test_utils::ScopedTempFile data_file("data.XXXXXX");
+  ScopedTempFile data_file("data.XXXXXX");
   ASSERT_TRUE(test_utils::WriteFileString(data_file.path(), "hi"));
 
   for (const int length : {-1, 2, 10}) {
diff --git a/common/http_common.cc b/common/http_common.cc
index 5f234b0..c8bac47 100644
--- a/common/http_common.cc
+++ b/common/http_common.cc
@@ -21,6 +21,7 @@
 #include <cstdlib>
 
 #include <base/macros.h>
+#include <base/stl_util.h>
 
 namespace chromeos_update_engine {
 
@@ -56,7 +57,7 @@
 
   bool is_found = false;
   size_t i;
-  for (i = 0; i < arraysize(http_response_table); i++)
+  for (i = 0; i < base::size(http_response_table); i++)
     if ((is_found = (http_response_table[i].code == code)))
       break;
 
@@ -77,7 +78,7 @@
 
   bool is_found = false;
   size_t i;
-  for (i = 0; i < arraysize(http_content_type_table); i++)
+  for (i = 0; i < base::size(http_content_type_table); i++)
     if ((is_found = (http_content_type_table[i].type == type)))
       break;
 
diff --git a/common/http_fetcher.h b/common/http_fetcher.h
index 2b4fc83..7fa5f09 100644
--- a/common/http_fetcher.h
+++ b/common/http_fetcher.h
@@ -28,6 +28,7 @@
 #include <brillo/message_loops/message_loop.h>
 
 #include "update_engine/common/http_common.h"
+#include "update_engine/common/metrics_constants.h"
 #include "update_engine/common/proxy_resolver.h"
 
 // This class is a simple wrapper around an HTTP library (libcurl). We can
@@ -58,6 +59,12 @@
   HttpFetcherDelegate* delegate() const { return delegate_; }
   int http_response_code() const { return http_response_code_; }
 
+  // Returns an additional error code that can't be expressed as an HTTP
+  // response code. For example, if there was a specific internal error code in
+  // the objects used in the implementation of this class (like libcurl) that
+  // we are interested in, we can communicate it through this value.
+  ErrorCode GetAuxiliaryErrorCode() const { return auxiliary_error_code_; }
+
   // Optional: Post data to the server. The HttpFetcher should make a copy
   // of this data and upload it via HTTP POST during the transfer. The type of
   // the data is necessary for properly setting the Content-Type HTTP header.
@@ -99,6 +106,14 @@
   virtual void SetHeader(const std::string& header_name,
                          const std::string& header_value) = 0;
 
+  // Only used for testing.
+  // If |header_name| is present, its value will be copied into |header_value|
+  // and true will be returned. On failure to find |header_name| in the
+  // headers, false will be returned and |header_value| will be left as an
+  // empty string.
+  virtual bool GetHeader(const std::string& header_name,
+                         std::string* header_value) const = 0;
+
   // If data is coming in too quickly, you can call Pause() to pause the
   // transfer. The delegate will not have ReceivedBytes() called while
   // an HttpFetcher is paused.
@@ -150,6 +165,10 @@
   // set to the response code when the transfer is complete.
   int http_response_code_;
 
+  // Set when there is an error that can't be expressed in the form of
+  // |http_response_code_|.
+  ErrorCode auxiliary_error_code_{ErrorCode::kSuccess};
+
   // The delegate; may be null.
   HttpFetcherDelegate* delegate_;
 
diff --git a/common/http_fetcher_unittest.cc b/common/http_fetcher_unittest.cc
index 237ea20..99ea99b 100644
--- a/common/http_fetcher_unittest.cc
+++ b/common/http_fetcher_unittest.cc
@@ -28,15 +28,25 @@
 #include <base/bind.h>
 #include <base/location.h>
 #include <base/logging.h>
+#if BASE_VER < 780000  // Android
 #include <base/message_loop/message_loop.h>
+#endif  // BASE_VER < 780000
+#include <base/stl_util.h>
 #include <base/strings/string_number_conversions.h>
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
+#if BASE_VER >= 780000  // CrOS
+#include <base/task/single_thread_task_executor.h>
+#endif  // BASE_VER >= 780000
 #include <base/time/time.h>
 #include <brillo/message_loops/base_message_loop.h>
 #include <brillo/message_loops/message_loop.h>
 #include <brillo/message_loops/message_loop_utils.h>
+#ifdef __CHROMEOS__
+#include <brillo/process/process.h>
+#else
 #include <brillo/process.h>
+#endif  // __CHROMEOS__
 #include <brillo/streams/file_stream.h>
 #include <brillo/streams/stream.h>
 #include <gtest/gtest.h>
@@ -359,7 +369,7 @@
   HttpServer* CreateServer() override { return new NullHttpServer; }
 
  private:
-  test_utils::ScopedTempFile temp_file_{"ue_file_fetcher.XXXXXX"};
+  ScopedTempFile temp_file_{"ue_file_fetcher.XXXXXX"};
 };
 
 class MultiRangeHttpFetcherOverFileFetcherTest : public FileFetcherTest {
@@ -398,8 +408,13 @@
 template <typename T>
 class HttpFetcherTest : public ::testing::Test {
  public:
+#if BASE_VER < 780000  // Android
   base::MessageLoopForIO base_loop_;
   brillo::BaseMessageLoop loop_{&base_loop_};
+#else   // Chrome OS
+  base::SingleThreadTaskExecutor base_loop_{base::MessagePumpType::IO};
+  brillo::BaseMessageLoop loop_{base_loop_.task_runner()};
+#endif  // BASE_VER < 780000
 
   T test_;
 
@@ -1049,7 +1064,7 @@
   unique_ptr<HttpServer> server(this->test_.CreateServer());
   ASSERT_TRUE(server->started_);
 
-  for (size_t c = 0; c < arraysize(kRedirectCodes); ++c) {
+  for (size_t c = 0; c < base::size(kRedirectCodes); ++c) {
     const string url = base::StringPrintf(
         "/redirect/%d/download/%d", kRedirectCodes[c], kMediumLength);
     RedirectTest(server.get(), true, url, this->test_.NewLargeFetcher());
@@ -1066,7 +1081,7 @@
   string url;
   for (int r = 0; r < kDownloadMaxRedirects; r++) {
     url += base::StringPrintf("/redirect/%d",
-                              kRedirectCodes[r % arraysize(kRedirectCodes)]);
+                              kRedirectCodes[r % base::size(kRedirectCodes)]);
   }
   url += base::StringPrintf("/download/%d", kMediumLength);
   RedirectTest(server.get(), true, url, this->test_.NewLargeFetcher());
@@ -1082,7 +1097,7 @@
   string url;
   for (int r = 0; r < kDownloadMaxRedirects + 1; r++) {
     url += base::StringPrintf("/redirect/%d",
-                              kRedirectCodes[r % arraysize(kRedirectCodes)]);
+                              kRedirectCodes[r % base::size(kRedirectCodes)]);
   }
   url += base::StringPrintf("/download/%d", kMediumLength);
   RedirectTest(server.get(), false, url, this->test_.NewLargeFetcher());
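
Tying the http_fetcher.h additions above together, here is a hedged sketch of a caller that reads the test-only GetHeader() seam and the new auxiliary error code. The function name, header name, and logging are illustrative; utils::ErrorCodeToString comes from common/error_code_utils.h.

#include <string>

#include <base/logging.h>

#include "update_engine/common/error_code_utils.h"
#include "update_engine/common/http_fetcher.h"

namespace chromeos_update_engine {

void LogFetcherDiagnostics(HttpFetcher* fetcher) {
  // Header name is illustrative; GetHeader() is a test-only lookup.
  std::string value;
  if (fetcher->GetHeader("X-Goog-Update-Interactivity", &value))
    LOG(INFO) << "Interactivity header: " << value;

  // Auxiliary errors carry failures that have no HTTP status code, e.g. a
  // libcurl-internal problem surfaced by the concrete fetcher.
  if (fetcher->GetAuxiliaryErrorCode() != ErrorCode::kSuccess)
    LOG(WARNING) << "Auxiliary fetcher error: "
                 << utils::ErrorCodeToString(fetcher->GetAuxiliaryErrorCode());
}

}  // namespace chromeos_update_engine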
diff --git a/common/hwid_override.cc b/common/hwid_override.cc
index 8800e94..1bb0f8f 100644
--- a/common/hwid_override.cc
+++ b/common/hwid_override.cc
@@ -16,14 +16,12 @@
 
 #include "update_engine/common/hwid_override.h"
 
-#include <map>
 #include <string>
 
 #include <base/files/file_path.h>
 #include <base/files/file_util.h>
 #include <brillo/key_value_store.h>
 
-using std::map;
 using std::string;
 
 namespace chromeos_update_engine {
diff --git a/logging.h b/common/logging.h
similarity index 100%
rename from logging.h
rename to common/logging.h
diff --git a/metrics_constants.h b/common/metrics_constants.h
similarity index 94%
rename from metrics_constants.h
rename to common/metrics_constants.h
index 137143a..b7633b9 100644
--- a/metrics_constants.h
+++ b/common/metrics_constants.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_METRICS_CONSTANTS_H_
-#define UPDATE_ENGINE_METRICS_CONSTANTS_H_
+#ifndef UPDATE_ENGINE_COMMON_METRICS_CONSTANTS_H_
+#define UPDATE_ENGINE_COMMON_METRICS_CONSTANTS_H_
 
 namespace chromeos_update_engine {
 
@@ -106,7 +106,7 @@
   kUpdateCanceled,              // Update canceled by the user.
   kUpdateSucceededNotActive,    // Update succeeded but the new slot is not
                                 // active.
-
+  kUpdateSkipped,               // Current update skipped.
   kNumConstants,
 
   kUnset = -1
@@ -119,12 +119,12 @@
   kUnknown = 0,           // Unknown.
   kEthernet = 1,          // Ethernet.
   kWifi = 2,              // Wireless.
-  kWimax = 3,             // WiMax.
-  kBluetooth = 4,         // Bluetooth.
   kCellular = 5,          // Cellular.
   kTetheredEthernet = 6,  // Tethered (Ethernet).
   kTetheredWifi = 7,      // Tethered (Wifi).
   kDisconnected = 8,      // Disconnected.
+  // deprecated: kWimax = 3,
+  // deprecated: kBluetooth = 4,
 
   kNumConstants,
   kUnset = -1
@@ -144,4 +144,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_METRICS_CONSTANTS_H_
+#endif  // UPDATE_ENGINE_COMMON_METRICS_CONSTANTS_H_
diff --git a/metrics_reporter_interface.h b/common/metrics_reporter_interface.h
similarity index 94%
rename from metrics_reporter_interface.h
rename to common/metrics_reporter_interface.h
index fce8bfd..a7a91a5 100644
--- a/metrics_reporter_interface.h
+++ b/common/metrics_reporter_interface.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_METRICS_REPORTER_INTERFACE_H_
-#define UPDATE_ENGINE_METRICS_REPORTER_INTERFACE_H_
+#ifndef UPDATE_ENGINE_COMMON_METRICS_REPORTER_INTERFACE_H_
+#define UPDATE_ENGINE_COMMON_METRICS_REPORTER_INTERFACE_H_
 
 #include <memory>
 #include <string>
@@ -23,27 +23,20 @@
 #include <base/time/time.h>
 
 #include "update_engine/common/constants.h"
+#include "update_engine/common/dynamic_partition_control_interface.h"
 #include "update_engine/common/error_code.h"
-#include "update_engine/metrics_constants.h"
-#include "update_engine/system_state.h"
+#include "update_engine/common/metrics_constants.h"
+#include "update_engine/payload_consumer/install_plan.h"
 
 namespace chromeos_update_engine {
 
 enum class ServerToCheck;
 enum class CertificateCheckResult;
 
-namespace metrics {
-
-std::unique_ptr<MetricsReporterInterface> CreateMetricsReporter();
-
-}  // namespace metrics
-
 class MetricsReporterInterface {
  public:
   virtual ~MetricsReporterInterface() = default;
 
-  virtual void Initialize() = 0;
-
   // Helper function to report metrics related to user-initiated rollback. The
   // following metrics are reported:
   //
@@ -94,7 +87,6 @@
   // if it's set, |kMetricCheckRollbackTargetVersion| reports the same, but only
   // if rollback is also allowed using enterprise policy.
   virtual void ReportUpdateCheckMetrics(
-      SystemState* system_state,
       metrics::CheckResult result,
       metrics::CheckReaction reaction,
       metrics::DownloadErrorCode download_error_code) = 0;
@@ -122,8 +114,7 @@
   // |kMetricAttemptTimeSinceLastAttemptUptimeMinutes| metrics are
   // automatically calculated and reported by maintaining persistent and
   // process-local state variables.
-  virtual void ReportUpdateAttemptMetrics(SystemState* system_state,
-                                          int attempt_number,
+  virtual void ReportUpdateAttemptMetrics(int attempt_number,
                                           PayloadType payload_type,
                                           base::TimeDelta duration,
                                           base::TimeDelta duration_uptime,
@@ -244,6 +235,14 @@
       bool has_time_restriction_policy, int time_to_update_days) = 0;
 };
 
+namespace metrics {
+
+std::unique_ptr<MetricsReporterInterface> CreateMetricsReporter(
+    DynamicPartitionControlInterface* dynamic_partition_control,
+    const InstallPlan* install_plan);
+
+}  // namespace metrics
+
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_METRICS_REPORTER_INTERFACE_H_
+#endif  // UPDATE_ENGINE_COMMON_METRICS_REPORTER_INTERFACE_H_
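
A minimal sketch of the relocated factory: CreateMetricsReporter() now takes the dynamic partition control and the install plan instead of being SystemState-backed. MakeReporterExample is an illustrative wrapper; both pointers are assumed to outlive the reporter.

#include <memory>

#include "update_engine/common/metrics_reporter_interface.h"

namespace chromeos_update_engine {

std::unique_ptr<MetricsReporterInterface> MakeReporterExample(
    DynamicPartitionControlInterface* dynamic_partition_control,
    const InstallPlan* install_plan) {
  return metrics::CreateMetricsReporter(dynamic_partition_control,
                                        install_plan);
}

}  // namespace chromeos_update_engine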
diff --git a/metrics_reporter_stub.cc b/common/metrics_reporter_stub.cc
similarity index 84%
rename from metrics_reporter_stub.cc
rename to common/metrics_reporter_stub.cc
index 81664a5..61559d9 100644
--- a/metrics_reporter_stub.cc
+++ b/common/metrics_reporter_stub.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/metrics_reporter_stub.h"
+#include "update_engine/common/metrics_reporter_stub.h"
 
 #include <memory>
 
@@ -22,7 +22,9 @@
 
 namespace metrics {
 
-std::unique_ptr<MetricsReporterInterface> CreateMetricsReporter() {
+std::unique_ptr<MetricsReporterInterface> CreateMetricsReporter(
+    DynamicPartitionControlInterface* dynamic_partition_control,
+    const InstallPlan* install_plan) {
   return std::make_unique<MetricsReporterStub>();
 }
 
diff --git a/metrics_reporter_stub.h b/common/metrics_reporter_stub.h
similarity index 88%
rename from metrics_reporter_stub.h
rename to common/metrics_reporter_stub.h
index 25660b5..80cf469 100644
--- a/metrics_reporter_stub.h
+++ b/common/metrics_reporter_stub.h
@@ -14,14 +14,14 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_METRICS_REPORTER_STUB_H_
-#define UPDATE_ENGINE_METRICS_REPORTER_STUB_H_
+#ifndef UPDATE_ENGINE_COMMON_METRICS_REPORTER_STUB_H_
+#define UPDATE_ENGINE_COMMON_METRICS_REPORTER_STUB_H_
 
 #include <string>
 
 #include "update_engine/common/error_code.h"
-#include "update_engine/metrics_constants.h"
-#include "update_engine/metrics_reporter_interface.h"
+#include "update_engine/common/metrics_constants.h"
+#include "update_engine/common/metrics_reporter_interface.h"
 
 namespace chromeos_update_engine {
 
@@ -31,8 +31,6 @@
 
   ~MetricsReporterStub() override = default;
 
-  void Initialize() override {}
-
   void ReportRollbackMetrics(metrics::RollbackResult result) override {}
 
   void ReportEnterpriseRollbackMetrics(
@@ -41,13 +39,11 @@
   void ReportDailyMetrics(base::TimeDelta os_age) override {}
 
   void ReportUpdateCheckMetrics(
-      SystemState* system_state,
       metrics::CheckResult result,
       metrics::CheckReaction reaction,
       metrics::DownloadErrorCode download_error_code) override {}
 
-  void ReportUpdateAttemptMetrics(SystemState* system_state,
-                                  int attempt_number,
+  void ReportUpdateAttemptMetrics(int attempt_number,
                                   PayloadType payload_type,
                                   base::TimeDelta duration,
                                   base::TimeDelta duration_uptime,
@@ -100,4 +96,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_METRICS_REPORTER_STUB_H_
+#endif  // UPDATE_ENGINE_COMMON_METRICS_REPORTER_STUB_H_
diff --git a/common/mock_action_processor.h b/common/mock_action_processor.h
index 4c62109..9785776 100644
--- a/common/mock_action_processor.h
+++ b/common/mock_action_processor.h
@@ -32,6 +32,8 @@
   MOCK_METHOD0(StartProcessing, void());
   MOCK_METHOD1(EnqueueAction, void(AbstractAction* action));
 
+  MOCK_METHOD2(ActionComplete, void(AbstractAction*, ErrorCode));
+
   // This is a legacy workaround described in:
   // https://github.com/google/googletest/blob/master/googlemock/docs/CookBook.md#legacy-workarounds-for-move-only-types-legacymoveonly
   void EnqueueAction(std::unique_ptr<AbstractAction> action) override {
diff --git a/common/mock_boot_control.h b/common/mock_boot_control.h
new file mode 100644
index 0000000..f75ce5e
--- /dev/null
+++ b/common/mock_boot_control.h
@@ -0,0 +1,74 @@
+//
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_COMMON_MOCK_BOOT_CONTROL_H_
+#define UPDATE_ENGINE_COMMON_MOCK_BOOT_CONTROL_H_
+
+#include <memory>
+#include <string>
+
+#include <gmock/gmock.h>
+
+#include "update_engine/common/boot_control_stub.h"
+
+namespace chromeos_update_engine {
+
+class MockBootControl final : public BootControlStub {
+ public:
+  MOCK_METHOD(bool,
+              IsSlotMarkedSuccessful,
+              (BootControlInterface::Slot),
+              (const override));
+  MOCK_METHOD(unsigned int, GetNumSlots, (), (const override));
+  MOCK_METHOD(BootControlInterface::Slot, GetCurrentSlot, (), (const override));
+  MOCK_METHOD(bool,
+              GetPartitionDevice,
+              (const std::string&, Slot, bool, std::string*, bool*),
+              (const override));
+  MOCK_METHOD(bool,
+              GetPartitionDevice,
+              (const std::string&, BootControlInterface::Slot, std::string*),
+              (const override));
+  MOCK_METHOD(std::optional<PartitionDevice>,
+              GetPartitionDevice,
+              (const std::string&, uint32_t, uint32_t, bool),
+              (const override));
+
+  MOCK_METHOD(bool,
+              IsSlotBootable,
+              (BootControlInterface::Slot),
+              (const override));
+  MOCK_METHOD(bool,
+              MarkSlotUnbootable,
+              (BootControlInterface::Slot),
+              (override));
+  MOCK_METHOD(bool,
+              SetActiveBootSlot,
+              (BootControlInterface::Slot),
+              (override));
+  MOCK_METHOD(bool,
+              MarkBootSuccessfulAsync,
+              (base::Callback<void(bool)>),
+              (override));
+  MOCK_METHOD(DynamicPartitionControlInterface*,
+              GetDynamicPartitionControl,
+              (),
+              (override));
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_COMMON_MOCK_BOOT_CONTROL_H_
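// Illustrative sketch (not part of the change above): one way a unit test
// could use the new MockBootControl. The test name and the expected slot
// values are hypothetical.
#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include "update_engine/common/mock_boot_control.h"

namespace chromeos_update_engine {

TEST(MockBootControlSketch, ReportsStubbedSlots) {
  MockBootControl boot_control;
  EXPECT_CALL(boot_control, GetNumSlots()).WillRepeatedly(testing::Return(2));
  EXPECT_CALL(boot_control, GetCurrentSlot()).WillOnce(testing::Return(1));
  EXPECT_EQ(2u, boot_control.GetNumSlots());
  EXPECT_EQ(1u, boot_control.GetCurrentSlot());
}

}  // namespace chromeos_update_engine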
diff --git a/payload_consumer/mock_download_action.h b/common/mock_download_action.h
similarity index 81%
rename from payload_consumer/mock_download_action.h
rename to common/mock_download_action.h
index 3abb809..ecda9a3 100644
--- a/payload_consumer/mock_download_action.h
+++ b/common/mock_download_action.h
@@ -14,15 +14,15 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_MOCK_DOWNLOAD_ACTION_H_
-#define UPDATE_ENGINE_PAYLOAD_CONSUMER_MOCK_DOWNLOAD_ACTION_H_
+#ifndef UPDATE_ENGINE_COMMON_MOCK_DOWNLOAD_ACTION_H_
+#define UPDATE_ENGINE_COMMON_MOCK_DOWNLOAD_ACTION_H_
 
 #include <stdint.h>
 
 #include <gmock/gmock.h>
 
+#include "update_engine/common/download_action.h"
 #include "update_engine/common/error_code.h"
-#include "update_engine/payload_consumer/download_action.h"
 
 namespace chromeos_update_engine {
 
@@ -38,4 +38,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_PAYLOAD_CONSUMER_MOCK_DOWNLOAD_ACTION_H_
+#endif  // UPDATE_ENGINE_COMMON_MOCK_DOWNLOAD_ACTION_H_
diff --git a/common/mock_dynamic_partition_control.h b/common/mock_dynamic_partition_control.h
new file mode 100644
index 0000000..bfd1b0c
--- /dev/null
+++ b/common/mock_dynamic_partition_control.h
@@ -0,0 +1,89 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <stdint.h>
+
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include <gmock/gmock.h>
+
+#include "update_engine/common/dynamic_partition_control_interface.h"
+#include "update_engine/payload_consumer/file_descriptor.h"
+
+namespace chromeos_update_engine {
+
+class MockDynamicPartitionControl : public DynamicPartitionControlInterface {
+ public:
+  MOCK_METHOD(void, Cleanup, (), (override));
+  MOCK_METHOD(bool, GetDeviceDir, (std::string*), (override));
+  MOCK_METHOD(FeatureFlag, GetDynamicPartitionsFeatureFlag, (), (override));
+  MOCK_METHOD(FeatureFlag, GetVirtualAbCompressionFeatureFlag, (), (override));
+  MOCK_METHOD(FeatureFlag, GetVirtualAbFeatureFlag, (), (override));
+  MOCK_METHOD(bool, FinishUpdate, (bool), (override));
+  MOCK_METHOD(FileDescriptorPtr,
+              OpenCowFd,
+              (const std::string& unsuffixed_partition_name,
+               const std::optional<std::string>& source_path,
+               bool is_append),
+              (override));
+  MOCK_METHOD(bool, MapAllPartitions, (), (override));
+  MOCK_METHOD(bool, UnmapAllPartitions, (), (override));
+
+  MOCK_METHOD(bool,
+              OptimizeOperation,
+              (const std::string&, const InstallOperation&, InstallOperation*),
+              (override));
+
+  std::unique_ptr<android::snapshot::ISnapshotWriter> OpenCowWriter(
+      const std::string& unsuffixed_partition_name,
+      const std::optional<std::string>&,
+      bool is_append = false) override {
+    return nullptr;
+  }
+
+  MOCK_METHOD(
+      bool,
+      PreparePartitionsForUpdate,
+      (uint32_t, uint32_t, const DeltaArchiveManifest&, bool, uint64_t*),
+      (override));
+
+  MOCK_METHOD(bool, ResetUpdate, (PrefsInterface*), (override));
+  MOCK_METHOD(std::unique_ptr<AbstractAction>,
+              GetCleanupPreviousUpdateAction,
+              (BootControlInterface*,
+               PrefsInterface*,
+               CleanupPreviousUpdateActionDelegateInterface*),
+              (override));
+  MOCK_METHOD(bool,
+              ListDynamicPartitionsForSlot,
+              (uint32_t, uint32_t, std::vector<std::string>*),
+              (override));
+
+  MOCK_METHOD(bool,
+              VerifyExtentsForUntouchedPartitions,
+              (uint32_t, uint32_t, const std::vector<std::string>&),
+              (override));
+  MOCK_METHOD(bool,
+              IsDynamicPartition,
+              (const std::string&, uint32_t slot),
+              (override));
+  MOCK_METHOD(bool, UpdateUsesSnapshotCompression, (), (override));
+};
+
+}  // namespace chromeos_update_engine
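// Illustrative sketch (not part of the change above): giving the new
// MockDynamicPartitionControl benign defaults with NiceMock, as a test fixture
// might. The helper name is hypothetical.
#include <gmock/gmock.h>

#include "update_engine/common/mock_dynamic_partition_control.h"

namespace chromeos_update_engine {

inline void StubDynamicPartitionControlSketch(
    testing::NiceMock<MockDynamicPartitionControl>* mock) {
  // Pretend mapping/unmapping always succeeds and snapshot compression is on.
  ON_CALL(*mock, MapAllPartitions()).WillByDefault(testing::Return(true));
  ON_CALL(*mock, UnmapAllPartitions()).WillByDefault(testing::Return(true));
  ON_CALL(*mock, UpdateUsesSnapshotCompression())
      .WillByDefault(testing::Return(true));
}

}  // namespace chromeos_update_engine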
diff --git a/common/mock_excluder.h b/common/mock_excluder.h
new file mode 100644
index 0000000..560ba0d
--- /dev/null
+++ b/common/mock_excluder.h
@@ -0,0 +1,37 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_COMMON_MOCK_APP_EXCLUDER_H_
+#define UPDATE_ENGINE_COMMON_MOCK_APP_EXCLUDER_H_
+
+#include "update_engine/common/excluder_interface.h"
+
+#include <string>
+
+#include <gmock/gmock.h>
+
+namespace chromeos_update_engine {
+
+class MockExcluder : public ExcluderInterface {
+ public:
+  MOCK_METHOD(bool, Exclude, (const std::string&), (override));
+  MOCK_METHOD(bool, IsExcluded, (const std::string&), (override));
+  MOCK_METHOD(bool, Reset, (), (override));
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_COMMON_MOCK_APP_EXCLUDER_H_
diff --git a/common/mock_hardware.h b/common/mock_hardware.h
index 84c0c5b..071906b 100644
--- a/common/mock_hardware.h
+++ b/common/mock_hardware.h
@@ -45,11 +45,6 @@
     ON_CALL(*this, GetHardwareClass())
         .WillByDefault(
             testing::Invoke(&fake_, &FakeHardware::GetHardwareClass));
-    ON_CALL(*this, GetFirmwareVersion())
-        .WillByDefault(
-            testing::Invoke(&fake_, &FakeHardware::GetFirmwareVersion));
-    ON_CALL(*this, GetECVersion())
-        .WillByDefault(testing::Invoke(&fake_, &FakeHardware::GetECVersion));
     ON_CALL(*this, GetMinKernelKeyVersion())
         .WillByDefault(
             testing::Invoke(&fake_, &FakeHardware::GetMinKernelKeyVersion));
@@ -90,8 +85,6 @@
   MOCK_CONST_METHOD0(IsOOBEEnabled, bool());
   MOCK_CONST_METHOD1(IsOOBEComplete, bool(base::Time* out_time_of_oobe));
   MOCK_CONST_METHOD0(GetHardwareClass, std::string());
-  MOCK_CONST_METHOD0(GetFirmwareVersion, std::string());
-  MOCK_CONST_METHOD0(GetECVersion, std::string());
   MOCK_CONST_METHOD0(GetMinKernelKeyVersion, int());
   MOCK_CONST_METHOD0(GetMinFirmwareKeyVersion, int());
   MOCK_CONST_METHOD0(GetMaxFirmwareKeyRollforward, int());
diff --git a/common/mock_http_fetcher.cc b/common/mock_http_fetcher.cc
index 10e3b9e..1b3cd7d 100644
--- a/common/mock_http_fetcher.cc
+++ b/common/mock_http_fetcher.cc
@@ -22,6 +22,7 @@
 #include <base/logging.h>
 #include <base/strings/string_util.h>
 #include <base/time/time.h>
+#include <brillo/message_loops/message_loop.h>
 #include <gtest/gtest.h>
 
 // This is a mock implementation of HttpFetcher which is useful for testing.
@@ -43,12 +44,12 @@
     SignalTransferComplete();
     return;
   }
-  if (sent_size_ < data_.size())
+  if (sent_offset_ < data_.size())
     SendData(true);
 }
 
 void MockHttpFetcher::SendData(bool skip_delivery) {
-  if (fail_transfer_ || sent_size_ == data_.size()) {
+  if (fail_transfer_ || sent_offset_ == data_.size()) {
     SignalTransferComplete();
     return;
   }
@@ -60,19 +61,22 @@
 
   // Setup timeout callback even if the transfer is about to be completed in
   // order to get a call to |TransferComplete|.
-  if (timeout_id_ == MessageLoop::kTaskIdNull) {
+  if (timeout_id_ == MessageLoop::kTaskIdNull && delay_) {
+    CHECK(MessageLoop::current());
     timeout_id_ = MessageLoop::current()->PostDelayedTask(
         FROM_HERE,
         base::Bind(&MockHttpFetcher::TimeoutCallback, base::Unretained(this)),
         base::TimeDelta::FromMilliseconds(10));
   }
 
-  if (!skip_delivery) {
+  if (!skip_delivery || !delay_) {
     const size_t chunk_size =
-        min(kMockHttpFetcherChunkSize, data_.size() - sent_size_);
-    sent_size_ += chunk_size;
+        min(kMockHttpFetcherChunkSize, data_.size() - sent_offset_);
+    sent_offset_ += chunk_size;
+    bytes_sent_ += chunk_size;
     CHECK(delegate_);
-    delegate_->ReceivedBytes(this, &data_[sent_size_ - chunk_size], chunk_size);
+    delegate_->ReceivedBytes(
+        this, &data_[sent_offset_ - chunk_size], chunk_size);
   }
   // We may get terminated and deleted right after |ReceivedBytes| call, so we
   // should not access any class member variable after this call.
@@ -81,7 +85,7 @@
 void MockHttpFetcher::TimeoutCallback() {
   CHECK(!paused_);
   timeout_id_ = MessageLoop::kTaskIdNull;
-  CHECK_LE(sent_size_, data_.size());
+  CHECK_LE(sent_offset_, data_.size());
   // Same here, we should not access any member variable after this call.
   SendData(false);
 }
@@ -90,10 +94,15 @@
 // The transfer cannot be resumed.
 void MockHttpFetcher::TerminateTransfer() {
   LOG(INFO) << "Terminating transfer.";
-  // Kill any timeout, it is ok to call with kTaskIdNull.
-  MessageLoop::current()->CancelTask(timeout_id_);
-  timeout_id_ = MessageLoop::kTaskIdNull;
-  delegate_->TransferTerminated(this);
+  // During testing, a MessageLoop may or may not be available, so only call
+  // CancelTask() if a task was actually scheduled.
+  if (timeout_id_ != MessageLoop::kTaskIdNull) {
+    MessageLoop::current()->CancelTask(timeout_id_);
+    timeout_id_ = MessageLoop::kTaskIdNull;
+  }
+  if (delegate_) {
+    delegate_->TransferTerminated(this);
+  }
 }
 
 void MockHttpFetcher::SetHeader(const std::string& header_name,
diff --git a/common/mock_http_fetcher.h b/common/mock_http_fetcher.h
index 492e6ce..ea5b83d 100644
--- a/common/mock_http_fetcher.h
+++ b/common/mock_http_fetcher.h
@@ -46,7 +46,7 @@
                   size_t size,
                   ProxyResolver* proxy_resolver)
       : HttpFetcher(proxy_resolver),
-        sent_size_(0),
+        sent_offset_(0),
         timeout_id_(brillo::MessageLoop::kTaskIdNull),
         paused_(false),
         fail_transfer_(false),
@@ -64,7 +64,7 @@
 
   // Ignores this.
   void SetOffset(off_t offset) override {
-    sent_size_ = offset;
+    sent_offset_ = offset;
     if (delegate_)
       delegate_->SeekToOffset(offset);
   }
@@ -76,8 +76,8 @@
   void set_connect_timeout(int connect_timeout_seconds) override {}
   void set_max_retry_count(int max_retry_count) override {}
 
-  // Dummy: no bytes were downloaded.
-  size_t GetBytesDownloaded() override { return sent_size_; }
+  // Returns the total number of bytes sent to the delegate so far.
+  size_t GetBytesDownloaded() override { return bytes_sent_; }
 
   // Begins the transfer if it hasn't already begun.
   void BeginTransfer(const std::string& url) override;
@@ -89,6 +89,12 @@
   void SetHeader(const std::string& header_name,
                  const std::string& header_value) override;
 
+  bool GetHeader(const std::string& header_name,
+                 std::string* header_value) const override {
+    header_value->clear();
+    return false;
+  }
+
   // Return the value of the header |header_name| or the empty string if not
   // set.
   std::string GetHeader(const std::string& header_name) const;
@@ -107,6 +113,8 @@
 
   const brillo::Blob& post_data() const { return post_data_; }
 
+  void set_delay(bool delay) { delay_ = delay; }
+
  private:
   // Sends data to the delegate and sets up a timeout callback if needed. There
   // must be a delegate. If |skip_delivery| is true, no bytes will be delivered,
@@ -123,8 +131,11 @@
   // A full copy of the data we'll return to the delegate
   brillo::Blob data_;
 
-  // The number of bytes we've sent so far
-  size_t sent_size_;
+  // The current offset; marks the first byte that will be sent next.
+  size_t sent_offset_{0};
+
+  // Total number of bytes transferred
+  size_t bytes_sent_{0};
 
   // The extra headers set.
   std::map<std::string, std::string> extra_headers_;
@@ -134,13 +145,16 @@
   brillo::MessageLoop::TaskId timeout_id_;
 
   // True iff the fetcher is paused.
-  bool paused_;
+  bool paused_{false};
 
   // Set to true if the transfer should fail.
-  bool fail_transfer_;
+  bool fail_transfer_{false};
 
   // Set to true if BeginTransfer should EXPECT fail.
-  bool never_use_;
+  bool never_use_{false};
+
+  // Whether to wait 10ms before sending data to the delegate.
+  bool delay_{true};
 
   DISALLOW_COPY_AND_ASSIGN(MockHttpFetcher);
 };
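// Illustrative sketch (not part of the change above): the new set_delay(false)
// lets tests make MockHttpFetcher hand bytes to its delegate without the 10ms
// PostDelayedTask round trip. The payload and the null proxy resolver are
// hypothetical; a real test would also install a delegate first.
#include <string>

#include "update_engine/common/mock_http_fetcher.h"

namespace chromeos_update_engine {

inline void ImmediateDeliverySketch() {
  const std::string payload = "payload bytes";
  MockHttpFetcher fetcher(payload.data(), payload.size(),
                          /*proxy_resolver=*/nullptr);
  fetcher.set_delay(false);  // Deliver data directly instead of via a timeout.
  // Setting a delegate and calling fetcher.BeginTransfer(url) would follow.
}

}  // namespace chromeos_update_engine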
diff --git a/mock_metrics_reporter.h b/common/mock_metrics_reporter.h
similarity index 87%
rename from mock_metrics_reporter.h
rename to common/mock_metrics_reporter.h
index baf3a78..1bb1e84 100644
--- a/mock_metrics_reporter.h
+++ b/common/mock_metrics_reporter.h
@@ -14,14 +14,14 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_MOCK_METRICS_REPORTER_H_
-#define UPDATE_ENGINE_MOCK_METRICS_REPORTER_H_
+#ifndef UPDATE_ENGINE_COMMON_MOCK_METRICS_REPORTER_H_
+#define UPDATE_ENGINE_COMMON_MOCK_METRICS_REPORTER_H_
 
 #include <string>
 
 #include <gmock/gmock.h>
 
-#include "update_engine/metrics_reporter_interface.h"
+#include "update_engine/common/metrics_reporter_interface.h"
 
 namespace chromeos_update_engine {
 
@@ -36,15 +36,13 @@
 
   MOCK_METHOD1(ReportDailyMetrics, void(base::TimeDelta os_age));
 
-  MOCK_METHOD4(ReportUpdateCheckMetrics,
-               void(SystemState* system_state,
-                    metrics::CheckResult result,
+  MOCK_METHOD3(ReportUpdateCheckMetrics,
+               void(metrics::CheckResult result,
                     metrics::CheckReaction reaction,
                     metrics::DownloadErrorCode download_error_code));
 
-  MOCK_METHOD8(ReportUpdateAttemptMetrics,
-               void(SystemState* system_state,
-                    int attempt_number,
+  MOCK_METHOD7(ReportUpdateAttemptMetrics,
+               void(int attempt_number,
                     PayloadType payload_type,
                     base::TimeDelta duration,
                     base::TimeDelta duration_uptime,
@@ -96,4 +94,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_MOCK_METRICS_REPORTER_H_
+#endif  // UPDATE_ENGINE_COMMON_MOCK_METRICS_REPORTER_H_
diff --git a/common/mock_prefs.h b/common/mock_prefs.h
index 2582e19..49431fb 100644
--- a/common/mock_prefs.h
+++ b/common/mock_prefs.h
@@ -18,6 +18,7 @@
 #define UPDATE_ENGINE_COMMON_MOCK_PREFS_H_
 
 #include <string>
+#include <vector>
 
 #include <gmock/gmock.h>
 
@@ -30,8 +31,7 @@
  public:
   MOCK_CONST_METHOD2(GetString,
                      bool(const std::string& key, std::string* value));
-  MOCK_METHOD2(SetString,
-               bool(const std::string& key, const std::string& value));
+  MOCK_METHOD2(SetString, bool(const std::string& key, std::string_view value));
   MOCK_CONST_METHOD2(GetInt64, bool(const std::string& key, int64_t* value));
   MOCK_METHOD2(SetInt64, bool(const std::string& key, const int64_t value));
 
@@ -40,6 +40,12 @@
 
   MOCK_CONST_METHOD1(Exists, bool(const std::string& key));
   MOCK_METHOD1(Delete, bool(const std::string& key));
+  MOCK_METHOD2(Delete,
+               bool(const std::string& key,
+                    const std::vector<std::string>& nss));
+
+  MOCK_CONST_METHOD2(GetSubKeys,
+                     bool(const std::string&, std::vector<std::string>*));
 
   MOCK_METHOD2(AddObserver, void(const std::string& key, ObserverInterface*));
   MOCK_METHOD2(RemoveObserver,
diff --git a/mock_service_observer.h b/common/mock_service_observer.h
similarity index 81%
rename from mock_service_observer.h
rename to common/mock_service_observer.h
index e434eab..2c895f9 100644
--- a/mock_service_observer.h
+++ b/common/mock_service_observer.h
@@ -14,11 +14,11 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_MOCK_SERVICE_OBSERVER_H_
-#define UPDATE_ENGINE_MOCK_SERVICE_OBSERVER_H_
+#ifndef UPDATE_ENGINE_COMMON_MOCK_SERVICE_OBSERVER_H_
+#define UPDATE_ENGINE_COMMON_MOCK_SERVICE_OBSERVER_H_
 
 #include <gmock/gmock.h>
-#include "update_engine/service_observer_interface.h"
+#include "update_engine/common/service_observer_interface.h"
 
 namespace chromeos_update_engine {
 
@@ -32,4 +32,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_MOCK_SERVICE_OBSERVER_H_
+#endif  // UPDATE_ENGINE_COMMON_MOCK_SERVICE_OBSERVER_H_
diff --git a/common/multi_range_http_fetcher.h b/common/multi_range_http_fetcher.h
index f57ea7f..ef32f0d 100644
--- a/common/multi_range_http_fetcher.h
+++ b/common/multi_range_http_fetcher.h
@@ -83,6 +83,11 @@
     base_fetcher_->SetHeader(header_name, header_value);
   }
 
+  bool GetHeader(const std::string& header_name,
+                 std::string* header_value) const override {
+    return base_fetcher_->GetHeader(header_name, header_value);
+  }
+
   void Pause() override { base_fetcher_->Pause(); }
 
   void Unpause() override { base_fetcher_->Unpause(); }
diff --git a/network_selector.h b/common/network_selector.h
similarity index 80%
rename from network_selector.h
rename to common/network_selector.h
index 22aed8e..bfc09c5 100644
--- a/network_selector.h
+++ b/common/network_selector.h
@@ -14,12 +14,12 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_NETWORK_SELECTOR_H_
-#define UPDATE_ENGINE_NETWORK_SELECTOR_H_
+#ifndef UPDATE_ENGINE_COMMON_NETWORK_SELECTOR_H_
+#define UPDATE_ENGINE_COMMON_NETWORK_SELECTOR_H_
 
 #include <memory>
 
-#include "update_engine/network_selector_interface.h"
+#include "update_engine/common/network_selector_interface.h"
 
 namespace chromeos_update_engine {
 namespace network {
@@ -30,4 +30,4 @@
 }  // namespace network
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_NETWORK_SELECTOR_H_
+#endif  // UPDATE_ENGINE_COMMON_NETWORK_SELECTOR_H_
diff --git a/network_selector_interface.h b/common/network_selector_interface.h
similarity index 88%
rename from network_selector_interface.h
rename to common/network_selector_interface.h
index bd0948f..42ce32e 100644
--- a/network_selector_interface.h
+++ b/common/network_selector_interface.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_NETWORK_SELECTOR_INTERFACE_H_
-#define UPDATE_ENGINE_NETWORK_SELECTOR_INTERFACE_H_
+#ifndef UPDATE_ENGINE_COMMON_NETWORK_SELECTOR_INTERFACE_H_
+#define UPDATE_ENGINE_COMMON_NETWORK_SELECTOR_INTERFACE_H_
 
 #include <cstdint>
 
@@ -45,4 +45,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_NETWORK_SELECTOR_INTERFACE_H_
+#endif  // UPDATE_ENGINE_COMMON_NETWORK_SELECTOR_INTERFACE_H_
diff --git a/network_selector_stub.cc b/common/network_selector_stub.cc
similarity index 87%
rename from network_selector_stub.cc
rename to common/network_selector_stub.cc
index 67925f4..24c0e25 100644
--- a/network_selector_stub.cc
+++ b/common/network_selector_stub.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/network_selector_stub.h"
+#include "update_engine/common/network_selector_stub.h"
 
 #include <memory>
 
@@ -24,14 +24,14 @@
 
 namespace network {
 
-// Factory defined in network_selector.h.
+// Factory defined in common/network_selector.h.
 std::unique_ptr<NetworkSelectorInterface> CreateNetworkSelector() {
   return std::make_unique<NetworkSelectorStub>();
 }
 
 }  // namespace network
 
-// Defined in network_selector_interface.h.
+// Defined in common/network_selector_interface.h.
 const NetworkId kDefaultNetworkId = 0;
 
 bool NetworkSelectorStub::SetProcessNetwork(NetworkId network_id) {
diff --git a/network_selector_stub.h b/common/network_selector_stub.h
similarity index 81%
rename from network_selector_stub.h
rename to common/network_selector_stub.h
index b3f7b48..b32df91 100644
--- a/network_selector_stub.h
+++ b/common/network_selector_stub.h
@@ -14,12 +14,12 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_NETWORK_SELECTOR_STUB_H_
-#define UPDATE_ENGINE_NETWORK_SELECTOR_STUB_H_
+#ifndef UPDATE_ENGINE_COMMON_NETWORK_SELECTOR_STUB_H_
+#define UPDATE_ENGINE_COMMON_NETWORK_SELECTOR_STUB_H_
 
 #include <base/macros.h>
 
-#include "update_engine/network_selector_interface.h"
+#include "update_engine/common/network_selector_interface.h"
 
 namespace chromeos_update_engine {
 
@@ -37,4 +37,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_NETWORK_SELECTOR_STUB_H_
+#endif  // UPDATE_ENGINE_COMMON_NETWORK_SELECTOR_STUB_H_
diff --git a/common/platform_constants.h b/common/platform_constants.h
index 243af69..06399e5 100644
--- a/common/platform_constants.h
+++ b/common/platform_constants.h
@@ -54,9 +54,11 @@
 // The stateful directory used by update_engine.
 extern const char kNonVolatileDirectory[];
 
-// Options passed to the filesystem when mounting the new partition during
-// postinstall.
-extern const char kPostinstallMountOptions[];
+#ifdef __ANDROID_RECOVERY__
+constexpr bool kIsRecovery = true;
+#else
+constexpr bool kIsRecovery = false;
+#endif
 
 }  // namespace constants
 }  // namespace chromeos_update_engine
diff --git a/common/prefs.cc b/common/prefs.cc
index 12d06c0..1e06be4 100644
--- a/common/prefs.cc
+++ b/common/prefs.cc
@@ -18,22 +18,44 @@
 
 #include <algorithm>
 
+#include <base/files/file_enumerator.h>
 #include <base/files/file_util.h>
 #include <base/logging.h>
 #include <base/strings/string_number_conversions.h>
+#include <base/strings/string_split.h>
 #include <base/strings/string_util.h>
 
 #include "update_engine/common/utils.h"
 
 using std::string;
+using std::vector;
 
 namespace chromeos_update_engine {
 
+namespace {
+
+void DeleteEmptyDirectories(const base::FilePath& path) {
+  base::FileEnumerator path_enum(
+      path, false /* recursive */, base::FileEnumerator::DIRECTORIES);
+  for (base::FilePath dir_path = path_enum.Next(); !dir_path.empty();
+       dir_path = path_enum.Next()) {
+    DeleteEmptyDirectories(dir_path);
+    if (base::IsDirectoryEmpty(dir_path))
+#if BASE_VER < 800000
+      base::DeleteFile(dir_path, false);
+#else
+      base::DeleteFile(dir_path);
+#endif
+  }
+}
+
+}  // namespace
+
 bool PrefsBase::GetString(const string& key, string* value) const {
   return storage_->GetKey(key, value);
 }
 
-bool PrefsBase::SetString(const string& key, const string& value) {
+bool PrefsBase::SetString(const string& key, std::string_view value) {
   TEST_AND_RETURN_FALSE(storage_->SetKey(key, value));
   const auto observers_for_key = observers_.find(key);
   if (observers_for_key != observers_.end()) {
@@ -54,7 +76,7 @@
 }
 
 bool PrefsBase::SetInt64(const string& key, const int64_t value) {
-  return SetString(key, base::Int64ToString(value));
+  return SetString(key, base::NumberToString(value));
 }
 
 bool PrefsBase::GetBoolean(const string& key, bool* value) const {
@@ -92,6 +114,28 @@
   return true;
 }
 
+bool PrefsBase::Delete(const string& pref_key, const vector<string>& nss) {
+  // Delete pref key for platform.
+  bool success = Delete(pref_key);
+  // Delete pref key in each namespace.
+  for (const auto& ns : nss) {
+    vector<string> namespace_keys;
+    success = GetSubKeys(ns, &namespace_keys) && success;
+    for (const auto& key : namespace_keys) {
+      auto last_key_separator = key.find_last_of(kKeySeparator);
+      if (last_key_separator != string::npos &&
+          pref_key == key.substr(last_key_separator + 1)) {
+        success = Delete(key) && success;
+      }
+    }
+  }
+  return success;
+}
+
+bool PrefsBase::GetSubKeys(const string& ns, vector<string>* keys) const {
+  return storage_->GetSubKeys(ns, keys);
+}
+
 void PrefsBase::AddObserver(const string& key, ObserverInterface* observer) {
   observers_[key].push_back(observer);
 }
@@ -104,6 +148,10 @@
     observers_for_key.erase(observer_it);
 }
 
+string PrefsInterface::CreateSubKey(const vector<string>& ns_and_key) {
+  return base::JoinString(ns_and_key, string(1, kKeySeparator));
+}
+
 // Prefs
 
 bool Prefs::Init(const base::FilePath& prefs_dir) {
@@ -112,6 +160,8 @@
 
 bool Prefs::FileStorage::Init(const base::FilePath& prefs_dir) {
   prefs_dir_ = prefs_dir;
+  // Delete any empty directories, ignoring errors while doing so.
+  DeleteEmptyDirectories(prefs_dir_);
   return true;
 }
 
@@ -119,13 +169,30 @@
   base::FilePath filename;
   TEST_AND_RETURN_FALSE(GetFileNameForKey(key, &filename));
   if (!base::ReadFileToString(filename, value)) {
-    LOG(INFO) << key << " not present in " << prefs_dir_.value();
     return false;
   }
   return true;
 }
 
-bool Prefs::FileStorage::SetKey(const string& key, const string& value) {
+bool Prefs::FileStorage::GetSubKeys(const string& ns,
+                                    vector<string>* keys) const {
+  base::FilePath filename;
+  TEST_AND_RETURN_FALSE(GetFileNameForKey(ns, &filename));
+  base::FileEnumerator namespace_enum(
+      prefs_dir_, true, base::FileEnumerator::FILES);
+  for (base::FilePath f = namespace_enum.Next(); !f.empty();
+       f = namespace_enum.Next()) {
+    auto filename_str = filename.value();
+    if (f.value().compare(0, filename_str.length(), filename_str) == 0) {
+      // Return only the key part after the |prefs_dir_| prefix and slash.
+      keys->push_back(f.value().substr(
+          prefs_dir_.AsEndingWithSeparator().value().length()));
+    }
+  }
+  return true;
+}
+
+bool Prefs::FileStorage::SetKey(const string& key, std::string_view value) {
   base::FilePath filename;
   TEST_AND_RETURN_FALSE(GetFileNameForKey(key, &filename));
   if (!base::DirectoryExists(filename.DirName())) {
@@ -147,19 +214,21 @@
 bool Prefs::FileStorage::DeleteKey(const string& key) {
   base::FilePath filename;
   TEST_AND_RETURN_FALSE(GetFileNameForKey(key, &filename));
+#if BASE_VER < 800000
   TEST_AND_RETURN_FALSE(base::DeleteFile(filename, false));
+#else
+  TEST_AND_RETURN_FALSE(base::DeleteFile(filename));
+#endif
   return true;
 }
 
 bool Prefs::FileStorage::GetFileNameForKey(const string& key,
                                            base::FilePath* filename) const {
-  // Allows only non-empty keys containing [A-Za-z0-9_-].
+  // Allows only non-empty keys containing [A-Za-z0-9_-/].
   TEST_AND_RETURN_FALSE(!key.empty());
-  for (size_t i = 0; i < key.size(); ++i) {
-    char c = key.at(i);
+  for (char c : key)
     TEST_AND_RETURN_FALSE(base::IsAsciiAlpha(c) || base::IsAsciiDigit(c) ||
-                          c == '_' || c == '-');
-  }
+                          c == '_' || c == '-' || c == kKeySeparator);
   *filename = prefs_dir_.Append(key);
   return true;
 }
@@ -175,8 +244,26 @@
   return true;
 }
 
+bool MemoryPrefs::MemoryStorage::GetSubKeys(const string& ns,
+                                            vector<string>* keys) const {
+  using value_type = decltype(values_)::value_type;
+  using key_type = decltype(values_)::key_type;
+  auto lower_comp = [](const value_type& pr, const key_type& ns) {
+    return pr.first.substr(0, ns.length()) < ns;
+  };
+  auto upper_comp = [](const key_type& ns, const value_type& pr) {
+    return ns < pr.first.substr(0, ns.length());
+  };
+  auto lower_it =
+      std::lower_bound(begin(values_), end(values_), ns, lower_comp);
+  auto upper_it = std::upper_bound(lower_it, end(values_), ns, upper_comp);
+  while (lower_it != upper_it)
+    keys->push_back((lower_it++)->first);
+  return true;
+}
+
 bool MemoryPrefs::MemoryStorage::SetKey(const string& key,
-                                        const string& value) {
+                                        std::string_view value) {
   values_[key] = value;
   return true;
 }
@@ -187,9 +274,8 @@
 
 bool MemoryPrefs::MemoryStorage::DeleteKey(const string& key) {
   auto it = values_.find(key);
-  if (it == values_.end())
-    return false;
-  values_.erase(it);
+  if (it != values_.end())
+    values_.erase(it);
   return true;
 }
 
diff --git a/common/prefs.h b/common/prefs.h
index 0116454..93477dd 100644
--- a/common/prefs.h
+++ b/common/prefs.h
@@ -19,6 +19,7 @@
 
 #include <map>
 #include <string>
+#include <string_view>
 #include <vector>
 
 #include <base/files/file_path.h>
@@ -42,9 +43,14 @@
     // Returns whether the operation succeeded.
     virtual bool GetKey(const std::string& key, std::string* value) const = 0;
 
+    // Get the keys stored within the namespace. If there are no keys in the
+    // namespace, |keys| will be empty. Returns whether the operation succeeded.
+    virtual bool GetSubKeys(const std::string& ns,
+                            std::vector<std::string>* keys) const = 0;
+
     // Set the value of the key named |key| to |value| regardless of the
     // previous value. Returns whether the operation succeeded.
-    virtual bool SetKey(const std::string& key, const std::string& value) = 0;
+    virtual bool SetKey(const std::string& key, std::string_view value) = 0;
 
     // Returns whether the key named |key| exists.
     virtual bool KeyExists(const std::string& key) const = 0;
@@ -61,7 +67,7 @@
 
   // PrefsInterface methods.
   bool GetString(const std::string& key, std::string* value) const override;
-  bool SetString(const std::string& key, const std::string& value) override;
+  bool SetString(const std::string& key, std::string_view value) override;
   bool GetInt64(const std::string& key, int64_t* value) const override;
   bool SetInt64(const std::string& key, const int64_t value) override;
   bool GetBoolean(const std::string& key, bool* value) const override;
@@ -69,6 +75,11 @@
 
   bool Exists(const std::string& key) const override;
   bool Delete(const std::string& key) override;
+  bool Delete(const std::string& pref_key,
+              const std::vector<std::string>& nss) override;
+
+  bool GetSubKeys(const std::string& ns,
+                  std::vector<std::string>* keys) const override;
 
   void AddObserver(const std::string& key,
                    ObserverInterface* observer) override;
@@ -111,7 +122,9 @@
 
     // PrefsBase::StorageInterface overrides.
     bool GetKey(const std::string& key, std::string* value) const override;
-    bool SetKey(const std::string& key, const std::string& value) override;
+    bool GetSubKeys(const std::string& ns,
+                    std::vector<std::string>* keys) const override;
+    bool SetKey(const std::string& key, std::string_view value) override;
     bool KeyExists(const std::string& key) const override;
     bool DeleteKey(const std::string& key) override;
 
@@ -149,7 +162,9 @@
 
     // PrefsBase::StorageInterface overrides.
     bool GetKey(const std::string& key, std::string* value) const override;
-    bool SetKey(const std::string& key, const std::string& value) override;
+    bool GetSubKeys(const std::string& ns,
+                    std::vector<std::string>* keys) const override;
+    bool SetKey(const std::string& key, std::string_view value) override;
     bool KeyExists(const std::string& key) const override;
     bool DeleteKey(const std::string& key) override;
 
diff --git a/common/prefs_interface.h b/common/prefs_interface.h
index 03ae3ec..e773a35 100644
--- a/common/prefs_interface.h
+++ b/common/prefs_interface.h
@@ -20,6 +20,7 @@
 #include <stdint.h>
 
 #include <string>
+#include <vector>
 
 namespace chromeos_update_engine {
 
@@ -51,7 +52,7 @@
 
   // Associates |key| with a string |value|. Returns true on success,
   // false otherwise.
-  virtual bool SetString(const std::string& key, const std::string& value) = 0;
+  virtual bool SetString(const std::string& key, std::string_view value) = 0;
 
   // Gets an int64_t |value| associated with |key|. Returns true on
   // success, false on failure (including when the |key| is not
@@ -79,6 +80,19 @@
   // this key. Calling with non-existent keys does nothing.
   virtual bool Delete(const std::string& key) = 0;
 
+  // Deletes |pref_key| from the platform prefs and from the given namespace
+  // subdirectories. Within a namespace, keys are matched against the last
+  // component of each stored key. Returns true if all deletions succeeded.
+  virtual bool Delete(const std::string& pref_key,
+                      const std::vector<std::string>& nss) = 0;
+
+  // Creates a sub-preference key by joining |ns_with_key| on kKeySeparator.
+  static std::string CreateSubKey(const std::vector<std::string>& ns_with_key);
+
+  // Returns a list of keys within the namespace.
+  virtual bool GetSubKeys(const std::string& ns,
+                          std::vector<std::string>* keys) const = 0;
+
   // Add an observer to watch whenever the given |key| is modified. The
   // OnPrefSet() and OnPrefDelete() methods will be called whenever any of the
   // Set*() methods or the Delete() method are called on the given key,
@@ -90,6 +104,10 @@
   // anymore for future Set*() and Delete() method calls.
   virtual void RemoveObserver(const std::string& key,
                               ObserverInterface* observer) = 0;
+
+ protected:
+  // Key separator used to create sub key and get file names,
+  static const char kKeySeparator = '/';
 };
 
 }  // namespace chromeos_update_engine
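// Illustrative sketch (not part of the change above): how the sub-key helpers
// declared here combine. The namespace, id and key names are hypothetical.
#include <string>
#include <vector>

#include "update_engine/common/prefs_interface.h"

namespace chromeos_update_engine {

inline void SubKeyUsageSketch(PrefsInterface* prefs) {
  // "dlc/dlc-id/fingerprint" -- stored under the "dlc" namespace.
  auto key = PrefsInterface::CreateSubKey({"dlc", "dlc-id", "fingerprint"});
  prefs->SetString(key, "1.2.3");

  std::vector<std::string> keys;
  prefs->GetSubKeys("dlc/", &keys);  // Lists every key under "dlc/".

  // Deletes "fingerprint" from the platform prefs and from any key in the
  // "dlc" namespace whose last component is "fingerprint".
  prefs->Delete("fingerprint", {"dlc"});
}

}  // namespace chromeos_update_engine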
diff --git a/common/prefs_unittest.cc b/common/prefs_unittest.cc
index cb6fc70..a5f46e5 100644
--- a/common/prefs_unittest.cc
+++ b/common/prefs_unittest.cc
@@ -20,6 +20,7 @@
 
 #include <limits>
 #include <string>
+#include <vector>
 
 #include <base/files/file_util.h>
 #include <base/files/scoped_temp_dir.h>
@@ -30,8 +31,11 @@
 #include <gtest/gtest.h>
 
 using std::string;
+using std::vector;
 using testing::_;
+using testing::ElementsAre;
 using testing::Eq;
+using testing::UnorderedElementsAre;
 
 namespace {
 // Test key used along the tests.
@@ -40,12 +44,109 @@
 
 namespace chromeos_update_engine {
 
-class PrefsTest : public ::testing::Test {
+class BasePrefsTest : public ::testing::Test {
+ protected:
+  void MultiNamespaceKeyTest() {
+    ASSERT_TRUE(common_prefs_);
+    auto key0 = common_prefs_->CreateSubKey({"ns1", "key"});
+    // Corner case for "ns1".
+    auto key0corner = common_prefs_->CreateSubKey({"ns11", "key"});
+    auto key1A = common_prefs_->CreateSubKey({"ns1", "nsA", "keyA"});
+    auto key1B = common_prefs_->CreateSubKey({"ns1", "nsA", "keyB"});
+    auto key2 = common_prefs_->CreateSubKey({"ns1", "nsB", "key"});
+    // Corner case for "ns1/nsB".
+    auto key2corner = common_prefs_->CreateSubKey({"ns1", "nsB1", "key"});
+    EXPECT_FALSE(common_prefs_->Exists(key0));
+    EXPECT_FALSE(common_prefs_->Exists(key1A));
+    EXPECT_FALSE(common_prefs_->Exists(key1B));
+    EXPECT_FALSE(common_prefs_->Exists(key2));
+
+    EXPECT_TRUE(common_prefs_->SetString(key0, ""));
+    EXPECT_TRUE(common_prefs_->SetString(key0corner, ""));
+    EXPECT_TRUE(common_prefs_->SetString(key1A, ""));
+    EXPECT_TRUE(common_prefs_->SetString(key1B, ""));
+    EXPECT_TRUE(common_prefs_->SetString(key2, ""));
+    EXPECT_TRUE(common_prefs_->SetString(key2corner, ""));
+
+    EXPECT_TRUE(common_prefs_->Exists(key0));
+    EXPECT_TRUE(common_prefs_->Exists(key0corner));
+    EXPECT_TRUE(common_prefs_->Exists(key1A));
+    EXPECT_TRUE(common_prefs_->Exists(key1B));
+    EXPECT_TRUE(common_prefs_->Exists(key2));
+    EXPECT_TRUE(common_prefs_->Exists(key2corner));
+
+    vector<string> keys2;
+    EXPECT_TRUE(common_prefs_->GetSubKeys("ns1/nsB/", &keys2));
+    EXPECT_THAT(keys2, ElementsAre(key2));
+    for (const auto& key : keys2)
+      EXPECT_TRUE(common_prefs_->Delete(key));
+    EXPECT_TRUE(common_prefs_->Exists(key0));
+    EXPECT_TRUE(common_prefs_->Exists(key0corner));
+    EXPECT_TRUE(common_prefs_->Exists(key1A));
+    EXPECT_TRUE(common_prefs_->Exists(key1B));
+    EXPECT_FALSE(common_prefs_->Exists(key2));
+    EXPECT_TRUE(common_prefs_->Exists(key2corner));
+
+    vector<string> keys2corner;
+    EXPECT_TRUE(common_prefs_->GetSubKeys("ns1/nsB", &keys2corner));
+    EXPECT_THAT(keys2corner, ElementsAre(key2corner));
+    for (const auto& key : keys2corner)
+      EXPECT_TRUE(common_prefs_->Delete(key));
+    EXPECT_FALSE(common_prefs_->Exists(key2corner));
+
+    vector<string> keys1;
+    EXPECT_TRUE(common_prefs_->GetSubKeys("ns1/nsA/", &keys1));
+    EXPECT_THAT(keys1, UnorderedElementsAre(key1A, key1B));
+    for (const auto& key : keys1)
+      EXPECT_TRUE(common_prefs_->Delete(key));
+    EXPECT_TRUE(common_prefs_->Exists(key0));
+    EXPECT_TRUE(common_prefs_->Exists(key0corner));
+    EXPECT_FALSE(common_prefs_->Exists(key1A));
+    EXPECT_FALSE(common_prefs_->Exists(key1B));
+
+    vector<string> keys0;
+    EXPECT_TRUE(common_prefs_->GetSubKeys("ns1/", &keys0));
+    EXPECT_THAT(keys0, ElementsAre(key0));
+    for (const auto& key : keys0)
+      EXPECT_TRUE(common_prefs_->Delete(key));
+    EXPECT_FALSE(common_prefs_->Exists(key0));
+    EXPECT_TRUE(common_prefs_->Exists(key0corner));
+
+    vector<string> keys0corner;
+    EXPECT_TRUE(common_prefs_->GetSubKeys("ns1", &keys0corner));
+    EXPECT_THAT(keys0corner, ElementsAre(key0corner));
+    for (const auto& key : keys0corner)
+      EXPECT_TRUE(common_prefs_->Delete(key));
+    EXPECT_FALSE(common_prefs_->Exists(key0corner));
+
+    // Test sub directory namespace.
+    const string kDlcPrefsSubDir = "foo-dir";
+    key1A = common_prefs_->CreateSubKey({kDlcPrefsSubDir, "dlc1", "keyA"});
+    EXPECT_TRUE(common_prefs_->SetString(key1A, "fp_1A"));
+    key1B = common_prefs_->CreateSubKey({kDlcPrefsSubDir, "dlc1", "keyB"});
+    EXPECT_TRUE(common_prefs_->SetString(key1B, "fp_1B"));
+    auto key2A = common_prefs_->CreateSubKey({kDlcPrefsSubDir, "dlc2", "keyA"});
+    EXPECT_TRUE(common_prefs_->SetString(key2A, "fp_A2"));
+
+    vector<string> fpKeys;
+    EXPECT_TRUE(common_prefs_->GetSubKeys(kDlcPrefsSubDir, &fpKeys));
+    EXPECT_EQ(fpKeys.size(), 3UL);
+    EXPECT_TRUE(common_prefs_->Delete(fpKeys[0]));
+    EXPECT_TRUE(common_prefs_->Delete(fpKeys[1]));
+    EXPECT_TRUE(common_prefs_->Delete(fpKeys[2]));
+    EXPECT_FALSE(common_prefs_->Exists(key1A));
+  }
+
+  PrefsInterface* common_prefs_;
+};
+
+class PrefsTest : public BasePrefsTest {
  protected:
   void SetUp() override {
     ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
     prefs_dir_ = temp_dir_.GetPath();
     ASSERT_TRUE(prefs_.Init(prefs_dir_));
+    common_prefs_ = &prefs_;
   }
 
   bool SetValue(const string& key, const string& value) {
@@ -59,6 +160,31 @@
   Prefs prefs_;
 };
 
+TEST(Prefs, Init) {
+  Prefs prefs;
+  const string ns1 = "ns1";
+  const string ns2A = "ns2A";
+  const string ns2B = "ns2B";
+  const string sub_pref = "sp";
+
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  auto ns1_path = temp_dir.GetPath().Append(ns1);
+  auto ns2A_path = ns1_path.Append(ns2A);
+  auto ns2B_path = ns1_path.Append(ns2B);
+  auto sub_pref_path = ns2A_path.Append(sub_pref);
+
+  EXPECT_TRUE(base::CreateDirectory(ns2B_path));
+  EXPECT_TRUE(base::PathExists(ns2B_path));
+
+  EXPECT_TRUE(base::CreateDirectory(sub_pref_path));
+  EXPECT_TRUE(base::PathExists(sub_pref_path));
+
+  EXPECT_TRUE(base::PathExists(ns1_path));
+  ASSERT_TRUE(prefs.Init(temp_dir.GetPath()));
+  EXPECT_FALSE(base::PathExists(ns1_path));
+}
+
 TEST_F(PrefsTest, GetFileNameForKey) {
   const char kAllvalidCharsKey[] =
       "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_-";
@@ -77,6 +203,18 @@
   EXPECT_FALSE(prefs_.file_storage_.GetFileNameForKey("", &path));
 }
 
+TEST_F(PrefsTest, CreateSubKey) {
+  const string name_space = "ns";
+  const string sub_pref1 = "sp1";
+  const string sub_pref2 = "sp2";
+  const string sub_key = "sk";
+
+  EXPECT_EQ(PrefsInterface::CreateSubKey({name_space, sub_pref1, sub_key}),
+            "ns/sp1/sk");
+  EXPECT_EQ(PrefsInterface::CreateSubKey({name_space, sub_pref2, sub_key}),
+            "ns/sp2/sk");
+}
+
 TEST_F(PrefsTest, GetString) {
   const string test_data = "test data";
   ASSERT_TRUE(SetValue(kKey, test_data));
@@ -279,6 +417,94 @@
   EXPECT_FALSE(prefs_.Exists(kKey));
 }
 
+TEST_F(PrefsTest, SetDeleteSubKey) {
+  const string name_space = "ns";
+  const string sub_pref = "sp";
+  const string sub_key1 = "sk1";
+  const string sub_key2 = "sk2";
+  auto key1 = prefs_.CreateSubKey({name_space, sub_pref, sub_key1});
+  auto key2 = prefs_.CreateSubKey({name_space, sub_pref, sub_key2});
+  base::FilePath sub_pref_path = prefs_dir_.Append(name_space).Append(sub_pref);
+
+  ASSERT_TRUE(prefs_.SetInt64(key1, 0));
+  ASSERT_TRUE(prefs_.SetInt64(key2, 0));
+  EXPECT_TRUE(base::PathExists(sub_pref_path.Append(sub_key1)));
+  EXPECT_TRUE(base::PathExists(sub_pref_path.Append(sub_key2)));
+
+  ASSERT_TRUE(prefs_.Delete(key1));
+  EXPECT_FALSE(base::PathExists(sub_pref_path.Append(sub_key1)));
+  EXPECT_TRUE(base::PathExists(sub_pref_path.Append(sub_key2)));
+  ASSERT_TRUE(prefs_.Delete(key2));
+  EXPECT_FALSE(base::PathExists(sub_pref_path.Append(sub_key2)));
+  prefs_.Init(prefs_dir_);
+  EXPECT_FALSE(base::PathExists(prefs_dir_.Append(name_space)));
+}
+
+TEST_F(PrefsTest, DeletePrefs) {
+  const string kPrefsSubDir = "foo-dir";
+  const string kFpKey = "kPrefFp";
+  const string kNotFpKey = "NotkPrefFp";
+  const string kOtherKey = "kPrefNotFp";
+
+  EXPECT_TRUE(prefs_.SetString(kFpKey, "3.000"));
+  EXPECT_TRUE(prefs_.SetString(kOtherKey, "not_fp_val"));
+
+  auto key1_fp = prefs_.CreateSubKey({kPrefsSubDir, "id-1", kFpKey});
+  EXPECT_TRUE(prefs_.SetString(key1_fp, "3.7"));
+  auto key_not_fp = prefs_.CreateSubKey({kPrefsSubDir, "id-1", kOtherKey});
+  EXPECT_TRUE(prefs_.SetString(key_not_fp, "not_fp_val"));
+  auto key2_fp = prefs_.CreateSubKey({kPrefsSubDir, "id-2", kFpKey});
+  EXPECT_TRUE(prefs_.SetString(key2_fp, "3.9"));
+  auto key3_fp = prefs_.CreateSubKey({kPrefsSubDir, "id-3", kFpKey});
+  EXPECT_TRUE(prefs_.SetString(key3_fp, "3.45"));
+
+  // Pref key does not match full subkey at end, should not delete.
+  auto key_middle_fp = prefs_.CreateSubKey({kPrefsSubDir, kFpKey, kOtherKey});
+  EXPECT_TRUE(prefs_.SetString(key_middle_fp, "not_fp_val"));
+  auto key_end_not_fp = prefs_.CreateSubKey({kPrefsSubDir, "id-1", kNotFpKey});
+  EXPECT_TRUE(prefs_.SetString(key_end_not_fp, "not_fp_val"));
+
+  // Delete key in platform and one namespace.
+  prefs_.Delete(kFpKey, {kPrefsSubDir});
+
+  EXPECT_FALSE(prefs_.Exists(kFpKey));
+  EXPECT_FALSE(prefs_.Exists(key1_fp));
+  EXPECT_FALSE(prefs_.Exists(key2_fp));
+  EXPECT_FALSE(prefs_.Exists(key3_fp));
+
+  // Check other keys are not deleted.
+  EXPECT_TRUE(prefs_.Exists(kOtherKey));
+  EXPECT_TRUE(prefs_.Exists(key_not_fp));
+  EXPECT_TRUE(prefs_.Exists(key_middle_fp));
+  EXPECT_TRUE(prefs_.Exists(key_end_not_fp));
+}
+
+TEST_F(PrefsTest, DeleteMultipleNamespaces) {
+  const string kFirstSubDir = "foo-dir";
+  const string kSecondarySubDir = "bar-dir";
+  const string kTertiarySubDir = "ter-dir";
+  const string kFpKey = "kPrefFp";
+
+  EXPECT_TRUE(prefs_.SetString(kFpKey, "3.000"));
+  // Set pref key in different namespaces.
+  auto key1_fp = prefs_.CreateSubKey({kFirstSubDir, "id-1", kFpKey});
+  EXPECT_TRUE(prefs_.SetString(key1_fp, "3.7"));
+  auto key2_fp = prefs_.CreateSubKey({kSecondarySubDir, "id-3", kFpKey});
+  EXPECT_TRUE(prefs_.SetString(key2_fp, "7.45"));
+  auto key3_fp = prefs_.CreateSubKey({kTertiarySubDir, "id-3", kFpKey});
+  EXPECT_TRUE(prefs_.SetString(key3_fp, "7.45"));
+
+  // Delete key in platform and given namespaces.
+  prefs_.Delete(kFpKey, {kFirstSubDir, kSecondarySubDir});
+
+  EXPECT_FALSE(prefs_.Exists(kFpKey));
+  EXPECT_FALSE(prefs_.Exists(key1_fp));
+  EXPECT_FALSE(prefs_.Exists(key2_fp));
+
+  // Tertiary namespace not given to delete. Key should still exist.
+  EXPECT_TRUE(prefs_.Exists(key3_fp));
+}
+
 class MockPrefsObserver : public PrefsInterface::ObserverInterface {
  public:
   MOCK_METHOD1(OnPrefSet, void(const string&));
@@ -299,6 +525,19 @@
   prefs_.Delete(kKey);
   testing::Mock::VerifyAndClearExpectations(&mock_obserser);
 
+  auto key1 = prefs_.CreateSubKey({"ns", "sp1", "key1"});
+  prefs_.AddObserver(key1, &mock_obserser);
+
+  EXPECT_CALL(mock_obserser, OnPrefSet(key1));
+  EXPECT_CALL(mock_obserser, OnPrefDeleted(_)).Times(0);
+  prefs_.SetString(key1, "value");
+  testing::Mock::VerifyAndClearExpectations(&mock_obserser);
+
+  EXPECT_CALL(mock_obserser, OnPrefSet(_)).Times(0);
+  EXPECT_CALL(mock_obserser, OnPrefDeleted(Eq(key1)));
+  prefs_.Delete(key1);
+  testing::Mock::VerifyAndClearExpectations(&mock_obserser);
+
   prefs_.RemoveObserver(kKey, &mock_obserser);
 }
 
@@ -341,8 +580,14 @@
   prefs_.RemoveObserver(kInvalidKey, &mock_obserser);
 }
 
-class MemoryPrefsTest : public ::testing::Test {
+TEST_F(PrefsTest, MultiNamespaceKeyTest) {
+  MultiNamespaceKeyTest();
+}
+
+class MemoryPrefsTest : public BasePrefsTest {
  protected:
+  void SetUp() override { common_prefs_ = &prefs_; }
+
   MemoryPrefs prefs_;
 };
 
@@ -358,7 +603,16 @@
 
   EXPECT_TRUE(prefs_.Delete(kKey));
   EXPECT_FALSE(prefs_.Exists(kKey));
-  EXPECT_FALSE(prefs_.Delete(kKey));
+  EXPECT_TRUE(prefs_.Delete(kKey));
+
+  auto key = prefs_.CreateSubKey({"ns", "sp", "sk"});
+  ASSERT_TRUE(prefs_.SetInt64(key, 0));
+  EXPECT_TRUE(prefs_.Exists(key));
+  EXPECT_TRUE(prefs_.Delete(kKey));
+}
+
+TEST_F(MemoryPrefsTest, MultiNamespaceKeyTest) {
+  MultiNamespaceKeyTest();
 }
 
 }  // namespace chromeos_update_engine
diff --git a/common/scoped_task_id.h b/common/scoped_task_id.h
new file mode 100644
index 0000000..91a2986
--- /dev/null
+++ b/common/scoped_task_id.h
@@ -0,0 +1,123 @@
+//
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_SCOPED_TASK_ID_H_
+#define UPDATE_ENGINE_SCOPED_TASK_ID_H_
+
+#include <type_traits>
+#include <utility>
+
+#include <base/bind.h>
+#include <brillo/message_loops/message_loop.h>
+
+namespace chromeos_update_engine {
+
+// Provides unique_ptr-like semantics for |MessageLoop::TaskId|: when an
+// instance goes out of scope, the underlying task is cancelled.
+class ScopedTaskId {
+  using MessageLoop = brillo::MessageLoop;
+
+ public:
+  // Move only type similar to unique_ptr.
+  ScopedTaskId(const ScopedTaskId&) = delete;
+  ScopedTaskId& operator=(const ScopedTaskId&) = delete;
+
+  constexpr ScopedTaskId() = default;
+
+  constexpr ScopedTaskId(ScopedTaskId&& other) noexcept {
+    *this = std::move(other);
+  }
+
+  constexpr ScopedTaskId& operator=(ScopedTaskId&& other) noexcept {
+    std::swap(task_id_, other.task_id_);
+    return *this;
+  }
+
+  // Posts a callback on the current message loop. Returns true on success,
+  // false if the previous callback hasn't run yet or if scheduling failed on
+  // the MessageLoop side.
+  [[nodiscard]] bool PostTask(const base::Location& from_here,
+                              base::OnceClosure&& callback,
+                              base::TimeDelta delay = {}) noexcept {
+    return PostTask<decltype(callback)>(from_here, std::move(callback), delay);
+  }
+  [[nodiscard]] bool PostTask(const base::Location& from_here,
+                              std::function<void()>&& callback,
+                              base::TimeDelta delay = {}) noexcept {
+    return PostTask<decltype(callback)>(from_here, std::move(callback), delay);
+  }
+
+  ~ScopedTaskId() noexcept { Cancel(); }
+
+  // Cancels the underlying managed task. Returns true if the cancellation
+  // succeeded, false if no task was scheduled or the cancellation failed.
+  bool Cancel() noexcept {
+    if (task_id_ != MessageLoop::kTaskIdNull) {
+      if (MessageLoop::current()->CancelTask(task_id_)) {
+        LOG(INFO) << "Cancelled task id " << task_id_;
+        task_id_ = MessageLoop::kTaskIdNull;
+        return true;
+      }
+    }
+    return false;
+  }
+
+  [[nodiscard]] constexpr bool IsScheduled() const noexcept {
+    return task_id_ != MessageLoop::kTaskIdNull;
+  }
+
+  [[nodiscard]] constexpr bool operator==(const ScopedTaskId& other) const
+      noexcept {
+    return other.task_id_ == task_id_;
+  }
+
+  [[nodiscard]] constexpr bool operator<(const ScopedTaskId& other) const
+      noexcept {
+    return task_id_ < other.task_id_;
+  }
+
+ private:
+  template <typename Callable>
+  [[nodiscard]] bool PostTask(const base::Location& from_here,
+                              Callable&& callback,
+                              base::TimeDelta delay) noexcept {
+    if (task_id_ != MessageLoop::kTaskIdNull) {
+      LOG(ERROR) << "Scheduling another task but task id " << task_id_
+                 << " isn't executed yet! This can cause the old task to leak.";
+      return false;
+    }
+    task_id_ = MessageLoop::current()->PostDelayedTask(
+        from_here,
+        base::BindOnce(&ScopedTaskId::ExecuteTask<decltype(callback)>,
+                       base::Unretained(this),
+                       std::move(callback)),
+        delay);
+    return task_id_ != MessageLoop::kTaskIdNull;
+  }
+  template <typename Callable>
+  void ExecuteTask(Callable&& callback) {
+    task_id_ = MessageLoop::kTaskIdNull;
+    if constexpr (std::is_same_v<Callable&&, base::OnceClosure&&>) {
+      std::move(callback).Run();
+    } else {
+      std::move(callback)();
+    }
+  }
+  MessageLoop::TaskId task_id_{MessageLoop::kTaskIdNull};
+};
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_SCOPED_TASK_ID_H_
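// Illustrative sketch (not part of the change above): ScopedTaskId cancels its
// task when it goes out of scope, so a pending callback cannot outlive the
// object that owns it. Requires a running brillo::MessageLoop; the delay and
// log messages are hypothetical.
#include <base/location.h>
#include <base/logging.h>
#include <base/time/time.h>

#include "update_engine/common/scoped_task_id.h"

namespace chromeos_update_engine {

inline void ScopedTaskIdSketch() {
  ScopedTaskId pending;
  if (!pending.PostTask(
          FROM_HERE,
          [] { LOG(INFO) << "ran before the owner went away"; },
          base::TimeDelta::FromSeconds(5))) {
    LOG(ERROR) << "A task was already scheduled on this ScopedTaskId";
  }
  // When |pending| is destroyed here, the 5s task is cancelled if it has not
  // run yet.
}

}  // namespace chromeos_update_engine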
diff --git a/service_observer_interface.h b/common/service_observer_interface.h
similarity index 88%
rename from service_observer_interface.h
rename to common/service_observer_interface.h
index 4edb0ac..c471231 100644
--- a/service_observer_interface.h
+++ b/common/service_observer_interface.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_SERVICE_OBSERVER_INTERFACE_H_
-#define UPDATE_ENGINE_SERVICE_OBSERVER_INTERFACE_H_
+#ifndef UPDATE_ENGINE_COMMON_SERVICE_OBSERVER_INTERFACE_H_
+#define UPDATE_ENGINE_COMMON_SERVICE_OBSERVER_INTERFACE_H_
 
 #include <memory>
 #include <string>
@@ -43,4 +43,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_SERVICE_OBSERVER_INTERFACE_H_
+#endif  // UPDATE_ENGINE_COMMON_SERVICE_OBSERVER_INTERFACE_H_
diff --git a/common/subprocess.cc b/common/subprocess.cc
index 0131f10..023017b 100644
--- a/common/subprocess.cc
+++ b/common/subprocess.cc
@@ -29,9 +29,9 @@
 #include <base/bind.h>
 #include <base/logging.h>
 #include <base/posix/eintr_wrapper.h>
+#include <base/stl_util.h>
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
-#include <brillo/process.h>
 #include <brillo/secure_blob.h>
 
 #include "update_engine/common/utils.h"
@@ -95,6 +95,7 @@
   proc->RedirectUsingPipe(STDOUT_FILENO, false);
   proc->SetPreExecCallback(base::Bind(&SetupChild, env, flags));
 
+  LOG(INFO) << "Running \"" << base::JoinString(cmd, " ") << "\"";
   return proc->Start();
 }
 
@@ -122,13 +123,12 @@
     bytes_read = 0;
     bool eof;
     bool ok = utils::ReadAll(
-        record->stdout_fd, buf, arraysize(buf), &bytes_read, &eof);
+        record->stdout_fd, buf, base::size(buf), &bytes_read, &eof);
     record->stdout.append(buf, bytes_read);
     if (!ok || eof) {
       // There was either an error or an EOF condition, so we are done watching
       // the file descriptor.
-      MessageLoop::current()->CancelTask(record->stdout_task_id);
-      record->stdout_task_id = MessageLoop::kTaskIdNull;
+      record->stdout_controller.reset();
       return;
     }
   } while (bytes_read);
@@ -143,8 +143,7 @@
   // Make sure we read any remaining process output and then close the pipe.
   OnStdoutReady(record);
 
-  MessageLoop::current()->CancelTask(record->stdout_task_id);
-  record->stdout_task_id = MessageLoop::kTaskIdNull;
+  record->stdout_controller.reset();
 
   // Don't print any log if the subprocess exited with exit code 0.
   if (info.si_code != CLD_EXITED) {
@@ -199,12 +198,9 @@
                << record->stdout_fd << ".";
   }
 
-  record->stdout_task_id = MessageLoop::current()->WatchFileDescriptor(
-      FROM_HERE,
+  record->stdout_controller = base::FileDescriptorWatcher::WatchReadable(
       record->stdout_fd,
-      MessageLoop::WatchMode::kWatchRead,
-      true,
-      base::Bind(&Subprocess::OnStdoutReady, record.get()));
+      base::BindRepeating(&Subprocess::OnStdoutReady, record.get()));
 
   subprocess_records_[pid] = std::move(record);
   return pid;
@@ -234,22 +230,20 @@
 
 bool Subprocess::SynchronousExec(const vector<string>& cmd,
                                  int* return_code,
-                                 string* stdout) {
-  // The default for SynchronousExec is to use kSearchPath since the code relies
-  // on that.
-  return SynchronousExecFlags(
-      cmd, kRedirectStderrToStdout | kSearchPath, return_code, stdout);
+                                 string* stdout,
+                                 string* stderr) {
+  // The default for |SynchronousExec| is to use |kSearchPath| since the code
+  // relies on that.
+  return SynchronousExecFlags(cmd, kSearchPath, return_code, stdout, stderr);
 }
 
 bool Subprocess::SynchronousExecFlags(const vector<string>& cmd,
                                       uint32_t flags,
                                       int* return_code,
-                                      string* stdout) {
+                                      string* stdout,
+                                      string* stderr) {
   brillo::ProcessImpl proc;
-  // It doesn't make sense to redirect some pipes in the synchronous case
-  // because we won't be reading on our end, so we don't expose the output_pipes
-  // in this case.
-  if (!LaunchProcess(cmd, flags, {}, &proc)) {
+  if (!LaunchProcess(cmd, flags, {STDERR_FILENO}, &proc)) {
     LOG(ERROR) << "Failed to launch subprocess";
     return false;
   }
@@ -257,21 +251,39 @@
   if (stdout) {
     stdout->clear();
   }
+  if (stderr) {
+    stderr->clear();
+  }
 
-  int fd = proc.GetPipe(STDOUT_FILENO);
+  // Read from both stdout and stderr individually.
+  int stdout_fd = proc.GetPipe(STDOUT_FILENO);
+  int stderr_fd = proc.GetPipe(STDERR_FILENO);
   vector<char> buffer(32 * 1024);
-  while (true) {
-    int rc = HANDLE_EINTR(read(fd, buffer.data(), buffer.size()));
-    if (rc < 0) {
-      PLOG(ERROR) << "Reading from child's output";
-      break;
-    } else if (rc == 0) {
-      break;
-    } else {
-      if (stdout)
+  bool stdout_closed = false, stderr_closed = false;
+  while (!stdout_closed || !stderr_closed) {
+    if (!stdout_closed) {
+      int rc = HANDLE_EINTR(read(stdout_fd, buffer.data(), buffer.size()));
+      if (rc <= 0) {
+        stdout_closed = true;
+        if (rc < 0)
+          PLOG(ERROR) << "Reading from child's stdout";
+      } else if (stdout != nullptr) {
         stdout->append(buffer.data(), rc);
+      }
+    }
+
+    if (!stderr_closed) {
+      int rc = HANDLE_EINTR(read(stderr_fd, buffer.data(), buffer.size()));
+      if (rc <= 0) {
+        stderr_closed = true;
+        if (rc < 0)
+          PLOG(ERROR) << "Reading from child's stderr";
+      } else if (stderr != nullptr) {
+        stderr->append(buffer.data(), rc);
+      }
     }
   }
+
   // At this point, the subprocess already closed the output, so we only need to
   // wait for it to finish.
   int proc_return_code = proc.Wait();
diff --git a/common/subprocess.h b/common/subprocess.h
index bc19d16..2ed8b81 100644
--- a/common/subprocess.h
+++ b/common/subprocess.h
@@ -25,13 +25,19 @@
 #include <vector>
 
 #include <base/callback.h>
+#include <base/files/file_descriptor_watcher_posix.h>
 #include <base/logging.h>
 #include <base/macros.h>
 #include <brillo/asynchronous_signal_handler_interface.h>
 #include <brillo/message_loops/message_loop.h>
+#ifdef __CHROMEOS__
+#include <brillo/process/process.h>
+#include <brillo/process/process_reaper.h>
+#else
 #include <brillo/process.h>
 #include <brillo/process_reaper.h>
-#include <gtest/gtest_prod.h>  // for FRIEND_TEST
+#endif  // __CHROMEOS__
+#include <gtest/gtest_prod.h>
 
 // The Subprocess class is a singleton. It's used to spawn off a subprocess
 // and get notified when the subprocess exits. The result of Exec() can
@@ -87,14 +93,16 @@
 
   // Executes a command synchronously. Returns true on success. If |stdout| is
   // non-null, the process output is stored in it, otherwise the output is
-  // logged. Note that stderr is redirected to stdout.
+  // logged. If |stderr| is non-null, the stderr output is stored in it.
   static bool SynchronousExec(const std::vector<std::string>& cmd,
                               int* return_code,
-                              std::string* stdout);
+                              std::string* stdout,
+                              std::string* stderr);
   static bool SynchronousExecFlags(const std::vector<std::string>& cmd,
                                    uint32_t flags,
                                    int* return_code,
-                                   std::string* stdout);
+                                   std::string* stdout,
+                                   std::string* stderr);
 
   // Gets the one instance.
   static Subprocess& Get() { return *subprocess_singleton_; }
@@ -120,8 +128,8 @@
 
     // These are used to monitor the stdout of the running process, including
     // the stderr if it was redirected.
-    brillo::MessageLoop::TaskId stdout_task_id{
-        brillo::MessageLoop::kTaskIdNull};
+    std::unique_ptr<base::FileDescriptorWatcher::Controller> stdout_controller;
+
     int stdout_fd{-1};
     std::string stdout;
   };
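
SynchronousExec() and SynchronousExecFlags() now capture stdout and stderr into separate strings instead of merging stderr into stdout. Below is a minimal caller-side sketch under the new signature; the helper name and the lsblk command are illustrative only, not taken from this tree.

    #include <string>
    #include <vector>

    #include <base/logging.h>

    #include "update_engine/common/subprocess.h"

    namespace chromeos_update_engine {

    // Hypothetical caller showing the separated stdout/stderr outputs.
    bool GetBlockDevices(std::string* out_json) {
      int exit_code = 0;
      std::string out, err;
      // kSearchPath is applied by SynchronousExec(), so a bare command works.
      if (!Subprocess::SynchronousExec(
              {"lsblk", "--json"}, &exit_code, &out, &err) ||
          exit_code != 0) {
        LOG(ERROR) << "lsblk failed (" << exit_code << "): " << err;
        return false;
      }
      *out_json = out;
      return true;
    }

    }  // namespace chromeos_update_engine
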
diff --git a/common/subprocess_unittest.cc b/common/subprocess_unittest.cc
index 104ef41..ff4158e 100644
--- a/common/subprocess_unittest.cc
+++ b/common/subprocess_unittest.cc
@@ -28,9 +28,14 @@
 #include <base/bind.h>
 #include <base/files/scoped_temp_dir.h>
 #include <base/location.h>
+#if BASE_VER < 780000  // Android
 #include <base/message_loop/message_loop.h>
+#endif  // BASE_VER < 780000
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
+#if BASE_VER >= 780000  // Chrome OS
+#include <base/task/single_thread_task_executor.h>
+#endif  // BASE_VER >= 780000
 #include <base/time/time.h>
 #include <brillo/message_loops/base_message_loop.h>
 #include <brillo/message_loops/message_loop.h>
@@ -45,6 +50,7 @@
 using base::TimeDelta;
 using brillo::MessageLoop;
 using std::string;
+using std::unique_ptr;
 using std::vector;
 
 namespace {
@@ -69,10 +75,16 @@
     subprocess_.Init(&async_signal_handler_);
   }
 
+#if BASE_VER < 780000  // Android
   base::MessageLoopForIO base_loop_;
   brillo::BaseMessageLoop loop_{&base_loop_};
+#else   // Chrome OS
+  base::SingleThreadTaskExecutor base_loop_{base::MessagePumpType::IO};
+  brillo::BaseMessageLoop loop_{base_loop_.task_runner()};
+#endif  // BASE_VER < 780000
   brillo::AsynchronousSignalHandler async_signal_handler_;
   Subprocess subprocess_;
+  unique_ptr<base::FileDescriptorWatcher::Controller> watcher_;
 };
 
 namespace {
@@ -193,7 +205,7 @@
 TEST_F(SubprocessTest, SynchronousTrueSearchsOnPath) {
   int rc = -1;
   EXPECT_TRUE(Subprocess::SynchronousExecFlags(
-      {"true"}, Subprocess::kSearchPath, &rc, nullptr));
+      {"true"}, Subprocess::kSearchPath, &rc, nullptr, nullptr));
   EXPECT_EQ(0, rc);
 }
 
@@ -201,16 +213,17 @@
   vector<string> cmd = {
       kBinPath "/sh", "-c", "echo -n stdout-here; echo -n stderr-there >&2"};
   int rc = -1;
-  string stdout;
-  ASSERT_TRUE(Subprocess::SynchronousExec(cmd, &rc, &stdout));
+  string stdout, stderr;
+  ASSERT_TRUE(Subprocess::SynchronousExec(cmd, &rc, &stdout, &stderr));
   EXPECT_EQ(0, rc);
-  EXPECT_EQ("stdout-herestderr-there", stdout);
+  EXPECT_EQ("stdout-here", stdout);
+  EXPECT_EQ("stderr-there", stderr);
 }
 
 TEST_F(SubprocessTest, SynchronousEchoNoOutputTest) {
   int rc = -1;
   ASSERT_TRUE(Subprocess::SynchronousExec(
-      {kBinPath "/sh", "-c", "echo test"}, &rc, nullptr));
+      {kBinPath "/sh", "-c", "echo test"}, &rc, nullptr, nullptr));
   EXPECT_EQ(0, rc);
 }
 
@@ -255,26 +268,28 @@
   int fifo_fd = HANDLE_EINTR(open(fifo_path.c_str(), O_RDONLY));
   EXPECT_GE(fifo_fd, 0);
 
-  loop_.WatchFileDescriptor(FROM_HERE,
-                            fifo_fd,
-                            MessageLoop::WatchMode::kWatchRead,
-                            false,
-                            base::Bind(
-                                [](int fifo_fd, uint32_t tag) {
-                                  char c;
-                                  EXPECT_EQ(1,
-                                            HANDLE_EINTR(read(fifo_fd, &c, 1)));
-                                  EXPECT_EQ('X', c);
-                                  LOG(INFO) << "Killing tag " << tag;
-                                  Subprocess::Get().KillExec(tag);
-                                },
-                                fifo_fd,
-                                tag));
+  watcher_ = base::FileDescriptorWatcher::WatchReadable(
+      fifo_fd,
+      base::Bind(
+          [](unique_ptr<base::FileDescriptorWatcher::Controller>* watcher,
+             int fifo_fd,
+             uint32_t tag) {
+            char c;
+            EXPECT_EQ(1, HANDLE_EINTR(read(fifo_fd, &c, 1)));
+            EXPECT_EQ('X', c);
+            LOG(INFO) << "Killing tag " << tag;
+            Subprocess::Get().KillExec(tag);
+            *watcher = nullptr;
+          },
+          // watcher_ is no longer used outside the closure.
+          base::Unretained(&watcher_),
+          fifo_fd,
+          tag));
 
   // This test would leak a callback that runs when the child process exits
   // unless we wait for it to run.
   brillo::MessageLoopRunUntil(
-      &loop_, TimeDelta::FromSeconds(120), base::Bind([] {
+      &loop_, TimeDelta::FromSeconds(20), base::Bind([] {
         return Subprocess::Get().subprocess_records_.empty();
       }));
   EXPECT_TRUE(Subprocess::Get().subprocess_records_.empty());
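
The watch returned by base::FileDescriptorWatcher::WatchReadable() stays active only while its Controller is alive, which is why the subprocess record now stores a unique_ptr<Controller> and the test resets |watcher_| inside its callback. The sketch below illustrates that lifetime rule, assuming an IO-capable task environment is already running (as the fixture above provides); the helper name is hypothetical and include paths can vary across libchrome versions.

    #include <unistd.h>

    #include <memory>

    #include <base/bind.h>
    #include <base/files/file_descriptor_watcher_posix.h>
    #include <base/logging.h>
    #include <base/posix/eintr_wrapper.h>

    namespace {

    // Starts watching |fd| for readability. The watch stops as soon as the
    // returned Controller is destroyed (e.g. via reset()), which mirrors how
    // Subprocess tears down |stdout_controller| when the child exits.
    std::unique_ptr<base::FileDescriptorWatcher::Controller> WatchFd(int fd) {
      return base::FileDescriptorWatcher::WatchReadable(
          fd, base::BindRepeating(
                  [](int fd) {
                    char c;
                    if (HANDLE_EINTR(read(fd, &c, 1)) == 1)
                      LOG(INFO) << "read byte " << c;
                  },
                  fd));
    }

    }  // namespace
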
diff --git a/payload_generator/graph_types.cc b/common/system_state.cc
similarity index 76%
rename from payload_generator/graph_types.cc
rename to common/system_state.cc
index c03766d..cff1dfe 100644
--- a/payload_generator/graph_types.cc
+++ b/common/system_state.cc
@@ -1,5 +1,5 @@
 //
-// Copyright (C) 2015 The Android Open Source Project
+// Copyright (C) 2020 The Android Open Source Project
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -14,10 +14,10 @@
 // limitations under the License.
 //
 
-#include "update_engine/payload_generator/graph_types.h"
+#include "update_engine/common/system_state.h"
 
 namespace chromeos_update_engine {
 
-const Vertex::Index Vertex::kInvalidIndex = static_cast<Vertex::Index>(-1);
+SystemState* SystemState::g_pointer_ = nullptr;
 
 }  // namespace chromeos_update_engine
diff --git a/system_state.h b/common/system_state.h
similarity index 88%
rename from system_state.h
rename to common/system_state.h
index f46cbcf..8a9c865 100644
--- a/system_state.h
+++ b/common/system_state.h
@@ -14,8 +14,15 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_SYSTEM_STATE_H_
-#define UPDATE_ENGINE_SYSTEM_STATE_H_
+#ifndef UPDATE_ENGINE_COMMON_SYSTEM_STATE_H_
+#define UPDATE_ENGINE_COMMON_SYSTEM_STATE_H_
+
+#include <memory>
+
+#include <base/logging.h>
+
+#include "update_engine/common/clock_interface.h"
+#include "update_engine/common/prefs_interface.h"
 
 namespace chromeos_update_manager {
 
@@ -35,7 +42,6 @@
 // any circular references in header file inclusion. Hence forward-declaring
 // the required classes.
 class BootControlInterface;
-class ClockInterface;
 class ConnectionManagerInterface;
 class DlcServiceInterface;
 class HardwareInterface;
@@ -44,20 +50,20 @@
 class P2PManager;
 class PayloadStateInterface;
 class PowerManagerInterface;
-class PrefsInterface;
 class UpdateAttempter;
 
 // An interface to global system context, including platform resources,
 // the current state of the system, high-level objects whose lifetime is same
 // as main, system interfaces, etc.
 // Carved out separately so it can be mocked for unit tests.
-// Currently it has only one method, but we should start migrating other
-// methods to use this as and when needed to unit test them.
-// TODO(jaysri): Consider renaming this to something like GlobalContext.
 class SystemState {
  public:
-  // Destructs this object.
-  virtual ~SystemState() {}
+  virtual ~SystemState() = default;
+
+  static SystemState* Get() {
+    CHECK(g_pointer_ != nullptr);
+    return g_pointer_;
+  }
 
   // Sets or gets the latest device policy.
   virtual void set_device_policy(const policy::DevicePolicy* device_policy) = 0;
@@ -113,8 +119,11 @@
 
   // Returns a pointer to the DlcServiceInterface singleton.
   virtual DlcServiceInterface* dlcservice() = 0;
+
+ protected:
+  static SystemState* g_pointer_;
 };
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_SYSTEM_STATE_H_
+#endif  // UPDATE_ENGINE_COMMON_SYSTEM_STATE_H_
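
SystemState now exposes a process-wide Get() accessor: a concrete implementation is expected to point |g_pointer_| at itself, after which callers no longer need a SystemState* threaded through constructors (the pattern the cros/ sources below switch to). A small caller-side sketch follows; the helper is hypothetical and assumes some concrete SystemState has already registered itself (Get() CHECKs this).

    #include "update_engine/common/constants.h"
    #include "update_engine/common/system_state.h"

    namespace chromeos_update_engine {

    // Hypothetical helper demonstrating the global accessor pattern.
    bool IsP2PEnabledFromPrefs() {
      const auto* prefs = SystemState::Get()->prefs();
      bool enabled = false;
      return prefs->Exists(kPrefsP2PEnabled) &&
             prefs->GetBoolean(kPrefsP2PEnabled, &enabled) && enabled;
    }

    }  // namespace chromeos_update_engine
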
diff --git a/common/test_utils.h b/common/test_utils.h
index 44b7aa1..bb5a678 100644
--- a/common/test_utils.h
+++ b/common/test_utils.h
@@ -78,7 +78,7 @@
 
 void FillWithData(brillo::Blob* buffer);
 
-// Compare the value of native array for download source parameter.
+// Compare the value of builtin array for download source parameter.
 MATCHER_P(DownloadSourceMatcher, source_array, "") {
   return std::equal(source_array, source_array + kNumDownloadSources, arg);
 }
@@ -138,22 +138,6 @@
   DISALLOW_COPY_AND_ASSIGN(ScopedLoopbackDeviceBinder);
 };
 
-class ScopedTempFile {
- public:
-  ScopedTempFile() : ScopedTempFile("update_engine_test_temp_file.XXXXXX") {}
-
-  explicit ScopedTempFile(const std::string& pattern) {
-    EXPECT_TRUE(utils::MakeTempFile(pattern, &path_, nullptr));
-    unlinker_.reset(new ScopedPathUnlinker(path_));
-  }
-
-  const std::string& path() const { return path_; }
-
- private:
-  std::string path_;
-  std::unique_ptr<ScopedPathUnlinker> unlinker_;
-};
-
 class ScopedLoopMounter {
  public:
   explicit ScopedLoopMounter(const std::string& file_path,
diff --git a/common/utils.cc b/common/utils.cc
index fc89040..5dbb445 100644
--- a/common/utils.cc
+++ b/common/utils.cc
@@ -28,6 +28,7 @@
 #include <string.h>
 #include <sys/mount.h>
 #include <sys/resource.h>
+#include <sys/sendfile.h>
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <time.h>
@@ -52,7 +53,6 @@
 #include <base/strings/stringprintf.h>
 #include <brillo/data_encoding.h>
 
-#include "update_engine/common/clock_interface.h"
 #include "update_engine/common/constants.h"
 #include "update_engine/common/platform_constants.h"
 #include "update_engine/common/prefs_interface.h"
@@ -84,49 +84,6 @@
 // The path to the kernel's boot_id.
 const char kBootIdPath[] = "/proc/sys/kernel/random/boot_id";
 
-// Return true if |disk_name| is an MTD or a UBI device. Note that this test is
-// simply based on the name of the device.
-bool IsMtdDeviceName(const string& disk_name) {
-  return base::StartsWith(
-             disk_name, "/dev/ubi", base::CompareCase::SENSITIVE) ||
-         base::StartsWith(disk_name, "/dev/mtd", base::CompareCase::SENSITIVE);
-}
-
-// Return the device name for the corresponding partition on a NAND device.
-// WARNING: This function returns device names that are not mountable.
-string MakeNandPartitionName(int partition_num) {
-  switch (partition_num) {
-    case 2:
-    case 4:
-    case 6: {
-      return base::StringPrintf("/dev/mtd%d", partition_num);
-    }
-    default: {
-      return base::StringPrintf("/dev/ubi%d_0", partition_num);
-    }
-  }
-}
-
-// Return the device name for the corresponding partition on a NAND device that
-// may be mountable (but may not be writable).
-string MakeNandPartitionNameForMount(int partition_num) {
-  switch (partition_num) {
-    case 2:
-    case 4:
-    case 6: {
-      return base::StringPrintf("/dev/mtd%d", partition_num);
-    }
-    case 3:
-    case 5:
-    case 7: {
-      return base::StringPrintf("/dev/ubiblock%d_0", partition_num);
-    }
-    default: {
-      return base::StringPrintf("/dev/ubi%d_0", partition_num);
-    }
-  }
-}
-
 // If |path| is absolute, or explicit relative to the current working directory,
 // leaves it as is. Otherwise, uses the system's temp directory, as defined by
 // base::GetTempDir() and prepends it to |path|. On success stores the full
@@ -155,27 +112,6 @@
 
 namespace utils {
 
-string ParseECVersion(string input_line) {
-  base::TrimWhitespaceASCII(input_line, base::TRIM_ALL, &input_line);
-
-  // At this point we want to convert the format key=value pair from mosys to
-  // a vector of key value pairs.
-  vector<pair<string, string>> kv_pairs;
-  if (base::SplitStringIntoKeyValuePairs(input_line, '=', ' ', &kv_pairs)) {
-    for (const pair<string, string>& kv_pair : kv_pairs) {
-      // Finally match against the fw_verion which may have quotes.
-      if (kv_pair.first == "fw_version") {
-        string output;
-        // Trim any quotes.
-        base::TrimString(kv_pair.second, "\"", &output);
-        return output;
-      }
-    }
-  }
-  LOG(ERROR) << "Unable to parse fwid from ec info.";
-  return "";
-}
-
 bool WriteFile(const char* path, const void* data, size_t data_len) {
   int fd = HANDLE_EINTR(open(path, O_WRONLY | O_CREAT | O_TRUNC, 0600));
   TEST_AND_RETURN_FALSE_ERRNO(fd >= 0);
@@ -256,10 +192,10 @@
   return true;
 }
 
-bool PWriteAll(const FileDescriptorPtr& fd,
-               const void* buf,
-               size_t count,
-               off_t offset) {
+bool WriteAll(const FileDescriptorPtr& fd,
+              const void* buf,
+              size_t count,
+              off_t offset) {
   TEST_AND_RETURN_FALSE_ERRNO(fd->Seek(offset, SEEK_SET) !=
                               static_cast<off_t>(-1));
   return WriteAll(fd, buf, count);
@@ -282,11 +218,11 @@
   return true;
 }
 
-bool PReadAll(const FileDescriptorPtr& fd,
-              void* buf,
-              size_t count,
-              off_t offset,
-              ssize_t* out_bytes_read) {
+bool ReadAll(const FileDescriptorPtr& fd,
+             void* buf,
+             size_t count,
+             off_t offset,
+             ssize_t* out_bytes_read) {
   TEST_AND_RETURN_FALSE_ERRNO(fd->Seek(offset, SEEK_SET) !=
                               static_cast<off_t>(-1));
   char* c_buf = static_cast<char*>(buf);
@@ -303,6 +239,31 @@
   return true;
 }
 
+bool PReadAll(const FileDescriptorPtr& fd,
+              void* buf,
+              size_t count,
+              off_t offset,
+              ssize_t* out_bytes_read) {
+  auto old_off = fd->Seek(0, SEEK_CUR);
+  TEST_AND_RETURN_FALSE_ERRNO(old_off >= 0);
+
+  auto success = ReadAll(fd, buf, count, offset, out_bytes_read);
+  TEST_AND_RETURN_FALSE_ERRNO(fd->Seek(old_off, SEEK_SET) == old_off);
+  return success;
+}
+
+bool PWriteAll(const FileDescriptorPtr& fd,
+               const void* buf,
+               size_t count,
+               off_t offset) {
+  auto old_off = fd->Seek(0, SEEK_CUR);
+  TEST_AND_RETURN_FALSE_ERRNO(old_off >= 0);
+
+  auto success = WriteAll(fd, buf, count, offset);
+  TEST_AND_RETURN_FALSE_ERRNO(fd->Seek(old_off, SEEK_SET) == old_off);
+  return success;
+}
+
 // Append |nbytes| of content from |buf| to the vector pointed to by either
 // |vec_p| or |str_p|.
 static void AppendBytes(const uint8_t* buf,
@@ -474,22 +435,6 @@
     return false;
   }
 
-  size_t partition_name_len = string::npos;
-  if (partition_name[last_nondigit_pos] == '_') {
-    // NAND block devices have weird naming which could be something
-    // like "/dev/ubiblock2_0". We discard "_0" in such a case.
-    size_t prev_nondigit_pos =
-        partition_name.find_last_not_of("0123456789", last_nondigit_pos - 1);
-    if (prev_nondigit_pos == string::npos ||
-        (prev_nondigit_pos + 1) == last_nondigit_pos) {
-      LOG(ERROR) << "Unable to parse partition device name: " << partition_name;
-      return false;
-    }
-
-    partition_name_len = last_nondigit_pos - prev_nondigit_pos;
-    last_nondigit_pos = prev_nondigit_pos;
-  }
-
   if (out_disk_name) {
     // Special case for MMC devices which have the following naming scheme:
     // mmcblk0p2
@@ -502,8 +447,7 @@
   }
 
   if (out_partition_num) {
-    string partition_str =
-        partition_name.substr(last_nondigit_pos + 1, partition_name_len);
+    string partition_str = partition_name.substr(last_nondigit_pos + 1);
     *out_partition_num = atoi(partition_str.c_str());
   }
   return true;
@@ -520,13 +464,6 @@
     return string();
   }
 
-  if (IsMtdDeviceName(disk_name)) {
-    // Special case for UBI block devices.
-    //   1. ubiblock is not writable, we need to use plain "ubi".
-    //   2. There is a "_0" suffix.
-    return MakeNandPartitionName(partition_num);
-  }
-
   string partition_name = disk_name;
   if (isdigit(partition_name.back())) {
     // Special case for devices with names ending with a digit.
@@ -540,17 +477,6 @@
   return partition_name;
 }
 
-string MakePartitionNameForMount(const string& part_name) {
-  if (IsMtdDeviceName(part_name)) {
-    int partition_num;
-    if (!SplitPartitionName(part_name, nullptr, &partition_num)) {
-      return "";
-    }
-    return MakeNandPartitionNameForMount(partition_num);
-  }
-  return part_name;
-}
-
 string ErrnoNumberAsString(int err) {
   char buf[100];
   buf[0] = '\0';
@@ -567,31 +493,9 @@
   return lstat(path, &stbuf) == 0 && S_ISLNK(stbuf.st_mode) != 0;
 }
 
-bool TryAttachingUbiVolume(int volume_num, int timeout) {
-  const string volume_path = base::StringPrintf("/dev/ubi%d_0", volume_num);
-  if (FileExists(volume_path.c_str())) {
-    return true;
-  }
-
-  int exit_code;
-  vector<string> cmd = {"ubiattach",
-                        "-m",
-                        base::StringPrintf("%d", volume_num),
-                        "-d",
-                        base::StringPrintf("%d", volume_num)};
-  TEST_AND_RETURN_FALSE(Subprocess::SynchronousExec(cmd, &exit_code, nullptr));
-  TEST_AND_RETURN_FALSE(exit_code == 0);
-
-  cmd = {"ubiblock", "--create", volume_path};
-  TEST_AND_RETURN_FALSE(Subprocess::SynchronousExec(cmd, &exit_code, nullptr));
-  TEST_AND_RETURN_FALSE(exit_code == 0);
-
-  while (timeout > 0 && !FileExists(volume_path.c_str())) {
-    sleep(1);
-    timeout--;
-  }
-
-  return FileExists(volume_path.c_str());
+bool IsRegFile(const char* path) {
+  struct stat stbuf;
+  return lstat(path, &stbuf) == 0 && S_ISREG(stbuf.st_mode) != 0;
 }
 
 bool MakeTempFile(const string& base_filename_template,
@@ -925,7 +829,7 @@
   return base_code;
 }
 
-string StringVectorToString(const vector<string> &vec_str) {
+string StringVectorToString(const vector<string>& vec_str) {
   string str = "[";
   for (vector<string>::const_iterator i = vec_str.begin(); i != vec_str.end();
        ++i) {
@@ -954,7 +858,7 @@
                             encoded_hash.c_str());
 }
 
-bool ConvertToOmahaInstallDate(Time time, int *out_num_days) {
+bool ConvertToOmahaInstallDate(Time time, int* out_num_days) {
   time_t unix_time = time.ToTimeT();
   // Output of: date +"%s" --date="Jan 1, 2007 0:00 PST".
   const time_t kOmahaEpoch = 1167638400;
@@ -1016,6 +920,25 @@
   return true;
 }
 
+bool GetVpdValue(string key, string* result) {
+  int exit_code = 0;
+  string value, error;
+  vector<string> cmd = {"vpd_get_value", key};
+  if (!chromeos_update_engine::Subprocess::SynchronousExec(
+          cmd, &exit_code, &value, &error) ||
+      exit_code) {
+    LOG(ERROR) << "Failed to get vpd value for " << key
+               << " with exit code: " << exit_code << " and error: " << error;
+    return false;
+  } else if (!error.empty()) {
+    LOG(INFO) << "vpd_get_value succeeded but with following errors: " << error;
+  }
+
+  base::TrimWhitespaceASCII(value, base::TRIM_ALL, &value);
+  *result = value;
+  return true;
+}
+
 bool GetBootId(string* boot_id) {
   TEST_AND_RETURN_FALSE(
       base::ReadFileToString(base::FilePath(kBootIdPath), boot_id));
@@ -1083,6 +1006,40 @@
   return str;
 }
 
+string GetExclusionName(const string& str_to_convert) {
+  return base::NumberToString(base::StringPieceHash()(str_to_convert));
+}
+
+static bool ParseTimestamp(const std::string& str, int64_t* out) {
+  if (!base::StringToInt64(str, out)) {
+    LOG(WARNING) << "Invalid timestamp: " << str;
+    return false;
+  }
+  return true;
+}
+
+ErrorCode IsTimestampNewer(const std::string& old_version,
+                           const std::string& new_version) {
+  if (old_version.empty() || new_version.empty()) {
+    LOG(WARNING)
+        << "One of old/new timestamp is empty, permit update anyway. Old: "
+        << old_version << " New: " << new_version;
+    return ErrorCode::kSuccess;
+  }
+  int64_t old_ver = 0;
+  if (!ParseTimestamp(old_version, &old_ver)) {
+    return ErrorCode::kError;
+  }
+  int64_t new_ver = 0;
+  if (!ParseTimestamp(new_version, &new_ver)) {
+    return ErrorCode::kDownloadManifestParseError;
+  }
+  if (old_ver > new_ver) {
+    return ErrorCode::kPayloadTimestampError;
+  }
+  return ErrorCode::kSuccess;
+}
+
 }  // namespace utils
 
 }  // namespace chromeos_update_engine
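
The renamed ReadAll()/WriteAll() overloads take an offset but move the file position, while the re-added PReadAll()/PWriteAll() wrap them and restore the original offset afterwards, matching pread(2)/pwrite(2). A short sketch of the difference; the helper name and offsets are illustrative only.

    #include "update_engine/common/file_descriptor.h"
    #include "update_engine/common/utils.h"

    namespace chromeos_update_engine {

    // Hypothetical helper; |fd| is any open, seekable FileDescriptorPtr.
    void OffsetSemanticsSketch(const FileDescriptorPtr& fd) {
      char buf[16];
      ssize_t bytes_read = 0;

      // ReadAll() seeks to offset 100 and leaves the position after the read.
      utils::ReadAll(fd, buf, sizeof(buf), 100, &bytes_read);

      // PReadAll() reads at offset 200 but saves and restores the position,
      // so the caller's current offset (100 + bytes_read) is unaffected.
      utils::PReadAll(fd, buf, sizeof(buf), 200, &bytes_read);
    }

    }  // namespace chromeos_update_engine
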
diff --git a/common/utils.h b/common/utils.h
index c6c34f4..59f236e 100644
--- a/common/utils.h
+++ b/common/utils.h
@@ -18,6 +18,7 @@
 #define UPDATE_ENGINE_COMMON_UTILS_H_
 
 #include <errno.h>
+#include <sys/types.h>
 #include <time.h>
 #include <unistd.h>
 
@@ -53,10 +54,6 @@
 std::string CalculateP2PFileId(const brillo::Blob& payload_hash,
                                size_t payload_size);
 
-// Parse the firmware version from one line of output from the
-// "mosys" command.
-std::string ParseECVersion(std::string input_line);
-
 // Writes the data passed to path. The file at path will be overwritten if it
 // exists. Returns true on success, false otherwise.
 bool WriteFile(const char* path, const void* data, size_t data_len);
@@ -67,6 +64,15 @@
 bool PWriteAll(int fd, const void* buf, size_t count, off_t offset);
 
 bool WriteAll(const FileDescriptorPtr& fd, const void* buf, size_t count);
+// WriteAll writes data at |off|, but it modifies the file position.
+bool WriteAll(const FileDescriptorPtr& fd,
+              const void* buf,
+              size_t count,
+              off_t off);
+
+// https://man7.org/linux/man-pages/man2/pread.2.html
+// PWriteAll writes data at the specified offset, but it DOES NOT modify the
+// file position. Behaves similarly to Linux's pwrite(2) syscall.
 bool PWriteAll(const FileDescriptorPtr& fd,
                const void* buf,
                size_t count,
@@ -85,6 +91,16 @@
 bool PReadAll(
     int fd, void* buf, size_t count, off_t offset, ssize_t* out_bytes_read);
 
+// Reads data at the specified offset; note that it changes the file position.
+bool ReadAll(const FileDescriptorPtr& fd,
+             void* buf,
+             size_t count,
+             off_t offset,
+             ssize_t* out_bytes_read);
+
+// https://man7.org/linux/man-pages/man2/pread.2.html
+// Reads data at the specified offset; this function DOES NOT change the file
+// position. Behaves similarly to Linux's pread(2) syscall.
 bool PReadAll(const FileDescriptorPtr& fd,
               void* buf,
               size_t count,
@@ -128,10 +144,8 @@
 // Returns true if |path| exists and is a symbolic link.
 bool IsSymlink(const char* path);
 
-// Try attaching UBI |volume_num|. If there is any error executing required
-// commands to attach the volume, this function returns false. This function
-// only returns true if "/dev/ubi%d_0" becomes available in |timeout| seconds.
-bool TryAttachingUbiVolume(int volume_num, int timeout);
+// Returns true iff |path| exists and is a regular file.
+bool IsRegFile(const char* path);
 
 // If |base_filename_template| is neither absolute (starts with "/") nor
 // explicitly relative to the current working directory (starts with "./" or
@@ -163,14 +177,6 @@
 // Returns empty string when invalid parameters are passed in
 std::string MakePartitionName(const std::string& disk_name, int partition_num);
 
-// Similar to "MakePartitionName" but returns a name that is suitable for
-// mounting. On NAND system we can write to "/dev/ubiX_0", which is what
-// MakePartitionName returns, but we cannot mount that device. To mount, we
-// have to use "/dev/ubiblockX_0" for rootfs. Stateful and OEM partitions are
-// mountable with "/dev/ubiX_0". The input is a partition device such as
-// /dev/sda3. Return empty string on error.
-std::string MakePartitionNameForMount(const std::string& part_name);
-
 // Set the read-only attribute on the block device |device| to the value passed
 // in |read_only|. Return whether the operation succeeded.
 bool SetBlockDeviceReadOnly(const std::string& device, bool read_only);
@@ -305,6 +311,10 @@
 // reboot. Returns whether it succeeded getting the boot_id.
 bool GetBootId(std::string* boot_id);
 
+// Gets a string value from the vpd for a given key using the `vpd_get_value`
+// shell command. Returns true on success.
+bool GetVpdValue(std::string key, std::string* result);
+
 // This function gets the file path of the file pointed to by FileDiscriptor.
 std::string GetFilePath(int fd);
 
@@ -332,6 +342,20 @@
 
 // Return a string representation of |utime| for log file names.
 std::string GetTimeAsString(time_t utime);
+// Returns the hash of |str_to_convert| formatted as a string, which can be
+// used with |Excluder| as the exclusion name.
+std::string GetExclusionName(const std::string& str_to_convert);
+
+// Parses |old_version| and |new_version| as integer timestamps.
+// Returns kSuccess if |new_version| is newer (larger), or if either one is
+// empty.
+// Returns kError if |old_version| is not empty and not an integer.
+// Returns kDownloadManifestParseError if |new_version| is not empty and not
+// an integer.
+// Returns kPayloadTimestampError if both are integers but |new_version| <
+// |old_version|.
+ErrorCode IsTimestampNewer(const std::string& old_version,
+                           const std::string& new_version);
 
 }  // namespace utils
 
@@ -369,6 +393,48 @@
   DISALLOW_COPY_AND_ASSIGN(ScopedPathUnlinker);
 };
 
+class ScopedTempFile {
+ public:
+  ScopedTempFile() : ScopedTempFile("update_engine_temp.XXXXXX") {}
+
+  // If |open_fd| is true, a writable file descriptor will be opened for this
+  // file.
+  // If |truncate_size| is non-zero, truncate file to that size on creation.
+  explicit ScopedTempFile(const std::string& pattern,
+                          bool open_fd = false,
+                          size_t truncate_size = 0) {
+    CHECK(utils::MakeTempFile(pattern, &path_, open_fd ? &fd_ : nullptr));
+    unlinker_.reset(new ScopedPathUnlinker(path_));
+    if (open_fd) {
+      CHECK_GE(fd_, 0);
+      fd_closer_.reset(new ScopedFdCloser(&fd_));
+    }
+    if (truncate_size > 0) {
+      CHECK_EQ(0, truncate(path_.c_str(), truncate_size));
+    }
+  }
+  virtual ~ScopedTempFile() = default;
+
+  const std::string& path() const { return path_; }
+  int fd() const {
+    CHECK(fd_closer_);
+    return fd_;
+  }
+  void CloseFd() {
+    CHECK(fd_closer_);
+    fd_closer_.reset();
+  }
+
+ private:
+  std::string path_;
+  std::unique_ptr<ScopedPathUnlinker> unlinker_;
+
+  int fd_{-1};
+  std::unique_ptr<ScopedFdCloser> fd_closer_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedTempFile);
+};
+
 // A little object to call ActionComplete on the ActionProcessor when
 // it's destructed.
 class ScopedActionCompleter {
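
ScopedTempFile, promoted here from test_utils.h into utils.h, can now keep an open, auto-closed descriptor and pre-size the file at creation. A usage sketch follows; the pattern string and size are illustrative, and the integer-fd utils::WriteAll() overload from the same header is assumed.

    #include "update_engine/common/utils.h"

    namespace chromeos_update_engine {

    void ScopedTempFileSketch() {
      // A 4 KiB scratch file with a writable fd. The fd is closed and the
      // path unlinked automatically when |file| goes out of scope.
      ScopedTempFile file("sketch.XXXXXX", /*open_fd=*/true,
                          /*truncate_size=*/4096);
      utils::WriteAll(file.fd(), "data", 4);
      file.CloseFd();  // Optional: release the fd early; the file itself is
                       // removed only when |file| is destroyed.
    }

    }  // namespace chromeos_update_engine
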
diff --git a/common/utils_unittest.cc b/common/utils_unittest.cc
index b4ac2f5..20c6b84 100644
--- a/common/utils_unittest.cc
+++ b/common/utils_unittest.cc
@@ -41,25 +41,12 @@
 
 class UtilsTest : public ::testing::Test {};
 
-TEST(UtilsTest, CanParseECVersion) {
-  // Should be able to parse and valid key value line.
-  EXPECT_EQ("12345", utils::ParseECVersion("fw_version=12345"));
-  EXPECT_EQ("123456",
-            utils::ParseECVersion("b=1231a fw_version=123456 a=fasd2"));
-  EXPECT_EQ("12345", utils::ParseECVersion("fw_version=12345"));
-  EXPECT_EQ("00VFA616",
-            utils::ParseECVersion("vendor=\"sam\" fw_version=\"00VFA616\""));
-
-  // For invalid entries, should return the empty string.
-  EXPECT_EQ("", utils::ParseECVersion("b=1231a fw_version a=fasd2"));
-}
-
 TEST(UtilsTest, WriteFileOpenFailure) {
   EXPECT_FALSE(utils::WriteFile("/this/doesn't/exist", "hello", 5));
 }
 
 TEST(UtilsTest, WriteFileReadFile) {
-  test_utils::ScopedTempFile file;
+  ScopedTempFile file;
   EXPECT_TRUE(utils::WriteFile(file.path().c_str(), "hello", 5));
 
   brillo::Blob readback;
@@ -73,7 +60,7 @@
 }
 
 TEST(UtilsTest, ReadFileChunk) {
-  test_utils::ScopedTempFile file;
+  ScopedTempFile file;
   brillo::Blob data;
   const size_t kSize = 1024 * 1024;
   for (size_t i = 0; i < kSize; i++) {
@@ -123,10 +110,6 @@
   EXPECT_EQ("/dev/mmcblk0", disk);
   EXPECT_EQ(3, part_num);
 
-  EXPECT_TRUE(utils::SplitPartitionName("/dev/ubiblock3_2", &disk, &part_num));
-  EXPECT_EQ("/dev/ubiblock", disk);
-  EXPECT_EQ(3, part_num);
-
   EXPECT_TRUE(utils::SplitPartitionName("/dev/loop10", &disk, &part_num));
   EXPECT_EQ("/dev/loop", disk);
   EXPECT_EQ(10, part_num);
@@ -135,14 +118,6 @@
   EXPECT_EQ("/dev/loop28", disk);
   EXPECT_EQ(11, part_num);
 
-  EXPECT_TRUE(utils::SplitPartitionName("/dev/loop10_0", &disk, &part_num));
-  EXPECT_EQ("/dev/loop", disk);
-  EXPECT_EQ(10, part_num);
-
-  EXPECT_TRUE(utils::SplitPartitionName("/dev/loop28p11_0", &disk, &part_num));
-  EXPECT_EQ("/dev/loop28", disk);
-  EXPECT_EQ(11, part_num);
-
   EXPECT_FALSE(utils::SplitPartitionName("/dev/mmcblk0p", &disk, &part_num));
   EXPECT_FALSE(utils::SplitPartitionName("/dev/sda", &disk, &part_num));
   EXPECT_FALSE(utils::SplitPartitionName("/dev/foo/bar", &disk, &part_num));
@@ -157,29 +132,6 @@
   EXPECT_EQ("/dev/mmcblk0p2", utils::MakePartitionName("/dev/mmcblk0", 2));
   EXPECT_EQ("/dev/loop8", utils::MakePartitionName("/dev/loop", 8));
   EXPECT_EQ("/dev/loop12p2", utils::MakePartitionName("/dev/loop12", 2));
-  EXPECT_EQ("/dev/ubi5_0", utils::MakePartitionName("/dev/ubiblock", 5));
-  EXPECT_EQ("/dev/mtd4", utils::MakePartitionName("/dev/ubiblock", 4));
-  EXPECT_EQ("/dev/ubi3_0", utils::MakePartitionName("/dev/ubiblock", 3));
-  EXPECT_EQ("/dev/mtd2", utils::MakePartitionName("/dev/ubiblock", 2));
-  EXPECT_EQ("/dev/ubi1_0", utils::MakePartitionName("/dev/ubiblock", 1));
-}
-
-TEST(UtilsTest, MakePartitionNameForMountTest) {
-  EXPECT_EQ("/dev/sda4", utils::MakePartitionNameForMount("/dev/sda4"));
-  EXPECT_EQ("/dev/sda123", utils::MakePartitionNameForMount("/dev/sda123"));
-  EXPECT_EQ("/dev/mmcblk2", utils::MakePartitionNameForMount("/dev/mmcblk2"));
-  EXPECT_EQ("/dev/mmcblk0p2",
-            utils::MakePartitionNameForMount("/dev/mmcblk0p2"));
-  EXPECT_EQ("/dev/loop0", utils::MakePartitionNameForMount("/dev/loop0"));
-  EXPECT_EQ("/dev/loop8", utils::MakePartitionNameForMount("/dev/loop8"));
-  EXPECT_EQ("/dev/loop12p2", utils::MakePartitionNameForMount("/dev/loop12p2"));
-  EXPECT_EQ("/dev/ubiblock5_0",
-            utils::MakePartitionNameForMount("/dev/ubiblock5_0"));
-  EXPECT_EQ("/dev/mtd4", utils::MakePartitionNameForMount("/dev/ubi4_0"));
-  EXPECT_EQ("/dev/ubiblock3_0",
-            utils::MakePartitionNameForMount("/dev/ubiblock3"));
-  EXPECT_EQ("/dev/mtd2", utils::MakePartitionNameForMount("/dev/ubi2"));
-  EXPECT_EQ("/dev/ubi1_0", utils::MakePartitionNameForMount("/dev/ubiblock1"));
 }
 
 TEST(UtilsTest, FuzzIntTest) {
@@ -197,7 +149,7 @@
 namespace {
 void GetFileFormatTester(const string& expected,
                          const vector<uint8_t>& contents) {
-  test_utils::ScopedTempFile file;
+  ScopedTempFile file;
   ASSERT_TRUE(utils::WriteFile(file.path().c_str(),
                                reinterpret_cast<const char*>(contents.data()),
                                contents.size()));
@@ -426,7 +378,7 @@
 }
 
 TEST(UtilsTest, RunAsRootUnmountFilesystemBusyFailureTest) {
-  test_utils::ScopedTempFile tmp_image("img.XXXXXX");
+  ScopedTempFile tmp_image("img.XXXXXX");
 
   EXPECT_TRUE(base::CopyFile(
       test_utils::GetBuildArtifactsPath().Append("gen/disk_ext2_4k.img"),
@@ -466,7 +418,7 @@
   EXPECT_TRUE(mnt_dir.CreateUniqueTempDir());
   EXPECT_FALSE(utils::IsMountpoint(mnt_dir.GetPath().value()));
 
-  test_utils::ScopedTempFile file;
+  ScopedTempFile file;
   EXPECT_FALSE(utils::IsMountpoint(file.path()));
 }
 
@@ -508,7 +460,7 @@
 }
 
 TEST(UtilsTest, GetFilePathTest) {
-  test_utils::ScopedTempFile file;
+  ScopedTempFile file;
   int fd = HANDLE_EINTR(open(file.path().c_str(), O_RDONLY));
   EXPECT_GE(fd, 0);
   EXPECT_EQ(file.path(), utils::GetFilePath(fd));
@@ -516,4 +468,14 @@
   IGNORE_EINTR(close(fd));
 }
 
+TEST(UtilsTest, ValidatePerPartitionTimestamp) {
+  ASSERT_EQ(ErrorCode::kPayloadTimestampError,
+            utils::IsTimestampNewer("10", "5"));
+  ASSERT_EQ(ErrorCode::kSuccess, utils::IsTimestampNewer("10", "11"));
+  ASSERT_EQ(ErrorCode::kDownloadManifestParseError,
+            utils::IsTimestampNewer("10", "lol"));
+  ASSERT_EQ(ErrorCode::kError, utils::IsTimestampNewer("lol", "ZZZ"));
+  ASSERT_EQ(ErrorCode::kSuccess, utils::IsTimestampNewer("10", ""));
+}
+
 }  // namespace chromeos_update_engine
diff --git a/connection_manager_android.cc b/connection_manager_android.cc
deleted file mode 100644
index 9d0c57b..0000000
--- a/connection_manager_android.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-//
-// Copyright (C) 2016 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/connection_manager_android.h"
-
-#include <memory>
-
-namespace chromeos_update_engine {
-
-namespace connection_manager {
-std::unique_ptr<ConnectionManagerInterface> CreateConnectionManager(
-    SystemState* system_state) {
-  return std::unique_ptr<ConnectionManagerInterface>(
-      new ConnectionManagerAndroid());
-}
-}  // namespace connection_manager
-
-bool ConnectionManagerAndroid::GetConnectionProperties(
-    ConnectionType* out_type, ConnectionTethering* out_tethering) {
-  return false;
-}
-bool ConnectionManagerAndroid::IsUpdateAllowedOver(
-    ConnectionType type, ConnectionTethering tethering) const {
-  return true;
-}
-bool ConnectionManagerAndroid::IsAllowedConnectionTypesForUpdateSet() const {
-  return false;
-}
-
-}  // namespace chromeos_update_engine
diff --git a/connection_manager_android.h b/connection_manager_android.h
deleted file mode 100644
index 006f4ea..0000000
--- a/connection_manager_android.h
+++ /dev/null
@@ -1,44 +0,0 @@
-//
-// Copyright (C) 2016 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_CONNECTION_MANAGER_ANDROID_H_
-#define UPDATE_ENGINE_CONNECTION_MANAGER_ANDROID_H_
-
-#include <base/macros.h>
-
-#include "update_engine/connection_manager_interface.h"
-
-namespace chromeos_update_engine {
-
-// TODO(senj): Remove this class and use ShillProvider from the UpdateManager.
-class ConnectionManagerAndroid : public ConnectionManagerInterface {
- public:
-  ConnectionManagerAndroid() = default;
-  ~ConnectionManagerAndroid() override = default;
-
-  // ConnectionManagerInterface overrides.
-  bool GetConnectionProperties(ConnectionType* out_type,
-                               ConnectionTethering* out_tethering) override;
-  bool IsUpdateAllowedOver(ConnectionType type,
-                           ConnectionTethering tethering) const override;
-  bool IsAllowedConnectionTypesForUpdateSet() const override;
-
-  DISALLOW_COPY_AND_ASSIGN(ConnectionManagerAndroid);
-};
-
-}  // namespace chromeos_update_engine
-
-#endif  // UPDATE_ENGINE_CONNECTION_MANAGER_ANDROID_H_
diff --git a/boot_control_chromeos.cc b/cros/boot_control_chromeos.cc
similarity index 83%
rename from boot_control_chromeos.cc
rename to cros/boot_control_chromeos.cc
index 0f47169..17659ae 100644
--- a/boot_control_chromeos.cc
+++ b/cros/boot_control_chromeos.cc
@@ -14,16 +14,19 @@
 // limitations under the License.
 //
 
-#include "update_engine/boot_control_chromeos.h"
+#include "update_engine/cros/boot_control_chromeos.h"
 
 #include <memory>
 #include <string>
 #include <utility>
+#include <vector>
 
 #include <base/bind.h>
 #include <base/files/file_path.h>
 #include <base/files/file_util.h>
+#include <base/strings/string_split.h>
 #include <base/strings/string_util.h>
+#include <chromeos/constants/imageloader.h>
 #include <rootdev/rootdev.h>
 
 extern "C" {
@@ -36,6 +39,7 @@
 #include "update_engine/common/utils.h"
 
 using std::string;
+using std::vector;
 
 namespace {
 
@@ -44,8 +48,7 @@
 const char* kAndroidPartitionNameKernel = "boot";
 const char* kAndroidPartitionNameRoot = "system";
 
-const char kDlcInstallRootDirectoryEncrypted[] = "/home/chronos/dlc";
-const char kPartitionNamePrefixDlc[] = "dlc_";
+const char kPartitionNamePrefixDlc[] = "dlc";
 const char kPartitionNameDlcA[] = "dlc_a";
 const char kPartitionNameDlcB[] = "dlc_b";
 const char kPartitionNameDlcImage[] = "dlc.img";
@@ -125,8 +128,9 @@
   }
   if (current_slot_ >= num_slots_) {
     LOG(ERROR) << "Couldn't find the slot number corresponding to the "
-               << "partition " << boot_device << ", number of slots: "
-               << num_slots_ << ". This device is not updateable.";
+               << "partition " << boot_device
+               << ", number of slots: " << num_slots_
+               << ". This device is not updateable.";
     num_slots_ = 1;
     current_slot_ = BootControlInterface::kInvalidSlot;
     return false;
@@ -148,24 +152,47 @@
   return current_slot_;
 }
 
-bool BootControlChromeOS::GetPartitionDevice(const string& partition_name,
-                                             unsigned int slot,
-                                             string* device) const {
+bool BootControlChromeOS::ParseDlcPartitionName(
+    const std::string partition_name,
+    std::string* dlc_id,
+    std::string* dlc_package) const {
+  CHECK_NE(dlc_id, nullptr);
+  CHECK_NE(dlc_package, nullptr);
+
+  vector<string> tokens = base::SplitString(
+      partition_name, "/", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+  if (tokens.size() != 3 || tokens[0] != kPartitionNamePrefixDlc) {
+    LOG(ERROR) << "DLC partition name (" << partition_name
+               << ") is not well formatted.";
+    return false;
+  }
+  if (tokens[1].empty() || tokens[2].empty()) {
+    LOG(ERROR) << " partition name does not contain valid DLC ID (" << tokens[1]
+               << ") or package (" << tokens[2] << ")";
+    return false;
+  }
+
+  *dlc_id = tokens[1];
+  *dlc_package = tokens[2];
+  return true;
+}
+
+bool BootControlChromeOS::GetPartitionDevice(const std::string& partition_name,
+                                             BootControlInterface::Slot slot,
+                                             bool not_in_payload,
+                                             std::string* device,
+                                             bool* is_dynamic) const {
   // Partition name prefixed with |kPartitionNamePrefixDlc| is a DLC module.
   if (base::StartsWith(partition_name,
                        kPartitionNamePrefixDlc,
                        base::CompareCase::SENSITIVE)) {
-    // Extract DLC module ID from partition_name (DLC module ID is the string
-    // after |kPartitionNamePrefixDlc| in partition_name).
-    const auto dlc_module_id =
-        partition_name.substr(strlen(kPartitionNamePrefixDlc));
-    if (dlc_module_id.empty()) {
-      LOG(ERROR) << " partition name does not contain DLC module ID:"
-                 << partition_name;
+    string dlc_id, dlc_package;
+    if (!ParseDlcPartitionName(partition_name, &dlc_id, &dlc_package))
       return false;
-    }
-    *device = base::FilePath(kDlcInstallRootDirectoryEncrypted)
-                  .Append(dlc_module_id)
+
+    *device = base::FilePath(imageloader::kDlcImageRootpath)
+                  .Append(dlc_id)
+                  .Append(dlc_package)
                   .Append(slot == 0 ? kPartitionNameDlcA : kPartitionNameDlcB)
                   .Append(kPartitionNameDlcImage)
                   .value();
@@ -180,9 +207,18 @@
     return false;
 
   *device = part_device;
+  if (is_dynamic) {
+    *is_dynamic = false;
+  }
   return true;
 }
 
+bool BootControlChromeOS::GetPartitionDevice(const string& partition_name,
+                                             BootControlInterface::Slot slot,
+                                             string* device) const {
+  return GetPartitionDevice(partition_name, slot, false, device, nullptr);
+}
+
 bool BootControlChromeOS::IsSlotBootable(Slot slot) const {
   int partition_num = GetPartitionNumber(kChromeOSPartitionNameKernel, slot);
   if (partition_num < 0)
diff --git a/boot_control_chromeos.h b/cros/boot_control_chromeos.h
similarity index 80%
rename from boot_control_chromeos.h
rename to cros/boot_control_chromeos.h
index 0209052..0dff2c0 100644
--- a/boot_control_chromeos.h
+++ b/cros/boot_control_chromeos.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_BOOT_CONTROL_CHROMEOS_H_
-#define UPDATE_ENGINE_BOOT_CONTROL_CHROMEOS_H_
+#ifndef UPDATE_ENGINE_CROS_BOOT_CONTROL_CHROMEOS_H_
+#define UPDATE_ENGINE_CROS_BOOT_CONTROL_CHROMEOS_H_
 
 #include <memory>
 #include <string>
@@ -47,6 +47,11 @@
   BootControlInterface::Slot GetCurrentSlot() const override;
   bool GetPartitionDevice(const std::string& partition_name,
                           BootControlInterface::Slot slot,
+                          bool not_in_payload,
+                          std::string* device,
+                          bool* is_dynamic) const override;
+  bool GetPartitionDevice(const std::string& partition_name,
+                          BootControlInterface::Slot slot,
                           std::string* device) const override;
   bool IsSlotBootable(BootControlInterface::Slot slot) const override;
   bool MarkSlotUnbootable(BootControlInterface::Slot slot) override;
@@ -59,6 +64,7 @@
   friend class BootControlChromeOSTest;
   FRIEND_TEST(BootControlChromeOSTest, SysfsBlockDeviceTest);
   FRIEND_TEST(BootControlChromeOSTest, GetPartitionNumberTest);
+  FRIEND_TEST(BootControlChromeOSTest, ParseDlcPartitionNameTest);
 
   // Returns the sysfs block device for a root block device. For example,
   // SysfsBlockDevice("/dev/sda") returns "/sys/block/sda". Returns an empty
@@ -74,6 +80,13 @@
   int GetPartitionNumber(const std::string partition_name,
                          BootControlInterface::Slot slot) const;
 
+  // Extracts the DLC module ID and package ID from |partition_name|, which
+  // has the form dlc/<dlc-id>/<dlc-package>, e.g.
+  // dlc/fake-dlc/fake-package.
+  bool ParseDlcPartitionName(const std::string partition_name,
+                             std::string* dlc_id,
+                             std::string* dlc_package) const;
+
   // Cached values for GetNumSlots() and GetCurrentSlot().
   BootControlInterface::Slot num_slots_{1};
   BootControlInterface::Slot current_slot_{BootControlInterface::kInvalidSlot};
@@ -88,4 +101,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_BOOT_CONTROL_CHROMEOS_H_
+#endif  // UPDATE_ENGINE_CROS_BOOT_CONTROL_CHROMEOS_H_
diff --git a/boot_control_chromeos_unittest.cc b/cros/boot_control_chromeos_unittest.cc
similarity index 74%
rename from boot_control_chromeos_unittest.cc
rename to cros/boot_control_chromeos_unittest.cc
index 6a60009..fc1dd1e 100644
--- a/boot_control_chromeos_unittest.cc
+++ b/cros/boot_control_chromeos_unittest.cc
@@ -14,10 +14,12 @@
 // limitations under the License.
 //
 
-#include "update_engine/boot_control_chromeos.h"
+#include "update_engine/cros/boot_control_chromeos.h"
 
 #include <gtest/gtest.h>
 
+using std::string;
+
 namespace chromeos_update_engine {
 
 class BootControlChromeOSTest : public ::testing::Test {
@@ -67,4 +69,22 @@
   EXPECT_EQ(-1, bootctl_.GetPartitionNumber("A little panda", 0));
 }
 
+TEST_F(BootControlChromeOSTest, ParseDlcPartitionNameTest) {
+  string id, package;
+
+  EXPECT_TRUE(bootctl_.ParseDlcPartitionName("dlc/id/package", &id, &package));
+  EXPECT_EQ(id, "id");
+  EXPECT_EQ(package, "package");
+
+  EXPECT_FALSE(
+      bootctl_.ParseDlcPartitionName("dlc-foo/id/package", &id, &package));
+  EXPECT_FALSE(
+      bootctl_.ParseDlcPartitionName("dlc-foo/id/package/", &id, &package));
+  EXPECT_FALSE(bootctl_.ParseDlcPartitionName("dlc/id", &id, &package));
+  EXPECT_FALSE(bootctl_.ParseDlcPartitionName("dlc/id/", &id, &package));
+  EXPECT_FALSE(bootctl_.ParseDlcPartitionName("dlc//package", &id, &package));
+  EXPECT_FALSE(bootctl_.ParseDlcPartitionName("dlc", &id, &package));
+  EXPECT_FALSE(bootctl_.ParseDlcPartitionName("foo", &id, &package));
+}
+
 }  // namespace chromeos_update_engine
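
With the dlc/<dlc-id>/<dlc-package> naming, GetPartitionDevice() maps a DLC partition name to an image path rooted at imageloader::kDlcImageRootpath. A sketch of the mapping for slot 0 ("dlc_a"); the helper is hypothetical and the root-path value shown in the comment is an assumption, not confirmed here.

    #include <string>

    #include "update_engine/cros/boot_control_chromeos.h"

    namespace chromeos_update_engine {

    // Hypothetical illustration of the DLC partition-name mapping.
    void DlcDeviceSketch(const BootControlChromeOS& bootctl) {
      std::string device;
      if (bootctl.GetPartitionDevice("dlc/fake-dlc/fake-package", /*slot=*/0,
                                     &device)) {
        // device == <kDlcImageRootpath>/fake-dlc/fake-package/dlc_a/dlc.img,
        // where the root path comes from chromeos/constants/imageloader.h
        // (exact value not shown in this diff).
      }
    }

    }  // namespace chromeos_update_engine
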
diff --git a/chrome_browser_proxy_resolver.cc b/cros/chrome_browser_proxy_resolver.cc
similarity index 94%
rename from chrome_browser_proxy_resolver.cc
rename to cros/chrome_browser_proxy_resolver.cc
index bfb58f7..3ea8a9b 100644
--- a/chrome_browser_proxy_resolver.cc
+++ b/cros/chrome_browser_proxy_resolver.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/chrome_browser_proxy_resolver.h"
+#include "update_engine/cros/chrome_browser_proxy_resolver.h"
 
 #include <utility>
 
@@ -23,7 +23,7 @@
 #include <base/strings/string_util.h>
 #include <brillo/http/http_proxy.h>
 
-#include "update_engine/dbus_connection.h"
+#include "update_engine/cros/dbus_connection.h"
 
 namespace chromeos_update_engine {
 
diff --git a/chrome_browser_proxy_resolver.h b/cros/chrome_browser_proxy_resolver.h
similarity index 91%
rename from chrome_browser_proxy_resolver.h
rename to cros/chrome_browser_proxy_resolver.h
index 10a55fb..76848ef 100644
--- a/chrome_browser_proxy_resolver.h
+++ b/cros/chrome_browser_proxy_resolver.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_CHROME_BROWSER_PROXY_RESOLVER_H_
-#define UPDATE_ENGINE_CHROME_BROWSER_PROXY_RESOLVER_H_
+#ifndef UPDATE_ENGINE_CROS_CHROME_BROWSER_PROXY_RESOLVER_H_
+#define UPDATE_ENGINE_CROS_CHROME_BROWSER_PROXY_RESOLVER_H_
 
 #include <deque>
 #include <map>
@@ -63,4 +63,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_CHROME_BROWSER_PROXY_RESOLVER_H_
+#endif  // UPDATE_ENGINE_CROS_CHROME_BROWSER_PROXY_RESOLVER_H_
diff --git a/common_service.cc b/cros/common_service.cc
similarity index 78%
rename from common_service.cc
rename to cros/common_service.cc
index 0d5ee6d..e5ee828 100644
--- a/common_service.cc
+++ b/cros/common_service.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/common_service.h"
+#include "update_engine/cros/common_service.h"
 
 #include <string>
 
@@ -26,16 +26,16 @@
 #include <brillo/strings/string_utils.h>
 #include <policy/device_policy.h>
 
-#include "update_engine/common/clock_interface.h"
 #include "update_engine/common/hardware_interface.h"
 #include "update_engine/common/prefs.h"
+#include "update_engine/common/system_state.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/connection_manager_interface.h"
-#include "update_engine/omaha_request_params.h"
-#include "update_engine/omaha_utils.h"
-#include "update_engine/p2p_manager.h"
-#include "update_engine/payload_state_interface.h"
-#include "update_engine/update_attempter.h"
+#include "update_engine/cros/connection_manager_interface.h"
+#include "update_engine/cros/omaha_request_params.h"
+#include "update_engine/cros/omaha_utils.h"
+#include "update_engine/cros/p2p_manager.h"
+#include "update_engine/cros/payload_state_interface.h"
+#include "update_engine/cros/update_attempter.h"
 
 using base::StringPrintf;
 using brillo::ErrorPtr;
@@ -50,11 +50,7 @@
 namespace {
 // Log and set the error on the passed ErrorPtr.
 void LogAndSetError(ErrorPtr* error,
-#if BASE_VER < 576279
-                    const tracked_objects::Location& location,
-#else
                     const base::Location& location,
-#endif
                     const string& reason) {
   brillo::Error::AddTo(error,
                        location,
@@ -70,8 +66,7 @@
 const char* const UpdateEngineService::kErrorFailed =
     "org.chromium.UpdateEngine.Error.Failed";
 
-UpdateEngineService::UpdateEngineService(SystemState* system_state)
-    : system_state_(system_state) {}
+UpdateEngineService::UpdateEngineService() = default;
 
 // org::chromium::UpdateEngineInterfaceInterface methods implementation.
 
@@ -83,7 +78,7 @@
             << "RestrictDownload="
             << ((flags & UpdateAttemptFlags::kFlagRestrictDownload) ? "yes"
                                                                     : "no");
-  system_state_->update_attempter()->SetUpdateAttemptFlags(flags);
+  SystemState::Get()->update_attempter()->SetUpdateAttemptFlags(flags);
   return true;
 }
 
@@ -102,16 +97,16 @@
             << "interactive=" << (interactive ? "yes " : "no ")
             << "RestrictDownload=" << (restrict_downloads ? "yes " : "no ");
 
-  *out_result = system_state_->update_attempter()->CheckForUpdate(
+  *out_result = SystemState::Get()->update_attempter()->CheckForUpdate(
       in_app_version, in_omaha_url, flags);
   return true;
 }
 
 bool UpdateEngineService::AttemptInstall(brillo::ErrorPtr* error,
                                          const string& omaha_url,
-                                         const vector<string>& dlc_module_ids) {
-  if (!system_state_->update_attempter()->CheckForInstall(dlc_module_ids,
-                                                          omaha_url)) {
+                                         const vector<string>& dlc_ids) {
+  if (!SystemState::Get()->update_attempter()->CheckForInstall(dlc_ids,
+                                                               omaha_url)) {
     // TODO(xiaochu): support more detailed error messages.
     LogAndSetError(error, FROM_HERE, "Could not schedule install operation.");
     return false;
@@ -122,7 +117,7 @@
 bool UpdateEngineService::AttemptRollback(ErrorPtr* error, bool in_powerwash) {
   LOG(INFO) << "Attempting rollback to non-active partitions.";
 
-  if (!system_state_->update_attempter()->Rollback(in_powerwash)) {
+  if (!SystemState::Get()->update_attempter()->Rollback(in_powerwash)) {
     // TODO(dgarrett): Give a more specific error code/reason.
     LogAndSetError(error, FROM_HERE, "Rollback attempt failed.");
     return false;
@@ -132,14 +127,14 @@
 
 bool UpdateEngineService::CanRollback(ErrorPtr* /* error */,
                                       bool* out_can_rollback) {
-  bool can_rollback = system_state_->update_attempter()->CanRollback();
+  bool can_rollback = SystemState::Get()->update_attempter()->CanRollback();
   LOG(INFO) << "Checking to see if we can rollback . Result: " << can_rollback;
   *out_can_rollback = can_rollback;
   return true;
 }
 
 bool UpdateEngineService::ResetStatus(ErrorPtr* error) {
-  if (!system_state_->update_attempter()->ResetStatus()) {
+  if (!SystemState::Get()->update_attempter()->ResetStatus()) {
     // TODO(dgarrett): Give a more specific error code/reason.
     LogAndSetError(error, FROM_HERE, "ResetStatus failed.");
     return false;
@@ -147,9 +142,20 @@
   return true;
 }
 
+bool UpdateEngineService::SetDlcActiveValue(brillo::ErrorPtr* error,
+                                            bool is_active,
+                                            const string& dlc_id) {
+  if (!SystemState::Get()->update_attempter()->SetDlcActiveValue(is_active,
+                                                                 dlc_id)) {
+    LogAndSetError(error, FROM_HERE, "SetDlcActiveValue failed.");
+    return false;
+  }
+  return true;
+}
+
 bool UpdateEngineService::GetStatus(ErrorPtr* error,
                                     UpdateEngineStatus* out_status) {
-  if (!system_state_->update_attempter()->GetStatus(out_status)) {
+  if (!SystemState::Get()->update_attempter()->GetStatus(out_status)) {
     LogAndSetError(error, FROM_HERE, "GetStatus failed.");
     return false;
   }
@@ -157,7 +163,7 @@
 }
 
 bool UpdateEngineService::RebootIfNeeded(ErrorPtr* error) {
-  if (!system_state_->update_attempter()->RebootIfNeeded()) {
+  if (!SystemState::Get()->update_attempter()->RebootIfNeeded()) {
     // TODO(dgarrett): Give a more specific error code/reason.
     LogAndSetError(error, FROM_HERE, "Reboot not needed, or attempt failed.");
     return false;
@@ -168,15 +174,16 @@
 bool UpdateEngineService::SetChannel(ErrorPtr* error,
                                      const string& in_target_channel,
                                      bool in_is_powerwash_allowed) {
-  const policy::DevicePolicy* device_policy = system_state_->device_policy();
+  const policy::DevicePolicy* device_policy =
+      SystemState::Get()->device_policy();
 
   // The device_policy is loaded in a lazy way before an update check. Load it
   // now from the libbrillo cache if it wasn't already loaded.
   if (!device_policy) {
-    UpdateAttempter* update_attempter = system_state_->update_attempter();
+    UpdateAttempter* update_attempter = SystemState::Get()->update_attempter();
     if (update_attempter) {
       update_attempter->RefreshDevicePolicy();
-      device_policy = system_state_->device_policy();
+      device_policy = SystemState::Get()->device_policy();
     }
   }
 
@@ -192,7 +199,7 @@
 
   LOG(INFO) << "Setting destination channel to: " << in_target_channel;
   string error_message;
-  if (!system_state_->request_params()->SetTargetChannel(
+  if (!SystemState::Get()->request_params()->SetTargetChannel(
           in_target_channel, in_is_powerwash_allowed, &error_message)) {
     LogAndSetError(error, FROM_HERE, error_message);
     return false;
@@ -203,20 +210,19 @@
 bool UpdateEngineService::GetChannel(ErrorPtr* /* error */,
                                      bool in_get_current_channel,
                                      string* out_channel) {
-  OmahaRequestParams* rp = system_state_->request_params();
+  OmahaRequestParams* rp = SystemState::Get()->request_params();
   *out_channel =
       (in_get_current_channel ? rp->current_channel() : rp->target_channel());
   return true;
 }
 
 bool UpdateEngineService::SetCohortHint(ErrorPtr* error,
-                                        string in_cohort_hint) {
-  PrefsInterface* prefs = system_state_->prefs();
-
+                                        const string& in_cohort_hint) {
   // It is ok to override the cohort hint with an invalid value since it is
   // stored in stateful partition. The code reading it should sanitize it
   // anyway.
-  if (!prefs->SetString(kPrefsOmahaCohortHint, in_cohort_hint)) {
+  if (!SystemState::Get()->prefs()->SetString(kPrefsOmahaCohortHint,
+                                              in_cohort_hint)) {
     LogAndSetError(
         error,
         FROM_HERE,
@@ -229,8 +235,7 @@
 
 bool UpdateEngineService::GetCohortHint(ErrorPtr* error,
                                         string* out_cohort_hint) {
-  PrefsInterface* prefs = system_state_->prefs();
-
+  const auto* prefs = SystemState::Get()->prefs();
   *out_cohort_hint = "";
   if (prefs->Exists(kPrefsOmahaCohortHint) &&
       !prefs->GetString(kPrefsOmahaCohortHint, out_cohort_hint)) {
@@ -242,9 +247,7 @@
 
 bool UpdateEngineService::SetP2PUpdatePermission(ErrorPtr* error,
                                                  bool in_enabled) {
-  PrefsInterface* prefs = system_state_->prefs();
-
-  if (!prefs->SetBoolean(kPrefsP2PEnabled, in_enabled)) {
+  if (!SystemState::Get()->prefs()->SetBoolean(kPrefsP2PEnabled, in_enabled)) {
     LogAndSetError(
         error,
         FROM_HERE,
@@ -257,8 +260,7 @@
 
 bool UpdateEngineService::GetP2PUpdatePermission(ErrorPtr* error,
                                                  bool* out_enabled) {
-  PrefsInterface* prefs = system_state_->prefs();
-
+  const auto* prefs = SystemState::Get()->prefs();
   bool p2p_pref = false;  // Default if no setting is present.
   if (prefs->Exists(kPrefsP2PEnabled) &&
       !prefs->GetBoolean(kPrefsP2PEnabled, &p2p_pref)) {
@@ -273,7 +275,7 @@
 bool UpdateEngineService::SetUpdateOverCellularPermission(ErrorPtr* error,
                                                           bool in_allowed) {
   ConnectionManagerInterface* connection_manager =
-      system_state_->connection_manager();
+      SystemState::Get()->connection_manager();
 
   // Check if this setting is allowed by the device policy.
   if (connection_manager->IsAllowedConnectionTypesForUpdateSet()) {
@@ -286,11 +288,8 @@
 
   // If the policy wasn't loaded yet, then it is still OK to change the local
   // setting because the policy will be checked again during the update check.
-
-  PrefsInterface* prefs = system_state_->prefs();
-
-  if (!prefs ||
-      !prefs->SetBoolean(kPrefsUpdateOverCellularPermission, in_allowed)) {
+  if (!SystemState::Get()->prefs()->SetBoolean(
+          kPrefsUpdateOverCellularPermission, in_allowed)) {
     LogAndSetError(error,
                    FROM_HERE,
                    string("Error setting the update over cellular to ") +
@@ -305,7 +304,7 @@
     const std::string& target_version,
     int64_t target_size) {
   ConnectionManagerInterface* connection_manager =
-      system_state_->connection_manager();
+      SystemState::Get()->connection_manager();
 
   // Check if this setting is allowed by the device policy.
   if (connection_manager->IsAllowedConnectionTypesForUpdateSet()) {
@@ -319,10 +318,8 @@
   // If the policy wasn't loaded yet, then it is still OK to change the local
   // setting because the policy will be checked again during the update check.
 
-  PrefsInterface* prefs = system_state_->prefs();
-
-  if (!prefs ||
-      !prefs->SetString(kPrefsUpdateOverCellularTargetVersion,
+  auto* prefs = SystemState::Get()->prefs();
+  if (!prefs->SetString(kPrefsUpdateOverCellularTargetVersion,
                         target_version) ||
       !prefs->SetInt64(kPrefsUpdateOverCellularTargetSize, target_size)) {
     LogAndSetError(
@@ -335,16 +332,15 @@
 bool UpdateEngineService::GetUpdateOverCellularPermission(ErrorPtr* error,
                                                           bool* out_allowed) {
   ConnectionManagerInterface* connection_manager =
-      system_state_->connection_manager();
+      SystemState::Get()->connection_manager();
 
   if (connection_manager->IsAllowedConnectionTypesForUpdateSet()) {
     // We have device policy, so ignore the user preferences.
     *out_allowed = connection_manager->IsUpdateAllowedOver(
         ConnectionType::kCellular, ConnectionTethering::kUnknown);
   } else {
-    PrefsInterface* prefs = system_state_->prefs();
-
-    if (!prefs || !prefs->Exists(kPrefsUpdateOverCellularPermission)) {
+    const auto* prefs = SystemState::Get()->prefs();
+    if (!prefs->Exists(kPrefsUpdateOverCellularPermission)) {
       // Update is not allowed as user preference is not set or not available.
       *out_allowed = false;
       return true;
@@ -366,26 +362,26 @@
 bool UpdateEngineService::GetDurationSinceUpdate(ErrorPtr* error,
                                                  int64_t* out_usec_wallclock) {
   base::Time time;
-  if (!system_state_->update_attempter()->GetBootTimeAtUpdate(&time)) {
+  if (!SystemState::Get()->update_attempter()->GetBootTimeAtUpdate(&time)) {
     LogAndSetError(error, FROM_HERE, "No pending update.");
     return false;
   }
 
-  ClockInterface* clock = system_state_->clock();
+  const auto* clock = SystemState::Get()->clock();
   *out_usec_wallclock = (clock->GetBootTime() - time).InMicroseconds();
   return true;
 }
 
 bool UpdateEngineService::GetPrevVersion(ErrorPtr* /* error */,
                                          string* out_prev_version) {
-  *out_prev_version = system_state_->update_attempter()->GetPrevVersion();
+  *out_prev_version = SystemState::Get()->update_attempter()->GetPrevVersion();
   return true;
 }
 
 bool UpdateEngineService::GetRollbackPartition(
     ErrorPtr* /* error */, string* out_rollback_partition_name) {
   BootControlInterface::Slot rollback_slot =
-      system_state_->update_attempter()->GetRollbackSlot();
+      SystemState::Get()->update_attempter()->GetRollbackSlot();
 
   if (rollback_slot == BootControlInterface::kInvalidSlot) {
     out_rollback_partition_name->clear();
@@ -393,7 +389,7 @@
   }
 
   string name;
-  if (!system_state_->boot_control()->GetPartitionDevice(
+  if (!SystemState::Get()->boot_control()->GetPartitionDevice(
           "KERNEL", rollback_slot, &name)) {
     LOG(ERROR) << "Invalid rollback device";
     return false;
@@ -407,25 +403,9 @@
 bool UpdateEngineService::GetLastAttemptError(ErrorPtr* /* error */,
                                               int32_t* out_last_attempt_error) {
   ErrorCode error_code =
-      system_state_->update_attempter()->GetAttemptErrorCode();
+      SystemState::Get()->update_attempter()->GetAttemptErrorCode();
   *out_last_attempt_error = static_cast<int>(error_code);
   return true;
 }
 
-bool UpdateEngineService::GetEolStatus(ErrorPtr* error,
-                                       int32_t* out_eol_status) {
-  PrefsInterface* prefs = system_state_->prefs();
-
-  string str_eol_status;
-  if (prefs->Exists(kPrefsOmahaEolStatus) &&
-      !prefs->GetString(kPrefsOmahaEolStatus, &str_eol_status)) {
-    LogAndSetError(error, FROM_HERE, "Error getting the end-of-life status.");
-    return false;
-  }
-
-  // StringToEolStatus will return kSupported for invalid values.
-  *out_eol_status = static_cast<int32_t>(StringToEolStatus(str_eol_status));
-  return true;
-}
-
 }  // namespace chromeos_update_engine
diff --git a/common_service.h b/cros/common_service.h
similarity index 90%
rename from common_service.h
rename to cros/common_service.h
index f93855d..2c176c5 100644
--- a/common_service.h
+++ b/cros/common_service.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_COMMON_SERVICE_H_
-#define UPDATE_ENGINE_COMMON_SERVICE_H_
+#ifndef UPDATE_ENGINE_CROS_SERVICE_H_
+#define UPDATE_ENGINE_CROS_SERVICE_H_
 
 #include <inttypes.h>
 
@@ -26,7 +26,6 @@
 #include <brillo/errors/error.h>
 
 #include "update_engine/client_library/include/update_engine/update_status.h"
-#include "update_engine/system_state.h"
 
 namespace chromeos_update_engine {
 
@@ -38,7 +37,7 @@
   // Generic service error.
   static const char* const kErrorFailed;
 
-  explicit UpdateEngineService(SystemState* system_state);
+  UpdateEngineService();
   virtual ~UpdateEngineService() = default;
 
   // Set flags that influence how updates and checks are performed.  These
@@ -55,10 +54,10 @@
 
   // Attempts a DLC module install operation.
   // |omaha_url|: the URL to query for update.
-  // |dlc_module_ids|: a list of DLC module IDs.
+  // |dlc_ids|: a list of DLC module IDs.
   bool AttemptInstall(brillo::ErrorPtr* error,
                       const std::string& omaha_url,
-                      const std::vector<std::string>& dlc_module_ids);
+                      const std::vector<std::string>& dlc_ids);
 
   bool AttemptRollback(brillo::ErrorPtr* error, bool in_powerwash);
 
@@ -70,6 +69,13 @@
   // update. This is used for development only.
   bool ResetStatus(brillo::ErrorPtr* error);
 
+  // Sets the DLC as active or inactive. When set to active, the ping metadata
+  // for the DLC is updated accordingly. When set to inactive, the metadata
+  // for the DLC is deleted.
+  bool SetDlcActiveValue(brillo::ErrorPtr* error,
+                         bool is_active,
+                         const std::string& dlc_id);
+
   // Returns the current status of the Update Engine. If an update is in
   // progress, the number of operations, size to download and overall progress
   // is reported.
@@ -102,7 +108,8 @@
   // Sets the current "cohort hint" value to |in_cohort_hint|. The cohort hint
   // is sent back to Omaha on every request and can be used as a hint of
   // which cohort we should be placed in.
-  bool SetCohortHint(brillo::ErrorPtr* error, std::string in_cohort_hint);
+  bool SetCohortHint(brillo::ErrorPtr* error,
+                     const std::string& in_cohort_hint);
 
   // Return the current cohort hint. This value can be set with SetCohortHint()
   // and can also be updated from Omaha on every update check request.
@@ -152,15 +159,8 @@
   // Returns the last UpdateAttempt error.
   bool GetLastAttemptError(brillo::ErrorPtr* error,
                            int32_t* out_last_attempt_error);
-
-  // Returns the current end-of-life status of the device. This value is updated
-  // on every update check and persisted on disk across reboots.
-  bool GetEolStatus(brillo::ErrorPtr* error, int32_t* out_eol_status);
-
- private:
-  SystemState* system_state_;
 };
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_COMMON_SERVICE_H_
+#endif  // UPDATE_ENGINE_CROS_SERVICE_H_
diff --git a/common_service_unittest.cc b/cros/common_service_unittest.cc
similarity index 76%
rename from common_service_unittest.cc
rename to cros/common_service_unittest.cc
index 65202a0..0644643 100644
--- a/common_service_unittest.cc
+++ b/cros/common_service_unittest.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/common_service.h"
+#include "update_engine/cros/common_service.h"
 
 #include <gtest/gtest.h>
 #include <string>
@@ -24,9 +24,8 @@
 #include <policy/libpolicy.h>
 #include <policy/mock_device_policy.h>
 
-#include "update_engine/common/fake_prefs.h"
-#include "update_engine/fake_system_state.h"
-#include "update_engine/omaha_utils.h"
+#include "update_engine/cros/fake_system_state.h"
+#include "update_engine/cros/omaha_utils.h"
 
 using std::string;
 using std::vector;
@@ -39,17 +38,14 @@
 
 class UpdateEngineServiceTest : public ::testing::Test {
  protected:
-  UpdateEngineServiceTest()
-      : mock_update_attempter_(fake_system_state_.mock_update_attempter()),
-        common_service_(&fake_system_state_) {}
+  UpdateEngineServiceTest() = default;
 
-  void SetUp() override { fake_system_state_.set_device_policy(nullptr); }
+  void SetUp() override {
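+    // Creates the global FakeSystemState used by the code under test.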
+    FakeSystemState::CreateInstance();
+    FakeSystemState::Get()->set_device_policy(nullptr);
+    mock_update_attempter_ = FakeSystemState::Get()->mock_update_attempter();
+  }
 
-  // Fake/mock infrastructure.
-  FakeSystemState fake_system_state_;
-  policy::MockDevicePolicy mock_device_policy_;
-
-  // Shortcut for fake_system_state_.mock_update_attempter().
   MockUpdateAttempter* mock_update_attempter_;
 
   brillo::ErrorPtr error_;
@@ -100,12 +96,26 @@
   EXPECT_FALSE(common_service_.AttemptInstall(&error_, "", {}));
 }
 
+TEST_F(UpdateEngineServiceTest, SetDlcActiveValue) {
+  EXPECT_CALL(*mock_update_attempter_, SetDlcActiveValue(_, _))
+      .WillOnce(Return(true));
+
+  EXPECT_TRUE(common_service_.SetDlcActiveValue(&error_, true, "dlc0"));
+}
+
+TEST_F(UpdateEngineServiceTest, SetDlcActiveValueReturnsFalse) {
+  EXPECT_CALL(*mock_update_attempter_, SetDlcActiveValue(_, _))
+      .WillOnce(Return(false));
+
+  EXPECT_FALSE(common_service_.SetDlcActiveValue(&error_, true, "dlc0"));
+}
+
 // SetChannel is allowed when there's no device policy (the device is not
 // enterprise enrolled).
 TEST_F(UpdateEngineServiceTest, SetChannelWithNoPolicy) {
   EXPECT_CALL(*mock_update_attempter_, RefreshDevicePolicy());
   // If SetTargetChannel is called it means the policy check passed.
-  EXPECT_CALL(*fake_system_state_.mock_request_params(),
+  EXPECT_CALL(*FakeSystemState::Get()->mock_request_params(),
               SetTargetChannel("stable-channel", true, _))
       .WillOnce(Return(true));
   EXPECT_TRUE(common_service_.SetChannel(&error_, "stable-channel", true));
@@ -115,10 +125,10 @@
 // When the policy is present, the delegated value should be checked.
 TEST_F(UpdateEngineServiceTest, SetChannelWithDelegatedPolicy) {
   policy::MockDevicePolicy mock_device_policy;
-  fake_system_state_.set_device_policy(&mock_device_policy);
+  FakeSystemState::Get()->set_device_policy(&mock_device_policy);
   EXPECT_CALL(mock_device_policy, GetReleaseChannelDelegated(_))
       .WillOnce(DoAll(SetArgPointee<0>(true), Return(true)));
-  EXPECT_CALL(*fake_system_state_.mock_request_params(),
+  EXPECT_CALL(*FakeSystemState::Get()->mock_request_params(),
               SetTargetChannel("beta-channel", true, _))
       .WillOnce(Return(true));
 
@@ -130,7 +140,7 @@
 // raised.
 TEST_F(UpdateEngineServiceTest, SetChannelWithInvalidChannel) {
   EXPECT_CALL(*mock_update_attempter_, RefreshDevicePolicy());
-  EXPECT_CALL(*fake_system_state_.mock_request_params(),
+  EXPECT_CALL(*FakeSystemState::Get()->mock_request_params(),
               SetTargetChannel("foo-channel", true, _))
       .WillOnce(Return(false));
 
@@ -141,8 +151,8 @@
 }
 
 TEST_F(UpdateEngineServiceTest, GetChannel) {
-  fake_system_state_.mock_request_params()->set_current_channel("current");
-  fake_system_state_.mock_request_params()->set_target_channel("target");
+  FakeSystemState::Get()->mock_request_params()->set_current_channel("current");
+  FakeSystemState::Get()->mock_request_params()->set_target_channel("target");
   string channel;
   EXPECT_TRUE(common_service_.GetChannel(
       &error_, true /* get_current_channel */, &channel));
@@ -169,19 +179,4 @@
                                UpdateEngineService::kErrorFailed));
 }
 
-TEST_F(UpdateEngineServiceTest, GetEolStatusTest) {
-  FakePrefs fake_prefs;
-  fake_system_state_.set_prefs(&fake_prefs);
-  // The default value should be "supported".
-  int32_t eol_status = static_cast<int32_t>(EolStatus::kEol);
-  EXPECT_TRUE(common_service_.GetEolStatus(&error_, &eol_status));
-  EXPECT_EQ(nullptr, error_);
-  EXPECT_EQ(EolStatus::kSupported, static_cast<EolStatus>(eol_status));
-
-  fake_prefs.SetString(kPrefsOmahaEolStatus, "security-only");
-  EXPECT_TRUE(common_service_.GetEolStatus(&error_, &eol_status));
-  EXPECT_EQ(nullptr, error_);
-  EXPECT_EQ(EolStatus::kSecurityOnly, static_cast<EolStatus>(eol_status));
-}
-
 }  // namespace chromeos_update_engine
diff --git a/connection_manager.cc b/cros/connection_manager.cc
similarity index 64%
rename from connection_manager.cc
rename to cros/connection_manager.cc
index 7263a74..6a5c63b 100644
--- a/connection_manager.cc
+++ b/cros/connection_manager.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/connection_manager.h"
+#include "update_engine/cros/connection_manager.h"
 
 #include <memory>
 #include <set>
@@ -26,12 +26,12 @@
 #include <shill/dbus-constants.h>
 #include <shill/dbus-proxies.h>
 
+#include "update_engine/common/connection_utils.h"
 #include "update_engine/common/prefs.h"
+#include "update_engine/common/system_state.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/connection_utils.h"
-#include "update_engine/shill_proxy.h"
-#include "update_engine/system_state.h"
-#include "update_engine/update_attempter.h"
+#include "update_engine/cros/shill_proxy.h"
+#include "update_engine/cros/update_attempter.h"
 
 using org::chromium::flimflam::ManagerProxyInterface;
 using org::chromium::flimflam::ServiceProxyInterface;
@@ -41,83 +41,75 @@
 namespace chromeos_update_engine {
 
 namespace connection_manager {
-std::unique_ptr<ConnectionManagerInterface> CreateConnectionManager(
-    SystemState* system_state) {
+std::unique_ptr<ConnectionManagerInterface> CreateConnectionManager() {
   return std::unique_ptr<ConnectionManagerInterface>(
-      new ConnectionManager(new ShillProxy(), system_state));
+      new ConnectionManager(new ShillProxy()));
 }
 }  // namespace connection_manager
 
-ConnectionManager::ConnectionManager(ShillProxyInterface* shill_proxy,
-                                     SystemState* system_state)
-    : shill_proxy_(shill_proxy), system_state_(system_state) {}
+ConnectionManager::ConnectionManager(ShillProxyInterface* shill_proxy)
+    : shill_proxy_(shill_proxy) {}
 
 bool ConnectionManager::IsUpdateAllowedOver(
     ConnectionType type, ConnectionTethering tethering) const {
-  switch (type) {
-    case ConnectionType::kBluetooth:
-      return false;
-
-    case ConnectionType::kCellular: {
-      set<string> allowed_types;
-
-      const policy::DevicePolicy* device_policy =
-          system_state_->device_policy();
-
-      // The device_policy is loaded in a lazy way before an update check. Load
-      // it now from the libbrillo cache if it wasn't already loaded.
-      if (!device_policy) {
-        UpdateAttempter* update_attempter = system_state_->update_attempter();
-        if (update_attempter) {
-          update_attempter->RefreshDevicePolicy();
-          device_policy = system_state_->device_policy();
-        }
-      }
-
-      if (!device_policy) {
-        // Device policy fails to be loaded (possibly due to guest account). We
-        // do not check the local user setting here, which should be checked by
-        // |OmahaRequestAction| during checking for update.
-        LOG(INFO) << "Allowing updates over cellular as device policy "
-                     "fails to be loaded.";
-        return true;
-      }
-
-      if (device_policy->GetAllowedConnectionTypesForUpdate(&allowed_types)) {
-        // The update setting is enforced by the device policy.
-
-        if (!base::ContainsKey(allowed_types, shill::kTypeCellular)) {
-          LOG(INFO) << "Disabling updates over cellular connection as it's not "
-                       "allowed in the device policy.";
-          return false;
-        }
-
-        LOG(INFO) << "Allowing updates over cellular per device policy.";
-        return true;
-      }
-
-      // If there's no update setting in the device policy, we do not check
-      // the local user setting here, which should be checked by
-      // |OmahaRequestAction| during checking for update.
-      LOG(INFO) << "Allowing updates over cellular as device policy does "
-                   "not include update setting.";
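+  // Only cellular connections, and connections confirmed to be tethered
+  // (which are treated as cellular), go through the policy checks below; any
+  // other connection type is always allowed.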
+  if (type != ConnectionType::kCellular) {
+    if (tethering != ConnectionTethering::kConfirmed) {
       return true;
     }
 
-    default:
-      if (tethering == ConnectionTethering::kConfirmed) {
-        // Treat this connection as if it is a cellular connection.
-        LOG(INFO) << "Current connection is confirmed tethered, using Cellular "
-                     "setting.";
-        return IsUpdateAllowedOver(ConnectionType::kCellular,
-                                   ConnectionTethering::kUnknown);
-      }
-      return true;
+    // Treat this connection as if it is a cellular connection.
+    LOG(INFO)
+        << "Current connection is confirmed tethered, using Cellular setting.";
   }
+
+  const policy::DevicePolicy* device_policy =
+      SystemState::Get()->device_policy();
+
+  // The device_policy is loaded in a lazy way before an update check. Load
+  // it now from the libbrillo cache if it wasn't already loaded.
+  if (!device_policy) {
+    UpdateAttempter* update_attempter = SystemState::Get()->update_attempter();
+    if (update_attempter) {
+      update_attempter->RefreshDevicePolicy();
+      device_policy = SystemState::Get()->device_policy();
+    }
+  }
+
+  if (!device_policy) {
+    // The device policy failed to load (possibly due to a guest account). We
+    // do not check the local user setting here; it is checked by
+    // |OmahaRequestAction| when checking for an update.
+    LOG(INFO) << "Allowing updates over cellular as the device policy failed "
+                 "to load.";
+    return true;
+  }
+
+  set<string> allowed_types;
+  if (device_policy->GetAllowedConnectionTypesForUpdate(&allowed_types)) {
+    // The update setting is enforced by the device policy.
+
+    // TODO(crbug.com/1054279): Use base::Contains after uprev to r680000.
+    if (allowed_types.find(shill::kTypeCellular) == allowed_types.end()) {
+      LOG(INFO) << "Disabling updates over cellular connection as it's not "
+                   "allowed in the device policy.";
+      return false;
+    }
+
+    LOG(INFO) << "Allowing updates over cellular per device policy.";
+    return true;
+  }
+
+  // If there's no update setting in the device policy, we do not check the
+  // local user setting here; it is checked by |OmahaRequestAction| when
+  // checking for an update.
+  LOG(INFO) << "Allowing updates over cellular as the device policy does not "
+               "include an update setting.";
+  return true;
 }
 
 bool ConnectionManager::IsAllowedConnectionTypesForUpdateSet() const {
-  const policy::DevicePolicy* device_policy = system_state_->device_policy();
+  const policy::DevicePolicy* device_policy =
+      SystemState::Get()->device_policy();
   if (!device_policy) {
     LOG(INFO) << "There's no device policy loaded yet.";
     return false;
diff --git a/connection_manager.h b/cros/connection_manager.h
similarity index 83%
rename from connection_manager.h
rename to cros/connection_manager.h
index d8527a3..bb54ff7 100644
--- a/connection_manager.h
+++ b/cros/connection_manager.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_CONNECTION_MANAGER_H_
-#define UPDATE_ENGINE_CONNECTION_MANAGER_H_
+#ifndef UPDATE_ENGINE_CROS_CONNECTION_MANAGER_H_
+#define UPDATE_ENGINE_CROS_CONNECTION_MANAGER_H_
 
 #include <memory>
 #include <string>
@@ -23,8 +23,8 @@
 #include <base/macros.h>
 #include <dbus/object_path.h>
 
-#include "update_engine/connection_manager_interface.h"
-#include "update_engine/shill_proxy_interface.h"
+#include "update_engine/cros/connection_manager_interface.h"
+#include "update_engine/cros/shill_proxy_interface.h"
 
 namespace chromeos_update_engine {
 
@@ -35,8 +35,7 @@
  public:
   // Constructs a new ConnectionManager object initialized with the
   // given system state.
-  ConnectionManager(ShillProxyInterface* shill_proxy,
-                    SystemState* system_state);
+  explicit ConnectionManager(ShillProxyInterface* shill_proxy);
   ~ConnectionManager() override = default;
 
   // ConnectionManagerInterface overrides.
@@ -58,12 +57,9 @@
   // The mockable interface to access the shill DBus proxies.
   std::unique_ptr<ShillProxyInterface> shill_proxy_;
 
-  // The global context for update_engine.
-  SystemState* system_state_;
-
   DISALLOW_COPY_AND_ASSIGN(ConnectionManager);
 };
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_CONNECTION_MANAGER_H_
+#endif  // UPDATE_ENGINE_CROS_CONNECTION_MANAGER_H_
diff --git a/connection_manager_interface.h b/cros/connection_manager_interface.h
similarity index 88%
rename from connection_manager_interface.h
rename to cros/connection_manager_interface.h
index 9f77989..dc6c983 100644
--- a/connection_manager_interface.h
+++ b/cros/connection_manager_interface.h
@@ -14,19 +14,17 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_CONNECTION_MANAGER_INTERFACE_H_
-#define UPDATE_ENGINE_CONNECTION_MANAGER_INTERFACE_H_
+#ifndef UPDATE_ENGINE_CROS_CONNECTION_MANAGER_INTERFACE_H_
+#define UPDATE_ENGINE_CROS_CONNECTION_MANAGER_INTERFACE_H_
 
 #include <memory>
 
 #include <base/macros.h>
 
-#include "update_engine/connection_utils.h"
+#include "update_engine/common/connection_utils.h"
 
 namespace chromeos_update_engine {
 
-class SystemState;
-
 // This class exposes a generic interface to the connection manager
 // (e.g FlimFlam, Shill, etc.) to consolidate all connection-related
 // logic in update_engine.
@@ -59,10 +57,9 @@
 
 namespace connection_manager {
 // Factory function which creates a ConnectionManager.
-std::unique_ptr<ConnectionManagerInterface> CreateConnectionManager(
-    SystemState* system_state);
+std::unique_ptr<ConnectionManagerInterface> CreateConnectionManager();
 }  // namespace connection_manager
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_CONNECTION_MANAGER_INTERFACE_H_
+#endif  // UPDATE_ENGINE_CROS_CONNECTION_MANAGER_INTERFACE_H_
diff --git a/connection_manager_unittest.cc b/cros/connection_manager_unittest.cc
similarity index 86%
rename from connection_manager_unittest.cc
rename to cros/connection_manager_unittest.cc
index 3cdaf4c..46da8cc 100644
--- a/connection_manager_unittest.cc
+++ b/cros/connection_manager_unittest.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/connection_manager.h"
+#include "update_engine/cros/connection_manager.h"
 
 #include <memory>
 #include <set>
@@ -32,8 +32,8 @@
 #include <shill/dbus-proxy-mocks.h>
 
 #include "update_engine/common/test_utils.h"
-#include "update_engine/fake_shill_proxy.h"
-#include "update_engine/fake_system_state.h"
+#include "update_engine/cros/fake_shill_proxy.h"
+#include "update_engine/cros/fake_system_state.h"
 
 using chromeos_update_engine::connection_utils::StringForConnectionType;
 using org::chromium::flimflam::ManagerProxyMock;
@@ -52,7 +52,8 @@
 
   void SetUp() override {
     loop_.SetAsCurrent();
-    fake_system_state_.set_connection_manager(&cmut_);
+    FakeSystemState::CreateInstance();
+    FakeSystemState::Get()->set_connection_manager(&cmut_);
   }
 
   void TearDown() override { EXPECT_FALSE(loop_.PendingTasks()); }
@@ -81,11 +82,10 @@
                                 ConnectionTethering expected_tethering);
 
   brillo::FakeMessageLoop loop_{nullptr};
-  FakeSystemState fake_system_state_;
   FakeShillProxy* fake_shill_proxy_;
 
   // ConnectionManager under test.
-  ConnectionManager cmut_{fake_shill_proxy_, &fake_system_state_};
+  ConnectionManager cmut_{fake_shill_proxy_};
 };
 
 void ConnectionManagerTest::SetManagerReply(const char* default_service,
@@ -184,9 +184,6 @@
 TEST_F(ConnectionManagerTest, SimpleTest) {
   TestWithServiceType(shill::kTypeEthernet, nullptr, ConnectionType::kEthernet);
   TestWithServiceType(shill::kTypeWifi, nullptr, ConnectionType::kWifi);
-  TestWithServiceType(shill::kTypeWimax, nullptr, ConnectionType::kWimax);
-  TestWithServiceType(
-      shill::kTypeBluetooth, nullptr, ConnectionType::kBluetooth);
   TestWithServiceType(shill::kTypeCellular, nullptr, ConnectionType::kCellular);
 }
 
@@ -195,8 +192,6 @@
   TestWithServiceType(
       shill::kTypeVPN, shill::kTypeVPN, ConnectionType::kUnknown);
   TestWithServiceType(shill::kTypeVPN, shill::kTypeWifi, ConnectionType::kWifi);
-  TestWithServiceType(
-      shill::kTypeVPN, shill::kTypeWimax, ConnectionType::kWimax);
 }
 
 TEST_F(ConnectionManagerTest, TetheringTest) {
@@ -229,20 +224,10 @@
                                         ConnectionTethering::kUnknown));
 }
 
-TEST_F(ConnectionManagerTest, AllowUpdatesOverWimaxTest) {
-  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kWimax,
-                                        ConnectionTethering::kUnknown));
-}
-
-TEST_F(ConnectionManagerTest, BlockUpdatesOverBluetoothTest) {
-  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kBluetooth,
-                                         ConnectionTethering::kUnknown));
-}
-
 TEST_F(ConnectionManagerTest, AllowUpdatesOnlyOver3GPerPolicyTest) {
   policy::MockDevicePolicy allow_3g_policy;
 
-  fake_system_state_.set_device_policy(&allow_3g_policy);
+  FakeSystemState::Get()->set_device_policy(&allow_3g_policy);
 
   // This test tests cellular (3G) being the only connection type being allowed.
   set<string> allowed_set;
@@ -259,14 +244,13 @@
 TEST_F(ConnectionManagerTest, AllowUpdatesOver3GAndOtherTypesPerPolicyTest) {
   policy::MockDevicePolicy allow_3g_policy;
 
-  fake_system_state_.set_device_policy(&allow_3g_policy);
+  FakeSystemState::Get()->set_device_policy(&allow_3g_policy);
 
   // This test tests multiple connection types being allowed, with
   // 3G one among them. Only Cellular is currently enforced by the policy
-  // setting, the others are ignored (see Bluetooth for example).
+  // setting.
   set<string> allowed_set;
   allowed_set.insert(StringForConnectionType(ConnectionType::kCellular));
-  allowed_set.insert(StringForConnectionType(ConnectionType::kBluetooth));
 
   EXPECT_CALL(allow_3g_policy, GetAllowedConnectionTypesForUpdate(_))
       .Times(3)
@@ -280,10 +264,6 @@
                                         ConnectionTethering::kUnknown));
   EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kWifi,
                                         ConnectionTethering::kUnknown));
-  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kWimax,
-                                        ConnectionTethering::kUnknown));
-  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kBluetooth,
-                                         ConnectionTethering::kUnknown));
 
   // Tethered networks are treated in the same way as Cellular networks and
   // thus allowed.
@@ -296,7 +276,7 @@
 TEST_F(ConnectionManagerTest, AllowUpdatesOverCellularByDefaultTest) {
   policy::MockDevicePolicy device_policy;
   // Set an empty device policy.
-  fake_system_state_.set_device_policy(&device_policy);
+  FakeSystemState::Get()->set_device_policy(&device_policy);
 
   EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular,
                                         ConnectionTethering::kUnknown));
@@ -305,7 +285,7 @@
 TEST_F(ConnectionManagerTest, AllowUpdatesOverTetheredNetworkByDefaultTest) {
   policy::MockDevicePolicy device_policy;
   // Set an empty device policy.
-  fake_system_state_.set_device_policy(&device_policy);
+  FakeSystemState::Get()->set_device_policy(&device_policy);
 
   EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kWifi,
                                         ConnectionTethering::kConfirmed));
@@ -318,14 +298,13 @@
 TEST_F(ConnectionManagerTest, BlockUpdatesOver3GPerPolicyTest) {
   policy::MockDevicePolicy block_3g_policy;
 
-  fake_system_state_.set_device_policy(&block_3g_policy);
+  FakeSystemState::Get()->set_device_policy(&block_3g_policy);
 
   // Test that updates for 3G are blocked while updates are allowed
   // over several other types.
   set<string> allowed_set;
   allowed_set.insert(StringForConnectionType(ConnectionType::kEthernet));
   allowed_set.insert(StringForConnectionType(ConnectionType::kWifi));
-  allowed_set.insert(StringForConnectionType(ConnectionType::kWimax));
 
   EXPECT_CALL(block_3g_policy, GetAllowedConnectionTypesForUpdate(_))
       .Times(1)
@@ -338,7 +317,7 @@
 TEST_F(ConnectionManagerTest, AllowUpdatesOver3GIfPolicyIsNotSet) {
   policy::MockDevicePolicy device_policy;
 
-  fake_system_state_.set_device_policy(&device_policy);
+  FakeSystemState::Get()->set_device_policy(&device_policy);
 
   // Return false for GetAllowedConnectionTypesForUpdate and see
   // that updates are allowed as device policy is not set. Further
@@ -352,7 +331,7 @@
 }
 
 TEST_F(ConnectionManagerTest, AllowUpdatesOverCellularIfPolicyFailsToBeLoaded) {
-  fake_system_state_.set_device_policy(nullptr);
+  FakeSystemState::Get()->set_device_policy(nullptr);
 
   EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular,
                                         ConnectionTethering::kUnknown));
@@ -363,10 +342,6 @@
                StringForConnectionType(ConnectionType::kEthernet));
   EXPECT_STREQ(shill::kTypeWifi,
                StringForConnectionType(ConnectionType::kWifi));
-  EXPECT_STREQ(shill::kTypeWimax,
-               StringForConnectionType(ConnectionType::kWimax));
-  EXPECT_STREQ(shill::kTypeBluetooth,
-               StringForConnectionType(ConnectionType::kBluetooth));
   EXPECT_STREQ(shill::kTypeCellular,
                StringForConnectionType(ConnectionType::kCellular));
   EXPECT_STREQ("Unknown", StringForConnectionType(ConnectionType::kUnknown));
diff --git a/cros/daemon_chromeos.cc b/cros/daemon_chromeos.cc
new file mode 100644
index 0000000..366fb9a
--- /dev/null
+++ b/cros/daemon_chromeos.cc
@@ -0,0 +1,79 @@
+//
+// Copyright (C) 2015 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/cros/daemon_chromeos.h"
+
+#include <sysexits.h>
+
+#include <base/bind.h>
+#include <base/location.h>
+
+#include "update_engine/cros/real_system_state.h"
+
+using brillo::Daemon;
+using std::unique_ptr;
+
+namespace chromeos_update_engine {
+
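+// DaemonBase factory: Chrome OS builds run the DaemonChromeOS implementation.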
+unique_ptr<DaemonBase> DaemonBase::CreateInstance() {
+  return std::make_unique<DaemonChromeOS>();
+}
+
+int DaemonChromeOS::OnInit() {
+  // Register the |subprocess_| singleton with this Daemon as the signal
+  // handler.
+  subprocess_.Init(this);
+
+  int exit_code = Daemon::OnInit();
+  if (exit_code != EX_OK)
+    return exit_code;
+
+  // Initialize update engine global state.
+  // TODO(deymo): Move the initialization to a factory method to avoid the
+  // explicit re-use of the |bus| instance, which is shared between the D-Bus
+  // service and D-Bus client calls.
+  RealSystemState::SetInstance(&system_state_);
+
+  // Create the DBus service.
+  dbus_adaptor_.reset(new UpdateEngineAdaptor());
+  SystemState::Get()->update_attempter()->AddObserver(dbus_adaptor_.get());
+
+  dbus_adaptor_->RegisterAsync(
+      base::Bind(&DaemonChromeOS::OnDBusRegistered, base::Unretained(this)));
+  LOG(INFO) << "Waiting for DBus object to be registered.";
+  return EX_OK;
+}
+
+void DaemonChromeOS::OnDBusRegistered(bool succeeded) {
+  if (!succeeded) {
+    LOG(ERROR) << "Registering the UpdateEngineAdaptor";
+    QuitWithExitCode(1);
+    return;
+  }
+
+  // Take ownership of the service now that everything is initialized. We need
+  // to do this now and not before to avoid exposing a well-known DBus service
+  // path that doesn't have the service it is supposed to implement.
+  if (!dbus_adaptor_->RequestOwnership()) {
+    LOG(ERROR) << "Unable to take ownership of the DBus service, is there "
+               << "other update_engine daemon running?";
+    QuitWithExitCode(1);
+    return;
+  }
+  SystemState::Get()->update_attempter()->StartUpdater();
+}
+
+}  // namespace chromeos_update_engine
diff --git a/cros/daemon_chromeos.h b/cros/daemon_chromeos.h
new file mode 100644
index 0000000..b23c2a6
--- /dev/null
+++ b/cros/daemon_chromeos.h
@@ -0,0 +1,63 @@
+//
+// Copyright (C) 2015 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_CROS_DAEMON_CHROMEOS_H_
+#define UPDATE_ENGINE_CROS_DAEMON_CHROMEOS_H_
+
+#include <memory>
+
+#include "update_engine/common/daemon_base.h"
+#include "update_engine/common/daemon_state_interface.h"
+#include "update_engine/common/subprocess.h"
+#include "update_engine/cros/dbus_service.h"
+#include "update_engine/cros/real_system_state.h"
+
+namespace chromeos_update_engine {
+
+class DaemonChromeOS : public DaemonBase {
+ public:
+  DaemonChromeOS() = default;
+
+ protected:
+  int OnInit() override;
+
+ private:
+  // Run from the main loop when the |dbus_adaptor_| object is registered. At
+  // this point we can request ownership of the DBus service name and continue
+  // initialization.
+  void OnDBusRegistered(bool succeeded);
+
+  // |SystemState| is a global context, but we can't make it a static singleton
+  // because the style guide does not allow that (it has a non-trivial
+  // destructor). Instead we keep an instance of |SystemState| in this class
+  // and keep a global pointer to it. It is declared as the first member of
+  // this class so it is initialized first and destroyed last.
+  RealSystemState system_state_;
+
+  // Main D-Bus service adaptor.
+  std::unique_ptr<UpdateEngineAdaptor> dbus_adaptor_;
+
+  // The Subprocess singleton class requires a brillo::MessageLoop in the
+  // current thread, so we need to initialize it from this class instead of
+  // the main() function.
+  Subprocess subprocess_;
+
+  DISALLOW_COPY_AND_ASSIGN(DaemonChromeOS);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_CROS_DAEMON_CHROMEOS_H_
diff --git a/dbus_connection.cc b/cros/dbus_connection.cc
similarity index 96%
rename from dbus_connection.cc
rename to cros/dbus_connection.cc
index cf17ec9..6808bae 100644
--- a/dbus_connection.cc
+++ b/cros/dbus_connection.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/dbus_connection.h"
+#include "update_engine/cros/dbus_connection.h"
 
 #include <base/time/time.h>
 
diff --git a/dbus_connection.h b/cros/dbus_connection.h
similarity index 87%
rename from dbus_connection.h
rename to cros/dbus_connection.h
index c3205ba..8f0d6f1 100644
--- a/dbus_connection.h
+++ b/cros/dbus_connection.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_DBUS_CONNECTION_H_
-#define UPDATE_ENGINE_DBUS_CONNECTION_H_
+#ifndef UPDATE_ENGINE_CROS_DBUS_CONNECTION_H_
+#define UPDATE_ENGINE_CROS_DBUS_CONNECTION_H_
 
 #include <base/memory/ref_counted.h>
 #include <brillo/dbus/dbus_connection.h>
@@ -41,4 +41,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_DBUS_CONNECTION_H_
+#endif  // UPDATE_ENGINE_CROS_DBUS_CONNECTION_H_
diff --git a/dbus_service.cc b/cros/dbus_service.cc
similarity index 72%
rename from dbus_service.cc
rename to cros/dbus_service.cc
index 7296053..1eb7b3c 100644
--- a/dbus_service.cc
+++ b/cros/dbus_service.cc
@@ -14,15 +14,15 @@
 // limitations under the License.
 //
 
-#include "update_engine/dbus_service.h"
+#include "update_engine/cros/dbus_service.h"
 
 #include <string>
 #include <vector>
 
 #include <update_engine/dbus-constants.h>
-#include <update_engine/proto_bindings/update_engine.pb.h>
 
-#include "update_engine/dbus_connection.h"
+#include "update_engine/cros/dbus_connection.h"
+#include "update_engine/proto_bindings/update_engine.pb.h"
 #include "update_engine/update_status_utils.h"
 
 namespace chromeos_update_engine {
@@ -31,10 +31,29 @@
 using chromeos_update_engine::UpdateEngineService;
 using std::string;
 using std::vector;
+using update_engine::Operation;
+using update_engine::StatusResult;
 using update_engine::UpdateEngineStatus;
 
-DBusUpdateEngineService::DBusUpdateEngineService(SystemState* system_state)
-    : common_(new UpdateEngineService{system_state}) {}
+namespace {
+// Converts the internal |UpdateEngineStatus| to the protobuf |StatusResult|.
+void ConvertToStatusResult(const UpdateEngineStatus& ue_status,
+                           StatusResult* out_status) {
+  out_status->set_last_checked_time(ue_status.last_checked_time);
+  out_status->set_progress(ue_status.progress);
+  out_status->set_current_operation(static_cast<Operation>(ue_status.status));
+  out_status->set_new_version(ue_status.new_version);
+  out_status->set_new_size(ue_status.new_size_bytes);
+  out_status->set_is_enterprise_rollback(ue_status.is_enterprise_rollback);
+  out_status->set_is_install(ue_status.is_install);
+  out_status->set_eol_date(ue_status.eol_date);
+  out_status->set_will_powerwash_after_reboot(
+      ue_status.will_powerwash_after_reboot);
+}
+}  // namespace
+
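+// The D-Bus service delegates all method calls to the common
+// UpdateEngineService instance (|common_|).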
+DBusUpdateEngineService::DBusUpdateEngineService()
+    : common_(new UpdateEngineService()) {}
 
 // org::chromium::UpdateEngineInterfaceInterface methods implementation.
 
@@ -63,26 +82,9 @@
 }
 
 bool DBusUpdateEngineService::AttemptInstall(ErrorPtr* error,
-                                             const string& dlc_request) {
-  // Parse the raw parameters into protobuf.
-  DlcParameters dlc_parameters;
-  if (!dlc_parameters.ParseFromString(dlc_request)) {
-    *error = brillo::Error::Create(
-        FROM_HERE, "update_engine", "INTERNAL", "parameters are invalid.");
-    return false;
-  }
-  // Extract fields from the protobuf.
-  vector<string> dlc_module_ids;
-  for (const auto& dlc_info : dlc_parameters.dlc_infos()) {
-    if (dlc_info.dlc_id().empty()) {
-      *error = brillo::Error::Create(
-          FROM_HERE, "update_engine", "INTERNAL", "parameters are invalid.");
-      return false;
-    }
-    dlc_module_ids.push_back(dlc_info.dlc_id());
-  }
-  return common_->AttemptInstall(
-      error, dlc_parameters.omaha_url(), dlc_module_ids);
+                                             const string& in_omaha_url,
+                                             const vector<string>& dlc_ids) {
+  return common_->AttemptInstall(error, in_omaha_url, dlc_ids);
 }
 
 bool DBusUpdateEngineService::AttemptRollback(ErrorPtr* error,
@@ -99,21 +101,20 @@
   return common_->ResetStatus(error);
 }
 
-bool DBusUpdateEngineService::GetStatus(ErrorPtr* error,
-                                        int64_t* out_last_checked_time,
-                                        double* out_progress,
-                                        string* out_current_operation,
-                                        string* out_new_version,
-                                        int64_t* out_new_size) {
+bool DBusUpdateEngineService::SetDlcActiveValue(brillo::ErrorPtr* error,
+                                                bool is_active,
+                                                const string& dlc_id) {
+  return common_->SetDlcActiveValue(error, is_active, dlc_id);
+}
+
+bool DBusUpdateEngineService::GetStatusAdvanced(ErrorPtr* error,
+                                                StatusResult* out_status) {
   UpdateEngineStatus status;
   if (!common_->GetStatus(error, &status)) {
     return false;
   }
-  *out_last_checked_time = status.last_checked_time;
-  *out_progress = status.progress;
-  *out_current_operation = UpdateStatusToString(status.status);
-  *out_new_version = status.new_version;
-  *out_new_size = status.new_size_bytes;
+
+  ConvertToStatusResult(status, out_status);
   return true;
 }
 
@@ -191,15 +192,10 @@
   return common_->GetLastAttemptError(error, out_last_attempt_error);
 }
 
-bool DBusUpdateEngineService::GetEolStatus(ErrorPtr* error,
-                                           int32_t* out_eol_status) {
-  return common_->GetEolStatus(error, out_eol_status);
-}
-
-UpdateEngineAdaptor::UpdateEngineAdaptor(SystemState* system_state)
+UpdateEngineAdaptor::UpdateEngineAdaptor()
     : org::chromium::UpdateEngineInterfaceAdaptor(&dbus_service_),
       bus_(DBusConnection::Get()->GetDBus()),
-      dbus_service_(system_state),
+      dbus_service_(),
       dbus_object_(nullptr,
                    bus_,
                    dbus::ObjectPath(update_engine::kUpdateEngineServicePath)) {}
@@ -217,11 +213,11 @@
 
 void UpdateEngineAdaptor::SendStatusUpdate(
     const UpdateEngineStatus& update_engine_status) {
-  SendStatusUpdateSignal(update_engine_status.last_checked_time,
-                         update_engine_status.progress,
-                         UpdateStatusToString(update_engine_status.status),
-                         update_engine_status.new_version,
-                         update_engine_status.new_size_bytes);
+  StatusResult status;
+  ConvertToStatusResult(update_engine_status, &status);
+
+  // Send |StatusUpdateAdvanced| signal.
+  SendStatusUpdateAdvancedSignal(status);
 }
 
 }  // namespace chromeos_update_engine
diff --git a/dbus_service.h b/cros/dbus_service.h
similarity index 86%
rename from dbus_service.h
rename to cros/dbus_service.h
index 134461b..3ad6589 100644
--- a/dbus_service.h
+++ b/cros/dbus_service.h
@@ -14,20 +14,22 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_DBUS_SERVICE_H_
-#define UPDATE_ENGINE_DBUS_SERVICE_H_
+#ifndef UPDATE_ENGINE_CROS_DBUS_SERVICE_H_
+#define UPDATE_ENGINE_CROS_DBUS_SERVICE_H_
 
 #include <inttypes.h>
 
 #include <memory>
 #include <string>
+#include <vector>
 
 #include <base/memory/ref_counted.h>
 #include <brillo/errors/error.h>
+#include <update_engine/proto_bindings/update_engine.pb.h>
 
-#include "update_engine/common_service.h"
-#include "update_engine/service_observer_interface.h"
-#include "update_engine/update_attempter.h"
+#include "update_engine/common/service_observer_interface.h"
+#include "update_engine/cros/common_service.h"
+#include "update_engine/cros/update_attempter.h"
 
 #include "dbus_bindings/org.chromium.UpdateEngineInterface.h"
 
@@ -36,7 +38,7 @@
 class DBusUpdateEngineService
     : public org::chromium::UpdateEngineInterfaceInterface {
  public:
-  explicit DBusUpdateEngineService(SystemState* system_state);
+  DBusUpdateEngineService();
   virtual ~DBusUpdateEngineService() = default;
 
   // Implementation of org::chromium::UpdateEngineInterfaceInterface.
@@ -50,7 +52,8 @@
                               int32_t in_flags_as_int) override;
 
   bool AttemptInstall(brillo::ErrorPtr* error,
-                      const std::string& dlc_request) override;
+                      const std::string& in_omaha_url,
+                      const std::vector<std::string>& dlc_ids) override;
 
   bool AttemptRollback(brillo::ErrorPtr* error, bool in_powerwash) override;
 
@@ -62,15 +65,17 @@
   // update. This is used for development only.
   bool ResetStatus(brillo::ErrorPtr* error) override;
 
-  // Returns the current status of the Update Engine. If an update is in
-  // progress, the number of operations, size to download and overall progress
-  // is reported.
-  bool GetStatus(brillo::ErrorPtr* error,
-                 int64_t* out_last_checked_time,
-                 double* out_progress,
-                 std::string* out_current_operation,
-                 std::string* out_new_version,
-                 int64_t* out_new_size) override;
+  // Sets the DLC as active or inactive. When set to active, the ping metadata
+  // for the DLC is updated accordingly. When set to inactive, the metadata
+  // for the DLC is deleted.
+  bool SetDlcActiveValue(brillo::ErrorPtr* error,
+                         bool is_active,
+                         const std::string& dlc_id) override;
+
+  // Returns the current status of the Update Engine as a protobuf. Unlike the
+  // removed GetStatus(), the protobuf can easily be extended with more fields
+  // in the future.
+  bool GetStatusAdvanced(brillo::ErrorPtr* error,
+                         update_engine::StatusResult* out_status) override;
 
   // Reboots the device if an update is applied and a reboot is required.
   bool RebootIfNeeded(brillo::ErrorPtr* error) override;
@@ -150,9 +155,6 @@
   bool GetLastAttemptError(brillo::ErrorPtr* error,
                            int32_t* out_last_attempt_error) override;
 
-  // Returns the current end-of-life status of the device in |out_eol_status|.
-  bool GetEolStatus(brillo::ErrorPtr* error, int32_t* out_eol_status) override;
-
  private:
   std::unique_ptr<UpdateEngineService> common_;
 };
@@ -163,7 +165,7 @@
 class UpdateEngineAdaptor : public org::chromium::UpdateEngineInterfaceAdaptor,
                             public ServiceObserverInterface {
  public:
-  explicit UpdateEngineAdaptor(SystemState* system_state);
+  UpdateEngineAdaptor();
   ~UpdateEngineAdaptor() = default;
 
   // Register the DBus object with the update engine service asynchronously.
@@ -189,4 +191,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_DBUS_SERVICE_H_
+#endif  // UPDATE_ENGINE_CROS_DBUS_SERVICE_H_
diff --git a/dbus_test_utils.h b/cros/dbus_test_utils.h
similarity index 81%
rename from dbus_test_utils.h
rename to cros/dbus_test_utils.h
index b3748ce..1116c52 100644
--- a/dbus_test_utils.h
+++ b/cros/dbus_test_utils.h
@@ -14,11 +14,13 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_DBUS_TEST_UTILS_H_
-#define UPDATE_ENGINE_DBUS_TEST_UTILS_H_
+#ifndef UPDATE_ENGINE_CROS_DBUS_TEST_UTILS_H_
+#define UPDATE_ENGINE_CROS_DBUS_TEST_UTILS_H_
 
+#include <memory>
 #include <set>
 #include <string>
+#include <utility>
 
 #include <base/bind.h>
 #include <brillo/message_loops/message_loop.h>
@@ -27,13 +29,13 @@
 namespace chromeos_update_engine {
 namespace dbus_test_utils {
 
-#define MOCK_SIGNAL_HANDLER_EXPECT_SIGNAL_HANDLER(                           \
-    mock_signal_handler, mock_proxy, signal)                                 \
-  do {                                                                       \
-    EXPECT_CALL((mock_proxy),                                                \
-                Register##signal##SignalHandler(::testing::_, ::testing::_)) \
-        .WillOnce(::chromeos_update_engine::dbus_test_utils::GrabCallbacks(  \
-            &(mock_signal_handler)));                                        \
+#define MOCK_SIGNAL_HANDLER_EXPECT_SIGNAL_HANDLER(                             \
+    mock_signal_handler, mock_proxy, signal)                                   \
+  do {                                                                         \
+    EXPECT_CALL((mock_proxy),                                                  \
+                DoRegister##signal##SignalHandler(::testing::_, ::testing::_)) \
+        .WillOnce(::chromeos_update_engine::dbus_test_utils::GrabCallbacks(    \
+            &(mock_signal_handler)));                                          \
   } while (false)
 
 template <typename T>
@@ -52,10 +54,10 @@
 
   void GrabCallbacks(
       const base::Callback<T>& signal_callback,
-      dbus::ObjectProxy::OnConnectedCallback on_connected_callback) {
+      dbus::ObjectProxy::OnConnectedCallback* on_connected_callback) {
     signal_callback_.reset(new base::Callback<T>(signal_callback));
-    on_connected_callback_.reset(
-        new dbus::ObjectProxy::OnConnectedCallback(on_connected_callback));
+    on_connected_callback_.reset(new dbus::ObjectProxy::OnConnectedCallback(
+        std::move(*on_connected_callback)));
     // Notify from the main loop that the callback was connected.
     callback_connected_task_ = brillo::MessageLoop::current()->PostTask(
         FROM_HERE,
@@ -66,7 +68,7 @@
  private:
   void OnCallbackConnected() {
     callback_connected_task_ = brillo::MessageLoop::kTaskIdNull;
-    on_connected_callback_->Run("", "", true);
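+    // The callback may only be run once, so move it out before running it.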
+    std::move(*on_connected_callback_).Run("", "", true);
   }
 
   brillo::MessageLoop::TaskId callback_connected_task_{
@@ -86,4 +88,4 @@
 }  // namespace dbus_test_utils
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_DBUS_TEST_UTILS_H_
+#endif  // UPDATE_ENGINE_CROS_DBUS_TEST_UTILS_H_
diff --git a/cros/dlcservice_chromeos.cc b/cros/dlcservice_chromeos.cc
new file mode 100644
index 0000000..e510c1d
--- /dev/null
+++ b/cros/dlcservice_chromeos.cc
@@ -0,0 +1,77 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/cros/dlcservice_chromeos.h"
+
+#include <brillo/errors/error.h>
+#include <dlcservice/proto_bindings/dlcservice.pb.h>
+// NOLINTNEXTLINE(build/include_alpha) "dbus-proxies.h" needs "dlcservice.pb.h"
+#include <dlcservice/dbus-proxies.h>
+
+#include "update_engine/cros/dbus_connection.h"
+
+using std::string;
+using std::vector;
+
+namespace chromeos_update_engine {
+
+namespace {
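+// Returns a dlcservice proxy created on the shared D-Bus connection.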
+org::chromium::DlcServiceInterfaceProxy GetDlcServiceProxy() {
+  return {DBusConnection::Get()->GetDBus()};
+}
+}  // namespace
+
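+// Factory for the DlcServiceInterface implementation used on Chrome OS.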
+std::unique_ptr<DlcServiceInterface> CreateDlcService() {
+  return std::make_unique<DlcServiceChromeOS>();
+}
+
+bool DlcServiceChromeOS::GetDlcsToUpdate(vector<string>* dlc_ids) {
+  if (!dlc_ids)
+    return false;
+  dlc_ids->clear();
+
+  brillo::ErrorPtr err;
+  if (!GetDlcServiceProxy().GetDlcsToUpdate(dlc_ids, &err)) {
+    LOG(ERROR) << "dlcservice failed to return DLCs that need to be updated. "
+               << "ErrorCode=" << err->GetCode()
+               << ", ErrMsg=" << err->GetMessage();
+    dlc_ids->clear();
+    return false;
+  }
+  return true;
+}
+
+bool DlcServiceChromeOS::InstallCompleted(const vector<string>& dlc_ids) {
+  brillo::ErrorPtr err;
+  if (!GetDlcServiceProxy().InstallCompleted(dlc_ids, &err)) {
+    LOG(ERROR) << "dlcservice failed to complete install. ErrCode="
+               << err->GetCode() << ", ErrMsg=" << err->GetMessage();
+    return false;
+  }
+  return true;
+}
+
+bool DlcServiceChromeOS::UpdateCompleted(const vector<string>& dlc_ids) {
+  brillo::ErrorPtr err;
+  if (!GetDlcServiceProxy().UpdateCompleted(dlc_ids, &err)) {
+    LOG(ERROR) << "dlcservice failed to complete updated. ErrCode="
+               << err->GetCode() << ", ErrMsg=" << err->GetMessage();
+    return false;
+  }
+  return true;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/cros/dlcservice_chromeos.h b/cros/dlcservice_chromeos.h
new file mode 100644
index 0000000..3f11b12
--- /dev/null
+++ b/cros/dlcservice_chromeos.h
@@ -0,0 +1,55 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_CROS_DLCSERVICE_CHROMEOS_H_
+#define UPDATE_ENGINE_CROS_DLCSERVICE_CHROMEOS_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "update_engine/common/dlcservice_interface.h"
+
+namespace chromeos_update_engine {
+
+// The Chrome OS implementation of the DlcServiceInterface. It interacts with
+// dlcservice via D-Bus.
+class DlcServiceChromeOS : public DlcServiceInterface {
+ public:
+  DlcServiceChromeOS() = default;
+  ~DlcServiceChromeOS() = default;
+
+  // DlcServiceInterface overrides.
+
+  // Clears |dlc_ids| before populating it. Clearing by default avoids
+  // indeterminate behavior when |dlc_ids| wasn't empty to begin with (which
+  // could otherwise lead to duplicates) and keeps the output safe to use even
+  // when the returned error is not checked.
+  bool GetDlcsToUpdate(std::vector<std::string>* dlc_ids) override;
+
+  // Call into dlcservice for it to mark the DLC IDs as being installed.
+  bool InstallCompleted(const std::vector<std::string>& dlc_ids) override;
+
+  // Call into dlcservice for it to mark the DLC IDs as being updated.
+  bool UpdateCompleted(const std::vector<std::string>& dlc_ids) override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(DlcServiceChromeOS);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_CROS_DLCSERVICE_CHROMEOS_H_
diff --git a/payload_consumer/download_action.cc b/cros/download_action_chromeos.cc
similarity index 74%
rename from payload_consumer/download_action.cc
rename to cros/download_action_chromeos.cc
index 09afc42..ee9c9a7 100644
--- a/payload_consumer/download_action.cc
+++ b/cros/download_action_chromeos.cc
@@ -1,5 +1,5 @@
 //
-// Copyright (C) 2011 The Android Open Source Project
+// Copyright (C) 2020 The Android Open Source Project
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/payload_consumer/download_action.h"
+#include "update_engine/cros/download_action_chromeos.h"
 
 #include <errno.h>
 
@@ -29,41 +29,37 @@
 #include "update_engine/common/boot_control_interface.h"
 #include "update_engine/common/error_code_utils.h"
 #include "update_engine/common/multi_range_http_fetcher.h"
+#include "update_engine/common/system_state.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/omaha_request_params.h"
-#include "update_engine/p2p_manager.h"
-#include "update_engine/payload_state_interface.h"
+#include "update_engine/cros/omaha_request_params.h"
+#include "update_engine/cros/p2p_manager.h"
+#include "update_engine/cros/payload_state_interface.h"
 
 using base::FilePath;
 using std::string;
 
 namespace chromeos_update_engine {
 
-DownloadAction::DownloadAction(PrefsInterface* prefs,
-                               BootControlInterface* boot_control,
-                               HardwareInterface* hardware,
-                               SystemState* system_state,
-                               HttpFetcher* http_fetcher,
-                               bool interactive)
+DownloadActionChromeos::DownloadActionChromeos(
+    PrefsInterface* prefs,
+    BootControlInterface* boot_control,
+    HardwareInterface* hardware,
+    HttpFetcher* http_fetcher,
+    bool interactive)
     : prefs_(prefs),
       boot_control_(boot_control),
       hardware_(hardware),
-      system_state_(system_state),
       http_fetcher_(new MultiRangeHttpFetcher(http_fetcher)),
       interactive_(interactive),
       writer_(nullptr),
       code_(ErrorCode::kSuccess),
       delegate_(nullptr),
       p2p_sharing_fd_(-1),
-      p2p_visible_(true) {
-#if BASE_VER < 576279
-  base::StatisticsRecorder::Initialize();
-#endif
-}
+      p2p_visible_(true) {}
 
-DownloadAction::~DownloadAction() {}
+DownloadActionChromeos::~DownloadActionChromeos() {}
 
-void DownloadAction::CloseP2PSharingFd(bool delete_p2p_file) {
+void DownloadActionChromeos::CloseP2PSharingFd(bool delete_p2p_file) {
   if (p2p_sharing_fd_ != -1) {
     if (close(p2p_sharing_fd_) != 0) {
       PLOG(ERROR) << "Error closing p2p sharing fd";
@@ -72,7 +68,8 @@
   }
 
   if (delete_p2p_file) {
-    FilePath path = system_state_->p2p_manager()->FileGetPath(p2p_file_id_);
+    FilePath path =
+        SystemState::Get()->p2p_manager()->FileGetPath(p2p_file_id_);
     if (unlink(path.value().c_str()) != 0) {
       PLOG(ERROR) << "Error deleting p2p file " << path.value();
     } else {
@@ -84,8 +81,8 @@
   p2p_file_id_.clear();
 }
 
-bool DownloadAction::SetupP2PSharingFd() {
-  P2PManager* p2p_manager = system_state_->p2p_manager();
+bool DownloadActionChromeos::SetupP2PSharingFd() {
+  P2PManager* p2p_manager = SystemState::Get()->p2p_manager();
 
   if (!p2p_manager->FileShare(p2p_file_id_, payload_->size)) {
     LOG(ERROR) << "Unable to share file via p2p";
@@ -120,9 +117,9 @@
   return true;
 }
 
-void DownloadAction::WriteToP2PFile(const void* data,
-                                    size_t length,
-                                    off_t file_offset) {
+void DownloadActionChromeos::WriteToP2PFile(const void* data,
+                                            size_t length,
+                                            off_t file_offset) {
   if (p2p_sharing_fd_ == -1) {
     if (!SetupP2PSharingFd())
       return;
@@ -167,7 +164,7 @@
   }
 }
 
-void DownloadAction::PerformAction() {
+void DownloadActionChromeos::PerformAction() {
   http_fetcher_->set_delegate(this);
 
   // Get the InstallPlan and read it
@@ -206,18 +203,76 @@
   StartDownloading();
 }
 
-void DownloadAction::StartDownloading() {
+bool DownloadActionChromeos::LoadCachedManifest(int64_t manifest_size) {
+  std::string cached_manifest_bytes;
+  if (!prefs_->GetString(kPrefsManifestBytes, &cached_manifest_bytes) ||
+      cached_manifest_bytes.size() <= 0) {
+    LOG(INFO) << "Cached Manifest data not found";
+    return false;
+  }
+  if (static_cast<int64_t>(cached_manifest_bytes.size()) != manifest_size) {
+    LOG(WARNING) << "Cached metadata has unexpected size: "
+                 << cached_manifest_bytes.size() << " vs. " << manifest_size;
+    return false;
+  }
+
+  ErrorCode error;
+  const bool success =
+      delta_performer_->Write(
+          cached_manifest_bytes.data(), cached_manifest_bytes.size(), &error) &&
+      delta_performer_->IsManifestValid();
+  if (success) {
+    LOG(INFO) << "Successfully parsed cached manifest";
+  } else {
+    // If parsing of the cached data failed, fall back to fetching it over HTTP.
+    LOG(WARNING) << "Cached manifest data failed to load, error code: "
+                 << static_cast<int>(error) << ", " << error;
+  }
+  return success;
+}
+
+void DownloadActionChromeos::StartDownloading() {
   download_active_ = true;
   http_fetcher_->ClearRanges();
+
+  if (writer_ && writer_ != delta_performer_.get()) {
+    LOG(INFO) << "Using writer for test.";
+  } else {
+    delta_performer_.reset(new DeltaPerformer(prefs_,
+                                              boot_control_,
+                                              hardware_,
+                                              delegate_,
+                                              &install_plan_,
+                                              payload_,
+                                              interactive_));
+    writer_ = delta_performer_.get();
+  }
+
   if (install_plan_.is_resume &&
       payload_ == &install_plan_.payloads[resume_payload_index_]) {
-    // Resuming an update so fetch the update manifest metadata first.
+    // Resuming an update, so parse the cached manifest first.
     int64_t manifest_metadata_size = 0;
     int64_t manifest_signature_size = 0;
     prefs_->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size);
     prefs_->GetInt64(kPrefsManifestSignatureSize, &manifest_signature_size);
-    http_fetcher_->AddRange(base_offset_,
-                            manifest_metadata_size + manifest_signature_size);
+
+    // TODO(zhangkelvin) Add unittest for success and fallback route
+    if (!LoadCachedManifest(manifest_metadata_size + manifest_signature_size)) {
+      if (delta_performer_) {
+        // Create a new DeltaPerformer to reset all its state
+        delta_performer_ = std::make_unique<DeltaPerformer>(prefs_,
+                                                            boot_control_,
+                                                            hardware_,
+                                                            delegate_,
+                                                            &install_plan_,
+                                                            payload_,
+                                                            interactive_);
+        writer_ = delta_performer_.get();
+      }
+      http_fetcher_->AddRange(base_offset_,
+                              manifest_metadata_size + manifest_signature_size);
+    }
+
     // If there're remaining unprocessed data blobs, fetch them. Be careful not
     // to request data beyond the end of the payload to avoid 416 HTTP response
     // error codes.
@@ -241,20 +296,9 @@
     }
   }
 
-  if (writer_ && writer_ != delta_performer_.get()) {
-    LOG(INFO) << "Using writer for test.";
-  } else {
-    delta_performer_.reset(new DeltaPerformer(prefs_,
-                                              boot_control_,
-                                              hardware_,
-                                              delegate_,
-                                              &install_plan_,
-                                              payload_,
-                                              interactive_));
-    writer_ = delta_performer_.get();
-  }
-  if (system_state_ != nullptr) {
-    const PayloadStateInterface* payload_state = system_state_->payload_state();
+  if (SystemState::Get() != nullptr) {
+    const PayloadStateInterface* payload_state =
+        SystemState::Get()->payload_state();
     string file_id = utils::CalculateP2PFileId(payload_->hash, payload_->size);
     if (payload_state->GetUsingP2PForSharing()) {
       // If we're sharing the update, store the file_id to convey
@@ -267,7 +311,7 @@
       // hash. If this is the case, we NEED to clean it up otherwise
       // we're essentially timing out other peers downloading from us
       // (since we're never going to complete the file).
-      FilePath path = system_state_->p2p_manager()->FileGetPath(file_id);
+      FilePath path = SystemState::Get()->p2p_manager()->FileGetPath(file_id);
       if (!path.empty()) {
         if (unlink(path.value().c_str()) != 0) {
           PLOG(ERROR) << "Error deleting p2p file " << path.value();
@@ -293,15 +337,15 @@
   http_fetcher_->BeginTransfer(install_plan_.download_url);
 }
 
-void DownloadAction::SuspendAction() {
+void DownloadActionChromeos::SuspendAction() {
   http_fetcher_->Pause();
 }
 
-void DownloadAction::ResumeAction() {
+void DownloadActionChromeos::ResumeAction() {
   http_fetcher_->Unpause();
 }
 
-void DownloadAction::TerminateProcessing() {
+void DownloadActionChromeos::TerminateProcessing() {
   if (writer_) {
     writer_->Close();
     writer_ = nullptr;
@@ -313,13 +357,13 @@
   http_fetcher_->TerminateTransfer();
 }
 
-void DownloadAction::SeekToOffset(off_t offset) {
+void DownloadActionChromeos::SeekToOffset(off_t offset) {
   bytes_received_ = offset;
 }
 
-bool DownloadAction::ReceivedBytes(HttpFetcher* fetcher,
-                                   const void* bytes,
-                                   size_t length) {
+bool DownloadActionChromeos::ReceivedBytes(HttpFetcher* fetcher,
+                                           const void* bytes,
+                                           size_t length) {
   // Note that bytes_received_ is the current offset.
   if (!p2p_file_id_.empty()) {
     WriteToP2PFile(bytes, length, bytes_received_);
@@ -349,16 +393,17 @@
 
   // Call p2p_manager_->FileMakeVisible() when we've successfully
   // verified the manifest!
-  if (!p2p_visible_ && system_state_ && delta_performer_.get() &&
+  if (!p2p_visible_ && SystemState::Get() && delta_performer_.get() &&
       delta_performer_->IsManifestValid()) {
     LOG(INFO) << "Manifest has been validated. Making p2p file visible.";
-    system_state_->p2p_manager()->FileMakeVisible(p2p_file_id_);
+    SystemState::Get()->p2p_manager()->FileMakeVisible(p2p_file_id_);
     p2p_visible_ = true;
   }
   return true;
 }
 
-void DownloadAction::TransferComplete(HttpFetcher* fetcher, bool successful) {
+void DownloadActionChromeos::TransferComplete(HttpFetcher* fetcher,
+                                              bool successful) {
   if (writer_) {
     LOG_IF(WARNING, writer_->Close() != 0) << "Error closing the writer.";
     if (delta_performer_.get() == writer_) {
@@ -374,7 +419,7 @@
       code = delta_performer_->VerifyPayload(payload_->hash, payload_->size);
     if (code == ErrorCode::kSuccess) {
       if (payload_ < &install_plan_.payloads.back() &&
-          system_state_->payload_state()->NextPayload()) {
+          SystemState::Get()->payload_state()->NextPayload()) {
         LOG(INFO) << "Incrementing to next payload";
         // No need to reset if this payload was already applied.
         if (delta_performer_ && !payload_->already_applied)
@@ -383,7 +428,7 @@
         bytes_received_previous_payloads_ += payload_->size;
         payload_++;
         install_plan_.download_url =
-            system_state_->payload_state()->GetCurrentUrl();
+            SystemState::Get()->payload_state()->GetCurrentUrl();
         StartDownloading();
         return;
       }
@@ -392,11 +437,9 @@
       if (delegate_)
         delegate_->DownloadComplete();
 
-      // Log UpdateEngine.DownloadAction.* histograms to help diagnose
-      // long-blocking operations.
       std::string histogram_output;
-      base::StatisticsRecorder::WriteGraph("UpdateEngine.DownloadAction.",
-                                           &histogram_output);
+      base::StatisticsRecorder::WriteGraph(
+          "UpdateEngine.DownloadActionChromeos.", &histogram_output);
       LOG(INFO) << histogram_output;
     } else {
       LOG(ERROR) << "Download of " << install_plan_.download_url
@@ -413,7 +456,7 @@
   processor_->ActionComplete(this, code);
 }
 
-void DownloadAction::TransferTerminated(HttpFetcher* fetcher) {
+void DownloadActionChromeos::TransferTerminated(HttpFetcher* fetcher) {
   if (code_ != ErrorCode::kSuccess) {
     processor_->ActionComplete(this, code_);
   } else if (payload_->already_applied) {
diff --git a/payload_consumer/download_action.h b/cros/download_action_chromeos.h
similarity index 71%
copy from payload_consumer/download_action.h
copy to cros/download_action_chromeos.h
index 1777e22..068946a 100644
--- a/payload_consumer/download_action.h
+++ b/cros/download_action_chromeos.h
@@ -1,5 +1,5 @@
 //
-// Copyright (C) 2011 The Android Open Source Project
+// Copyright (C) 2020 The Android Open Source Project
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_DOWNLOAD_ACTION_H_
-#define UPDATE_ENGINE_PAYLOAD_CONSUMER_DOWNLOAD_ACTION_H_
+#ifndef UPDATE_ENGINE_CROS_DOWNLOAD_ACTION_CHROMEOS_H_
+#define UPDATE_ENGINE_CROS_DOWNLOAD_ACTION_CHROMEOS_H_
 
 #include <fcntl.h>
 #include <sys/stat.h>
@@ -26,11 +26,11 @@
 
 #include "update_engine/common/action.h"
 #include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/download_action.h"
 #include "update_engine/common/http_fetcher.h"
 #include "update_engine/common/multi_range_http_fetcher.h"
 #include "update_engine/payload_consumer/delta_performer.h"
 #include "update_engine/payload_consumer/install_plan.h"
-#include "update_engine/system_state.h"
 
 // The Download Action downloads a specified url to disk. The url should point
 // to an update in a delta payload format. The payload will be piped into a
@@ -38,48 +38,23 @@
 
 namespace chromeos_update_engine {
 
-class DownloadActionDelegate {
- public:
-  virtual ~DownloadActionDelegate() = default;
-
-  // Called periodically after bytes are received. This method will be invoked
-  // only if the DownloadAction is running. |bytes_progressed| is the number of
-  // bytes downloaded since the last call of this method, |bytes_received|
-  // the number of bytes downloaded thus far and |total| is the number of bytes
-  // expected.
-  virtual void BytesReceived(uint64_t bytes_progressed,
-                             uint64_t bytes_received,
-                             uint64_t total) = 0;
-
-  // Returns whether the download should be canceled, in which case the
-  // |cancel_reason| error should be set to the reason why the download was
-  // canceled.
-  virtual bool ShouldCancel(ErrorCode* cancel_reason) = 0;
-
-  // Called once the complete payload has been downloaded. Note that any errors
-  // while applying or downloading the partial payload will result in this
-  // method not being called.
-  virtual void DownloadComplete() = 0;
-};
-
 class PrefsInterface;
 
-class DownloadAction : public InstallPlanAction, public HttpFetcherDelegate {
+class DownloadActionChromeos : public InstallPlanAction,
+                               public HttpFetcherDelegate {
  public:
-  // Debugging/logging
-  static std::string StaticType() { return "DownloadAction"; }
+  static std::string StaticType() { return "DownloadActionChromeos"; }
 
   // Takes ownership of the passed in HttpFetcher. Useful for testing.
   // A good calling pattern is:
-  // DownloadAction(prefs, boot_contol, hardware, system_state,
+  // DownloadActionChromeos(prefs, boot_control, hardware,
   //                new WhateverHttpFetcher, false);
-  DownloadAction(PrefsInterface* prefs,
-                 BootControlInterface* boot_control,
-                 HardwareInterface* hardware,
-                 SystemState* system_state,
-                 HttpFetcher* http_fetcher,
-                 bool interactive);
-  ~DownloadAction() override;
+  DownloadActionChromeos(PrefsInterface* prefs,
+                         BootControlInterface* boot_control,
+                         HardwareInterface* hardware,
+                         HttpFetcher* http_fetcher,
+                         bool interactive);
+  ~DownloadActionChromeos() override;
 
   // InstallPlanAction overrides.
   void PerformAction() override;
@@ -124,30 +99,27 @@
   bool SetupP2PSharingFd();
 
   // Writes |length| bytes of payload from |data| into |file_offset|
-  // of the p2p file. Also does sanity checks; for example ensures we
+  // of the p2p file. Also does validation checks; for example, it ensures we
   // don't end up with a file with holes in it.
   //
   // This method does nothing if SetupP2PSharingFd() hasn't been
   // called or if CloseP2PSharingFd() has been called.
   void WriteToP2PFile(const void* data, size_t length, off_t file_offset);
 
+  // Attempts to load cached manifest data from prefs.
+  // Returns true on success, false otherwise.
+  bool LoadCachedManifest(int64_t manifest_size);
+
   // Start downloading the current payload using delta_performer.
   void StartDownloading();
 
-  // The InstallPlan passed in
-  InstallPlan install_plan_;
-
   // Pointer to the current payload in install_plan_.payloads.
   InstallPlan::Payload* payload_{nullptr};
 
-  // SystemState required pointers.
   PrefsInterface* prefs_;
   BootControlInterface* boot_control_;
   HardwareInterface* hardware_;
 
-  // Global context for the system.
-  SystemState* system_state_;
-
   // Pointer to the MultiRangeHttpFetcher that does the http work.
   std::unique_ptr<MultiRangeHttpFetcher> http_fetcher_;
 
@@ -190,7 +162,7 @@
   // Offset of the payload in the download URL, used by UpdateAttempterAndroid.
   int64_t base_offset_{0};
 
-  DISALLOW_COPY_AND_ASSIGN(DownloadAction);
+  DISALLOW_COPY_AND_ASSIGN(DownloadActionChromeos);
 };
 
 // We want to be sure that we're compiled with large file support on linux,
@@ -199,4 +171,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_PAYLOAD_CONSUMER_DOWNLOAD_ACTION_H_
+#endif  // UPDATE_ENGINE_CROS_DOWNLOAD_ACTION_CHROMEOS_H_
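A minimal construction sketch matching the calling pattern described in the header comment; MakeDownloadAction is a hypothetical helper, the caller is assumed to keep |prefs|, |boot_control| and |hardware| alive for the action's lifetime, and the action takes ownership of the fetcher:

#include <memory>

#include "update_engine/cros/download_action_chromeos.h"

namespace chromeos_update_engine {

// Hypothetical wiring helper for a non-interactive download.
std::unique_ptr<DownloadActionChromeos> MakeDownloadAction(
    PrefsInterface* prefs,
    BootControlInterface* boot_control,
    HardwareInterface* hardware,
    HttpFetcher* fetcher) {
  // DownloadActionChromeos takes ownership of |fetcher|.
  return std::make_unique<DownloadActionChromeos>(
      prefs, boot_control, hardware, fetcher, false /* interactive */);
}

}  // namespace chromeos_update_engine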
diff --git a/payload_consumer/download_action_unittest.cc b/cros/download_action_chromeos_unittest.cc
similarity index 87%
rename from payload_consumer/download_action_unittest.cc
rename to cros/download_action_chromeos_unittest.cc
index e6ca219..93c39ff 100644
--- a/payload_consumer/download_action_unittest.cc
+++ b/cros/download_action_chromeos_unittest.cc
@@ -14,8 +14,6 @@
 // limitations under the License.
 //
 
-#include "update_engine/payload_consumer/download_action.h"
-
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
 
@@ -34,14 +32,14 @@
 
 #include "update_engine/common/action_pipe.h"
 #include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/mock_download_action.h"
 #include "update_engine/common/mock_http_fetcher.h"
-#include "update_engine/common/mock_prefs.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/fake_p2p_manager_configuration.h"
-#include "update_engine/fake_system_state.h"
-#include "update_engine/mock_file_writer.h"
-#include "update_engine/payload_consumer/mock_download_action.h"
+#include "update_engine/cros/download_action_chromeos.h"
+#include "update_engine/cros/fake_p2p_manager_configuration.h"
+#include "update_engine/cros/fake_system_state.h"
+#include "update_engine/payload_consumer/mock_file_writer.h"
 #include "update_engine/update_manager/fake_update_manager.h"
 
 namespace chromeos_update_engine {
@@ -51,14 +49,15 @@
 using base::WriteFile;
 using std::string;
 using std::unique_ptr;
-using test_utils::ScopedTempFile;
 using testing::_;
 using testing::AtLeast;
 using testing::InSequence;
 using testing::Return;
 using testing::SetArgPointee;
 
-class DownloadActionTest : public ::testing::Test {};
+class DownloadActionChromeosTest : public ::testing::Test {
+  void SetUp() { FakeSystemState::CreateInstance(); }
+};
 
 namespace {
 
@@ -87,9 +86,10 @@
                        AbstractAction* action,
                        ErrorCode code) override {
     const string type = action->Type();
-    if (type == DownloadAction::StaticType()) {
+    if (type == DownloadActionChromeos::StaticType()) {
       EXPECT_EQ(expected_code_, code);
-      p2p_file_id_ = static_cast<DownloadAction*>(action)->p2p_file_id();
+      p2p_file_id_ =
+          static_cast<DownloadActionChromeos*>(action)->p2p_file_id();
     } else {
       EXPECT_EQ(ErrorCode::kSuccess, code);
     }
@@ -129,11 +129,10 @@
 void TestWithData(const brillo::Blob& data,
                   int fail_write,
                   bool use_download_delegate) {
+  FakeSystemState::CreateInstance();
   brillo::FakeMessageLoop loop(nullptr);
   loop.SetAsCurrent();
-  FakeSystemState fake_system_state;
 
-  // TODO(adlr): see if we need a different file for build bots
   ScopedTempFile output_temp_file;
   TestDirectFileWriter writer;
   EXPECT_EQ(
@@ -151,9 +150,9 @@
   install_plan.target_slot = 1;
   // We mark both slots as bootable. Only the target slot should be unbootable
   // after the download starts.
-  fake_system_state.fake_boot_control()->SetSlotBootable(
+  FakeSystemState::Get()->fake_boot_control()->SetSlotBootable(
       install_plan.source_slot, true);
-  fake_system_state.fake_boot_control()->SetSlotBootable(
+  FakeSystemState::Get()->fake_boot_control()->SetSlotBootable(
       install_plan.target_slot, true);
   auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
   feeder_action->set_obj(install_plan);
@@ -161,13 +160,12 @@
   MockHttpFetcher* http_fetcher =
       new MockHttpFetcher(data.data(), data.size(), nullptr);
   // takes ownership of passed in HttpFetcher
-  auto download_action =
-      std::make_unique<DownloadAction>(&prefs,
-                                       fake_system_state.boot_control(),
-                                       fake_system_state.hardware(),
-                                       &fake_system_state,
-                                       http_fetcher,
-                                       false /* interactive */);
+  auto download_action = std::make_unique<DownloadActionChromeos>(
+      &prefs,
+      FakeSystemState::Get()->boot_control(),
+      FakeSystemState::Get()->hardware(),
+      http_fetcher,
+      false /* interactive */);
   download_action->SetTestFileWriter(&writer);
   BondActions(feeder_action.get(), download_action.get());
   MockDownloadActionDelegate download_delegate;
@@ -196,9 +194,9 @@
   loop.Run();
   EXPECT_FALSE(loop.PendingTasks());
 
-  EXPECT_TRUE(fake_system_state.fake_boot_control()->IsSlotBootable(
+  EXPECT_TRUE(FakeSystemState::Get()->fake_boot_control()->IsSlotBootable(
       install_plan.source_slot));
-  EXPECT_FALSE(fake_system_state.fake_boot_control()->IsSlotBootable(
+  EXPECT_FALSE(FakeSystemState::Get()->fake_boot_control()->IsSlotBootable(
       install_plan.target_slot));
 }
 }  // namespace
@@ -253,8 +251,7 @@
   payload_datas.emplace_back(2 * kMockHttpFetcherChunkSize);
   brillo::FakeMessageLoop loop(nullptr);
   loop.SetAsCurrent();
-  FakeSystemState fake_system_state;
-  EXPECT_CALL(*fake_system_state.mock_payload_state(), NextPayload())
+  EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(), NextPayload())
       .WillOnce(Return(true));
 
   MockFileWriter mock_file_writer;
@@ -277,13 +274,12 @@
   MockHttpFetcher* http_fetcher = new MockHttpFetcher(
       payload_datas[0].data(), payload_datas[0].size(), nullptr);
   // takes ownership of passed in HttpFetcher
-  auto download_action =
-      std::make_unique<DownloadAction>(&prefs,
-                                       fake_system_state.boot_control(),
-                                       fake_system_state.hardware(),
-                                       &fake_system_state,
-                                       http_fetcher,
-                                       false /* interactive */);
+  auto download_action = std::make_unique<DownloadActionChromeos>(
+      &prefs,
+      FakeSystemState::Get()->boot_control(),
+      FakeSystemState::Get()->hardware(),
+      http_fetcher,
+      false /* interactive */);
   download_action->SetTestFileWriter(&mock_file_writer);
   BondActions(feeder_action.get(), download_action.get());
   MockDownloadActionDelegate download_delegate;
@@ -348,6 +344,7 @@
 }
 
 void TestTerminateEarly(bool use_download_delegate) {
+  FakeSystemState::CreateInstance();
   brillo::FakeMessageLoop loop(nullptr);
   loop.SetAsCurrent();
 
@@ -364,13 +361,12 @@
     InstallPlan install_plan;
     install_plan.payloads.resize(1);
     feeder_action->set_obj(install_plan);
-    FakeSystemState fake_system_state_;
+
     MockPrefs prefs;
     auto download_action = std::make_unique<DownloadAction>(
         &prefs,
-        fake_system_state_.boot_control(),
-        fake_system_state_.hardware(),
-        &fake_system_state_,
+        FakeSystemState::Get()->boot_control(),
+        FakeSystemState::Get()->hardware(),
         new MockHttpFetcher(data.data(), data.size(), nullptr),
         false /* interactive */);
     download_action->SetTestFileWriter(&writer);
@@ -463,6 +459,7 @@
 }  // namespace
 
 TEST(DownloadActionTest, PassObjectOutTest) {
+  FakeSystemState::CreateInstance();
   brillo::FakeMessageLoop loop(nullptr);
   loop.SetAsCurrent();
 
@@ -477,12 +474,10 @@
   auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
   feeder_action->set_obj(install_plan);
   MockPrefs prefs;
-  FakeSystemState fake_system_state_;
   auto download_action =
       std::make_unique<DownloadAction>(&prefs,
-                                       fake_system_state_.boot_control(),
-                                       fake_system_state_.hardware(),
-                                       &fake_system_state_,
+                                       FakeSystemState::Get()->boot_control(),
+                                       FakeSystemState::Get()->hardware(),
                                        new MockHttpFetcher("x", 1, nullptr),
                                        false /* interactive */);
   download_action->SetTestFileWriter(&writer);
@@ -513,13 +508,15 @@
 // Test fixture for P2P tests.
 class P2PDownloadActionTest : public testing::Test {
  protected:
-  P2PDownloadActionTest()
-      : start_at_offset_(0), fake_um_(fake_system_state_.fake_clock()) {}
+  P2PDownloadActionTest() : start_at_offset_(0) {}
 
   ~P2PDownloadActionTest() override {}
 
   // Derived from testing::Test.
-  void SetUp() override { loop_.SetAsCurrent(); }
+  void SetUp() override {
+    loop_.SetAsCurrent();
+    FakeSystemState::CreateInstance();
+  }
 
   // Derived from testing::Test.
   void TearDown() override { EXPECT_FALSE(loop_.PendingTasks()); }
@@ -535,20 +532,15 @@
 
     // Setup p2p.
     FakeP2PManagerConfiguration* test_conf = new FakeP2PManagerConfiguration();
-    p2p_manager_.reset(P2PManager::Construct(test_conf,
-                                             nullptr,
-                                             &fake_um_,
-                                             "cros_au",
-                                             3,
-                                             base::TimeDelta::FromDays(5)));
-    fake_system_state_.set_p2p_manager(p2p_manager_.get());
+    p2p_manager_.reset(P2PManager::Construct(
+        test_conf, &fake_um_, "cros_au", 3, base::TimeDelta::FromDays(5)));
   }
 
   // To be called by tests to perform the download. The
   // |use_p2p_to_share| parameter is used to indicate whether the
   // payload should be shared via p2p.
   void StartDownload(bool use_p2p_to_share) {
-    EXPECT_CALL(*fake_system_state_.mock_payload_state(),
+    EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(),
                 GetUsingP2PForSharing())
         .WillRepeatedly(Return(use_p2p_to_share));
 
@@ -566,9 +558,8 @@
     // Note that DownloadAction takes ownership of the passed in HttpFetcher.
     auto download_action = std::make_unique<DownloadAction>(
         &prefs,
-        fake_system_state_.boot_control(),
-        fake_system_state_.hardware(),
-        &fake_system_state_,
+        FakeSystemState::Get()->boot_control(),
+        FakeSystemState::Get()->hardware(),
         new MockHttpFetcher(data_.c_str(), data_.length(), nullptr),
         false /* interactive */);
     auto http_fetcher = download_action->http_fetcher();
@@ -605,9 +596,6 @@
   // The ActionProcessor used for running the actions.
   ActionProcessor processor_;
 
-  // A fake system state.
-  FakeSystemState fake_system_state_;
-
   // The data being downloaded.
   string data_;
 
diff --git a/cros/excluder_chromeos.cc b/cros/excluder_chromeos.cc
new file mode 100644
index 0000000..35154d6
--- /dev/null
+++ b/cros/excluder_chromeos.cc
@@ -0,0 +1,63 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/cros/excluder_chromeos.h"
+
+#include <memory>
+#include <vector>
+
+#include <base/logging.h>
+#include <base/strings/string_piece.h>
+#include <base/strings/string_number_conversions.h>
+#include <base/strings/string_split.h>
+
+#include "update_engine/common/constants.h"
+#include "update_engine/common/system_state.h"
+
+using std::string;
+using std::vector;
+
+namespace chromeos_update_engine {
+
+std::unique_ptr<ExcluderInterface> CreateExcluder() {
+  return std::make_unique<ExcluderChromeOS>();
+}
+
+bool ExcluderChromeOS::Exclude(const string& name) {
+  auto* prefs = SystemState::Get()->prefs();
+  auto key = prefs->CreateSubKey({kExclusionPrefsSubDir, name});
+  return prefs->SetString(key, "");
+}
+
+bool ExcluderChromeOS::IsExcluded(const string& name) {
+  auto* prefs = SystemState::Get()->prefs();
+  auto key = prefs->CreateSubKey({kExclusionPrefsSubDir, name});
+  return prefs->Exists(key);
+}
+
+bool ExcluderChromeOS::Reset() {
+  auto* prefs = SystemState::Get()->prefs();
+  bool ret = true;
+  vector<string> keys;
+  if (!prefs->GetSubKeys(kExclusionPrefsSubDir, &keys))
+    return false;
+  for (const auto& key : keys)
+    if (!(ret &= prefs->Delete(key)))
+      LOG(ERROR) << "Failed to delete exclusion pref for " << key;
+  return ret;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/cros/excluder_chromeos.h b/cros/excluder_chromeos.h
new file mode 100644
index 0000000..7d3efc9
--- /dev/null
+++ b/cros/excluder_chromeos.h
@@ -0,0 +1,49 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_CROS_EXCLUDER_CHROMEOS_H_
+#define UPDATE_ENGINE_CROS_EXCLUDER_CHROMEOS_H_
+
+#include <string>
+
+#include "update_engine/common/excluder_interface.h"
+#include "update_engine/common/prefs_interface.h"
+
+namespace chromeos_update_engine {
+
+class SystemState;
+
+// The Chrome OS implementation of the |ExcluderInterface|.
+class ExcluderChromeOS : public ExcluderInterface {
+ public:
+  ExcluderChromeOS() = default;
+  ~ExcluderChromeOS() = default;
+
+  // |ExcluderInterface| overrides.
+  bool Exclude(const std::string& name) override;
+  bool IsExcluded(const std::string& name) override;
+  bool Reset() override;
+
+  // Not copyable or movable.
+  ExcluderChromeOS(const ExcluderChromeOS&) = delete;
+  ExcluderChromeOS& operator=(const ExcluderChromeOS&) = delete;
+  ExcluderChromeOS(ExcluderChromeOS&&) = delete;
+  ExcluderChromeOS& operator=(ExcluderChromeOS&&) = delete;
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_CROS_EXCLUDER_CHROMEOS_H_
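A short usage sketch for the class above, assuming SystemState has been initialized so the prefs-backed exclusion keys can be written; the function name and the logging are illustrative only:

#include <string>

#include <base/logging.h>

#include "update_engine/cros/excluder_chromeos.h"

namespace chromeos_update_engine {

// Hypothetical call site: persist an exclusion for a payload hash, then check
// that it stuck. Reset() would later drop all exclusions again.
void ExcludePayload(const std::string& payload_hash) {
  ExcluderChromeOS excluder;
  if (!excluder.Exclude(payload_hash)) {
    LOG(WARNING) << "Failed to persist exclusion for " << payload_hash;
    return;
  }
  LOG(INFO) << payload_hash
            << " excluded: " << excluder.IsExcluded(payload_hash);
}

}  // namespace chromeos_update_engine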
diff --git a/cros/excluder_chromeos_unittest.cc b/cros/excluder_chromeos_unittest.cc
new file mode 100644
index 0000000..fd70818
--- /dev/null
+++ b/cros/excluder_chromeos_unittest.cc
@@ -0,0 +1,54 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/cros/excluder_chromeos.h"
+
+#include <gtest/gtest.h>
+
+#include "update_engine/cros/fake_system_state.h"
+
+namespace chromeos_update_engine {
+
+namespace {
+constexpr char kFakeHash[] =
+    "71ff43d76e2488e394e46872f5b066cc25e394c2c3e3790dd319517883b33db1";
+}  // namespace
+
+class ExcluderChromeOSTest : public ::testing::Test {
+ protected:
+  void SetUp() override { FakeSystemState::CreateInstance(); }
+
+  ExcluderChromeOS excluder_;
+};
+
+TEST_F(ExcluderChromeOSTest, ExclusionCheck) {
+  EXPECT_FALSE(excluder_.IsExcluded(kFakeHash));
+  EXPECT_TRUE(excluder_.Exclude(kFakeHash));
+  EXPECT_TRUE(excluder_.IsExcluded(kFakeHash));
+}
+
+TEST_F(ExcluderChromeOSTest, ResetFlow) {
+  EXPECT_TRUE(excluder_.Exclude("abc"));
+  EXPECT_TRUE(excluder_.Exclude(kFakeHash));
+  EXPECT_TRUE(excluder_.IsExcluded("abc"));
+  EXPECT_TRUE(excluder_.IsExcluded(kFakeHash));
+
+  EXPECT_TRUE(excluder_.Reset());
+  EXPECT_FALSE(excluder_.IsExcluded("abc"));
+  EXPECT_FALSE(excluder_.IsExcluded(kFakeHash));
+}
+
+}  // namespace chromeos_update_engine
diff --git a/fake_p2p_manager.h b/cros/fake_p2p_manager.h
similarity index 94%
rename from fake_p2p_manager.h
rename to cros/fake_p2p_manager.h
index 1f8ae95..1011b7e 100644
--- a/fake_p2p_manager.h
+++ b/cros/fake_p2p_manager.h
@@ -14,12 +14,12 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_FAKE_P2P_MANAGER_H_
-#define UPDATE_ENGINE_FAKE_P2P_MANAGER_H_
+#ifndef UPDATE_ENGINE_CROS_FAKE_P2P_MANAGER_H_
+#define UPDATE_ENGINE_CROS_FAKE_P2P_MANAGER_H_
 
 #include <string>
 
-#include "update_engine/p2p_manager.h"
+#include "update_engine/cros/p2p_manager.h"
 
 namespace chromeos_update_engine {
 
@@ -109,4 +109,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_FAKE_P2P_MANAGER_H_
+#endif  // UPDATE_ENGINE_CROS_FAKE_P2P_MANAGER_H_
diff --git a/fake_p2p_manager_configuration.h b/cros/fake_p2p_manager_configuration.h
similarity index 93%
rename from fake_p2p_manager_configuration.h
rename to cros/fake_p2p_manager_configuration.h
index f5b0e80..8d50ac8 100644
--- a/fake_p2p_manager_configuration.h
+++ b/cros/fake_p2p_manager_configuration.h
@@ -14,10 +14,10 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_FAKE_P2P_MANAGER_CONFIGURATION_H_
-#define UPDATE_ENGINE_FAKE_P2P_MANAGER_CONFIGURATION_H_
+#ifndef UPDATE_ENGINE_CROS_FAKE_P2P_MANAGER_CONFIGURATION_H_
+#define UPDATE_ENGINE_CROS_FAKE_P2P_MANAGER_CONFIGURATION_H_
 
-#include "update_engine/p2p_manager.h"
+#include "update_engine/cros/p2p_manager.h"
 
 #include <string>
 #include <vector>
@@ -99,4 +99,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_FAKE_P2P_MANAGER_CONFIGURATION_H_
+#endif  // UPDATE_ENGINE_CROS_FAKE_P2P_MANAGER_CONFIGURATION_H_
diff --git a/fake_shill_proxy.cc b/cros/fake_shill_proxy.cc
similarity index 96%
rename from fake_shill_proxy.cc
rename to cros/fake_shill_proxy.cc
index de96511..2d05a6b 100644
--- a/fake_shill_proxy.cc
+++ b/cros/fake_shill_proxy.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/fake_shill_proxy.h"
+#include "update_engine/cros/fake_shill_proxy.h"
 
 #include <utility>
 
diff --git a/fake_shill_proxy.h b/cros/fake_shill_proxy.h
similarity index 90%
rename from fake_shill_proxy.h
rename to cros/fake_shill_proxy.h
index ae17eaa..8c15a9d 100644
--- a/fake_shill_proxy.h
+++ b/cros/fake_shill_proxy.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_FAKE_SHILL_PROXY_H_
-#define UPDATE_ENGINE_FAKE_SHILL_PROXY_H_
+#ifndef UPDATE_ENGINE_CROS_FAKE_SHILL_PROXY_H_
+#define UPDATE_ENGINE_CROS_FAKE_SHILL_PROXY_H_
 
 #include <map>
 #include <memory>
@@ -25,7 +25,7 @@
 #include <shill/dbus-proxies.h>
 #include <shill/dbus-proxy-mocks.h>
 
-#include "update_engine/shill_proxy_interface.h"
+#include "update_engine/cros/shill_proxy_interface.h"
 
 namespace chromeos_update_engine {
 
@@ -63,4 +63,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_FAKE_SHILL_PROXY_H_
+#endif  // UPDATE_ENGINE_CROS_FAKE_SHILL_PROXY_H_
diff --git a/fake_system_state.cc b/cros/fake_system_state.cc
similarity index 81%
rename from fake_system_state.cc
rename to cros/fake_system_state.cc
index 1bfcafa..7673b1d 100644
--- a/fake_system_state.cc
+++ b/cros/fake_system_state.cc
@@ -14,22 +14,20 @@
 // limitations under the License.
 //
 
-#include "update_engine/fake_system_state.h"
+#include "update_engine/cros/fake_system_state.h"
 
 namespace chromeos_update_engine {
 
 // Mock the SystemStateInterface so that we could lie that
 // OOBE is completed even when there's no such marker file, etc.
 FakeSystemState::FakeSystemState()
-    : mock_update_attempter_(this, nullptr),
-      mock_request_params_(this),
-      fake_update_manager_(&fake_clock_),
+    : mock_update_attempter_(nullptr),
       clock_(&fake_clock_),
       connection_manager_(&mock_connection_manager_),
       hardware_(&fake_hardware_),
       metrics_reporter_(&mock_metrics_reporter_),
-      prefs_(&mock_prefs_),
-      powerwash_safe_prefs_(&mock_powerwash_safe_prefs_),
+      prefs_(&fake_prefs_),
+      powerwash_safe_prefs_(&fake_powerwash_safe_prefs_),
       payload_state_(&mock_payload_state_),
       update_attempter_(&mock_update_attempter_),
       request_params_(&mock_request_params_),
@@ -37,7 +35,7 @@
       update_manager_(&fake_update_manager_),
       device_policy_(nullptr),
       fake_system_rebooted_(false) {
-  mock_payload_state_.Initialize(this);
+  mock_payload_state_.Initialize();
 }
 
 }  // namespace chromeos_update_engine
diff --git a/fake_system_state.h b/cros/fake_system_state.h
similarity index 86%
rename from fake_system_state.h
rename to cros/fake_system_state.h
index 24b1eec..da36306 100644
--- a/fake_system_state.h
+++ b/cros/fake_system_state.h
@@ -14,8 +14,10 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_FAKE_SYSTEM_STATE_H_
-#define UPDATE_ENGINE_FAKE_SYSTEM_STATE_H_
+#ifndef UPDATE_ENGINE_CROS_FAKE_SYSTEM_STATE_H_
+#define UPDATE_ENGINE_CROS_FAKE_SYSTEM_STATE_H_
+
+#include <memory>
 
 #include <base/logging.h>
 #include <gmock/gmock.h>
@@ -25,15 +27,16 @@
 #include "update_engine/common/fake_boot_control.h"
 #include "update_engine/common/fake_clock.h"
 #include "update_engine/common/fake_hardware.h"
+#include "update_engine/common/fake_prefs.h"
+#include "update_engine/common/mock_metrics_reporter.h"
 #include "update_engine/common/mock_prefs.h"
-#include "update_engine/mock_connection_manager.h"
-#include "update_engine/mock_metrics_reporter.h"
-#include "update_engine/mock_omaha_request_params.h"
-#include "update_engine/mock_p2p_manager.h"
-#include "update_engine/mock_payload_state.h"
-#include "update_engine/mock_power_manager.h"
-#include "update_engine/mock_update_attempter.h"
-#include "update_engine/system_state.h"
+#include "update_engine/common/system_state.h"
+#include "update_engine/cros/mock_connection_manager.h"
+#include "update_engine/cros/mock_omaha_request_params.h"
+#include "update_engine/cros/mock_p2p_manager.h"
+#include "update_engine/cros/mock_payload_state.h"
+#include "update_engine/cros/mock_power_manager.h"
+#include "update_engine/cros/mock_update_attempter.h"
 #include "update_engine/update_manager/fake_update_manager.h"
 
 namespace chromeos_update_engine {
@@ -42,7 +45,15 @@
 // OOBE is completed even when there's no such marker file, etc.
 class FakeSystemState : public SystemState {
  public:
-  FakeSystemState();
+  static void CreateInstance() {
+    static std::unique_ptr<FakeSystemState> system_state;
+    system_state.reset(new FakeSystemState());
+    g_pointer_ = system_state.get();
+  }
+
+  static FakeSystemState* Get() {
+    return reinterpret_cast<FakeSystemState*>(g_pointer_);
+  }
 
   // Base class overrides. All getters return the current implementation of
   // various members, either the default (fake/mock) or the one set to override
@@ -196,6 +207,16 @@
     return &fake_hardware_;
   }
 
+  inline FakePrefs* fake_prefs() {
+    CHECK(prefs_ == &fake_prefs_);
+    return &fake_prefs_;
+  }
+
+  inline FakePrefs* fake_powerwash_safe_prefs() {
+    CHECK(powerwash_safe_prefs_ == &fake_powerwash_safe_prefs_);
+    return &fake_powerwash_safe_prefs_;
+  }
+
   inline testing::NiceMock<MockMetricsReporter>* mock_metrics_reporter() {
     CHECK(metrics_reporter_ == &mock_metrics_reporter_);
     return &mock_metrics_reporter_;
@@ -237,11 +258,18 @@
   }
 
  private:
+  // Don't allow direct initialization of this class.
+  FakeSystemState();
+
   // Default mock/fake implementations (owned).
+  chromeos_update_manager::FakeUpdateManager fake_update_manager_;
   FakeBootControl fake_boot_control_;
   FakeClock fake_clock_;
-  testing::NiceMock<MockConnectionManager> mock_connection_manager_;
   FakeHardware fake_hardware_;
+  FakePrefs fake_prefs_;
+  FakePrefs fake_powerwash_safe_prefs_;
+
+  testing::NiceMock<MockConnectionManager> mock_connection_manager_;
   testing::NiceMock<MockMetricsReporter> mock_metrics_reporter_;
   testing::NiceMock<MockPrefs> mock_prefs_;
   testing::NiceMock<MockPrefs> mock_powerwash_safe_prefs_;
@@ -249,7 +277,6 @@
   testing::NiceMock<MockUpdateAttempter> mock_update_attempter_;
   testing::NiceMock<MockOmahaRequestParams> mock_request_params_;
   testing::NiceMock<MockP2PManager> mock_p2p_manager_;
-  chromeos_update_manager::FakeUpdateManager fake_update_manager_;
   testing::NiceMock<MockPowerManager> mock_power_manager_;
 
   // Pointers to objects that client code can override. They are initialized to
@@ -278,4 +305,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_FAKE_SYSTEM_STATE_H_
+#endif  // UPDATE_ENGINE_CROS_FAKE_SYSTEM_STATE_H_
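A sketch of the test pattern this singleton-style fake enables, mirroring the SetUp() additions elsewhere in this change; the fixture and test names are hypothetical:

#include <gtest/gtest.h>

#include "update_engine/cros/fake_system_state.h"

namespace chromeos_update_engine {

class MyFeatureTest : public ::testing::Test {
 protected:
  // Each test gets a fresh FakeSystemState; code under test reaches it through
  // SystemState::Get(), while the test tweaks it via FakeSystemState::Get().
  void SetUp() override { FakeSystemState::CreateInstance(); }
};

TEST_F(MyFeatureTest, HardwareDefaultsCanBeOverridden) {
  FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(false);
  EXPECT_FALSE(SystemState::Get()->hardware()->IsOfficialBuild());
}

}  // namespace chromeos_update_engine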
diff --git a/hardware_chromeos.cc b/cros/hardware_chromeos.cc
similarity index 77%
rename from hardware_chromeos.cc
rename to cros/hardware_chromeos.cc
index a49375e..ad0a64d 100644
--- a/hardware_chromeos.cc
+++ b/cros/hardware_chromeos.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/hardware_chromeos.h"
+#include "update_engine/cros/hardware_chromeos.h"
 
 #include <utility>
 
@@ -37,7 +37,10 @@
 #include "update_engine/common/platform_constants.h"
 #include "update_engine/common/subprocess.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/dbus_connection.h"
+#include "update_engine/cros/dbus_connection.h"
+#if USE_CFM
+#include "update_engine/cros/requisition_util.h"
+#endif
 
 using std::string;
 using std::vector;
@@ -47,7 +50,7 @@
 const char kOOBECompletedMarker[] = "/home/chronos/.oobe_completed";
 
 // The stateful directory used by update_engine to store powerwash-safe files.
-// The files stored here must be whitelisted in the powerwash scripts.
+// The files stored here must be added to the powerwash script allowlist.
 const char kPowerwashSafeDirectory[] =
     "/mnt/stateful_partition/unencrypted/preserve";
 
@@ -61,6 +64,11 @@
 const char kPowerwashMarkerFile[] =
     "/mnt/stateful_partition/factory_install_reset";
 
+// The name of the marker file used to trigger a save of rollback data
+// during the next shutdown.
+const char kRollbackSaveMarkerFile[] =
+    "/mnt/stateful_partition/.save_rollback_data";
+
 // The contents of the powerwash marker file for the non-rollback case.
 const char kPowerwashCommand[] = "safe fast keepimg reason=update_engine\n";
 
@@ -167,22 +175,13 @@
   return ReadValueFromCrosSystem("hwid");
 }
 
-string HardwareChromeOS::GetFirmwareVersion() const {
-  return ReadValueFromCrosSystem("fwid");
-}
-
-string HardwareChromeOS::GetECVersion() const {
-  string input_line;
-  int exit_code = 0;
-  vector<string> cmd = {"/usr/sbin/mosys", "-k", "ec", "info"};
-
-  bool success = Subprocess::SynchronousExec(cmd, &exit_code, &input_line);
-  if (!success || exit_code) {
-    LOG(ERROR) << "Unable to read ec info from mosys (" << exit_code << ")";
-    return "";
-  }
-
-  return utils::ParseECVersion(input_line);
+string HardwareChromeOS::GetDeviceRequisition() const {
+#if USE_CFM
+  const char* kLocalStatePath = "/home/chronos/Local State";
+  return ReadDeviceRequisition(base::FilePath(kLocalStatePath));
+#else
+  return "";
+#endif
 }
 
 int HardwareChromeOS::GetMinKernelKeyVersion() const {
@@ -226,15 +225,25 @@
   return powerwash_count;
 }
 
-bool HardwareChromeOS::SchedulePowerwash(bool is_rollback) {
+bool HardwareChromeOS::SchedulePowerwash(bool save_rollback_data) {
+  if (save_rollback_data) {
+    if (!utils::WriteFile(kRollbackSaveMarkerFile, nullptr, 0)) {
+      PLOG(ERROR) << "Error in creating rollback save marker file: "
+                  << kRollbackSaveMarkerFile << ". Rollback will not"
+                  << " preserve any data.";
+    } else {
+      LOG(INFO) << "Rollback data save has been scheduled on next shutdown.";
+    }
+  }
+
   const char* powerwash_command =
-      is_rollback ? kRollbackPowerwashCommand : kPowerwashCommand;
+      save_rollback_data ? kRollbackPowerwashCommand : kPowerwashCommand;
   bool result = utils::WriteFile(
       kPowerwashMarkerFile, powerwash_command, strlen(powerwash_command));
   if (result) {
     LOG(INFO) << "Created " << kPowerwashMarkerFile
-              << " to powerwash on next reboot (is_rollback=" << is_rollback
-              << ")";
+              << " to powerwash on next reboot ("
+              << "save_rollback_data=" << save_rollback_data << ")";
   } else {
     PLOG(ERROR) << "Error in creating powerwash marker file: "
                 << kPowerwashMarkerFile;
@@ -244,7 +253,7 @@
 }
 
 bool HardwareChromeOS::CancelPowerwash() {
-  bool result = base::DeleteFile(base::FilePath(kPowerwashMarkerFile), false);
+  bool result = base::DeleteFile(base::FilePath(kPowerwashMarkerFile));
 
   if (result) {
     LOG(INFO) << "Successfully deleted the powerwash marker file : "
@@ -254,6 +263,11 @@
                 << kPowerwashMarkerFile;
   }
 
+  // Delete the rollback save marker file if it existed.
+  if (!base::DeleteFile(base::FilePath(kRollbackSaveMarkerFile))) {
+    PLOG(ERROR) << "Could not remove rollback save marker";
+  }
+
   return result;
 }
 
@@ -291,17 +305,11 @@
 }
 
 bool HardwareChromeOS::GetFirstActiveOmahaPingSent() const {
-  int exit_code = 0;
   string active_ping_str;
-  vector<string> cmd = {"vpd_get_value", kActivePingKey};
-  if (!Subprocess::SynchronousExec(cmd, &exit_code, &active_ping_str) ||
-      exit_code) {
-    LOG(ERROR) << "Failed to get vpd key for " << kActivePingKey
-               << " with exit code: " << exit_code;
+  if (!utils::GetVpdValue(kActivePingKey, &active_ping_str)) {
     return false;
   }
 
-  base::TrimWhitespaceASCII(active_ping_str, base::TRIM_ALL, &active_ping_str);
   int active_ping;
   if (active_ping_str.empty() ||
       !base::StringToInt(active_ping_str, &active_ping)) {
@@ -313,26 +321,51 @@
 
 bool HardwareChromeOS::SetFirstActiveOmahaPingSent() {
   int exit_code = 0;
-  string output;
+  string output, error;
   vector<string> vpd_set_cmd = {
       "vpd", "-i", "RW_VPD", "-s", string(kActivePingKey) + "=1"};
-  if (!Subprocess::SynchronousExec(vpd_set_cmd, &exit_code, &output) ||
+  if (!Subprocess::SynchronousExec(vpd_set_cmd, &exit_code, &output, &error) ||
       exit_code) {
     LOG(ERROR) << "Failed to set vpd key for " << kActivePingKey
-               << " with exit code: " << exit_code << " with error: " << output;
+               << " with exit code: " << exit_code << " with output: " << output
+               << " and error: " << error;
     return false;
+  } else if (!error.empty()) {
+    LOG(INFO) << "vpd succeeded but with error logs: " << error;
   }
 
   vector<string> vpd_dump_cmd = {"dump_vpd_log", "--force"};
-  if (!Subprocess::SynchronousExec(vpd_dump_cmd, &exit_code, &output) ||
+  if (!Subprocess::SynchronousExec(vpd_dump_cmd, &exit_code, &output, &error) ||
       exit_code) {
     LOG(ERROR) << "Failed to cache " << kActivePingKey << " using dump_vpd_log"
-               << " with exit code: " << exit_code << " with error: " << output;
+               << " with exit code: " << exit_code << " with output: " << output
+               << " and error: " << error;
     return false;
+  } else if (!error.empty()) {
+    LOG(INFO) << "dump_vpd_log succeeded but with error logs: " << error;
   }
   return true;
 }
 
 void HardwareChromeOS::SetWarmReset(bool warm_reset) {}
 
+void HardwareChromeOS::SetVbmetaDigestForInactiveSlot(bool reset) {}
+
+std::string HardwareChromeOS::GetVersionForLogging(
+    const std::string& partition_name) const {
+  // TODO(zhangkelvin) Implement per-partition timestamp for Chrome OS.
+  return "";
+}
+
+ErrorCode HardwareChromeOS::IsPartitionUpdateValid(
+    const std::string& partition_name, const std::string& new_version) const {
+  // TODO(zhangkelvin) Implement per-partition timestamp for Chrome OS.
+  return ErrorCode::kSuccess;
+}
+
+const char* HardwareChromeOS::GetPartitionMountOptions(
+    const std::string& partition_name) const {
+  return "";
+}
+
 }  // namespace chromeos_update_engine
diff --git a/hardware_chromeos.h b/cros/hardware_chromeos.h
similarity index 79%
rename from hardware_chromeos.h
rename to cros/hardware_chromeos.h
index 2bea989..a64f804 100644
--- a/hardware_chromeos.h
+++ b/cros/hardware_chromeos.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_HARDWARE_CHROMEOS_H_
-#define UPDATE_ENGINE_HARDWARE_CHROMEOS_H_
+#ifndef UPDATE_ENGINE_CROS_HARDWARE_CHROMEOS_H_
+#define UPDATE_ENGINE_CROS_HARDWARE_CHROMEOS_H_
 
 #include <memory>
 #include <string>
@@ -25,6 +25,7 @@
 #include <base/time/time.h>
 #include <debugd/dbus-proxies.h>
 
+#include "update_engine/common/error_code.h"
 #include "update_engine/common/hardware_interface.h"
 
 namespace chromeos_update_engine {
@@ -45,15 +46,14 @@
   bool IsOOBEEnabled() const override;
   bool IsOOBEComplete(base::Time* out_time_of_oobe) const override;
   std::string GetHardwareClass() const override;
-  std::string GetFirmwareVersion() const override;
-  std::string GetECVersion() const override;
+  std::string GetDeviceRequisition() const override;
   int GetMinKernelKeyVersion() const override;
   int GetMinFirmwareKeyVersion() const override;
   int GetMaxFirmwareKeyRollforward() const override;
   bool SetMaxFirmwareKeyRollforward(int firmware_max_rollforward) override;
   bool SetMaxKernelKeyRollforward(int kernel_max_rollforward) override;
   int GetPowerwashCount() const override;
-  bool SchedulePowerwash(bool is_rollback) override;
+  bool SchedulePowerwash(bool save_rollback_data) override;
   bool CancelPowerwash() override;
   bool GetNonVolatileDirectory(base::FilePath* path) const override;
   bool GetPowerwashSafeDirectory(base::FilePath* path) const override;
@@ -62,6 +62,14 @@
   bool GetFirstActiveOmahaPingSent() const override;
   bool SetFirstActiveOmahaPingSent() override;
   void SetWarmReset(bool warm_reset) override;
+  void SetVbmetaDigestForInactiveSlot(bool reset) override;
+  std::string GetVersionForLogging(
+      const std::string& partition_name) const override;
+  ErrorCode IsPartitionUpdateValid(
+      const std::string& partition_name,
+      const std::string& new_version) const override;
+  const char* GetPartitionMountOptions(
+      const std::string& partition_name) const override;
 
  private:
   friend class HardwareChromeOSTest;
@@ -80,4 +88,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_HARDWARE_CHROMEOS_H_
+#endif  // UPDATE_ENGINE_CROS_HARDWARE_CHROMEOS_H_
diff --git a/hardware_chromeos_unittest.cc b/cros/hardware_chromeos_unittest.cc
similarity index 97%
rename from hardware_chromeos_unittest.cc
rename to cros/hardware_chromeos_unittest.cc
index 162dec4..50bced6 100644
--- a/hardware_chromeos_unittest.cc
+++ b/cros/hardware_chromeos_unittest.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/hardware_chromeos.h"
+#include "update_engine/cros/hardware_chromeos.h"
 
 #include <memory>
 
diff --git a/image_properties.h b/cros/image_properties.h
similarity index 84%
rename from image_properties.h
rename to cros/image_properties.h
index 49fe82f..1297547 100644
--- a/image_properties.h
+++ b/cros/image_properties.h
@@ -18,28 +18,22 @@
 // properties are meant to be constant during the life of this daemon, but can
 // be modified in dev-move or non-official builds.
 
-#ifndef UPDATE_ENGINE_IMAGE_PROPERTIES_H_
-#define UPDATE_ENGINE_IMAGE_PROPERTIES_H_
+#ifndef UPDATE_ENGINE_CROS_IMAGE_PROPERTIES_H_
+#define UPDATE_ENGINE_CROS_IMAGE_PROPERTIES_H_
 
 #include <string>
 
 namespace chromeos_update_engine {
 
-class SystemState;
-
 // The read-only system properties of the running image.
 struct ImageProperties {
   // The product id of the image used for all channels, except canary.
   std::string product_id;
   // The canary-channel product id.
   std::string canary_product_id;
-  // The system id for the Android Things SoM, empty for Chrome OS.
-  std::string system_id;
 
   // The product version of this image.
   std::string version;
-  // The system version of this image.
-  std::string system_version;
 
   // The version of all product components in key values pairs.
   std::string product_components;
@@ -81,16 +75,15 @@
 // Loads all the image properties from the running system. In case of error
 // loading any of these properties from the read-only system image a default
 // value may be returned instead.
-ImageProperties LoadImageProperties(SystemState* system_state);
+ImageProperties LoadImageProperties();
 
 // Loads the mutable image properties from the stateful partition if found or
 // the system image otherwise.
-MutableImageProperties LoadMutableImageProperties(SystemState* system_state);
+MutableImageProperties LoadMutableImageProperties();
 
 // Stores the mutable image properties in the stateful partition. Returns
 // whether the operation succeeded.
-bool StoreMutableImageProperties(SystemState* system_state,
-                                 const MutableImageProperties& properties);
+bool StoreMutableImageProperties(const MutableImageProperties& properties);
 
 // Logs the image properties.
 void LogImageProperties();
@@ -103,4 +96,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_IMAGE_PROPERTIES_H_
+#endif  // UPDATE_ENGINE_CROS_IMAGE_PROPERTIES_H_
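A small sketch of the argument-free API above, where the implicit SystemState::Get() dependency replaces the old SystemState* parameter; the helper name is hypothetical and SystemState is assumed to be initialized first:

#include <string>

#include "update_engine/cros/image_properties.h"

namespace chromeos_update_engine {

// Hypothetical call site: change the target channel and persist it to the
// stateful partition.
bool SwitchTargetChannel(const std::string& channel) {
  MutableImageProperties props = LoadMutableImageProperties();
  props.target_channel = channel;
  return StoreMutableImageProperties(props);
}

}  // namespace chromeos_update_engine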
diff --git a/image_properties_chromeos.cc b/cros/image_properties_chromeos.cc
similarity index 93%
rename from image_properties_chromeos.cc
rename to cros/image_properties_chromeos.cc
index 5ab8f05..79155b5 100644
--- a/image_properties_chromeos.cc
+++ b/cros/image_properties_chromeos.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/image_properties.h"
+#include "update_engine/cros/image_properties.h"
 
 #include <string>
 #include <vector>
@@ -26,8 +26,8 @@
 #include "update_engine/common/constants.h"
 #include "update_engine/common/hardware_interface.h"
 #include "update_engine/common/platform_constants.h"
+#include "update_engine/common/system_state.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/system_state.h"
 
 namespace {
 
@@ -86,7 +86,7 @@
 }
 }  // namespace test
 
-ImageProperties LoadImageProperties(SystemState* system_state) {
+ImageProperties LoadImageProperties() {
   ImageProperties result;
 
   brillo::KeyValueStore lsb_release;
@@ -97,7 +97,7 @@
   // In dev-mode and unofficial build we can override the image properties set
   // in the system image with the ones from the stateful partition, except the
   // channel of the current image.
-  HardwareInterface* const hardware = system_state->hardware();
+  HardwareInterface* const hardware = SystemState::Get()->hardware();
   if (!hardware->IsOfficialBuild() || !hardware->IsNormalBootMode())
     LoadLsbRelease(LsbReleaseSource::kStateful, &lsb_release);
 
@@ -124,7 +124,7 @@
   return result;
 }
 
-MutableImageProperties LoadMutableImageProperties(SystemState* system_state) {
+MutableImageProperties LoadMutableImageProperties() {
   MutableImageProperties result;
   brillo::KeyValueStore lsb_release;
   LoadLsbRelease(LsbReleaseSource::kSystem, &lsb_release);
@@ -137,8 +137,7 @@
   return result;
 }
 
-bool StoreMutableImageProperties(SystemState* system_state,
-                                 const MutableImageProperties& properties) {
+bool StoreMutableImageProperties(const MutableImageProperties& properties) {
   brillo::KeyValueStore lsb_release;
   LoadLsbRelease(LsbReleaseSource::kStateful, &lsb_release);
   lsb_release.SetString(kLsbReleaseUpdateChannelKey, properties.target_channel);
diff --git a/image_properties_chromeos_unittest.cc b/cros/image_properties_chromeos_unittest.cc
similarity index 84%
rename from image_properties_chromeos_unittest.cc
rename to cros/image_properties_chromeos_unittest.cc
index d9ed688..497554e 100644
--- a/image_properties_chromeos_unittest.cc
+++ b/cros/image_properties_chromeos_unittest.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/image_properties.h"
+#include "update_engine/cros/image_properties.h"
 
 #include <string>
 
@@ -24,7 +24,7 @@
 
 #include "update_engine/common/constants.h"
 #include "update_engine/common/test_utils.h"
-#include "update_engine/fake_system_state.h"
+#include "update_engine/cros/fake_system_state.h"
 
 using chromeos_update_engine::test_utils::WriteFileString;
 using std::string;
@@ -40,16 +40,15 @@
     EXPECT_TRUE(base::CreateDirectory(base::FilePath(
         tempdir_.GetPath().value() + kStatefulPartition + "/etc")));
     test::SetImagePropertiesRootPrefix(tempdir_.GetPath().value().c_str());
+    FakeSystemState::CreateInstance();
     SetLockDown(false);
   }
 
   void SetLockDown(bool locked_down) {
-    fake_system_state_.fake_hardware()->SetIsOfficialBuild(locked_down);
-    fake_system_state_.fake_hardware()->SetIsNormalBootMode(locked_down);
+    FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(locked_down);
+    FakeSystemState::Get()->fake_hardware()->SetIsNormalBootMode(locked_down);
   }
 
-  FakeSystemState fake_system_state_;
-
   base::ScopedTempDir tempdir_;
 };
 
@@ -61,7 +60,7 @@
                       "CHROMEOS_RELEASE_VERSION=0.2.2.3\n"
                       "CHROMEOS_RELEASE_TRACK=dev-channel\n"
                       "CHROMEOS_AUSERVER=http://www.google.com"));
-  ImageProperties props = LoadImageProperties(&fake_system_state_);
+  ImageProperties props = LoadImageProperties();
   EXPECT_EQ("arm-generic", props.board);
   EXPECT_EQ("{87efface-864d-49a5-9bb3-4b050a7c227a}", props.product_id);
   EXPECT_EQ("0.2.2.3", props.version);
@@ -73,7 +72,7 @@
   ASSERT_TRUE(WriteFileString(
       tempdir_.GetPath().Append("etc/lsb-release").value(),
       "CHROMEOS_RELEASE_APPID={58c35cef-9d30-476e-9098-ce20377d535d}"));
-  ImageProperties props = LoadImageProperties(&fake_system_state_);
+  ImageProperties props = LoadImageProperties();
   EXPECT_EQ("{58c35cef-9d30-476e-9098-ce20377d535d}", props.product_id);
 }
 
@@ -82,12 +81,12 @@
       WriteFileString(tempdir_.GetPath().Append("etc/lsb-release").value(),
                       "CHROMEOS_RELEASE_FOO=CHROMEOS_RELEASE_VERSION=1.2.3.4\n"
                       "CHROMEOS_RELEASE_VERSION=0.2.2.3"));
-  ImageProperties props = LoadImageProperties(&fake_system_state_);
+  ImageProperties props = LoadImageProperties();
   EXPECT_EQ("0.2.2.3", props.version);
 }
 
 TEST_F(ImagePropertiesTest, MissingVersionTest) {
-  ImageProperties props = LoadImageProperties(&fake_system_state_);
+  ImageProperties props = LoadImageProperties();
   EXPECT_EQ("", props.version);
 }
 
@@ -103,12 +102,11 @@
       "CHROMEOS_RELEASE_BOARD=x86-generic\n"
       "CHROMEOS_RELEASE_TRACK=beta-channel\n"
       "CHROMEOS_AUSERVER=https://www.google.com"));
-  ImageProperties props = LoadImageProperties(&fake_system_state_);
+  ImageProperties props = LoadImageProperties();
   EXPECT_EQ("x86-generic", props.board);
   EXPECT_EQ("dev-channel", props.current_channel);
   EXPECT_EQ("https://www.google.com", props.omaha_url);
-  MutableImageProperties mutable_props =
-      LoadMutableImageProperties(&fake_system_state_);
+  MutableImageProperties mutable_props = LoadMutableImageProperties();
   EXPECT_EQ("beta-channel", mutable_props.target_channel);
 }
 
@@ -125,12 +123,11 @@
       "CHROMEOS_RELEASE_TRACK=stable-channel\n"
       "CHROMEOS_AUSERVER=http://www.google.com"));
   SetLockDown(true);
-  ImageProperties props = LoadImageProperties(&fake_system_state_);
+  ImageProperties props = LoadImageProperties();
   EXPECT_EQ("arm-generic", props.board);
   EXPECT_EQ("dev-channel", props.current_channel);
   EXPECT_EQ("https://www.google.com", props.omaha_url);
-  MutableImageProperties mutable_props =
-      LoadMutableImageProperties(&fake_system_state_);
+  MutableImageProperties mutable_props = LoadMutableImageProperties();
   EXPECT_EQ("stable-channel", mutable_props.target_channel);
 }
 
@@ -141,7 +138,7 @@
                       "CHROMEOS_BOARD_APPID=b\n"
                       "CHROMEOS_CANARY_APPID=c\n"
                       "CHROMEOS_RELEASE_TRACK=stable-channel\n"));
-  ImageProperties props = LoadImageProperties(&fake_system_state_);
+  ImageProperties props = LoadImageProperties();
   EXPECT_EQ("stable-channel", props.current_channel);
   EXPECT_EQ("b", props.product_id);
 }
@@ -153,7 +150,7 @@
                       "CHROMEOS_BOARD_APPID=b\n"
                       "CHROMEOS_CANARY_APPID=c\n"
                       "CHROMEOS_RELEASE_TRACK=canary-channel\n"));
-  ImageProperties props = LoadImageProperties(&fake_system_state_);
+  ImageProperties props = LoadImageProperties();
   EXPECT_EQ("canary-channel", props.current_channel);
   EXPECT_EQ("c", props.canary_product_id);
 }
@@ -164,7 +161,7 @@
                       "CHROMEOS_RELEASE_APPID=r\n"
                       "CHROMEOS_CANARY_APPID=c\n"
                       "CHROMEOS_RELEASE_TRACK=stable-channel\n"));
-  ImageProperties props = LoadImageProperties(&fake_system_state_);
+  ImageProperties props = LoadImageProperties();
   EXPECT_EQ("stable-channel", props.current_channel);
   EXPECT_EQ("r", props.product_id);
 }
diff --git a/logging.cc b/cros/logging.cc
similarity index 93%
rename from logging.cc
rename to cros/logging.cc
index 6320e36..8b6c556 100644
--- a/logging.cc
+++ b/cros/logging.cc
@@ -25,8 +25,8 @@
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
 
+#include "update_engine/common/logging.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/logging.h"
 
 using std::string;
 
@@ -46,7 +46,7 @@
     base::ReplaceFile(
         base::FilePath(symlink_path), base::FilePath(log_path), nullptr);
   }
-  base::DeleteFile(base::FilePath(symlink_path), true);
+  base::DeletePathRecursively(base::FilePath(symlink_path));
   if (symlink(log_path.c_str(), symlink_path.c_str()) == -1) {
     PLOG(ERROR) << "Unable to create symlink " << symlink_path
                 << " pointing at " << log_path;
@@ -79,7 +79,11 @@
   if (log_to_file) {
     log_file = SetupLogFile(kSystemLogsRoot);
     log_settings.delete_old = logging::APPEND_TO_OLD_LOG_FILE;
+#if BASE_VER < 780000
     log_settings.log_file = log_file.c_str();
+#else
+    log_settings.log_file_path = log_file.c_str();
+#endif
   }
   logging::InitLogging(log_settings);
 }
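The #if BASE_VER < 780000 guard above selects between the libchrome field names log_file and log_file_path. A small sketch of the same version-gating pattern, with a made-up LIB_VER macro and Settings struct standing in for the real libchrome types, is:

#include <string>

#define LIB_VER 790000  // Hypothetical library version supplied by the build.

struct Settings {
#if LIB_VER < 780000
  const char* log_file = nullptr;       // Older releases expose this name.
#else
  const char* log_file_path = nullptr;  // Newer releases renamed the field.
#endif
};

// The caller must keep |path| alive for as long as the settings are used,
// since only a pointer to its buffer is stored.
void ConfigureLogging(Settings* settings, const std::string& path) {
#if LIB_VER < 780000
  settings->log_file = path.c_str();
#else
  settings->log_file_path = path.c_str();
#endif
}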
diff --git a/metrics_reporter_omaha.cc b/cros/metrics_reporter_omaha.cc
similarity index 80%
rename from metrics_reporter_omaha.cc
rename to cros/metrics_reporter_omaha.cc
index 14819d8..69cdb19 100644
--- a/metrics_reporter_omaha.cc
+++ b/cros/metrics_reporter_omaha.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/metrics_reporter_omaha.h"
+#include "update_engine/cros/metrics_reporter_omaha.h"
 
 #include <memory>
 
@@ -22,18 +22,18 @@
 #include <base/strings/string_number_conversions.h>
 #include <metrics/metrics_library.h>
 
-#include "update_engine/common/clock_interface.h"
 #include "update_engine/common/constants.h"
 #include "update_engine/common/prefs_interface.h"
+#include "update_engine/common/system_state.h"
 #include "update_engine/common/utils.h"
+#include "update_engine/cros/omaha_request_params.h"
 #include "update_engine/metrics_utils.h"
-#include "update_engine/omaha_request_params.h"
-#include "update_engine/system_state.h"
 
+using base::Time;
+using base::TimeDelta;
 using std::string;
 
 namespace chromeos_update_engine {
-
 namespace metrics {
 
 // UpdateEngine.Daily.* metrics.
@@ -135,7 +135,8 @@
     "UpdateEngine.InstallDateProvisioningSource";
 const char kMetricTimeToRebootMinutes[] = "UpdateEngine.TimeToRebootMinutes";
 
-std::unique_ptr<MetricsReporterInterface> CreateMetricsReporter() {
+std::unique_ptr<MetricsReporterInterface> CreateMetricsReporter(
+    DynamicPartitionControlInterface* dynamic_partition_control) {
   return std::make_unique<MetricsReporterOmaha>();
 }
 
@@ -144,14 +145,8 @@
 MetricsReporterOmaha::MetricsReporterOmaha()
     : metrics_lib_(new MetricsLibrary()) {}
 
-void MetricsReporterOmaha::Initialize() {
-  metrics_lib_->Init();
-}
-
 void MetricsReporterOmaha::ReportDailyMetrics(base::TimeDelta os_age) {
   string metric = metrics::kMetricDailyOSAgeDays;
-  LOG(INFO) << "Uploading " << utils::FormatTimeDelta(os_age) << " for metric "
-            << metric;
   metrics_lib_->SendToUMA(metric,
                           static_cast<int>(os_age.InDays()),
                           0,       // min: 0 days
@@ -160,7 +155,6 @@
 }
 
 void MetricsReporterOmaha::ReportUpdateCheckMetrics(
-    SystemState* system_state,
     metrics::CheckResult result,
     metrics::CheckReaction reaction,
     metrics::DownloadErrorCode download_error_code) {
@@ -172,31 +166,24 @@
     metric = metrics::kMetricCheckResult;
     value = static_cast<int>(result);
     max_value = static_cast<int>(metrics::CheckResult::kNumConstants) - 1;
-    LOG(INFO) << "Sending " << value << " for metric " << metric << " (enum)";
     metrics_lib_->SendEnumToUMA(metric, value, max_value);
   }
   if (reaction != metrics::CheckReaction::kUnset) {
     metric = metrics::kMetricCheckReaction;
     value = static_cast<int>(reaction);
     max_value = static_cast<int>(metrics::CheckReaction::kNumConstants) - 1;
-    LOG(INFO) << "Sending " << value << " for metric " << metric << " (enum)";
     metrics_lib_->SendEnumToUMA(metric, value, max_value);
   }
   if (download_error_code != metrics::DownloadErrorCode::kUnset) {
     metric = metrics::kMetricCheckDownloadErrorCode;
     value = static_cast<int>(download_error_code);
-    LOG(INFO) << "Sending " << value << " for metric " << metric << " (sparse)";
     metrics_lib_->SendSparseToUMA(metric, value);
   }
 
   base::TimeDelta time_since_last;
-  if (metrics_utils::WallclockDurationHelper(
-          system_state,
-          kPrefsMetricsCheckLastReportingTime,
-          &time_since_last)) {
+  if (WallclockDurationHelper(kPrefsMetricsCheckLastReportingTime,
+                              &time_since_last)) {
     metric = metrics::kMetricCheckTimeSinceLastCheckMinutes;
-    LOG(INFO) << "Sending " << utils::FormatTimeDelta(time_since_last)
-              << " for metric " << metric;
     metrics_lib_->SendToUMA(metric,
                             time_since_last.InMinutes(),
                             0,             // min: 0 min
@@ -206,11 +193,8 @@
 
   base::TimeDelta uptime_since_last;
   static int64_t uptime_since_last_storage = 0;
-  if (metrics_utils::MonotonicDurationHelper(
-          system_state, &uptime_since_last_storage, &uptime_since_last)) {
+  if (MonotonicDurationHelper(&uptime_since_last_storage, &uptime_since_last)) {
     metric = metrics::kMetricCheckTimeSinceLastCheckUptimeMinutes;
-    LOG(INFO) << "Sending " << utils::FormatTimeDelta(uptime_since_last)
-              << " for metric " << metric;
     metrics_lib_->SendToUMA(metric,
                             uptime_since_last.InMinutes(),
                             0,             // min: 0 min
@@ -219,19 +203,15 @@
   }
 
   // First section of target version specified for the update.
-  if (system_state && system_state->request_params()) {
+  if (SystemState::Get()->request_params()) {
     string target_version =
-        system_state->request_params()->target_version_prefix();
+        SystemState::Get()->request_params()->target_version_prefix();
     value = utils::VersionPrefix(target_version);
     if (value != 0) {
       metric = metrics::kMetricCheckTargetVersion;
-      LOG(INFO) << "Sending " << value << " for metric " << metric
-                << " (sparse)";
       metrics_lib_->SendSparseToUMA(metric, value);
-      if (system_state->request_params()->rollback_allowed()) {
+      if (SystemState::Get()->request_params()->rollback_allowed()) {
         metric = metrics::kMetricCheckRollbackTargetVersion;
-        LOG(INFO) << "Sending " << value << " for metric " << metric
-                  << " (sparse)";
         metrics_lib_->SendSparseToUMA(metric, value);
       }
     }
@@ -243,8 +223,6 @@
   metrics::AttemptResult attempt_result =
       metrics::AttemptResult::kAbnormalTermination;
 
-  LOG(INFO) << "Uploading " << static_cast<int>(attempt_result)
-            << " for metric " << metric;
   metrics_lib_->SendEnumToUMA(
       metric,
       static_cast<int>(attempt_result),
@@ -252,7 +230,6 @@
 }
 
 void MetricsReporterOmaha::ReportUpdateAttemptMetrics(
-    SystemState* system_state,
     int attempt_number,
     PayloadType payload_type,
     base::TimeDelta duration,
@@ -261,7 +238,6 @@
     metrics::AttemptResult attempt_result,
     ErrorCode internal_error_code) {
   string metric = metrics::kMetricAttemptNumber;
-  LOG(INFO) << "Uploading " << attempt_number << " for metric " << metric;
   metrics_lib_->SendToUMA(metric,
                           attempt_number,
                           0,    // min: 0 attempts
@@ -269,13 +245,9 @@
                           50);  // num_buckets
 
   metric = metrics::kMetricAttemptPayloadType;
-  LOG(INFO) << "Uploading " << utils::ToString(payload_type) << " for metric "
-            << metric;
   metrics_lib_->SendEnumToUMA(metric, payload_type, kNumPayloadTypes);
 
   metric = metrics::kMetricAttemptDurationMinutes;
-  LOG(INFO) << "Uploading " << utils::FormatTimeDelta(duration)
-            << " for metric " << metric;
   metrics_lib_->SendToUMA(metric,
                           duration.InMinutes(),
                           0,             // min: 0 min
@@ -283,8 +255,6 @@
                           50);           // num_buckets
 
   metric = metrics::kMetricAttemptDurationUptimeMinutes;
-  LOG(INFO) << "Uploading " << utils::FormatTimeDelta(duration_uptime)
-            << " for metric " << metric;
   metrics_lib_->SendToUMA(metric,
                           duration_uptime.InMinutes(),
                           0,             // min: 0 min
@@ -293,7 +263,6 @@
 
   metric = metrics::kMetricAttemptPayloadSizeMiB;
   int64_t payload_size_mib = payload_size / kNumBytesInOneMiB;
-  LOG(INFO) << "Uploading " << payload_size_mib << " for metric " << metric;
   metrics_lib_->SendToUMA(metric,
                           payload_size_mib,
                           0,     // min: 0 MiB
@@ -301,8 +270,6 @@
                           50);   // num_buckets
 
   metric = metrics::kMetricAttemptResult;
-  LOG(INFO) << "Uploading " << static_cast<int>(attempt_result)
-            << " for metric " << metric;
   metrics_lib_->SendEnumToUMA(
       metric,
       static_cast<int>(attempt_result),
@@ -313,13 +280,9 @@
   }
 
   base::TimeDelta time_since_last;
-  if (metrics_utils::WallclockDurationHelper(
-          system_state,
-          kPrefsMetricsAttemptLastReportingTime,
-          &time_since_last)) {
+  if (WallclockDurationHelper(kPrefsMetricsAttemptLastReportingTime,
+                              &time_since_last)) {
     metric = metrics::kMetricAttemptTimeSinceLastAttemptMinutes;
-    LOG(INFO) << "Sending " << utils::FormatTimeDelta(time_since_last)
-              << " for metric " << metric;
     metrics_lib_->SendToUMA(metric,
                             time_since_last.InMinutes(),
                             0,             // min: 0 min
@@ -329,11 +292,8 @@
 
   static int64_t uptime_since_last_storage = 0;
   base::TimeDelta uptime_since_last;
-  if (metrics_utils::MonotonicDurationHelper(
-          system_state, &uptime_since_last_storage, &uptime_since_last)) {
+  if (MonotonicDurationHelper(&uptime_since_last_storage, &uptime_since_last)) {
     metric = metrics::kMetricAttemptTimeSinceLastAttemptUptimeMinutes;
-    LOG(INFO) << "Sending " << utils::FormatTimeDelta(uptime_since_last)
-              << " for metric " << metric;
     metrics_lib_->SendToUMA(metric,
                             uptime_since_last.InMinutes(),
                             0,             // min: 0 min
@@ -351,8 +311,6 @@
   string metric = metrics::kMetricAttemptPayloadBytesDownloadedMiB;
   int64_t payload_bytes_downloaded_mib =
       payload_bytes_downloaded / kNumBytesInOneMiB;
-  LOG(INFO) << "Uploading " << payload_bytes_downloaded_mib << " for metric "
-            << metric;
   metrics_lib_->SendToUMA(metric,
                           payload_bytes_downloaded_mib,
                           0,     // min: 0 MiB
@@ -361,8 +319,6 @@
 
   metric = metrics::kMetricAttemptPayloadDownloadSpeedKBps;
   int64_t payload_download_speed_kbps = payload_download_speed_bps / 1000;
-  LOG(INFO) << "Uploading " << payload_download_speed_kbps << " for metric "
-            << metric;
   metrics_lib_->SendToUMA(metric,
                           payload_download_speed_kbps,
                           0,          // min: 0 kB/s
@@ -370,20 +326,15 @@
                           50);        // num_buckets
 
   metric = metrics::kMetricAttemptDownloadSource;
-  LOG(INFO) << "Uploading " << download_source << " for metric " << metric;
   metrics_lib_->SendEnumToUMA(metric, download_source, kNumDownloadSources);
 
   if (payload_download_error_code != metrics::DownloadErrorCode::kUnset) {
     metric = metrics::kMetricAttemptDownloadErrorCode;
-    LOG(INFO) << "Uploading " << static_cast<int>(payload_download_error_code)
-              << " for metric " << metric << " (sparse)";
     metrics_lib_->SendSparseToUMA(
         metric, static_cast<int>(payload_download_error_code));
   }
 
   metric = metrics::kMetricAttemptConnectionType;
-  LOG(INFO) << "Uploading " << static_cast<int>(connection_type)
-            << " for metric " << metric;
   metrics_lib_->SendEnumToUMA(
       metric,
       static_cast<int>(connection_type),
@@ -403,7 +354,6 @@
     int url_switch_count) {
   string metric = metrics::kMetricSuccessfulUpdatePayloadSizeMiB;
   int64_t mbs = payload_size / kNumBytesInOneMiB;
-  LOG(INFO) << "Uploading " << mbs << " (MiBs) for metric " << metric;
   metrics_lib_->SendToUMA(metric,
                           mbs,
                           0,     // min: 0 MiB
@@ -433,7 +383,6 @@
     }
 
     if (mbs > 0) {
-      LOG(INFO) << "Uploading " << mbs << " (MiBs) for metric " << metric;
       metrics_lib_->SendToUMA(metric,
                               mbs,
                               0,     // min: 0 MiB
@@ -443,8 +392,6 @@
   }
 
   metric = metrics::kMetricSuccessfulUpdateDownloadSourcesUsed;
-  LOG(INFO) << "Uploading 0x" << std::hex << download_sources_used
-            << " (bit flags) for metric " << metric;
   metrics_lib_->SendToUMA(metric,
                           download_sources_used,
                           0,                               // min
@@ -452,8 +399,6 @@
                           1 << kNumDownloadSources);       // num_buckets
 
   metric = metrics::kMetricSuccessfulUpdateDownloadOverheadPercentage;
-  LOG(INFO) << "Uploading " << download_overhead_percentage << "% for metric "
-            << metric;
   metrics_lib_->SendToUMA(metric,
                           download_overhead_percentage,
                           0,     // min: 0% overhead
@@ -461,8 +406,6 @@
                           50);   // num_buckets
 
   metric = metrics::kMetricSuccessfulUpdateUrlSwitchCount;
-  LOG(INFO) << "Uploading " << url_switch_count << " (count) for metric "
-            << metric;
   metrics_lib_->SendToUMA(metric,
                           url_switch_count,
                           0,    // min: 0 URL switches
@@ -470,8 +413,6 @@
                           50);  // num_buckets
 
   metric = metrics::kMetricSuccessfulUpdateTotalDurationMinutes;
-  LOG(INFO) << "Uploading " << utils::FormatTimeDelta(total_duration)
-            << " for metric " << metric;
   metrics_lib_->SendToUMA(metric,
                           static_cast<int>(total_duration.InMinutes()),
                           0,              // min: 0 min
@@ -479,8 +420,6 @@
                           50);            // num_buckets
 
   metric = metrics::kMetricSuccessfulUpdateTotalDurationUptimeMinutes;
-  LOG(INFO) << "Uploading " << utils::FormatTimeDelta(total_duration_uptime)
-            << " for metric " << metric;
   metrics_lib_->SendToUMA(metric,
                           static_cast<int>(total_duration_uptime.InMinutes()),
                           0,             // min: 0 min
@@ -488,8 +427,6 @@
                           50);           // num_buckets
 
   metric = metrics::kMetricSuccessfulUpdateRebootCount;
-  LOG(INFO) << "Uploading reboot count of " << reboot_count << " for metric "
-            << metric;
   metrics_lib_->SendToUMA(metric,
                           reboot_count,
                           0,    // min: 0 reboots
@@ -498,8 +435,6 @@
 
   metric = metrics::kMetricSuccessfulUpdatePayloadType;
   metrics_lib_->SendEnumToUMA(metric, payload_type, kNumPayloadTypes);
-  LOG(INFO) << "Uploading " << utils::ToString(payload_type) << " for metric "
-            << metric;
 
   metric = metrics::kMetricSuccessfulUpdateAttemptCount;
   metrics_lib_->SendToUMA(metric,
@@ -507,11 +442,8 @@
                           1,    // min: 1 attempt
                           50,   // max: 50 attempts
                           50);  // num_buckets
-  LOG(INFO) << "Uploading " << attempt_count << " for metric " << metric;
 
   metric = metrics::kMetricSuccessfulUpdateUpdatesAbandonedCount;
-  LOG(INFO) << "Uploading " << updates_abandoned_count << " (count) for metric "
-            << metric;
   metrics_lib_->SendToUMA(metric,
                           updates_abandoned_count,
                           0,    // min: 0 counts
@@ -523,7 +455,6 @@
     metrics::RollbackResult result) {
   string metric = metrics::kMetricRollbackResult;
   int value = static_cast<int>(result);
-  LOG(INFO) << "Sending " << value << " for metric " << metric << " (enum)";
   metrics_lib_->SendEnumToUMA(
       metric, value, static_cast<int>(metrics::RollbackResult::kNumConstants));
 }
@@ -534,7 +465,6 @@
   string metric = metrics::kMetricEnterpriseRollbackSuccess;
   if (!success)
     metric = metrics::kMetricEnterpriseRollbackFailure;
-  LOG(INFO) << "Sending " << value << " for metric " << metric;
   metrics_lib_->SendSparseToUMA(metric, value);
 }
 
@@ -551,8 +481,6 @@
     case ServerToCheck::kNone:
       return;
   }
-  LOG(INFO) << "Uploading " << static_cast<int>(result) << " for metric "
-            << metric;
   metrics_lib_->SendEnumToUMA(
       metric,
       static_cast<int>(result),
@@ -566,9 +494,6 @@
                           1,   // min value
                           50,  // max value
                           kNumDefaultUmaBuckets);
-
-  LOG(INFO) << "Uploading " << target_attempt << " (count) for metric "
-            << metric;
 }
 
 void MetricsReporterOmaha::ReportTimeToReboot(int time_to_reboot_minutes) {
@@ -578,9 +503,6 @@
                           0,             // min: 0 minute
                           30 * 24 * 60,  // max: 1 month (approx)
                           kNumDefaultUmaBuckets);
-
-  LOG(INFO) << "Uploading " << time_to_reboot_minutes << " for metric "
-            << metric;
 }
 
 void MetricsReporterOmaha::ReportInstallDateProvisioningSource(int source,
@@ -592,7 +514,6 @@
 
 void MetricsReporterOmaha::ReportInternalErrorCode(ErrorCode error_code) {
   auto metric = metrics::kMetricAttemptInternalErrorCode;
-  LOG(INFO) << "Uploading " << error_code << " for metric " << metric;
   metrics_lib_->SendEnumToUMA(metric,
                               static_cast<int>(error_code),
                               static_cast<int>(ErrorCode::kUmaReportedMax));
@@ -604,18 +525,14 @@
     bool kernel_max_rollforward_success) {
   int value = kernel_min_version;
   string metric = metrics::kMetricKernelMinVersion;
-  LOG(INFO) << "Sending " << value << " for metric " << metric;
   metrics_lib_->SendSparseToUMA(metric, value);
 
   value = kernel_max_rollforward_version;
   metric = metrics::kMetricKernelMaxRollforwardVersion;
-  LOG(INFO) << "Sending " << value << " for metric " << metric;
   metrics_lib_->SendSparseToUMA(metric, value);
 
   bool bool_value = kernel_max_rollforward_success;
   metric = metrics::kMetricKernelMaxRollforwardSetSuccess;
-  LOG(INFO) << "Sending " << bool_value << " for metric " << metric
-            << " (bool)";
   metrics_lib_->SendBoolToUMA(metric, bool_value);
 }
 
@@ -625,7 +542,6 @@
       has_time_restriction_policy
           ? metrics::kMetricSuccessfulUpdateDurationFromSeenTimeRestrictedDays
           : metrics::kMetricSuccessfulUpdateDurationFromSeenDays;
-  LOG(INFO) << "Sending " << time_to_update_days << " for metric " << metric;
 
   metrics_lib_->SendToUMA(metric,
                           time_to_update_days,
@@ -634,4 +550,44 @@
                           50);     // num_buckets
 }
 
+bool MetricsReporterOmaha::WallclockDurationHelper(
+    const std::string& state_variable_key,
+    TimeDelta* out_duration) {
+  bool ret = false;
+  Time now = SystemState::Get()->clock()->GetWallclockTime();
+  int64_t stored_value;
+  if (SystemState::Get()->prefs()->GetInt64(state_variable_key,
+                                            &stored_value)) {
+    Time stored_time = Time::FromInternalValue(stored_value);
+    if (stored_time > now) {
+      LOG(ERROR) << "Stored time-stamp used for " << state_variable_key
+                 << " is in the future.";
+    } else {
+      *out_duration = now - stored_time;
+      ret = true;
+    }
+  }
+
+  if (!SystemState::Get()->prefs()->SetInt64(state_variable_key,
+                                             now.ToInternalValue())) {
+    LOG(ERROR) << "Error storing time-stamp in " << state_variable_key;
+  }
+
+  return ret;
+}
+
+bool MetricsReporterOmaha::MonotonicDurationHelper(int64_t* storage,
+                                                   TimeDelta* out_duration) {
+  bool ret = false;
+  Time now = SystemState::Get()->clock()->GetMonotonicTime();
+  if (*storage != 0) {
+    Time stored_time = Time::FromInternalValue(*storage);
+    *out_duration = now - stored_time;
+    ret = true;
+  }
+  *storage = now.ToInternalValue();
+
+  return ret;
+}
+
 }  // namespace chromeos_update_engine
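The WallclockDurationHelper() added above persists the previous timestamp in prefs under the given key, reports the elapsed wallclock time, and treats a stored timestamp from the future (e.g. after an NTP step backwards) as "no measurement". A self-contained sketch of that logic, with std::chrono and an in-memory map standing in for the real clock and prefs interfaces, could look like:

#include <chrono>
#include <map>
#include <string>

using Clock = std::chrono::system_clock;

// In-memory stand-in for the persistent prefs store.
std::map<std::string, Clock::time_point> g_last_report_time;

// Returns true and fills |out| with the time elapsed since the last call
// for |key|; returns false on the first call or if the stored timestamp
// is in the future (the wallclock can move backwards, e.g. via NTP).
bool WallclockDurationSince(const std::string& key, Clock::duration* out) {
  const Clock::time_point now = Clock::now();
  bool have_duration = false;

  auto it = g_last_report_time.find(key);
  if (it != g_last_report_time.end() && it->second <= now) {
    *out = now - it->second;
    have_duration = true;
  }

  // Always record the current time for the next call, mirroring the
  // behaviour of the helper in the diff above.
  g_last_report_time[key] = now;
  return have_duration;
}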
diff --git a/metrics_reporter_omaha.h b/cros/metrics_reporter_omaha.h
similarity index 80%
rename from metrics_reporter_omaha.h
rename to cros/metrics_reporter_omaha.h
index 5680dec..b6ffcce 100644
--- a/metrics_reporter_omaha.h
+++ b/cros/metrics_reporter_omaha.h
@@ -14,21 +14,21 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_METRICS_REPORTER_OMAHA_H_
-#define UPDATE_ENGINE_METRICS_REPORTER_OMAHA_H_
+#ifndef UPDATE_ENGINE_CROS_METRICS_REPORTER_OMAHA_H_
+#define UPDATE_ENGINE_CROS_METRICS_REPORTER_OMAHA_H_
 
 #include <memory>
 #include <string>
 
 #include <base/time/time.h>
+#include <gtest/gtest_prod.h>  // for FRIEND_TEST
 #include <metrics/metrics_library.h>
 
 #include "update_engine/certificate_checker.h"
 #include "update_engine/common/constants.h"
 #include "update_engine/common/error_code.h"
-#include "update_engine/metrics_constants.h"
-#include "update_engine/metrics_reporter_interface.h"
-#include "update_engine/system_state.h"
+#include "update_engine/common/metrics_constants.h"
+#include "update_engine/common/metrics_reporter_interface.h"
 
 namespace chromeos_update_engine {
 
@@ -108,8 +108,6 @@
 
   ~MetricsReporterOmaha() override = default;
 
-  void Initialize() override;
-
   void ReportRollbackMetrics(metrics::RollbackResult result) override;
 
   void ReportEnterpriseRollbackMetrics(
@@ -118,13 +116,11 @@
   void ReportDailyMetrics(base::TimeDelta os_age) override;
 
   void ReportUpdateCheckMetrics(
-      SystemState* system_state,
       metrics::CheckResult result,
       metrics::CheckReaction reaction,
       metrics::DownloadErrorCode download_error_code) override;
 
-  void ReportUpdateAttemptMetrics(SystemState* system_state,
-                                  int attempt_number,
+  void ReportUpdateAttemptMetrics(int attempt_number,
                                   PayloadType payload_type,
                                   base::TimeDelta duration,
                                   base::TimeDelta duration_uptime,
@@ -173,6 +169,28 @@
 
  private:
   friend class MetricsReporterOmahaTest;
+  FRIEND_TEST(MetricsReporterOmahaTest, WallclockDurationHelper);
+  FRIEND_TEST(MetricsReporterOmahaTest, MonotonicDurationHelper);
+
+  // This function returns the duration on the wallclock since the last
+  // time it was called for the same |state_variable_key| value.
+  //
+  // If the function returns |true|, the duration (always non-negative)
+  // is returned in |out_duration|. If the function returns |false|,
+  // something went wrong or there was no previous measurement.
+  bool WallclockDurationHelper(const std::string& state_variable_key,
+                               base::TimeDelta* out_duration);
+
+  // This function returns the duration on the monotonic clock since the
+  // last time it was called for the same |storage| pointer.
+  //
+  // You should pass a pointer to a 64-bit integer in |storage| which
+  // should be initialized to 0.
+  //
+  // If the function returns |true|, the duration (always non-negative)
+  // is returned in |out_duration|. If the function returns |false|,
+  // something went wrong or there was no previous measurement.
+  bool MonotonicDurationHelper(int64_t* storage, base::TimeDelta* out_duration);
 
   std::unique_ptr<MetricsLibraryInterface> metrics_lib_;
 
@@ -181,4 +199,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_METRICS_REPORTER_OMAHA_H_
+#endif  // UPDATE_ENGINE_CROS_METRICS_REPORTER_OMAHA_H_
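The header above makes the duration helpers private members and grants the unit tests access with FRIEND_TEST. A minimal sketch of that pattern, using a hypothetical Widget class and test (link with gtest_main):

#include <gtest/gtest.h>
#include <gtest/gtest_prod.h>  // for FRIEND_TEST

class Widget {
 public:
  int DoubledSecret() const { return Secret() * 2; }

 private:
  // Each FRIEND_TEST names one test that may touch private members.
  FRIEND_TEST(WidgetTest, SecretIsPositive);

  int Secret() const { return 21; }
};

TEST(WidgetTest, SecretIsPositive) {
  Widget w;
  EXPECT_GT(w.Secret(), 0);  // Allowed only because of FRIEND_TEST above.
}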
diff --git a/metrics_reporter_omaha_unittest.cc b/cros/metrics_reporter_omaha_unittest.cc
similarity index 76%
rename from metrics_reporter_omaha_unittest.cc
rename to cros/metrics_reporter_omaha_unittest.cc
index 545d02f..cdc44cd 100644
--- a/metrics_reporter_omaha_unittest.cc
+++ b/cros/metrics_reporter_omaha_unittest.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/metrics_reporter_omaha.h"
+#include "update_engine/cros/metrics_reporter_omaha.h"
 
 #include <memory>
 #include <string>
@@ -25,8 +25,7 @@
 #include <metrics/metrics_library_mock.h>
 
 #include "update_engine/common/fake_clock.h"
-#include "update_engine/common/fake_prefs.h"
-#include "update_engine/fake_system_state.h"
+#include "update_engine/cros/fake_system_state.h"
 
 using base::TimeDelta;
 using testing::_;
@@ -40,12 +39,16 @@
 
   // Reset the metrics_lib_ to a mock library.
   void SetUp() override {
+    FakeSystemState::CreateInstance();
+    fake_clock_ = FakeSystemState::Get()->fake_clock();
     mock_metrics_lib_ = new testing::NiceMock<MetricsLibraryMock>();
     reporter_.metrics_lib_.reset(mock_metrics_lib_);
   }
 
   testing::NiceMock<MetricsLibraryMock>* mock_metrics_lib_;
   MetricsReporterOmaha reporter_;
+
+  FakeClock* fake_clock_;
 };
 
 TEST_F(MetricsReporterOmahaTest, ReportDailyMetrics) {
@@ -58,15 +61,9 @@
 }
 
 TEST_F(MetricsReporterOmahaTest, ReportUpdateCheckMetrics) {
-  FakeSystemState fake_system_state;
-  FakeClock fake_clock;
-  FakePrefs fake_prefs;
-
   // We need to execute the report twice to test the time since last report.
-  fake_system_state.set_clock(&fake_clock);
-  fake_system_state.set_prefs(&fake_prefs);
-  fake_clock.SetWallclockTime(base::Time::FromInternalValue(1000000));
-  fake_clock.SetMonotonicTime(base::Time::FromInternalValue(1000000));
+  fake_clock_->SetWallclockTime(base::Time::FromInternalValue(1000000));
+  fake_clock_->SetMonotonicTime(base::Time::FromInternalValue(1000000));
 
   metrics::CheckResult result = metrics::CheckResult::kUpdateAvailable;
   metrics::CheckReaction reaction = metrics::CheckReaction::kIgnored;
@@ -104,24 +101,20 @@
           metrics::kMetricCheckTimeSinceLastCheckUptimeMinutes, 1, _, _, _))
       .Times(1);
 
-  reporter_.ReportUpdateCheckMetrics(
-      &fake_system_state, result, reaction, error_code);
+  reporter_.ReportUpdateCheckMetrics(result, reaction, error_code);
 
   // Advance the clock by 1 minute and report the same metrics again.
-  fake_clock.SetWallclockTime(base::Time::FromInternalValue(61000000));
-  fake_clock.SetMonotonicTime(base::Time::FromInternalValue(61000000));
+  fake_clock_->SetWallclockTime(base::Time::FromInternalValue(61000000));
+  fake_clock_->SetMonotonicTime(base::Time::FromInternalValue(61000000));
   // Allow rollback
-  reporter_.ReportUpdateCheckMetrics(
-      &fake_system_state, result, reaction, error_code);
+  reporter_.ReportUpdateCheckMetrics(result, reaction, error_code);
 }
 
 TEST_F(MetricsReporterOmahaTest, ReportUpdateCheckMetricsPinned) {
-  FakeSystemState fake_system_state;
-
-  OmahaRequestParams params(&fake_system_state);
+  OmahaRequestParams params;
   params.set_target_version_prefix("10575.");
   params.set_rollback_allowed(false);
-  fake_system_state.set_request_params(&params);
+  FakeSystemState::Get()->set_request_params(&params);
 
   metrics::CheckResult result = metrics::CheckResult::kUpdateAvailable;
   metrics::CheckReaction reaction = metrics::CheckReaction::kIgnored;
@@ -138,17 +131,14 @@
               SendSparseToUMA(metrics::kMetricCheckRollbackTargetVersion, _))
       .Times(0);
 
-  reporter_.ReportUpdateCheckMetrics(
-      &fake_system_state, result, reaction, error_code);
+  reporter_.ReportUpdateCheckMetrics(result, reaction, error_code);
 }
 
 TEST_F(MetricsReporterOmahaTest, ReportUpdateCheckMetricsRollback) {
-  FakeSystemState fake_system_state;
-
-  OmahaRequestParams params(&fake_system_state);
+  OmahaRequestParams params;
   params.set_target_version_prefix("10575.");
   params.set_rollback_allowed(true);
-  fake_system_state.set_request_params(&params);
+  FakeSystemState::Get()->set_request_params(&params);
 
   metrics::CheckResult result = metrics::CheckResult::kUpdateAvailable;
   metrics::CheckReaction reaction = metrics::CheckReaction::kIgnored;
@@ -166,8 +156,7 @@
       SendSparseToUMA(metrics::kMetricCheckRollbackTargetVersion, 10575))
       .Times(1);
 
-  reporter_.ReportUpdateCheckMetrics(
-      &fake_system_state, result, reaction, error_code);
+  reporter_.ReportUpdateCheckMetrics(result, reaction, error_code);
 }
 
 TEST_F(MetricsReporterOmahaTest,
@@ -183,14 +172,8 @@
 }
 
 TEST_F(MetricsReporterOmahaTest, ReportUpdateAttemptMetrics) {
-  FakeSystemState fake_system_state;
-  FakeClock fake_clock;
-  FakePrefs fake_prefs;
-
-  fake_system_state.set_clock(&fake_clock);
-  fake_system_state.set_prefs(&fake_prefs);
-  fake_clock.SetWallclockTime(base::Time::FromInternalValue(1000000));
-  fake_clock.SetMonotonicTime(base::Time::FromInternalValue(1000000));
+  fake_clock_->SetWallclockTime(base::Time::FromInternalValue(1000000));
+  fake_clock_->SetMonotonicTime(base::Time::FromInternalValue(1000000));
 
   int attempt_number = 1;
   PayloadType payload_type = kPayloadTypeFull;
@@ -252,8 +235,7 @@
           metrics::kMetricAttemptTimeSinceLastAttemptUptimeMinutes, 1, _, _, _))
       .Times(1);
 
-  reporter_.ReportUpdateAttemptMetrics(&fake_system_state,
-                                       attempt_number,
+  reporter_.ReportUpdateAttemptMetrics(attempt_number,
                                        payload_type,
                                        duration,
                                        duration_uptime,
@@ -262,10 +244,9 @@
                                        internal_error_code);
 
   // Advance the clock by 1 minute and report the same metrics again.
-  fake_clock.SetWallclockTime(base::Time::FromInternalValue(61000000));
-  fake_clock.SetMonotonicTime(base::Time::FromInternalValue(61000000));
-  reporter_.ReportUpdateAttemptMetrics(&fake_system_state,
-                                       attempt_number,
+  fake_clock_->SetWallclockTime(base::Time::FromInternalValue(61000000));
+  fake_clock_->SetMonotonicTime(base::Time::FromInternalValue(61000000));
+  reporter_.ReportUpdateAttemptMetrics(attempt_number,
                                        payload_type,
                                        duration,
                                        duration_uptime,
@@ -538,4 +519,89 @@
       true /* has_time_restriction_policy */, kDaysToUpdate);
 }
 
+TEST_F(MetricsReporterOmahaTest, WallclockDurationHelper) {
+  base::TimeDelta duration;
+  const std::string state_variable_key = "test-prefs";
+
+  // Initialize wallclock to 1 sec.
+  fake_clock_->SetWallclockTime(base::Time::FromInternalValue(1000000));
+
+  // First time called so no previous measurement available.
+  EXPECT_FALSE(
+      reporter_.WallclockDurationHelper(state_variable_key, &duration));
+
+  // Next time, we should get zero since the clock didn't advance.
+  EXPECT_TRUE(reporter_.WallclockDurationHelper(state_variable_key, &duration));
+  EXPECT_EQ(duration.InSeconds(), 0);
+
+  // We can also call it as many times as we want without it being
+  // considered a failure.
+  EXPECT_TRUE(reporter_.WallclockDurationHelper(state_variable_key, &duration));
+  EXPECT_EQ(duration.InSeconds(), 0);
+  EXPECT_TRUE(reporter_.WallclockDurationHelper(state_variable_key, &duration));
+  EXPECT_EQ(duration.InSeconds(), 0);
+
+  // Advance the clock one second, then we should get 1 sec on the
+  // next call and 0 sec on the subsequent call.
+  fake_clock_->SetWallclockTime(base::Time::FromInternalValue(2000000));
+  EXPECT_TRUE(reporter_.WallclockDurationHelper(state_variable_key, &duration));
+  EXPECT_EQ(duration.InSeconds(), 1);
+  EXPECT_TRUE(reporter_.WallclockDurationHelper(state_variable_key, &duration));
+  EXPECT_EQ(duration.InSeconds(), 0);
+
+  // Advance clock two seconds and we should get 2 sec and then 0 sec.
+  fake_clock_->SetWallclockTime(base::Time::FromInternalValue(4000000));
+  EXPECT_TRUE(reporter_.WallclockDurationHelper(state_variable_key, &duration));
+  EXPECT_EQ(duration.InSeconds(), 2);
+  EXPECT_TRUE(reporter_.WallclockDurationHelper(state_variable_key, &duration));
+  EXPECT_EQ(duration.InSeconds(), 0);
+
+  // There's a possibility that the wallclock can go backwards (NTP
+  // adjustments, for example) so check that we properly handle this
+  // case.
+  fake_clock_->SetWallclockTime(base::Time::FromInternalValue(3000000));
+  EXPECT_FALSE(
+      reporter_.WallclockDurationHelper(state_variable_key, &duration));
+  fake_clock_->SetWallclockTime(base::Time::FromInternalValue(4000000));
+  EXPECT_TRUE(reporter_.WallclockDurationHelper(state_variable_key, &duration));
+  EXPECT_EQ(duration.InSeconds(), 1);
+}
+
+TEST_F(MetricsReporterOmahaTest, MonotonicDurationHelper) {
+  int64_t storage = 0;
+  base::TimeDelta duration;
+
+  // Initialize monotonic clock to 1 sec.
+  fake_clock_->SetMonotonicTime(base::Time::FromInternalValue(1000000));
+
+  // First time called so no previous measurement available.
+  EXPECT_FALSE(reporter_.MonotonicDurationHelper(&storage, &duration));
+
+  // Next time, we should get zero since the clock didn't advance.
+  EXPECT_TRUE(reporter_.MonotonicDurationHelper(&storage, &duration));
+  EXPECT_EQ(duration.InSeconds(), 0);
+
+  // We can also call it as many times as we want without it being
+  // considered a failure.
+  EXPECT_TRUE(reporter_.MonotonicDurationHelper(&storage, &duration));
+  EXPECT_EQ(duration.InSeconds(), 0);
+  EXPECT_TRUE(reporter_.MonotonicDurationHelper(&storage, &duration));
+  EXPECT_EQ(duration.InSeconds(), 0);
+
+  // Advance the clock one second, then we should get 1 sec on the
+  // next call and 0 sec on the subsequent call.
+  fake_clock_->SetMonotonicTime(base::Time::FromInternalValue(2000000));
+  EXPECT_TRUE(reporter_.MonotonicDurationHelper(&storage, &duration));
+  EXPECT_EQ(duration.InSeconds(), 1);
+  EXPECT_TRUE(reporter_.MonotonicDurationHelper(&storage, &duration));
+  EXPECT_EQ(duration.InSeconds(), 0);
+
+  // Advance clock two seconds and we should get 2 sec and then 0 sec.
+  fake_clock_->SetMonotonicTime(base::Time::FromInternalValue(4000000));
+  EXPECT_TRUE(reporter_.MonotonicDurationHelper(&storage, &duration));
+  EXPECT_EQ(duration.InSeconds(), 2);
+  EXPECT_TRUE(reporter_.MonotonicDurationHelper(&storage, &duration));
+  EXPECT_EQ(duration.InSeconds(), 0);
+}
+
 }  // namespace chromeos_update_engine
diff --git a/mock_connection_manager.h b/cros/mock_connection_manager.h
similarity index 85%
rename from mock_connection_manager.h
rename to cros/mock_connection_manager.h
index 2fff68c..899a49b 100644
--- a/mock_connection_manager.h
+++ b/cros/mock_connection_manager.h
@@ -14,12 +14,12 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_MOCK_CONNECTION_MANAGER_H_
-#define UPDATE_ENGINE_MOCK_CONNECTION_MANAGER_H_
+#ifndef UPDATE_ENGINE_CROS_MOCK_CONNECTION_MANAGER_H_
+#define UPDATE_ENGINE_CROS_MOCK_CONNECTION_MANAGER_H_
 
 #include <gmock/gmock.h>
 
-#include "update_engine/connection_manager_interface.h"
+#include "update_engine/cros/connection_manager_interface.h"
 
 namespace chromeos_update_engine {
 
@@ -41,4 +41,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_MOCK_CONNECTION_MANAGER_H_
+#endif  // UPDATE_ENGINE_CROS_MOCK_CONNECTION_MANAGER_H_
diff --git a/mock_omaha_request_params.h b/cros/mock_omaha_request_params.h
similarity index 89%
rename from mock_omaha_request_params.h
rename to cros/mock_omaha_request_params.h
index 41bdc19..1e21812 100644
--- a/mock_omaha_request_params.h
+++ b/cros/mock_omaha_request_params.h
@@ -14,21 +14,20 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_MOCK_OMAHA_REQUEST_PARAMS_H_
-#define UPDATE_ENGINE_MOCK_OMAHA_REQUEST_PARAMS_H_
+#ifndef UPDATE_ENGINE_CROS_MOCK_OMAHA_REQUEST_PARAMS_H_
+#define UPDATE_ENGINE_CROS_MOCK_OMAHA_REQUEST_PARAMS_H_
 
 #include <string>
 
 #include <gmock/gmock.h>
 
-#include "update_engine/omaha_request_params.h"
+#include "update_engine/cros/omaha_request_params.h"
 
 namespace chromeos_update_engine {
 
 class MockOmahaRequestParams : public OmahaRequestParams {
  public:
-  explicit MockOmahaRequestParams(SystemState* system_state)
-      : OmahaRequestParams(system_state) {
+  MockOmahaRequestParams() : OmahaRequestParams() {
     // Delegate all calls to the parent instance by default. This helps the
     // migration from tests using the real RequestParams when they should have
     // used a fake or mock.
@@ -79,4 +78,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_MOCK_OMAHA_REQUEST_PARAMS_H_
+#endif  // UPDATE_ENGINE_CROS_MOCK_OMAHA_REQUEST_PARAMS_H_
diff --git a/mock_p2p_manager.h b/cros/mock_p2p_manager.h
similarity index 94%
rename from mock_p2p_manager.h
rename to cros/mock_p2p_manager.h
index fd67034..273f7f9 100644
--- a/mock_p2p_manager.h
+++ b/cros/mock_p2p_manager.h
@@ -14,12 +14,12 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_MOCK_P2P_MANAGER_H_
-#define UPDATE_ENGINE_MOCK_P2P_MANAGER_H_
+#ifndef UPDATE_ENGINE_CROS_MOCK_P2P_MANAGER_H_
+#define UPDATE_ENGINE_CROS_MOCK_P2P_MANAGER_H_
 
 #include <string>
 
-#include "update_engine/fake_p2p_manager.h"
+#include "update_engine/cros/fake_p2p_manager.h"
 
 #include <gmock/gmock.h>
 
@@ -99,4 +99,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_MOCK_P2P_MANAGER_H_
+#endif  // UPDATE_ENGINE_CROS_MOCK_P2P_MANAGER_H_
diff --git a/mock_payload_state.h b/cros/mock_payload_state.h
similarity index 90%
rename from mock_payload_state.h
rename to cros/mock_payload_state.h
index ad22de5..211b96d 100644
--- a/mock_payload_state.h
+++ b/cros/mock_payload_state.h
@@ -14,21 +14,20 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_MOCK_PAYLOAD_STATE_H_
-#define UPDATE_ENGINE_MOCK_PAYLOAD_STATE_H_
+#ifndef UPDATE_ENGINE_CROS_MOCK_PAYLOAD_STATE_H_
+#define UPDATE_ENGINE_CROS_MOCK_PAYLOAD_STATE_H_
 
 #include <string>
 
 #include <gmock/gmock.h>
 
-#include "update_engine/omaha_request_action.h"
-#include "update_engine/payload_state_interface.h"
+#include "update_engine/cros/payload_state_interface.h"
 
 namespace chromeos_update_engine {
 
 class MockPayloadState : public PayloadStateInterface {
  public:
-  bool Initialize(SystemState* system_state) { return true; }
+  bool Initialize() { return true; }
 
   // Significant methods.
   MOCK_METHOD1(SetResponse, void(const OmahaResponse& response));
@@ -81,4 +80,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_MOCK_PAYLOAD_STATE_H_
+#endif  // UPDATE_ENGINE_CROS_MOCK_PAYLOAD_STATE_H_
diff --git a/mock_power_manager.h b/cros/mock_power_manager.h
similarity index 80%
rename from mock_power_manager.h
rename to cros/mock_power_manager.h
index 8363171..d4a8682 100644
--- a/mock_power_manager.h
+++ b/cros/mock_power_manager.h
@@ -14,12 +14,12 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_MOCK_POWER_MANAGER_H_
-#define UPDATE_ENGINE_MOCK_POWER_MANAGER_H_
+#ifndef UPDATE_ENGINE_CROS_MOCK_POWER_MANAGER_H_
+#define UPDATE_ENGINE_CROS_MOCK_POWER_MANAGER_H_
 
 #include <gmock/gmock.h>
 
-#include "update_engine/power_manager_interface.h"
+#include "update_engine/cros/power_manager_interface.h"
 
 namespace chromeos_update_engine {
 
@@ -32,4 +32,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_MOCK_POWER_MANAGER_H_
+#endif  // UPDATE_ENGINE_CROS_MOCK_POWER_MANAGER_H_
diff --git a/mock_update_attempter.h b/cros/mock_update_attempter.h
similarity index 71%
rename from mock_update_attempter.h
rename to cros/mock_update_attempter.h
index 5df5a6b..be8cfcc 100644
--- a/mock_update_attempter.h
+++ b/cros/mock_update_attempter.h
@@ -14,13 +14,13 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_MOCK_UPDATE_ATTEMPTER_H_
-#define UPDATE_ENGINE_MOCK_UPDATE_ATTEMPTER_H_
+#ifndef UPDATE_ENGINE_CROS_MOCK_UPDATE_ATTEMPTER_H_
+#define UPDATE_ENGINE_CROS_MOCK_UPDATE_ATTEMPTER_H_
 
 #include <string>
 #include <vector>
 
-#include "update_engine/update_attempter.h"
+#include "update_engine/cros/update_attempter.h"
 
 #include <gmock/gmock.h>
 
@@ -30,14 +30,10 @@
  public:
   using UpdateAttempter::UpdateAttempter;
 
-  MOCK_METHOD7(Update,
-               void(const std::string& app_version,
-                    const std::string& omaha_url,
-                    const std::string& target_channel,
-                    const std::string& target_version_prefix,
-                    bool rollback_allowed,
-                    bool obey_proxies,
-                    bool interactive));
+  MOCK_METHOD(void,
+              Update,
+              (const chromeos_update_manager::UpdateCheckParams& params),
+              (override));
 
   MOCK_METHOD1(GetStatus, bool(update_engine::UpdateEngineStatus* out_status));
 
@@ -53,9 +49,13 @@
                     UpdateAttemptFlags flags));
 
   MOCK_METHOD2(CheckForInstall,
-               bool(const std::vector<std::string>& dlc_module_ids,
+               bool(const std::vector<std::string>& dlc_ids,
                     const std::string& omaha_url));
 
+  MOCK_METHOD2(SetDlcActiveValue, bool(bool, const std::string&));
+
+  MOCK_CONST_METHOD0(GetExcluder, ExcluderInterface*(void));
+
   MOCK_METHOD0(RefreshDevicePolicy, void(void));
 
   MOCK_CONST_METHOD0(consecutive_failed_update_checks, unsigned int(void));
@@ -65,4 +65,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_MOCK_UPDATE_ATTEMPTER_H_
+#endif  // UPDATE_ENGINE_CROS_MOCK_UPDATE_ATTEMPTER_H_
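The mock above swaps the old arity-counted MOCK_METHOD7 macro for gMock's newer MOCK_METHOD form, which takes the return type, name, parenthesized argument list, and qualifiers such as (override). A short sketch of both styles on a hypothetical interface:

#include <gmock/gmock.h>
#include <string>

class UpdaterInterface {
 public:
  virtual ~UpdaterInterface() = default;
  virtual void Update(const std::string& url, bool interactive) = 0;
  virtual bool GetStatus(int* out_status) = 0;
};

class MockUpdater : public UpdaterInterface {
 public:
  // New-style macro: return type, name, (arguments), (qualifiers).
  MOCK_METHOD(void, Update, (const std::string& url, bool interactive),
              (override));

  // Legacy style still used elsewhere in this file: MOCK_METHODn, where n
  // is the argument count and the signature is given as a function type.
  MOCK_METHOD1(GetStatus, bool(int* out_status));
};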
diff --git a/cros/omaha_request_action.cc b/cros/omaha_request_action.cc
new file mode 100644
index 0000000..1e5c15f
--- /dev/null
+++ b/cros/omaha_request_action.cc
@@ -0,0 +1,1783 @@
+//
+// Copyright (C) 2012 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/cros/omaha_request_action.h"
+
+#include <inttypes.h>
+
+#include <limits>
+#include <map>
+#include <sstream>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <base/bind.h>
+#include <base/files/file_util.h>
+#include <base/logging.h>
+#include <base/optional.h>
+#include <base/rand_util.h>
+#include <base/strings/string_number_conversions.h>
+#include <base/strings/string_split.h>
+#include <base/strings/string_util.h>
+#include <base/strings/stringprintf.h>
+#include <base/time/time.h>
+#include <brillo/key_value_store.h>
+#include <expat.h>
+#include <metrics/metrics_library.h>
+#include <policy/libpolicy.h>
+
+#include "update_engine/common/action_pipe.h"
+#include "update_engine/common/constants.h"
+#include "update_engine/common/hardware_interface.h"
+#include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/metrics_reporter_interface.h"
+#include "update_engine/common/platform_constants.h"
+#include "update_engine/common/prefs.h"
+#include "update_engine/common/prefs_interface.h"
+#include "update_engine/common/system_state.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/cros/connection_manager_interface.h"
+#include "update_engine/cros/omaha_request_builder_xml.h"
+#include "update_engine/cros/omaha_request_params.h"
+#include "update_engine/cros/p2p_manager.h"
+#include "update_engine/cros/payload_state_interface.h"
+#include "update_engine/cros/update_attempter.h"
+#include "update_engine/metrics_utils.h"
+
+using base::Optional;
+using base::Time;
+using base::TimeDelta;
+using chromeos_update_manager::kRollforwardInfinity;
+using std::map;
+using std::string;
+using std::vector;
+
+namespace chromeos_update_engine {
+
+// List of custom attributes that we interpret in the Omaha response:
+constexpr char kAttrDeadline[] = "deadline";
+constexpr char kAttrDisableP2PForDownloading[] = "DisableP2PForDownloading";
+constexpr char kAttrDisableP2PForSharing[] = "DisableP2PForSharing";
+constexpr char kAttrDisablePayloadBackoff[] = "DisablePayloadBackoff";
+constexpr char kAttrVersion[] = "version";
+// Deprecated: "IsDelta"
+constexpr char kAttrIsDeltaPayload[] = "IsDeltaPayload";
+constexpr char kAttrMaxFailureCountPerUrl[] = "MaxFailureCountPerUrl";
+constexpr char kAttrMaxDaysToScatter[] = "MaxDaysToScatter";
+// Deprecated: "ManifestSignatureRsa"
+// Deprecated: "ManifestSize"
+constexpr char kAttrMetadataSignatureRsa[] = "MetadataSignatureRsa";
+constexpr char kAttrMetadataSize[] = "MetadataSize";
+constexpr char kAttrMoreInfo[] = "MoreInfo";
+constexpr char kAttrNoUpdate[] = "noupdate";
+// Deprecated: "NeedsAdmin"
+constexpr char kAttrPollInterval[] = "PollInterval";
+constexpr char kAttrPowerwash[] = "Powerwash";
+constexpr char kAttrPrompt[] = "Prompt";
+constexpr char kAttrPublicKeyRsa[] = "PublicKeyRsa";
+
+// List of attributes that we interpret in the Omaha response:
+constexpr char kAttrAppId[] = "appid";
+constexpr char kAttrCodeBase[] = "codebase";
+constexpr char kAttrCohort[] = "cohort";
+constexpr char kAttrCohortHint[] = "cohorthint";
+constexpr char kAttrCohortName[] = "cohortname";
+constexpr char kAttrElapsedDays[] = "elapsed_days";
+constexpr char kAttrElapsedSeconds[] = "elapsed_seconds";
+constexpr char kAttrEvent[] = "event";
+constexpr char kAttrFp[] = "fp";
+constexpr char kAttrHashSha256[] = "hash_sha256";
+// Deprecated: "hash"; Although we still need to pass it from the server for
+// backward compatibility.
+constexpr char kAttrName[] = "name";
+// Deprecated: "sha256"; Although we still need to pass it from the server for
+// backward compatibility.
+constexpr char kAttrSize[] = "size";
+constexpr char kAttrStatus[] = "status";
+
+// List of values that we interpret in the Omaha response:
+constexpr char kValPostInstall[] = "postinstall";
+constexpr char kValNoUpdate[] = "noupdate";
+
+// updatecheck attributes.
+// Deprecated: "eol"
+constexpr char kAttrEolDate[] = "_eol_date";
+constexpr char kAttrRollback[] = "_rollback";
+constexpr char kAttrFirmwareVersion[] = "_firmware_version";
+constexpr char kAttrKernelVersion[] = "_kernel_version";
+
+// Struct used for holding data obtained when parsing the XML.
+struct OmahaParserData {
+  OmahaParserData(XML_Parser _xml_parser, int _rollback_allowed_milestones)
+      : xml_parser(_xml_parser),
+        rollback_allowed_milestones(_rollback_allowed_milestones) {}
+
+  // Pointer to the expat XML_Parser object.
+  XML_Parser xml_parser;
+
+  // Some values that we need during parsing.
+  int rollback_allowed_milestones;
+
+  // This is the state of the parser as it's processing the XML.
+  bool failed = false;
+  bool entity_decl = false;
+  string current_path;
+
+  // These are the values extracted from the XML.
+  struct DayStart {
+    string elapsed_days;
+    string elapsed_seconds;
+  } daystart;
+
+  struct App {
+    string id;
+    Optional<string> cohort;
+    Optional<string> cohorthint;
+    Optional<string> cohortname;
+
+    struct Url {
+      string codebase;
+    };
+    vector<Url> urls;
+
+    struct Manifest {
+      string version;
+    } manifest;
+
+    struct UpdateCheck {
+      string status;
+      string poll_interval;
+      string eol_date;
+      string rollback;
+      string firmware_version;
+      string kernel_version;
+      string past_firmware_version;
+      string past_kernel_version;
+    } updatecheck;
+
+    struct PostInstallAction {
+      vector<string> is_delta_payloads;
+      vector<string> metadata_signature_rsas;
+      vector<string> metadata_sizes;
+      string max_days_to_scatter;
+      string no_update;
+      string more_info_url;
+      string prompt;
+      string deadline;
+      string disable_p2p_for_downloading;
+      string disable_p2p_for_sharing;
+      string public_key_rsa;
+      string max_failure_count_per_url;
+      string disable_payload_backoff;
+      string powerwash_required;
+    };
+    Optional<PostInstallAction> postinstall_action;
+
+    struct Package {
+      string name;
+      string size;
+      string hash;
+      string fp;
+    };
+    vector<Package> packages;
+  };
+  vector<App> apps;
+};
+
+namespace {
+
+// Callback function invoked by expat.
+void ParserHandlerStart(void* user_data,
+                        const XML_Char* element,
+                        const XML_Char** attr) {
+  OmahaParserData* data = reinterpret_cast<OmahaParserData*>(user_data);
+
+  if (data->failed)
+    return;
+
+  data->current_path += string("/") + element;
+
+  map<string, string> attrs;
+  if (attr != nullptr) {
+    for (int n = 0; attr[n] != nullptr && attr[n + 1] != nullptr; n += 2) {
+      string key = attr[n];
+      string value = attr[n + 1];
+      attrs[key] = value;
+    }
+  }
+
+  if (data->current_path == "/response/daystart") {
+    data->daystart = {
+        .elapsed_days = attrs[kAttrElapsedDays],
+        .elapsed_seconds = attrs[kAttrElapsedSeconds],
+    };
+  } else if (data->current_path == "/response/app") {
+    data->apps.push_back({.id = attrs[kAttrAppId]});
+    if (attrs.find(kAttrCohort) != attrs.end())
+      data->apps.back().cohort = attrs[kAttrCohort];
+    if (attrs.find(kAttrCohortHint) != attrs.end())
+      data->apps.back().cohorthint = attrs[kAttrCohortHint];
+    if (attrs.find(kAttrCohortName) != attrs.end())
+      data->apps.back().cohortname = attrs[kAttrCohortName];
+  } else if (data->current_path == "/response/app/updatecheck") {
+    data->apps.back().updatecheck = {
+        .status = attrs[kAttrStatus],
+        .poll_interval = attrs[kAttrPollInterval],
+        .eol_date = attrs[kAttrEolDate],
+        .rollback = attrs[kAttrRollback],
+        .firmware_version = attrs[kAttrFirmwareVersion],
+        .kernel_version = attrs[kAttrKernelVersion],
+        .past_firmware_version = attrs[base::StringPrintf(
+            "%s_%i", kAttrFirmwareVersion, data->rollback_allowed_milestones)],
+        .past_kernel_version = attrs[base::StringPrintf(
+            "%s_%i", kAttrKernelVersion, data->rollback_allowed_milestones)],
+    };
+  } else if (data->current_path == "/response/app/updatecheck/urls/url") {
+    data->apps.back().urls.push_back({.codebase = attrs[kAttrCodeBase]});
+  } else if (data->current_path ==
+             "/response/app/updatecheck/manifest/packages/package") {
+    data->apps.back().packages.push_back({
+        .name = attrs[kAttrName],
+        .size = attrs[kAttrSize],
+        .hash = attrs[kAttrHashSha256],
+        .fp = attrs[kAttrFp],
+    });
+  } else if (data->current_path == "/response/app/updatecheck/manifest") {
+    data->apps.back().manifest.version = attrs[kAttrVersion];
+  } else if (data->current_path ==
+             "/response/app/updatecheck/manifest/actions/action") {
+    // We only care about the postinstall action.
+    if (attrs[kAttrEvent] == kValPostInstall) {
+      OmahaParserData::App::PostInstallAction action = {
+          .is_delta_payloads = base::SplitString(attrs[kAttrIsDeltaPayload],
+                                                 ":",
+                                                 base::TRIM_WHITESPACE,
+                                                 base::SPLIT_WANT_ALL),
+          .metadata_signature_rsas =
+              base::SplitString(attrs[kAttrMetadataSignatureRsa],
+                                ":",
+                                base::TRIM_WHITESPACE,
+                                base::SPLIT_WANT_ALL),
+          .metadata_sizes = base::SplitString(attrs[kAttrMetadataSize],
+                                              ":",
+                                              base::TRIM_WHITESPACE,
+                                              base::SPLIT_WANT_ALL),
+          .max_days_to_scatter = attrs[kAttrMaxDaysToScatter],
+          .no_update = attrs[kAttrNoUpdate],
+          .more_info_url = attrs[kAttrMoreInfo],
+          .prompt = attrs[kAttrPrompt],
+          .deadline = attrs[kAttrDeadline],
+          .disable_p2p_for_downloading = attrs[kAttrDisableP2PForDownloading],
+          .disable_p2p_for_sharing = attrs[kAttrDisableP2PForSharing],
+          .public_key_rsa = attrs[kAttrPublicKeyRsa],
+          .max_failure_count_per_url = attrs[kAttrMaxFailureCountPerUrl],
+          .disable_payload_backoff = attrs[kAttrDisablePayloadBackoff],
+          .powerwash_required = attrs[kAttrPowerwash],
+      };
+      data->apps.back().postinstall_action = std::move(action);
+    }
+  }
+}
+
+// Callback function invoked by expat.
+void ParserHandlerEnd(void* user_data, const XML_Char* element) {
+  OmahaParserData* data = reinterpret_cast<OmahaParserData*>(user_data);
+  if (data->failed)
+    return;
+
+  const string path_suffix = string("/") + element;
+
+  if (!base::EndsWith(
+          data->current_path, path_suffix, base::CompareCase::SENSITIVE)) {
+    LOG(ERROR) << "Unexpected end element '" << element
+               << "' with current_path='" << data->current_path << "'";
+    data->failed = true;
+    return;
+  }
+  data->current_path.resize(data->current_path.size() - path_suffix.size());
+}
+
+// Callback function invoked by expat.
+//
+// This is called for entity declarations. Since Omaha is guaranteed
+// to never return any XML with entities, our course of action is to
+// just stop parsing. This avoids potential resource exhaustion
+// problems, a.k.a. the "billion laughs" attack (CVE-2013-0340).
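+//
+// Illustrative trigger (hypothetical input): a response beginning with
+//   <?xml version="1.0"?><!DOCTYPE response [<!ENTITY a "aaaa">]>
+// would invoke this handler, which marks the parse as failed and stops the
+// parser.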
+void ParserHandlerEntityDecl(void* user_data,
+                             const XML_Char* entity_name,
+                             int is_parameter_entity,
+                             const XML_Char* value,
+                             int value_length,
+                             const XML_Char* base,
+                             const XML_Char* system_id,
+                             const XML_Char* public_id,
+                             const XML_Char* notation_name) {
+  OmahaParserData* data = reinterpret_cast<OmahaParserData*>(user_data);
+
+  LOG(ERROR) << "XML entities are not supported. Aborting parsing.";
+  data->failed = true;
+  data->entity_decl = true;
+  XML_StopParser(data->xml_parser, false);
+}
+
+}  // namespace
+
+OmahaRequestAction::OmahaRequestAction(
+    OmahaEvent* event,
+    std::unique_ptr<HttpFetcher> http_fetcher,
+    bool ping_only,
+    const string& session_id)
+    : event_(event),
+      http_fetcher_(std::move(http_fetcher)),
+      policy_provider_(std::make_unique<policy::PolicyProvider>()),
+      ping_only_(ping_only),
+      ping_active_days_(0),
+      ping_roll_call_days_(0),
+      session_id_(session_id) {
+  policy_provider_->Reload();
+}
+
+OmahaRequestAction::~OmahaRequestAction() {}
+
+// Calculates the value to use for the ping days parameter.
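+// Illustrative outcomes (values are examples): a last ping recorded 3 days ago
+// yields 3; no recorded ping yields kPingNeverPinged; a recorded ping that is
+// in the future (system clock jumped back) yields kPingTimeJump.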
+int OmahaRequestAction::CalculatePingDays(const string& key) {
+  int days = kPingNeverPinged;
+  int64_t last_ping = 0;
+  if (SystemState::Get()->prefs()->GetInt64(key, &last_ping) &&
+      last_ping >= 0) {
+    days = (Time::Now() - Time::FromInternalValue(last_ping)).InDays();
+    if (days < 0) {
+      // If |days| is negative, then the system clock must have jumped
+      // back in time since the ping was sent. Mark the value so that
+      // it doesn't get sent to the server but we still update the
+      // last ping daystart preference. This way the next ping time
+      // will be correct, hopefully.
+      days = kPingTimeJump;
+      LOG(WARNING)
+          << "System clock jumped back in time. Resetting ping daystarts.";
+    }
+  }
+  return days;
+}
+
+void OmahaRequestAction::InitPingDays() {
+  // We send pings only along with update checks, not with events.
+  if (IsEvent()) {
+    return;
+  }
+  // TODO(petkov): Figure a way to distinguish active use pings
+  // vs. roll call pings. Currently, the two pings are identical. A
+  // fix needs to change this code as well as UpdateLastPingDays and ShouldPing.
+  ping_active_days_ = CalculatePingDays(kPrefsLastActivePingDay);
+  ping_roll_call_days_ = CalculatePingDays(kPrefsLastRollCallPingDay);
+}
+
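+// Decides whether this request should carry a ping. Illustrative behaviour:
+// when neither ping day was ever recorded (both are kPingNeverPinged), the
+// special a=-1 r=-1 ping is sent unless the device was powerwashed or the
+// first active ping was already reported; otherwise we ping only when at
+// least one of the day counters is positive.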
+bool OmahaRequestAction::ShouldPing() const {
+  if (ping_active_days_ == kPingNeverPinged &&
+      ping_roll_call_days_ == kPingNeverPinged) {
+    int powerwash_count = SystemState::Get()->hardware()->GetPowerwashCount();
+    if (powerwash_count > 0) {
+      LOG(INFO) << "Not sending ping with a=-1 r=-1 to omaha because "
+                << "powerwash_count is " << powerwash_count;
+      return false;
+    }
+    if (SystemState::Get()->hardware()->GetFirstActiveOmahaPingSent()) {
+      LOG(INFO) << "Not sending ping with a=-1 r=-1 to omaha because "
+                << "the first_active_omaha_ping_sent is true.";
+      return false;
+    }
+    return true;
+  }
+  return ping_active_days_ > 0 || ping_roll_call_days_ > 0;
+}
+
+// static
+int OmahaRequestAction::GetInstallDate() {
+  auto* prefs = SystemState::Get()->prefs();
+  // If we have the value stored on disk, just return it.
+  int64_t stored_value;
+  if (prefs->GetInt64(kPrefsInstallDateDays, &stored_value)) {
+    // Convert and validity-check.
+    int install_date_days = static_cast<int>(stored_value);
+    if (install_date_days >= 0)
+      return install_date_days;
+    LOG(ERROR) << "Dropping stored Omaha InstallData since its value num_days="
+               << install_date_days << " looks suspicious.";
+    prefs->Delete(kPrefsInstallDateDays);
+  }
+
+  // Otherwise, if OOBE is not complete then do nothing and wait for
+  // ParseResponse() to call ParseInstallDate() and then
+  // PersistInstallDate() to set the kPrefsInstallDateDays state
+  // variable. Once that is done, we'll then report back in future
+  // Omaha requests.  This works exactly because OOBE triggers an
+  // update check.
+  //
+  // However, if OOBE is complete and the kPrefsInstallDateDays state
+  // variable is not set, there are two possibilities
+  //
+  //   1. The update check in OOBE failed so we never got a response
+  //      from Omaha (no network etc.); or
+  //
+  //   2. OOBE was done on an older version that didn't write to the
+  //      kPrefsInstallDateDays state variable.
+  //
+  // In both cases, we approximate the install date by simply
+  // inspecting the timestamp of when OOBE happened.
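+  //
+  // Illustrative example (hypothetical date): an OOBE timestamp of
+  // 2007-01-08 00:00 PST would convert to num_days = 7, since the InstallDate
+  // is expressed in days since Jan 1 2007, 0:00 PST.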
+
+  Time time_of_oobe;
+  if (!SystemState::Get()->hardware()->IsOOBEEnabled() ||
+      !SystemState::Get()->hardware()->IsOOBEComplete(&time_of_oobe)) {
+    LOG(INFO) << "Not generating Omaha InstallData as we have "
+              << "no prefs file and OOBE is not complete or not enabled.";
+    return -1;
+  }
+
+  int num_days;
+  if (!utils::ConvertToOmahaInstallDate(time_of_oobe, &num_days)) {
+    LOG(ERROR) << "Not generating Omaha InstallData from time of OOBE "
+               << "as its value '" << utils::ToString(time_of_oobe)
+               << "' looks suspicious.";
+    return -1;
+  }
+
+  // Persist this to disk, for future use.
+  if (!OmahaRequestAction::PersistInstallDate(num_days,
+                                              kProvisionedFromOOBEMarker))
+    return -1;
+
+  LOG(INFO) << "Set the Omaha InstallDate from OOBE time-stamp to " << num_days
+            << " days.";
+
+  return num_days;
+}
+
+void OmahaRequestAction::StorePingReply(
+    const OmahaParserData& parser_data) const {
+  const auto* params = SystemState::Get()->request_params();
+  for (const auto& app : parser_data.apps) {
+    auto it = params->dlc_apps_params().find(app.id);
+    if (it == params->dlc_apps_params().end())
+      continue;
+
+    const OmahaRequestParams::AppParams& dlc_params = it->second;
+    const string& dlc_id = dlc_params.name;
+    // Skip if the ping for this DLC was not sent.
+    if (!dlc_params.send_ping)
+      continue;
+
+    auto* prefs = SystemState::Get()->prefs();
+    // Reset the active metadata value to |kPingInactiveValue|.
+    auto active_key =
+        prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingActive});
+    if (!prefs->SetInt64(active_key, kPingInactiveValue))
+      LOG(ERROR) << "Failed to set the value of ping metadata '" << active_key
+                 << "'.";
+
+    auto last_rollcall_key =
+        prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall});
+    if (!prefs->SetString(last_rollcall_key, parser_data.daystart.elapsed_days))
+      LOG(ERROR) << "Failed to set the value of ping metadata '"
+                 << last_rollcall_key << "'.";
+
+    if (dlc_params.ping_active) {
+      // Write the value of elapsed_days into |kPrefsPingLastActive| only if
+      // the previous ping was an active one.
+      auto last_active_key =
+          prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive});
+      if (!prefs->SetString(last_active_key, parser_data.daystart.elapsed_days))
+        LOG(ERROR) << "Failed to set the value of ping metadata '"
+                   << last_active_key << "'.";
+    }
+  }
+}
+
+void OmahaRequestAction::PerformAction() {
+  http_fetcher_->set_delegate(this);
+  InitPingDays();
+  if (ping_only_ && !ShouldPing()) {
+    processor_->ActionComplete(this, ErrorCode::kSuccess);
+    return;
+  }
+
+  OmahaRequestBuilderXml omaha_request(event_.get(),
+                                       ping_only_,
+                                       ShouldPing(),  // include_ping
+                                       ping_active_days_,
+                                       ping_roll_call_days_,
+                                       GetInstallDate(),
+                                       session_id_);
+  string request_post = omaha_request.GetRequest();
+
+  // Set X-Goog-Update headers.
+  const auto* params = SystemState::Get()->request_params();
+  http_fetcher_->SetHeader(kXGoogleUpdateInteractivity,
+                           params->interactive() ? "fg" : "bg");
+  http_fetcher_->SetHeader(kXGoogleUpdateAppId, params->GetAppId());
+  http_fetcher_->SetHeader(
+      kXGoogleUpdateUpdater,
+      base::StringPrintf(
+          "%s-%s", constants::kOmahaUpdaterID, kOmahaUpdaterVersion));
+
+  http_fetcher_->SetPostData(
+      request_post.data(), request_post.size(), kHttpContentTypeTextXml);
+  LOG(INFO) << "Posting an Omaha request to " << params->update_url();
+  LOG(INFO) << "Request: " << request_post;
+  http_fetcher_->BeginTransfer(params->update_url());
+}
+
+void OmahaRequestAction::TerminateProcessing() {
+  http_fetcher_->TerminateTransfer();
+}
+
+// We just store the response in the buffer. Once we've received all bytes,
+// we'll look in the buffer and decide what to do.
+bool OmahaRequestAction::ReceivedBytes(HttpFetcher* fetcher,
+                                       const void* bytes,
+                                       size_t length) {
+  const uint8_t* byte_ptr = reinterpret_cast<const uint8_t*>(bytes);
+  response_buffer_.insert(response_buffer_.end(), byte_ptr, byte_ptr + length);
+  return true;
+}
+
+namespace {
+
+// Parses a 64-bit base-10 integer from a string and returns it. Returns 0
+// on error; note that a string containing "0" is indistinguishable from an
+// error.
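+// For example (illustrative): ParseInt("123") == 123, ParseInt("-7") == -7 and
+// ParseInt("abc") == 0, which is why a genuine "0" cannot be told apart from a
+// parse failure.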
+off_t ParseInt(const string& str) {
+  off_t ret = 0;
+  int rc = sscanf(str.c_str(), "%" PRIi64, &ret);  // NOLINT(runtime/printf)
+  if (rc < 1) {
+    // failure
+    return 0;
+  }
+  return ret;
+}
+
+// Parses |str| and returns |true| if, and only if, its value is "true".
+bool ParseBool(const string& str) {
+  return str == "true";
+}
+
+// Update the last ping day preferences based on the server daystart
+// response. Returns true on success, false otherwise.
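+// Illustrative example (hypothetical value): elapsed_seconds="7200" means the
+// server-side day started two hours ago, so both last-ping preferences are set
+// to (now - 2h) expressed in local time.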
+bool UpdateLastPingDays(OmahaParserData* parser_data) {
+  int64_t elapsed_seconds = 0;
+  TEST_AND_RETURN_FALSE(base::StringToInt64(
+      parser_data->daystart.elapsed_seconds, &elapsed_seconds));
+  TEST_AND_RETURN_FALSE(elapsed_seconds >= 0);
+
+  // Remember the local time that matches the server's last midnight
+  // time.
+  auto* prefs = SystemState::Get()->prefs();
+  Time daystart = Time::Now() - TimeDelta::FromSeconds(elapsed_seconds);
+  prefs->SetInt64(kPrefsLastActivePingDay, daystart.ToInternalValue());
+  prefs->SetInt64(kPrefsLastRollCallPingDay, daystart.ToInternalValue());
+  return true;
+}
+
+// Parses the package node in the given XML document and populates
+// |output_object| if valid. Returns true if we should continue the parsing;
+// false otherwise, in which case it sets the appropriate error code using
+// |completer|.
+bool ParsePackage(OmahaParserData::App* app,
+                  OmahaResponse* output_object,
+                  bool can_exclude,
+                  ScopedActionCompleter* completer) {
+  if (app->updatecheck.status.empty() ||
+      app->updatecheck.status == kValNoUpdate) {
+    if (!app->packages.empty()) {
+      LOG(ERROR) << "No update in this <app> but <package> is not empty.";
+      completer->set_code(ErrorCode::kOmahaResponseInvalid);
+      return false;
+    }
+    return true;
+  }
+  if (app->packages.empty()) {
+    LOG(ERROR) << "Omaha Response has no packages.";
+    completer->set_code(ErrorCode::kOmahaResponseInvalid);
+    return false;
+  }
+  if (app->urls.empty()) {
+    LOG(ERROR) << "No Omaha Response URLs.";
+    completer->set_code(ErrorCode::kOmahaResponseInvalid);
+    return false;
+  }
+  for (size_t i = 0; i < app->packages.size(); i++) {
+    const auto& package = app->packages[i];
+    if (package.name.empty()) {
+      LOG(ERROR) << "Omaha Response has empty package name.";
+      completer->set_code(ErrorCode::kOmahaResponseInvalid);
+      return false;
+    }
+
+    OmahaResponse::Package out_package;
+    out_package.app_id = app->id;
+    out_package.can_exclude = can_exclude;
+    for (const auto& url : app->urls) {
+      if (url.codebase.empty()) {
+        LOG(ERROR) << "Omaha Response URL has empty codebase.";
+        completer->set_code(ErrorCode::kOmahaResponseInvalid);
+        return false;
+      }
+      out_package.payload_urls.push_back(url.codebase + package.name);
+    }
+
+    base::StringToUint64(package.size, &out_package.size);
+    if (out_package.size <= 0) {
+      LOG(ERROR) << "Omaha Response has invalid payload size: " << package.size;
+      completer->set_code(ErrorCode::kOmahaResponseInvalid);
+      return false;
+    }
+
+    if (i < app->postinstall_action->metadata_sizes.size())
+      base::StringToUint64(app->postinstall_action->metadata_sizes[i],
+                           &out_package.metadata_size);
+
+    if (i < app->postinstall_action->metadata_signature_rsas.size())
+      out_package.metadata_signature =
+          app->postinstall_action->metadata_signature_rsas[i];
+
+    out_package.hash = package.hash;
+    if (out_package.hash.empty()) {
+      LOG(ERROR) << "Omaha Response has empty hash_sha256 value.";
+      completer->set_code(ErrorCode::kOmahaResponseInvalid);
+      return false;
+    }
+
+    out_package.fp = package.fp;
+
+    if (i < app->postinstall_action->is_delta_payloads.size())
+      out_package.is_delta =
+          ParseBool(app->postinstall_action->is_delta_payloads[i]);
+
+    output_object->packages.push_back(std::move(out_package));
+  }
+
+  return true;
+}
+
+// Removes the candidate URLs which are excluded within packages. If all the
+// candidate URLs of a package are excluded, the whole package is excluded.
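+// Illustrative example (hypothetical URLs): a package with candidate URLs
+// {U1, U2} where only U1 is excluded keeps U2; if both are excluded, the
+// package is dropped and its DLC is marked as not updated via
+// SetDlcNoUpdate().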
+void ProcessExclusions(OmahaResponse* output_object,
+                       OmahaRequestParams* params,
+                       ExcluderInterface* excluder) {
+  for (auto package_it = output_object->packages.begin();
+       package_it != output_object->packages.end();
+       /* Increment logic in loop */) {
+    // If package cannot be excluded, quickly continue.
+    if (!package_it->can_exclude) {
+      ++package_it;
+      continue;
+    }
+    // Remove the excluded payload URLs.
+    for (auto payload_url_it = package_it->payload_urls.begin();
+         payload_url_it != package_it->payload_urls.end();
+         /* Increment logic in loop */) {
+      auto exclusion_name = utils::GetExclusionName(*payload_url_it);
+      // If payload URL is not excluded, quickly continue.
+      if (!excluder->IsExcluded(exclusion_name)) {
+        ++payload_url_it;
+        continue;
+      }
+      LOG(INFO) << "Excluding payload URL=" << *payload_url_it
+                << " for payload hash=" << package_it->hash;
+      payload_url_it = package_it->payload_urls.erase(payload_url_it);
+    }
+    // If there are no candidate payload URLs, remove the package.
+    if (package_it->payload_urls.empty()) {
+      LOG(INFO) << "Excluding payload hash=" << package_it->hash;
+      // Need to set DLC as not updated so correct metrics can be sent when an
+      // update is completed.
+      params->SetDlcNoUpdate(package_it->app_id);
+      package_it = output_object->packages.erase(package_it);
+      continue;
+    }
+    ++package_it;
+  }
+}
+
+// Parses the two key version strings, kernel_version and firmware_version. If
+// a field is not present or cannot be parsed, the values default to 0xffff.
+void ParseRollbackVersions(const OmahaParserData::App& platform_app,
+                           int allowed_milestones,
+                           OmahaResponse* output_object) {
+  // Defaults to false if attribute is not present.
+  output_object->is_rollback = ParseBool(platform_app.updatecheck.rollback);
+
+  utils::ParseRollbackKeyVersion(
+      platform_app.updatecheck.firmware_version,
+      &output_object->rollback_key_version.firmware_key,
+      &output_object->rollback_key_version.firmware);
+  utils::ParseRollbackKeyVersion(
+      platform_app.updatecheck.kernel_version,
+      &output_object->rollback_key_version.kernel_key,
+      &output_object->rollback_key_version.kernel);
+
+  string firmware_version = platform_app.updatecheck.past_firmware_version;
+  string kernel_version = platform_app.updatecheck.past_kernel_version;
+
+  LOG(INFO) << "For milestone N-" << allowed_milestones
+            << " firmware_key_version=" << firmware_version
+            << " kernel_key_version=" << kernel_version;
+
+  OmahaResponse::RollbackKeyVersion version;
+  utils::ParseRollbackKeyVersion(
+      firmware_version, &version.firmware_key, &version.firmware);
+  utils::ParseRollbackKeyVersion(
+      kernel_version, &version.kernel_key, &version.kernel);
+
+  output_object->past_rollback_key_version = std::move(version);
+}
+
+void PersistEolInfo(const OmahaParserData::App& platform_app) {
+  // If EOL date attribute is not sent, don't delete the old persisted EOL
+  // date information.
+  if (!platform_app.updatecheck.eol_date.empty() &&
+      !SystemState::Get()->prefs()->SetString(
+          kPrefsOmahaEolDate, platform_app.updatecheck.eol_date)) {
+    LOG(ERROR) << "Setting EOL date failed.";
+  }
+}
+
+}  // namespace
+
+bool OmahaRequestAction::ParseResponse(OmahaParserData* parser_data,
+                                       OmahaResponse* output_object,
+                                       ScopedActionCompleter* completer) {
+  if (parser_data->apps.empty()) {
+    completer->set_code(ErrorCode::kOmahaResponseInvalid);
+    return false;
+  }
+
+  // Locate the platform App since it's an important one that has specific
+  // information attached to it that may not be available from other Apps.
+  const auto* params = SystemState::Get()->request_params();
+  auto platform_app = std::find_if(parser_data->apps.begin(),
+                                   parser_data->apps.end(),
+                                   [&params](const OmahaParserData::App& app) {
+                                     return app.id == params->GetAppId();
+                                   });
+  if (platform_app == parser_data->apps.end()) {
+    LOG(WARNING) << "Platform App is missing.";
+  } else {
+    // chromium-os:37289: The PollInterval is not supported by Omaha server
+    // currently.  But still keeping this existing code in case we ever decide
+    // to slow down the request rate from the server-side. Note that the
+    // PollInterval is not persisted, so it has to be sent by the server on
+    // every response to guarantee that the scheduler uses this value
+    // (otherwise, if the device got rebooted after the last server-indicated
+    // value, it'll revert to the default value). Also kDefaultMaxUpdateChecks
+    // value for the scattering logic is based on the assumption that we perform
+    // an update check every hour so that the max value of 8 will roughly be
+    // equivalent to one work day. If we decide to use PollInterval permanently,
+    // we should update the max_update_checks_allowed to take PollInterval into
+    // account.  Note: The parsing for PollInterval happens even before parsing
+    // of the status because we may want to specify the PollInterval even when
+    // there's no update.
+    base::StringToInt(platform_app->updatecheck.poll_interval,
+                      &output_object->poll_interval);
+
+    PersistEolInfo(*platform_app);
+
+    // Parses the rollback versions of the current image. If the fields do not
+    // exist they default to 0xffff for the 4 key versions.
+    ParseRollbackVersions(
+        *platform_app, params->rollback_allowed_milestones(), output_object);
+  }
+
+  // Check for the "elapsed_days" attribute in the "daystart"
+  // element. This is the number of days since Jan 1 2007, 0:00
+  // PST. If we don't have a persisted value of the Omaha InstallDate,
+  // we'll use it to calculate it and then persist it.
+  if (ParseInstallDate(parser_data, output_object) && !HasInstallDate()) {
+    // Since output_object->install_date_days is never negative, the
+    // elapsed_days -> install-date calculation is reduced to simply
+    // rounding down to the nearest number divisible by 7.
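+    // For example (illustrative): elapsed_days = 1234 gives remainder 2 and a
+    // persisted value of 1232.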
+    int remainder = output_object->install_date_days % 7;
+    int install_date_days_rounded =
+        output_object->install_date_days - remainder;
+    if (PersistInstallDate(install_date_days_rounded,
+                           kProvisionedFromOmahaResponse)) {
+      LOG(INFO) << "Set the Omaha InstallDate from Omaha Response to "
+                << install_date_days_rounded << " days.";
+    }
+  }
+
+  // We persist the cohorts sent by omaha even if the status is "noupdate".
+  PersistCohorts(*parser_data);
+
+  if (!ParseStatus(parser_data, output_object, completer))
+    return false;
+
+  if (!ParseParams(parser_data, output_object, completer))
+    return false;
+
+  // Packages have to be parsed after Params now because ParseParams needs to
+  // make sure that the postinstall action exists.
+  for (auto& app : parser_data->apps) {
+    // Only allow exclusions for a non-critical package during an update. For
+    // non-critical package installations, let the errors propagate instead
+    // of being handled inside update_engine as installations are a dlcservice
+    // specific feature.
+    bool can_exclude = !params->is_install() && params->IsDlcAppId(app.id);
+    if (!ParsePackage(&app, output_object, can_exclude, completer))
+      return false;
+  }
+
+  return true;
+}
+
+bool OmahaRequestAction::ParseStatus(OmahaParserData* parser_data,
+                                     OmahaResponse* output_object,
+                                     ScopedActionCompleter* completer) {
+  output_object->update_exists = false;
+  auto* params = SystemState::Get()->request_params();
+  for (const auto& app : parser_data->apps) {
+    const string& status = app.updatecheck.status;
+    if (status == kValNoUpdate) {
+      // If the app is a DLC, allow status "noupdate" to support DLC
+      // deprecations.
+      if (params->IsDlcAppId(app.id)) {
+        LOG(INFO) << "No update for App " << app.id
+                  << " but update continuing since a DLC.";
+        params->SetDlcNoUpdate(app.id);
+        continue;
+      }
+      // Don't update if any app has status="noupdate".
+      LOG(INFO) << "No update for App " << app.id;
+      output_object->update_exists = false;
+      break;
+    } else if (status == "ok") {
+      if (ParseBool(app.postinstall_action->no_update)) {
+        // noupdate="true" in the postinstall attributes means it's an update
+        // to self; only update if at least one app really has an update.
+        LOG(INFO) << "Update to self for App " << app.id;
+      } else {
+        output_object->update_exists = true;
+      }
+    } else if (status.empty() && params->is_install() &&
+               params->GetAppId() == app.id) {
+      // Skips the platform app for install operation.
+      LOG(INFO) << "No payload (and ignore) for App " << app.id;
+    } else {
+      LOG(ERROR) << "Unknown Omaha response status: " << status;
+      completer->set_code(ErrorCode::kOmahaResponseInvalid);
+      return false;
+    }
+  }
+  if (!output_object->update_exists) {
+    SetOutputObject(*output_object);
+    completer->set_code(ErrorCode::kSuccess);
+  }
+
+  return output_object->update_exists;
+}
+
+bool OmahaRequestAction::ParseParams(OmahaParserData* parser_data,
+                                     OmahaResponse* output_object,
+                                     ScopedActionCompleter* completer) {
+  const auto* params = SystemState::Get()->request_params();
+  const OmahaParserData::App* main_app = nullptr;
+  for (const auto& app : parser_data->apps) {
+    if (app.id == params->GetAppId() && app.postinstall_action) {
+      main_app = &app;
+    } else if (params->is_install()) {
+      if (app.manifest.version != params->app_version()) {
+        LOG(WARNING) << "An app has a version: " << app.manifest.version
+                     << " that is different than platform app version: "
+                     << params->app_version();
+      }
+    }
+    if (app.postinstall_action && main_app == nullptr) {
+      main_app = &app;
+    }
+  }
+
+  if (main_app == nullptr) {
+    LOG(ERROR) << "Omaha Response has no postinstall event action.";
+    completer->set_code(ErrorCode::kOmahaResponseInvalid);
+    return false;
+  }
+
+  const OmahaParserData::App& app = *main_app;
+  // Get the optional properties one by one.
+  output_object->version = app.manifest.version;
+  output_object->more_info_url = app.postinstall_action->more_info_url;
+  output_object->prompt = ParseBool(app.postinstall_action->prompt);
+  output_object->deadline = app.postinstall_action->deadline;
+  output_object->max_days_to_scatter =
+      ParseInt(app.postinstall_action->max_days_to_scatter);
+  output_object->disable_p2p_for_downloading =
+      ParseBool(app.postinstall_action->disable_p2p_for_downloading);
+  output_object->disable_p2p_for_sharing =
+      ParseBool(app.postinstall_action->disable_p2p_for_sharing);
+  output_object->public_key_rsa = app.postinstall_action->public_key_rsa;
+
+  if (!base::StringToUint(app.postinstall_action->max_failure_count_per_url,
+                          &output_object->max_failure_count_per_url))
+    output_object->max_failure_count_per_url = kDefaultMaxFailureCountPerUrl;
+
+  output_object->disable_payload_backoff =
+      ParseBool(app.postinstall_action->disable_payload_backoff);
+  output_object->powerwash_required =
+      ParseBool(app.postinstall_action->powerwash_required);
+
+  if (output_object->version.empty()) {
+    LOG(ERROR) << "Omaha Response does not have version in manifest!";
+    completer->set_code(ErrorCode::kOmahaResponseInvalid);
+    return false;
+  }
+
+  return true;
+}
+
+// If the transfer was successful, this uses expat to parse the response
+// and fill in the appropriate fields of the output object. It also notifies
+// the processor that we're done.
+void OmahaRequestAction::TransferComplete(HttpFetcher* fetcher,
+                                          bool successful) {
+  ScopedActionCompleter completer(processor_, this);
+  string current_response(response_buffer_.begin(), response_buffer_.end());
+  LOG(INFO) << "Omaha request response: " << current_response;
+
+  PayloadStateInterface* const payload_state =
+      SystemState::Get()->payload_state();
+
+  // Set the max kernel key version based on whether rollback is allowed.
+  SetMaxKernelKeyVersionForRollback();
+
+  // Events are best effort transactions -- assume they always succeed.
+  if (IsEvent()) {
+    CHECK(!HasOutputPipe()) << "No output pipe allowed for event requests.";
+    completer.set_code(ErrorCode::kSuccess);
+    return;
+  }
+
+  ErrorCode aux_error_code = fetcher->GetAuxiliaryErrorCode();
+  if (aux_error_code != ErrorCode::kSuccess) {
+    metrics::DownloadErrorCode download_error_code =
+        metrics_utils::GetDownloadErrorCode(aux_error_code);
+    SystemState::Get()->metrics_reporter()->ReportUpdateCheckMetrics(
+        metrics::CheckResult::kUnset,
+        metrics::CheckReaction::kUnset,
+        download_error_code);
+  }
+
+  if (!successful) {
+    int code = GetHTTPResponseCode();
+    LOG(ERROR) << "Omaha request network transfer failed with HTTPResponseCode="
+               << code;
+    // Makes sure we send proper error values.
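+    // Illustrative mapping (hypothetical code): an HTTP 404 is reported as
+    // ErrorCode::kOmahaRequestHTTPResponseBase + 404; codes outside [0, 999]
+    // are clamped to 999 first.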
+    if (code < 0 || code >= 1000) {
+      code = 999;
+      LOG(WARNING) << "Converting to proper HTTPResponseCode=" << code;
+    }
+    completer.set_code(static_cast<ErrorCode>(
+        static_cast<int>(ErrorCode::kOmahaRequestHTTPResponseBase) + code));
+    return;
+  }
+
+  XML_Parser parser = XML_ParserCreate(nullptr);
+  OmahaParserData parser_data(
+      parser,
+      SystemState::Get()->request_params()->rollback_allowed_milestones());
+  XML_SetUserData(parser, &parser_data);
+  XML_SetElementHandler(parser, ParserHandlerStart, ParserHandlerEnd);
+  XML_SetEntityDeclHandler(parser, ParserHandlerEntityDecl);
+  XML_Status res =
+      XML_Parse(parser,
+                reinterpret_cast<const char*>(response_buffer_.data()),
+                response_buffer_.size(),
+                XML_TRUE);
+
+  if (res != XML_STATUS_OK || parser_data.failed) {
+    LOG(ERROR) << "Omaha response not valid XML: "
+               << XML_ErrorString(XML_GetErrorCode(parser)) << " at line "
+               << XML_GetCurrentLineNumber(parser) << " col "
+               << XML_GetCurrentColumnNumber(parser);
+    XML_ParserFree(parser);
+    ErrorCode error_code = ErrorCode::kOmahaRequestXMLParseError;
+    if (response_buffer_.empty()) {
+      error_code = ErrorCode::kOmahaRequestEmptyResponseError;
+    } else if (parser_data.entity_decl) {
+      error_code = ErrorCode::kOmahaRequestXMLHasEntityDecl;
+    }
+    completer.set_code(error_code);
+    return;
+  }
+  XML_ParserFree(parser);
+
+  // Update the last ping day preferences based on the server daystart response
+  // even if we didn't send a ping. Omaha should always include the daystart in
+  // the response, but log an error if it doesn't.
+  LOG_IF(ERROR, !UpdateLastPingDays(&parser_data))
+      << "Failed to update the last ping day preferences!";
+
+  // Sets first_active_omaha_ping_sent to true (vpd in CrOS). We only do this if
+  // we got a response from Omaha and if its value has never been set to
+  // true before. Failure of this function should be ignored. There should be no
+  // need to check if a=-1 has been sent because older devices have already sent
+  // their a=-1 in the past and we have to set first_active_omaha_ping_sent for
+  // future checks.
+  if (!SystemState::Get()->hardware()->GetFirstActiveOmahaPingSent()) {
+    if (!SystemState::Get()->hardware()->SetFirstActiveOmahaPingSent()) {
+      SystemState::Get()->metrics_reporter()->ReportInternalErrorCode(
+          ErrorCode::kFirstActiveOmahaPingSentPersistenceError);
+    }
+  }
+
+  // Create/update the metadata files for each DLC app received.
+  StorePingReply(parser_data);
+
+  if (!HasOutputPipe()) {
+    // Just set success to whether or not the http transfer succeeded,
+    // which must be true at this point in the code.
+    completer.set_code(ErrorCode::kSuccess);
+    return;
+  }
+
+  OmahaResponse output_object;
+  if (!ParseResponse(&parser_data, &output_object, &completer))
+    return;
+  ProcessExclusions(&output_object,
+                    SystemState::Get()->request_params(),
+                    SystemState::Get()->update_attempter()->GetExcluder());
+  output_object.update_exists = true;
+  SetOutputObject(output_object);
+
+  LoadOrPersistUpdateFirstSeenAtPref();
+
+  ErrorCode error = ErrorCode::kSuccess;
+  if (ShouldIgnoreUpdate(output_object, &error)) {
+    // No need to change output_object.update_exists here, since the value
+    // has been output to the pipe.
+    completer.set_code(error);
+    return;
+  }
+
+  // If Omaha says to disable p2p, respect that
+  if (output_object.disable_p2p_for_downloading) {
+    LOG(INFO) << "Forcibly disabling use of p2p for downloading as "
+              << "requested by Omaha.";
+    payload_state->SetUsingP2PForDownloading(false);
+  }
+  if (output_object.disable_p2p_for_sharing) {
+    LOG(INFO) << "Forcibly disabling use of p2p for sharing as "
+              << "requested by Omaha.";
+    payload_state->SetUsingP2PForSharing(false);
+  }
+
+  // Update the payload state with the current response. The payload state
+  // will automatically reset all stale state if this response is different
+  // from what's stored already. We are updating the payload state as late
+  // as possible in this method so that if a new release gets pushed and then
+  // got pulled back due to some issues, we don't want to clear our internal
+  // state unnecessarily.
+  payload_state->SetResponse(output_object);
+
+  // It could be we've already exceeded the deadline for when p2p is
+  // allowed or that we've tried too many times with p2p. Check that.
+  if (payload_state->GetUsingP2PForDownloading()) {
+    payload_state->P2PNewAttempt();
+    if (!payload_state->P2PAttemptAllowed()) {
+      LOG(INFO) << "Forcibly disabling use of p2p for downloading because "
+                << "of previous failures when using p2p.";
+      payload_state->SetUsingP2PForDownloading(false);
+    }
+  }
+
+  // From here on, we'll complete stuff in CompleteProcessing() so
+  // disable |completer| since we'll create a new one in that
+  // function.
+  completer.set_should_complete(false);
+
+  // If we're allowed to use p2p for downloading we do not pay
+  // attention to wall-clock-based waiting if the URL is indeed
+  // available via p2p. Therefore, check if the file is available via
+  // p2p before deferring...
+  if (payload_state->GetUsingP2PForDownloading()) {
+    LookupPayloadViaP2P(output_object);
+  } else {
+    CompleteProcessing();
+  }
+}
+
+void OmahaRequestAction::CompleteProcessing() {
+  ScopedActionCompleter completer(processor_, this);
+  OmahaResponse& output_object = const_cast<OmahaResponse&>(GetOutputObject());
+  PayloadStateInterface* payload_state = SystemState::Get()->payload_state();
+
+  if (ShouldDeferDownload(&output_object)) {
+    output_object.update_exists = false;
+    LOG(INFO) << "Ignoring Omaha updates as updates are deferred by policy.";
+    completer.set_code(ErrorCode::kOmahaUpdateDeferredPerPolicy);
+    return;
+  }
+
+  if (payload_state->ShouldBackoffDownload()) {
+    output_object.update_exists = false;
+    LOG(INFO) << "Ignoring Omaha updates in order to backoff our retry "
+              << "attempts.";
+    completer.set_code(ErrorCode::kOmahaUpdateDeferredForBackoff);
+    return;
+  }
+  completer.set_code(ErrorCode::kSuccess);
+}
+
+void OmahaRequestAction::OnLookupPayloadViaP2PCompleted(const string& url) {
+  LOG(INFO) << "Lookup complete, p2p-client returned URL '" << url << "'";
+  if (!url.empty()) {
+    SystemState::Get()->payload_state()->SetP2PUrl(url);
+  } else {
+    LOG(INFO) << "Forcibly disabling use of p2p for downloading "
+              << "because no suitable peer could be found.";
+    SystemState::Get()->payload_state()->SetUsingP2PForDownloading(false);
+  }
+  CompleteProcessing();
+}
+
+void OmahaRequestAction::LookupPayloadViaP2P(const OmahaResponse& response) {
+  // If the device is in the middle of an update, the state variables
+  // kPrefsUpdateStateNextDataOffset, kPrefsUpdateStateNextDataLength
+  // tracks the offset and length of the operation currently in
+  // progress. The offset is based from the end of the manifest which
+  // is kPrefsManifestMetadataSize bytes long.
+  //
+  // To make forward progress and avoid deadlocks, we need to find a
+  // peer that has at least the entire operation we're currently
+  // working on. Otherwise we may end up in a situation where two
+  // devices bounce back and forth downloading from each other,
+  // neither making any forward progress until one of them decides to
+  // stop using p2p (via kMaxP2PAttempts and kMaxP2PAttemptTimeSeconds
+  // safe-guards). See http://crbug.com/297170 for an example.
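+  //
+  // Illustrative sizes (hypothetical): manifest_metadata_size = 1000,
+  // manifest_signature_size = 256, next_data_offset = 4096 and
+  // next_data_length = 1024 require a peer holding at least 6376 bytes of the
+  // payload before we will use it.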
+  size_t minimum_size = 0;
+  int64_t manifest_metadata_size = 0;
+  int64_t manifest_signature_size = 0;
+  int64_t next_data_offset = 0;
+  int64_t next_data_length = 0;
+  if (SystemState::Get()->prefs()->GetInt64(kPrefsManifestMetadataSize,
+                                            &manifest_metadata_size) &&
+      manifest_metadata_size != -1 &&
+      SystemState::Get()->prefs()->GetInt64(kPrefsManifestSignatureSize,
+                                            &manifest_signature_size) &&
+      manifest_signature_size != -1 &&
+      SystemState::Get()->prefs()->GetInt64(kPrefsUpdateStateNextDataOffset,
+                                            &next_data_offset) &&
+      next_data_offset != -1 &&
+      SystemState::Get()->prefs()->GetInt64(kPrefsUpdateStateNextDataLength,
+                                            &next_data_length)) {
+    minimum_size = manifest_metadata_size + manifest_signature_size +
+                   next_data_offset + next_data_length;
+  }
+
+  // TODO(senj): Fix P2P for multiple packages.
+  brillo::Blob raw_hash;
+  if (!base::HexStringToBytes(response.packages[0].hash, &raw_hash))
+    return;
+  string file_id =
+      utils::CalculateP2PFileId(raw_hash, response.packages[0].size);
+  if (SystemState::Get()->p2p_manager()) {
+    LOG(INFO) << "Checking if payload is available via p2p, file_id=" << file_id
+              << " minimum_size=" << minimum_size;
+    SystemState::Get()->p2p_manager()->LookupUrlForFile(
+        file_id,
+        minimum_size,
+        TimeDelta::FromSeconds(kMaxP2PNetworkWaitTimeSeconds),
+        base::Bind(&OmahaRequestAction::OnLookupPayloadViaP2PCompleted,
+                   base::Unretained(this)));
+  }
+}
+
+bool OmahaRequestAction::ShouldDeferDownload(OmahaResponse* output_object) {
+  const auto* params = SystemState::Get()->request_params();
+  if (params->interactive()) {
+    LOG(INFO) << "Not deferring download because update is interactive.";
+    return false;
+  }
+
+  // If we're using p2p to download _and_ we have a p2p URL, we never
+  // defer the download. This is because the download will always
+  // happen from a peer on the LAN and we've been waiting in line for
+  // our turn.
+  const PayloadStateInterface* payload_state =
+      SystemState::Get()->payload_state();
+  if (payload_state->GetUsingP2PForDownloading() &&
+      !payload_state->GetP2PUrl().empty()) {
+    LOG(INFO) << "Download not deferred because download "
+              << "will happen from a local peer (via p2p).";
+    return false;
+  }
+
+  // We should defer the downloads only if we've first satisfied the
+  // wall-clock-based-waiting period and then the update-check-based waiting
+  // period, if required.
+  if (!params->wall_clock_based_wait_enabled()) {
+    LOG(INFO) << "Wall-clock-based waiting period is not enabled,"
+              << " so no deferring needed.";
+    return false;
+  }
+
+  switch (IsWallClockBasedWaitingSatisfied(output_object)) {
+    case kWallClockWaitNotSatisfied:
+      // We haven't even satisfied the first condition, passing the
+      // wall-clock-based waiting period, so we should defer the downloads
+      // until that happens.
+      LOG(INFO) << "wall-clock-based-wait not satisfied.";
+      return true;
+
+    case kWallClockWaitDoneButUpdateCheckWaitRequired:
+      LOG(INFO) << "wall-clock-based-wait satisfied and "
+                << "update-check-based-wait required.";
+      return !IsUpdateCheckCountBasedWaitingSatisfied();
+
+    case kWallClockWaitDoneAndUpdateCheckWaitNotRequired:
+      // Wall-clock-based waiting period is satisfied, and it's determined
+      // that we do not need the update-check-based wait, so there is no need
+      // to defer downloads.
+      LOG(INFO) << "wall-clock-based-wait satisfied and "
+                << "update-check-based-wait is not required.";
+      return false;
+
+    default:
+      // Returning false for this default case so we err on the side of
+      // downloading updates rather than deferring them in case of any bugs.
+      NOTREACHED();
+      return false;
+  }
+}
+
+OmahaRequestAction::WallClockWaitResult
+OmahaRequestAction::IsWallClockBasedWaitingSatisfied(
+    OmahaResponse* output_object) {
+  Time update_first_seen_at = LoadOrPersistUpdateFirstSeenAtPref();
+  if (update_first_seen_at == base::Time()) {
+    LOG(INFO) << "Not scattering as UpdateFirstSeenAt value cannot be read or "
+                 "persisted.";
+    return kWallClockWaitDoneAndUpdateCheckWaitNotRequired;
+  }
+
+  TimeDelta elapsed_time =
+      SystemState::Get()->clock()->GetWallclockTime() - update_first_seen_at;
+  TimeDelta max_scatter_period =
+      TimeDelta::FromDays(output_object->max_days_to_scatter);
+  int64_t staging_wait_time_in_days = 0;
+  // Use staging and its default max value if staging is on.
+  if (SystemState::Get()->prefs()->GetInt64(kPrefsWallClockStagingWaitPeriod,
+                                            &staging_wait_time_in_days) &&
+      staging_wait_time_in_days > 0)
+    max_scatter_period = TimeDelta::FromDays(kMaxWaitTimeStagingInDays);
+
+  const auto* params = SystemState::Get()->request_params();
+  LOG(INFO) << "Waiting Period = "
+            << utils::FormatSecs(params->waiting_period().InSeconds())
+            << ", Time Elapsed = "
+            << utils::FormatSecs(elapsed_time.InSeconds())
+            << ", MaxDaysToScatter = " << max_scatter_period.InDays();
+
+  if (!output_object->deadline.empty()) {
+    // The deadline is set for all rules which serve a delta update from a
+    // previous FSI, which means this update will be applied mostly in OOBE
+    // cases. For these cases, we shouldn't scatter so as to finish the OOBE
+    // quickly.
+    LOG(INFO) << "Not scattering as deadline flag is set.";
+    return kWallClockWaitDoneAndUpdateCheckWaitNotRequired;
+  }
+
+  if (max_scatter_period.InDays() == 0) {
+    // This means the Omaha rule creator decides that this rule
+    // should not be scattered irrespective of the policy.
+    LOG(INFO) << "Not scattering as MaxDaysToScatter in rule is 0.";
+    return kWallClockWaitDoneAndUpdateCheckWaitNotRequired;
+  }
+
+  if (elapsed_time > max_scatter_period) {
+    // This means we've waited more than the upperbound wait in the rule
+    // from the time we first saw a valid update available to us.
+    // This will prevent update starvation.
+    LOG(INFO) << "Not scattering as we're past the MaxDaysToScatter limit.";
+    return kWallClockWaitDoneAndUpdateCheckWaitNotRequired;
+  }
+
+  // This means we are required to participate in scattering.
+  // See if our turn has arrived now.
+  TimeDelta remaining_wait_time = params->waiting_period() - elapsed_time;
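+  // Illustrative example (hypothetical values): with a 7-day waiting_period
+  // and 2 days elapsed since UpdateFirstSeenAt, remaining_wait_time is 5 days
+  // and we fall through to kWallClockWaitNotSatisfied below.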
+  if (remaining_wait_time.InSeconds() <= 0) {
+    // Yes, it's our turn now.
+    LOG(INFO) << "Successfully passed the wall-clock-based-wait.";
+
+    // But we can't download until the update-check-count-based wait is also
+    // satisfied, so mark it as required now if update checks are enabled.
+    return params->update_check_count_wait_enabled()
+               ? kWallClockWaitDoneButUpdateCheckWaitRequired
+               : kWallClockWaitDoneAndUpdateCheckWaitNotRequired;
+  }
+
+  // Not our turn yet, so we have to wait until our turn to
+  // help scatter the downloads across all clients of the enterprise.
+  LOG(INFO) << "Update deferred for another "
+            << utils::FormatSecs(remaining_wait_time.InSeconds())
+            << " per policy.";
+  return kWallClockWaitNotSatisfied;
+}
+
+bool OmahaRequestAction::IsUpdateCheckCountBasedWaitingSatisfied() {
+  int64_t update_check_count_value;
+  const auto* params = SystemState::Get()->request_params();
+
+  if (SystemState::Get()->prefs()->Exists(kPrefsUpdateCheckCount)) {
+    if (!SystemState::Get()->prefs()->GetInt64(kPrefsUpdateCheckCount,
+                                               &update_check_count_value)) {
+      // We are unable to read the update check count from file for some reason.
+      // So let's proceed anyway so as to not stall the update.
+      LOG(ERROR) << "Unable to read update check count. "
+                 << "Skipping update-check-count-based-wait.";
+      return true;
+    }
+  } else {
+    // This file does not exist. This means we haven't started our update
+    // check count down yet, so this is the right time to start the count down.
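+    // Illustrative example (hypothetical bounds): with
+    // min_update_checks_needed = 1 and max_update_checks_allowed = 8, a value
+    // in [1, 8] is picked; the wait is satisfied once the persisted count
+    // reaches 0 on a later check.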
+    update_check_count_value =
+        base::RandInt(params->min_update_checks_needed(),
+                      params->max_update_checks_allowed());
+
+    LOG(INFO) << "Randomly picked update check count value = "
+              << update_check_count_value;
+
+    // Write out the initial value of update_check_count_value.
+    if (!SystemState::Get()->prefs()->SetInt64(kPrefsUpdateCheckCount,
+                                               update_check_count_value)) {
+      // We weren't able to write the update check count file for some reason.
+      // So let's proceed anyway so as to not stall the update.
+      LOG(ERROR) << "Unable to write update check count. "
+                 << "Skipping update-check-count-based-wait.";
+      return true;
+    }
+  }
+
+  if (update_check_count_value == 0) {
+    LOG(INFO) << "Successfully passed the update-check-based-wait.";
+    return true;
+  }
+
+  if (update_check_count_value < 0 ||
+      update_check_count_value > params->max_update_checks_allowed()) {
+    // We err on the side of skipping the scattering logic instead of
+    // preventing a machine from receiving any updates in case of any
+    // unexpected state.
+    LOG(ERROR) << "Invalid value for update check count detected. "
+               << "Skipping update-check-count-based-wait.";
+    return true;
+  }
+
+  // Legal value, we need to wait for more update checks to happen
+  // until this becomes 0.
+  LOG(INFO) << "Deferring Omaha updates for another "
+            << update_check_count_value << " update checks per policy";
+  return false;
+}
+
+// static
+bool OmahaRequestAction::ParseInstallDate(OmahaParserData* parser_data,
+                                          OmahaResponse* output_object) {
+  int64_t elapsed_days = 0;
+  if (!base::StringToInt64(parser_data->daystart.elapsed_days, &elapsed_days))
+    return false;
+
+  if (elapsed_days < 0)
+    return false;
+
+  output_object->install_date_days = elapsed_days;
+  return true;
+}
+
+// static
+bool OmahaRequestAction::HasInstallDate() {
+  return SystemState::Get()->prefs()->Exists(kPrefsInstallDateDays);
+}
+
+// static
+bool OmahaRequestAction::PersistInstallDate(
+    int install_date_days,
+    InstallDateProvisioningSource source) {
+  TEST_AND_RETURN_FALSE(install_date_days >= 0);
+
+  auto* prefs = SystemState::Get()->prefs();
+  if (!prefs->SetInt64(kPrefsInstallDateDays, install_date_days))
+    return false;
+
+  SystemState::Get()->metrics_reporter()->ReportInstallDateProvisioningSource(
+      static_cast<int>(source),  // Sample.
+      kProvisionedMax);          // Maximum.
+  return true;
+}
+
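+// Persists a single cohort value sent by Omaha. Illustrative behaviour: an
+// absent attribute leaves any stored value untouched, an empty string deletes
+// a stored value, and a non-empty string overwrites it.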
+void OmahaRequestAction::PersistCohortData(const string& prefs_key,
+                                           const Optional<string>& new_value) {
+  if (!new_value)
+    return;
+  const string& value = new_value.value();
+  if (value.empty() && SystemState::Get()->prefs()->Exists(prefs_key)) {
+    if (!SystemState::Get()->prefs()->Delete(prefs_key))
+      LOG(ERROR) << "Failed to remove stored " << prefs_key << " value.";
+    else
+      LOG(INFO) << "Removed stored " << prefs_key << " value.";
+  } else if (!value.empty()) {
+    if (!SystemState::Get()->prefs()->SetString(prefs_key, value))
+      LOG(INFO) << "Failed to store new setting " << prefs_key << " as "
+                << value;
+    else
+      LOG(INFO) << "Stored cohort setting " << prefs_key << " as " << value;
+  }
+}
+
+void OmahaRequestAction::PersistCohorts(const OmahaParserData& parser_data) {
+  const auto* params = SystemState::Get()->request_params();
+  for (const auto& app : parser_data.apps) {
+    // For platform App ID.
+    if (app.id == params->GetAppId()) {
+      PersistCohortData(kPrefsOmahaCohort, app.cohort);
+      PersistCohortData(kPrefsOmahaCohortName, app.cohortname);
+      PersistCohortData(kPrefsOmahaCohortHint, app.cohorthint);
+    } else if (params->IsDlcAppId(app.id)) {
+      string dlc_id;
+      if (!params->GetDlcId(app.id, &dlc_id)) {
+        LOG(WARNING) << "Skip persisting cohorts for DLC App ID=" << app.id
+                     << " as it is not in the request params.";
+        continue;
+      }
+      auto* prefs = SystemState::Get()->prefs();
+      PersistCohortData(
+          prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsOmahaCohort}),
+          app.cohort);
+      PersistCohortData(
+          prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsOmahaCohortName}),
+          app.cohortname);
+      PersistCohortData(
+          prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsOmahaCohortHint}),
+          app.cohorthint);
+    } else {
+      LOG(WARNING) << "Skip persisting cohorts for unknown App ID=" << app.id;
+    }
+  }
+}
+
+void OmahaRequestAction::ActionCompleted(ErrorCode code) {
+  // We only want to report this on "update check".
+  if (ping_only_ || event_ != nullptr)
+    return;
+
+  metrics::CheckResult result = metrics::CheckResult::kUnset;
+  metrics::CheckReaction reaction = metrics::CheckReaction::kUnset;
+  metrics::DownloadErrorCode download_error_code =
+      metrics::DownloadErrorCode::kUnset;
+
+  // Regular update attempt.
+  switch (code) {
+    case ErrorCode::kSuccess:
+      // OK, we parsed the response successfully but that does not
+      // necessarily mean that an update is available.
+      if (HasOutputPipe()) {
+        const OmahaResponse& response = GetOutputObject();
+        if (response.update_exists) {
+          result = metrics::CheckResult::kUpdateAvailable;
+          reaction = metrics::CheckReaction::kUpdating;
+        } else {
+          result = metrics::CheckResult::kNoUpdateAvailable;
+        }
+      } else {
+        result = metrics::CheckResult::kNoUpdateAvailable;
+      }
+      break;
+
+    case ErrorCode::kOmahaUpdateIgnoredPerPolicy:
+    case ErrorCode::kOmahaUpdateIgnoredOverCellular:
+      result = metrics::CheckResult::kUpdateAvailable;
+      reaction = metrics::CheckReaction::kIgnored;
+      break;
+
+    case ErrorCode::kOmahaUpdateDeferredPerPolicy:
+      result = metrics::CheckResult::kUpdateAvailable;
+      reaction = metrics::CheckReaction::kDeferring;
+      break;
+
+    case ErrorCode::kOmahaUpdateDeferredForBackoff:
+      result = metrics::CheckResult::kUpdateAvailable;
+      reaction = metrics::CheckReaction::kBackingOff;
+      break;
+
+    default:
+      // We report two flavors of errors, "Download errors" and "Parsing
+      // error". Try to convert to the former and if that doesn't work
+      // we know it's the latter.
+      metrics::DownloadErrorCode tmp_error =
+          metrics_utils::GetDownloadErrorCode(code);
+      if (tmp_error != metrics::DownloadErrorCode::kInputMalformed) {
+        result = metrics::CheckResult::kDownloadError;
+        download_error_code = tmp_error;
+      } else {
+        result = metrics::CheckResult::kParsingError;
+      }
+      break;
+  }
+
+  SystemState::Get()->metrics_reporter()->ReportUpdateCheckMetrics(
+      result, reaction, download_error_code);
+}
+
+bool OmahaRequestAction::ShouldIgnoreUpdate(const OmahaResponse& response,
+                                            ErrorCode* error) const {
+  // Note: policy decision to not update to a version we rolled back from.
+  string rollback_version =
+      SystemState::Get()->payload_state()->GetRollbackVersion();
+  const auto* params = SystemState::Get()->request_params();
+  if (!rollback_version.empty()) {
+    LOG(INFO) << "Detected previous rollback from version " << rollback_version;
+    if (rollback_version == response.version) {
+      LOG(INFO) << "Received version that we rolled back from. Ignoring.";
+      *error = ErrorCode::kOmahaUpdateIgnoredPerPolicy;
+      return true;
+    }
+  }
+
+  if (SystemState::Get()->hardware()->IsOOBEEnabled() &&
+      !SystemState::Get()->hardware()->IsOOBEComplete(nullptr) &&
+      (response.deadline.empty() ||
+       SystemState::Get()->payload_state()->GetRollbackHappened()) &&
+      params->app_version() != "ForcedUpdate") {
+    LOG(INFO) << "Ignoring a non-critical Omaha update before OOBE completion.";
+    *error = ErrorCode::kNonCriticalUpdateInOOBE;
+    return true;
+  }
+
+  if (!IsUpdateAllowedOverCurrentConnection(error, response)) {
+    LOG(INFO) << "Update is not allowed over current connection.";
+    return true;
+  }
+
+  // Currently non-critical updates always update alongside the platform update
+  // (a critical update), so this case should never actually be hit if the
+  // request to Omaha for updates is correct. In other words, stop the update
+  // from happening, as there are no packages in the response to process.
+  if (response.packages.empty()) {
+    LOG(ERROR) << "All packages were excluded.";
+  }
+
+  // Note: We could technically delete the UpdateFirstSeenAt state when we
+  // return true. If we do, it'll mean a device has to restart the
+  // UpdateFirstSeenAt clock, which helps scattering take effect when the AU is
+  // turned on again. On the other hand, it also increases the chance of update
+  // starvation if an admin turns AU on/off more frequently. We choose to err on
+  // the side of preventing starvation at the cost of not applying scattering in
+  // those cases.
+  return false;
+}
+
+bool OmahaRequestAction::IsUpdateAllowedOverCellularByPrefs(
+    const OmahaResponse& response) const {
+  auto* prefs = SystemState::Get()->prefs();
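+  // Note: |is_allowed| is only read after GetBoolean() succeeds in the
+  // condition below, so leaving it uninitialized here is safe.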
+  bool is_allowed;
+  if (prefs->Exists(kPrefsUpdateOverCellularPermission) &&
+      prefs->GetBoolean(kPrefsUpdateOverCellularPermission, &is_allowed) &&
+      is_allowed) {
+    LOG(INFO) << "Allowing updates over cellular as permission preference is "
+                 "set to true.";
+    return true;
+  }
+
+  if (!prefs->Exists(kPrefsUpdateOverCellularTargetVersion) ||
+      !prefs->Exists(kPrefsUpdateOverCellularTargetSize)) {
+    LOG(INFO) << "Disabling updates over cellular as the permission preference "
+                 "is not set to true and the target version/size is not set.";
+    return false;
+  }
+
+  std::string target_version;
+  int64_t target_size;
+
+  if (!prefs->GetString(kPrefsUpdateOverCellularTargetVersion,
+                        &target_version) ||
+      !prefs->GetInt64(kPrefsUpdateOverCellularTargetSize, &target_size)) {
+    LOG(INFO) << "Disabling updates over cellular as the target version or "
+                 "size is not accessible.";
+    return false;
+  }
+
+  uint64_t total_packages_size = 0;
+  for (const auto& package : response.packages) {
+    total_packages_size += package.size;
+  }
+  if (target_version == response.version &&
+      static_cast<uint64_t>(target_size) == total_packages_size) {
+    LOG(INFO) << "Allowing updates over cellular as the target matches the "
+                 "Omaha response.";
+    return true;
+  } else {
+    LOG(INFO) << "Disabling updates over cellular as the target does not "
+                 "match the Omaha response.";
+    return false;
+  }
+}
+
+bool OmahaRequestAction::IsUpdateAllowedOverCurrentConnection(
+    ErrorCode* error, const OmahaResponse& response) const {
+  ConnectionType type;
+  ConnectionTethering tethering;
+  ConnectionManagerInterface* connection_manager =
+      SystemState::Get()->connection_manager();
+  if (!connection_manager->GetConnectionProperties(&type, &tethering)) {
+    LOG(INFO) << "We could not determine our connection type. "
+              << "Defaulting to allow updates.";
+    return true;
+  }
+
+  bool is_allowed = connection_manager->IsUpdateAllowedOver(type, tethering);
+  bool is_device_policy_set =
+      connection_manager->IsAllowedConnectionTypesForUpdateSet();
+  // Treat a tethered connection as if it were a cellular connection.
+  bool is_over_cellular = type == ConnectionType::kCellular ||
+                          tethering == ConnectionTethering::kConfirmed;
+
+  if (!is_over_cellular) {
+    // There's no need to further check user preferences as we are not over
+    // cellular connection.
+    if (!is_allowed)
+      *error = ErrorCode::kOmahaUpdateIgnoredPerPolicy;
+  } else if (is_device_policy_set) {
+    // There's no need to further check user preferences as the device policy
+    // is set regarding updates over cellular.
+    if (!is_allowed)
+      *error = ErrorCode::kOmahaUpdateIgnoredPerPolicy;
+  } else {
+    // Device policy is not set, so user preferences determine whether to
+    // allow updates over cellular.
+    is_allowed = IsUpdateAllowedOverCellularByPrefs(response);
+    if (!is_allowed)
+      *error = ErrorCode::kOmahaUpdateIgnoredOverCellular;
+  }
+
+  LOG(INFO) << "We are connected via "
+            << connection_utils::StringForConnectionType(type)
+            << ", Updates allowed: " << (is_allowed ? "Yes" : "No");
+  return is_allowed;
+}
+
+bool OmahaRequestAction::IsRollbackEnabled() const {
+  if (policy_provider_->IsConsumerDevice()) {
+    LOG(INFO) << "Rollback is not enabled for consumer devices.";
+    return false;
+  }
+
+  if (!policy_provider_->device_policy_is_loaded()) {
+    LOG(INFO) << "No device policy is loaded. Assuming rollback enabled.";
+    return true;
+  }
+
+  int allowed_milestones;
+  if (!policy_provider_->GetDevicePolicy().GetRollbackAllowedMilestones(
+          &allowed_milestones)) {
+    LOG(INFO) << "RollbackAllowedMilestones policy can't be read. "
+                 "Defaulting to rollback enabled.";
+    return true;
+  }
+
+  LOG(INFO) << "Rollback allows " << allowed_milestones << " milestones.";
+  return allowed_milestones > 0;
+}
+
+void OmahaRequestAction::SetMaxKernelKeyVersionForRollback() const {
+  int max_kernel_rollforward;
+  int min_kernel_version =
+      SystemState::Get()->hardware()->GetMinKernelKeyVersion();
+  if (IsRollbackEnabled()) {
+    // If rollback is enabled, set the max kernel key version to the current
+    // kernel key version. This has the effect of freezing kernel key roll
+    // forwards.
+    //
+    // TODO(zentaro): This behavior is temporary, and ensures that no kernel
+    // key roll forward happens until the server side components of rollback
+    // are implemented. Future changes will allow the Omaha server to return
+    // the kernel key version from max_rollback_versions in the past. At that
+    // point the max kernel key version will be set to that value, creating a
+    // sliding window of versions that can be rolled back to.
+    LOG(INFO) << "Rollback is enabled. Setting kernel_max_rollforward to "
+              << min_kernel_version;
+    max_kernel_rollforward = min_kernel_version;
+  } else {
+    // For devices that are not rollback enabled (i.e. consumer devices), the
+    // max kernel key version is set to 0xfffffffe, which is logically
+    // infinity. This maintains the previous behavior that kernel key
+    // versions roll forward each time they are incremented.
+    LOG(INFO) << "Rollback is disabled. Setting kernel_max_rollforward to "
+              << kRollforwardInfinity;
+    max_kernel_rollforward = kRollforwardInfinity;
+  }
+
+  bool max_rollforward_set =
+      SystemState::Get()->hardware()->SetMaxKernelKeyRollforward(
+          max_kernel_rollforward);
+  if (!max_rollforward_set) {
+    LOG(ERROR) << "Failed to set kernel_max_rollforward";
+  }
+  // Report metrics
+  SystemState::Get()->metrics_reporter()->ReportKeyVersionMetrics(
+      min_kernel_version, max_kernel_rollforward, max_rollforward_set);
+}
+
+base::Time OmahaRequestAction::LoadOrPersistUpdateFirstSeenAtPref() const {
+  Time update_first_seen_at;
+  int64_t update_first_seen_at_int;
+  if (SystemState::Get()->prefs()->Exists(kPrefsUpdateFirstSeenAt)) {
+    if (SystemState::Get()->prefs()->GetInt64(kPrefsUpdateFirstSeenAt,
+                                              &update_first_seen_at_int)) {
+      // Note: This timestamp could be that of ANY update we saw in the past
+      // (not necessarily this particular update we're considering to apply)
+      // but never got to apply because of some reason (e.g. stop AU policy,
+      // updates being pulled out from Omaha, changes in target version prefix,
+      // new update being rolled out, etc.). But for the purposes of scattering
+      // it doesn't matter which update the timestamp corresponds to. i.e.
+      // the clock starts ticking the first time we see an update and we're
+      // ready to apply when the random wait period is satisfied relative to
+      // that first seen timestamp.
+      update_first_seen_at = Time::FromInternalValue(update_first_seen_at_int);
+      LOG(INFO) << "Using persisted value of UpdateFirstSeenAt: "
+                << utils::ToString(update_first_seen_at);
+    } else {
+      // This seems like an unexpected error where the persisted value exists
+      // but it's not readable for some reason.
+      LOG(INFO) << "UpdateFirstSeenAt value cannot be read";
+      return base::Time();
+    }
+  } else {
+    update_first_seen_at = SystemState::Get()->clock()->GetWallclockTime();
+    update_first_seen_at_int = update_first_seen_at.ToInternalValue();
+    if (SystemState::Get()->prefs()->SetInt64(kPrefsUpdateFirstSeenAt,
+                                              update_first_seen_at_int)) {
+      LOG(INFO) << "Persisted the new value for UpdateFirstSeenAt: "
+                << utils::ToString(update_first_seen_at);
+    } else {
+      // This seems like an unexpected error where the value cannot be
+      // persisted for some reason.
+      LOG(INFO) << "UpdateFirstSeenAt value "
+                << utils::ToString(update_first_seen_at)
+                << " cannot be persisted";
+      return base::Time();
+    }
+  }
+  return update_first_seen_at;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/omaha_request_action.h b/cros/omaha_request_action.h
similarity index 74%
rename from omaha_request_action.h
rename to cros/omaha_request_action.h
index 8db5fb9..4926c7d 100644
--- a/omaha_request_action.h
+++ b/cros/omaha_request_action.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_OMAHA_REQUEST_ACTION_H_
-#define UPDATE_ENGINE_OMAHA_REQUEST_ACTION_H_
+#ifndef UPDATE_ENGINE_CROS_OMAHA_REQUEST_ACTION_H_
+#define UPDATE_ENGINE_CROS_OMAHA_REQUEST_ACTION_H_
 
 #include <fcntl.h>
 #include <sys/stat.h>
@@ -28,13 +28,14 @@
 
 #include <gtest/gtest_prod.h>  // for FRIEND_TEST
 
+#include <base/optional.h>
 #include <brillo/secure_blob.h>
 #include <curl/curl.h>
 
 #include "update_engine/common/action.h"
 #include "update_engine/common/http_fetcher.h"
-#include "update_engine/omaha_response.h"
-#include "update_engine/system_state.h"
+#include "update_engine/cros/omaha_request_builder_xml.h"
+#include "update_engine/cros/omaha_response.h"
 
 // The Omaha Request action makes a request to Omaha and can output
 // the response on the output ActionPipe.
@@ -45,60 +46,9 @@
 
 namespace chromeos_update_engine {
 
-// Encodes XML entities in a given string. Input must be ASCII-7 valid. If
-// the input is invalid, the default value is used instead.
-std::string XmlEncodeWithDefault(const std::string& input,
-                                 const std::string& default_value);
-
-// Escapes text so it can be included as character data and attribute
-// values. The |input| string must be valid ASCII-7, no UTF-8 supported.
-// Returns whether the |input| was valid and escaped properly in |output|.
-bool XmlEncode(const std::string& input, std::string* output);
-
-// This struct encapsulates the Omaha event information. For a
-// complete list of defined event types and results, see
-// http://code.google.com/p/omaha/wiki/ServerProtocol#event
-struct OmahaEvent {
-  // The Type values correspond to EVENT_TYPE values of Omaha.
-  enum Type {
-    kTypeUnknown = 0,
-    kTypeDownloadComplete = 1,
-    kTypeInstallComplete = 2,
-    kTypeUpdateComplete = 3,
-    kTypeUpdateDownloadStarted = 13,
-    kTypeUpdateDownloadFinished = 14,
-    // Chromium OS reserved type sent after the first reboot following an update
-    // completed.
-    kTypeRebootedAfterUpdate = 54,
-  };
-
-  // The Result values correspond to EVENT_RESULT values of Omaha.
-  enum Result {
-    kResultError = 0,
-    kResultSuccess = 1,
-    kResultUpdateDeferred = 9,  // When we ignore/defer updates due to policy.
-  };
-
-  OmahaEvent()
-      : type(kTypeUnknown),
-        result(kResultError),
-        error_code(ErrorCode::kError) {}
-  explicit OmahaEvent(Type in_type)
-      : type(in_type),
-        result(kResultSuccess),
-        error_code(ErrorCode::kSuccess) {}
-  OmahaEvent(Type in_type, Result in_result, ErrorCode in_error_code)
-      : type(in_type), result(in_result), error_code(in_error_code) {}
-
-  Type type;
-  Result result;
-  ErrorCode error_code;
-};
-
 class NoneType;
 class OmahaRequestAction;
 class OmahaRequestParams;
-class PrefsInterface;
 
 // This struct is declared in the .cc file.
 struct OmahaParserData;
@@ -116,14 +66,13 @@
 class OmahaRequestAction : public Action<OmahaRequestAction>,
                            public HttpFetcherDelegate {
  public:
-  static const int kNeverPinged = -1;
   static const int kPingTimeJump = -2;
-  // We choose this value of 10 as a heuristic for a work day in trying
+  // We choose this value of 3 as a heuristic for a work day in trying
   // each URL, assuming we check roughly every 45 mins. This is a good time to
-  // wait - neither too long nor too little - so we don't give up the preferred
-  // URLs that appear earlier in list too quickly before moving on to the
-  // fallback ones.
-  static const int kDefaultMaxFailureCountPerUrl = 10;
+  // wait so we don't give up the preferred URLs, but allow using the URL that
+  // appears earlier in the list for every payload before resorting to the
+  // fallback URLs in the candidate URL list.
+  static const int kDefaultMaxFailureCountPerUrl = 3;
 
   // If staging is enabled, set the maximum wait time to 28 days, since that is
   // the predetermined wait time for staging.
@@ -151,10 +100,10 @@
   // OmahaRequestAction(..., new OmahaEvent(...), new WhateverHttpFetcher);
   // or
   // OmahaRequestAction(..., nullptr, new WhateverHttpFetcher);
-  OmahaRequestAction(SystemState* system_state,
-                     OmahaEvent* event,
+  OmahaRequestAction(OmahaEvent* event,
                      std::unique_ptr<HttpFetcher> http_fetcher,
-                     bool ping_only);
+                     bool ping_only,
+                     const std::string& session_id);
   ~OmahaRequestAction() override;
   typedef ActionTraits<OmahaRequestAction>::InputObjectType InputObjectType;
   typedef ActionTraits<OmahaRequestAction>::OutputObjectType OutputObjectType;
@@ -188,6 +137,9 @@
               GetInstallDateWhenOOBECompletedWithValidDate);
   FRIEND_TEST(OmahaRequestActionTest,
               GetInstallDateWhenOOBECompletedDateChanges);
+  friend class UpdateAttempterTest;
+  FRIEND_TEST(UpdateAttempterTest, SessionIdTestEnforceEmptyStrPingOmaha);
+  FRIEND_TEST(UpdateAttempterTest, SessionIdTestConsistencyInUpdateFlow);
 
   // Enumeration used in PersistInstallDate().
   enum InstallDateProvisioningSource {
@@ -202,7 +154,7 @@
   // Gets the install date, expressed as the number of PST8PDT
   // calendar weeks since January 1st 2007, times seven. Returns -1 if
   // unknown. See http://crbug.com/336838 for details about this value.
-  static int GetInstallDate(SystemState* system_state);
+  static int GetInstallDate();
 
   // Parses the Omaha Response in |doc| and sets the
   // |install_date_days| field of |output_object| to the value of the
@@ -213,25 +165,25 @@
 
   // Returns True if the kPrefsInstallDateDays state variable is set,
   // False otherwise.
-  static bool HasInstallDate(SystemState* system_state);
+  static bool HasInstallDate();
 
   // Writes |install_date_days| into the kPrefsInstallDateDays state
   // variable and emits an UMA stat for the |source| used. Returns
   // True if the value was written, False if an error occurred.
-  static bool PersistInstallDate(SystemState* system_state,
-                                 int install_date_days,
+  static bool PersistInstallDate(int install_date_days,
                                  InstallDateProvisioningSource source);
 
-  // Persist the new cohort* value received in the XML file in the |prefs_key|
-  // preference file. If the |new_value| is empty, the currently stored value
-  // will be deleted. Don't call this function with an empty |new_value| if the
-  // value was not set in the XML, since that would delete the stored value.
-  bool PersistCohortData(const std::string& prefs_key,
-                         const std::string& new_value);
+  // Persist the new cohort value received in the XML response in the
+  // |prefs_key| preference file. If |new_value| has no value (i.e. the
+  // attribute was not present in the XML), nothing is done. If |new_value|
+  // holds an empty string, the currently stored value is deleted, so only
+  // pass an empty string when the attribute was explicitly set to empty.
+  void PersistCohortData(const std::string& prefs_key,
+                         const base::Optional<std::string>& new_value);
 
-  // Parse and persist the end-of-life status flag sent back in the updatecheck
-  // tag attributes. The flag will be validated and stored in the Prefs.
-  bool PersistEolStatus(const std::map<std::string, std::string>& attrs);
+  // Parses and persists the cohorts sent back in the updatecheck tag
+  // attributes.
+  void PersistCohorts(const OmahaParserData& parser_data);
 
   // If this is an update check request, initializes
   // |ping_active_days_| and |ping_roll_call_days_| to values that may
@@ -246,6 +198,10 @@
   // send to Omaha and thus we should include them in the response.
   bool ShouldPing() const;
 
+  // Process Omaha's response to a ping request and store the results in the DLC
+  // metadata directory.
+  void StorePingReply(const OmahaParserData& parser_data) const;
+
   // Returns true if the download of a new update should be deferred.
   // False if the update can be downloaded.
   bool ShouldDeferDownload(OmahaResponse* output_object);
@@ -325,12 +281,6 @@
   // kPrefsUpdateFirstSeenAt pref and returns it as a base::Time object.
   base::Time LoadOrPersistUpdateFirstSeenAtPref() const;
 
-  // Global system context.
-  SystemState* system_state_;
-
-  // Contains state that is relevant in the processing of the Omaha request.
-  OmahaRequestParams* params_;
-
   // Pointer to the OmahaEvent info. This is an UpdateCheck request if null.
   std::unique_ptr<OmahaEvent> event_;
 
@@ -352,9 +302,11 @@
   int ping_active_days_;
   int ping_roll_call_days_;
 
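+  // Session ID of the update flow this request belongs to, as passed in via
+  // the constructor.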
+  std::string session_id_;
+
   DISALLOW_COPY_AND_ASSIGN(OmahaRequestAction);
 };
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_OMAHA_REQUEST_ACTION_H_
+#endif  // UPDATE_ENGINE_CROS_OMAHA_REQUEST_ACTION_H_
diff --git a/omaha_request_action_fuzzer.cc b/cros/omaha_request_action_fuzzer.cc
similarity index 88%
rename from omaha_request_action_fuzzer.cc
rename to cros/omaha_request_action_fuzzer.cc
index 6c2f7ca..995de8c 100644
--- a/omaha_request_action_fuzzer.cc
+++ b/cros/omaha_request_action_fuzzer.cc
@@ -18,8 +18,8 @@
 
 #include "update_engine/common/mock_http_fetcher.h"
 #include "update_engine/common/test_utils.h"
-#include "update_engine/fake_system_state.h"
-#include "update_engine/omaha_request_action.h"
+#include "update_engine/cros/fake_system_state.h"
+#include "update_engine/cros/omaha_request_action.h"
 
 class Environment {
  public:
@@ -31,14 +31,14 @@
   brillo::FakeMessageLoop loop(nullptr);
   loop.SetAsCurrent();
 
-  chromeos_update_engine::FakeSystemState fake_system_state;
+  chromeos_update_engine::FakeSystemState::CreateInstance();
   auto omaha_request_action =
       std::make_unique<chromeos_update_engine::OmahaRequestAction>(
-          &fake_system_state,
           nullptr,
           std::make_unique<chromeos_update_engine::MockHttpFetcher>(
               data, size, nullptr),
-          false);
+          false,
+          "" /* session_id */);
   auto collector_action =
       std::make_unique<chromeos_update_engine::ObjectCollectorAction<
           chromeos_update_engine::OmahaResponse>>();
diff --git a/cros/omaha_request_action_unittest.cc b/cros/omaha_request_action_unittest.cc
new file mode 100644
index 0000000..01be1a8
--- /dev/null
+++ b/cros/omaha_request_action_unittest.cc
@@ -0,0 +1,3189 @@
+//
+// Copyright (C) 2012 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/cros/omaha_request_action.h"
+
+#include <stdint.h>
+
+#include <limits>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <base/bind.h>
+#include <base/files/file_util.h>
+#include <base/files/scoped_temp_dir.h>
+#include <base/memory/ptr_util.h>
+#include <base/strings/string_number_conversions.h>
+#include <base/strings/string_util.h>
+#include <base/strings/stringprintf.h>
+#include <base/time/time.h>
+#include <brillo/message_loops/fake_message_loop.h>
+#include <brillo/message_loops/message_loop.h>
+#include <brillo/message_loops/message_loop_utils.h>
+#include <expat.h>
+#include <gtest/gtest.h>
+#include <policy/libpolicy.h>
+#include <policy/mock_libpolicy.h>
+
+#include "update_engine/common/action_pipe.h"
+#include "update_engine/common/constants.h"
+#include "update_engine/common/fake_prefs.h"
+#include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/metrics_reporter_interface.h"
+#include "update_engine/common/mock_excluder.h"
+#include "update_engine/common/mock_http_fetcher.h"
+#include "update_engine/common/platform_constants.h"
+#include "update_engine/common/prefs.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/cros/fake_system_state.h"
+#include "update_engine/cros/mock_connection_manager.h"
+#include "update_engine/cros/mock_payload_state.h"
+#include "update_engine/cros/omaha_request_builder_xml.h"
+#include "update_engine/cros/omaha_request_params.h"
+#include "update_engine/cros/omaha_utils.h"
+#include "update_engine/update_manager/rollback_prefs.h"
+
+using base::Time;
+using base::TimeDelta;
+using chromeos_update_manager::kRollforwardInfinity;
+using std::pair;
+using std::string;
+using std::vector;
+using testing::_;
+using testing::AllOf;
+using testing::AnyNumber;
+using testing::DoAll;
+using testing::Ge;
+using testing::Le;
+using testing::NiceMock;
+using testing::Return;
+using testing::ReturnPointee;
+using testing::ReturnRef;
+using testing::SaveArg;
+using testing::SetArgPointee;
+using testing::StrictMock;
+
+namespace {
+
+static_assert(kRollforwardInfinity == 0xfffffffe,
+              "Don't change the value of kRollforwardInfinity unless its "
+              "size has been changed in firmware.");
+
+const char kCurrentVersion[] = "0.1.0.0";
+const char kTestAppId[] = "test-app-id";
+const char kTestAppId2[] = "test-app2-id";
+const char kTestAppIdSkipUpdatecheck[] = "test-app-id-skip-updatecheck";
+const char kDlcId1[] = "dlc-id-1";
+const char kDlcId2[] = "dlc-id-2";
+
+// This is a helper struct that lets unit tests build an update response with
+// the values they care about.
+struct FakeUpdateResponse {
+  string GetRollbackVersionAttributes() const {
+    string num_milestones;
+    num_milestones = base::NumberToString(rollback_allowed_milestones);
+    const string rollback_version =
+        " _firmware_version_" + num_milestones + "=\"" +
+        past_rollback_key_version.first + "\"" + " _kernel_version_" +
+        num_milestones + "=\"" + past_rollback_key_version.second + "\"";
+
+    return (rollback ? " _rollback=\"true\"" : "") + rollback_version +
+           (!rollback_firmware_version.empty()
+                ? " _firmware_version=\"" + rollback_firmware_version + "\""
+                : "") +
+           (!rollback_kernel_version.empty()
+                ? " _kernel_version=\"" + rollback_kernel_version + "\""
+                : "");
+  }
+
+  string GetNoUpdateResponse() const {
+    string entity_str;
+    if (include_entity)
+      entity_str = "<!DOCTYPE response [<!ENTITY CrOS \"ChromeOS\">]>";
+    return "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" + entity_str +
+           "<response protocol=\"3.0\">"
+           "<daystart elapsed_seconds=\"100\"/>"
+           "<app appid=\"" +
+           app_id + "\" " +
+           (include_cohorts
+                ? "cohort=\"" + cohort + "\" cohorthint=\"" + cohorthint +
+                      "\" cohortname=\"" + cohortname + "\" "
+                : "") +
+           " status=\"ok\">"
+           "<ping status=\"ok\"/>"
+           "<updatecheck status=\"noupdate\"/></app>" +
+           (multi_app_no_update
+                ? "<app appid=\"" + app_id2 +
+                      "\"><updatecheck status=\"noupdate\"/></app>"
+                : "") +
+           "</response>";
+  }
+
+  string GetUpdateResponse() const {
+    chromeos_update_engine::OmahaRequestParams request_params;
+    request_params.set_app_id(app_id);
+    return "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+           "protocol=\"3.0\">"
+           "<daystart elapsed_seconds=\"100\"" +
+           (elapsed_days.empty() ? ""
+                                 : (" elapsed_days=\"" + elapsed_days + "\"")) +
+           "/>"
+           "<app appid=\"" +
+           app_id + "\" " +
+           (include_cohorts
+                ? "cohort=\"" + cohort + "\" cohorthint=\"" + cohorthint +
+                      "\" cohortname=\"" + cohortname + "\" "
+                : "") +
+           " status=\"ok\">"
+           "<ping status=\"ok\"/><updatecheck status=\"ok\"" +
+           GetRollbackVersionAttributes() + ">" + "<urls><url codebase=\"" +
+           codebase +
+           "\"/></urls>"
+           "<manifest version=\"" +
+           version +
+           "\">"
+           "<packages><package hash=\"not-used\" name=\"" +
+           filename + "\" size=\"" + base::NumberToString(size) + "\" fp=\"" +
+           fp + "\" hash_sha256=\"" + hash + "\"/>" +
+           (multi_package ? "<package name=\"package2\" size=\"222\" fp=\"" +
+                                fp2 + "\" hash_sha256=\"hash2\"/>"
+                          : "") +
+           "</packages>"
+           "<actions><action event=\"postinstall\" MetadataSize=\"11" +
+           (multi_package ? ":22" : "") + "\" MoreInfo=\"" + more_info_url +
+           "\" Prompt=\"" + prompt +
+           "\" "
+           "IsDeltaPayload=\"true" +
+           (multi_package ? ":false" : "") +
+           "\" "
+           "MaxDaysToScatter=\"" +
+           max_days_to_scatter +
+           "\" "
+           "sha256=\"not-used\" " +
+           (deadline.empty() ? "" : ("deadline=\"" + deadline + "\" ")) +
+           (disable_p2p_for_downloading ? "DisableP2PForDownloading=\"true\" "
+                                        : "") +
+           (disable_p2p_for_sharing ? "DisableP2PForSharing=\"true\" " : "") +
+           (powerwash ? "Powerwash=\"true\" " : "") +
+           "/></actions></manifest></updatecheck></app>" +
+           (multi_app
+                ? "<app appid=\"" + app_id2 + "\"" +
+                      (include_cohorts ? " cohort=\"cohort2\"" : "") +
+                      "><updatecheck status=\"ok\"><urls><url codebase=\"" +
+                      codebase2 + "\"/></urls><manifest version=\"" + version2 +
+                      "\"><packages>"
+                      "<package name=\"package3\" size=\"333\" fp=\"" +
+                      fp2 +
+                      "\" hash_sha256=\"hash3\"/></packages>"
+                      "<actions><action event=\"postinstall\" " +
+                      (multi_app_self_update
+                           ? "noupdate=\"true\" IsDeltaPayload=\"true\" "
+                           : "IsDeltaPayload=\"false\" ") +
+                      "MetadataSize=\"33\"/></actions>"
+                      "</manifest></updatecheck></app>"
+                : "") +
+           (multi_app_no_update
+                ? "<app><updatecheck status=\"noupdate\"/></app>"
+                : "") +
+           (multi_app_skip_updatecheck
+                ? "<app appid=\"" + app_id_skip_updatecheck + "\"></app>"
+                : "") +
+           (dlc_app_update
+                ? "<app appid=\"" + request_params.GetDlcAppId(kDlcId1) +
+                      "\" " +
+                      (include_dlc_cohorts
+                           ? "cohort=\"" + dlc_cohort + "\" cohorthint=\"" +
+                                 dlc_cohorthint + "\" cohortname=\"" +
+                                 dlc_cohortname + "\" "
+                           : "") +
+                      "status=\"ok\">"
+                      "<updatecheck status=\"ok\"><urls><url codebase=\"" +
+                      codebase + "\"/><url codebase=\"" + codebase2 +
+                      "\"/></urls><manifest version=\"" + version +
+                      "\"><packages><package name=\"package3\" size=\"333\" "
+                      "fp=\"" +
+                      fp2 +
+                      "\" hash_sha256=\"hash3\"/></packages>"
+                      "<actions><action event=\"install\" run=\".signed\"/>"
+                      "<action event=\"postinstall\" MetadataSize=\"33\"/>"
+                      "</actions></manifest></updatecheck></app>"
+                : "") +
+           (dlc_app_no_update
+                ? "<app appid=\"" + request_params.GetDlcAppId(kDlcId2) +
+                      "\" " +
+                      (include_dlc_cohorts
+                           ? "cohort=\"" + dlc_cohort + "\" cohorthint=\"" +
+                                 dlc_cohorthint + "\" cohortname=\"" +
+                                 dlc_cohortname + "\" "
+                           : "") +
+                      "><updatecheck status=\"noupdate\"/></app>"
+                : "") +
+           "</response>";
+  }
+
+  // Return the payload URL, which is split in two fields in the XML response.
+  string GetPayloadUrl() { return codebase + filename; }
+
+  string app_id = kTestAppId;
+  string app_id2 = kTestAppId2;
+  string app_id_skip_updatecheck = kTestAppIdSkipUpdatecheck;
+  string version = "1.2.3.4";
+  string version2 = "2.3.4.5";
+  string more_info_url = "http://more/info";
+  string prompt = "true";
+  string codebase = "http://code/base/";
+  string codebase2 = "http://code/base/2/";
+  string filename = "file.signed";
+  string hash = "4841534831323334";
+  string fp = "3.98ba213e";
+  string fp2 = "3.755aff78e";
+  uint64_t size = 123;
+  string deadline = "";
+  string max_days_to_scatter = "7";
+  string elapsed_days = "42";
+
+  // P2P setting defaults to allowed.
+  bool disable_p2p_for_downloading = false;
+  bool disable_p2p_for_sharing = false;
+
+  bool powerwash = false;
+
+  // Omaha cohorts settings.
+  bool include_cohorts = false;
+  string cohort = "";
+  string cohorthint = "";
+  string cohortname = "";
+  // Whether to include Omaha cohorts for DLC apps.
+  bool include_dlc_cohorts = false;
+  string dlc_cohort = "";
+  string dlc_cohorthint = "";
+  string dlc_cohortname = "";
+
+  // Whether to include the CrOS <!ENTITY> in the XML response.
+  bool include_entity = false;
+
+  // Whether to include more than one app.
+  bool multi_app = false;
+  // Whether to include an app with noupdate="true".
+  bool multi_app_self_update = false;
+  // Whether to include an additional app with status="noupdate".
+  bool multi_app_no_update = false;
+  // Whether to include an additional app with no updatecheck tag.
+  bool multi_app_skip_updatecheck = false;
+  // Whether to include more than one package in an app.
+  bool multi_package = false;
+  // Whether to include a DLC app with updatecheck tag.
+  bool dlc_app_update = false;
+  // Whether to include a DLC app with no updatecheck tag.
+  bool dlc_app_no_update = false;
+
+  // Whether the payload is a rollback.
+  bool rollback = false;
+  // The verified boot firmware key version for the rollback image.
+  string rollback_firmware_version = "";
+  // The verified boot kernel key version for the rollback image.
+  string rollback_kernel_version = "";
+  // The number of milestones back that the verified boot key version has been
+  // supplied.
+  uint32_t rollback_allowed_milestones = 0;
+  // The verified boot key version for the
+  // |current - rollback_allowed_milestones| most recent release.
+  // The pair contains <firmware_key_version, kernel_key_version> each
+  // of which is in the form "key_version.version".
+  pair<string, string> past_rollback_key_version;
+};
+
+}  // namespace
+
+namespace chromeos_update_engine {
+
+class OmahaRequestActionTestProcessorDelegate : public ActionProcessorDelegate {
+ public:
+  OmahaRequestActionTestProcessorDelegate()
+      : expected_code_(ErrorCode::kSuccess),
+        interactive_(false),
+        test_http_fetcher_headers_(false) {}
+  ~OmahaRequestActionTestProcessorDelegate() override = default;
+
+  void ProcessingDone(const ActionProcessor* processor,
+                      ErrorCode code) override {
+    brillo::MessageLoop::current()->BreakLoop();
+  }
+
+  void ActionCompleted(ActionProcessor* processor,
+                       AbstractAction* action,
+                       ErrorCode code) override {
+    // Make sure actions always succeed.
+    if (action->Type() == OmahaRequestAction::StaticType()) {
+      EXPECT_EQ(expected_code_, code);
+      // Check that the headers were set in the fetcher during the action. Note
+      // that we set this request as "interactive".
+      auto fetcher = static_cast<const MockHttpFetcher*>(
+          static_cast<OmahaRequestAction*>(action)->http_fetcher_.get());
+
+      if (test_http_fetcher_headers_) {
+        EXPECT_EQ(interactive_ ? "fg" : "bg",
+                  fetcher->GetHeader("X-Goog-Update-Interactivity"));
+        EXPECT_EQ(kTestAppId, fetcher->GetHeader("X-Goog-Update-AppId"));
+        EXPECT_NE("", fetcher->GetHeader("X-Goog-Update-Updater"));
+      }
+      post_data_ = fetcher->post_data();
+    } else if (action->Type() ==
+               ObjectCollectorAction<OmahaResponse>::StaticType()) {
+      EXPECT_EQ(ErrorCode::kSuccess, code);
+      auto collector_action =
+          static_cast<ObjectCollectorAction<OmahaResponse>*>(action);
+      omaha_response_.reset(new OmahaResponse(collector_action->object()));
+      EXPECT_TRUE(omaha_response_);
+    } else {
+      EXPECT_EQ(ErrorCode::kSuccess, code);
+    }
+  }
+  ErrorCode expected_code_;
+  brillo::Blob post_data_;
+  bool interactive_;
+  bool test_http_fetcher_headers_;
+  std::unique_ptr<OmahaResponse> omaha_response_;
+};
+
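+// Bundles the inputs and expectations consumed by
+// OmahaRequestActionTest::TestUpdateCheck(); see the comment on that method
+// for the meaning of the individual fields.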
+struct TestUpdateCheckParams {
+  string http_response;
+  int fail_http_response_code;
+  bool ping_only;
+  bool is_consumer_device;
+  int rollback_allowed_milestones;
+  bool is_policy_loaded;
+  ErrorCode expected_code;
+  metrics::CheckResult expected_check_result;
+  metrics::CheckReaction expected_check_reaction;
+  metrics::DownloadErrorCode expected_download_error_code;
+  string session_id;
+};
+
+class OmahaRequestActionTest : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    FakeSystemState::CreateInstance();
+
+    request_params_.set_os_sp("service_pack");
+    request_params_.set_os_board("x86-generic");
+    request_params_.set_app_id(kTestAppId);
+    request_params_.set_app_version(kCurrentVersion);
+    request_params_.set_app_lang("en-US");
+    request_params_.set_current_channel("unittest");
+    request_params_.set_target_channel("unittest");
+    request_params_.set_hwid("OEM MODEL 09235 7471");
+    request_params_.set_delta_okay(true);
+    request_params_.set_interactive(false);
+    request_params_.set_update_url("http://url");
+    request_params_.set_target_version_prefix("");
+    request_params_.set_rollback_allowed(false);
+    request_params_.set_is_powerwash_allowed(false);
+    request_params_.set_is_install(false);
+    request_params_.set_dlc_apps_params({});
+
+    FakeSystemState::Get()->set_request_params(&request_params_);
+    fake_prefs_ = FakeSystemState::Get()->fake_prefs();
+
+    // Set the default update check params. See |TestUpdateCheck()|.
+    tuc_params_ = {
+        .http_response = "",
+        .fail_http_response_code = -1,
+        .ping_only = false,
+        .is_consumer_device = true,
+        .rollback_allowed_milestones = 0,
+        .is_policy_loaded = false,
+        .expected_code = ErrorCode::kSuccess,
+        .expected_check_result = metrics::CheckResult::kUpdateAvailable,
+        .expected_check_reaction = metrics::CheckReaction::kUpdating,
+        .expected_download_error_code = metrics::DownloadErrorCode::kUnset,
+    };
+
+    ON_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetExcluder())
+        .WillByDefault(Return(&mock_excluder_));
+  }
+
+  // This function uses the parameters in |tuc_params_| to do an update check.
+  // It will fill out |post_str_| with the result data and |response_| with
+  // the parsed |OmahaResponse|. Returns true iff an output response was
+  // obtained from the |OmahaRequestAction|. If |fail_http_response_code| is
+  // non-negative, the transfer will fail with that code. |ping_only| is
+  // passed through to the |OmahaRequestAction| constructor.
+  //
+  // The |expected_check_result|, |expected_check_reaction| and
+  // |expected_download_error_code| fields are for checking expectations about
+  // reporting UpdateEngine.Check.{Result,Reaction,DownloadError} UMA
+  // statistics. Use the appropriate ::kUnset value to specify that the given
+  // metric should not be reported.
+  bool TestUpdateCheck();
+
+  // Tests events using |event| and |http_response|. It will fill
+  // |post_str_| with the result data.
+  void TestEvent(OmahaEvent* event, const string& http_response);
+
+  // Runs and checks a ping test. |ping_only| indicates whether it should send
+  // only a ping or also an updatecheck.
+  void PingTest(bool ping_only);
+
+  // InstallDate test helper function.
+  bool InstallDateParseHelper(const string& elapsed_days,
+                              OmahaResponse* response);
+
+  // P2P test helper function.
+  void P2PTest(bool initial_allow_p2p_for_downloading,
+               bool initial_allow_p2p_for_sharing,
+               bool omaha_disable_p2p_for_downloading,
+               bool omaha_disable_p2p_for_sharing,
+               bool payload_state_allow_p2p_attempt,
+               bool expect_p2p_client_lookup,
+               const string& p2p_client_result_url,
+               bool expected_allow_p2p_for_downloading,
+               bool expected_allow_p2p_for_sharing,
+               const string& expected_p2p_url);
+
+  StrictMock<MockExcluder> mock_excluder_;
+  FakeUpdateResponse fake_update_response_;
+  // Used by all tests.
+  OmahaRequestParams request_params_;
+
+  FakePrefs* fake_prefs_;
+
+  OmahaRequestActionTestProcessorDelegate delegate_;
+
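+  // Whether the delegate should verify the X-Goog-Update-* headers that the
+  // action sets on the HTTP fetcher.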
+  bool test_http_fetcher_headers_{false};
+
+  TestUpdateCheckParams tuc_params_;
+
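+  // Outputs of TestUpdateCheck()/TestEvent(): the parsed Omaha response and
+  // the request body that was posted to the mock fetcher.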
+  OmahaResponse response_;
+  string post_str_;
+};
+
+class OmahaRequestActionDlcPingTest : public OmahaRequestActionTest {
+ protected:
+  void SetUp() override {
+    OmahaRequestActionTest::SetUp();
+    dlc_id_ = "dlc0";
+    active_key_ = PrefsInterface::CreateSubKey(
+        {kDlcPrefsSubDir, dlc_id_, kPrefsPingActive});
+    last_active_key_ = PrefsInterface::CreateSubKey(
+        {kDlcPrefsSubDir, dlc_id_, kPrefsPingLastActive});
+    last_rollcall_key_ = PrefsInterface::CreateSubKey(
+        {kDlcPrefsSubDir, dlc_id_, kPrefsPingLastRollcall});
+
+    tuc_params_.http_response =
+        "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+        "protocol=\"3.0\"><daystart elapsed_days=\"4763\" "
+        "elapsed_seconds=\"36540\"/><app appid=\"test-app-id\" status=\"ok\">"
+        "<updatecheck status=\"noupdate\"/></app><app "
+        "appid=\"test-app-id_dlc0\" "
+        "status=\"ok\"><ping status=\"ok\"/><updatecheck status=\"noupdate\"/>"
+        "</app></response>";
+    tuc_params_.expected_check_result =
+        metrics::CheckResult::kNoUpdateAvailable;
+    tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+  }
+
+  std::string dlc_id_;
+  std::string active_key_;
+  std::string last_active_key_;
+  std::string last_rollcall_key_;
+};
+
+bool OmahaRequestActionTest::TestUpdateCheck() {
+  brillo::FakeMessageLoop loop(nullptr);
+  loop.SetAsCurrent();
+  auto fetcher =
+      std::make_unique<MockHttpFetcher>(tuc_params_.http_response.data(),
+                                        tuc_params_.http_response.size(),
+                                        nullptr);
+  if (tuc_params_.fail_http_response_code >= 0) {
+    fetcher->FailTransfer(tuc_params_.fail_http_response_code);
+  }
+  // This ensures the tests didn't forget to update |FakeSystemState| if they
+  // are not using the default |request_params_|.
+  EXPECT_EQ(&request_params_, FakeSystemState::Get()->request_params());
+
+  auto omaha_request_action =
+      std::make_unique<OmahaRequestAction>(nullptr,
+                                           std::move(fetcher),
+                                           tuc_params_.ping_only,
+                                           tuc_params_.session_id);
+
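+  // Inject a mock policy provider so the test can control whether this is a
+  // consumer device, whether a device policy is loaded, and the value of the
+  // RollbackAllowedMilestones policy.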
+  auto mock_policy_provider =
+      std::make_unique<NiceMock<policy::MockPolicyProvider>>();
+  EXPECT_CALL(*mock_policy_provider, IsConsumerDevice())
+      .WillRepeatedly(Return(tuc_params_.is_consumer_device));
+
+  EXPECT_CALL(*mock_policy_provider, device_policy_is_loaded())
+      .WillRepeatedly(Return(tuc_params_.is_policy_loaded));
+
+  const policy::MockDevicePolicy device_policy;
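+  // A negative |rollback_allowed_milestones| simulates a failure to read the
+  // RollbackAllowedMilestones policy.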
+  const bool get_allowed_milestone_succeeds =
+      tuc_params_.rollback_allowed_milestones >= 0;
+  EXPECT_CALL(device_policy, GetRollbackAllowedMilestones(_))
+      .WillRepeatedly(
+          DoAll(SetArgPointee<0>(tuc_params_.rollback_allowed_milestones),
+                Return(get_allowed_milestone_succeeds)));
+
+  EXPECT_CALL(*mock_policy_provider, GetDevicePolicy())
+      .WillRepeatedly(ReturnRef(device_policy));
+  omaha_request_action->policy_provider_ = std::move(mock_policy_provider);
+
+  delegate_.expected_code_ = tuc_params_.expected_code;
+  delegate_.interactive_ = request_params_.interactive();
+  delegate_.test_http_fetcher_headers_ = test_http_fetcher_headers_;
+  ActionProcessor processor;
+  processor.set_delegate(&delegate_);
+
+  auto collector_action =
+      std::make_unique<ObjectCollectorAction<OmahaResponse>>();
+  BondActions(omaha_request_action.get(), collector_action.get());
+  processor.EnqueueAction(std::move(omaha_request_action));
+  processor.EnqueueAction(std::move(collector_action));
+
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
+              ReportUpdateCheckMetrics(_, _, _))
+      .Times(AnyNumber());
+
+  EXPECT_CALL(
+      *FakeSystemState::Get()->mock_metrics_reporter(),
+      ReportUpdateCheckMetrics(tuc_params_.expected_check_result,
+                               tuc_params_.expected_check_reaction,
+                               tuc_params_.expected_download_error_code))
+      .Times(tuc_params_.ping_only ? 0 : 1);
+
+  loop.PostTask(base::Bind(
+      [](ActionProcessor* processor) { processor->StartProcessing(); },
+      base::Unretained(&processor)));
+  loop.Run();
+  EXPECT_FALSE(loop.PendingTasks());
+  if (delegate_.omaha_response_)
+    response_ = *delegate_.omaha_response_;
+  post_str_ = string(delegate_.post_data_.begin(), delegate_.post_data_.end());
+  return delegate_.omaha_response_ != nullptr;
+}
+
+// Tests Event requests -- they should always succeed. The post-data received
+// by the mock HttpFetcher is stored in |post_str_|.
+void OmahaRequestActionTest::TestEvent(OmahaEvent* event,
+                                       const string& http_response) {
+  brillo::FakeMessageLoop loop(nullptr);
+  loop.SetAsCurrent();
+
+  auto action = std::make_unique<OmahaRequestAction>(
+      event,
+      std::make_unique<MockHttpFetcher>(
+          http_response.data(), http_response.size(), nullptr),
+      false,
+      "");
+  ActionProcessor processor;
+  processor.set_delegate(&delegate_);
+  processor.EnqueueAction(std::move(action));
+
+  loop.PostTask(base::Bind(
+      [](ActionProcessor* processor) { processor->StartProcessing(); },
+      base::Unretained(&processor)));
+  loop.Run();
+  EXPECT_FALSE(loop.PendingTasks());
+
+  post_str_ = string(delegate_.post_data_.begin(), delegate_.post_data_.end());
+}
+
+TEST_F(OmahaRequestActionTest, RejectEntities) {
+  fake_update_response_.include_entity = true;
+  tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+  tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLHasEntityDecl;
+  tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_FALSE(TestUpdateCheck());
+  EXPECT_FALSE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, NoUpdateTest) {
+  tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+  tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_TRUE(TestUpdateCheck());
+  EXPECT_FALSE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, MultiAppNoUpdateTest) {
+  fake_update_response_.multi_app_no_update = true;
+  tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+  tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_TRUE(TestUpdateCheck());
+  EXPECT_FALSE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, MultiAppNoPartialUpdateTest) {
+  fake_update_response_.multi_app_no_update = true;
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_TRUE(TestUpdateCheck());
+  EXPECT_FALSE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, NoSelfUpdateTest) {
+  tuc_params_.http_response =
+      "<response><app><updatecheck status=\"ok\"><manifest><actions><action "
+      "event=\"postinstall\" noupdate=\"true\"/></actions>"
+      "</manifest></updatecheck></app></response>";
+  tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_TRUE(TestUpdateCheck());
+  EXPECT_FALSE(response_.update_exists);
+}
+
+// Test that all the values in the response are parsed in a normal update
+// response_.
+TEST_F(OmahaRequestActionTest, ValidUpdateTest) {
+  fake_update_response_.deadline = "20101020";
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_TRUE(response_.update_exists);
+  EXPECT_EQ(fake_update_response_.version, response_.version);
+  EXPECT_EQ(fake_update_response_.GetPayloadUrl(),
+            response_.packages[0].payload_urls[0]);
+  EXPECT_EQ(fake_update_response_.more_info_url, response_.more_info_url);
+  EXPECT_EQ(fake_update_response_.hash, response_.packages[0].hash);
+  EXPECT_EQ(fake_update_response_.size, response_.packages[0].size);
+  EXPECT_EQ(fake_update_response_.fp, response_.packages[0].fp);
+  EXPECT_EQ(true, response_.packages[0].is_delta);
+  EXPECT_EQ(fake_update_response_.prompt == "true", response_.prompt);
+  EXPECT_EQ(fake_update_response_.deadline, response_.deadline);
+  EXPECT_FALSE(response_.powerwash_required);
+  // Omaha cohort attributes are not set in the response, so they should not be
+  // persisted.
+  EXPECT_FALSE(fake_prefs_->Exists(kPrefsOmahaCohort));
+  EXPECT_FALSE(fake_prefs_->Exists(kPrefsOmahaCohortHint));
+  EXPECT_FALSE(fake_prefs_->Exists(kPrefsOmahaCohortName));
+}
+
+TEST_F(OmahaRequestActionTest, MultiPackageUpdateTest) {
+  fake_update_response_.multi_package = true;
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_TRUE(response_.update_exists);
+  EXPECT_EQ(fake_update_response_.version, response_.version);
+  EXPECT_EQ(fake_update_response_.GetPayloadUrl(),
+            response_.packages[0].payload_urls[0]);
+  EXPECT_EQ(fake_update_response_.codebase + "package2",
+            response_.packages[1].payload_urls[0]);
+  EXPECT_EQ(fake_update_response_.hash, response_.packages[0].hash);
+  EXPECT_EQ(fake_update_response_.size, response_.packages[0].size);
+  EXPECT_EQ(fake_update_response_.fp, response_.packages[0].fp);
+  EXPECT_EQ(true, response_.packages[0].is_delta);
+  EXPECT_EQ(11u, response_.packages[0].metadata_size);
+  ASSERT_EQ(2u, response_.packages.size());
+  EXPECT_EQ(string("hash2"), response_.packages[1].hash);
+  EXPECT_EQ(222u, response_.packages[1].size);
+  EXPECT_EQ(fake_update_response_.fp2, response_.packages[1].fp);
+  EXPECT_EQ(22u, response_.packages[1].metadata_size);
+  EXPECT_EQ(false, response_.packages[1].is_delta);
+}
+
+TEST_F(OmahaRequestActionTest, MultiAppUpdateTest) {
+  fake_update_response_.multi_app = true;
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_TRUE(response_.update_exists);
+  EXPECT_EQ(fake_update_response_.version, response_.version);
+  EXPECT_EQ(fake_update_response_.GetPayloadUrl(),
+            response_.packages[0].payload_urls[0]);
+  EXPECT_EQ(fake_update_response_.codebase2 + "package3",
+            response_.packages[1].payload_urls[0]);
+  EXPECT_EQ(fake_update_response_.hash, response_.packages[0].hash);
+  EXPECT_EQ(fake_update_response_.size, response_.packages[0].size);
+  EXPECT_EQ(fake_update_response_.fp, response_.packages[0].fp);
+  EXPECT_EQ(11u, response_.packages[0].metadata_size);
+  EXPECT_EQ(true, response_.packages[0].is_delta);
+  ASSERT_EQ(2u, response_.packages.size());
+  EXPECT_EQ(string("hash3"), response_.packages[1].hash);
+  EXPECT_EQ(333u, response_.packages[1].size);
+  EXPECT_EQ(fake_update_response_.fp2, response_.packages[1].fp);
+  EXPECT_EQ(33u, response_.packages[1].metadata_size);
+  EXPECT_EQ(false, response_.packages[1].is_delta);
+}
+
+TEST_F(OmahaRequestActionTest, MultiAppPartialUpdateTest) {
+  fake_update_response_.multi_app = true;
+  fake_update_response_.multi_app_self_update = true;
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_TRUE(response_.update_exists);
+  EXPECT_EQ(fake_update_response_.version, response_.version);
+  EXPECT_EQ(fake_update_response_.GetPayloadUrl(),
+            response_.packages[0].payload_urls[0]);
+  EXPECT_EQ(fake_update_response_.hash, response_.packages[0].hash);
+  EXPECT_EQ(fake_update_response_.size, response_.packages[0].size);
+  EXPECT_EQ(fake_update_response_.fp, response_.packages[0].fp);
+  EXPECT_EQ(11u, response_.packages[0].metadata_size);
+  ASSERT_EQ(2u, response_.packages.size());
+  EXPECT_EQ(string("hash3"), response_.packages[1].hash);
+  EXPECT_EQ(333u, response_.packages[1].size);
+  EXPECT_EQ(fake_update_response_.fp2, response_.packages[1].fp);
+  EXPECT_EQ(33u, response_.packages[1].metadata_size);
+  EXPECT_EQ(true, response_.packages[1].is_delta);
+}
+
+TEST_F(OmahaRequestActionTest, MultiAppMultiPackageUpdateTest) {
+  fake_update_response_.multi_app = true;
+  fake_update_response_.multi_package = true;
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_TRUE(response_.update_exists);
+  EXPECT_EQ(fake_update_response_.version, response_.version);
+  EXPECT_EQ(fake_update_response_.GetPayloadUrl(),
+            response_.packages[0].payload_urls[0]);
+  EXPECT_EQ(fake_update_response_.codebase + "package2",
+            response_.packages[1].payload_urls[0]);
+  EXPECT_EQ(fake_update_response_.codebase2 + "package3",
+            response_.packages[2].payload_urls[0]);
+  EXPECT_EQ(fake_update_response_.hash, response_.packages[0].hash);
+  EXPECT_EQ(fake_update_response_.size, response_.packages[0].size);
+  EXPECT_EQ(fake_update_response_.fp, response_.packages[0].fp);
+  EXPECT_EQ(11u, response_.packages[0].metadata_size);
+  EXPECT_EQ(true, response_.packages[0].is_delta);
+  ASSERT_EQ(3u, response_.packages.size());
+  EXPECT_EQ(string("hash2"), response_.packages[1].hash);
+  EXPECT_EQ(222u, response_.packages[1].size);
+  EXPECT_EQ(fake_update_response_.fp2, response_.packages[1].fp);
+  EXPECT_EQ(22u, response_.packages[1].metadata_size);
+  EXPECT_EQ(false, response_.packages[1].is_delta);
+  EXPECT_EQ(string("hash3"), response_.packages[2].hash);
+  EXPECT_EQ(333u, response_.packages[2].size);
+  EXPECT_EQ(fake_update_response_.fp2, response_.packages[2].fp);
+  EXPECT_EQ(33u, response_.packages[2].metadata_size);
+  EXPECT_EQ(false, response_.packages[2].is_delta);
+}
+
+TEST_F(OmahaRequestActionTest, PowerwashTest) {
+  fake_update_response_.powerwash = true;
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_TRUE(response_.update_exists);
+  EXPECT_TRUE(response_.powerwash_required);
+}
+
+TEST_F(OmahaRequestActionTest, ExtraHeadersSentInteractiveTest) {
+  request_params_.set_interactive(true);
+  test_http_fetcher_headers_ = true;
+  tuc_params_.http_response = "invalid xml>";
+  tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError;
+  tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_FALSE(TestUpdateCheck());
+
+  EXPECT_FALSE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, ExtraHeadersSentNoInteractiveTest) {
+  request_params_.set_interactive(false);
+  test_http_fetcher_headers_ = true;
+  tuc_params_.http_response = "invalid xml>";
+  tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError;
+  tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_FALSE(TestUpdateCheck());
+
+  EXPECT_FALSE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, ValidUpdateBlockedByConnection) {
+  // Set up a connection manager that doesn't allow a valid update over
+  // the current ethernet connection.
+  MockConnectionManager mock_cm;
+  FakeSystemState::Get()->set_connection_manager(&mock_cm);
+
+  EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kEthernet),
+                            SetArgPointee<1>(ConnectionTethering::kUnknown),
+                            Return(true)));
+  EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kEthernet, _))
+      .WillRepeatedly(Return(false));
+
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  tuc_params_.expected_code = ErrorCode::kOmahaUpdateIgnoredPerPolicy;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kIgnored;
+
+  ASSERT_FALSE(TestUpdateCheck());
+
+  EXPECT_FALSE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, ValidUpdateOverCellularAllowedByDevicePolicy) {
+  // This test tests that update over cellular is allowed as device policy
+  // says yes.
+  MockConnectionManager mock_cm;
+  FakeSystemState::Get()->set_connection_manager(&mock_cm);
+
+  EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular),
+                            SetArgPointee<1>(ConnectionTethering::kUnknown),
+                            Return(true)));
+  EXPECT_CALL(mock_cm, IsAllowedConnectionTypesForUpdateSet())
+      .WillRepeatedly(Return(true));
+  EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _))
+      .WillRepeatedly(Return(true));
+
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_TRUE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, ValidUpdateOverCellularBlockedByDevicePolicy) {
+  // Verify that an update over cellular is blocked when device policy
+  // forbids it.
+  MockConnectionManager mock_cm;
+  FakeSystemState::Get()->set_connection_manager(&mock_cm);
+
+  EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular),
+                            SetArgPointee<1>(ConnectionTethering::kUnknown),
+                            Return(true)));
+  EXPECT_CALL(mock_cm, IsAllowedConnectionTypesForUpdateSet())
+      .WillRepeatedly(Return(true));
+  EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _))
+      .WillRepeatedly(Return(false));
+
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  tuc_params_.expected_code = ErrorCode::kOmahaUpdateIgnoredPerPolicy;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kIgnored;
+
+  ASSERT_FALSE(TestUpdateCheck());
+
+  EXPECT_FALSE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest,
+       ValidUpdateOverCellularAllowedByUserPermissionTrue) {
+  // Verify that, when device policy is not set, an update over cellular is
+  // allowed because the user permission for updates over cellular is true.
+  MockConnectionManager mock_cm;
+  fake_prefs_->SetBoolean(kPrefsUpdateOverCellularPermission, true);
+  FakeSystemState::Get()->set_connection_manager(&mock_cm);
+
+  EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular),
+                            SetArgPointee<1>(ConnectionTethering::kUnknown),
+                            Return(true)));
+  EXPECT_CALL(mock_cm, IsAllowedConnectionTypesForUpdateSet())
+      .WillRepeatedly(Return(false));
+  EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _))
+      .WillRepeatedly(Return(true));
+
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_TRUE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest,
+       ValidUpdateOverCellularBlockedByUpdateTargetNotMatch) {
+  // Verify that, when device policy is not set and the permission for updates
+  // over cellular is false or absent, the update is blocked because the
+  // stored update target does not match the Omaha response.
+  MockConnectionManager mock_cm;
+  // A version different from the one in the Omaha response.
+  string diff_version = "99.99.99";
+  // A size different from the one in the Omaha response.
+  int64_t diff_size = 999;
+
+  fake_prefs_->SetString(kPrefsUpdateOverCellularTargetVersion, diff_version);
+  fake_prefs_->SetInt64(kPrefsUpdateOverCellularTargetSize, diff_size);
+  // Cellular (3G) is the only connection type allowed for this check.
+  FakeSystemState::Get()->set_connection_manager(&mock_cm);
+
+  EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular),
+                            SetArgPointee<1>(ConnectionTethering::kUnknown),
+                            Return(true)));
+  EXPECT_CALL(mock_cm, IsAllowedConnectionTypesForUpdateSet())
+      .WillRepeatedly(Return(false));
+  EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _))
+      .WillRepeatedly(Return(true));
+
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  tuc_params_.expected_code = ErrorCode::kOmahaUpdateIgnoredOverCellular;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kIgnored;
+
+  ASSERT_FALSE(TestUpdateCheck());
+
+  EXPECT_FALSE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest,
+       ValidUpdateOverCellularAllowedByUpdateTargetMatch) {
+  // Verify that, when device policy is not set and the permission for updates
+  // over cellular is false or absent, the update is allowed because the
+  // stored update target matches the Omaha response.
+  MockConnectionManager mock_cm;
+  // The same version as in the Omaha response.
+  string new_version = fake_update_response_.version;
+  // The same size as in the Omaha response.
+  int64_t new_size = fake_update_response_.size;
+
+  fake_prefs_->SetString(kPrefsUpdateOverCellularTargetVersion, new_version);
+  fake_prefs_->SetInt64(kPrefsUpdateOverCellularTargetSize, new_size);
+  FakeSystemState::Get()->set_connection_manager(&mock_cm);
+
+  EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular),
+                            SetArgPointee<1>(ConnectionTethering::kUnknown),
+                            Return(true)));
+  EXPECT_CALL(mock_cm, IsAllowedConnectionTypesForUpdateSet())
+      .WillRepeatedly(Return(false));
+  EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _))
+      .WillRepeatedly(Return(true));
+
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_TRUE(response_.update_exists);
+}
+
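+// An update offering the version recorded as the rollback version must be
+// ignored per policy.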
+TEST_F(OmahaRequestActionTest, ValidUpdateBlockedByRollback) {
+  string rollback_version = "1234.0.0";
+  MockPayloadState mock_payload_state;
+  FakeSystemState::Get()->set_payload_state(&mock_payload_state);
+  fake_update_response_.version = rollback_version;
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  tuc_params_.expected_code = ErrorCode::kOmahaUpdateIgnoredPerPolicy;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kIgnored;
+
+  EXPECT_CALL(mock_payload_state, GetRollbackVersion())
+      .WillRepeatedly(Return(rollback_version));
+
+  ASSERT_FALSE(TestUpdateCheck());
+
+  EXPECT_FALSE(response_.update_exists);
+}
+
+// Verify that update checks called during OOBE will not try to download an
+// update if the response doesn't include the deadline field.
+TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBE) {
+  FakeSystemState::Get()->fake_hardware()->UnsetIsOOBEComplete();
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  tuc_params_.expected_code = ErrorCode::kNonCriticalUpdateInOOBE;
+  tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  // TODO(senj): set better default value for metrics::checkresult in
+  // OmahaRequestAction::ActionCompleted.
+  ASSERT_FALSE(TestUpdateCheck());
+
+  EXPECT_FALSE(response_.update_exists);
+}
+
+// Verify that the IsOOBEComplete() value is ignored when the OOBE flow is not
+// enabled.
+TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBEDisabled) {
+  FakeSystemState::Get()->fake_hardware()->UnsetIsOOBEComplete();
+  FakeSystemState::Get()->fake_hardware()->SetIsOOBEEnabled(false);
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_TRUE(response_.update_exists);
+}
+
+// Verify that update checks called during OOBE will still try to download an
+// update if the response includes the deadline field.
+TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBEDeadlineSet) {
+  FakeSystemState::Get()->fake_hardware()->UnsetIsOOBEComplete();
+  fake_update_response_.deadline = "20101020";
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_TRUE(response_.update_exists);
+}
+
+// Verify that update checks called during OOBE will not try to download an
+// update if a rollback happened, even when the response includes the deadline
+// field.
+TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBERollback) {
+  FakeSystemState::Get()->fake_hardware()->UnsetIsOOBEComplete();
+  fake_update_response_.deadline = "20101020";
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  tuc_params_.expected_code = ErrorCode::kNonCriticalUpdateInOOBE;
+  tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  EXPECT_CALL(*(FakeSystemState::Get()->mock_payload_state()),
+              GetRollbackHappened())
+      .WillOnce(Return(true));
+
+  ASSERT_FALSE(TestUpdateCheck());
+
+  EXPECT_FALSE(response_.update_exists);
+}
+
+// Verify that non-critical updates are skipped by reporting the
+// kNonCriticalUpdateInOOBE error code when attempted over a cellular network,
+// i.e. when the update would need user permission. Note that reporting the
+// kOmahaUpdateIgnoredOverCellular error in this case might cause undesired UX
+// in OOBE (warning the user about an update that will be skipped).
+TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesInOOBEOverCellular) {
+  FakeSystemState::Get()->fake_hardware()->UnsetIsOOBEComplete();
+
+  MockConnectionManager mock_cm;
+  FakeSystemState::Get()->set_connection_manager(&mock_cm);
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  tuc_params_.expected_code = ErrorCode::kNonCriticalUpdateInOOBE;
+  tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular),
+                            SetArgPointee<1>(ConnectionTethering::kUnknown),
+                            Return(true)));
+  EXPECT_CALL(mock_cm, IsAllowedConnectionTypesForUpdateSet())
+      .WillRepeatedly(Return(false));
+
+  ASSERT_FALSE(TestUpdateCheck());
+
+  EXPECT_FALSE(response_.update_exists);
+}
+
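+// With wall-clock-based waiting enabled and a two-day waiting period, a
+// non-interactive check is deferred per policy (scattering).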
+TEST_F(OmahaRequestActionTest, WallClockBasedWaitAloneCausesScattering) {
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_update_check_count_wait_enabled(false);
+  request_params_.set_waiting_period(TimeDelta::FromDays(2));
+  FakeSystemState::Get()->fake_clock()->SetWallclockTime(Time::Now());
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  tuc_params_.expected_code = ErrorCode::kOmahaUpdateDeferredPerPolicy;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kDeferring;
+
+  ASSERT_FALSE(TestUpdateCheck());
+
+  EXPECT_FALSE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest,
+       WallClockBasedWaitAloneCausesScatteringInteractive) {
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_update_check_count_wait_enabled(false);
+  request_params_.set_waiting_period(TimeDelta::FromDays(2));
+  request_params_.set_interactive(true);
+  FakeSystemState::Get()->fake_clock()->SetWallclockTime(Time::Now());
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  // Verify that an interactive check is not deferred.
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_TRUE(response_.update_exists);
+}
+
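+// With wall-clock-based waiting disabled, the update proceeds even though a
+// waiting period and an update-check-count wait are configured.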
+TEST_F(OmahaRequestActionTest, NoWallClockBasedWaitCausesNoScattering) {
+  request_params_.set_wall_clock_based_wait_enabled(false);
+  request_params_.set_waiting_period(TimeDelta::FromDays(2));
+  request_params_.set_update_check_count_wait_enabled(true);
+  request_params_.set_min_update_checks_needed(1);
+  request_params_.set_max_update_checks_allowed(8);
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_TRUE(response_.update_exists);
+}
+
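+// A max_days_to_scatter of "0" in the response disables scattering entirely.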
+TEST_F(OmahaRequestActionTest, ZeroMaxDaysToScatterCausesNoScattering) {
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_waiting_period(TimeDelta::FromDays(2));
+  request_params_.set_update_check_count_wait_enabled(true);
+  request_params_.set_min_update_checks_needed(1);
+  request_params_.set_max_update_checks_allowed(8);
+  fake_update_response_.max_days_to_scatter = "0";
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_TRUE(response_.update_exists);
+}
+
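+// With zero min/max update-check counts, no scattering occurs and the
+// persisted check count is zero.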
+TEST_F(OmahaRequestActionTest, ZeroUpdateCheckCountCausesNoScattering) {
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_waiting_period(TimeDelta());
+  request_params_.set_update_check_count_wait_enabled(true);
+  request_params_.set_min_update_checks_needed(0);
+  request_params_.set_max_update_checks_allowed(0);
+  FakeSystemState::Get()->fake_clock()->SetWallclockTime(Time::Now());
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  int64_t count;
+  ASSERT_TRUE(fake_prefs_->GetInt64(kPrefsUpdateCheckCount, &count));
+  ASSERT_EQ(count, 0);
+  EXPECT_TRUE(response_.update_exists);
+}
+
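+// A non-zero update-check-count window defers the update and persists a
+// positive check count.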
+TEST_F(OmahaRequestActionTest, NonZeroUpdateCheckCountCausesScattering) {
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_waiting_period(TimeDelta());
+  request_params_.set_update_check_count_wait_enabled(true);
+  request_params_.set_min_update_checks_needed(1);
+  request_params_.set_max_update_checks_allowed(8);
+  FakeSystemState::Get()->fake_clock()->SetWallclockTime(Time::Now());
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  tuc_params_.expected_code = ErrorCode::kOmahaUpdateDeferredPerPolicy;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kDeferring;
+
+  ASSERT_FALSE(TestUpdateCheck());
+
+  int64_t count;
+  ASSERT_TRUE(fake_prefs_->GetInt64(kPrefsUpdateCheckCount, &count));
+  ASSERT_GT(count, 0);
+  EXPECT_FALSE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest,
+       NonZeroUpdateCheckCountCausesScatteringInteractive) {
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_waiting_period(TimeDelta());
+  request_params_.set_update_check_count_wait_enabled(true);
+  request_params_.set_min_update_checks_needed(1);
+  request_params_.set_max_update_checks_allowed(8);
+  request_params_.set_interactive(true);
+  FakeSystemState::Get()->fake_clock()->SetWallclockTime(Time::Now());
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  // Verify that an interactive check is not deferred.
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_TRUE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, ExistingUpdateCheckCountCausesScattering) {
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_waiting_period(TimeDelta());
+  request_params_.set_update_check_count_wait_enabled(true);
+  request_params_.set_min_update_checks_needed(1);
+  request_params_.set_max_update_checks_allowed(8);
+  FakeSystemState::Get()->fake_clock()->SetWallclockTime(Time::Now());
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  tuc_params_.expected_code = ErrorCode::kOmahaUpdateDeferredPerPolicy;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kDeferring;
+
+  ASSERT_TRUE(fake_prefs_->SetInt64(kPrefsUpdateCheckCount, 5));
+  ASSERT_FALSE(TestUpdateCheck());
+
+  int64_t count;
+  ASSERT_TRUE(fake_prefs_->GetInt64(kPrefsUpdateCheckCount, &count));
+  // |count| remains the same, as the decrementing happens in update_attempter
+  // which this test doesn't exercise.
+  ASSERT_EQ(count, 5);
+  EXPECT_FALSE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest,
+       ExistingUpdateCheckCountCausesScatteringInteractive) {
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_waiting_period(TimeDelta());
+  request_params_.set_update_check_count_wait_enabled(true);
+  request_params_.set_min_update_checks_needed(1);
+  request_params_.set_max_update_checks_allowed(8);
+  request_params_.set_interactive(true);
+  FakeSystemState::Get()->fake_clock()->SetWallclockTime(Time::Now());
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  ASSERT_TRUE(fake_prefs_->SetInt64(kPrefsUpdateCheckCount, 5));
+
+  // Verify that an interactive check is not deferred.
+  ASSERT_TRUE(TestUpdateCheck());
+  EXPECT_TRUE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, StagingTurnedOnCausesScattering) {
+  // If staging is on, the value for max days to scatter should be ignored, and
+  // staging's scatter value should be used.
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_waiting_period(TimeDelta::FromDays(6));
+  request_params_.set_update_check_count_wait_enabled(false);
+  FakeSystemState::Get()->fake_clock()->SetWallclockTime(Time::Now());
+
+  ASSERT_TRUE(fake_prefs_->SetInt64(kPrefsWallClockStagingWaitPeriod, 6));
+  // This should not prevent scattering due to staging.
+  fake_update_response_.max_days_to_scatter = "0";
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  tuc_params_.expected_code = ErrorCode::kOmahaUpdateDeferredPerPolicy;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kDeferring;
+
+  ASSERT_FALSE(TestUpdateCheck());
+
+  EXPECT_FALSE(response_.update_exists);
+
+  // Interactive updates should not be affected.
+  request_params_.set_interactive(true);
+  tuc_params_.expected_code = ErrorCode::kSuccess;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUpdating;
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_TRUE(response_.update_exists);
+}
+
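+// Cohort, cohort hint, and cohort name from the response are persisted to
+// prefs for both the platform app and the DLC app.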
+TEST_F(OmahaRequestActionTest, CohortsArePersisted) {
+  fake_update_response_.include_cohorts = true;
+  fake_update_response_.cohort = "s/154454/8479665";
+  fake_update_response_.cohorthint = "please-put-me-on-beta";
+  fake_update_response_.cohortname = "stable";
+  request_params_.set_dlc_apps_params(
+      {{request_params_.GetDlcAppId(kDlcId1), {.name = kDlcId1}}});
+  fake_update_response_.dlc_app_update = true;
+  fake_update_response_.include_dlc_cohorts = true;
+  fake_update_response_.dlc_cohort = "s/154454/8479665/dlc";
+  fake_update_response_.dlc_cohorthint = "please-put-me-on-beta-dlc";
+  fake_update_response_.dlc_cohortname = "stable-dlc";
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  EXPECT_CALL(mock_excluder_, IsExcluded(_)).WillRepeatedly(Return(false));
+  ASSERT_TRUE(TestUpdateCheck());
+
+  string value;
+  EXPECT_TRUE(fake_prefs_->GetString(kPrefsOmahaCohort, &value));
+  EXPECT_EQ(fake_update_response_.cohort, value);
+
+  EXPECT_TRUE(fake_prefs_->GetString(kPrefsOmahaCohortHint, &value));
+  EXPECT_EQ(fake_update_response_.cohorthint, value);
+
+  EXPECT_TRUE(fake_prefs_->GetString(kPrefsOmahaCohortName, &value));
+  EXPECT_EQ(fake_update_response_.cohortname, value);
+
+  EXPECT_TRUE(fake_prefs_->GetString(
+      fake_prefs_->CreateSubKey({kDlcPrefsSubDir, kDlcId1, kPrefsOmahaCohort}),
+      &value));
+  EXPECT_EQ(fake_update_response_.dlc_cohort, value);
+
+  EXPECT_TRUE(fake_prefs_->GetString(
+      fake_prefs_->CreateSubKey(
+          {kDlcPrefsSubDir, kDlcId1, kPrefsOmahaCohortHint}),
+      &value));
+  EXPECT_EQ(fake_update_response_.dlc_cohorthint, value);
+
+  EXPECT_TRUE(fake_prefs_->GetString(
+      fake_prefs_->CreateSubKey(
+          {kDlcPrefsSubDir, kDlcId1, kPrefsOmahaCohortName}),
+      &value));
+  EXPECT_EQ(fake_update_response_.dlc_cohortname, value);
+}
+
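+// Previously stored cohort prefs are overwritten by the response; an empty
+// cohortname in the response clears the stored pref.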
+TEST_F(OmahaRequestActionTest, CohortsAreUpdated) {
+  EXPECT_TRUE(fake_prefs_->SetString(kPrefsOmahaCohort, "old_value"));
+  EXPECT_TRUE(fake_prefs_->SetString(kPrefsOmahaCohortHint, "old_hint"));
+  EXPECT_TRUE(fake_prefs_->SetString(kPrefsOmahaCohortName, "old_name"));
+  const string dlc_cohort_key =
+      fake_prefs_->CreateSubKey({kDlcPrefsSubDir, kDlcId1, kPrefsOmahaCohort});
+  const string dlc_cohort_hint_key = fake_prefs_->CreateSubKey(
+      {kDlcPrefsSubDir, kDlcId1, kPrefsOmahaCohortHint});
+  const string dlc_cohort_name_key = fake_prefs_->CreateSubKey(
+      {kDlcPrefsSubDir, kDlcId1, kPrefsOmahaCohortName});
+  request_params_.set_dlc_apps_params(
+      {{request_params_.GetDlcAppId(kDlcId1), {.name = kDlcId1}}});
+  EXPECT_TRUE(fake_prefs_->SetString(dlc_cohort_key, "old_value_dlc"));
+  EXPECT_TRUE(fake_prefs_->SetString(dlc_cohort_hint_key, "old_hint_dlc"));
+  EXPECT_TRUE(fake_prefs_->SetString(dlc_cohort_name_key, "old_name_dlc"));
+  fake_update_response_.include_cohorts = true;
+  fake_update_response_.cohort = "s/154454/8479665";
+  fake_update_response_.cohorthint = "please-put-me-on-beta";
+  fake_update_response_.cohortname = "";
+  fake_update_response_.dlc_app_update = true;
+  fake_update_response_.include_dlc_cohorts = true;
+  fake_update_response_.dlc_cohort = "s/154454/8479665/dlc";
+  fake_update_response_.dlc_cohorthint = "please-put-me-on-beta-dlc";
+  fake_update_response_.dlc_cohortname = "";
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  EXPECT_CALL(mock_excluder_, IsExcluded(_)).WillRepeatedly(Return(false));
+  ASSERT_TRUE(TestUpdateCheck());
+
+  string value;
+  EXPECT_TRUE(fake_prefs_->GetString(kPrefsOmahaCohort, &value));
+  EXPECT_EQ(fake_update_response_.cohort, value);
+
+  EXPECT_TRUE(fake_prefs_->GetString(kPrefsOmahaCohortHint, &value));
+  EXPECT_EQ(fake_update_response_.cohorthint, value);
+
+  EXPECT_FALSE(fake_prefs_->GetString(kPrefsOmahaCohortName, &value));
+
+  EXPECT_TRUE(fake_prefs_->GetString(dlc_cohort_key, &value));
+  EXPECT_EQ(fake_update_response_.dlc_cohort, value);
+
+  EXPECT_TRUE(fake_prefs_->GetString(dlc_cohort_hint_key, &value));
+  EXPECT_EQ(fake_update_response_.dlc_cohorthint, value);
+
+  EXPECT_FALSE(fake_prefs_->GetString(dlc_cohort_name_key, &value));
+}
+
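+// A response without cohort attributes leaves previously stored cohort prefs
+// untouched and does not create new ones.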
+TEST_F(OmahaRequestActionTest, CohortsAreNotModifiedWhenMissing) {
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  EXPECT_TRUE(fake_prefs_->SetString(kPrefsOmahaCohort, "old_value"));
+  const string dlc_cohort_key =
+      fake_prefs_->CreateSubKey({kDlcPrefsSubDir, kDlcId1, kPrefsOmahaCohort});
+  EXPECT_TRUE(fake_prefs_->SetString(dlc_cohort_key, "old_value_dlc"));
+  ASSERT_TRUE(TestUpdateCheck());
+
+  string value;
+  EXPECT_TRUE(fake_prefs_->GetString(kPrefsOmahaCohort, &value));
+  EXPECT_EQ("old_value", value);
+
+  EXPECT_FALSE(fake_prefs_->GetString(kPrefsOmahaCohortHint, &value));
+  EXPECT_FALSE(fake_prefs_->GetString(kPrefsOmahaCohortName, &value));
+
+  EXPECT_TRUE(fake_prefs_->GetString(dlc_cohort_key, &value));
+  EXPECT_EQ("old_value_dlc", value);
+
+  EXPECT_FALSE(fake_prefs_->GetString(
+      fake_prefs_->CreateSubKey(
+          {kDlcPrefsSubDir, kDlcId1, kPrefsOmahaCohortHint}),
+      &value));
+  EXPECT_FALSE(fake_prefs_->GetString(
+      fake_prefs_->CreateSubKey(
+          {kDlcPrefsSubDir, kDlcId1, kPrefsOmahaCohortName}),
+      &value));
+}
+
+TEST_F(OmahaRequestActionTest, CohortsArePersistedWhenNoUpdate) {
+  fake_update_response_.include_cohorts = true;
+  fake_update_response_.cohort = "s/154454/8479665";
+  fake_update_response_.cohorthint = "please-put-me-on-beta";
+  fake_update_response_.cohortname = "stable";
+  tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+  tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  string value;
+  EXPECT_TRUE(fake_prefs_->GetString(kPrefsOmahaCohort, &value));
+  EXPECT_EQ(fake_update_response_.cohort, value);
+
+  EXPECT_TRUE(fake_prefs_->GetString(kPrefsOmahaCohortHint, &value));
+  EXPECT_EQ(fake_update_response_.cohorthint, value);
+
+  EXPECT_TRUE(fake_prefs_->GetString(kPrefsOmahaCohortName, &value));
+  EXPECT_EQ(fake_update_response_.cohortname, value);
+}
+
+TEST_F(OmahaRequestActionTest, MultiAppCohortTest) {
+  fake_update_response_.multi_app = true;
+  fake_update_response_.include_cohorts = true;
+  fake_update_response_.cohort = "s/154454/8479665";
+  fake_update_response_.cohorthint = "please-put-me-on-beta";
+  fake_update_response_.cohortname = "stable";
+  request_params_.set_dlc_apps_params(
+      {{request_params_.GetDlcAppId(kDlcId1), {.name = kDlcId1}},
+       {request_params_.GetDlcAppId(kDlcId2), {.name = kDlcId2}}});
+  fake_update_response_.dlc_app_update = true;
+  fake_update_response_.dlc_app_no_update = true;
+  fake_update_response_.include_dlc_cohorts = true;
+  fake_update_response_.dlc_cohort = "s/154454/8479665/dlc";
+  fake_update_response_.dlc_cohorthint = "please-put-me-on-beta-dlc";
+  fake_update_response_.dlc_cohortname = "stable-dlc";
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  EXPECT_CALL(mock_excluder_, IsExcluded(_)).WillRepeatedly(Return(false));
+  ASSERT_TRUE(TestUpdateCheck());
+
+  string value;
+  EXPECT_TRUE(fake_prefs_->GetString(kPrefsOmahaCohort, &value));
+  EXPECT_EQ(fake_update_response_.cohort, value);
+
+  EXPECT_TRUE(fake_prefs_->GetString(kPrefsOmahaCohortHint, &value));
+  EXPECT_EQ(fake_update_response_.cohorthint, value);
+
+  EXPECT_TRUE(fake_prefs_->GetString(kPrefsOmahaCohortName, &value));
+  EXPECT_EQ(fake_update_response_.cohortname, value);
+
+  EXPECT_TRUE(fake_prefs_->GetString(
+      fake_prefs_->CreateSubKey({kDlcPrefsSubDir, kDlcId1, kPrefsOmahaCohort}),
+      &value));
+  EXPECT_EQ(fake_update_response_.dlc_cohort, value);
+  EXPECT_TRUE(fake_prefs_->GetString(
+      fake_prefs_->CreateSubKey({kDlcPrefsSubDir, kDlcId2, kPrefsOmahaCohort}),
+      &value));
+  EXPECT_EQ(fake_update_response_.dlc_cohort, value);
+
+  EXPECT_TRUE(fake_prefs_->GetString(
+      fake_prefs_->CreateSubKey(
+          {kDlcPrefsSubDir, kDlcId1, kPrefsOmahaCohortHint}),
+      &value));
+  EXPECT_EQ(fake_update_response_.dlc_cohorthint, value);
+  EXPECT_TRUE(fake_prefs_->GetString(
+      fake_prefs_->CreateSubKey(
+          {kDlcPrefsSubDir, kDlcId2, kPrefsOmahaCohortHint}),
+      &value));
+  EXPECT_EQ(fake_update_response_.dlc_cohorthint, value);
+
+  EXPECT_TRUE(fake_prefs_->GetString(
+      fake_prefs_->CreateSubKey(
+          {kDlcPrefsSubDir, kDlcId1, kPrefsOmahaCohortName}),
+      &value));
+  EXPECT_EQ(fake_update_response_.dlc_cohortname, value);
+  EXPECT_TRUE(fake_prefs_->GetString(
+      fake_prefs_->CreateSubKey(
+          {kDlcPrefsSubDir, kDlcId2, kPrefsOmahaCohortName}),
+      &value));
+  EXPECT_EQ(fake_update_response_.dlc_cohortname, value);
+}
+
+TEST_F(OmahaRequestActionTest, NoOutputPipeTest) {
+  const string http_response(fake_update_response_.GetNoUpdateResponse());
+  brillo::FakeMessageLoop loop(nullptr);
+  loop.SetAsCurrent();
+
+  auto action = std::make_unique<OmahaRequestAction>(
+      nullptr,
+      std::make_unique<MockHttpFetcher>(
+          http_response.data(), http_response.size(), nullptr),
+      false,
+      "");
+  ActionProcessor processor;
+  processor.set_delegate(&delegate_);
+  processor.EnqueueAction(std::move(action));
+
+  loop.PostTask(base::Bind(
+      [](ActionProcessor* processor) { processor->StartProcessing(); },
+      base::Unretained(&processor)));
+  loop.Run();
+  EXPECT_FALSE(loop.PendingTasks());
+  EXPECT_FALSE(processor.IsRunning());
+}
+
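+// Malformed XML in the response must surface as an XML parse error with no
+// update.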
+TEST_F(OmahaRequestActionTest, InvalidXmlTest) {
+  tuc_params_.http_response = "invalid xml>";
+  tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError;
+  tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_FALSE(TestUpdateCheck());
+  EXPECT_FALSE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, EmptyResponseTest) {
+  tuc_params_.expected_code = ErrorCode::kOmahaRequestEmptyResponseError;
+  tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_FALSE(TestUpdateCheck());
+  EXPECT_FALSE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, MissingStatusTest) {
+  tuc_params_.http_response =
+      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response protocol=\"3.0\">"
+      "<daystart elapsed_seconds=\"100\"/>"
+      "<app appid=\"foo\" status=\"ok\">"
+      "<ping status=\"ok\"/>"
+      "<updatecheck/></app></response>";
+  tuc_params_.expected_code = ErrorCode::kOmahaResponseInvalid;
+  tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_FALSE(TestUpdateCheck());
+  EXPECT_FALSE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, InvalidStatusTest) {
+  tuc_params_.http_response =
+      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response protocol=\"3.0\">"
+      "<daystart elapsed_seconds=\"100\"/>"
+      "<app appid=\"foo\" status=\"ok\">"
+      "<ping status=\"ok\"/>"
+      "<updatecheck status=\"InvalidStatusTest\"/></app></response>";
+  tuc_params_.expected_code = ErrorCode::kOmahaResponseInvalid;
+  tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_FALSE(TestUpdateCheck());
+  EXPECT_FALSE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, MissingNodesetTest) {
+  tuc_params_.http_response =
+      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response protocol=\"3.0\">"
+      "<daystart elapsed_seconds=\"100\"/>"
+      "<app appid=\"foo\" status=\"ok\">"
+      "<ping status=\"ok\"/>"
+      "</app></response>";
+  tuc_params_.expected_code = ErrorCode::kOmahaResponseInvalid;
+  tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_FALSE(TestUpdateCheck());
+  EXPECT_FALSE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, MissingFieldTest) {
+  tuc_params_.http_response =
+      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response protocol=\"3.0\">"
+      "<daystart elapsed_seconds=\"100\"/>"
+      // The appid needs to match the one in the request params.
+      "<app appid=\"" +
+      fake_update_response_.app_id +
+      "\" status=\"ok\">"
+      "<updatecheck status=\"ok\">"
+      "<urls><url codebase=\"http://missing/field/test/\"/></urls>"
+      "<manifest version=\"10.2.3.4\">"
+      "<packages><package hash=\"not-used\" name=\"f\" "
+      "size=\"587\" fp=\"3.789\" hash_sha256=\"lkq34j5345\"/></packages>"
+      "<actions><action event=\"postinstall\" "
+      "Prompt=\"false\" "
+      "IsDeltaPayload=\"false\" "
+      "sha256=\"not-used\" "
+      "/></actions></manifest></updatecheck></app></response>";
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_TRUE(response_.update_exists);
+  EXPECT_EQ("10.2.3.4", response_.version);
+  EXPECT_EQ("http://missing/field/test/f",
+            response_.packages[0].payload_urls[0]);
+  EXPECT_EQ("", response_.more_info_url);
+  EXPECT_EQ("lkq34j5345", response_.packages[0].hash);
+  EXPECT_EQ(string("3.789"), response_.packages[0].fp);
+  EXPECT_EQ(587u, response_.packages[0].size);
+  EXPECT_FALSE(response_.prompt);
+  EXPECT_TRUE(response_.deadline.empty());
+}
+
+namespace {
+class TerminateEarlyTestProcessorDelegate : public ActionProcessorDelegate {
+ public:
+  void ProcessingStopped(const ActionProcessor* processor) {
+    brillo::MessageLoop::current()->BreakLoop();
+  }
+};
+
+void TerminateTransferTestStarter(ActionProcessor* processor) {
+  processor->StartProcessing();
+  CHECK(processor->IsRunning());
+  processor->StopProcessing();
+}
+}  // namespace
+
+TEST_F(OmahaRequestActionTest, TerminateTransferTest) {
+  brillo::FakeMessageLoop loop(nullptr);
+  loop.SetAsCurrent();
+
+  string http_response("doesn't matter");
+  auto action = std::make_unique<OmahaRequestAction>(
+      nullptr,
+      std::make_unique<MockHttpFetcher>(
+          http_response.data(), http_response.size(), nullptr),
+      false,
+      "");
+  TerminateEarlyTestProcessorDelegate delegate;
+  ActionProcessor processor;
+  processor.set_delegate(&delegate);
+  processor.EnqueueAction(std::move(action));
+
+  loop.PostTask(base::Bind(&TerminateTransferTestStarter, &processor));
+  loop.Run();
+  EXPECT_FALSE(loop.PendingTasks());
+}
+
+TEST_F(OmahaRequestActionTest, XmlEncodeIsUsedForParams) {
+  // Make sure XML Encode is being called on the params.
+  request_params_.set_os_sp("testtheservice_pack>");
+  request_params_.set_os_board("x86 generic<id");
+  request_params_.set_current_channel("unittest_track&lt;");
+  request_params_.set_target_channel("unittest_track&lt;");
+  request_params_.set_lts_tag("unittest_hint&lt;");
+  request_params_.set_hwid("<OEM MODEL>");
+  fake_prefs_->SetString(kPrefsOmahaCohort, "evil\nstring");
+  fake_prefs_->SetString(kPrefsOmahaCohortHint, "evil&string\\");
+  fake_prefs_->SetString(
+      kPrefsOmahaCohortName,
+      base::JoinString(vector<string>(100, "My spoon is too big."), " "));
+  tuc_params_.http_response = "invalid xml>";
+  tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError;
+  tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_FALSE(TestUpdateCheck());
+
+  EXPECT_NE(string::npos, post_str_.find("testtheservice_pack&gt;"));
+  EXPECT_EQ(string::npos, post_str_.find("testtheservice_pack>"));
+  EXPECT_NE(string::npos, post_str_.find("x86 generic&lt;id"));
+  EXPECT_EQ(string::npos, post_str_.find("x86 generic<id"));
+  EXPECT_NE(string::npos, post_str_.find("unittest_track&amp;lt;"));
+  EXPECT_EQ(string::npos, post_str_.find("unittest_track&lt;"));
+  EXPECT_NE(string::npos, post_str_.find("unittest_hint&amp;lt;"));
+  EXPECT_EQ(string::npos, post_str_.find("unittest_hint&lt;"));
+  EXPECT_NE(string::npos, post_str_.find("&lt;OEM MODEL&gt;"));
+  EXPECT_EQ(string::npos, post_str_.find("<OEM MODEL>"));
+  EXPECT_NE(string::npos, post_str_.find("cohort=\"evil\nstring\""));
+  EXPECT_EQ(string::npos, post_str_.find("cohorthint=\"evil&string\\\""));
+  EXPECT_NE(string::npos, post_str_.find("cohorthint=\"evil&amp;string\\\""));
+  // Values from Prefs that are too big are removed from the XML instead of
+  // encoded.
+  EXPECT_EQ(string::npos, post_str_.find("cohortname="));
+}
+
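+// XML entities in response fields (deadline, more_info_url, codebase) are
+// decoded before being stored in the parsed response.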
+TEST_F(OmahaRequestActionTest, XmlDecodeTest) {
+  fake_update_response_.deadline = "&lt;20110101";
+  fake_update_response_.more_info_url = "testthe&lt;url";
+  fake_update_response_.codebase = "testthe&amp;codebase/";
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_EQ("testthe<url", response_.more_info_url);
+  EXPECT_EQ("testthe&codebase/file.signed",
+            response_.packages[0].payload_urls[0]);
+  EXPECT_EQ("<20110101", response_.deadline);
+}
+
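+// Package sizes larger than int32_t must still be parsed correctly into the
+// 64-bit size field.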
+TEST_F(OmahaRequestActionTest, ParseIntTest) {
+  // overflows int32_t:
+  fake_update_response_.size = 123123123123123ull;
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  ASSERT_TRUE(TestUpdateCheck());
+  EXPECT_EQ(fake_update_response_.size, response_.packages[0].size);
+}
+
+TEST_F(OmahaRequestActionTest, FormatUpdateCheckOutputTest) {
+  NiceMock<MockPrefs> prefs;
+  FakeSystemState::Get()->set_prefs(&prefs);
+  tuc_params_.http_response = "invalid xml>";
+  tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError;
+  tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  EXPECT_CALL(prefs, GetString(kPrefsPreviousVersion, _))
+      .WillOnce(DoAll(SetArgPointee<1>(string("")), Return(true)));
+  // An existing but empty previous version means that we didn't reboot to a
+  // new update; therefore, there is no need to update the previous version.
+  EXPECT_CALL(prefs, SetString(kPrefsPreviousVersion, _)).Times(0);
+  ASSERT_FALSE(TestUpdateCheck());
+
+  EXPECT_NE(
+      post_str_.find("        <ping active=\"1\" a=\"-1\" r=\"-1\"></ping>\n"
+                     "        <updatecheck></updatecheck>\n"),
+      string::npos);
+  EXPECT_NE(post_str_.find("hardware_class=\"OEM MODEL 09235 7471\""),
+            string::npos);
+  // No <event> tag should be sent if we didn't reboot to an update.
+  EXPECT_EQ(post_str_.find("<event"), string::npos);
+}
+
+TEST_F(OmahaRequestActionTest, FormatSuccessEventOutputTest) {
+  TestEvent(new OmahaEvent(OmahaEvent::kTypeUpdateDownloadStarted),
+            "invalid xml>");
+
+  string expected_event = base::StringPrintf(
+      "        <event eventtype=\"%d\" eventresult=\"%d\"></event>\n",
+      OmahaEvent::kTypeUpdateDownloadStarted,
+      OmahaEvent::kResultSuccess);
+  EXPECT_NE(post_str_.find(expected_event), string::npos);
+  EXPECT_EQ(post_str_.find("ping"), string::npos);
+  EXPECT_EQ(post_str_.find("updatecheck"), string::npos);
+}
+
+TEST_F(OmahaRequestActionTest, FormatErrorEventOutputTest) {
+  TestEvent(new OmahaEvent(OmahaEvent::kTypeDownloadComplete,
+                           OmahaEvent::kResultError,
+                           ErrorCode::kError),
+            "invalid xml>");
+
+  string expected_event = base::StringPrintf(
+      "        <event eventtype=\"%d\" eventresult=\"%d\" "
+      "errorcode=\"%d\"></event>\n",
+      OmahaEvent::kTypeDownloadComplete,
+      OmahaEvent::kResultError,
+      static_cast<int>(ErrorCode::kError));
+  EXPECT_NE(post_str_.find(expected_event), string::npos);
+  EXPECT_EQ(post_str_.find("updatecheck"), string::npos);
+}
+
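+// IsEvent() distinguishes event requests from plain update checks.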
+TEST_F(OmahaRequestActionTest, IsEventTest) {
+  string http_response("doesn't matter");
+  OmahaRequestAction update_check_action(
+      nullptr,
+      std::make_unique<MockHttpFetcher>(
+          http_response.data(), http_response.size(), nullptr),
+      false,
+      "");
+  EXPECT_FALSE(update_check_action.IsEvent());
+
+  OmahaRequestAction event_action(
+      new OmahaEvent(OmahaEvent::kTypeUpdateComplete),
+      std::make_unique<MockHttpFetcher>(
+          http_response.data(), http_response.size(), nullptr),
+      false,
+      "");
+  EXPECT_TRUE(event_action.IsEvent());
+}
+
+TEST_F(OmahaRequestActionTest, FormatDeltaOkayOutputTest) {
+  tuc_params_.http_response = "invalid xml>";
+  tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError;
+  tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  for (int i = 0; i < 2; i++) {
+    bool delta_okay = i == 1;
+    const char* delta_okay_str = delta_okay ? "true" : "false";
+    request_params_.set_delta_okay(delta_okay);
+
+    ASSERT_FALSE(TestUpdateCheck());
+    EXPECT_NE(post_str_.find(
+                  base::StringPrintf(" delta_okay=\"%s\"", delta_okay_str)),
+              string::npos)
+        << "i = " << i;
+  }
+}
+
+TEST_F(OmahaRequestActionTest, FormatInteractiveOutputTest) {
+  tuc_params_.http_response = "invalid xml>";
+  tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError;
+  tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  for (int i = 0; i < 2; i++) {
+    bool interactive = i == 1;
+    const char* interactive_str = interactive ? "ondemandupdate" : "scheduler";
+    request_params_.set_interactive(interactive);
+
+    ASSERT_FALSE(TestUpdateCheck());
+    EXPECT_NE(post_str_.find(
+                  base::StringPrintf("installsource=\"%s\"", interactive_str)),
+              string::npos)
+        << "i = " << i;
+  }
+}
+
+TEST_F(OmahaRequestActionTest, FormatTargetVersionPrefixOutputTest) {
+  tuc_params_.http_response = "invalid xml>";
+  tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError;
+  tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  for (int i = 0; i < 2; i++) {
+    bool target_version_set = i == 1;
+    const char* target_version_prefix = target_version_set ? "10032." : "";
+    request_params_.set_target_version_prefix(target_version_prefix);
+
+    ASSERT_FALSE(TestUpdateCheck());
+    if (target_version_set) {
+      EXPECT_NE(post_str_.find("<updatecheck targetversionprefix=\"10032.\">"),
+                string::npos)
+          << "i = " << i;
+    } else {
+      EXPECT_EQ(post_str_.find("targetversionprefix"), string::npos)
+          << "i = " << i;
+    }
+  }
+}
+
+TEST_F(OmahaRequestActionTest, FormatRollbackAllowedOutputTest) {
+  tuc_params_.http_response = "invalid xml>";
+  tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError;
+  tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  for (int i = 0; i < 4; i++) {
+    bool rollback_allowed = i / 2 == 0;
+    bool target_version_set = i % 2 == 0;
+    request_params_.set_target_version_prefix(target_version_set ? "10032."
+                                                                 : "");
+    request_params_.set_rollback_allowed(rollback_allowed);
+
+    ASSERT_FALSE(TestUpdateCheck());
+    if (rollback_allowed && target_version_set) {
+      EXPECT_NE(post_str_.find("rollback_allowed=\"true\""), string::npos)
+          << "i = " << i;
+    } else {
+      EXPECT_EQ(post_str_.find("rollback_allowed"), string::npos)
+          << "i = " << i;
+    }
+  }
+}
+
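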
+TEST_F(OmahaRequestActionTest, OmahaEventTest) {
+  OmahaEvent default_event;
+  EXPECT_EQ(OmahaEvent::kTypeUnknown, default_event.type);
+  EXPECT_EQ(OmahaEvent::kResultError, default_event.result);
+  EXPECT_EQ(ErrorCode::kError, default_event.error_code);
+
+  OmahaEvent success_event(OmahaEvent::kTypeUpdateDownloadStarted);
+  EXPECT_EQ(OmahaEvent::kTypeUpdateDownloadStarted, success_event.type);
+  EXPECT_EQ(OmahaEvent::kResultSuccess, success_event.result);
+  EXPECT_EQ(ErrorCode::kSuccess, success_event.error_code);
+
+  OmahaEvent error_event(OmahaEvent::kTypeUpdateDownloadFinished,
+                         OmahaEvent::kResultError,
+                         ErrorCode::kError);
+  EXPECT_EQ(OmahaEvent::kTypeUpdateDownloadFinished, error_event.type);
+  EXPECT_EQ(OmahaEvent::kResultError, error_event.result);
+  EXPECT_EQ(ErrorCode::kError, error_event.error_code);
+}
+
+TEST_F(OmahaRequestActionTest, DeviceQuickFixBuildTokenIsSetTest) {
+  // If the DeviceQuickFixBuildToken value is set, it takes precedence over
+  // the pref value.
+  constexpr char autoupdate_token[] = "autoupdate_token>";
+  constexpr char xml_encoded_autoupdate_token[] = "autoupdate_token&gt;";
+  constexpr char omaha_cohort_hint[] = "cohort_hint";
+
+  tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+  tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+  request_params_.set_autoupdate_token(autoupdate_token);
+  fake_prefs_->SetString(kPrefsOmahaCohortHint, omaha_cohort_hint);
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_NE(string::npos,
+            post_str_.find("cohorthint=\"" +
+                           string(xml_encoded_autoupdate_token) + "\""));
+  EXPECT_EQ(string::npos, post_str_.find(autoupdate_token));
+  EXPECT_EQ(string::npos, post_str_.find(omaha_cohort_hint));
+}
+
+TEST_F(OmahaRequestActionTest, DeviceQuickFixBuildTokenIsNotSetTest) {
+  // If DeviceQuickFixBuildToken is not set, the pref value is provided in the
+  // cohorthint attribute.
+  constexpr char omaha_cohort_hint[] = "evil_string>";
+  constexpr char xml_encoded_cohort_hint[] = "evil_string&gt;";
+
+  tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+  tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+  fake_prefs_->SetString(kPrefsOmahaCohortHint, omaha_cohort_hint);
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_NE(
+      string::npos,
+      post_str_.find("cohorthint=\"" + string(xml_encoded_cohort_hint) + "\""));
+  EXPECT_EQ(string::npos, post_str_.find(omaha_cohort_hint));
+}
+
+TEST_F(OmahaRequestActionTest, TargetChannelHintTest) {
+  tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+  tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+  request_params_.set_lts_tag("hint>");
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_NE(string::npos, post_str_.find("ltstag=\"hint&gt;\""));
+}
+
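+// Helper for the ping tests below: pretends the last active ping was six days
+// ago and the last roll call ping five days ago, then checks the a=/r= values
+// sent in the <ping> element.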
+void OmahaRequestActionTest::PingTest(bool ping_only) {
+  NiceMock<MockPrefs> prefs;
+  FakeSystemState::Get()->set_prefs(&prefs);
+  EXPECT_CALL(prefs, GetInt64(kPrefsMetricsCheckLastReportingTime, _))
+      .Times(AnyNumber());
+  EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber());
+  // Add a few hours to the day difference to verify that the day counts are
+  // not rounded.
+  int64_t five_days_ago =
+      (Time::Now() - TimeDelta::FromHours(5 * 24 + 13)).ToInternalValue();
+  int64_t six_days_ago =
+      (Time::Now() - TimeDelta::FromHours(6 * 24 + 11)).ToInternalValue();
+  EXPECT_CALL(prefs, GetInt64(kPrefsInstallDateDays, _))
+      .WillOnce(DoAll(SetArgPointee<1>(0), Return(true)));
+  EXPECT_CALL(prefs, GetInt64(kPrefsLastActivePingDay, _))
+      .WillOnce(DoAll(SetArgPointee<1>(six_days_ago), Return(true)));
+  EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
+      .WillOnce(DoAll(SetArgPointee<1>(five_days_ago), Return(true)));
+
+  tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+  tuc_params_.ping_only = ping_only;
+  tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_NE(post_str_.find("<ping active=\"1\" a=\"6\" r=\"5\"></ping>"),
+            string::npos);
+  if (ping_only) {
+    EXPECT_EQ(post_str_.find("updatecheck"), string::npos);
+    EXPECT_EQ(post_str_.find("previousversion"), string::npos);
+  } else {
+    EXPECT_NE(post_str_.find("updatecheck"), string::npos);
+    EXPECT_NE(post_str_.find("previousversion"), string::npos);
+  }
+}
+
+TEST_F(OmahaRequestActionTest, PingTestSendOnlyAPing) {
+  PingTest(true /* ping_only */);
+}
+
+TEST_F(OmahaRequestActionTest, PingTestSendAlsoAnUpdateCheck) {
+  PingTest(false /* ping_only */);
+}
+
+TEST_F(OmahaRequestActionTest, ActivePingTest) {
+  NiceMock<MockPrefs> prefs;
+  FakeSystemState::Get()->set_prefs(&prefs);
+  EXPECT_CALL(prefs, GetInt64(kPrefsMetricsCheckLastReportingTime, _))
+      .Times(AnyNumber());
+  EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber());
+  int64_t three_days_ago =
+      (Time::Now() - TimeDelta::FromHours(3 * 24 + 12)).ToInternalValue();
+  int64_t now = Time::Now().ToInternalValue();
+  EXPECT_CALL(prefs, GetInt64(kPrefsInstallDateDays, _))
+      .WillOnce(DoAll(SetArgPointee<1>(0), Return(true)));
+  EXPECT_CALL(prefs, GetInt64(kPrefsLastActivePingDay, _))
+      .WillOnce(DoAll(SetArgPointee<1>(three_days_ago), Return(true)));
+  EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
+      .WillOnce(DoAll(SetArgPointee<1>(now), Return(true)));
+
+  tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+  tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_NE(post_str_.find("<ping active=\"1\" a=\"3\"></ping>"), string::npos);
+}
+
+TEST_F(OmahaRequestActionTest, RollCallPingTest) {
+  NiceMock<MockPrefs> prefs;
+  FakeSystemState::Get()->set_prefs(&prefs);
+  EXPECT_CALL(prefs, GetInt64(kPrefsMetricsCheckLastReportingTime, _))
+      .Times(AnyNumber());
+  EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber());
+  int64_t four_days_ago =
+      (Time::Now() - TimeDelta::FromHours(4 * 24)).ToInternalValue();
+  int64_t now = Time::Now().ToInternalValue();
+  EXPECT_CALL(prefs, GetInt64(kPrefsInstallDateDays, _))
+      .WillOnce(DoAll(SetArgPointee<1>(0), Return(true)));
+  EXPECT_CALL(prefs, GetInt64(kPrefsLastActivePingDay, _))
+      .WillOnce(DoAll(SetArgPointee<1>(now), Return(true)));
+  EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
+      .WillOnce(DoAll(SetArgPointee<1>(four_days_ago), Return(true)));
+
+  tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+  tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_NE(post_str_.find("<ping active=\"1\" r=\"4\"></ping>\n"),
+            string::npos);
+}
+
+TEST_F(OmahaRequestActionTest, NoPingTest) {
+  NiceMock<MockPrefs> prefs;
+  FakeSystemState::Get()->set_prefs(&prefs);
+  EXPECT_CALL(prefs, GetInt64(kPrefsMetricsCheckLastReportingTime, _))
+      .Times(AnyNumber());
+  EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber());
+  int64_t one_hour_ago =
+      (Time::Now() - TimeDelta::FromHours(1)).ToInternalValue();
+  EXPECT_CALL(prefs, GetInt64(kPrefsInstallDateDays, _))
+      .WillOnce(DoAll(SetArgPointee<1>(0), Return(true)));
+  EXPECT_CALL(prefs, GetInt64(kPrefsLastActivePingDay, _))
+      .WillOnce(DoAll(SetArgPointee<1>(one_hour_ago), Return(true)));
+  EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
+      .WillOnce(DoAll(SetArgPointee<1>(one_hour_ago), Return(true)));
+  // kPrefsLastActivePingDay and kPrefsLastRollCallPingDay are set even if we
+  // didn't send a ping.
+  EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _))
+      .WillOnce(Return(true));
+  EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _))
+      .WillOnce(Return(true));
+
+  tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+  tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_EQ(post_str_.find("ping"), string::npos);
+}
+
+TEST_F(OmahaRequestActionTest, IgnoreEmptyPingTest) {
+  // This test ensures that we ignore empty ping-only requests.
+  NiceMock<MockPrefs> prefs;
+  FakeSystemState::Get()->set_prefs(&prefs);
+  int64_t now = Time::Now().ToInternalValue();
+  EXPECT_CALL(prefs, GetInt64(kPrefsLastActivePingDay, _))
+      .WillOnce(DoAll(SetArgPointee<1>(now), Return(true)));
+  EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
+      .WillOnce(DoAll(SetArgPointee<1>(now), Return(true)));
+  EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _)).Times(0);
+  EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _)).Times(0);
+
+  tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+  tuc_params_.ping_only = true;
+  tuc_params_.expected_check_result = metrics::CheckResult::kUnset;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  EXPECT_TRUE(TestUpdateCheck());
+  EXPECT_TRUE(post_str_.empty());
+}
+
+TEST_F(OmahaRequestActionTest, BackInTimePingTest) {
+  NiceMock<MockPrefs> prefs;
+  FakeSystemState::Get()->set_prefs(&prefs);
+  EXPECT_CALL(prefs, GetInt64(kPrefsMetricsCheckLastReportingTime, _))
+      .Times(AnyNumber());
+  EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber());
+  int64_t future =
+      (Time::Now() + TimeDelta::FromHours(3 * 24 + 4)).ToInternalValue();
+  EXPECT_CALL(prefs, GetInt64(kPrefsInstallDateDays, _))
+      .WillOnce(DoAll(SetArgPointee<1>(0), Return(true)));
+  EXPECT_CALL(prefs, GetInt64(kPrefsLastActivePingDay, _))
+      .WillOnce(DoAll(SetArgPointee<1>(future), Return(true)));
+  EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
+      .WillOnce(DoAll(SetArgPointee<1>(future), Return(true)));
+  EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _))
+      .WillOnce(Return(true));
+  EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _))
+      .WillOnce(Return(true));
+
+  tuc_params_.http_response =
+      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+      "protocol=\"3.0\"><daystart elapsed_seconds=\"100\"/>"
+      "<app appid=\"foo\" status=\"ok\"><ping status=\"ok\"/>"
+      "<updatecheck status=\"noupdate\"/></app></response>";
+  tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_TRUE(TestUpdateCheck());
+  EXPECT_EQ(post_str_.find("ping"), string::npos);
+}
+
+TEST_F(OmahaRequestActionTest, LastPingDayUpdateTest) {
+  // This test checks that the action updates the last ping day to now
+  // minus 200 seconds with a slack of 5 seconds. Therefore, the test
+  // may fail if it runs for longer than 5 seconds. It shouldn't run
+  // that long though.
+  int64_t midnight =
+      (Time::Now() - TimeDelta::FromSeconds(200)).ToInternalValue();
+  int64_t midnight_slack =
+      (Time::Now() - TimeDelta::FromSeconds(195)).ToInternalValue();
+  NiceMock<MockPrefs> prefs;
+  FakeSystemState::Get()->set_prefs(&prefs);
+  EXPECT_CALL(prefs, GetInt64(_, _)).Times(AnyNumber());
+  EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber());
+  EXPECT_CALL(prefs,
+              SetInt64(kPrefsLastActivePingDay,
+                       AllOf(Ge(midnight), Le(midnight_slack))))
+      .WillOnce(Return(true));
+  EXPECT_CALL(prefs,
+              SetInt64(kPrefsLastRollCallPingDay,
+                       AllOf(Ge(midnight), Le(midnight_slack))))
+      .WillOnce(Return(true));
+
+  tuc_params_.http_response =
+      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+      "protocol=\"3.0\"><daystart elapsed_seconds=\"200\"/>"
+      "<app appid=\"foo\" status=\"ok\"><ping status=\"ok\"/>"
+      "<updatecheck status=\"noupdate\"/></app></response>";
+  tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_TRUE(TestUpdateCheck());
+}
+
+TEST_F(OmahaRequestActionTest, NoElapsedSecondsTest) {
+  NiceMock<MockPrefs> prefs;
+  FakeSystemState::Get()->set_prefs(&prefs);
+  EXPECT_CALL(prefs, GetInt64(_, _)).Times(AnyNumber());
+  EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber());
+  EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _)).Times(0);
+  EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _)).Times(0);
+
+  tuc_params_.http_response =
+      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+      "protocol=\"3.0\"><daystart blah=\"200\"/>"
+      "<app appid=\"foo\" status=\"ok\"><ping status=\"ok\"/>"
+      "<updatecheck status=\"noupdate\"/></app></response>";
+  tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_TRUE(TestUpdateCheck());
+}
+
+TEST_F(OmahaRequestActionTest, BadElapsedSecondsTest) {
+  NiceMock<MockPrefs> prefs;
+  FakeSystemState::Get()->set_prefs(&prefs);
+  EXPECT_CALL(prefs, GetInt64(_, _)).Times(AnyNumber());
+  EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber());
+  EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _)).Times(0);
+  EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _)).Times(0);
+
+  tuc_params_.http_response =
+      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+      "protocol=\"3.0\"><daystart elapsed_seconds=\"x\"/>"
+      "<app appid=\"foo\" status=\"ok\"><ping status=\"ok\"/>"
+      "<updatecheck status=\"noupdate\"/></app></response>";
+  tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_TRUE(TestUpdateCheck());
+}
+
+TEST_F(OmahaRequestActionTest, NoUniqueIDTest) {
+  tuc_params_.http_response = "invalid xml>";
+  tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError;
+  tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_FALSE(TestUpdateCheck());
+
+  EXPECT_EQ(post_str_.find("machineid="), string::npos);
+  EXPECT_EQ(post_str_.find("userid="), string::npos);
+}
+
+TEST_F(OmahaRequestActionTest, NetworkFailureTest) {
+  const int http_error_code =
+      static_cast<int>(ErrorCode::kOmahaRequestHTTPResponseBase) + 501;
+  tuc_params_.fail_http_response_code = 501;
+  tuc_params_.expected_code = static_cast<ErrorCode>(http_error_code);
+  tuc_params_.expected_check_result = metrics::CheckResult::kDownloadError;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+  tuc_params_.expected_download_error_code =
+      static_cast<metrics::DownloadErrorCode>(501);
+
+  ASSERT_FALSE(TestUpdateCheck());
+
+  EXPECT_FALSE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, NetworkFailureBadHTTPCodeTest) {
+  const int http_error_code =
+      static_cast<int>(ErrorCode::kOmahaRequestHTTPResponseBase) + 999;
+
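+  // An out-of-range HTTP code (1500 here) is expected to be reported as
+  // kOmahaRequestHTTPResponseBase + 999 and counted as kHttpStatusOther in
+  // the download error metric.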
+  tuc_params_.fail_http_response_code = 1500;
+  tuc_params_.expected_code = static_cast<ErrorCode>(http_error_code);
+  tuc_params_.expected_check_result = metrics::CheckResult::kDownloadError;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+  tuc_params_.expected_download_error_code =
+      metrics::DownloadErrorCode::kHttpStatusOther;
+
+  ASSERT_FALSE(TestUpdateCheck());
+  EXPECT_FALSE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, TestUpdateFirstSeenAtGetsPersistedFirstTime) {
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_waiting_period(TimeDelta::FromDays(1));
+  request_params_.set_update_check_count_wait_enabled(false);
+
+  Time arbitrary_date;
+  ASSERT_TRUE(Time::FromString("6/4/1989", &arbitrary_date));
+  FakeSystemState::Get()->fake_clock()->SetWallclockTime(arbitrary_date);
+
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  tuc_params_.expected_code = ErrorCode::kOmahaUpdateDeferredPerPolicy;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kDeferring;
+
+  ASSERT_FALSE(TestUpdateCheck());
+
+  int64_t timestamp = 0;
+  ASSERT_TRUE(fake_prefs_->GetInt64(kPrefsUpdateFirstSeenAt, &timestamp));
+  EXPECT_EQ(arbitrary_date.ToInternalValue(), timestamp);
+  EXPECT_FALSE(response_.update_exists);
+
+  // Verify that if this is an interactive check we don't defer.
+  request_params_.set_interactive(true);
+  tuc_params_.expected_code = ErrorCode::kSuccess;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUpdating;
+
+  ASSERT_TRUE(TestUpdateCheck());
+  EXPECT_TRUE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, TestUpdateFirstSeenAtGetsUsedIfAlreadyPresent) {
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_waiting_period(TimeDelta::FromDays(1));
+  request_params_.set_update_check_count_wait_enabled(false);
+
+  Time t1, t2;
+  ASSERT_TRUE(Time::FromString("1/1/2012", &t1));
+  ASSERT_TRUE(Time::FromString("1/3/2012", &t2));
+  ASSERT_TRUE(
+      fake_prefs_->SetInt64(kPrefsUpdateFirstSeenAt, t1.ToInternalValue()));
+  FakeSystemState::Get()->fake_clock()->SetWallclockTime(t2);
+
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  ASSERT_TRUE(TestUpdateCheck());
+  EXPECT_TRUE(response_.update_exists);
+
+  // Make sure the timestamp t1 is unchanged, showing that it was reused.
+  int64_t timestamp = 0;
+  ASSERT_TRUE(fake_prefs_->GetInt64(kPrefsUpdateFirstSeenAt, &timestamp));
+  ASSERT_EQ(t1.ToInternalValue(), timestamp);
+}
+
+TEST_F(OmahaRequestActionTest, TestChangingToMoreStableChannel) {
+  // Create a uniquely named test directory.
+  base::ScopedTempDir tempdir;
+  ASSERT_TRUE(tempdir.CreateUniqueTempDir());
+
+  request_params_.set_root(tempdir.GetPath().value());
+  request_params_.set_app_id("{22222222-2222-2222-2222-222222222222}");
+  request_params_.set_app_version("1.2.3.4");
+  request_params_.set_product_components("o.bundle=1");
+  request_params_.set_current_channel("canary-channel");
+  EXPECT_TRUE(
+      request_params_.SetTargetChannel("stable-channel", true, nullptr));
+  request_params_.UpdateDownloadChannel();
+  EXPECT_TRUE(request_params_.ShouldPowerwash());
+
+  tuc_params_.http_response = "invalid xml>";
+  tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError;
+  tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_FALSE(TestUpdateCheck());
+
+  EXPECT_NE(string::npos,
+            post_str_.find(
+                "appid=\"{22222222-2222-2222-2222-222222222222}\" "
+                "version=\"0.0.0.0\" from_version=\"1.2.3.4\" "
+                "track=\"stable-channel\" from_track=\"canary-channel\" "));
+  EXPECT_EQ(string::npos, post_str_.find("o.bundle"));
+}
+
+TEST_F(OmahaRequestActionTest, TestChangingToLessStableChannel) {
+  // Create a uniquely named test directory.
+  base::ScopedTempDir tempdir;
+  ASSERT_TRUE(tempdir.CreateUniqueTempDir());
+
+  request_params_.set_root(tempdir.GetPath().value());
+  request_params_.set_app_id("{11111111-1111-1111-1111-111111111111}");
+  request_params_.set_app_version("5.6.7.8");
+  request_params_.set_product_components("o.bundle=1");
+  request_params_.set_current_channel("stable-channel");
+  EXPECT_TRUE(
+      request_params_.SetTargetChannel("canary-channel", false, nullptr));
+  request_params_.UpdateDownloadChannel();
+  EXPECT_FALSE(request_params_.ShouldPowerwash());
+
+  tuc_params_.http_response = "invalid xml>";
+  tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError;
+  tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_FALSE(TestUpdateCheck());
+
+  EXPECT_NE(
+      string::npos,
+      post_str_.find("appid=\"{11111111-1111-1111-1111-111111111111}\" "
+                     "version=\"5.6.7.8\" "
+                     "track=\"canary-channel\" from_track=\"stable-channel\""));
+  EXPECT_EQ(string::npos, post_str_.find("from_version"));
+  EXPECT_NE(string::npos, post_str_.find("o.bundle.version=\"1\""));
+}
+
+// Checks that the initial ping with a=-1 r=-1 is not sent when the device
+// was powerwashed.
+TEST_F(OmahaRequestActionTest, PingWhenPowerwashed) {
+  fake_prefs_->SetString(kPrefsPreviousVersion, "");
+
+  // Flag that the device was powerwashed in the past.
+  FakeSystemState::Get()->fake_hardware()->SetPowerwashCount(1);
+  tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+  tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  // We shouldn't send a ping in this case since the powerwash count is > 0.
+  EXPECT_EQ(string::npos, post_str_.find("<ping"));
+}
+
+// Checks that the initial ping with a=-1 r=-1 is not sent when the device's
+// first_active_omaha_ping_sent flag is set.
+TEST_F(OmahaRequestActionTest, PingWhenFirstActiveOmahaPingIsSent) {
+  fake_prefs_->SetString(kPrefsPreviousVersion, "");
+
+  // Flag that the device was not powerwashed in the past.
+  FakeSystemState::Get()->fake_hardware()->SetPowerwashCount(0);
+
+  // Flag that the device has sent first active ping in the past.
+  FakeSystemState::Get()->fake_hardware()->SetFirstActiveOmahaPingSent();
+
+  tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+  tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  // We shouldn't send a ping in this case since
+  // first_active_omaha_ping_sent is true.
+  EXPECT_EQ(string::npos, post_str_.find("<ping"));
+}
+
+// Checks that the event 54 is sent on a reboot to a new update.
+TEST_F(OmahaRequestActionTest, RebootAfterUpdateEvent) {
+  // Flag that the device was updated in a previous boot.
+  fake_prefs_->SetString(kPrefsPreviousVersion, "1.2.3.4");
+
+  tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+  tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  // An event 54 is included and has the right version.
+  EXPECT_NE(
+      string::npos,
+      post_str_.find(base::StringPrintf("<event eventtype=\"%d\"",
+                                        OmahaEvent::kTypeRebootedAfterUpdate)));
+  EXPECT_NE(string::npos,
+            post_str_.find("previousversion=\"1.2.3.4\"></event>"));
+
+  // The previous version value should have been cleared: the pref still
+  // exists but is now empty.
+  EXPECT_TRUE(fake_prefs_->Exists(kPrefsPreviousVersion));
+  string prev_version;
+  EXPECT_TRUE(fake_prefs_->GetString(kPrefsPreviousVersion, &prev_version));
+  EXPECT_TRUE(prev_version.empty());
+}
+
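+// Runs a full update check with the given P2P configuration and verifies both
+// the P2P flags parsed from the Omaha response and the values pushed into the
+// mocked payload state, including the P2P URL selected for downloading.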
+void OmahaRequestActionTest::P2PTest(bool initial_allow_p2p_for_downloading,
+                                     bool initial_allow_p2p_for_sharing,
+                                     bool omaha_disable_p2p_for_downloading,
+                                     bool omaha_disable_p2p_for_sharing,
+                                     bool payload_state_allow_p2p_attempt,
+                                     bool expect_p2p_client_lookup,
+                                     const string& p2p_client_result_url,
+                                     bool expected_allow_p2p_for_downloading,
+                                     bool expected_allow_p2p_for_sharing,
+                                     const string& expected_p2p_url) {
+  bool actual_allow_p2p_for_downloading = initial_allow_p2p_for_downloading;
+  bool actual_allow_p2p_for_sharing = initial_allow_p2p_for_sharing;
+  string actual_p2p_url;
+
+  MockPayloadState mock_payload_state;
+  FakeSystemState::Get()->set_payload_state(&mock_payload_state);
+  EXPECT_CALL(mock_payload_state, P2PAttemptAllowed())
+      .WillRepeatedly(Return(payload_state_allow_p2p_attempt));
+  EXPECT_CALL(mock_payload_state, GetUsingP2PForDownloading())
+      .WillRepeatedly(ReturnPointee(&actual_allow_p2p_for_downloading));
+  EXPECT_CALL(mock_payload_state, GetUsingP2PForSharing())
+      .WillRepeatedly(ReturnPointee(&actual_allow_p2p_for_sharing));
+  EXPECT_CALL(mock_payload_state, SetUsingP2PForDownloading(_))
+      .WillRepeatedly(SaveArg<0>(&actual_allow_p2p_for_downloading));
+  EXPECT_CALL(mock_payload_state, SetUsingP2PForSharing(_))
+      .WillRepeatedly(SaveArg<0>(&actual_allow_p2p_for_sharing));
+  EXPECT_CALL(mock_payload_state, SetP2PUrl(_))
+      .WillRepeatedly(SaveArg<0>(&actual_p2p_url));
+
+  MockP2PManager mock_p2p_manager;
+  FakeSystemState::Get()->set_p2p_manager(&mock_p2p_manager);
+  mock_p2p_manager.fake().SetLookupUrlForFileResult(p2p_client_result_url);
+
+  TimeDelta timeout = TimeDelta::FromSeconds(kMaxP2PNetworkWaitTimeSeconds);
+  EXPECT_CALL(mock_p2p_manager, LookupUrlForFile(_, _, timeout, _))
+      .Times(expect_p2p_client_lookup ? 1 : 0);
+
+  fake_update_response_.disable_p2p_for_downloading =
+      omaha_disable_p2p_for_downloading;
+  fake_update_response_.disable_p2p_for_sharing = omaha_disable_p2p_for_sharing;
+
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  tuc_params_.expected_check_result = metrics::CheckResult::kUpdateAvailable;
+
+  ASSERT_TRUE(TestUpdateCheck());
+  EXPECT_TRUE(response_.update_exists);
+
+  EXPECT_EQ(omaha_disable_p2p_for_downloading,
+            response_.disable_p2p_for_downloading);
+  EXPECT_EQ(omaha_disable_p2p_for_sharing, response_.disable_p2p_for_sharing);
+
+  EXPECT_EQ(expected_allow_p2p_for_downloading,
+            actual_allow_p2p_for_downloading);
+  EXPECT_EQ(expected_allow_p2p_for_sharing, actual_allow_p2p_for_sharing);
+  EXPECT_EQ(expected_p2p_url, actual_p2p_url);
+}
+
+TEST_F(OmahaRequestActionTest, P2PWithPeer) {
+  P2PTest(true,                   // initial_allow_p2p_for_downloading
+          true,                   // initial_allow_p2p_for_sharing
+          false,                  // omaha_disable_p2p_for_downloading
+          false,                  // omaha_disable_p2p_for_sharing
+          true,                   // payload_state_allow_p2p_attempt
+          true,                   // expect_p2p_client_lookup
+          "http://1.3.5.7/p2p",   // p2p_client_result_url
+          true,                   // expected_allow_p2p_for_downloading
+          true,                   // expected_allow_p2p_for_sharing
+          "http://1.3.5.7/p2p");  // expected_p2p_url
+}
+
+TEST_F(OmahaRequestActionTest, P2PWithoutPeer) {
+  P2PTest(true,   // initial_allow_p2p_for_downloading
+          true,   // initial_allow_p2p_for_sharing
+          false,  // omaha_disable_p2p_for_downloading
+          false,  // omaha_disable_p2p_for_sharing
+          true,   // payload_state_allow_p2p_attempt
+          true,   // expect_p2p_client_lookup
+          "",     // p2p_client_result_url
+          false,  // expected_allow_p2p_for_downloading
+          true,   // expected_allow_p2p_for_sharing
+          "");    // expected_p2p_url
+}
+
+TEST_F(OmahaRequestActionTest, P2PDownloadNotAllowed) {
+  P2PTest(false,    // initial_allow_p2p_for_downloading
+          true,     // initial_allow_p2p_for_sharing
+          false,    // omaha_disable_p2p_for_downloading
+          false,    // omaha_disable_p2p_for_sharing
+          true,     // payload_state_allow_p2p_attempt
+          false,    // expect_p2p_client_lookup
+          "unset",  // p2p_client_result_url
+          false,    // expected_allow_p2p_for_downloading
+          true,     // expected_allow_p2p_for_sharing
+          "");      // expected_p2p_url
+}
+
+TEST_F(OmahaRequestActionTest, P2PWithPeerDownloadDisabledByOmaha) {
+  P2PTest(true,     // initial_allow_p2p_for_downloading
+          true,     // initial_allow_p2p_for_sharing
+          true,     // omaha_disable_p2p_for_downloading
+          false,    // omaha_disable_p2p_for_sharing
+          true,     // payload_state_allow_p2p_attempt
+          false,    // expect_p2p_client_lookup
+          "unset",  // p2p_client_result_url
+          false,    // expected_allow_p2p_for_downloading
+          true,     // expected_allow_p2p_for_sharing
+          "");      // expected_p2p_url
+}
+
+TEST_F(OmahaRequestActionTest, P2PWithPeerSharingDisabledByOmaha) {
+  P2PTest(true,                   // initial_allow_p2p_for_downloading
+          true,                   // initial_allow_p2p_for_sharing
+          false,                  // omaha_disable_p2p_for_downloading
+          true,                   // omaha_disable_p2p_for_sharing
+          true,                   // payload_state_allow_p2p_attempt
+          true,                   // expect_p2p_client_lookup
+          "http://1.3.5.7/p2p",   // p2p_client_result_url
+          true,                   // expected_allow_p2p_for_downloading
+          false,                  // expected_allow_p2p_for_sharing
+          "http://1.3.5.7/p2p");  // expected_p2p_url
+}
+
+TEST_F(OmahaRequestActionTest, P2PWithPeerBothDisabledByOmaha) {
+  P2PTest(true,     // initial_allow_p2p_for_downloading
+          true,     // initial_allow_p2p_for_sharing
+          true,     // omaha_disable_p2p_for_downloading
+          true,     // omaha_disable_p2p_for_sharing
+          true,     // payload_state_allow_p2p_attempt
+          false,    // expect_p2p_client_lookup
+          "unset",  // p2p_client_result_url
+          false,    // expected_allow_p2p_for_downloading
+          false,    // expected_allow_p2p_for_sharing
+          "");      // expected_p2p_url
+}
+
+bool OmahaRequestActionTest::InstallDateParseHelper(const string& elapsed_days,
+                                                    OmahaResponse* response) {
+  fake_update_response_.elapsed_days = elapsed_days;
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  return TestUpdateCheck();
+}
+
+TEST_F(OmahaRequestActionTest, ParseInstallDateFromResponse) {
+  // Simulate a successful update check that happens during OOBE.  The
+  // deadline in the response is needed to force the update attempt to
+  // occur; responses without a deadline seen during OOBE will normally
+  // return ErrorCode::kNonCriticalUpdateInOOBE.
+  FakeSystemState::Get()->fake_hardware()->UnsetIsOOBEComplete();
+  fake_update_response_.deadline = "20101020";
+
+  // Check that we parse elapsed_days in the Omaha response correctly and
+  // that the kPrefsInstallDateDays value is written to.
+  EXPECT_FALSE(fake_prefs_->Exists(kPrefsInstallDateDays));
+  EXPECT_TRUE(InstallDateParseHelper("42", &response_));
+  EXPECT_TRUE(response_.update_exists);
+  EXPECT_EQ(42, response_.install_date_days);
+  EXPECT_TRUE(fake_prefs_->Exists(kPrefsInstallDateDays));
+  int64_t prefs_days;
+  EXPECT_TRUE(fake_prefs_->GetInt64(kPrefsInstallDateDays, &prefs_days));
+  EXPECT_EQ(prefs_days, 42);
+
+  // If there already is a value set, we shouldn't do anything.
+  EXPECT_TRUE(InstallDateParseHelper("7", &response_));
+  EXPECT_TRUE(response_.update_exists);
+  EXPECT_EQ(7, response_.install_date_days);
+  EXPECT_TRUE(fake_prefs_->GetInt64(kPrefsInstallDateDays, &prefs_days));
+  EXPECT_EQ(prefs_days, 42);
+
+  // Note that elapsed_days is not necessarily divisible by 7 so check
+  // that we round down correctly when populating kPrefsInstallDateDays.
+  EXPECT_TRUE(fake_prefs_->Delete(kPrefsInstallDateDays));
+  EXPECT_TRUE(InstallDateParseHelper("23", &response_));
+  EXPECT_TRUE(response_.update_exists);
+  EXPECT_EQ(23, response_.install_date_days);
+  EXPECT_TRUE(fake_prefs_->GetInt64(kPrefsInstallDateDays, &prefs_days));
+  EXPECT_EQ(prefs_days, 21);
+
+  // Check that we correctly handle elapsed_days not being included in
+  // the Omaha response.
+  EXPECT_TRUE(InstallDateParseHelper("", &response_));
+  EXPECT_TRUE(response_.update_exists);
+  EXPECT_EQ(-1, response_.install_date_days);
+}
+
+// If there is no prefs value and OOBE is not complete, we should not
+// report anything to Omaha.
+TEST_F(OmahaRequestActionTest, GetInstallDateWhenNoPrefsNorOOBE) {
+  FakeSystemState::Get()->fake_hardware()->UnsetIsOOBEComplete();
+  EXPECT_EQ(OmahaRequestAction::GetInstallDate(), -1);
+  EXPECT_FALSE(fake_prefs_->Exists(kPrefsInstallDateDays));
+}
+
+// If OOBE is complete and happened on a valid date (e.g. after Jan
+// 1 2007 0:00 PST), that date should be used and written to
+// prefs. However, first try with an invalid date and check we do
+// nothing.
+TEST_F(OmahaRequestActionTest, GetInstallDateWhenOOBECompletedWithInvalidDate) {
+  Time oobe_date = Time::FromTimeT(42);  // Dec 31, 1969 16:00:42 PST.
+  FakeSystemState::Get()->fake_hardware()->SetIsOOBEComplete(oobe_date);
+  EXPECT_EQ(OmahaRequestAction::GetInstallDate(), -1);
+  EXPECT_FALSE(fake_prefs_->Exists(kPrefsInstallDateDays));
+}
+
+// Then check with a valid date. The date Jan 20, 2007 0:00 PST
+// should yield an InstallDate of 14.
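+// (Jan 20, 2007 is 19 days after the Jan 1, 2007 0:00 PST epoch; rounding
+// down to a multiple of 7 gives 14.)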
+TEST_F(OmahaRequestActionTest, GetInstallDateWhenOOBECompletedWithValidDate) {
+  Time oobe_date = Time::FromTimeT(1169280000);  // Jan 20, 2007 0:00 PST.
+  FakeSystemState::Get()->fake_hardware()->SetIsOOBEComplete(oobe_date);
+  EXPECT_EQ(OmahaRequestAction::GetInstallDate(), 14);
+  EXPECT_TRUE(fake_prefs_->Exists(kPrefsInstallDateDays));
+
+  int64_t prefs_days;
+  EXPECT_TRUE(fake_prefs_->GetInt64(kPrefsInstallDateDays, &prefs_days));
+  EXPECT_EQ(prefs_days, 14);
+}
+
+// Now that we have a valid date in prefs, check that we keep using
+// that even if OOBE date reports something else. The date Jan 30,
+// 2007 0:00 PST should yield an InstallDate of 28... but since
+// there's a prefs file, we should still get 14.
+TEST_F(OmahaRequestActionTest, GetInstallDateWhenOOBECompletedDateChanges) {
+  // Set a valid date in the prefs first.
+  EXPECT_TRUE(fake_prefs_->SetInt64(kPrefsInstallDateDays, 14));
+
+  Time oobe_date = Time::FromTimeT(1170144000);  // Jan 30, 2007 0:00 PST.
+  FakeSystemState::Get()->fake_hardware()->SetIsOOBEComplete(oobe_date);
+  EXPECT_EQ(OmahaRequestAction::GetInstallDate(), 14);
+
+  int64_t prefs_days;
+  EXPECT_TRUE(fake_prefs_->GetInt64(kPrefsInstallDateDays, &prefs_days));
+  EXPECT_EQ(prefs_days, 14);
+
+  // If we delete the prefs file, we should get 28 days.
+  EXPECT_TRUE(fake_prefs_->Delete(kPrefsInstallDateDays));
+  EXPECT_EQ(OmahaRequestAction::GetInstallDate(), 28);
+  EXPECT_TRUE(fake_prefs_->GetInt64(kPrefsInstallDateDays, &prefs_days));
+  EXPECT_EQ(prefs_days, 28);
+}
+
+// Verifies that a device that has no device policy and is not a consumer
+// device sets the max kernel key rollforward to the current minimum kernel
+// key version, i.e. the same behavior as if rollback were enabled.
+TEST_F(OmahaRequestActionTest, NoPolicyEnterpriseDevicesSetMaxRollback) {
+  FakeHardware* fake_hw = FakeSystemState::Get()->fake_hardware();
+
+  // Setup and verify some initial default values for the kernel TPM
+  // values that control verified boot and rollback.
+  const int min_kernel_version = 4;
+  fake_hw->SetMinKernelKeyVersion(min_kernel_version);
+  fake_hw->SetMaxKernelKeyRollforward(kRollforwardInfinity);
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
+  EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward());
+
+  EXPECT_CALL(
+      *FakeSystemState::Get()->mock_metrics_reporter(),
+      ReportKeyVersionMetrics(min_kernel_version, min_kernel_version, true))
+      .Times(1);
+
+  fake_update_response_.deadline = "20101020";
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  tuc_params_.is_consumer_device = false;
+  tuc_params_.rollback_allowed_milestones = 3;
+
+  EXPECT_TRUE(TestUpdateCheck());
+  EXPECT_TRUE(response_.update_exists);
+
+  // Verify kernel_max_rollforward was set to the current minimum
+  // kernel key version. This has the effect of freezing roll
+  // forwards indefinitely, holding the rollback window open until
+  // a future change can move it forward relative to the configured
+  // window.
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMaxKernelKeyRollforward());
+}
+
+// Verifies that a consumer device with no device policy sets the
+// max kernel key rollforward to logical infinity, i.e. the same
+// behavior as if rollback is disabled.
+TEST_F(OmahaRequestActionTest, NoPolicyConsumerDevicesSetMaxRollback) {
+  FakeHardware* fake_hw = FakeSystemState::Get()->fake_hardware();
+
+  // Setup and verify some initial default values for the kernel TPM
+  // values that control verified boot and rollback.
+  const int min_kernel_version = 3;
+  fake_hw->SetMinKernelKeyVersion(min_kernel_version);
+  fake_hw->SetMaxKernelKeyRollforward(kRollforwardInfinity);
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
+  EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward());
+
+  EXPECT_CALL(
+      *FakeSystemState::Get()->mock_metrics_reporter(),
+      ReportKeyVersionMetrics(min_kernel_version, kRollforwardInfinity, true))
+      .Times(1);
+
+  fake_update_response_.deadline = "20101020";
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  tuc_params_.is_consumer_device = true;
+  tuc_params_.rollback_allowed_milestones = 3;
+
+  EXPECT_TRUE(TestUpdateCheck());
+  EXPECT_TRUE(response_.update_exists);
+
+  // Verify that with rollback disabled, kernel_max_rollforward
+  // was set to logical infinity. This is the expected behavior for
+  // consumer devices and matches the existing behavior prior to the
+  // rollback features.
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
+  EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward());
+}
+
+// Verifies that a device with rollback enabled sets kernel_max_rollforward
+// in the TPM to prevent roll forward.
+TEST_F(OmahaRequestActionTest, RollbackEnabledDevicesSetMaxRollback) {
+  FakeHardware* fake_hw = FakeSystemState::Get()->fake_hardware();
+
+  // Setup and verify some initial default values for the kernel TPM
+  // values that control verified boot and rollback.
+  const int allowed_milestones = 4;
+  const int min_kernel_version = 3;
+  fake_hw->SetMinKernelKeyVersion(min_kernel_version);
+  fake_hw->SetMaxKernelKeyRollforward(kRollforwardInfinity);
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
+  EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward());
+
+  EXPECT_CALL(
+      *FakeSystemState::Get()->mock_metrics_reporter(),
+      ReportKeyVersionMetrics(min_kernel_version, min_kernel_version, true))
+      .Times(1);
+
+  fake_update_response_.deadline = "20101020";
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  tuc_params_.is_consumer_device = false;
+  tuc_params_.rollback_allowed_milestones = allowed_milestones;
+  tuc_params_.is_policy_loaded = true;
+
+  EXPECT_TRUE(TestUpdateCheck());
+  EXPECT_TRUE(response_.update_exists);
+
+  // Verify that with rollback enabled, kernel_max_rollforward
+  // was set to the current minimum kernel key version. This has
+  // the effect of freezing roll forwards indefinitely, holding the
+  // rollback window open until a future change can move it forward
+  // relative to the configured window.
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMaxKernelKeyRollforward());
+}
+
+// Verifies that a device with rollback disabled sets kernel_max_rollforward
+// in the TPM to logical infinity, to allow roll forward.
+TEST_F(OmahaRequestActionTest, RollbackDisabledDevicesSetMaxRollback) {
+  FakeHardware* fake_hw = FakeSystemState::Get()->fake_hardware();
+
+  // Setup and verify some initial default values for the kernel TPM
+  // values that control verified boot and rollback.
+  const int allowed_milestones = 0;
+  const int min_kernel_version = 3;
+  fake_hw->SetMinKernelKeyVersion(min_kernel_version);
+  fake_hw->SetMaxKernelKeyRollforward(kRollforwardInfinity);
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
+  EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward());
+
+  EXPECT_CALL(
+      *FakeSystemState::Get()->mock_metrics_reporter(),
+      ReportKeyVersionMetrics(min_kernel_version, kRollforwardInfinity, true))
+      .Times(1);
+
+  fake_update_response_.deadline = "20101020";
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  tuc_params_.is_consumer_device = false;
+  tuc_params_.rollback_allowed_milestones = allowed_milestones;
+  tuc_params_.is_policy_loaded = true;
+
+  EXPECT_TRUE(TestUpdateCheck());
+  EXPECT_TRUE(response_.update_exists);
+
+  // Verify that with rollback disabled, kernel_max_rollforward
+  // was set to logical infinity.
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
+  EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward());
+}
+
+TEST_F(OmahaRequestActionTest, RollbackResponseParsedNoEntries) {
+  fake_update_response_.rollback = true;
+  fake_update_response_.deadline = "20101020";
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  tuc_params_.is_consumer_device = false;
+  tuc_params_.rollback_allowed_milestones = 4;
+  tuc_params_.is_policy_loaded = true;
+
+  EXPECT_TRUE(TestUpdateCheck());
+  EXPECT_TRUE(response_.update_exists);
+  EXPECT_TRUE(response_.is_rollback);
+}
+
+TEST_F(OmahaRequestActionTest, RollbackResponseValidVersionsParsed) {
+  fake_update_response_.rollback_firmware_version = "1.2";
+  fake_update_response_.rollback_kernel_version = "3.4";
+  fake_update_response_.rollback = true;
+  fake_update_response_.deadline = "20101020";
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  tuc_params_.is_consumer_device = false;
+  tuc_params_.rollback_allowed_milestones = 4;
+  tuc_params_.is_policy_loaded = true;
+
+  EXPECT_TRUE(TestUpdateCheck());
+  EXPECT_TRUE(response_.update_exists);
+  EXPECT_TRUE(response_.is_rollback);
+  EXPECT_EQ(1, response_.rollback_key_version.firmware_key);
+  EXPECT_EQ(2, response_.rollback_key_version.firmware);
+  EXPECT_EQ(3, response_.rollback_key_version.kernel_key);
+  EXPECT_EQ(4, response_.rollback_key_version.kernel);
+}
+
+TEST_F(OmahaRequestActionTest,
+       TestUpdateFirstSeenAtPrefPersistedIfUpdateExists) {
+  FakeClock fake_clock;
+  Time now = Time::Now();
+  fake_clock.SetWallclockTime(now);
+  FakeSystemState::Get()->set_clock(&fake_clock);
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_TRUE(response_.update_exists);
+  EXPECT_TRUE(fake_prefs_->Exists(kPrefsUpdateFirstSeenAt));
+
+  int64_t stored_first_seen_at_time;
+  EXPECT_TRUE(fake_prefs_->GetInt64(kPrefsUpdateFirstSeenAt,
+                                    &stored_first_seen_at_time));
+  EXPECT_EQ(now.ToInternalValue(), stored_first_seen_at_time);
+}
+
+TEST_F(OmahaRequestActionTest,
+       TestUpdateFirstSeenAtPrefNotPersistedIfUpdateFails) {
+  FakeClock fake_clock;
+  Time now = Time::Now();
+  fake_clock.SetWallclockTime(now);
+  FakeSystemState::Get()->set_clock(&fake_clock);
+
+  tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+  tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_FALSE(response_.update_exists);
+  EXPECT_FALSE(fake_prefs_->Exists(kPrefsUpdateFirstSeenAt));
+}
+
+TEST_F(OmahaRequestActionTest, InstallTest) {
+  request_params_.set_is_install(true);
+  request_params_.set_dlc_apps_params(
+      {{request_params_.GetDlcAppId("dlc_no_0"), {.name = "dlc_no_0"}},
+       {request_params_.GetDlcAppId("dlc_no_1"), {.name = "dlc_no_1"}}});
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  for (const auto& it : request_params_.dlc_apps_params()) {
+    EXPECT_NE(string::npos, post_str_.find("appid=\"" + it.first + "\""));
+  }
+  EXPECT_NE(string::npos,
+            post_str_.find("appid=\"" + fake_update_response_.app_id + "\""));
+
+  // Count the number of updatecheck tags in the request body.
+  int updatecheck_count = 0;
+  size_t pos = 0;
+  while ((pos = post_str_.find("<updatecheck", pos)) != string::npos) {
+    updatecheck_count++;
+    pos++;
+  }
+  EXPECT_EQ(request_params_.dlc_apps_params().size(), updatecheck_count);
+  EXPECT_TRUE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, InstallMissingPlatformVersionTest) {
+  fake_update_response_.multi_app_skip_updatecheck = true;
+  fake_update_response_.multi_app_no_update = false;
+  request_params_.set_is_install(true);
+  request_params_.set_dlc_apps_params(
+      {{request_params_.GetDlcAppId("dlc_no_0"), {.name = "dlc_no_0"}},
+       {request_params_.GetDlcAppId("dlc_no_1"), {.name = "dlc_no_1"}}});
+  request_params_.set_app_id(fake_update_response_.app_id_skip_updatecheck);
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_TRUE(response_.update_exists);
+  EXPECT_EQ(fake_update_response_.version, response_.version);
+}
+
+TEST_F(OmahaRequestActionTest, UpdateWithDlcTest) {
+  request_params_.set_dlc_apps_params(
+      {{request_params_.GetDlcAppId(kDlcId1), {.name = kDlcId1}}});
+  fake_update_response_.dlc_app_update = true;
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  EXPECT_CALL(mock_excluder_, IsExcluded(_)).WillRepeatedly(Return(false));
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_EQ(response_.packages.size(), 2u);
+  // Two candidate URLs.
+  EXPECT_EQ(response_.packages[1].payload_urls.size(), 2u);
+  EXPECT_TRUE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, UpdateWithPartiallyExcludedDlcTest) {
+  const string kDlcAppId = request_params_.GetDlcAppId(kDlcId1);
+  request_params_.set_dlc_apps_params({{kDlcAppId, {.name = kDlcId1}}});
+  fake_update_response_.dlc_app_update = true;
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  // The first DLC candidate URL is excluded.
+  EXPECT_CALL(mock_excluder_, IsExcluded(_))
+      .WillOnce(Return(true))
+      .WillOnce(Return(false));
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_EQ(response_.packages.size(), 2u);
+  // One candidate URL.
+  EXPECT_EQ(response_.packages[1].payload_urls.size(), 1u);
+  EXPECT_TRUE(response_.update_exists);
+  EXPECT_TRUE(request_params_.dlc_apps_params().at(kDlcAppId).updated);
+}
+
+TEST_F(OmahaRequestActionTest, UpdateWithExcludedDlcTest) {
+  const string kDlcAppId = request_params_.GetDlcAppId(kDlcId1);
+  request_params_.set_dlc_apps_params({{kDlcAppId, {.name = kDlcId1}}});
+  fake_update_response_.dlc_app_update = true;
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  // Both DLC candidate URLs are excluded.
+  EXPECT_CALL(mock_excluder_, IsExcluded(_))
+      .WillOnce(Return(true))
+      .WillOnce(Return(true));
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_EQ(response_.packages.size(), 1u);
+  EXPECT_TRUE(response_.update_exists);
+  EXPECT_FALSE(request_params_.dlc_apps_params().at(kDlcAppId).updated);
+}
+
+TEST_F(OmahaRequestActionTest, UpdateWithDeprecatedDlcTest) {
+  request_params_.set_dlc_apps_params(
+      {{request_params_.GetDlcAppId(kDlcId2), {.name = kDlcId2}}});
+  fake_update_response_.dlc_app_no_update = true;
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  EXPECT_CALL(mock_excluder_, IsExcluded(_)).WillRepeatedly(Return(false));
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_TRUE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, UpdateWithDlcAndDeprecatedDlcTest) {
+  request_params_.set_dlc_apps_params(
+      {{request_params_.GetDlcAppId(kDlcId1), {.name = kDlcId1}},
+       {request_params_.GetDlcAppId(kDlcId2), {.name = kDlcId2}}});
+  fake_update_response_.dlc_app_update = true;
+  fake_update_response_.dlc_app_no_update = true;
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  EXPECT_CALL(mock_excluder_, IsExcluded(_)).WillRepeatedly(Return(false));
+  ASSERT_TRUE(TestUpdateCheck());
+
+  EXPECT_TRUE(response_.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, PastRollbackVersionsNoEntries) {
+  fake_update_response_.rollback = true;
+  fake_update_response_.rollback_allowed_milestones = 4;
+  request_params_.set_rollback_allowed_milestones(4);
+  fake_update_response_.deadline = "20101020";
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  tuc_params_.is_consumer_device = false;
+  tuc_params_.rollback_allowed_milestones = 4;
+  tuc_params_.is_policy_loaded = true;
+
+  EXPECT_TRUE(TestUpdateCheck());
+  EXPECT_TRUE(response_.update_exists);
+  EXPECT_TRUE(response_.is_rollback);
+  EXPECT_EQ(std::numeric_limits<uint16_t>::max(),
+            response_.past_rollback_key_version.firmware_key);
+  EXPECT_EQ(std::numeric_limits<uint16_t>::max(),
+            response_.past_rollback_key_version.firmware);
+  EXPECT_EQ(std::numeric_limits<uint16_t>::max(),
+            response_.past_rollback_key_version.kernel_key);
+  EXPECT_EQ(std::numeric_limits<uint16_t>::max(),
+            response_.past_rollback_key_version.kernel);
+}
+
+TEST_F(OmahaRequestActionTest, PastRollbackVersionsValidEntries) {
+  request_params_.set_rollback_allowed_milestones(4);
+  fake_update_response_.rollback = true;
+  fake_update_response_.rollback_allowed_milestones = 4;
+  fake_update_response_.rollback_firmware_version = "4.3";
+  fake_update_response_.rollback_kernel_version = "2.1";
+  fake_update_response_.past_rollback_key_version =
+      std::make_pair("16.15", "14.13");
+  fake_update_response_.deadline = "20101020";
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  tuc_params_.is_consumer_device = false;
+  tuc_params_.rollback_allowed_milestones = 4;
+  tuc_params_.is_policy_loaded = true;
+
+  EXPECT_TRUE(TestUpdateCheck());
+  EXPECT_TRUE(response_.update_exists);
+  EXPECT_TRUE(response_.is_rollback);
+  EXPECT_EQ(16, response_.past_rollback_key_version.firmware_key);
+  EXPECT_EQ(15, response_.past_rollback_key_version.firmware);
+  EXPECT_EQ(14, response_.past_rollback_key_version.kernel_key);
+  EXPECT_EQ(13, response_.past_rollback_key_version.kernel);
+}
+
+TEST_F(OmahaRequestActionTest, MismatchNumberOfVersions) {
+  fake_update_response_.rollback = true;
+  fake_update_response_.rollback_allowed_milestones = 2;
+  fake_update_response_.deadline = "20101020";
+  request_params_.set_rollback_allowed_milestones(4);
+
+  // Since |request_params_.rollback_allowed_milestones| is 4 but the response
+  // is constructed with |fake_update_response_.rollback_allowed_milestones| set
+  // to 2, OmahaRequestAction will look for the key values of the N-4 version
+  // but only the N-2 version will exist.
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  tuc_params_.is_consumer_device = false;
+  tuc_params_.rollback_allowed_milestones = 2;
+  tuc_params_.is_policy_loaded = true;
+
+  EXPECT_TRUE(TestUpdateCheck());
+  EXPECT_TRUE(response_.update_exists);
+  EXPECT_TRUE(response_.is_rollback);
+  EXPECT_EQ(std::numeric_limits<uint16_t>::max(),
+            response_.past_rollback_key_version.firmware_key);
+  EXPECT_EQ(std::numeric_limits<uint16_t>::max(),
+            response_.past_rollback_key_version.firmware);
+  EXPECT_EQ(std::numeric_limits<uint16_t>::max(),
+            response_.past_rollback_key_version.kernel_key);
+  EXPECT_EQ(std::numeric_limits<uint16_t>::max(),
+            response_.past_rollback_key_version.kernel);
+}
+
+TEST_F(OmahaRequestActionTest, IncludeRequisitionTest) {
+  request_params_.set_device_requisition("remora");
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  ASSERT_TRUE(TestUpdateCheck());
+  EXPECT_NE(string::npos, post_str_.find("requisition=\"remora\""));
+}
+
+TEST_F(OmahaRequestActionTest, NoIncludeRequisitionTest) {
+  request_params_.set_device_requisition("");
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+  ASSERT_TRUE(TestUpdateCheck());
+  EXPECT_EQ(string::npos, post_str_.find("requisition"));
+}
+
+TEST_F(OmahaRequestActionTest, PersistEolDateTest) {
+  tuc_params_.http_response =
+      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+      "protocol=\"3.0\"><app appid=\"test-app-id\" status=\"ok\">"
+      "<ping status=\"ok\"/><updatecheck status=\"noupdate\" "
+      "_eol_date=\"200\" _foo=\"bar\"/></app></response>";
+  tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  string eol_date;
+  EXPECT_TRUE(FakeSystemState::Get()->prefs()->GetString(kPrefsOmahaEolDate,
+                                                         &eol_date));
+  EXPECT_EQ("200", eol_date);
+}
+
+TEST_F(OmahaRequestActionTest, PersistEolMissingDateTest) {
+  tuc_params_.http_response =
+      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+      "protocol=\"3.0\"><app appid=\"test-app-id\" status=\"ok\">"
+      "<ping status=\"ok\"/><updatecheck status=\"noupdate\" "
+      "_foo=\"bar\"/></app></response>";
+  tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  const string kDate = "123";
+  FakeSystemState::Get()->prefs()->SetString(kPrefsOmahaEolDate, kDate);
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  string eol_date;
+  EXPECT_TRUE(FakeSystemState::Get()->prefs()->GetString(kPrefsOmahaEolDate,
+                                                         &eol_date));
+  EXPECT_EQ(kDate, eol_date);
+}
+
+TEST_F(OmahaRequestActionTest, PersistEolBadDateTest) {
+  tuc_params_.http_response =
+      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+      "protocol=\"3.0\"><app appid=\"test-app-id\" status=\"ok\">"
+      "<ping status=\"ok\"/><updatecheck status=\"noupdate\" "
+      "_eol_date=\"bad\" foo=\"bar\"/></app></response>";
+  tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+  tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  string eol_date;
+  EXPECT_TRUE(FakeSystemState::Get()->prefs()->GetString(kPrefsOmahaEolDate,
+                                                         &eol_date));
+  EXPECT_EQ(kEolDateInvalid, StringToEolDate(eol_date));
+}
+
+TEST_F(OmahaRequestActionDlcPingTest, StorePingReplyNoPing) {
+  OmahaRequestParams::AppParams app_param = {.name = dlc_id_};
+  request_params_.set_dlc_apps_params(
+      {{request_params_.GetDlcAppId(dlc_id_), app_param}});
+
+  ASSERT_TRUE(TestUpdateCheck());
+
+  int64_t temp_int;
+  // If there was no ping, the metadata files shouldn't exist yet.
+  EXPECT_FALSE(fake_prefs_->GetInt64(active_key_, &temp_int));
+  EXPECT_FALSE(fake_prefs_->GetInt64(last_active_key_, &temp_int));
+  EXPECT_FALSE(fake_prefs_->GetInt64(last_rollcall_key_, &temp_int));
+}
+
+TEST_F(OmahaRequestActionDlcPingTest, StorePingReplyActiveTest) {
+  // Create Active value
+  fake_prefs_->SetInt64(active_key_, 0);
+
+  OmahaRequestParams::AppParams app_param = {
+      .active_counting_type = OmahaRequestParams::kDateBased,
+      .name = dlc_id_,
+      .ping_active = 1,
+      .send_ping = true};
+  request_params_.set_dlc_apps_params(
+      {{request_params_.GetDlcAppId(dlc_id_), app_param}});
+
+  int64_t temp_int;
+  string temp_str;
+  ASSERT_TRUE(TestUpdateCheck());
+  EXPECT_TRUE(fake_prefs_->GetInt64(active_key_, &temp_int));
+  EXPECT_EQ(temp_int, kPingInactiveValue);
+  EXPECT_TRUE(fake_prefs_->GetString(last_active_key_, &temp_str));
+  EXPECT_EQ(temp_str, "4763");
+  EXPECT_TRUE(fake_prefs_->GetString(last_rollcall_key_, &temp_str));
+  EXPECT_EQ(temp_str, "4763");
+}
+
+TEST_F(OmahaRequestActionDlcPingTest, StorePingReplyInactiveTest) {
+  // Create Active value
+  fake_prefs_->SetInt64(active_key_, 0);
+
+  OmahaRequestParams::AppParams app_param = {
+      .active_counting_type = OmahaRequestParams::kDateBased,
+      .name = dlc_id_,
+      .ping_active = 0,
+      .send_ping = true};
+  request_params_.set_dlc_apps_params(
+      {{request_params_.GetDlcAppId(dlc_id_), app_param}});
+
+  // Set the previous active value to a value older than 4763.
+  fake_prefs_->SetString(last_active_key_, "555");
+
+  int64_t temp_int;
+  ASSERT_TRUE(TestUpdateCheck());
+  EXPECT_TRUE(fake_prefs_->GetInt64(active_key_, &temp_int));
+  EXPECT_EQ(temp_int, kPingInactiveValue);
+  string temp_str;
+  EXPECT_TRUE(fake_prefs_->GetString(last_active_key_, &temp_str));
+  EXPECT_EQ(temp_str, "555");
+  EXPECT_TRUE(fake_prefs_->GetString(last_rollcall_key_, &temp_str));
+  EXPECT_EQ(temp_str, "4763");
+}
+
+TEST_F(OmahaRequestActionTest, OmahaResponseUpdateCanExcludeCheck) {
+  request_params_.set_dlc_apps_params(
+      {{request_params_.GetDlcAppId(kDlcId1), {.name = kDlcId1}}});
+  fake_update_response_.dlc_app_update = true;
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  EXPECT_CALL(mock_excluder_, IsExcluded(_)).WillRepeatedly(Return(false));
+  ASSERT_TRUE(TestUpdateCheck());
+  ASSERT_TRUE(delegate_.omaha_response_);
+  const auto& packages = delegate_.omaha_response_->packages;
+  ASSERT_EQ(packages.size(), 2);
+
+  EXPECT_FALSE(packages[0].can_exclude);
+  EXPECT_TRUE(packages[1].can_exclude);
+}
+
+TEST_F(OmahaRequestActionTest, OmahaResponseInstallCannotExcludeCheck) {
+  request_params_.set_is_install(true);
+  request_params_.set_dlc_apps_params(
+      {{request_params_.GetDlcAppId(kDlcId1), {.name = kDlcId1}}});
+  fake_update_response_.dlc_app_update = true;
+  tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+  EXPECT_CALL(mock_excluder_, IsExcluded(_)).WillRepeatedly(Return(false));
+  ASSERT_TRUE(TestUpdateCheck());
+  ASSERT_TRUE(delegate_.omaha_response_);
+  const auto& packages = delegate_.omaha_response_->packages;
+  ASSERT_EQ(packages.size(), 2);
+
+  EXPECT_FALSE(packages[0].can_exclude);
+  EXPECT_FALSE(packages[1].can_exclude);
+}
+
+}  // namespace chromeos_update_engine
diff --git a/cros/omaha_request_builder_xml.cc b/cros/omaha_request_builder_xml.cc
new file mode 100644
index 0000000..6cd9ab8
--- /dev/null
+++ b/cros/omaha_request_builder_xml.cc
@@ -0,0 +1,462 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/cros/omaha_request_builder_xml.h"
+
+#include <ctype.h>
+#include <inttypes.h>
+
+#include <algorithm>
+#include <string>
+
+#include <base/guid.h>
+#include <base/logging.h>
+#include <base/strings/string_number_conversions.h>
+#include <base/strings/string_util.h>
+#include <base/strings/stringprintf.h>
+#include <base/time/time.h>
+
+#include "update_engine/common/constants.h"
+#include "update_engine/common/system_state.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/cros/omaha_request_params.h"
+
+using std::string;
+
+namespace chromeos_update_engine {
+
+const char kNoVersion[] = "0.0.0.0";
+const int kPingNeverPinged = -1;
+const int kPingUnknownValue = -2;
+const int kPingActiveValue = 1;
+const int kPingInactiveValue = 0;
+
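+// Illustrative examples: XmlEncode("a<b&c", &out) sets |out| to
+// "a&lt;b&amp;c" and returns true, while any input containing a byte with the
+// high bit set (non-ASCII) is rejected and the function returns false.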
+bool XmlEncode(const string& input, string* output) {
+  if (std::find_if(input.begin(), input.end(), [](const char c) {
+        return c & 0x80;
+      }) != input.end()) {
+    LOG(WARNING) << "Invalid ASCII-7 string passed to the XML encoder:";
+    utils::HexDumpString(input);
+    return false;
+  }
+  output->clear();
+  // We need at least input.size() space in the output, but the code below will
+  // handle it if we need more.
+  output->reserve(input.size());
+  for (char c : input) {
+    switch (c) {
+      case '\"':
+        output->append("&quot;");
+        break;
+      case '\'':
+        output->append("&apos;");
+        break;
+      case '&':
+        output->append("&amp;");
+        break;
+      case '<':
+        output->append("&lt;");
+        break;
+      case '>':
+        output->append("&gt;");
+        break;
+      default:
+        output->push_back(c);
+    }
+  }
+  return true;
+}
+
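+// Illustrative example: XmlEncodeWithDefault("3 < 4", "") returns "3 &lt; 4";
+// if the input cannot be encoded (non-ASCII), |default_value| is returned
+// unchanged.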
+string XmlEncodeWithDefault(const string& input, const string& default_value) {
+  string output;
+  if (XmlEncode(input, &output))
+    return output;
+  return default_value;
+}
+
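+// Illustrative output: with ping_active_days_ == 5 and ping_roll_call_days_
+// == 10 this returns '<ping active="1" a="5" r="10"></ping>' (indented inside
+// the enclosing <app> element).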
+string OmahaRequestBuilderXml::GetPing() const {
+  // Returns an XML ping element attribute assignment with attribute
+  // |name| and value |ping_days| if |ping_days| has a value that needs
+  // to be sent, or an empty string otherwise.
+  auto GetPingAttribute = [](const char* name, int ping_days) -> string {
+    if (ping_days > 0 || ping_days == kPingNeverPinged)
+      return base::StringPrintf(" %s=\"%d\"", name, ping_days);
+    return "";
+  };
+
+  string ping_active = GetPingAttribute("a", ping_active_days_);
+  string ping_roll_call = GetPingAttribute("r", ping_roll_call_days_);
+  if (!ping_active.empty() || !ping_roll_call.empty()) {
+    return base::StringPrintf("        <ping active=\"1\"%s%s></ping>\n",
+                              ping_active.c_str(),
+                              ping_roll_call.c_str());
+  }
+  return "";
+}
+
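+// Illustrative output: for an active app with ping_date_last_active and
+// ping_date_last_rollcall both 4763 this returns
+// '<ping active="1" ad="4763" rd="4763"></ping>'; for an inactive app only
+// the rd attribute is emitted.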
+string OmahaRequestBuilderXml::GetPingDateBased(
+    const OmahaRequestParams::AppParams& app_params) const {
+  if (!app_params.send_ping)
+    return "";
+  string ping_active = "";
+  string ping_ad = "";
+  if (app_params.ping_active == kPingActiveValue) {
+    ping_active =
+        base::StringPrintf(" active=\"%" PRId64 "\"", app_params.ping_active);
+    ping_ad = base::StringPrintf(" ad=\"%" PRId64 "\"",
+                                 app_params.ping_date_last_active);
+  }
+
+  string ping_rd = base::StringPrintf(" rd=\"%" PRId64 "\"",
+                                      app_params.ping_date_last_rollcall);
+
+  return base::StringPrintf("        <ping%s%s%s></ping>\n",
+                            ping_active.c_str(),
+                            ping_ad.c_str(),
+                            ping_rd.c_str());
+}
+
+string OmahaRequestBuilderXml::GetAppBody(const OmahaAppData& app_data) const {
+  string app_body;
+  if (event_ == nullptr) {
+    if (app_data.app_params.send_ping) {
+      switch (app_data.app_params.active_counting_type) {
+        case OmahaRequestParams::kDayBased:
+          app_body = GetPing();
+          break;
+        case OmahaRequestParams::kDateBased:
+          app_body = GetPingDateBased(app_data.app_params);
+          break;
+        default:
+          NOTREACHED();
+      }
+    }
+    if (!ping_only_) {
+      if (!app_data.skip_update) {
+        const auto* params = SystemState::Get()->request_params();
+        app_body += "        <updatecheck";
+        if (!params->target_version_prefix().empty()) {
+          app_body += base::StringPrintf(
+              " targetversionprefix=\"%s\"",
+              XmlEncodeWithDefault(params->target_version_prefix()).c_str());
+          // Rollback requires target_version_prefix set.
+          if (params->rollback_allowed()) {
+            app_body += " rollback_allowed=\"true\"";
+          }
+        }
+        if (!params->lts_tag().empty()) {
+          app_body += base::StringPrintf(
+              " ltstag=\"%s\"",
+              XmlEncodeWithDefault(params->lts_tag()).c_str());
+        }
+        app_body += "></updatecheck>\n";
+      }
+
+      // If this is the first update check after a reboot following a previous
+      // update, generate an event containing the previous version number. If
+      // the previous version preference file doesn't exist the event is still
+      // generated with a previous version of 0.0.0.0 -- this is relevant for
+      // older clients or new installs. The previous version event is not sent
+      // for ping-only requests because they come before the client has
+      // rebooted. The previous version event is also not sent if it was already
+      // sent for this new version with a previous updatecheck.
+      auto* prefs = SystemState::Get()->prefs();
+      string prev_version;
+      if (!prefs->GetString(kPrefsPreviousVersion, &prev_version)) {
+        prev_version = kNoVersion;
+      }
+      // We only store a non-empty previous version value after a successful
+      // update in the previous boot. After reporting it back to the server,
+      // we clear the previous version value so it doesn't get reported again.
+      if (!prev_version.empty()) {
+        app_body += base::StringPrintf(
+            "        <event eventtype=\"%d\" eventresult=\"%d\" "
+            "previousversion=\"%s\"></event>\n",
+            OmahaEvent::kTypeRebootedAfterUpdate,
+            OmahaEvent::kResultSuccess,
+            XmlEncodeWithDefault(prev_version, kNoVersion).c_str());
+        LOG_IF(WARNING, !prefs->SetString(kPrefsPreviousVersion, ""))
+            << "Unable to reset the previous version.";
+      }
+    }
+  } else {
+    int event_result = event_->result;
+    // The error code is an optional attribute so append it only if the result
+    // is not success.
+    string error_code;
+    if (event_result != OmahaEvent::kResultSuccess) {
+      error_code = base::StringPrintf(" errorcode=\"%d\"",
+                                      static_cast<int>(event_->error_code));
+    } else if (app_data.is_dlc && !app_data.app_params.updated) {
+      // On a |OmahaEvent::kResultSuccess|, if the event is for an update
+      // completion and the App is a DLC, send error for excluded DLCs as they
+      // did not update.
+      event_result = OmahaEvent::Result::kResultError;
+      error_code = base::StringPrintf(
+          " errorcode=\"%d\"",
+          static_cast<int>(ErrorCode::kPackageExcludedFromUpdate));
+    }
+    app_body = base::StringPrintf(
+        "        <event eventtype=\"%d\" eventresult=\"%d\"%s></event>\n",
+        event_->type,
+        event_result,
+        error_code.c_str());
+  }
+
+  return app_body;
+}
+
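+// Illustrative output (hypothetical pref value): if the cohort name pref
+// holds "some-cohort-name", GetCohortArg("cohortname", kPrefsOmahaCohortName)
+// returns 'cohortname="some-cohort-name" ', including the trailing space that
+// separates it from the next <app> attribute.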
+string OmahaRequestBuilderXml::GetCohortArg(
+    const string& arg_name,
+    const string& prefs_key,
+    const string& override_value) const {
+  string cohort_value;
+  if (!override_value.empty()) {
+    // |override_value| takes precedence over the pref value.
+    cohort_value = override_value;
+  } else {
+    // There's nothing wrong with not having a given cohort setting, so we check
+    // existence first to avoid the warning log message.
+    const auto* prefs = SystemState::Get()->prefs();
+    if (!prefs->Exists(prefs_key))
+      return "";
+    if (!prefs->GetString(prefs_key, &cohort_value) || cohort_value.empty())
+      return "";
+  }
+  // This is a validity check to avoid sending a huge XML file back to Omaha
+  // due to a compromised stateful partition making the update check fail in
+  // low network environments even after a reboot.
+  if (cohort_value.size() > 1024) {
+    LOG(WARNING) << "The omaha cohort setting " << arg_name
+                 << " has a too big value, which must be an error or an "
+                    "attacker trying to inhibit updates.";
+    return "";
+  }
+
+  string escaped_xml_value;
+  if (!XmlEncode(cohort_value, &escaped_xml_value)) {
+    LOG(WARNING) << "The omaha cohort setting " << arg_name
+                 << " is ASCII-7 invalid, ignoring it.";
+    return "";
+  }
+
+  return base::StringPrintf(
+      "%s=\"%s\" ", arg_name.c_str(), escaped_xml_value.c_str());
+}
+
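+// Illustrative examples: "o.bundle" and "epson-inkjet_1.0" are accepted,
+// while ids containing characters such as '=', '/' or spaces are rejected.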
+bool IsValidComponentID(const string& id) {
+  for (char c : id) {
+    if (!isalnum(c) && c != '-' && c != '_' && c != '.')
+      return false;
+  }
+  return true;
+}
+
+string OmahaRequestBuilderXml::GetApp(const OmahaAppData& app_data) const {
+  string app_body = GetAppBody(app_data);
+  string app_versions;
+  const auto* params = SystemState::Get()->request_params();
+
+  // If we are downgrading to a more stable channel and we are allowed to do
+  // powerwash, then pass 0.0.0.0 as the version. This is needed to get the
+  // highest-versioned payload on the destination channel.
+  if (params->ShouldPowerwash()) {
+    LOG(INFO) << "Passing OS version as 0.0.0.0 as we are set to powerwash "
+              << "on downgrading to the version in the more stable channel";
+    app_versions = "version=\"" + string(kNoVersion) + "\" from_version=\"" +
+                   XmlEncodeWithDefault(app_data.version, kNoVersion) + "\" ";
+  } else {
+    app_versions = "version=\"" +
+                   XmlEncodeWithDefault(app_data.version, kNoVersion) + "\" ";
+  }
+
+  string download_channel = params->download_channel();
+  string app_channels =
+      "track=\"" + XmlEncodeWithDefault(download_channel) + "\" ";
+  if (params->current_channel() != download_channel) {
+    app_channels += "from_track=\"" +
+                    XmlEncodeWithDefault(params->current_channel()) + "\" ";
+  }
+
+  string delta_okay_str =
+      params->delta_okay() && !params->is_install() ? "true" : "false";
+
+  // If install_date_in_days_ is not set (i.e. its value is -1), don't
+  // include the attribute.
+  string install_date_in_days_str = "";
+  if (install_date_in_days_ >= 0) {
+    install_date_in_days_str =
+        base::StringPrintf("installdate=\"%d\" ", install_date_in_days_);
+  }
+
+  string app_cohort_args;
+  string cohort_key = kPrefsOmahaCohort;
+  string cohortname_key = kPrefsOmahaCohortName;
+  string cohorthint_key = kPrefsOmahaCohortHint;
+
+  // Override the cohort keys for DLC App IDs.
+  const auto& dlc_apps_params = params->dlc_apps_params();
+  auto itr = dlc_apps_params.find(app_data.id);
+  if (itr != dlc_apps_params.end()) {
+    auto dlc_id = itr->second.name;
+    const auto* prefs = SystemState::Get()->prefs();
+    cohort_key =
+        prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsOmahaCohort});
+    cohortname_key =
+        prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsOmahaCohortName});
+    cohorthint_key =
+        prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsOmahaCohortHint});
+  }
+
+  app_cohort_args += GetCohortArg("cohort", cohort_key);
+  app_cohort_args += GetCohortArg("cohortname", cohortname_key);
+  // Policy provided value overrides pref.
+  app_cohort_args +=
+      GetCohortArg("cohorthint",
+                   cohorthint_key,
+                   params->autoupdate_token() /* override_value */);
+
+  string fingerprint_arg;
+  if (!params->os_build_fingerprint().empty()) {
+    fingerprint_arg = "fingerprint=\"" +
+                      XmlEncodeWithDefault(params->os_build_fingerprint()) +
+                      "\" ";
+  }
+
+  string buildtype_arg;
+  if (!params->os_build_type().empty()) {
+    buildtype_arg = "os_build_type=\"" +
+                    XmlEncodeWithDefault(params->os_build_type()) + "\" ";
+  }
+
+  string product_components_args;
+  if (!params->ShouldPowerwash() && !app_data.product_components.empty()) {
+    brillo::KeyValueStore store;
+    if (store.LoadFromString(app_data.product_components)) {
+      for (const string& key : store.GetKeys()) {
+        if (!IsValidComponentID(key)) {
+          LOG(ERROR) << "Invalid component id: " << key;
+          continue;
+        }
+        string version;
+        if (!store.GetString(key, &version)) {
+          LOG(ERROR) << "Failed to get version for " << key
+                     << " in product_components.";
+          continue;
+        }
+        product_components_args +=
+            base::StringPrintf("_%s.version=\"%s\" ",
+                               key.c_str(),
+                               XmlEncodeWithDefault(version).c_str());
+      }
+    } else {
+      LOG(ERROR) << "Failed to parse product_components:\n"
+                 << app_data.product_components;
+    }
+  }
+
+  string requisition_arg;
+  if (!params->device_requisition().empty()) {
+    requisition_arg = "requisition=\"" +
+                      XmlEncodeWithDefault(params->device_requisition()) +
+                      "\" ";
+  }
+
+  // clang-format off
+  string app_xml = "    <app "
+      "appid=\"" + XmlEncodeWithDefault(app_data.id) + "\" " +
+      app_cohort_args +
+      app_versions +
+      app_channels +
+      product_components_args +
+      fingerprint_arg +
+      buildtype_arg +
+      "board=\"" + XmlEncodeWithDefault(params->os_board()) + "\" " +
+      "hardware_class=\"" + XmlEncodeWithDefault(params->hwid()) + "\" " +
+      "delta_okay=\"" + delta_okay_str + "\" " +
+      install_date_in_days_str +
+
+      // lang and requisition are excluded for DLC apps (both installs and
+      // updates).
+      (app_data.is_dlc ? "" :
+      "lang=\"" + XmlEncodeWithDefault(params->app_lang(), "en-US") + "\" " +
+      requisition_arg) +
+
+      ">\n" +
+         app_body +
+      "    </app>\n";
+  // clang-format on
+  return app_xml;
+}
+
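+// Illustrative sketch only (the values below are hypothetical): GetOs()
+// returns a single element such as
+//   <os version="Indy" platform="Chrome OS" sp="13099.0.0_x86_64"></os>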
+string OmahaRequestBuilderXml::GetOs() const {
+  const auto* params = SystemState::Get()->request_params();
+  string os_xml =
+      "    <os "
+      "version=\"" +
+      XmlEncodeWithDefault(params->os_version()) + "\" " + "platform=\"" +
+      XmlEncodeWithDefault(params->os_platform()) + "\" " + "sp=\"" +
+      XmlEncodeWithDefault(params->os_sp()) +
+      "\">"
+      "</os>\n";
+  return os_xml;
+}
+
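+// Illustrative sketch only (attribute values are placeholders): the assembled
+// request is roughly of the form
+//   <?xml version="1.0" encoding="UTF-8"?>
+//   <request requestid="<guid>" sessionid="<guid>" protocol="3.0"
+//            updater="<updater-id>" updaterversion="<updater-version>"
+//            installsource="scheduler" ismachine="1">
+//     <os ...></os>
+//     <app ...> ... </app>
+//   </request>
+// installsource is "ondemandupdate" instead of "scheduler" for interactive
+// update checks.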
+string OmahaRequestBuilderXml::GetRequest() const {
+  const auto* params = SystemState::Get()->request_params();
+  string os_xml = GetOs();
+  string app_xml = GetApps();
+
+  string request_xml = base::StringPrintf(
+      "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
+      "<request requestid=\"%s\" sessionid=\"%s\""
+      " protocol=\"3.0\" updater=\"%s\" updaterversion=\"%s\""
+      " installsource=\"%s\" ismachine=\"1\">\n%s%s</request>\n",
+      base::GenerateGUID().c_str() /* requestid */,
+      session_id_.c_str(),
+      constants::kOmahaUpdaterID,
+      kOmahaUpdaterVersion,
+      params->interactive() ? "ondemandupdate" : "scheduler",
+      os_xml.c_str(),
+      app_xml.c_str());
+
+  return request_xml;
+}
+
+string OmahaRequestBuilderXml::GetApps() const {
+  const auto* params = SystemState::Get()->request_params();
+  string app_xml = "";
+  OmahaAppData product_app = {
+      .id = params->GetAppId(),
+      .version = params->app_version(),
+      .product_components = params->product_components(),
+      // Skip the updatecheck for the platform app during install operations.
+      .skip_update = params->is_install(),
+      .is_dlc = false,
+
+      .app_params = {.active_counting_type = OmahaRequestParams::kDayBased,
+                     .send_ping = include_ping_}};
+  app_xml += GetApp(product_app);
+  for (const auto& it : params->dlc_apps_params()) {
+    OmahaAppData dlc_app_data = {
+        .id = it.first,
+        .version = params->is_install() ? kNoVersion : params->app_version(),
+        .skip_update = false,
+        .is_dlc = true,
+        .app_params = it.second};
+    app_xml += GetApp(dlc_app_data);
+  }
+  return app_xml;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/cros/omaha_request_builder_xml.h b/cros/omaha_request_builder_xml.h
new file mode 100644
index 0000000..7c246f7
--- /dev/null
+++ b/cros/omaha_request_builder_xml.h
@@ -0,0 +1,192 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_CROS_OMAHA_REQUEST_BUILDER_XML_H_
+#define UPDATE_ENGINE_CROS_OMAHA_REQUEST_BUILDER_XML_H_
+
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <gtest/gtest_prod.h>  // for FRIEND_TEST
+
+#include <brillo/secure_blob.h>
+#include <curl/curl.h>
+
+#include "update_engine/common/action.h"
+#include "update_engine/common/http_fetcher.h"
+#include "update_engine/cros/omaha_request_params.h"
+#include "update_engine/cros/omaha_response.h"
+
+namespace chromeos_update_engine {
+
+extern const char kNoVersion[];
+extern const int kPingNeverPinged;
+extern const int kPingUnknownValue;
+extern const int kPingActiveValue;
+extern const int kPingInactiveValue;
+
+// This struct encapsulates the Omaha event information. For a
+// complete list of defined event types and results, see
+// http://code.google.com/p/omaha/wiki/ServerProtocol#event
+struct OmahaEvent {
+  // The Type values correspond to EVENT_TYPE values of Omaha.
+  enum Type {
+    kTypeUnknown = 0,
+    kTypeDownloadComplete = 1,
+    kTypeInstallComplete = 2,
+    kTypeUpdateComplete = 3,
+    kTypeUpdateDownloadStarted = 13,
+    kTypeUpdateDownloadFinished = 14,
+    // Chromium OS reserved type, sent after the first reboot following a
+    // completed update.
+    kTypeRebootedAfterUpdate = 54,
+  };
+
+  // The Result values correspond to EVENT_RESULT values of Omaha.
+  enum Result {
+    kResultError = 0,
+    kResultSuccess = 1,
+    kResultUpdateDeferred = 9,  // When we ignore/defer updates due to policy.
+  };
+
+  OmahaEvent()
+      : type(kTypeUnknown),
+        result(kResultError),
+        error_code(ErrorCode::kError) {}
+  explicit OmahaEvent(Type in_type)
+      : type(in_type),
+        result(kResultSuccess),
+        error_code(ErrorCode::kSuccess) {}
+  OmahaEvent(Type in_type, Result in_result, ErrorCode in_error_code)
+      : type(in_type), result(in_result), error_code(in_error_code) {}
+
+  Type type;
+  Result result;
+  ErrorCode error_code;
+};
+
+struct OmahaAppData {
+  std::string id;
+  std::string version;
+  std::string product_components;
+  bool skip_update;
+  bool is_dlc;
+  OmahaRequestParams::AppParams app_params;
+};
+
+// Encodes XML entities in the given string. The input must be valid ASCII-7;
+// if it is not, |default_value| is returned instead.
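+// For example, XmlEncodeWithDefault("<&>", "N/A") returns "&lt;&amp;&gt;",
+// while XmlEncodeWithDefault("\xc2", "N/A") returns "N/A" (see the unit tests
+// for more cases).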
+std::string XmlEncodeWithDefault(const std::string& input,
+                                 const std::string& default_value = "");
+
+// Escapes text so it can be included as character data or an attribute value.
+// The |input| string must be valid ASCII-7; UTF-8 is not supported. Returns
+// whether |input| was valid and properly escaped into |output|.
+bool XmlEncode(const std::string& input, std::string* output);
+
+// Returns true if every character of |id| is valid for a component ID, i.e.
+// alphanumeric or one of '-', '_' and '.'.
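+// For example, IsValidComponentID("sample-component_1.0") returns true, while
+// IsValidComponentID("bad component!") returns false.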
+bool IsValidComponentID(const std::string& id);
+
+class OmahaRequestBuilder {
+ public:
+  OmahaRequestBuilder() = default;
+  virtual ~OmahaRequestBuilder() = default;
+
+  virtual std::string GetRequest() const = 0;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(OmahaRequestBuilder);
+};
+
+class OmahaRequestBuilderXml : OmahaRequestBuilder {
+ public:
+  OmahaRequestBuilderXml(const OmahaEvent* event,
+                         bool ping_only,
+                         bool include_ping,
+                         int ping_active_days,
+                         int ping_roll_call_days,
+                         int install_date_in_days,
+                         const std::string& session_id)
+      : event_(event),
+        ping_only_(ping_only),
+        include_ping_(include_ping),
+        ping_active_days_(ping_active_days),
+        ping_roll_call_days_(ping_roll_call_days),
+        install_date_in_days_(install_date_in_days),
+        session_id_(session_id) {}
+
+  ~OmahaRequestBuilderXml() override = default;
+
+  // Returns the XML that corresponds to the entire Omaha request.
+  std::string GetRequest() const override;
+
+ private:
+  FRIEND_TEST(OmahaRequestBuilderXmlTest, PlatformGetAppTest);
+  FRIEND_TEST(OmahaRequestBuilderXmlTest, DlcGetAppTest);
+
+  // Returns the XML that corresponds to the entire <os> node of the Omaha
+  // request, based on the current request parameters.
+  std::string GetOs() const;
+
+  // Returns the XML that corresponds to all <app> nodes of the Omaha
+  // request, based on the current request parameters.
+  std::string GetApps() const;
+
+  // Returns the XML that corresponds to a single <app> node of the Omaha
+  // request, based on the given |app_data|.
+  std::string GetApp(const OmahaAppData& app_data) const;
+
+  // Returns the XML that goes into the body of the <app> element of the Omaha
+  // request, based on the given |app_data|.
+  std::string GetAppBody(const OmahaAppData& app_data) const;
+
+  // Returns the cohort* argument to include in the <app> tag for the passed
+  // |arg_name| and |prefs_key|, if any. The return value is suitable to
+  // concatenate to the list of arguments and includes a space at the end.
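+  // For example, with a hypothetical stored value of "testcohort",
+  // GetCohortArg("cohort", kPrefsOmahaCohort) would return the string
+  // 'cohort="testcohort" ' (note the trailing space); an empty string is
+  // returned when the pref is unset and no |override_value| is given.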
+  std::string GetCohortArg(const std::string& arg_name,
+                           const std::string& prefs_key,
+                           const std::string& override_value = "") const;
+
+  // Returns an XML ping element if any of the elapsed days need to be
+  // sent, or an empty string otherwise.
+  std::string GetPing() const;
+
+  // Returns an XML ping element based on the given |app_params| if any of the
+  // ping dates need to be sent, or an empty string otherwise.
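+  // As exercised in the unit tests, a date-based ping element starts with,
+  // e.g., <ping active="1" ad="25" rd="36">; attributes are only included
+  // when the corresponding values need to be sent.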
+  std::string GetPingDateBased(
+      const OmahaRequestParams::AppParams& app_params) const;
+
+  const OmahaEvent* event_;
+  bool ping_only_;
+  bool include_ping_;
+  int ping_active_days_;
+  int ping_roll_call_days_;
+  int install_date_in_days_;
+  std::string session_id_;
+
+  DISALLOW_COPY_AND_ASSIGN(OmahaRequestBuilderXml);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_CROS_OMAHA_REQUEST_BUILDER_XML_H_
diff --git a/cros/omaha_request_builder_xml_unittest.cc b/cros/omaha_request_builder_xml_unittest.cc
new file mode 100644
index 0000000..76a7241
--- /dev/null
+++ b/cros/omaha_request_builder_xml_unittest.cc
@@ -0,0 +1,421 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/cros/omaha_request_builder_xml.h"
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <base/guid.h>
+#include <base/strings/stringprintf.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/cros/fake_system_state.h"
+
+using std::pair;
+using std::string;
+using std::vector;
+using testing::_;
+using testing::DoAll;
+using testing::Return;
+using testing::SetArgPointee;
+
+namespace chromeos_update_engine {
+
+namespace {
+// Helper to find |key| and extract its value from the given string |xml|,
+// instead of using a full XML parser. The attribute key is expected to be
+// followed by "=\"" as XML attribute values must be within double quotes (not
+// single quotes).
+static string FindAttributeKeyValueInXml(const string& xml,
+                                         const string& key,
+                                         const size_t val_size) {
+  string key_with_quotes = key + "=\"";
+  const size_t val_start_pos = xml.find(key_with_quotes);
+  if (val_start_pos == string::npos)
+    return "";
+  return xml.substr(val_start_pos + key_with_quotes.size(), val_size);
+}
+// Helper to find the count of substring in a string.
+static size_t CountSubstringInString(const string& str, const string& substr) {
+  size_t count = 0, pos = 0;
+  while ((pos = str.find(substr, pos ? pos + 1 : 0)) != string::npos)
+    ++count;
+  return count;
+}
+}  // namespace
+
+class OmahaRequestBuilderXmlTest : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    FakeSystemState::CreateInstance();
+    FakeSystemState::Get()->set_request_params(&params_);
+  }
+  void TearDown() override {}
+
+  static constexpr size_t kGuidSize = 36;
+
+  OmahaRequestParams params_;
+};
+
+TEST_F(OmahaRequestBuilderXmlTest, XmlEncodeTest) {
+  string output;
+  vector<pair<string, string>> xml_encode_pairs = {
+      {"ab", "ab"},
+      {"a<b", "a&lt;b"},
+      {"<&>\"\'\\", "&lt;&amp;&gt;&quot;&apos;\\"},
+      {"&lt;&amp;&gt;", "&amp;lt;&amp;amp;&amp;gt;"}};
+  for (const auto& xml_encode_pair : xml_encode_pairs) {
+    const auto& before_encoding = xml_encode_pair.first;
+    const auto& after_encoding = xml_encode_pair.second;
+    EXPECT_TRUE(XmlEncode(before_encoding, &output));
+    EXPECT_EQ(after_encoding, output);
+  }
+  // Check that unterminated UTF-8 strings are handled properly.
+  EXPECT_FALSE(XmlEncode("\xc2", &output));
+  // Fail with invalid ASCII-7 chars.
+  EXPECT_FALSE(XmlEncode("This is an 'n' with a tilde: \xc3\xb1", &output));
+}
+
+TEST_F(OmahaRequestBuilderXmlTest, XmlEncodeWithDefaultTest) {
+  EXPECT_EQ("", XmlEncodeWithDefault(""));
+  EXPECT_EQ("&lt;&amp;&gt;", XmlEncodeWithDefault("<&>", "something else"));
+  EXPECT_EQ("<not escaped>", XmlEncodeWithDefault("\xc2", "<not escaped>"));
+}
+
+TEST_F(OmahaRequestBuilderXmlTest, PlatformGetAppTest) {
+  params_.set_device_requisition("device requisition");
+  OmahaRequestBuilderXml omaha_request{nullptr,
+                                       false,
+                                       false,
+                                       0,
+                                       0,
+                                       0,
+                                       ""};
+  OmahaAppData platform_app_data = {
+      .id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
+      .version = "",
+      .skip_update = false,
+      .is_dlc = false};
+
+  // Verify that the attributes that shouldn't be missing for the platform
+  // App ID are in fact present in the <app ...></app>.
+  const string app = omaha_request.GetApp(platform_app_data);
+  EXPECT_NE(string::npos, app.find("lang="));
+  EXPECT_NE(string::npos, app.find("requisition="));
+}
+
+TEST_F(OmahaRequestBuilderXmlTest, DlcGetAppTest) {
+  params_.set_device_requisition("device requisition");
+  OmahaRequestBuilderXml omaha_request{nullptr,
+                                       false,
+                                       false,
+                                       0,
+                                       0,
+                                       0,
+                                       ""};
+  OmahaAppData dlc_app_data = {
+      .id = "_dlc_id", .version = "", .skip_update = false, .is_dlc = true};
+
+  // Verify that the attributes that should be missing for DLC AppIDs are in
+  // fact not present in the <app ...></app>.
+  const string app = omaha_request.GetApp(dlc_app_data);
+  EXPECT_EQ(string::npos, app.find("lang="));
+  EXPECT_EQ(string::npos, app.find("requisition="));
+}
+
+TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlRequestIdTest) {
+  OmahaRequestBuilderXml omaha_request{nullptr,
+                                       false,
+                                       false,
+                                       0,
+                                       0,
+                                       0,
+                                       ""};
+  const string request_xml = omaha_request.GetRequest();
+  const string key = "requestid";
+  const string request_id =
+      FindAttributeKeyValueInXml(request_xml, key, kGuidSize);
+  // A valid |request_id| is either a GUID version 4 or empty string.
+  if (!request_id.empty())
+    EXPECT_TRUE(base::IsValidGUID(request_id));
+}
+
+TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlSessionIdTest) {
+  const string gen_session_id = base::GenerateGUID();
+  OmahaRequestBuilderXml omaha_request{nullptr,
+                                       false,
+                                       false,
+                                       0,
+                                       0,
+                                       0,
+                                       gen_session_id};
+  const string request_xml = omaha_request.GetRequest();
+  const string key = "sessionid";
+  const string session_id =
+      FindAttributeKeyValueInXml(request_xml, key, kGuidSize);
+  // A valid |session_id| is either a GUID version 4 or empty string.
+  if (!session_id.empty()) {
+    EXPECT_TRUE(base::IsValidGUID(session_id));
+  }
+  EXPECT_EQ(gen_session_id, session_id);
+}
+
+TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlPlatformUpdateTest) {
+  OmahaRequestBuilderXml omaha_request{nullptr,
+                                       false,
+                                       false,
+                                       0,
+                                       0,
+                                       0,
+                                       ""};
+  const string request_xml = omaha_request.GetRequest();
+  EXPECT_EQ(1, CountSubstringInString(request_xml, "<updatecheck"))
+      << request_xml;
+}
+
+TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlPlatformUpdateWithDlcsTest) {
+  params_.set_dlc_apps_params(
+      {{params_.GetDlcAppId("dlc_no_0"), {.name = "dlc_no_0"}},
+       {params_.GetDlcAppId("dlc_no_1"), {.name = "dlc_no_1"}}});
+  OmahaRequestBuilderXml omaha_request{nullptr,
+                                       false,
+                                       false,
+                                       0,
+                                       0,
+                                       0,
+                                       ""};
+  const string request_xml = omaha_request.GetRequest();
+  EXPECT_EQ(3, CountSubstringInString(request_xml, "<updatecheck"))
+      << request_xml;
+}
+
+TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlDlcInstallationTest) {
+  const std::map<std::string, OmahaRequestParams::AppParams> dlcs = {
+      {params_.GetDlcAppId("dlc_no_0"), {.name = "dlc_no_0"}},
+      {params_.GetDlcAppId("dlc_no_1"), {.name = "dlc_no_1"}}};
+  params_.set_dlc_apps_params(dlcs);
+  params_.set_is_install(true);
+  OmahaRequestBuilderXml omaha_request{nullptr,
+                                       false,
+                                       false,
+                                       0,
+                                       0,
+                                       0,
+                                       ""};
+  const string request_xml = omaha_request.GetRequest();
+  EXPECT_EQ(2, CountSubstringInString(request_xml, "<updatecheck"))
+      << request_xml;
+
+  auto FindAppId = [request_xml](size_t pos) -> size_t {
+    return request_xml.find("<app appid", pos);
+  };
+  // Skip over the Platform AppID, which is always first.
+  size_t pos = FindAppId(0);
+  for (auto&& _ : dlcs) {
+    (void)_;
+    EXPECT_NE(string::npos, (pos = FindAppId(pos + 1))) << request_xml;
+    const string dlc_app_id_version = FindAttributeKeyValueInXml(
+        request_xml.substr(pos), "version", string(kNoVersion).size());
+    EXPECT_EQ(kNoVersion, dlc_app_id_version);
+
+    const string false_str = "false";
+    const string dlc_app_id_delta_okay = FindAttributeKeyValueInXml(
+        request_xml.substr(pos), "delta_okay", false_str.length());
+    EXPECT_EQ(false_str, dlc_app_id_delta_okay);
+  }
+}
+
+TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlDlcNoPing) {
+  params_.set_dlc_apps_params(
+      {{params_.GetDlcAppId("dlc_no_0"), {.name = "dlc_no_0"}}});
+  OmahaRequestBuilderXml omaha_request{nullptr,
+                                       false,
+                                       false,
+                                       0,
+                                       0,
+                                       0,
+                                       ""};
+  const string request_xml = omaha_request.GetRequest();
+  EXPECT_EQ(0, CountSubstringInString(request_xml, "<ping")) << request_xml;
+}
+
+TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlDlcPingRollCallNoActive) {
+  params_.set_dlc_apps_params(
+      {{params_.GetDlcAppId("dlc_no_0"),
+        {.active_counting_type = OmahaRequestParams::kDateBased,
+         .name = "dlc_no_0",
+         .ping_date_last_active = 25,
+         .ping_date_last_rollcall = 36,
+         .send_ping = true}}});
+  OmahaRequestBuilderXml omaha_request{nullptr,
+                                       false,
+                                       false,
+                                       0,
+                                       0,
+                                       0,
+                                       ""};
+  const string request_xml = omaha_request.GetRequest();
+  EXPECT_EQ(1, CountSubstringInString(request_xml, "<ping rd=\"36\""))
+      << request_xml;
+}
+
+TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlDlcPingRollCallAndActive) {
+  params_.set_dlc_apps_params(
+      {{params_.GetDlcAppId("dlc_no_0"),
+        {.active_counting_type = OmahaRequestParams::kDateBased,
+         .name = "dlc_no_0",
+         .ping_active = 1,
+         .ping_date_last_active = 25,
+         .ping_date_last_rollcall = 36,
+         .send_ping = true}}});
+  OmahaRequestBuilderXml omaha_request{nullptr,
+                                       false,
+                                       false,
+                                       0,
+                                       0,
+                                       0,
+                                       ""};
+  const string request_xml = omaha_request.GetRequest();
+  EXPECT_EQ(1,
+            CountSubstringInString(request_xml,
+                                   "<ping active=\"1\" ad=\"25\" rd=\"36\""))
+      << request_xml;
+}
+
+TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlUpdateCompleteEvent) {
+  OmahaEvent event(OmahaEvent::kTypeUpdateComplete);
+  OmahaRequestBuilderXml omaha_request{&event,
+                                       false,
+                                       false,
+                                       0,
+                                       0,
+                                       0,
+                                       ""};
+  const string request_xml = omaha_request.GetRequest();
+  LOG(INFO) << request_xml;
+  EXPECT_EQ(
+      1,
+      CountSubstringInString(
+          request_xml, "<event eventtype=\"3\" eventresult=\"1\"></event>"))
+      << request_xml;
+}
+
+TEST_F(OmahaRequestBuilderXmlTest,
+       GetRequestXmlUpdateCompleteEventSomeDlcsExcluded) {
+  params_.set_dlc_apps_params({
+      {params_.GetDlcAppId("dlc_1"), {.updated = true}},
+      {params_.GetDlcAppId("dlc_2"), {.updated = false}},
+  });
+  OmahaEvent event(OmahaEvent::kTypeUpdateComplete);
+  OmahaRequestBuilderXml omaha_request{&event,
+                                       false,
+                                       false,
+                                       0,
+                                       0,
+                                       0,
+                                       ""};
+  const string request_xml = omaha_request.GetRequest();
+  EXPECT_EQ(
+      2,
+      CountSubstringInString(
+          request_xml, "<event eventtype=\"3\" eventresult=\"1\"></event>"))
+      << request_xml;
+  EXPECT_EQ(
+      1,
+      CountSubstringInString(
+          request_xml,
+          "<event eventtype=\"3\" eventresult=\"0\" errorcode=\"62\"></event>"))
+      << request_xml;
+}
+
+TEST_F(OmahaRequestBuilderXmlTest,
+       GetRequestXmlUpdateCompleteEventAllDlcsExcluded) {
+  params_.set_dlc_apps_params({
+      {params_.GetDlcAppId("dlc_1"), {.updated = false}},
+      {params_.GetDlcAppId("dlc_2"), {.updated = false}},
+  });
+  OmahaEvent event(OmahaEvent::kTypeUpdateComplete);
+  OmahaRequestBuilderXml omaha_request{&event,
+                                       false,
+                                       false,
+                                       0,
+                                       0,
+                                       0,
+                                       ""};
+  const string request_xml = omaha_request.GetRequest();
+  EXPECT_EQ(
+      1,
+      CountSubstringInString(
+          request_xml, "<event eventtype=\"3\" eventresult=\"1\"></event>"))
+      << request_xml;
+  EXPECT_EQ(
+      2,
+      CountSubstringInString(
+          request_xml,
+          "<event eventtype=\"3\" eventresult=\"0\" errorcode=\"62\"></event>"))
+      << request_xml;
+}
+
+TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlDlcCohortMissingCheck) {
+  constexpr char kDlcId[] = "test-dlc-id";
+  params_.set_dlc_apps_params(
+      {{params_.GetDlcAppId(kDlcId), {.name = kDlcId}}});
+  OmahaEvent event(OmahaEvent::kTypeUpdateDownloadStarted);
+  OmahaRequestBuilderXml omaha_request{&event, false, false, 0, 0, 0, ""};
+  const string request_xml = omaha_request.GetRequest();
+
+  // Check that no cohorts are in the request.
+  EXPECT_EQ(0, CountSubstringInString(request_xml, "cohort=")) << request_xml;
+  EXPECT_EQ(0, CountSubstringInString(request_xml, "cohortname="))
+      << request_xml;
+  EXPECT_EQ(0, CountSubstringInString(request_xml, "cohorthint="))
+      << request_xml;
+}
+
+TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlDlcCohortCheck) {
+  const string kDlcId = "test-dlc-id";
+  params_.set_dlc_apps_params(
+      {{params_.GetDlcAppId(kDlcId), {.name = kDlcId}}});
+  auto* fake_prefs = FakeSystemState::Get()->fake_prefs();
+  OmahaEvent event(OmahaEvent::kTypeUpdateDownloadStarted);
+  OmahaRequestBuilderXml omaha_request{&event, false, false, 0, 0, 0, ""};
+  // DLC App ID Expectations.
+  const string dlc_cohort_key = PrefsInterface::CreateSubKey(
+      {kDlcPrefsSubDir, kDlcId, kPrefsOmahaCohort});
+  const string kDlcCohortVal = "test-cohort";
+  EXPECT_TRUE(fake_prefs->SetString(dlc_cohort_key, kDlcCohortVal));
+  const string dlc_cohort_name_key = PrefsInterface::CreateSubKey(
+      {kDlcPrefsSubDir, kDlcId, kPrefsOmahaCohortName});
+  const string kDlcCohortNameVal = "test-cohortname";
+  EXPECT_TRUE(fake_prefs->SetString(dlc_cohort_name_key, kDlcCohortNameVal));
+  const string dlc_cohort_hint_key = PrefsInterface::CreateSubKey(
+      {kDlcPrefsSubDir, kDlcId, kPrefsOmahaCohortHint});
+  const string kDlcCohortHintVal = "test-cohortval";
+  EXPECT_TRUE(fake_prefs->SetString(dlc_cohort_hint_key, kDlcCohortHintVal));
+  const string request_xml = omaha_request.GetRequest();
+
+  EXPECT_EQ(1,
+            CountSubstringInString(
+                request_xml,
+                base::StringPrintf(
+                    "cohort=\"%s\" cohortname=\"%s\" cohorthint=\"%s\"",
+                    kDlcCohortVal.c_str(),
+                    kDlcCohortNameVal.c_str(),
+                    kDlcCohortHintVal.c_str())))
+      << request_xml;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/omaha_request_params.cc b/cros/omaha_request_params.cc
similarity index 69%
rename from omaha_request_params.cc
rename to cros/omaha_request_params.cc
index 8c410f1..adcfc75 100644
--- a/omaha_request_params.cc
+++ b/cros/omaha_request_params.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/omaha_request_params.h"
+#include "update_engine/cros/omaha_request_params.h"
 
 #include <errno.h>
 #include <fcntl.h>
@@ -25,6 +25,7 @@
 #include <vector>
 
 #include <base/files/file_util.h>
+#include <base/stl_util.h>
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
 #include <brillo/key_value_store.h>
@@ -34,14 +35,14 @@
 #include "update_engine/common/constants.h"
 #include "update_engine/common/hardware_interface.h"
 #include "update_engine/common/platform_constants.h"
+#include "update_engine/common/system_state.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/system_state.h"
+#include "update_engine/update_manager/policy.h"
 
 #define CALL_MEMBER_FN(object, member) ((object).*(member))
 
-using std::map;
+using chromeos_update_manager::UpdateCheckParams;
 using std::string;
-using std::vector;
 
 namespace chromeos_update_engine {
 
@@ -60,14 +61,14 @@
     test::SetImagePropertiesRootPrefix(nullptr);
 }
 
-bool OmahaRequestParams::Init(const string& in_app_version,
-                              const string& in_update_url,
-                              bool in_interactive) {
+bool OmahaRequestParams::Init(const string& app_version,
+                              const string& update_url,
+                              const UpdateCheckParams& params) {
   LOG(INFO) << "Initializing parameters for this update attempt";
-  image_props_ = LoadImageProperties(system_state_);
-  mutable_image_props_ = LoadMutableImageProperties(system_state_);
+  image_props_ = LoadImageProperties();
+  mutable_image_props_ = LoadMutableImageProperties();
 
-  // Sanity check the channel names.
+  // Validate the channel names.
   if (!IsValidChannel(image_props_.current_channel))
     image_props_.current_channel = "stable-channel";
   if (!IsValidChannel(mutable_image_props_.target_channel))
@@ -77,24 +78,14 @@
   LOG(INFO) << "Running from channel " << image_props_.current_channel;
 
   os_platform_ = constants::kOmahaPlatformName;
-  if (!image_props_.system_version.empty()) {
-    if (in_app_version == "ForcedUpdate") {
-      image_props_.system_version = in_app_version;
-    }
-    os_version_ = image_props_.system_version;
-  } else {
-    os_version_ = OmahaRequestParams::kOsVersion;
-  }
-  if (!in_app_version.empty())
-    image_props_.version = in_app_version;
+  os_version_ = OmahaRequestParams::kOsVersion;
+  if (!app_version.empty())
+    image_props_.version = app_version;
 
   os_sp_ = image_props_.version + "_" + GetMachineType();
   app_lang_ = "en-US";
-  hwid_ = system_state_->hardware()->GetHardwareClass();
-  if (CollectECFWVersions()) {
-    fw_version_ = system_state_->hardware()->GetFirmwareVersion();
-    ec_version_ = system_state_->hardware()->GetECVersion();
-  }
+  hwid_ = SystemState::Get()->hardware()->GetHardwareClass();
+  device_requisition_ = SystemState::Get()->hardware()->GetDeviceRequisition();
 
   if (image_props_.current_channel == mutable_image_props_.target_channel) {
     // deltas are only okay if the /.nodelta file does not exist.  if we don't
@@ -115,17 +106,53 @@
     delta_okay_ = false;
   }
 
-  if (in_update_url.empty())
+  if (update_url.empty())
     update_url_ = image_props_.omaha_url;
   else
-    update_url_ = in_update_url;
+    update_url_ = update_url;
 
   // Set the interactive flag accordingly.
-  interactive_ = in_interactive;
+  interactive_ = params.interactive;
 
-  dlc_module_ids_.clear();
+  dlc_apps_params_.clear();
   // Set false so it will do update by default.
   is_install_ = false;
+
+  target_version_prefix_ = params.target_version_prefix;
+
+  lts_tag_ = params.lts_tag;
+
+  autoupdate_token_ = params.quick_fix_build_token;
+
+  rollback_allowed_ = params.rollback_allowed;
+
+  // Set whether saving data over rollback is requested.
+  rollback_data_save_requested_ = params.rollback_data_save_requested;
+
+  // Set how many milestones of rollback are allowed.
+  rollback_allowed_milestones_ = params.rollback_allowed_milestones;
+
+  // Set the target channel, if one was provided.
+  if (params.target_channel.empty()) {
+    LOG(INFO) << "No target channel mandated by policy.";
+  } else {
+    LOG(INFO) << "Setting target channel as mandated: "
+              << params.target_channel;
+    string error_message;
+    if (!SetTargetChannel(params.target_channel,
+                          params.rollback_on_channel_downgrade,
+                          &error_message)) {
+      LOG(ERROR) << "Setting the channel failed: " << error_message;
+    }
+
+    // Since this is the beginning of a new attempt, update the download
+    // channel. The download channel won't be updated until the next attempt,
+    // even if the target channel changes in the meantime; that way we'll know
+    // whether to cancel the current download attempt if the target channel
+    // does change.
+    UpdateDownloadChannel();
+  }
+
   return true;
 }
 
@@ -134,14 +161,6 @@
           update_url_ == image_props_.omaha_url);
 }
 
-bool OmahaRequestParams::CollectECFWVersions() const {
-  return base::StartsWith(
-             hwid_, string("PARROT"), base::CompareCase::SENSITIVE) ||
-         base::StartsWith(
-             hwid_, string("SPRING"), base::CompareCase::SENSITIVE) ||
-         base::StartsWith(hwid_, string("SNOW"), base::CompareCase::SENSITIVE);
-}
-
 bool OmahaRequestParams::SetTargetChannel(const string& new_target_channel,
                                           bool is_powerwash_allowed,
                                           string* error_message) {
@@ -160,7 +179,7 @@
   new_props.target_channel = new_target_channel;
   new_props.is_powerwash_allowed = is_powerwash_allowed;
 
-  if (!StoreMutableImageProperties(system_state_, new_props)) {
+  if (!StoreMutableImageProperties(new_props)) {
     if (error_message)
       *error_message = "Error storing the new channel value.";
     return false;
@@ -217,7 +236,7 @@
 }
 
 int OmahaRequestParams::GetChannelIndex(const string& channel) const {
-  for (size_t t = 0; t < arraysize(kChannelsByStability); ++t)
+  for (size_t t = 0; t < base::size(kChannelsByStability); ++t)
     if (channel == kChannelsByStability[t])
       return t;
 
@@ -247,4 +266,29 @@
                                                : image_props_.product_id;
 }
 
+string OmahaRequestParams::GetDlcAppId(const std::string& dlc_id) const {
+  // Create the App ID for |dlc_id| by appending the DLC module ID to the
+  // current App ID with an underscore.
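+  // For example, a hypothetical platform App ID of "platform-app-id" and
+  // |dlc_id| "sample-dlc" yield the DLC App ID "platform-app-id_sample-dlc".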
+  return GetAppId() + "_" + dlc_id;
+}
+
+bool OmahaRequestParams::IsDlcAppId(const std::string& app_id) const {
+  return dlc_apps_params().find(app_id) != dlc_apps_params().end();
+}
+
+bool OmahaRequestParams::GetDlcId(const string& app_id, string* dlc_id) const {
+  auto itr = dlc_apps_params_.find(app_id);
+  if (itr == dlc_apps_params_.end())
+    return false;
+  *dlc_id = itr->second.name;
+  return true;
+}
+
+void OmahaRequestParams::SetDlcNoUpdate(const string& app_id) {
+  auto itr = dlc_apps_params_.find(app_id);
+  if (itr == dlc_apps_params_.end())
+    return;
+  itr->second.updated = false;
+}
+
 }  // namespace chromeos_update_engine
diff --git a/omaha_request_params.h b/cros/omaha_request_params.h
similarity index 73%
rename from omaha_request_params.h
rename to cros/omaha_request_params.h
index 18235c0..fd4c2e2 100644
--- a/omaha_request_params.h
+++ b/cros/omaha_request_params.h
@@ -14,11 +14,12 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_OMAHA_REQUEST_PARAMS_H_
-#define UPDATE_ENGINE_OMAHA_REQUEST_PARAMS_H_
+#ifndef UPDATE_ENGINE_CROS_OMAHA_REQUEST_PARAMS_H_
+#define UPDATE_ENGINE_CROS_OMAHA_REQUEST_PARAMS_H_
 
 #include <stdint.h>
 
+#include <map>
 #include <string>
 #include <vector>
 
@@ -26,16 +27,16 @@
 #include <base/time/time.h>
 #include <gtest/gtest_prod.h>  // for FRIEND_TEST
 
+#include "update_engine/common/constants.h"
 #include "update_engine/common/platform_constants.h"
-#include "update_engine/image_properties.h"
+#include "update_engine/cros/image_properties.h"
+#include "update_engine/update_manager/policy.h"
 
 // This gathers local system information and prepares info used by the
 // Omaha request action.
 
 namespace chromeos_update_engine {
 
-class SystemState;
-
 // This class encapsulates the data Omaha gets for the request, along with
 // essential state needed for the processing of the request/response.  The
 // strings in this struct should not be XML escaped.
@@ -44,13 +45,13 @@
 // reflect its lifetime more appropriately.
 class OmahaRequestParams {
  public:
-  explicit OmahaRequestParams(SystemState* system_state)
-      : system_state_(system_state),
-        os_platform_(constants::kOmahaPlatformName),
+  OmahaRequestParams()
+      : os_platform_(constants::kOmahaPlatformName),
         os_version_(kOsVersion),
         delta_okay_(true),
         interactive_(false),
         rollback_allowed_(false),
+        rollback_data_save_requested_(false),
         wall_clock_based_wait_enabled_(false),
         update_check_count_wait_enabled_(false),
         min_update_checks_needed_(kDefaultMinUpdateChecks),
@@ -59,6 +60,24 @@
 
   virtual ~OmahaRequestParams();
 
+  enum ActiveCountingType {
+    kDayBased = 0,
+    kDateBased,
+  };
+
+  struct AppParams {
+    ActiveCountingType active_counting_type;
+    // |name| is only used for DLCs to store the DLC ID.
+    std::string name;
+    int64_t ping_active;
+    int64_t ping_date_last_active;
+    int64_t ping_date_last_rollcall;
+    bool send_ping;
+    // |updated| is only used for DLCs, to decide whether to send a DBus
+    // message to dlcservice on install/update completion.
+    bool updated = true;
+  };
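+  // For example (hypothetical IDs), dlc_apps_params() could map the DLC App
+  // ID "platform-app-id_sample-dlc" to
+  //   {.name = "sample-dlc", .ping_date_last_rollcall = 36, .send_ping = true}
+  // as set up via set_dlc_apps_params() in the unit tests.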
+
   // Setters and getters for the various properties.
   inline std::string os_platform() const { return os_platform_; }
   inline std::string os_version() const { return os_version_; }
@@ -72,26 +91,18 @@
   inline std::string canary_app_id() const {
     return image_props_.canary_product_id;
   }
-  inline std::string system_app_id() const { return image_props_.system_id; }
-  inline void set_system_app_id(const std::string& system_app_id) {
-    image_props_.system_id = system_app_id;
-  }
   inline void set_app_id(const std::string& app_id) {
     image_props_.product_id = app_id;
     image_props_.canary_product_id = app_id;
   }
   inline std::string app_lang() const { return app_lang_; }
   inline std::string hwid() const { return hwid_; }
-  inline std::string fw_version() const { return fw_version_; }
-  inline std::string ec_version() const { return ec_version_; }
+  inline std::string device_requisition() const { return device_requisition_; }
 
   inline void set_app_version(const std::string& version) {
     image_props_.version = version;
   }
   inline std::string app_version() const { return image_props_.version; }
-  inline std::string system_version() const {
-    return image_props_.system_version;
-  }
   inline std::string product_components() const {
     return image_props_.product_components;
   }
@@ -126,12 +137,33 @@
     return target_version_prefix_;
   }
 
+  inline std::string lts_tag() const { return lts_tag_; }
+
+  inline void set_lts_tag(const std::string& hint) { lts_tag_ = hint; }
+
   inline void set_rollback_allowed(bool rollback_allowed) {
     rollback_allowed_ = rollback_allowed;
   }
 
   inline bool rollback_allowed() const { return rollback_allowed_; }
 
+  inline void set_rollback_data_save_requested(
+      bool rollback_data_save_requested) {
+    rollback_data_save_requested_ = rollback_data_save_requested;
+  }
+
+  inline bool rollback_data_save_requested() const {
+    return rollback_data_save_requested_;
+  }
+
+  inline void set_rollback_allowed_milestones(int rollback_allowed_milestones) {
+    rollback_allowed_milestones_ = rollback_allowed_milestones;
+  }
+
+  inline int rollback_allowed_milestones() const {
+    return rollback_allowed_milestones_;
+  }
+
   inline void set_wall_clock_based_wait_enabled(bool enabled) {
     wall_clock_based_wait_enabled_ = enabled;
   }
@@ -165,20 +197,41 @@
   inline int64_t max_update_checks_allowed() const {
     return max_update_checks_allowed_;
   }
-  inline void set_dlc_module_ids(
-      const std::vector<std::string>& dlc_module_ids) {
-    dlc_module_ids_ = dlc_module_ids;
+  inline void set_dlc_apps_params(
+      const std::map<std::string, AppParams>& dlc_apps_params) {
+    dlc_apps_params_ = dlc_apps_params;
   }
-  inline std::vector<std::string> dlc_module_ids() const {
-    return dlc_module_ids_;
+  inline const std::map<std::string, AppParams>& dlc_apps_params() const {
+    return dlc_apps_params_;
   }
   inline void set_is_install(bool is_install) { is_install_ = is_install; }
   inline bool is_install() const { return is_install_; }
 
-  // Returns the app id corresponding to the current value of the
+  inline void set_autoupdate_token(const std::string& token) {
+    autoupdate_token_ = token;
+  }
+  inline const std::string& autoupdate_token() const {
+    return autoupdate_token_;
+  }
+
+  // Returns the App ID corresponding to the current value of the
   // download channel.
   virtual std::string GetAppId() const;
 
+  // Returns the App ID for the given |dlc_id|.
+  virtual std::string GetDlcAppId(const std::string& dlc_id) const;
+
+  // Returns true if the App ID is a DLC App ID that is currently part of the
+  // request parameters.
+  virtual bool IsDlcAppId(const std::string& app_id) const;
+
+  // Returns true and sets |dlc_id| if the given |app_id| is a DLC App ID that
+  // is currently part of the request parameters.
+  virtual bool GetDlcId(const std::string& app_id, std::string* dlc_id) const;
+
+  // If |app_id| is a DLC App ID, marks that DLC as not updated.
+  void SetDlcNoUpdate(const std::string& app_id);
+
   // Suggested defaults
   static const char kOsVersion[];
   static const int64_t kDefaultMinUpdateChecks = 0;
@@ -189,7 +242,7 @@
   // of the parameter. Returns true on success, false otherwise.
   bool Init(const std::string& in_app_version,
             const std::string& in_update_url,
-            bool in_interactive);
+            const chromeos_update_manager::UpdateCheckParams& params);
 
   // Permanently changes the release channel to |channel|. Performs a
   // powerwash, if required and allowed.
@@ -210,8 +263,10 @@
   // or Init is called again.
   virtual void UpdateDownloadChannel();
 
-  // Returns whether we should powerwash for this update.
-  virtual bool ShouldPowerwash() const;
+  // Returns whether we should powerwash for this update. Note that this is
+  // just an indication; the final decision on whether to powerwash is made in
+  // the response handler.
+  bool ShouldPowerwash() const;
 
   // Check if the provided update URL is official, meaning either the default
   // autoupdate server or the autoupdate autotest server.
@@ -231,19 +286,19 @@
   }
   void set_app_lang(const std::string& app_lang) { app_lang_ = app_lang; }
   void set_hwid(const std::string& hwid) { hwid_ = hwid; }
-  void set_fw_version(const std::string& fw_version) {
-    fw_version_ = fw_version;
-  }
-  void set_ec_version(const std::string& ec_version) {
-    ec_version_ = ec_version;
-  }
   void set_is_powerwash_allowed(bool powerwash_allowed) {
     mutable_image_props_.is_powerwash_allowed = powerwash_allowed;
   }
+  bool is_powerwash_allowed() {
+    return mutable_image_props_.is_powerwash_allowed;
+  }
+
+  void set_device_requisition(const std::string& requisition) {
+    device_requisition_ = requisition;
+  }
 
  private:
   FRIEND_TEST(OmahaRequestParamsTest, ChannelIndexTest);
-  FRIEND_TEST(OmahaRequestParamsTest, CollectECFWVersionsTest);
   FRIEND_TEST(OmahaRequestParamsTest, IsValidChannelTest);
   FRIEND_TEST(OmahaRequestParamsTest, SetIsPowerwashAllowedTest);
   FRIEND_TEST(OmahaRequestParamsTest, SetTargetChannelInvalidTest);
@@ -266,16 +321,9 @@
   // i.e. index(target_channel) > index(current_channel).
   bool ToMoreStableChannel() const;
 
-  // Returns True if we should store the fw/ec versions based on our hwid_.
-  // Compares hwid to a set of whitelisted prefixes.
-  bool CollectECFWVersions() const;
-
   // Gets the machine type (e.g. "i686").
   std::string GetMachineType() const;
 
-  // Global system context.
-  SystemState* system_state_;
-
   // The system image properties.
   ImageProperties image_props_;
   MutableImageProperties mutable_image_props_;
@@ -306,11 +354,15 @@
   //   changed and cancel the current download attempt.
   std::string download_channel_;
 
-  std::string hwid_;        // Hardware Qualification ID of the client
-  std::string fw_version_;  // Chrome OS Firmware Version.
-  std::string ec_version_;  // Chrome OS EC Version.
-  bool delta_okay_;         // If this client can accept a delta
-  bool interactive_;        // Whether this is a user-initiated update check
+  // The value defining the parameters of the LTS (Long Term Support).
+  std::string lts_tag_;
+
+  std::string hwid_;  // Hardware Qualification ID of the client
+  // TODO(b:133324571) tracks removal of this field once it is no longer
+  // needed in AU requests. Remove by October 1st 2019.
+  std::string device_requisition_;  // Chrome OS Requisition type.
+  bool delta_okay_;                 // If this client can accept a delta
+  bool interactive_;  // Whether this is a user-initiated update check
 
   // The URL to send the Omaha request to.
   std::string update_url_;
@@ -322,6 +374,12 @@
   // Whether the client is accepting rollback images too.
   bool rollback_allowed_;
 
+  // Whether rollbacks should preserve some system state during powerwash.
+  bool rollback_data_save_requested_;
+
+  // How many milestones the client can rollback to.
+  int rollback_allowed_milestones_;
+
   // True if scattering or staging are enabled, in which case waiting_period_
   // specifies the amount of absolute time that we've to wait for before sending
   // a request to Omaha.
@@ -339,17 +397,22 @@
   // When reading files, prepend root_ to the paths. Useful for testing.
   std::string root_;
 
-  // A list of DLC module IDs to install.
-  std::vector<std::string> dlc_module_ids_;
+  // The DLC modules to install, as a mapping from DLC App ID to |AppParams|.
+  std::map<std::string, AppParams> dlc_apps_params_;
 
   // This variable defines whether the payload is being installed in the current
   // partition. At the moment, this is used for installing DLC modules on the
   // current active partition instead of the inactive partition.
   bool is_install_;
 
+  // Token used when making an update request for a specific build.
+  // For example: Token for a Quick Fix Build:
+  // https://cloud.google.com/docs/chrome-enterprise/policies/?policy=DeviceQuickFixBuildToken
+  std::string autoupdate_token_;
+
   DISALLOW_COPY_AND_ASSIGN(OmahaRequestParams);
 };
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_OMAHA_REQUEST_PARAMS_H_
+#endif  // UPDATE_ENGINE_CROS_OMAHA_REQUEST_PARAMS_H_
diff --git a/omaha_request_params_unittest.cc b/cros/omaha_request_params_unittest.cc
similarity index 76%
rename from omaha_request_params_unittest.cc
rename to cros/omaha_request_params_unittest.cc
index 7332431..2d67ec0 100644
--- a/omaha_request_params_unittest.cc
+++ b/cros/omaha_request_params_unittest.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/omaha_request_params.h"
+#include "update_engine/cros/omaha_request_params.h"
 
 #include <stdio.h>
 
@@ -25,11 +25,10 @@
 #include <gtest/gtest.h>
 
 #include "update_engine/common/constants.h"
-#include "update_engine/common/fake_prefs.h"
 #include "update_engine/common/platform_constants.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/fake_system_state.h"
+#include "update_engine/cros/fake_system_state.h"
 
 using chromeos_update_engine::test_utils::WriteFileString;
 using std::string;
@@ -38,26 +37,23 @@
 
 class OmahaRequestParamsTest : public ::testing::Test {
  public:
-  OmahaRequestParamsTest() : params_(&fake_system_state_) {}
+  OmahaRequestParamsTest() : params_() {}
 
  protected:
   void SetUp() override {
     // Create a uniquely named test directory.
     ASSERT_TRUE(tempdir_.CreateUniqueTempDir());
     params_.set_root(tempdir_.GetPath().value());
+    FakeSystemState::CreateInstance();
     SetLockDown(false);
-    fake_system_state_.set_prefs(&fake_prefs_);
   }
 
   void SetLockDown(bool locked_down) {
-    fake_system_state_.fake_hardware()->SetIsOfficialBuild(locked_down);
-    fake_system_state_.fake_hardware()->SetIsNormalBootMode(locked_down);
+    FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(locked_down);
+    FakeSystemState::Get()->fake_hardware()->SetIsNormalBootMode(locked_down);
   }
 
-  FakeSystemState fake_system_state_;
-  OmahaRequestParams params_{&fake_system_state_};
-  FakePrefs fake_prefs_;
-
+  OmahaRequestParams params_;
   base::ScopedTempDir tempdir_;
 };
 
@@ -75,73 +71,73 @@
 }  // namespace
 
 TEST_F(OmahaRequestParamsTest, MissingChannelTest) {
-  EXPECT_TRUE(params_.Init("", "", false));
+  EXPECT_TRUE(params_.Init("", "", {}));
   // By default, if no channel is set, we should track the stable-channel.
   EXPECT_EQ("stable-channel", params_.target_channel());
 }
 
 TEST_F(OmahaRequestParamsTest, ForceVersionTest) {
-  EXPECT_TRUE(params_.Init("ForcedVersion", "", false));
+  EXPECT_TRUE(params_.Init("ForcedVersion", "", {}));
   EXPECT_EQ(string("ForcedVersion_") + GetMachineType(), params_.os_sp());
   EXPECT_EQ("ForcedVersion", params_.app_version());
 }
 
 TEST_F(OmahaRequestParamsTest, ForcedURLTest) {
-  EXPECT_TRUE(params_.Init("", "http://forced.google.com", false));
+  EXPECT_TRUE(params_.Init("", "http://forced.google.com", {}));
   EXPECT_EQ("http://forced.google.com", params_.update_url());
 }
 
 TEST_F(OmahaRequestParamsTest, MissingURLTest) {
-  EXPECT_TRUE(params_.Init("", "", false));
+  EXPECT_TRUE(params_.Init("", "", {}));
   EXPECT_EQ(constants::kOmahaDefaultProductionURL, params_.update_url());
 }
 
 TEST_F(OmahaRequestParamsTest, DeltaOKTest) {
-  EXPECT_TRUE(params_.Init("", "", false));
+  EXPECT_TRUE(params_.Init("", "", {}));
   EXPECT_TRUE(params_.delta_okay());
 }
 
 TEST_F(OmahaRequestParamsTest, NoDeltasTest) {
   ASSERT_TRUE(
       WriteFileString(tempdir_.GetPath().Append(".nodelta").value(), ""));
-  EXPECT_TRUE(params_.Init("", "", false));
+  EXPECT_TRUE(params_.Init("", "", {}));
   EXPECT_FALSE(params_.delta_okay());
 }
 
 TEST_F(OmahaRequestParamsTest, SetTargetChannelTest) {
   {
-    OmahaRequestParams params(&fake_system_state_);
+    OmahaRequestParams params;
     params.set_root(tempdir_.GetPath().value());
-    EXPECT_TRUE(params.Init("", "", false));
+    EXPECT_TRUE(params.Init("", "", {}));
     EXPECT_TRUE(params.SetTargetChannel("canary-channel", false, nullptr));
     EXPECT_FALSE(params.mutable_image_props_.is_powerwash_allowed);
   }
   params_.set_root(tempdir_.GetPath().value());
-  EXPECT_TRUE(params_.Init("", "", false));
+  EXPECT_TRUE(params_.Init("", "", {}));
   EXPECT_EQ("canary-channel", params_.target_channel());
   EXPECT_FALSE(params_.mutable_image_props_.is_powerwash_allowed);
 }
 
 TEST_F(OmahaRequestParamsTest, SetIsPowerwashAllowedTest) {
   {
-    OmahaRequestParams params(&fake_system_state_);
+    OmahaRequestParams params;
     params.set_root(tempdir_.GetPath().value());
-    EXPECT_TRUE(params.Init("", "", false));
+    EXPECT_TRUE(params.Init("", "", {}));
     EXPECT_TRUE(params.SetTargetChannel("canary-channel", true, nullptr));
     EXPECT_TRUE(params.mutable_image_props_.is_powerwash_allowed);
   }
   params_.set_root(tempdir_.GetPath().value());
-  EXPECT_TRUE(params_.Init("", "", false));
+  EXPECT_TRUE(params_.Init("", "", {}));
   EXPECT_EQ("canary-channel", params_.target_channel());
   EXPECT_TRUE(params_.mutable_image_props_.is_powerwash_allowed);
 }
 
 TEST_F(OmahaRequestParamsTest, SetTargetChannelInvalidTest) {
   {
-    OmahaRequestParams params(&fake_system_state_);
+    OmahaRequestParams params;
     params.set_root(tempdir_.GetPath().value());
     SetLockDown(true);
-    EXPECT_TRUE(params.Init("", "", false));
+    EXPECT_TRUE(params.Init("", "", {}));
     params.image_props_.allow_arbitrary_channels = false;
     string error_message;
     EXPECT_FALSE(
@@ -151,7 +147,7 @@
     EXPECT_FALSE(params.mutable_image_props_.is_powerwash_allowed);
   }
   params_.set_root(tempdir_.GetPath().value());
-  EXPECT_TRUE(params_.Init("", "", false));
+  EXPECT_TRUE(params_.Init("", "", {}));
   EXPECT_EQ("stable-channel", params_.target_channel());
   EXPECT_FALSE(params_.mutable_image_props_.is_powerwash_allowed);
 }
@@ -197,7 +193,7 @@
 
   // When set to a valid value while a change is already pending, it should
   // succeed.
-  params_.Init("", "", false);
+  params_.Init("", "", {});
   EXPECT_TRUE(params_.SetTargetChannel("beta-channel", true, nullptr));
   // The target channel should reflect the change, but the download channel
   // should continue to retain the old value ...
@@ -236,6 +232,13 @@
   EXPECT_FALSE(params_.ToMoreStableChannel());
 }
 
+TEST_F(OmahaRequestParamsTest, TargetChannelHintTest) {
+  EXPECT_TRUE(params_.Init("", "", {}));
+  const string kHint("foo-hint");
+  params_.set_lts_tag(kHint);
+  EXPECT_EQ(kHint, params_.lts_tag());
+}
+
 TEST_F(OmahaRequestParamsTest, ShouldPowerwashTest) {
   params_.mutable_image_props_.is_powerwash_allowed = false;
   EXPECT_FALSE(params_.ShouldPowerwash());
@@ -250,12 +253,42 @@
   EXPECT_TRUE(params_.ShouldPowerwash());
 }
 
-TEST_F(OmahaRequestParamsTest, CollectECFWVersionsTest) {
-  params_.hwid_ = string("STUMPY ALEX 12345");
-  EXPECT_FALSE(params_.CollectECFWVersions());
+TEST_F(OmahaRequestParamsTest, RequisitionIsSetTest) {
+  EXPECT_TRUE(params_.Init("", "", {}));
+  EXPECT_EQ("fake_requisition", params_.device_requisition());
+}
 
-  params_.hwid_ = string("SNOW 12345");
-  EXPECT_TRUE(params_.CollectECFWVersions());
+TEST_F(OmahaRequestParamsTest, GetMissingDlcId) {
+  EXPECT_TRUE(params_.Init("", "", {}));
+
+  string dlc_id;
+  EXPECT_FALSE(params_.GetDlcId("some-dlc-app-id", &dlc_id));
+}
+
+TEST_F(OmahaRequestParamsTest, GetDlcId) {
+  EXPECT_TRUE(params_.Init("", "", {}));
+  const string kExpectedDlcId = "test-dlc";
+  const string dlc_app_id = params_.GetDlcAppId(kExpectedDlcId);
+  params_.set_dlc_apps_params({{dlc_app_id, {.name = kExpectedDlcId}}});
+
+  string dlc_id;
+  EXPECT_TRUE(params_.GetDlcId(dlc_app_id, &dlc_id));
+  EXPECT_EQ(kExpectedDlcId, dlc_id);
+}
+
+TEST_F(OmahaRequestParamsTest, GetDlcAppId) {
+  EXPECT_TRUE(params_.Init("", "", {}));
+  const string kAppId = "test-app-id";
+  params_.set_app_id(kAppId);
+  const string kDlcId = "test-dlc";
+  const string expected_dlc_app_id = kAppId + "_" + kDlcId;
+
+  EXPECT_EQ(expected_dlc_app_id, params_.GetDlcAppId(kDlcId));
+}
+
+TEST_F(OmahaRequestParamsTest, AutoUpdateTokenTest) {
+  EXPECT_TRUE(params_.Init("", "", {.quick_fix_build_token = "foo-token"}));
+  EXPECT_EQ("foo-token", params_.autoupdate_token());
 }
 
 }  // namespace chromeos_update_engine
diff --git a/omaha_response.h b/cros/omaha_response.h
similarity index 81%
rename from omaha_response.h
rename to cros/omaha_response.h
index 0ac09df..3b07745 100644
--- a/omaha_response.h
+++ b/cros/omaha_response.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_OMAHA_RESPONSE_H_
-#define UPDATE_ENGINE_OMAHA_RESPONSE_H_
+#ifndef UPDATE_ENGINE_CROS_OMAHA_RESPONSE_H_
+#define UPDATE_ENGINE_CROS_OMAHA_RESPONSE_H_
 
 #include <fcntl.h>
 #include <sys/stat.h>
@@ -38,7 +38,6 @@
 
   // These are only valid if update_exists is true:
   std::string version;
-  std::string system_version;
 
   struct Package {
     // The ordered list of URLs in the Omaha response. Each item is a complete
@@ -51,6 +50,13 @@
     // True if the payload described in this response is a delta payload.
     // False if it's a full payload.
     bool is_delta = false;
+    // True if the payload can be excluded from updating if consistently faulty.
+    // False if the payload is critical to update.
+    bool can_exclude = false;
+    // The App ID associated with the package.
+    std::string app_id;
+    // The unique fingerprint value associated with the package.
+    std::string fp;
   };
   std::vector<Package> packages;
 
@@ -102,9 +108,16 @@
   // Key versions of the returned rollback image. Values are 0xffff if the
   // image is not a rollback, or the fields were not present.
   RollbackKeyVersion rollback_key_version;
+
+  // Key versions of the N - rollback_allowed_milestones release. For example,
+  // if the current version is 70 and rollback_allowed_milestones is 4, this
+  // will contain the key versions of version 66. This is used to ensure that
+  // the kernel and firmware keys are at most those of v66 so that v66 can be
+  // rolled back to.
+  RollbackKeyVersion past_rollback_key_version;
 };
 static_assert(sizeof(off_t) == 8, "off_t not 64 bit");
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_OMAHA_RESPONSE_H_
+#endif  // UPDATE_ENGINE_CROS_OMAHA_RESPONSE_H_
diff --git a/cros/omaha_response_handler_action.cc b/cros/omaha_response_handler_action.cc
new file mode 100644
index 0000000..04cae3e
--- /dev/null
+++ b/cros/omaha_response_handler_action.cc
@@ -0,0 +1,339 @@
+//
+// Copyright (C) 2011 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/cros/omaha_response_handler_action.h"
+
+#include <limits>
+#include <string>
+
+#include <base/logging.h>
+#include <base/strings/string_number_conversions.h>
+#include <base/version.h>
+#include <policy/device_policy.h>
+
+#include "update_engine/common/constants.h"
+#include "update_engine/common/hardware_interface.h"
+#include "update_engine/common/prefs_interface.h"
+#include "update_engine/common/system_state.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/cros/connection_manager_interface.h"
+#include "update_engine/cros/omaha_request_params.h"
+#include "update_engine/cros/payload_state_interface.h"
+#include "update_engine/payload_consumer/delta_performer.h"
+#include "update_engine/update_manager/policy.h"
+#include "update_engine/update_manager/update_manager.h"
+
+using chromeos_update_manager::kRollforwardInfinity;
+using chromeos_update_manager::Policy;
+using chromeos_update_manager::UpdateManager;
+using std::numeric_limits;
+using std::string;
+
+namespace chromeos_update_engine {
+
+OmahaResponseHandlerAction::OmahaResponseHandlerAction()
+    : deadline_file_(constants::kOmahaResponseDeadlineFile) {}
+
+void OmahaResponseHandlerAction::PerformAction() {
+  CHECK(HasInputObject());
+  ScopedActionCompleter completer(processor_, this);
+  const OmahaResponse& response = GetInputObject();
+  if (!response.update_exists) {
+    LOG(INFO) << "There are no updates. Aborting.";
+    completer.set_code(ErrorCode::kNoUpdate);
+    return;
+  }
+
+  // All decisions as to which URL should be used have already been made, so
+  // use the current URL as the download URL.
+  string current_url = SystemState::Get()->payload_state()->GetCurrentUrl();
+  if (current_url.empty()) {
+    // This shouldn't happen as we should always supply the HTTPS backup URL.
+    // Handling this anyway, just in case.
+    LOG(ERROR) << "There are no suitable URLs in the response to use.";
+    completer.set_code(ErrorCode::kOmahaResponseInvalid);
+    return;
+  }
+
+  // This is the URL to the first package, not all packages.
+  // (For updates): All |Action|s prior to this must pass in non-excluded URLs
+  // within the |OmahaResponse|; see the exclusion logic in
+  // |OmahaRequestAction|, which enforces exclusions for updates.
+  install_plan_.download_url = current_url;
+  install_plan_.version = response.version;
+
+  OmahaRequestParams* const params = SystemState::Get()->request_params();
+  PayloadStateInterface* const payload_state =
+      SystemState::Get()->payload_state();
+
+  // If we're using p2p to download and there is a local peer, use it.
+  if (payload_state->GetUsingP2PForDownloading() &&
+      !payload_state->GetP2PUrl().empty()) {
+    LOG(INFO) << "Replacing URL " << install_plan_.download_url
+              << " with local URL " << payload_state->GetP2PUrl()
+              << " since p2p is enabled.";
+    install_plan_.download_url = payload_state->GetP2PUrl();
+    payload_state->SetUsingP2PForDownloading(true);
+  }
+
+  // Fill up the other properties based on the response.
+  string update_check_response_hash;
+  for (const auto& package : response.packages) {
+    brillo::Blob raw_hash;
+    if (!base::HexStringToBytes(package.hash, &raw_hash)) {
+      LOG(ERROR) << "Failed to convert payload hash from hex string to bytes: "
+                 << package.hash;
+      completer.set_code(ErrorCode::kOmahaResponseInvalid);
+      return;
+    }
+    install_plan_.payloads.push_back(
+        {.payload_urls = package.payload_urls,
+         .size = package.size,
+         .metadata_size = package.metadata_size,
+         .metadata_signature = package.metadata_signature,
+         .hash = raw_hash,
+         .type = package.is_delta ? InstallPayloadType::kDelta
+                                  : InstallPayloadType::kFull,
+         .fp = package.fp,
+         .app_id = package.app_id});
+    update_check_response_hash += package.hash + ":";
+  }
+  install_plan_.public_key_rsa = response.public_key_rsa;
+  install_plan_.hash_checks_mandatory = AreHashChecksMandatory(response);
+  install_plan_.is_resume = DeltaPerformer::CanResumeUpdate(
+      SystemState::Get()->prefs(), update_check_response_hash);
+  if (install_plan_.is_resume) {
+    payload_state->UpdateResumed();
+  } else {
+    payload_state->UpdateRestarted();
+    LOG_IF(WARNING,
+           !DeltaPerformer::ResetUpdateProgress(SystemState::Get()->prefs(),
+                                                false))
+        << "Unable to reset the update progress.";
+    LOG_IF(WARNING,
+           !SystemState::Get()->prefs()->SetString(
+               kPrefsUpdateCheckResponseHash, update_check_response_hash))
+        << "Unable to save the update check response hash.";
+  }
+
+  if (params->is_install()) {
+    install_plan_.target_slot =
+        SystemState::Get()->boot_control()->GetCurrentSlot();
+    install_plan_.source_slot = BootControlInterface::kInvalidSlot;
+  } else {
+    install_plan_.source_slot =
+        SystemState::Get()->boot_control()->GetCurrentSlot();
+    install_plan_.target_slot = install_plan_.source_slot == 0 ? 1 : 0;
+  }
+
+  // The Omaha response doesn't include the channel name for this image, so we
+  // use the download_channel we used during the request to tag the target slot.
+  // This will be used in the next boot to know the channel the image was
+  // downloaded from.
+  string current_channel_key =
+      kPrefsChannelOnSlotPrefix + std::to_string(install_plan_.target_slot);
+  SystemState::Get()->prefs()->SetString(current_channel_key,
+                                         params->download_channel());
+
+  // Check whether the device is able to boot the returned rollback image.
+  if (response.is_rollback) {
+    if (!params->rollback_allowed()) {
+      LOG(ERROR) << "Received rollback image but rollback is not allowed.";
+      completer.set_code(ErrorCode::kOmahaResponseInvalid);
+      return;
+    }
+
+    // Calculate the minimum key version values on the current device.
+    auto min_kernel_key_version = static_cast<uint32_t>(
+        SystemState::Get()->hardware()->GetMinKernelKeyVersion());
+    auto min_firmware_key_version = static_cast<uint32_t>(
+        SystemState::Get()->hardware()->GetMinFirmwareKeyVersion());
+
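+    // Key versions are packed as (key_version << 16) | version; the minimum
+    // values read above use the same 32-bit layout, so they compare directly.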
+    uint32_t kernel_key_version =
+        static_cast<uint32_t>(response.rollback_key_version.kernel_key) << 16 |
+        static_cast<uint32_t>(response.rollback_key_version.kernel);
+    uint32_t firmware_key_version =
+        static_cast<uint32_t>(response.rollback_key_version.firmware_key)
+            << 16 |
+        static_cast<uint32_t>(response.rollback_key_version.firmware);
+
+    LOG(INFO) << "Rollback image versions:"
+              << " device_kernel_key_version=" << min_kernel_key_version
+              << " image_kernel_key_version=" << kernel_key_version
+              << " device_firmware_key_version=" << min_firmware_key_version
+              << " image_firmware_key_version=" << firmware_key_version;
+
+    // Don't attempt a rollback if the versions are incompatible or the
+    // target image does not specify the version information.
+    if (kernel_key_version == numeric_limits<uint32_t>::max() ||
+        firmware_key_version == numeric_limits<uint32_t>::max() ||
+        kernel_key_version < min_kernel_key_version ||
+        firmware_key_version < min_firmware_key_version) {
+      LOG(ERROR) << "Device won't be able to boot up the rollback image.";
+      completer.set_code(ErrorCode::kRollbackNotPossible);
+      return;
+    }
+    install_plan_.is_rollback = true;
+    install_plan_.rollback_data_save_requested =
+        params->rollback_data_save_requested();
+  }
+
+  // Powerwash if either the response requires it or the parameters indicated
+  // powerwash (usually because there was a channel downgrade) and we are
+  // downgrading the version. Enterprise rollback, indicated by
+  // |response.is_rollback| is dealt with separately above.
+  if (response.powerwash_required) {
+    install_plan_.powerwash_required = true;
+  } else if (params->ShouldPowerwash() && !response.is_rollback) {
+    base::Version new_version(response.version);
+    base::Version current_version(params->app_version());
+
+    if (!new_version.IsValid()) {
+      LOG(WARNING) << "Not powerwashing,"
+                   << " the update's version number is unreadable."
+                   << " Update's version number: " << response.version;
+    } else if (!current_version.IsValid()) {
+      LOG(WARNING) << "Not powerwashing,"
+                   << " the current version number is unreadable."
+                   << " Current version number: " << params->app_version();
+    } else if (new_version < current_version) {
+      install_plan_.powerwash_required = true;
+      // Always try to preserve enrollment and wifi data for enrolled devices.
+      install_plan_.rollback_data_save_requested =
+          SystemState::Get()->device_policy() &&
+          SystemState::Get()->device_policy()->IsEnterpriseEnrolled();
+    }
+  }
+
+  TEST_AND_RETURN(HasOutputPipe());
+  if (HasOutputPipe())
+    SetOutputObject(install_plan_);
+  install_plan_.Dump();
+
+  // Send the deadline data (if any) to Chrome through a file. This is a pretty
+  // hacky solution but should be OK for now.
+  //
+  // TODO(petkov): Re-architect this to avoid communication through a
+  // file. Ideally, we would include this information in D-Bus's GetStatus
+  // method and UpdateStatus signal. A potential issue is that update_engine may
+  // be unresponsive during an update download.
+  if (!deadline_file_.empty()) {
+    if (payload_state->GetRollbackHappened()) {
+      // Don't do forced update if rollback has happened since the last update
+      // check where policy was present.
+      LOG(INFO) << "Not forcing update because a rollback happened.";
+      utils::WriteFile(deadline_file_.c_str(), nullptr, 0);
+    } else {
+      utils::WriteFile(deadline_file_.c_str(),
+                       response.deadline.data(),
+                       response.deadline.size());
+    }
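+    // 0644: writable only by the owner, readable by everyone (Chrome reads
+    // this file).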
+    chmod(deadline_file_.c_str(), S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+  }
+
+  // Check the generated install-plan with the Policy to confirm that
+  // it can be applied at this time (or at all).
+  UpdateManager* const update_manager = SystemState::Get()->update_manager();
+  CHECK(update_manager);
+  auto ec = ErrorCode::kSuccess;
+  update_manager->PolicyRequest(
+      &Policy::UpdateCanBeApplied, &ec, &install_plan_);
+  completer.set_code(ec);
+
+  const auto allowed_milestones = params->rollback_allowed_milestones();
+  if (allowed_milestones > 0) {
+    auto max_firmware_rollforward = numeric_limits<uint32_t>::max();
+    auto max_kernel_rollforward = numeric_limits<uint32_t>::max();
+
+    // Determine the version to update the max rollforward verified boot
+    // value.
+    OmahaResponse::RollbackKeyVersion version =
+        response.past_rollback_key_version;
+
+    // Determine the max rollforward values to be set in the TPM.
+    max_firmware_rollforward = static_cast<uint32_t>(version.firmware_key)
+                                   << 16 |
+                               static_cast<uint32_t>(version.firmware);
+    max_kernel_rollforward = static_cast<uint32_t>(version.kernel_key) << 16 |
+                             static_cast<uint32_t>(version.kernel);
+
+    // In the case that the value is 0xffffffff, log a warning because the
+    // device should not be installing a rollback image without having version
+    // information.
+    if (max_firmware_rollforward == numeric_limits<uint32_t>::max() ||
+        max_kernel_rollforward == numeric_limits<uint32_t>::max()) {
+      LOG(WARNING)
+          << "Max rollforward values were not sent in rollback response: "
+          << " max_kernel_rollforward=" << max_kernel_rollforward
+          << " max_firmware_rollforward=" << max_firmware_rollforward
+          << " rollback_allowed_milestones="
+          << params->rollback_allowed_milestones();
+    } else {
+      LOG(INFO) << "Setting the max rollforward values: "
+                << " max_kernel_rollforward=" << max_kernel_rollforward
+                << " max_firmware_rollforward=" << max_firmware_rollforward
+                << " rollback_allowed_milestones="
+                << params->rollback_allowed_milestones();
+      SystemState::Get()->hardware()->SetMaxKernelKeyRollforward(
+          max_kernel_rollforward);
+      // TODO(crbug/783998): Set max firmware rollforward when implemented.
+    }
+  } else {
+    LOG(INFO) << "Rollback is not allowed. Setting max rollforward values"
+              << " to infinity";
+    // When rollback is not allowed, explicitly set the max roll forward to
+    // infinity.
+    SystemState::Get()->hardware()->SetMaxKernelKeyRollforward(
+        kRollforwardInfinity);
+    // TODO(crbug/783998): Set max firmware rollforward when implemented.
+  }
+}
+
+bool OmahaResponseHandlerAction::AreHashChecksMandatory(
+    const OmahaResponse& response) {
+  // We sometimes need to waive the hash checks in order to download from
+  // sources that don't provide hashes, such as dev server.
+  // At this point UpdateAttempter::IsAnyUpdateSourceAllowed() has already been
+  // checked, so an unofficial update URL won't get this far unless it's OK to
+  // use without a hash. Additionally, we want to always waive hash checks on
+  // unofficial builds (i.e. dev/test images).
+  // The end result is this:
+  //  * Base image:
+  //    - Official URLs require a hash.
+  //    - Unofficial URLs only get this far if the IsAnyUpdateSourceAllowed()
+  //      devmode/debugd checks pass, in which case the hash is waived.
+  //  * Dev/test image:
+  //    - Any URL is allowed through with no hash checking.
+  if (!SystemState::Get()->request_params()->IsUpdateUrlOfficial() ||
+      !SystemState::Get()->hardware()->IsOfficialBuild()) {
+    // Still do a hash check if a public key is included.
+    if (!response.public_key_rsa.empty()) {
+      // The autoupdate_CatchBadSignatures test checks for this string
+      // in log-files. Keep in sync.
+      LOG(INFO) << "Mandating payload hash checks since Omaha Response "
+                << "for unofficial build includes public RSA key.";
+      return true;
+    } else {
+      LOG(INFO) << "Waiving payload hash checks for unofficial update URL.";
+      return false;
+    }
+  }
+
+  LOG(INFO) << "Mandating hash checks for official URL on official build.";
+  return true;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/omaha_response_handler_action.h b/cros/omaha_response_handler_action.h
similarity index 88%
rename from omaha_response_handler_action.h
rename to cros/omaha_response_handler_action.h
index d2e6db8..9842c94 100644
--- a/omaha_response_handler_action.h
+++ b/cros/omaha_response_handler_action.h
@@ -14,17 +14,16 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_OMAHA_RESPONSE_HANDLER_ACTION_H_
-#define UPDATE_ENGINE_OMAHA_RESPONSE_HANDLER_ACTION_H_
+#ifndef UPDATE_ENGINE_CROS_OMAHA_RESPONSE_HANDLER_ACTION_H_
+#define UPDATE_ENGINE_CROS_OMAHA_RESPONSE_HANDLER_ACTION_H_
 
 #include <string>
 
 #include <gtest/gtest_prod.h>  // for FRIEND_TEST
 
 #include "update_engine/common/action.h"
-#include "update_engine/omaha_request_action.h"
+#include "update_engine/cros/omaha_request_action.h"
 #include "update_engine/payload_consumer/install_plan.h"
-#include "update_engine/system_state.h"
 
 // This class reads in an Omaha response and converts what it sees into
 // an install plan which is passed out.
@@ -42,7 +41,7 @@
 
 class OmahaResponseHandlerAction : public Action<OmahaResponseHandlerAction> {
  public:
-  explicit OmahaResponseHandlerAction(SystemState* system_state);
+  OmahaResponseHandlerAction();
 
   typedef ActionTraits<OmahaResponseHandlerAction>::InputObjectType
       InputObjectType;
@@ -65,9 +64,6 @@
   // of the system and the contents of the Omaha response. False otherwise.
   bool AreHashChecksMandatory(const OmahaResponse& response);
 
-  // Global system context.
-  SystemState* system_state_;
-
   // The install plan, if we have an update.
   InstallPlan install_plan_;
 
@@ -90,4 +86,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_OMAHA_RESPONSE_HANDLER_ACTION_H_
+#endif  // UPDATE_ENGINE_CROS_OMAHA_RESPONSE_HANDLER_ACTION_H_
diff --git a/cros/omaha_response_handler_action_unittest.cc b/cros/omaha_response_handler_action_unittest.cc
new file mode 100644
index 0000000..c9b46b1
--- /dev/null
+++ b/cros/omaha_response_handler_action_unittest.cc
@@ -0,0 +1,1022 @@
+//
+// Copyright (C) 2011 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/cros/omaha_response_handler_action.h"
+
+#include <memory>
+#include <string>
+#include <utility>
+
+#include <base/files/file_util.h>
+#include <base/files/scoped_temp_dir.h>
+#include <brillo/message_loops/fake_message_loop.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/constants.h"
+#include "update_engine/common/platform_constants.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/cros/fake_system_state.h"
+#include "update_engine/cros/mock_payload_state.h"
+#include "update_engine/payload_consumer/payload_constants.h"
+#include "update_engine/update_manager/mock_policy.h"
+
+using chromeos_update_engine::test_utils::System;
+using chromeos_update_engine::test_utils::WriteFileString;
+using chromeos_update_manager::EvalStatus;
+using chromeos_update_manager::FakeUpdateManager;
+using chromeos_update_manager::kRollforwardInfinity;
+using chromeos_update_manager::MockPolicy;
+using std::string;
+using testing::_;
+using testing::DoAll;
+using testing::Return;
+using testing::SetArgPointee;
+
+namespace chromeos_update_engine {
+
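+// Test delegate that records the completion code of the response handler
+// action plus the install plans produced by it and the collector action.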
+class OmahaResponseHandlerActionProcessorDelegate
+    : public ActionProcessorDelegate {
+ public:
+  OmahaResponseHandlerActionProcessorDelegate()
+      : code_(ErrorCode::kError), code_set_(false) {}
+  void ActionCompleted(ActionProcessor* processor,
+                       AbstractAction* action,
+                       ErrorCode code) {
+    if (action->Type() == OmahaResponseHandlerAction::StaticType()) {
+      auto response_handler_action =
+          static_cast<OmahaResponseHandlerAction*>(action);
+      code_ = code;
+      code_set_ = true;
+      response_handler_action_install_plan_.reset(
+          new InstallPlan(response_handler_action->install_plan_));
+    } else if (action->Type() ==
+               ObjectCollectorAction<InstallPlan>::StaticType()) {
+      auto collector_action =
+          static_cast<ObjectCollectorAction<InstallPlan>*>(action);
+      collector_action_install_plan_.reset(
+          new InstallPlan(collector_action->object()));
+    }
+  }
+  ErrorCode code_;
+  bool code_set_;
+  std::unique_ptr<InstallPlan> collector_action_install_plan_;
+  std::unique_ptr<InstallPlan> response_handler_action_install_plan_;
+};
+
+class OmahaResponseHandlerActionTest : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    FakeSystemState::CreateInstance();
+    // Enable MockPrefs.
+    FakeSystemState::Get()->set_prefs(nullptr);
+    FakeBootControl* fake_boot_control =
+        FakeSystemState::Get()->fake_boot_control();
+    fake_boot_control->SetPartitionDevice(kPartitionNameKernel, 0, "/dev/sdz2");
+    fake_boot_control->SetPartitionDevice(kPartitionNameRoot, 0, "/dev/sdz3");
+    fake_boot_control->SetPartitionDevice(kPartitionNameKernel, 1, "/dev/sdz4");
+    fake_boot_control->SetPartitionDevice(kPartitionNameRoot, 1, "/dev/sdz5");
+  }
+
+  // Returns true iff the OmahaResponseHandlerAction succeeded.
+  // If |out| is non-null, it is set to the install plan collected from the
+  // action.
+  bool DoTest(const OmahaResponse& in,
+              const string& deadline_file,
+              InstallPlan* out);
+
+  // Delegate passed to the ActionProcessor.
+  OmahaResponseHandlerActionProcessorDelegate delegate_;
+
+  // Captures the action's result code, for tests that need to directly verify
+  // it in non-success cases.
+  ErrorCode action_result_code_;
+
+  // "Hash+"
+  const brillo::Blob expected_hash_ = {0x48, 0x61, 0x73, 0x68, 0x2b};
+};
+
+namespace {
+const char* const kLongName =
+    "very_long_name_and_no_slashes-very_long_name_and_no_slashes"
+    "very_long_name_and_no_slashes-very_long_name_and_no_slashes"
+    "very_long_name_and_no_slashes-very_long_name_and_no_slashes"
+    "very_long_name_and_no_slashes-very_long_name_and_no_slashes"
+    "very_long_name_and_no_slashes-very_long_name_and_no_slashes"
+    "very_long_name_and_no_slashes-very_long_name_and_no_slashes"
+    "very_long_name_and_no_slashes-very_long_name_and_no_slashes"
+    "-the_update_a.b.c.d_DELTA_.tgz";
+const char* const kBadVersion = "don't update me";
+const char* const kPayloadHashHex = "486173682b";
+const char* const kPayloadFp1 = "1.755aff78ec73dfc7f590893ac";
+const char* const kPayloadFp2 = "1.98ba213e0ccec0d0e8cdc74a5";
+const char* const kPayloadAppId = "test_app_id";
+}  // namespace
+
+bool OmahaResponseHandlerActionTest::DoTest(const OmahaResponse& in,
+                                            const string& test_deadline_file,
+                                            InstallPlan* out) {
+  brillo::FakeMessageLoop loop(nullptr);
+  loop.SetAsCurrent();
+  ActionProcessor processor;
+  processor.set_delegate(&delegate_);
+
+  auto feeder_action = std::make_unique<ObjectFeederAction<OmahaResponse>>();
+  feeder_action->set_obj(in);
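+  // The response-hash and channel prefs are only expected to be written for
+  // responses that describe a valid update.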
+  if (in.update_exists && in.version != kBadVersion) {
+    string expected_hash;
+    for (const auto& package : in.packages)
+      expected_hash += package.hash + ":";
+    EXPECT_CALL(*(FakeSystemState::Get()->mock_prefs()),
+                SetString(kPrefsUpdateCheckResponseHash, expected_hash))
+        .WillOnce(Return(true));
+
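+    // Installs target the current slot; updates target the other slot,
+    // mirroring the slot selection in PerformAction().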
+    int slot =
+        FakeSystemState::Get()->request_params()->is_install()
+            ? FakeSystemState::Get()->fake_boot_control()->GetCurrentSlot()
+            : 1 - FakeSystemState::Get()->fake_boot_control()->GetCurrentSlot();
+    string key = kPrefsChannelOnSlotPrefix + std::to_string(slot);
+    EXPECT_CALL(*(FakeSystemState::Get()->mock_prefs()),
+                SetString(key, testing::_))
+        .WillOnce(Return(true));
+  }
+
+  string current_url = in.packages.size() ? in.packages[0].payload_urls[0] : "";
+  EXPECT_CALL(*(FakeSystemState::Get()->mock_payload_state()), GetCurrentUrl())
+      .WillRepeatedly(Return(current_url));
+
+  auto response_handler_action = std::make_unique<OmahaResponseHandlerAction>();
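+  // Redirect the action's deadline file to the test-provided path, if any.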
+  if (!test_deadline_file.empty())
+    response_handler_action->deadline_file_ = test_deadline_file;
+
+  auto collector_action =
+      std::make_unique<ObjectCollectorAction<InstallPlan>>();
+
+  BondActions(feeder_action.get(), response_handler_action.get());
+  BondActions(response_handler_action.get(), collector_action.get());
+  processor.EnqueueAction(std::move(feeder_action));
+  processor.EnqueueAction(std::move(response_handler_action));
+  processor.EnqueueAction(std::move(collector_action));
+  processor.StartProcessing();
+  EXPECT_TRUE(!processor.IsRunning())
+      << "Update test to handle non-async actions";
+
+  if (out && delegate_.collector_action_install_plan_)
+    *out = *delegate_.collector_action_install_plan_;
+
+  EXPECT_TRUE(delegate_.code_set_);
+  action_result_code_ = delegate_.code_;
+  return delegate_.code_ == ErrorCode::kSuccess;
+}
+
+TEST_F(OmahaResponseHandlerActionTest, SimpleTest) {
+  ScopedTempFile test_deadline_file(
+      "omaha_response_handler_action_unittest-XXXXXX");
+  {
+    OmahaResponse in;
+    in.update_exists = true;
+    in.version = "a.b.c.d";
+    in.packages.push_back(
+        {.payload_urls = {"http://foo/the_update_a.b.c.d.tgz"},
+         .size = 12,
+         .hash = kPayloadHashHex,
+         .app_id = kPayloadAppId,
+         .fp = kPayloadFp1});
+    in.more_info_url = "http://more/info";
+    in.prompt = false;
+    in.deadline = "20101020";
+    InstallPlan install_plan;
+    EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan));
+    EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+    EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
+    EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id);
+    EXPECT_EQ(in.packages[0].fp, install_plan.payloads[0].fp);
+    EXPECT_EQ(1U, install_plan.target_slot);
+    string deadline;
+    EXPECT_TRUE(utils::ReadFile(test_deadline_file.path(), &deadline));
+    EXPECT_EQ("20101020", deadline);
+    struct stat deadline_stat;
+    EXPECT_EQ(0, stat(test_deadline_file.path().c_str(), &deadline_stat));
+    EXPECT_EQ(
+        static_cast<mode_t>(S_IFREG | S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH),
+        deadline_stat.st_mode);
+    EXPECT_EQ(in.version, install_plan.version);
+  }
+  {
+    OmahaResponse in;
+    in.update_exists = true;
+    in.version = "a.b.c.d";
+    in.packages.push_back(
+        {.payload_urls = {"http://foo/the_update_a.b.c.d.tgz"},
+         .size = 12,
+         .hash = kPayloadHashHex,
+         .app_id = kPayloadAppId,
+         .fp = kPayloadFp1});
+    in.more_info_url = "http://more/info";
+    in.prompt = true;
+    InstallPlan install_plan;
+    // Set the other slot as current.
+    FakeSystemState::Get()->fake_boot_control()->SetCurrentSlot(1);
+    EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan));
+    EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+    EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
+    EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id);
+    EXPECT_EQ(in.packages[0].fp, install_plan.payloads[0].fp);
+    EXPECT_EQ(0U, install_plan.target_slot);
+    string deadline;
+    EXPECT_TRUE(utils::ReadFile(test_deadline_file.path(), &deadline) &&
+                deadline.empty());
+    EXPECT_EQ(in.version, install_plan.version);
+  }
+  {
+    OmahaResponse in;
+    in.update_exists = true;
+    in.version = "a.b.c.d";
+    in.packages.push_back({.payload_urls = {kLongName},
+                           .size = 12,
+                           .hash = kPayloadHashHex,
+                           .app_id = kPayloadAppId,
+                           .fp = kPayloadFp1});
+    in.more_info_url = "http://more/info";
+    in.prompt = true;
+    in.deadline = "some-deadline";
+    InstallPlan install_plan;
+    FakeSystemState::Get()->fake_boot_control()->SetCurrentSlot(0);
+    // Because rollback happened, the deadline shouldn't be written into the
+    // file.
+    EXPECT_CALL(*(FakeSystemState::Get()->mock_payload_state()),
+                GetRollbackHappened())
+        .WillOnce(Return(true));
+    EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan));
+    EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+    EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
+    EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id);
+    EXPECT_EQ(in.packages[0].fp, install_plan.payloads[0].fp);
+    EXPECT_EQ(1U, install_plan.target_slot);
+    string deadline;
+    EXPECT_TRUE(utils::ReadFile(test_deadline_file.path(), &deadline));
+    EXPECT_TRUE(deadline.empty());
+    EXPECT_EQ(in.version, install_plan.version);
+  }
+  {
+    OmahaResponse in;
+    in.update_exists = true;
+    in.version = "a.b.c.d";
+    in.packages.push_back({.payload_urls = {kLongName},
+                           .size = 12,
+                           .hash = kPayloadHashHex,
+                           .app_id = kPayloadAppId,
+                           .fp = kPayloadFp1});
+    in.more_info_url = "http://more/info";
+    in.prompt = true;
+    in.deadline = "some-deadline";
+    InstallPlan install_plan;
+    FakeSystemState::Get()->fake_boot_control()->SetCurrentSlot(0);
+    EXPECT_CALL(*(FakeSystemState::Get()->mock_payload_state()),
+                GetRollbackHappened())
+        .WillOnce(Return(false));
+    EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan));
+    EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+    EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
+    EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id);
+    EXPECT_EQ(in.packages[0].fp, install_plan.payloads[0].fp);
+    EXPECT_EQ(1U, install_plan.target_slot);
+    string deadline;
+    EXPECT_TRUE(utils::ReadFile(test_deadline_file.path(), &deadline));
+    EXPECT_EQ("some-deadline", deadline);
+    EXPECT_EQ(in.version, install_plan.version);
+  }
+}
+
+TEST_F(OmahaResponseHandlerActionTest, NoUpdatesTest) {
+  OmahaResponse in;
+  in.update_exists = false;
+  InstallPlan install_plan;
+  EXPECT_FALSE(DoTest(in, "", &install_plan));
+  EXPECT_TRUE(install_plan.partitions.empty());
+}
+
+TEST_F(OmahaResponseHandlerActionTest, InstallTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.version = "a.b.c.d";
+  in.packages.push_back(
+      {.payload_urls = {kLongName}, .size = 1, .hash = kPayloadHashHex});
+  in.packages.push_back(
+      {.payload_urls = {kLongName}, .size = 2, .hash = kPayloadHashHex});
+  in.more_info_url = "http://more/info";
+
+  OmahaRequestParams params;
+  params.set_is_install(true);
+
+  FakeSystemState::Get()->set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_TRUE(DoTest(in, "", &install_plan));
+  EXPECT_EQ(install_plan.source_slot, UINT_MAX);
+}
+
+TEST_F(OmahaResponseHandlerActionTest, MultiPackageTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.version = "a.b.c.d";
+  in.packages.push_back({.payload_urls = {"http://package/1"},
+                         .size = 1,
+                         .hash = kPayloadHashHex,
+                         .app_id = kPayloadAppId,
+                         .fp = kPayloadFp1});
+  in.packages.push_back({.payload_urls = {"http://package/2"},
+                         .size = 2,
+                         .hash = kPayloadHashHex,
+                         .app_id = kPayloadAppId,
+                         .fp = kPayloadFp2});
+  in.more_info_url = "http://more/info";
+  InstallPlan install_plan;
+  EXPECT_TRUE(DoTest(in, "", &install_plan));
+  EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+  EXPECT_EQ(2u, install_plan.payloads.size());
+  EXPECT_EQ(in.packages[0].size, install_plan.payloads[0].size);
+  EXPECT_EQ(in.packages[1].size, install_plan.payloads[1].size);
+  EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
+  EXPECT_EQ(expected_hash_, install_plan.payloads[1].hash);
+  EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id);
+  EXPECT_EQ(in.packages[1].app_id, install_plan.payloads[1].app_id);
+  EXPECT_EQ(in.packages[0].fp, install_plan.payloads[0].fp);
+  EXPECT_EQ(in.packages[1].fp, install_plan.payloads[1].fp);
+  EXPECT_EQ(in.version, install_plan.version);
+}
+
+TEST_F(OmahaResponseHandlerActionTest, HashChecksForHttpTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.version = "a.b.c.d";
+  in.packages.push_back(
+      {.payload_urls = {"http://test.should/need/hash.checks.signed"},
+       .size = 12,
+       .hash = kPayloadHashHex,
+       .app_id = kPayloadAppId,
+       .fp = kPayloadFp1});
+  in.more_info_url = "http://more/info";
+  // Hash checks are always skipped for non-official update URLs, so make this
+  // URL official.
+  EXPECT_CALL(*(FakeSystemState::Get()->mock_request_params()),
+              IsUpdateUrlOfficial())
+      .WillRepeatedly(Return(true));
+  InstallPlan install_plan;
+  EXPECT_TRUE(DoTest(in, "", &install_plan));
+  EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+  EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
+  EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id);
+  EXPECT_EQ(in.packages[0].fp, install_plan.payloads[0].fp);
+  EXPECT_TRUE(install_plan.hash_checks_mandatory);
+  EXPECT_EQ(in.version, install_plan.version);
+}
+
+TEST_F(OmahaResponseHandlerActionTest, HashChecksForUnofficialUpdateUrl) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.version = "a.b.c.d";
+  in.packages.push_back(
+      {.payload_urls = {"http://url.normally/needs/hash.checks.signed"},
+       .size = 12,
+       .hash = kPayloadHashHex,
+       .app_id = kPayloadAppId,
+       .fp = kPayloadFp1});
+  in.more_info_url = "http://more/info";
+  EXPECT_CALL(*(FakeSystemState::Get()->mock_request_params()),
+              IsUpdateUrlOfficial())
+      .WillRepeatedly(Return(false));
+  InstallPlan install_plan;
+  EXPECT_TRUE(DoTest(in, "", &install_plan));
+  EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+  EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
+  EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id);
+  EXPECT_EQ(in.packages[0].fp, install_plan.payloads[0].fp);
+  EXPECT_FALSE(install_plan.hash_checks_mandatory);
+  EXPECT_EQ(in.version, install_plan.version);
+}
+
+TEST_F(OmahaResponseHandlerActionTest,
+       HashChecksForOfficialUrlUnofficialBuildTest) {
+  // Official URLs for unofficial builds (dev/test images) don't require a hash.
+  OmahaResponse in;
+  in.update_exists = true;
+  in.version = "a.b.c.d";
+  in.packages.push_back(
+      {.payload_urls = {"http://url.normally/needs/hash.checks.signed"},
+       .size = 12,
+       .hash = kPayloadHashHex,
+       .app_id = kPayloadAppId,
+       .fp = kPayloadFp1});
+  in.more_info_url = "http://more/info";
+  EXPECT_CALL(*(FakeSystemState::Get()->mock_request_params()),
+              IsUpdateUrlOfficial())
+      .WillRepeatedly(Return(true));
+  FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(false);
+  InstallPlan install_plan;
+  EXPECT_TRUE(DoTest(in, "", &install_plan));
+  EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+  EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
+  EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id);
+  EXPECT_EQ(in.packages[0].fp, install_plan.payloads[0].fp);
+  EXPECT_FALSE(install_plan.hash_checks_mandatory);
+  EXPECT_EQ(in.version, install_plan.version);
+}
+
+TEST_F(OmahaResponseHandlerActionTest, HashChecksForHttpsTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.version = "a.b.c.d";
+  in.packages.push_back(
+      {.payload_urls = {"https://test.should/need/hash.checks.signed"},
+       .size = 12,
+       .hash = kPayloadHashHex,
+       .app_id = kPayloadAppId,
+       .fp = kPayloadFp1});
+  in.more_info_url = "http://more/info";
+  EXPECT_CALL(*(FakeSystemState::Get()->mock_request_params()),
+              IsUpdateUrlOfficial())
+      .WillRepeatedly(Return(true));
+  InstallPlan install_plan;
+  EXPECT_TRUE(DoTest(in, "", &install_plan));
+  EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+  EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
+  EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id);
+  EXPECT_EQ(in.packages[0].fp, install_plan.payloads[0].fp);
+  EXPECT_TRUE(install_plan.hash_checks_mandatory);
+  EXPECT_EQ(in.version, install_plan.version);
+}
+
+TEST_F(OmahaResponseHandlerActionTest, HashChecksForBothHttpAndHttpsTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.version = "a.b.c.d";
+  in.packages.push_back(
+      {.payload_urls = {"http://test.should.still/need/hash.checks",
+                        "https://test.should.still/need/hash.checks"},
+       .size = 12,
+       .hash = kPayloadHashHex,
+       .app_id = kPayloadAppId,
+       .fp = kPayloadFp1});
+  in.more_info_url = "http://more/info";
+  EXPECT_CALL(*(FakeSystemState::Get()->mock_request_params()),
+              IsUpdateUrlOfficial())
+      .WillRepeatedly(Return(true));
+  InstallPlan install_plan;
+  EXPECT_TRUE(DoTest(in, "", &install_plan));
+  EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+  EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
+  EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id);
+  EXPECT_EQ(in.packages[0].fp, install_plan.payloads[0].fp);
+  EXPECT_TRUE(install_plan.hash_checks_mandatory);
+  EXPECT_EQ(in.version, install_plan.version);
+}
+
+TEST_F(OmahaResponseHandlerActionTest,
+       ChangeToMoreStableVersionAndChannelTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.version = "1.0.0.0";
+  in.packages.push_back({.payload_urls = {"https://MoreStableChannelTest"},
+                         .size = 1,
+                         .hash = kPayloadHashHex});
+  in.more_info_url = "http://more/info";
+
+  // Create a uniquely named test directory.
+  base::ScopedTempDir tempdir;
+  ASSERT_TRUE(tempdir.CreateUniqueTempDir());
+
+  OmahaRequestParams params;
+  FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(false);
+  params.set_root(tempdir.GetPath().value());
+  params.set_current_channel("canary-channel");
+  EXPECT_TRUE(params.SetTargetChannel("stable-channel", true, nullptr));
+  params.UpdateDownloadChannel();
+  params.set_app_version("2.0.0.0");
+
+  FakeSystemState::Get()->set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_TRUE(DoTest(in, "", &install_plan));
+  EXPECT_TRUE(install_plan.powerwash_required);
+}
+
+TEST_F(OmahaResponseHandlerActionTest,
+       ChangeToMoreStableVersionAndChannelPowerwashNotAllowedTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.version = "1.0.0.0";
+  in.packages.push_back({.payload_urls = {"https://MoreStableChannelTest"},
+                         .size = 1,
+                         .hash = kPayloadHashHex});
+  in.more_info_url = "http://more/info";
+
+  // Create a uniquely named test directory.
+  base::ScopedTempDir tempdir;
+  ASSERT_TRUE(tempdir.CreateUniqueTempDir());
+
+  OmahaRequestParams params;
+  FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(false);
+  params.set_root(tempdir.GetPath().value());
+  params.set_current_channel("canary-channel");
+  EXPECT_TRUE(params.SetTargetChannel("stable-channel", false, nullptr));
+  params.UpdateDownloadChannel();
+  params.set_app_version("2.0.0.0");
+
+  FakeSystemState::Get()->set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_TRUE(DoTest(in, "", &install_plan));
+  EXPECT_FALSE(install_plan.powerwash_required);
+}
+
+TEST_F(OmahaResponseHandlerActionTest,
+       ChangeToMoreStableChannelButNewerVersionTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.version = "12345.96.0.0";
+  in.packages.push_back({.payload_urls = {"https://ChannelDownVersionUp"},
+                         .size = 1,
+                         .hash = kPayloadHashHex});
+  in.more_info_url = "http://more/info";
+
+  // Create a uniquely named test directory.
+  base::ScopedTempDir tempdir;
+  ASSERT_TRUE(tempdir.CreateUniqueTempDir());
+
+  OmahaRequestParams params;
+  FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(false);
+  params.set_root(tempdir.GetPath().value());
+  params.set_current_channel("beta-channel");
+  EXPECT_TRUE(params.SetTargetChannel("stable-channel", true, nullptr));
+  params.UpdateDownloadChannel();
+  params.set_app_version("12345.48.0.0");
+
+  FakeSystemState::Get()->set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_TRUE(DoTest(in, "", &install_plan));
+  EXPECT_FALSE(install_plan.powerwash_required);
+}
+
+TEST_F(OmahaResponseHandlerActionTest,
+       ChangeToMoreStableChannelButSameVersionTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.version = "12345.0.0.0";
+  in.packages.push_back({.payload_urls = {"https://ChannelDownVersionUp"},
+                         .size = 1,
+                         .hash = kPayloadHashHex});
+  in.more_info_url = "http://more/info";
+
+  // Create a uniquely named test directory.
+  base::ScopedTempDir tempdir;
+  ASSERT_TRUE(tempdir.CreateUniqueTempDir());
+
+  OmahaRequestParams params;
+  FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(false);
+  params.set_root(tempdir.GetPath().value());
+  params.set_current_channel("beta-channel");
+  EXPECT_TRUE(params.SetTargetChannel("stable-channel", true, nullptr));
+  params.UpdateDownloadChannel();
+  params.set_app_version("12345.0.0.0");
+
+  FakeSystemState::Get()->set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_TRUE(DoTest(in, "", &install_plan));
+  EXPECT_FALSE(install_plan.powerwash_required);
+  EXPECT_FALSE(install_plan.rollback_data_save_requested);
+}
+
+// On an enrolled device, the rollback data restore should be attempted when
+// doing a powerwash and channel downgrade.
+TEST_F(OmahaResponseHandlerActionTest,
+       ChangeToMoreStableChannelEnrolledDataRestore) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.version = "12345.96.0.0";
+  in.packages.push_back({.payload_urls = {"https://ChannelDownEnrolled"},
+                         .size = 1,
+                         .hash = kPayloadHashHex});
+  in.more_info_url = "http://more/info";
+
+  // Create a uniquely named test directory.
+  base::ScopedTempDir tempdir;
+  ASSERT_TRUE(tempdir.CreateUniqueTempDir());
+
+  OmahaRequestParams params;
+  FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(true);
+  params.set_root(tempdir.GetPath().value());
+  params.set_current_channel("beta-channel");
+  EXPECT_TRUE(params.SetTargetChannel("stable-channel", true, nullptr));
+  params.UpdateDownloadChannel();
+  params.set_app_version("12347.48.0.0");
+
+  testing::NiceMock<policy::MockDevicePolicy> mock_device_policy;
+  EXPECT_CALL(mock_device_policy, IsEnterpriseEnrolled())
+      .WillOnce(Return(true));
+  FakeSystemState::Get()->set_device_policy(&mock_device_policy);
+
+  FakeSystemState::Get()->set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_TRUE(DoTest(in, "", &install_plan));
+  EXPECT_TRUE(install_plan.rollback_data_save_requested);
+}
+
+// Never attempt rollback data restore if the device is not enrolled.
+TEST_F(OmahaResponseHandlerActionTest,
+       ChangeToMoreStableChannelUnenrolledNoDataRestore) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.version = "12345.96.0.0";
+  in.packages.push_back({.payload_urls = {"https://ChannelDownEnrolled"},
+                         .size = 1,
+                         .hash = kPayloadHashHex});
+  in.more_info_url = "http://more/info";
+
+  // Create a uniquely named test directory.
+  base::ScopedTempDir tempdir;
+  ASSERT_TRUE(tempdir.CreateUniqueTempDir());
+
+  OmahaRequestParams params;
+  FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(true);
+  params.set_root(tempdir.GetPath().value());
+  params.set_current_channel("beta-channel");
+  EXPECT_TRUE(params.SetTargetChannel("stable-channel", true, nullptr));
+  params.UpdateDownloadChannel();
+  params.set_app_version("12347.48.0.0");
+
+  testing::NiceMock<policy::MockDevicePolicy> mock_device_policy;
+  EXPECT_CALL(mock_device_policy, IsEnterpriseEnrolled())
+      .WillOnce(Return(false));
+  FakeSystemState::Get()->set_device_policy(&mock_device_policy);
+
+  FakeSystemState::Get()->set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_TRUE(DoTest(in, "", &install_plan));
+  EXPECT_FALSE(install_plan.rollback_data_save_requested);
+}
+
+// Never attempt rollback data restore if powerwash is not allowed.
+TEST_F(OmahaResponseHandlerActionTest,
+       ChangeToMoreStableChannelNoPowerwashNoDataRestore) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.version = "12345.96.0.0";
+  in.packages.push_back(
+      {.payload_urls = {"https://URL"}, .size = 1, .hash = kPayloadHashHex});
+  in.more_info_url = "http://more/info";
+
+  // Create a uniquely named test directory.
+  base::ScopedTempDir tempdir;
+  ASSERT_TRUE(tempdir.CreateUniqueTempDir());
+
+  OmahaRequestParams params;
+  FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(true);
+  params.set_root(tempdir.GetPath().value());
+  params.set_current_channel("beta-channel");
+  EXPECT_TRUE(params.SetTargetChannel("stable-channel", false, nullptr));
+  params.UpdateDownloadChannel();
+  params.set_app_version("12347.48.0.0");
+
+  FakeSystemState::Get()->set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_TRUE(DoTest(in, "", &install_plan));
+  EXPECT_FALSE(install_plan.rollback_data_save_requested);
+}
+
+TEST_F(OmahaResponseHandlerActionTest,
+       ChangeToLessStableVersionAndChannelTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.version = "2.0.0.0";
+  in.packages.push_back({.payload_urls = {"https://LessStableChannelTest"},
+                         .size = 15,
+                         .hash = kPayloadHashHex});
+  in.more_info_url = "http://more/info";
+
+  // Create a uniquely named test directory.
+  base::ScopedTempDir tempdir;
+  ASSERT_TRUE(tempdir.CreateUniqueTempDir());
+
+  OmahaRequestParams params;
+  FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(false);
+  params.set_root(tempdir.GetPath().value());
+  params.set_current_channel("stable-channel");
+  EXPECT_TRUE(params.SetTargetChannel("canary-channel", false, nullptr));
+  params.UpdateDownloadChannel();
+  params.set_app_version("1.0.0.0");
+
+  FakeSystemState::Get()->set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_TRUE(DoTest(in, "", &install_plan));
+  EXPECT_FALSE(install_plan.powerwash_required);
+}
+
+TEST_F(OmahaResponseHandlerActionTest, P2PUrlIsUsedAndHashChecksMandatory) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.version = "a.b.c.d";
+  in.packages.push_back(
+      {.payload_urls = {"https://would.not/cause/hash/checks"},
+       .size = 12,
+       .hash = kPayloadHashHex,
+       .app_id = kPayloadAppId,
+       .fp = kPayloadFp1});
+  in.more_info_url = "http://more/info";
+
+  OmahaRequestParams params;
+  // We're using a real OmahaRequestParams object here so we can't mock
+  // IsUpdateUrlOfficial(), but setting the update URL to the AutoUpdate test
+  // server will cause IsUpdateUrlOfficial() to return true.
+  params.set_update_url(constants::kOmahaDefaultAUTestURL);
+  FakeSystemState::Get()->set_request_params(&params);
+
+  EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(),
+              SetUsingP2PForDownloading(true));
+
+  string p2p_url = "http://9.8.7.6/p2p";
+  EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(), GetP2PUrl())
+      .WillRepeatedly(Return(p2p_url));
+  EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(),
+              GetUsingP2PForDownloading())
+      .WillRepeatedly(Return(true));
+
+  InstallPlan install_plan;
+  EXPECT_TRUE(DoTest(in, "", &install_plan));
+  EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
+  EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id);
+  EXPECT_EQ(in.packages[0].fp, install_plan.payloads[0].fp);
+  EXPECT_EQ(p2p_url, install_plan.download_url);
+  EXPECT_TRUE(install_plan.hash_checks_mandatory);
+}
+
+TEST_F(OmahaResponseHandlerActionTest, RollbackTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.packages.push_back({.payload_urls = {"https://RollbackTest"},
+                         .size = 1,
+                         .hash = kPayloadHashHex});
+  in.is_rollback = true;
+
+  // The rollback payload is 2 versions behind stable.
+  in.rollback_key_version.kernel_key = 24;
+  in.rollback_key_version.kernel = 23;
+  in.rollback_key_version.firmware_key = 22;
+  in.rollback_key_version.firmware = 21;
+
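+  // Key versions from rollback_allowed_milestones (4 for this test) milestones
+  // in the past; the max rollforward values should end up set to these.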
+  OmahaResponse::RollbackKeyVersion m4;
+  m4.firmware_key = 16;
+  m4.firmware = 15;
+  m4.kernel_key = 14;
+  m4.kernel = 13;
+
+  in.past_rollback_key_version = m4;
+
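+  // Minimum versions are packed as (key_version << 16) | version, so
+  // 0x00010002 means key version 1, version 2.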
+  FakeSystemState::Get()->fake_hardware()->SetMinKernelKeyVersion(0x00010002);
+  FakeSystemState::Get()->fake_hardware()->SetMinFirmwareKeyVersion(0x00030004);
+
+  FakeSystemState::Get()->fake_hardware()->SetMaxKernelKeyRollforward(
+      0xaaaaaaaa);
+  // TODO(crbug/783998): Add support for firmware when implemented.
+
+  OmahaRequestParams params;
+  params.set_rollback_allowed(true);
+  params.set_rollback_allowed_milestones(4);
+
+  FakeSystemState::Get()->set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_TRUE(DoTest(in, "", &install_plan));
+  EXPECT_TRUE(install_plan.is_rollback);
+
+  // The max rollforward should be set to the key versions of the image from
+  // rollback_allowed_milestones (4 for this test) milestones in the past.
+  const uint32_t expected_max_kernel_rollforward =
+      static_cast<uint32_t>(m4.kernel_key) << 16 |
+      static_cast<uint32_t>(m4.kernel);
+  EXPECT_EQ(
+      expected_max_kernel_rollforward,
+      FakeSystemState::Get()->fake_hardware()->GetMaxKernelKeyRollforward());
+  // TODO(crbug/783998): Add support for firmware when implemented.
+}
+
+TEST_F(OmahaResponseHandlerActionTest, RollbackKernelVersionErrorTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.packages.push_back({.payload_urls = {"https://RollbackTest"},
+                         .size = 1,
+                         .hash = kPayloadHashHex});
+  in.is_rollback = true;
+  in.rollback_key_version.kernel_key = 1;
+  in.rollback_key_version.kernel = 1;  // This is lower than the minimum.
+  in.rollback_key_version.firmware_key = 3;
+  in.rollback_key_version.firmware = 4;
+
+  OmahaResponse::RollbackKeyVersion m4;
+  m4.firmware_key = 16;
+  m4.firmware = 15;
+  m4.kernel_key = 14;
+  m4.kernel = 13;
+  in.past_rollback_key_version = m4;
+
+  FakeSystemState::Get()->fake_hardware()->SetMinKernelKeyVersion(0x00010002);
+  FakeSystemState::Get()->fake_hardware()->SetMinFirmwareKeyVersion(0x00030004);
+  const uint32_t current_kernel_max_rollforward = 0xaaaaaaaa;
+  FakeSystemState::Get()->fake_hardware()->SetMaxKernelKeyRollforward(
+      current_kernel_max_rollforward);
+
+  OmahaRequestParams params;
+  params.set_rollback_allowed(true);
+  params.set_rollback_allowed_milestones(4);
+
+  FakeSystemState::Get()->set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_FALSE(DoTest(in, "", &install_plan));
+
+  // Max rollforward is not changed in error cases.
+  EXPECT_EQ(
+      current_kernel_max_rollforward,
+      FakeSystemState::Get()->fake_hardware()->GetMaxKernelKeyRollforward());
+  // TODO(crbug/783998): Add support for firmware when implemented.
+}
+
+TEST_F(OmahaResponseHandlerActionTest, RollbackFirmwareVersionErrorTest) {
+  // TODO(crbug/783998): Add handling for max_firmware_rollforward when
+  // implemented.
+  OmahaResponse in;
+  in.update_exists = true;
+  in.packages.push_back({.payload_urls = {"https://RollbackTest"},
+                         .size = 1,
+                         .hash = kPayloadHashHex});
+  in.is_rollback = true;
+  in.rollback_key_version.kernel_key = 1;
+  in.rollback_key_version.kernel = 2;
+  in.rollback_key_version.firmware_key = 3;
+  in.rollback_key_version.firmware = 3;  // This is lower than the minimum.
+
+  FakeSystemState::Get()->fake_hardware()->SetMinKernelKeyVersion(0x00010002);
+  FakeSystemState::Get()->fake_hardware()->SetMinFirmwareKeyVersion(0x00030004);
+
+  OmahaRequestParams params;
+  params.set_rollback_allowed(true);
+  params.set_rollback_allowed_milestones(4);
+
+  FakeSystemState::Get()->set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_FALSE(DoTest(in, "", &install_plan));
+}
+
+TEST_F(OmahaResponseHandlerActionTest, RollbackNotRollbackTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.packages.push_back({.payload_urls = {"https://RollbackTest"},
+                         .size = 1,
+                         .hash = kPayloadHashHex});
+  in.is_rollback = false;
+
+  const uint32_t current_kernel_max_rollforward = 0xaaaaaaaa;
+  FakeSystemState::Get()->fake_hardware()->SetMaxKernelKeyRollforward(
+      current_kernel_max_rollforward);
+
+  OmahaRequestParams params;
+  params.set_rollback_allowed(true);
+  params.set_rollback_allowed_milestones(4);
+
+  FakeSystemState::Get()->set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_TRUE(DoTest(in, "", &install_plan));
+  EXPECT_FALSE(install_plan.is_rollback);
+
+  // Max rollforward is not changed for non-rollback cases.
+  EXPECT_EQ(
+      current_kernel_max_rollforward,
+      FakeSystemState::Get()->fake_hardware()->GetMaxKernelKeyRollforward());
+  // TODO(crbug/783998): Add support for firmware when implemented.
+}
+
+TEST_F(OmahaResponseHandlerActionTest, RollbackNotAllowedTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.packages.push_back({.payload_urls = {"https://RollbackTest"},
+                         .size = 1,
+                         .hash = kPayloadHashHex});
+  in.is_rollback = true;
+
+  OmahaRequestParams params;
+  params.set_rollback_allowed(false);
+  params.set_rollback_allowed_milestones(4);
+
+  const uint32_t current_kernel_max_rollforward = 0xaaaaaaaa;
+  FakeSystemState::Get()->fake_hardware()->SetMaxKernelKeyRollforward(
+      current_kernel_max_rollforward);
+
+  FakeSystemState::Get()->set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_FALSE(DoTest(in, "", &install_plan));
+
+  // This case generates an error, so do not update the max rollforward.
+  EXPECT_EQ(
+      current_kernel_max_rollforward,
+      FakeSystemState::Get()->fake_hardware()->GetMaxKernelKeyRollforward());
+  // TODO(crbug/783998): Add support for firmware when implemented.
+}
+
+TEST_F(OmahaResponseHandlerActionTest, NormalUpdateWithZeroMilestonesAllowed) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.packages.push_back({.payload_urls = {"https://RollbackTest"},
+                         .size = 1,
+                         .hash = kPayloadHashHex});
+  in.is_rollback = false;
+
+  OmahaRequestParams params;
+  params.set_rollback_allowed(true);
+  params.set_rollback_allowed_milestones(0);
+
+  const uint32_t current_kernel_max_rollforward = 0xaaaaaaaa;
+  FakeSystemState::Get()->fake_hardware()->SetMaxKernelKeyRollforward(
+      current_kernel_max_rollforward);
+
+  FakeSystemState::Get()->set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_TRUE(DoTest(in, "", &install_plan));
+
+  // When allowed_milestones is 0, this is set to infinity.
+  EXPECT_EQ(
+      kRollforwardInfinity,
+      FakeSystemState::Get()->fake_hardware()->GetMaxKernelKeyRollforward());
+  // TODO(crbug/783998): Add support for firmware when implemented.
+}
+
+TEST_F(OmahaResponseHandlerActionTest, SystemVersionTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.version = "a.b.c.d";
+  in.packages.push_back({.payload_urls = {"http://package/1"},
+                         .size = 1,
+                         .hash = kPayloadHashHex,
+                         .app_id = kPayloadAppId,
+                         .fp = kPayloadFp1});
+  in.packages.push_back({.payload_urls = {"http://package/2"},
+                         .size = 2,
+                         .hash = kPayloadHashHex,
+                         .app_id = kPayloadAppId,
+                         .fp = kPayloadFp2});
+  in.more_info_url = "http://more/info";
+  InstallPlan install_plan;
+  EXPECT_TRUE(DoTest(in, "", &install_plan));
+  EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+  EXPECT_EQ(2u, install_plan.payloads.size());
+  EXPECT_EQ(in.packages[0].size, install_plan.payloads[0].size);
+  EXPECT_EQ(in.packages[1].size, install_plan.payloads[1].size);
+  EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
+  EXPECT_EQ(expected_hash_, install_plan.payloads[1].hash);
+  EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id);
+  EXPECT_EQ(in.packages[1].app_id, install_plan.payloads[1].app_id);
+  EXPECT_EQ(in.packages[0].fp, install_plan.payloads[0].fp);
+  EXPECT_EQ(in.packages[1].fp, install_plan.payloads[1].fp);
+  EXPECT_EQ(in.version, install_plan.version);
+}
+
+TEST_F(OmahaResponseHandlerActionTest, TestDeferredByPolicy) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.version = "a.b.c.d";
+  in.packages.push_back({.payload_urls = {"http://foo/the_update_a.b.c.d.tgz"},
+                         .size = 12,
+                         .hash = kPayloadHashHex,
+                         .app_id = kPayloadAppId,
+                         .fp = kPayloadFp1});
+  // Setup the UpdateManager to disallow the update.
+  MockPolicy* mock_policy = new MockPolicy();
+  FakeUpdateManager* fake_update_manager =
+      FakeSystemState::Get()->fake_update_manager();
+  fake_update_manager->set_policy(mock_policy);
+  EXPECT_CALL(*mock_policy, UpdateCanBeApplied(_, _, _, _, _))
+      .WillOnce(
+          DoAll(SetArgPointee<3>(ErrorCode::kOmahaUpdateDeferredPerPolicy),
+                Return(EvalStatus::kSucceeded)));
+  // Perform the Action. It should "fail" with kOmahaUpdateDeferredPerPolicy.
+  InstallPlan install_plan;
+  EXPECT_FALSE(DoTest(in, "", &install_plan));
+  EXPECT_EQ(ErrorCode::kOmahaUpdateDeferredPerPolicy, action_result_code_);
+  // Verify that DoTest() didn't set the output install plan.
+  EXPECT_EQ("", install_plan.version);
+  // Now verify the InstallPlan that was generated.
+  install_plan = *delegate_.response_handler_action_install_plan_;
+  EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+  EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
+  EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id);
+  EXPECT_EQ(in.packages[0].fp, install_plan.payloads[0].fp);
+  EXPECT_EQ(1U, install_plan.target_slot);
+  EXPECT_EQ(in.version, install_plan.version);
+}
+
+}  // namespace chromeos_update_engine
diff --git a/power_manager_android.cc b/cros/omaha_utils.cc
similarity index 63%
rename from power_manager_android.cc
rename to cros/omaha_utils.cc
index 63a0351..fc05cb9 100644
--- a/power_manager_android.cc
+++ b/cros/omaha_utils.cc
@@ -14,23 +14,24 @@
 // limitations under the License.
 //
 
-#include "update_engine/power_manager_android.h"
-
-#include <memory>
+#include "update_engine/cros/omaha_utils.h"
 
 #include <base/logging.h>
+#include <base/strings/string_number_conversions.h>
 
 namespace chromeos_update_engine {
 
-namespace power_manager {
-std::unique_ptr<PowerManagerInterface> CreatePowerManager() {
-  return std::unique_ptr<PowerManagerInterface>(new PowerManagerAndroid());
-}
-}  // namespace power_manager
+const EolDate kEolDateInvalid = -9999;
 
-bool PowerManagerAndroid::RequestReboot() {
-  LOG(WARNING) << "PowerManager not implemented.";
-  return false;
+std::string EolDateToString(EolDate eol_date) {
+  return base::NumberToString(eol_date);
+}
+
+EolDate StringToEolDate(const std::string& eol_date) {
+  EolDate date = kEolDateInvalid;
+  if (!base::StringToInt64(eol_date, &date))
+    return kEolDateInvalid;
+  return date;
 }
 
 }  // namespace chromeos_update_engine
diff --git a/cros/omaha_utils.h b/cros/omaha_utils.h
new file mode 100644
index 0000000..6741635
--- /dev/null
+++ b/cros/omaha_utils.h
@@ -0,0 +1,39 @@
+//
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_CROS_OMAHA_UTILS_H_
+#define UPDATE_ENGINE_CROS_OMAHA_UTILS_H_
+
+#include <string>
+
+namespace chromeos_update_engine {
+
+using EolDate = int64_t;
+
+// |EolDate| indicating an invalid end-of-life date.
+extern const EolDate kEolDateInvalid;
+
+// Returns the string representation of the |eol_date|.
+std::string EolDateToString(EolDate eol_date);
+
+// Converts the end-of-life date string to an EolDate numeric value. In case
+// of an invalid string, the default |kEolDateInvalid| value will be used
+// instead.
+EolDate StringToEolDate(const std::string& eol_date);
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_CROS_OMAHA_UTILS_H_
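
As a quick illustration of the helpers declared above (a minimal sketch, not part of this change; the date value is hypothetical):

    #include <string>

    #include "update_engine/cros/omaha_utils.h"

    namespace chromeos_update_engine {

    void EolDateExample() {
      // A numeric string round-trips through the conversion helpers.
      EolDate date = StringToEolDate("18262");   // hypothetical days-since-epoch value
      std::string str = EolDateToString(date);   // yields "18262" again

      // Anything that does not parse as an int64 maps to the invalid sentinel.
      bool is_invalid = (StringToEolDate("not-a-date") == kEolDateInvalid);  // true
      (void)str;
      (void)is_invalid;
    }

    }  // namespace chromeos_update_engine
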
diff --git a/omaha_utils_unittest.cc b/cros/omaha_utils_unittest.cc
similarity index 61%
rename from omaha_utils_unittest.cc
rename to cros/omaha_utils_unittest.cc
index 8ceb76b..f89f690 100644
--- a/omaha_utils_unittest.cc
+++ b/cros/omaha_utils_unittest.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/omaha_utils.h"
+#include "update_engine/cros/omaha_utils.h"
 
 #include <gtest/gtest.h>
 #include <vector>
@@ -23,20 +23,17 @@
 
 class OmahaUtilsTest : public ::testing::Test {};
 
-TEST(OmahaUtilsTest, EolStatusTest) {
-  EXPECT_EQ(EolStatus::kEol, StringToEolStatus("eol"));
-
+TEST(OmahaUtilsTest, EolDateTest) {
   // Supported values are converted back and forth properly.
-  const std::vector<EolStatus> tests = {
-      EolStatus::kSupported, EolStatus::kSecurityOnly, EolStatus::kEol};
-  for (EolStatus eol_status : tests) {
-    EXPECT_EQ(eol_status, StringToEolStatus(EolStatusToString(eol_status)))
-        << "The StringToEolStatus() was " << EolStatusToString(eol_status);
+  const std::vector<EolDate> tests = {kEolDateInvalid, -1, 0, 1};
+  for (EolDate eol_date : tests) {
+    EXPECT_EQ(eol_date, StringToEolDate(EolDateToString(eol_date)))
+        << "The StringToEolDate() was " << EolDateToString(eol_date);
   }
 
   // Invalid values are assumed as "supported".
-  EXPECT_EQ(EolStatus::kSupported, StringToEolStatus(""));
-  EXPECT_EQ(EolStatus::kSupported, StringToEolStatus("hello, world!"));
+  EXPECT_EQ(kEolDateInvalid, StringToEolDate(""));
+  EXPECT_EQ(kEolDateInvalid, StringToEolDate("hello, world!"));
 }
 
 }  // namespace chromeos_update_engine
diff --git a/p2p_manager.cc b/cros/p2p_manager.cc
similarity index 96%
rename from p2p_manager.cc
rename to cros/p2p_manager.cc
index 6720908..19e2600 100644
--- a/p2p_manager.cc
+++ b/cros/p2p_manager.cc
@@ -23,7 +23,7 @@
 #define _BSD_SOURCE
 #endif
 
-#include "update_engine/p2p_manager.h"
+#include "update_engine/cros/p2p_manager.h"
 
 #include <errno.h>
 #include <fcntl.h>
@@ -51,6 +51,7 @@
 #include <base/strings/stringprintf.h>
 
 #include "update_engine/common/subprocess.h"
+#include "update_engine/common/system_state.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/update_manager/policy.h"
 #include "update_engine/update_manager/update_manager.h"
@@ -65,7 +66,6 @@
 using chromeos_update_manager::EvalStatus;
 using chromeos_update_manager::Policy;
 using chromeos_update_manager::UpdateManager;
-using std::map;
 using std::pair;
 using std::string;
 using std::unique_ptr;
@@ -116,7 +116,6 @@
 class P2PManagerImpl : public P2PManager {
  public:
   P2PManagerImpl(Configuration* configuration,
-                 ClockInterface* clock,
                  UpdateManager* update_manager,
                  const string& file_extension,
                  const int num_files_to_keep,
@@ -171,9 +170,6 @@
   // Configuration object.
   unique_ptr<Configuration> configuration_;
 
-  // Object for telling the time.
-  ClockInterface* clock_;
-
   // A pointer to the global Update Manager.
   UpdateManager* update_manager_;
 
@@ -212,13 +208,11 @@
 const char P2PManagerImpl::kTmpExtension[] = ".tmp";
 
 P2PManagerImpl::P2PManagerImpl(Configuration* configuration,
-                               ClockInterface* clock,
                                UpdateManager* update_manager,
                                const string& file_extension,
                                const int num_files_to_keep,
                                const TimeDelta& max_file_age)
-    : clock_(clock),
-      update_manager_(update_manager),
+    : update_manager_(update_manager),
       file_extension_(file_extension),
       num_files_to_keep_(num_files_to_keep),
       max_file_age_(max_file_age) {
@@ -249,12 +243,12 @@
 
 bool P2PManagerImpl::EnsureP2P(bool should_be_running) {
   int return_code = 0;
-  string output;
+  string stderr;
 
   may_be_running_ = true;  // Unless successful, we must be conservative.
 
   vector<string> args = configuration_->GetInitctlArgs(should_be_running);
-  if (!Subprocess::SynchronousExec(args, &return_code, &output)) {
+  if (!Subprocess::SynchronousExec(args, &return_code, nullptr, &stderr)) {
     LOG(ERROR) << "Error spawning " << utils::StringVectorToString(args);
     return false;
   }
@@ -268,7 +262,7 @@
     const char* expected_error_message =
         should_be_running ? "initctl: Job is already running: p2p\n"
                           : "initctl: Unknown instance \n";
-    if (output != expected_error_message)
+    if (stderr != expected_error_message)
       return false;
   }
 
@@ -345,8 +339,9 @@
     // If instructed to keep only files younger than a given age
     // (|max_file_age_| != 0), delete files satisfying this criteria
     // right now. Otherwise add it to a list we'll consider for later.
-    if (clock_ != nullptr && max_file_age_ != TimeDelta() &&
-        clock_->GetWallclockTime() - time > max_file_age_) {
+    if (max_file_age_ != TimeDelta() &&
+        SystemState::Get()->clock()->GetWallclockTime() - time >
+            max_file_age_) {
       if (!DeleteP2PFile(name, "file too old"))
         deletion_failed = true;
     } else {
@@ -723,13 +718,11 @@
 }
 
 P2PManager* P2PManager::Construct(Configuration* configuration,
-                                  ClockInterface* clock,
                                   UpdateManager* update_manager,
                                   const string& file_extension,
                                   const int num_files_to_keep,
                                   const TimeDelta& max_file_age) {
   return new P2PManagerImpl(configuration,
-                            clock,
                             update_manager,
                             file_extension,
                             num_files_to_keep,
diff --git a/p2p_manager.h b/cros/p2p_manager.h
similarity index 97%
rename from p2p_manager.h
rename to cros/p2p_manager.h
index ef62f0d..bef7806 100644
--- a/p2p_manager.h
+++ b/cros/p2p_manager.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_P2P_MANAGER_H_
-#define UPDATE_ENGINE_P2P_MANAGER_H_
+#ifndef UPDATE_ENGINE_CROS_P2P_MANAGER_H_
+#define UPDATE_ENGINE_CROS_P2P_MANAGER_H_
 
 #include <string>
 #include <vector>
@@ -27,7 +27,6 @@
 #include <policy/device_policy.h>
 #include <policy/libpolicy.h>
 
-#include "update_engine/common/clock_interface.h"
 #include "update_engine/common/prefs_interface.h"
 #include "update_engine/update_manager/update_manager.h"
 
@@ -174,7 +173,6 @@
   // performing housekeeping (pass zero to allow files of any age).
   static P2PManager* Construct(
       Configuration* configuration,
-      ClockInterface* clock,
       chromeos_update_manager::UpdateManager* update_manager,
       const std::string& file_extension,
       const int num_files_to_keep,
@@ -183,4 +181,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_P2P_MANAGER_H_
+#endif  // UPDATE_ENGINE_CROS_P2P_MANAGER_H_
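
For reference, a hedged sketch of building a P2PManager with the updated Construct() signature (the surrounding wiring is assumed; Configuration here is taken to be the nested P2PManager::Configuration type, and wall-clock time is now read internally via SystemState::Get()->clock()):

    #include <memory>

    #include <base/time/time.h>

    #include "update_engine/cros/p2p_manager.h"

    std::unique_ptr<chromeos_update_engine::P2PManager> MakeP2PManager(
        chromeos_update_engine::P2PManager::Configuration* configuration,
        chromeos_update_manager::UpdateManager* update_manager) {
      // Note the ClockInterface* parameter is gone from Construct().
      return std::unique_ptr<chromeos_update_engine::P2PManager>(
          chromeos_update_engine::P2PManager::Construct(
              configuration,
              update_manager,
              "cros_au",                       // file extension for shared files
              3,                               // num_files_to_keep
              base::TimeDelta::FromDays(5)));  // max_file_age (zero = any age)
    }
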
diff --git a/p2p_manager_unittest.cc b/cros/p2p_manager_unittest.cc
similarity index 94%
rename from p2p_manager_unittest.cc
rename to cros/p2p_manager_unittest.cc
index 5771ec1..17c4886 100644
--- a/p2p_manager_unittest.cc
+++ b/cros/p2p_manager_unittest.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/p2p_manager.h"
+#include "update_engine/cros/p2p_manager.h"
 
 #include <dirent.h>
 #include <fcntl.h>
@@ -30,8 +30,13 @@
 #include <base/bind.h>
 #include <base/callback.h>
 #include <base/files/file_util.h>
+#if BASE_VER < 780000  // Android
 #include <base/message_loop/message_loop.h>
+#endif  // BASE_VER < 780000
 #include <base/strings/stringprintf.h>
+#if BASE_VER >= 780000  // CrOS
+#include <base/task/single_thread_task_executor.h>
+#endif  // BASE_VER >= 780000
 #include <brillo/asynchronous_signal_handler.h>
 #include <brillo/message_loops/base_message_loop.h>
 #include <brillo/message_loops/message_loop.h>
@@ -41,12 +46,12 @@
 #include <policy/libpolicy.h>
 #include <policy/mock_device_policy.h>
 
-#include "update_engine/common/fake_clock.h"
 #include "update_engine/common/prefs.h"
 #include "update_engine/common/subprocess.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/fake_p2p_manager_configuration.h"
+#include "update_engine/cros/fake_p2p_manager_configuration.h"
+#include "update_engine/cros/fake_system_state.h"
 #include "update_engine/update_manager/fake_update_manager.h"
 #include "update_engine/update_manager/mock_policy.h"
 
@@ -67,12 +72,13 @@
 // done.
 class P2PManagerTest : public testing::Test {
  protected:
-  P2PManagerTest() : fake_um_(&fake_clock_) {}
-  ~P2PManagerTest() override {}
+  P2PManagerTest() = default;
+  ~P2PManagerTest() override = default;
 
   // Derived from testing::Test.
   void SetUp() override {
     loop_.SetAsCurrent();
+    FakeSystemState::CreateInstance();
     async_signal_handler_.Init();
     subprocess_.Init(&async_signal_handler_);
     test_conf_ = new FakeP2PManagerConfiguration();
@@ -80,27 +86,30 @@
     // Allocate and install a mock policy implementation in the fake Update
     // Manager.  Note that the FakeUpdateManager takes ownership of the policy
     // object.
-    mock_policy_ = new chromeos_update_manager::MockPolicy(&fake_clock_);
+    mock_policy_ = new chromeos_update_manager::MockPolicy();
     fake_um_.set_policy(mock_policy_);
 
     // Construct the P2P manager under test.
     manager_.reset(P2PManager::Construct(test_conf_,
-                                         &fake_clock_,
                                          &fake_um_,
                                          "cros_au",
                                          3,
                                          TimeDelta::FromDays(5)));
   }
 
+#if BASE_VER < 780000  // Android
   base::MessageLoopForIO base_loop_;
   brillo::BaseMessageLoop loop_{&base_loop_};
+#else   // CrOS
+  base::SingleThreadTaskExecutor base_loop_{base::MessagePumpType::IO};
+  brillo::BaseMessageLoop loop_{base_loop_.task_runner()};
+#endif  // BASE_VER < 780000
   brillo::AsynchronousSignalHandler async_signal_handler_;
   Subprocess subprocess_;
 
   // The P2PManager::Configuration instance used for testing.
   FakeP2PManagerConfiguration* test_conf_;
 
-  FakeClock fake_clock_;
   chromeos_update_manager::MockPolicy* mock_policy_ = nullptr;
   chromeos_update_manager::FakeUpdateManager fake_um_;
 
@@ -139,7 +148,6 @@
   // will be freed.
   test_conf_ = new FakeP2PManagerConfiguration();
   manager_.reset(P2PManager::Construct(test_conf_,
-                                       &fake_clock_,
                                        &fake_um_,
                                        "cros_au",
                                        3,
@@ -197,14 +205,14 @@
 
   // Set the clock just so files with a timestamp before |cutoff_time|
   // will be deleted at housekeeping.
-  fake_clock_.SetWallclockTime(cutoff_time + age_limit);
+  FakeSystemState::Get()->fake_clock()->SetWallclockTime(cutoff_time +
+                                                         age_limit);
 
   // Specifically pass 0 for |num_files_to_keep| to allow any number of files.
   // Note that we need to reallocate the test_conf_ member, whose currently
   // aliased object will be freed.
   test_conf_ = new FakeP2PManagerConfiguration();
   manager_.reset(P2PManager::Construct(test_conf_,
-                                       &fake_clock_,
                                        &fake_um_,
                                        "cros_au",
                                        0 /* num_files_to_keep */,
@@ -350,7 +358,7 @@
 
 // Check that sharing a *new* file works.
 TEST_F(P2PManagerTest, ShareFile) {
-  const int kP2PTestFileSize = 1000 * 1000;  // 1 MB
+  const int kP2PTestFileSize = 1000 * 8;  // 8 KB
 
   EXPECT_TRUE(manager_->FileShare("foo", kP2PTestFileSize));
   EXPECT_EQ(manager_->FileGetPath("foo"),
@@ -369,7 +377,7 @@
 
 // Check that making a shared file visible, does what is expected.
 TEST_F(P2PManagerTest, MakeFileVisible) {
-  const int kP2PTestFileSize = 1000 * 1000;  // 1 MB
+  const int kP2PTestFileSize = 1000 * 8;  // 8 KB
 
   // First, check that it's not visible.
   manager_->FileShare("foo", kP2PTestFileSize);
diff --git a/payload_state.cc b/cros/payload_state.cc
similarity index 91%
rename from payload_state.cc
rename to cros/payload_state.cc
index 3ba6391..d417ef5 100644
--- a/payload_state.cc
+++ b/cros/payload_state.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/payload_state.h"
+#include "update_engine/cros/payload_state.h"
 
 #include <algorithm>
 #include <string>
@@ -29,14 +29,15 @@
 #include "update_engine/common/constants.h"
 #include "update_engine/common/error_code_utils.h"
 #include "update_engine/common/hardware_interface.h"
+#include "update_engine/common/metrics_reporter_interface.h"
 #include "update_engine/common/prefs.h"
+#include "update_engine/common/system_state.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/connection_manager_interface.h"
-#include "update_engine/metrics_reporter_interface.h"
+#include "update_engine/cros/connection_manager_interface.h"
+#include "update_engine/cros/omaha_request_params.h"
+#include "update_engine/cros/update_attempter.h"
 #include "update_engine/metrics_utils.h"
-#include "update_engine/omaha_request_params.h"
 #include "update_engine/payload_consumer/install_plan.h"
-#include "update_engine/system_state.h"
 
 using base::Time;
 using base::TimeDelta;
@@ -60,6 +61,8 @@
 
 PayloadState::PayloadState()
     : prefs_(nullptr),
+      powerwash_safe_prefs_(nullptr),
+      excluder_(nullptr),
       using_p2p_for_downloading_(false),
       p2p_num_attempts_(0),
       payload_attempt_number_(0),
@@ -75,10 +78,10 @@
     total_bytes_downloaded_[i] = current_bytes_downloaded_[i] = 0;
 }
 
-bool PayloadState::Initialize(SystemState* system_state) {
-  system_state_ = system_state;
-  prefs_ = system_state_->prefs();
-  powerwash_safe_prefs_ = system_state_->powerwash_safe_prefs();
+bool PayloadState::Initialize() {
+  prefs_ = SystemState::Get()->prefs();
+  powerwash_safe_prefs_ = SystemState::Get()->powerwash_safe_prefs();
+  excluder_ = SystemState::Get()->update_attempter()->GetExcluder();
   LoadResponseSignature();
   LoadPayloadAttemptNumber();
   LoadFullPayloadAttemptNumber();
@@ -193,7 +196,7 @@
 
   attempt_type_ = attempt_type;
 
-  ClockInterface* clock = system_state_->clock();
+  const auto* clock = SystemState::Get()->clock();
   attempt_start_time_boot_ = clock->GetBootTime();
   attempt_start_time_monotonic_ = clock->GetMonotonicTime();
   attempt_num_bytes_downloaded_ = 0;
@@ -202,7 +205,7 @@
   ConnectionType network_connection_type;
   ConnectionTethering tethering;
   ConnectionManagerInterface* connection_manager =
-      system_state_->connection_manager();
+      SystemState::Get()->connection_manager();
   if (!connection_manager->GetConnectionProperties(&network_connection_type,
                                                    &tethering)) {
     LOG(ERROR) << "Failed to determine connection type.";
@@ -232,7 +235,7 @@
 void PayloadState::UpdateSucceeded() {
   // Send the relevant metrics that are tracked in this class to UMA.
   CalculateUpdateDurationUptime();
-  SetUpdateTimestampEnd(system_state_->clock()->GetWallclockTime());
+  SetUpdateTimestampEnd(SystemState::Get()->clock()->GetWallclockTime());
 
   switch (attempt_type_) {
     case AttemptType::kUpdate:
@@ -242,7 +245,7 @@
       break;
 
     case AttemptType::kRollback:
-      system_state_->metrics_reporter()->ReportRollbackMetrics(
+      SystemState::Get()->metrics_reporter()->ReportRollbackMetrics(
           metrics::RollbackResult::kSuccess);
       break;
   }
@@ -252,7 +255,7 @@
   SetNumResponsesSeen(0);
   SetPayloadIndex(0);
 
-  metrics_utils::SetSystemUpdatedMarker(system_state_->clock(), prefs_);
+  metrics_utils::SetSystemUpdatedMarker(SystemState::Get()->clock(), prefs_);
 }
 
 void PayloadState::UpdateFailed(ErrorCode error) {
@@ -275,7 +278,7 @@
       break;
 
     case AttemptType::kRollback:
-      system_state_->metrics_reporter()->ReportRollbackMetrics(
+      SystemState::Get()->metrics_reporter()->ReportRollbackMetrics(
           metrics::RollbackResult::kFailed);
       break;
   }
@@ -308,6 +311,7 @@
     case ErrorCode::kUnsupportedMinorPayloadVersion:
     case ErrorCode::kPayloadTimestampError:
     case ErrorCode::kVerityCalculationError:
+      ExcludeCurrentPayload();
       IncrementUrlIndex();
       break;
 
@@ -370,6 +374,7 @@
     case ErrorCode::kUnresolvedHostRecovered:
     case ErrorCode::kNotEnoughSpace:
     case ErrorCode::kDeviceCorrupted:
+    case ErrorCode::kPackageExcludedFromUpdate:
       LOG(INFO) << "Not incrementing URL index or failure count for this error";
       break;
 
@@ -403,7 +408,7 @@
               << "will happen from local peer (via p2p).";
     return false;
   }
-  if (system_state_->request_params()->interactive()) {
+  if (SystemState::Get()->request_params()->interactive()) {
     LOG(INFO) << "Payload backoff disabled for interactive update checks.";
     return false;
   }
@@ -419,7 +424,7 @@
     }
   }
 
-  if (!system_state_->hardware()->IsOfficialBuild() &&
+  if (!SystemState::Get()->hardware()->IsOfficialBuild() &&
       !prefs_->Exists(kPrefsNoIgnoreBackoff)) {
     // Backoffs are needed only for official builds. We do not want any delays
     // or update failures due to backoffs during testing or development. Unless
@@ -448,7 +453,7 @@
 }
 
 void PayloadState::Rollback() {
-  SetRollbackVersion(system_state_->request_params()->app_version());
+  SetRollbackVersion(SystemState::Get()->request_params()->app_version());
   AttemptStarted(AttemptType::kRollback);
 }
 
@@ -458,6 +463,7 @@
 }
 
 void PayloadState::IncrementFullPayloadAttemptNumber() {
+  DCHECK(payload_index_ < response_.packages.size());
   // Update the payload attempt number for full payloads and the backoff time.
   if (response_.packages[payload_index_].is_delta) {
     LOG(INFO) << "Not incrementing payload attempt number for delta payloads";
@@ -470,10 +476,9 @@
 }
 
 void PayloadState::IncrementUrlIndex() {
+  DCHECK(payload_index_ < candidate_urls_.size());
   size_t next_url_index = url_index_ + 1;
-  size_t max_url_size = 0;
-  for (const auto& urls : candidate_urls_)
-    max_url_size = std::max(max_url_size, urls.size());
+  size_t max_url_size = candidate_urls_[payload_index_].size();
   if (next_url_index < max_url_size) {
     LOG(INFO) << "Incrementing the URL index for next attempt";
     SetUrlIndex(next_url_index);
@@ -502,10 +507,33 @@
   } else {
     LOG(INFO) << "Reached max number of failures for Url" << GetUrlIndex()
               << ". Trying next available URL";
+    ExcludeCurrentPayload();
     IncrementUrlIndex();
   }
 }
 
+void PayloadState::ExcludeCurrentPayload() {
+  if (payload_index_ >= response_.packages.size()) {
+    LOG(INFO) << "Skipping exclusion of the current payload.";
+    return;
+  }
+  const auto& package = response_.packages[payload_index_];
+  if (!package.can_exclude) {
+    LOG(INFO) << "Not excluding as marked non-excludable for package hash="
+              << package.hash;
+    return;
+  }
+  auto exclusion_name = utils::GetExclusionName(GetCurrentUrl());
+  if (!excluder_->Exclude(exclusion_name))
+    LOG(WARNING) << "Failed to exclude "
+                 << " Package Hash=" << package.hash
+                 << " CurrentUrl=" << GetCurrentUrl();
+  else
+    LOG(INFO) << "Excluded "
+              << " Package Hash=" << package.hash
+              << " CurrentUrl=" << GetCurrentUrl();
+}
+
 void PayloadState::UpdateBackoffExpiryTime() {
   if (response_.disable_payload_backoff) {
     LOG(INFO) << "Resetting backoff expiry time as payload backoff is disabled";
@@ -583,7 +611,7 @@
       return kPayloadTypeDelta;
     }
   }
-  OmahaRequestParams* params = system_state_->request_params();
+  OmahaRequestParams* params = SystemState::Get()->request_params();
   if (params->delta_okay()) {
     return kPayloadTypeFull;
   }
@@ -591,10 +619,6 @@
   return kPayloadTypeForcedFull;
 }
 
-// TODO(zeuthen): Currently we don't report the UpdateEngine.Attempt.*
-// metrics if the attempt ends abnormally, e.g. if the update_engine
-// process crashes or the device is rebooted. See
-// http://crbug.com/357676
 void PayloadState::CollectAndReportAttemptMetrics(ErrorCode code) {
   int attempt_number = GetPayloadAttemptNumber();
 
@@ -604,7 +628,7 @@
 
   int64_t payload_bytes_downloaded = attempt_num_bytes_downloaded_;
 
-  ClockInterface* clock = system_state_->clock();
+  const auto* clock = SystemState::Get()->clock();
   TimeDelta duration = clock->GetBootTime() - attempt_start_time_boot_;
   TimeDelta duration_uptime =
       clock->GetMonotonicTime() - attempt_start_time_monotonic_;
@@ -649,13 +673,13 @@
     case metrics::AttemptResult::kAbnormalTermination:
     case metrics::AttemptResult::kUpdateCanceled:
     case metrics::AttemptResult::kUpdateSucceededNotActive:
+    case metrics::AttemptResult::kUpdateSkipped:
     case metrics::AttemptResult::kNumConstants:
     case metrics::AttemptResult::kUnset:
       break;
   }
 
-  system_state_->metrics_reporter()->ReportUpdateAttemptMetrics(
-      system_state_,
+  SystemState::Get()->metrics_reporter()->ReportUpdateAttemptMetrics(
       attempt_number,
       payload_type,
       duration,
@@ -664,7 +688,7 @@
       attempt_result,
       internal_error_code);
 
-  system_state_->metrics_reporter()->ReportUpdateAttemptDownloadMetrics(
+  SystemState::Get()->metrics_reporter()->ReportUpdateAttemptDownloadMetrics(
       payload_bytes_downloaded,
       payload_download_speed_bps,
       download_source,
@@ -694,7 +718,8 @@
   if (!attempt_in_progress)
     return;
 
-  system_state_->metrics_reporter()
+  SystemState::Get()
+      ->metrics_reporter()
       ->ReportAbnormallyTerminatedUpdateAttemptMetrics();
 
   ClearPersistedAttemptMetrics();
@@ -758,7 +783,7 @@
 
   int updates_abandoned_count = num_responses_seen_ - 1;
 
-  system_state_->metrics_reporter()->ReportSuccessfulUpdateMetrics(
+  SystemState::Get()->metrics_reporter()->ReportSuccessfulUpdateMetrics(
       attempt_count,
       updates_abandoned_count,
       payload_type,
@@ -774,7 +799,7 @@
 void PayloadState::UpdateNumReboots() {
   // We only update the reboot count when the system has been detected to have
   // been rebooted.
-  if (!system_state_->system_rebooted()) {
+  if (!SystemState::Get()->system_rebooted()) {
     return;
   }
 
@@ -794,7 +819,7 @@
   SetUrlFailureCount(0);
   SetUrlSwitchCount(0);
   UpdateBackoffExpiryTime();  // This will reset the backoff expiry time.
-  SetUpdateTimestampStart(system_state_->clock()->GetWallclockTime());
+  SetUpdateTimestampStart(SystemState::Get()->clock()->GetWallclockTime());
   SetUpdateTimestampEnd(Time());  // Set to null time
   SetUpdateDurationUptime(TimeDelta::FromSeconds(0));
   ResetDownloadSourcesOnNewUpdate();
@@ -806,7 +831,6 @@
 }
 
 void PayloadState::ResetRollbackVersion() {
-  CHECK(powerwash_safe_prefs_);
   rollback_version_ = "";
   powerwash_safe_prefs_->Delete(kPrefsRollbackVersion);
 }
@@ -855,7 +879,6 @@
 }
 
 void PayloadState::LoadResponseSignature() {
-  CHECK(prefs_);
   string stored_value;
   if (prefs_->Exists(kPrefsCurrentResponseSignature) &&
       prefs_->GetString(kPrefsCurrentResponseSignature, &stored_value)) {
@@ -864,7 +887,6 @@
 }
 
 void PayloadState::SetResponseSignature(const string& response_signature) {
-  CHECK(prefs_);
   response_signature_ = response_signature;
   LOG(INFO) << "Current Response Signature = \n" << response_signature_;
   prefs_->SetString(kPrefsCurrentResponseSignature, response_signature_);
@@ -887,7 +909,6 @@
 
 void PayloadState::SetFullPayloadAttemptNumber(
     int full_payload_attempt_number) {
-  CHECK(prefs_);
   full_payload_attempt_number_ = full_payload_attempt_number;
   LOG(INFO) << "Full Payload Attempt Number = " << full_payload_attempt_number_;
   prefs_->SetInt64(kPrefsFullPayloadAttemptNumber,
@@ -895,16 +916,18 @@
 }
 
 void PayloadState::SetPayloadIndex(size_t payload_index) {
-  CHECK(prefs_);
   payload_index_ = payload_index;
   LOG(INFO) << "Payload Index = " << payload_index_;
   prefs_->SetInt64(kPrefsUpdateStatePayloadIndex, payload_index_);
 }
 
 bool PayloadState::NextPayload() {
-  if (payload_index_ + 1 >= candidate_urls_.size())
+  if (payload_index_ >= candidate_urls_.size())
     return false;
   SetPayloadIndex(payload_index_ + 1);
+  if (payload_index_ >= candidate_urls_.size())
+    return false;
+  SetUrlIndex(0);
   return true;
 }
 
@@ -913,7 +936,6 @@
 }
 
 void PayloadState::SetUrlIndex(uint32_t url_index) {
-  CHECK(prefs_);
   url_index_ = url_index;
   LOG(INFO) << "Current URL Index = " << url_index_;
   prefs_->SetInt64(kPrefsCurrentUrlIndex, url_index_);
@@ -929,7 +951,6 @@
 }
 
 void PayloadState::SetScatteringWaitPeriod(TimeDelta wait_period) {
-  CHECK(prefs_);
   scattering_wait_period_ = wait_period;
   LOG(INFO) << "Scattering Wait Period (seconds) = "
             << scattering_wait_period_.InSeconds();
@@ -947,7 +968,6 @@
 }
 
 void PayloadState::SetStagingWaitPeriod(TimeDelta wait_period) {
-  CHECK(prefs_);
   staging_wait_period_ = wait_period;
   LOG(INFO) << "Staging Wait Period (days) =" << staging_wait_period_.InDays();
   if (staging_wait_period_.InSeconds() > 0) {
@@ -963,7 +983,6 @@
 }
 
 void PayloadState::SetUrlSwitchCount(uint32_t url_switch_count) {
-  CHECK(prefs_);
   url_switch_count_ = url_switch_count;
   LOG(INFO) << "URL Switch Count = " << url_switch_count_;
   prefs_->SetInt64(kPrefsUrlSwitchCount, url_switch_count_);
@@ -974,7 +993,6 @@
 }
 
 void PayloadState::SetUrlFailureCount(uint32_t url_failure_count) {
-  CHECK(prefs_);
   url_failure_count_ = url_failure_count;
   LOG(INFO) << "Current URL (Url" << GetUrlIndex()
             << ")'s Failure Count = " << url_failure_count_;
@@ -982,7 +1000,6 @@
 }
 
 void PayloadState::LoadBackoffExpiryTime() {
-  CHECK(prefs_);
   int64_t stored_value;
   if (!prefs_->Exists(kPrefsBackoffExpiryTime))
     return;
@@ -1001,7 +1018,6 @@
 }
 
 void PayloadState::SetBackoffExpiryTime(const Time& new_time) {
-  CHECK(prefs_);
   backoff_expiry_time_ = new_time;
   LOG(INFO) << "Backoff Expiry Time = "
             << utils::ToString(backoff_expiry_time_);
@@ -1011,7 +1027,7 @@
 
 TimeDelta PayloadState::GetUpdateDuration() {
   Time end_time = update_timestamp_end_.is_null()
-                      ? system_state_->clock()->GetWallclockTime()
+                      ? SystemState::Get()->clock()->GetWallclockTime()
                       : update_timestamp_end_;
   return end_time - update_timestamp_start_;
 }
@@ -1019,10 +1035,7 @@
 void PayloadState::LoadUpdateTimestampStart() {
   int64_t stored_value;
   Time stored_time;
-
-  CHECK(prefs_);
-
-  Time now = system_state_->clock()->GetWallclockTime();
+  Time now = SystemState::Get()->clock()->GetWallclockTime();
 
   if (!prefs_->Exists(kPrefsUpdateTimestampStart)) {
     // The preference missing is not unexpected - in that case, just
@@ -1035,7 +1048,7 @@
     stored_time = Time::FromInternalValue(stored_value);
   }
 
-  // Sanity check: If the time read from disk is in the future
+  // Validation check: If the time read from disk is in the future
   // (modulo some slack to account for possible NTP drift
   // adjustments), something is fishy and we should report and
   // reset.
@@ -1070,8 +1083,6 @@
   int64_t stored_value;
   TimeDelta stored_delta;
 
-  CHECK(prefs_);
-
   if (!prefs_->Exists(kPrefsUpdateDurationUptime)) {
     // The preference missing is not unexpected - in that case, just
     // we'll use zero as the delta
@@ -1082,7 +1093,7 @@
     stored_delta = TimeDelta::FromInternalValue(stored_value);
   }
 
-  // Sanity-check: Uptime can never be greater than the wall-clock
+  // Validation check: Uptime can never be greater than the wall-clock
   // difference (modulo some slack). If it is, report and reset
   // to the wall-clock difference.
   TimeDelta diff = GetUpdateDuration() - stored_delta;
@@ -1102,14 +1113,12 @@
 }
 
 void PayloadState::LoadRollbackHappened() {
-  CHECK(powerwash_safe_prefs_);
   bool rollback_happened = false;
   powerwash_safe_prefs_->GetBoolean(kPrefsRollbackHappened, &rollback_happened);
   SetRollbackHappened(rollback_happened);
 }
 
 void PayloadState::SetRollbackHappened(bool rollback_happened) {
-  CHECK(powerwash_safe_prefs_);
   LOG(INFO) << "Setting rollback-happened to " << rollback_happened << ".";
   rollback_happened_ = rollback_happened;
   if (rollback_happened) {
@@ -1121,7 +1130,6 @@
 }
 
 void PayloadState::LoadRollbackVersion() {
-  CHECK(powerwash_safe_prefs_);
   string rollback_version;
   if (powerwash_safe_prefs_->GetString(kPrefsRollbackVersion,
                                        &rollback_version)) {
@@ -1130,8 +1138,7 @@
 }
 
 void PayloadState::SetRollbackVersion(const string& rollback_version) {
-  CHECK(powerwash_safe_prefs_);
-  LOG(INFO) << "Blacklisting version " << rollback_version;
+  LOG(INFO) << "Excluding version " << rollback_version;
   rollback_version_ = rollback_version;
   powerwash_safe_prefs_->SetString(kPrefsRollbackVersion, rollback_version);
 }
@@ -1139,7 +1146,6 @@
 void PayloadState::SetUpdateDurationUptimeExtended(const TimeDelta& value,
                                                    const Time& timestamp,
                                                    bool use_logging) {
-  CHECK(prefs_);
   update_duration_uptime_ = value;
   update_duration_uptime_timestamp_ = timestamp;
   prefs_->SetInt64(kPrefsUpdateDurationUptime,
@@ -1151,12 +1157,12 @@
 }
 
 void PayloadState::SetUpdateDurationUptime(const TimeDelta& value) {
-  Time now = system_state_->clock()->GetMonotonicTime();
+  Time now = SystemState::Get()->clock()->GetMonotonicTime();
   SetUpdateDurationUptimeExtended(value, now, true);
 }
 
 void PayloadState::CalculateUpdateDurationUptime() {
-  Time now = system_state_->clock()->GetMonotonicTime();
+  Time now = SystemState::Get()->clock()->GetMonotonicTime();
   TimeDelta uptime_since_last_update = now - update_duration_uptime_timestamp_;
 
   if (uptime_since_last_update > TimeDelta::FromSeconds(kUptimeResolution)) {
@@ -1178,8 +1184,6 @@
 void PayloadState::SetCurrentBytesDownloaded(DownloadSource source,
                                              uint64_t current_bytes_downloaded,
                                              bool log) {
-  CHECK(prefs_);
-
   if (source >= kNumDownloadSources)
     return;
 
@@ -1201,8 +1205,6 @@
 void PayloadState::SetTotalBytesDownloaded(DownloadSource source,
                                            uint64_t total_bytes_downloaded,
                                            bool log) {
-  CHECK(prefs_);
-
   if (source >= kNumDownloadSources)
     return;
 
@@ -1221,7 +1223,6 @@
 }
 
 void PayloadState::SetNumResponsesSeen(int num_responses_seen) {
-  CHECK(prefs_);
   num_responses_seen_ = num_responses_seen;
   LOG(INFO) << "Num Responses Seen = " << num_responses_seen_;
   prefs_->SetInt64(kPrefsNumResponsesSeen, num_responses_seen_);
@@ -1230,8 +1231,8 @@
 void PayloadState::ComputeCandidateUrls() {
   bool http_url_ok = true;
 
-  if (system_state_->hardware()->IsOfficialBuild()) {
-    const policy::DevicePolicy* policy = system_state_->device_policy();
+  if (SystemState::Get()->hardware()->IsOfficialBuild()) {
+    const policy::DevicePolicy* policy = SystemState::Get()->device_policy();
     if (policy && policy->GetHttpDownloadsEnabled(&http_url_ok) && !http_url_ok)
       LOG(INFO) << "Downloads via HTTP Url are not enabled by device policy";
   } else {
@@ -1264,12 +1265,14 @@
 
   // Avoid the UpdateEngineStarted actions if this is not the first time we
   // run the update engine since reboot.
-  if (!system_state_->system_rebooted())
+  if (!SystemState::Get()->system_rebooted())
     return;
 
   // Report time_to_reboot if we booted into a new update.
   metrics_utils::LoadAndReportTimeToReboot(
-      system_state_->metrics_reporter(), prefs_, system_state_->clock());
+      SystemState::Get()->metrics_reporter(),
+      prefs_,
+      SystemState::Get()->clock());
   prefs_->Delete(kPrefsSystemUpdatedMarker);
 
   // Check if it is needed to send metrics about a failed reboot into a new
@@ -1294,7 +1297,8 @@
     // since we successfully booted the new update in that case. If the boot
     // failed, we will read this value from the same version, so it will always
     // be compatible.
-    if (installed_from == system_state_->boot_control()->GetCurrentSlot()) {
+    if (installed_from ==
+        SystemState::Get()->boot_control()->GetCurrentSlot()) {
       // A reboot was pending, but the chromebook is again in the same
       // BootDevice where the update was installed from.
       int64_t target_attempt;
@@ -1305,7 +1309,7 @@
       }
 
       // Report the UMA metric of the current boot failure.
-      system_state_->metrics_reporter()->ReportFailedUpdateCount(
+      SystemState::Get()->metrics_reporter()->ReportFailedUpdateCount(
           target_attempt);
     } else {
       prefs_->Delete(kPrefsTargetVersionAttempt);
@@ -1336,7 +1340,7 @@
   prefs_->SetInt64(kPrefsTargetVersionAttempt, target_attempt + 1);
 
   prefs_->SetInt64(kPrefsTargetVersionInstalledFrom,
-                   system_state_->boot_control()->GetCurrentSlot());
+                   SystemState::Get()->boot_control()->GetCurrentSlot());
 }
 
 void PayloadState::ResetUpdateStatus() {
@@ -1358,7 +1362,6 @@
 void PayloadState::SetP2PNumAttempts(int value) {
   p2p_num_attempts_ = value;
   LOG(INFO) << "p2p Num Attempts = " << p2p_num_attempts_;
-  CHECK(prefs_);
   prefs_->SetInt64(kPrefsP2PNumAttempts, value);
 }
 
@@ -1374,7 +1377,6 @@
   p2p_first_attempt_timestamp_ = time;
   LOG(INFO) << "p2p First Attempt Timestamp = "
             << utils::ToString(p2p_first_attempt_timestamp_);
-  CHECK(prefs_);
   int64_t stored_value = time.ToInternalValue();
   prefs_->SetInt64(kPrefsP2PFirstAttemptTimestamp, stored_value);
 }
@@ -1387,10 +1389,10 @@
 }
 
 void PayloadState::P2PNewAttempt() {
-  CHECK(prefs_);
   // Set timestamp, if it hasn't been set already
   if (p2p_first_attempt_timestamp_.is_null()) {
-    SetP2PFirstAttemptTimestamp(system_state_->clock()->GetWallclockTime());
+    SetP2PFirstAttemptTimestamp(
+        SystemState::Get()->clock()->GetWallclockTime());
   }
   // Increase number of attempts
   SetP2PNumAttempts(GetP2PNumAttempts() + 1);
@@ -1405,7 +1407,7 @@
   }
 
   if (!p2p_first_attempt_timestamp_.is_null()) {
-    Time now = system_state_->clock()->GetWallclockTime();
+    Time now = SystemState::Get()->clock()->GetWallclockTime();
     TimeDelta time_spent_attempting_p2p = now - p2p_first_attempt_timestamp_;
     if (time_spent_attempting_p2p.InSeconds() < 0) {
       LOG(ERROR) << "Time spent attempting p2p is negative"
diff --git a/payload_state.h b/cros/payload_state.h
similarity index 93%
rename from payload_state.h
rename to cros/payload_state.h
index 5ef1220..db54865 100644
--- a/payload_state.h
+++ b/cros/payload_state.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_PAYLOAD_STATE_H_
-#define UPDATE_ENGINE_PAYLOAD_STATE_H_
+#ifndef UPDATE_ENGINE_CROS_PAYLOAD_STATE_H_
+#define UPDATE_ENGINE_CROS_PAYLOAD_STATE_H_
 
 #include <algorithm>
 #include <string>
@@ -24,9 +24,10 @@
 #include <base/time/time.h>
 #include <gtest/gtest_prod.h>  // for FRIEND_TEST
 
+#include "update_engine/common/excluder_interface.h"
+#include "update_engine/common/metrics_constants.h"
 #include "update_engine/common/prefs_interface.h"
-#include "update_engine/metrics_constants.h"
-#include "update_engine/payload_state_interface.h"
+#include "update_engine/cros/payload_state_interface.h"
 
 namespace chromeos_update_engine {
 
@@ -47,7 +48,7 @@
   // It performs the initial loading of all persisted state into memory and
   // dumps the initial state for debugging purposes.  Note: the other methods
   // should be called only after calling Initialize on this object.
-  bool Initialize(SystemState* system_state);
+  bool Initialize();
 
   // Implementation of PayloadStateInterface methods.
   void SetResponse(const OmahaResponse& response) override;
@@ -156,6 +157,12 @@
   FRIEND_TEST(PayloadStateTest, RollbackHappened);
   FRIEND_TEST(PayloadStateTest, RollbackVersion);
   FRIEND_TEST(PayloadStateTest, UpdateSuccessWithWipedPrefs);
+  FRIEND_TEST(PayloadStateTest, NextPayloadResetsUrlIndex);
+  FRIEND_TEST(PayloadStateTest, ExcludeNoopForNonExcludables);
+  FRIEND_TEST(PayloadStateTest, ExcludeOnlyCanExcludables);
+  FRIEND_TEST(PayloadStateTest, IncrementFailureExclusionTest);
+  FRIEND_TEST(PayloadStateTest, HaltExclusionPostPayloadExhaustion);
+  FRIEND_TEST(PayloadStateTest, NonInfinitePayloadIndexIncrement);
 
   // Helper called when an attempt has begun, is called by
   // UpdateResumed(), UpdateRestarted() and Rollback().
@@ -180,6 +187,12 @@
   // to the next URL and resets the failure count for that URL.
   void IncrementFailureCount();
 
+  // Excludes the current payload + current candidate URL from being part of
+  // future updates/retries. Whenever |SetResponse()| or |NextPayload()| decide
+  // on the initial current URL index and the next payload respectively, they
+  // will be advanced based on exclusions.
+  void ExcludeCurrentPayload();
+
   // Updates the backoff expiry time exponentially based on the current
   // payload attempt number.
   void UpdateBackoffExpiryTime();
@@ -355,14 +368,14 @@
   // check where policy was available. This info is preserved over powerwash.
   void LoadRollbackHappened();
 
-  // Loads the blacklisted version from our prefs file.
+  // Loads the excluded version from our prefs file.
   void LoadRollbackVersion();
 
-  // Blacklists this version from getting AU'd to until we receive a new update
+  // Excludes this version from getting AU'd to until we receive a new update
   // response.
   void SetRollbackVersion(const std::string& rollback_version);
 
-  // Clears any blacklisted version.
+  // Clears any excluded version.
   void ResetRollbackVersion();
 
   inline uint32_t GetUrlIndex() {
@@ -416,9 +429,6 @@
   // Get the total size of all payloads.
   int64_t GetPayloadSize();
 
-  // The global state of the system.
-  SystemState* system_state_;
-
   // Interface object with which we read/write persisted state. This must
   // be set by calling the Initialize method before calling any other method.
   PrefsInterface* prefs_;
@@ -428,6 +438,11 @@
   // This object persists across powerwashes.
   PrefsInterface* powerwash_safe_prefs_;
 
+  // Interface object with which we determine exclusion decisions for
+  // payloads/partitions during the update. This must be set by calling the
+  // Initialize method before calling any other method.
+  ExcluderInterface* excluder_;
+
   // This is the current response object from Omaha.
   OmahaResponse response_;
 
@@ -468,10 +483,9 @@
   size_t payload_index_ = 0;
 
   // The index of the current URL.  This type is different from the one in the
-  // accessor methods because PrefsInterface supports only int64_t but we want
+  // accessor methods because |PrefsInterface| supports only int64_t but we want
   // to provide a stronger abstraction of uint32_t.  Each update to this value
-  // is persisted so we resume from the same value in case of a process
-  // restart.
+  // is persisted so we resume from the same value in case of a process restart.
   size_t url_index_;
 
   // The count of failures encountered in the current attempt to download using
@@ -549,7 +563,7 @@
   // forced updates to avoid update-rollback loops.
   bool rollback_happened_;
 
-  // This stores a blacklisted version set as part of rollback. When we rollback
+  // This stores an excluded version set as part of rollback. When we rollback
   // we store the version of the os from which we are rolling back from in order
   // to guarantee that we do not re-update to it on the next au attempt after
   // reboot.
@@ -581,4 +595,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_PAYLOAD_STATE_H_
+#endif  // UPDATE_ENGINE_CROS_PAYLOAD_STATE_H_
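
A minimal wiring sketch for the argument-less Initialize() (assuming a test context; FakeSystemState::CreateInstance() is the pattern used by the unit tests that follow):

    #include <base/logging.h>

    #include "update_engine/cros/fake_system_state.h"
    #include "update_engine/cros/payload_state.h"

    void InitPayloadStateForTest() {
      chromeos_update_engine::FakeSystemState::CreateInstance();
      chromeos_update_engine::PayloadState payload_state;
      // Initialize() now reads prefs(), powerwash_safe_prefs() and the
      // update_attempter()'s Excluder from SystemState::Get() internally.
      CHECK(payload_state.Initialize());
    }
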
diff --git a/payload_state_interface.h b/cros/payload_state_interface.h
similarity index 97%
rename from payload_state_interface.h
rename to cros/payload_state_interface.h
index d384a0e..9ead650 100644
--- a/payload_state_interface.h
+++ b/cros/payload_state_interface.h
@@ -14,14 +14,14 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_PAYLOAD_STATE_INTERFACE_H_
-#define UPDATE_ENGINE_PAYLOAD_STATE_INTERFACE_H_
+#ifndef UPDATE_ENGINE_CROS_PAYLOAD_STATE_INTERFACE_H_
+#define UPDATE_ENGINE_CROS_PAYLOAD_STATE_INTERFACE_H_
 
 #include <string>
 
 #include "update_engine/common/action_processor.h"
 #include "update_engine/common/constants.h"
-#include "update_engine/omaha_response.h"
+#include "update_engine/cros/omaha_response.h"
 
 namespace chromeos_update_engine {
 
@@ -212,4 +212,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_PAYLOAD_STATE_INTERFACE_H_
+#endif  // UPDATE_ENGINE_CROS_PAYLOAD_STATE_INTERFACE_H_
diff --git a/payload_state_unittest.cc b/cros/payload_state_unittest.cc
similarity index 76%
rename from payload_state_unittest.cc
rename to cros/payload_state_unittest.cc
index 869c24e..5478fca 100644
--- a/payload_state_unittest.cc
+++ b/cros/payload_state_unittest.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/payload_state.h"
+#include "update_engine/cros/payload_state.h"
 
 #include <base/files/file_path.h>
 #include <base/files/file_util.h>
@@ -23,16 +23,16 @@
 #include <gtest/gtest.h>
 
 #include "update_engine/common/constants.h"
-#include "update_engine/common/fake_clock.h"
+#include "update_engine/common/excluder_interface.h"
 #include "update_engine/common/fake_hardware.h"
-#include "update_engine/common/fake_prefs.h"
+#include "update_engine/common/metrics_reporter_interface.h"
+#include "update_engine/common/mock_excluder.h"
 #include "update_engine/common/mock_prefs.h"
 #include "update_engine/common/prefs.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/fake_system_state.h"
-#include "update_engine/metrics_reporter_interface.h"
-#include "update_engine/omaha_request_action.h"
+#include "update_engine/cros/fake_system_state.h"
+#include "update_engine/cros/omaha_request_action.h"
 
 using base::Time;
 using base::TimeDelta;
@@ -44,6 +44,7 @@
 using testing::NiceMock;
 using testing::Return;
 using testing::SetArgPointee;
+using testing::StrictMock;
 
 namespace chromeos_update_engine {
 
@@ -103,12 +104,18 @@
   EXPECT_EQ(expected_response_sign, stored_response_sign);
 }
 
-class PayloadStateTest : public ::testing::Test {};
+class PayloadStateTest : public ::testing::Test {
+ public:
+  void SetUp() { FakeSystemState::CreateInstance(); }
 
-TEST(PayloadStateTest, SetResponseWorksWithEmptyResponse) {
+  // TODO(b/171829801): Replace all the |MockPrefs| in this file with
+  // |FakePrefs| so we don't have to catch every single unimportant mock call.
+};
+
+TEST_F(PayloadStateTest, SetResponseWorksWithEmptyResponse) {
   OmahaResponse response;
-  FakeSystemState fake_system_state;
-  NiceMock<MockPrefs>* prefs = fake_system_state.mock_prefs();
+  FakeSystemState::Get()->set_prefs(nullptr);
+  auto* prefs = FakeSystemState::Get()->mock_prefs();
   EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AnyNumber());
   EXPECT_CALL(*prefs, SetInt64(kPrefsPayloadAttemptNumber, 0))
       .Times(AtLeast(1));
@@ -130,7 +137,7 @@
       .Times(AtLeast(1));
   EXPECT_CALL(*prefs, SetInt64(kPrefsNumReboots, 0)).Times(AtLeast(1));
   PayloadState payload_state;
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
   payload_state.SetResponse(response);
   string stored_response_sign = payload_state.GetResponseSignature();
   string expected_response_sign =
@@ -143,15 +150,15 @@
   EXPECT_EQ(1, payload_state.GetNumResponsesSeen());
 }
 
-TEST(PayloadStateTest, SetResponseWorksWithSingleUrl) {
+TEST_F(PayloadStateTest, SetResponseWorksWithSingleUrl) {
   OmahaResponse response;
   response.packages.push_back({.payload_urls = {"https://single.url.test"},
                                .size = 123456789,
                                .metadata_size = 58123,
                                .metadata_signature = "msign",
                                .hash = "hash"});
-  FakeSystemState fake_system_state;
-  NiceMock<MockPrefs>* prefs = fake_system_state.mock_prefs();
+  FakeSystemState::Get()->set_prefs(nullptr);
+  auto* prefs = FakeSystemState::Get()->mock_prefs();
   EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AnyNumber());
   EXPECT_CALL(*prefs, SetInt64(kPrefsPayloadAttemptNumber, 0))
       .Times(AtLeast(1));
@@ -173,7 +180,7 @@
       .Times(AtLeast(1));
   EXPECT_CALL(*prefs, SetInt64(kPrefsNumReboots, 0)).Times(AtLeast(1));
   PayloadState payload_state;
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
   payload_state.SetResponse(response);
   string stored_response_sign = payload_state.GetResponseSignature();
   string expected_response_sign =
@@ -194,7 +201,7 @@
   EXPECT_EQ(1, payload_state.GetNumResponsesSeen());
 }
 
-TEST(PayloadStateTest, SetResponseWorksWithMultipleUrls) {
+TEST_F(PayloadStateTest, SetResponseWorksWithMultipleUrls) {
   OmahaResponse response;
   response.packages.push_back({.payload_urls = {"http://multiple.url.test",
                                                 "https://multiple.url.test"},
@@ -202,8 +209,8 @@
                                .metadata_size = 558123,
                                .metadata_signature = "metasign",
                                .hash = "rhash"});
-  FakeSystemState fake_system_state;
-  NiceMock<MockPrefs>* prefs = fake_system_state.mock_prefs();
+  FakeSystemState::Get()->set_prefs(nullptr);
+  auto* prefs = FakeSystemState::Get()->mock_prefs();
   EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AnyNumber());
   EXPECT_CALL(*prefs, SetInt64(kPrefsPayloadAttemptNumber, 0))
       .Times(AtLeast(1));
@@ -222,7 +229,7 @@
   EXPECT_CALL(*prefs, SetInt64(kPrefsNumReboots, 0)).Times(AtLeast(1));
 
   PayloadState payload_state;
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
   payload_state.SetResponse(response);
   string stored_response_sign = payload_state.GetResponseSignature();
   string expected_response_sign =
@@ -244,10 +251,10 @@
   EXPECT_EQ(1, payload_state.GetNumResponsesSeen());
 }
 
-TEST(PayloadStateTest, CanAdvanceUrlIndexCorrectly) {
+TEST_F(PayloadStateTest, CanAdvanceUrlIndexCorrectly) {
   OmahaResponse response;
-  FakeSystemState fake_system_state;
-  NiceMock<MockPrefs>* prefs = fake_system_state.mock_prefs();
+  FakeSystemState::Get()->set_prefs(nullptr);
+  auto* prefs = FakeSystemState::Get()->mock_prefs();
   PayloadState payload_state;
 
   EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AnyNumber());
@@ -274,7 +281,7 @@
   EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlFailureCount, 0))
       .Times(AtLeast(4));
 
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
 
   // This does a SetResponse which causes all the states to be set to 0 for
   // the first time.
@@ -299,12 +306,11 @@
   EXPECT_EQ(3U, payload_state.GetUrlSwitchCount());
 }
 
-TEST(PayloadStateTest, NewResponseResetsPayloadState) {
+TEST_F(PayloadStateTest, NewResponseResetsPayloadState) {
   OmahaResponse response;
-  FakeSystemState fake_system_state;
   PayloadState payload_state;
 
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
 
   // Set the first response.
   SetupPayloadStateWith2Urls(
@@ -346,12 +352,12 @@
             payload_state.GetTotalBytesDownloaded(kDownloadSourceHttpsServer));
 }
 
-TEST(PayloadStateTest, AllCountersGetUpdatedProperlyOnErrorCodesAndEvents) {
+TEST_F(PayloadStateTest, AllCountersGetUpdatedProperlyOnErrorCodesAndEvents) {
   OmahaResponse response;
   PayloadState payload_state;
-  FakeSystemState fake_system_state;
   int progress_bytes = 100;
-  NiceMock<MockPrefs>* prefs = fake_system_state.mock_prefs();
+  FakeSystemState::Get()->set_prefs(nullptr);
+  auto* prefs = FakeSystemState::Get()->mock_prefs();
 
   EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AnyNumber());
   EXPECT_CALL(*prefs, SetInt64(kPrefsPayloadAttemptNumber, 0))
@@ -397,7 +403,7 @@
       .Times(AtLeast(1));
   EXPECT_CALL(*prefs, SetInt64(kPrefsNumReboots, 0)).Times(AtLeast(1));
 
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
 
   SetupPayloadStateWith2Urls(
       "Hash5873", true, false, &payload_state, &response);
@@ -492,11 +498,12 @@
   EXPECT_FALSE(payload_state.ShouldBackoffDownload());
 }
 
-TEST(PayloadStateTest, PayloadAttemptNumberIncreasesOnSuccessfulFullDownload) {
+TEST_F(PayloadStateTest,
+       PayloadAttemptNumberIncreasesOnSuccessfulFullDownload) {
   OmahaResponse response;
   PayloadState payload_state;
-  FakeSystemState fake_system_state;
-  NiceMock<MockPrefs>* prefs = fake_system_state.mock_prefs();
+  FakeSystemState::Get()->set_prefs(nullptr);
+  auto* prefs = FakeSystemState::Get()->mock_prefs();
 
   EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AnyNumber());
   EXPECT_CALL(*prefs, SetInt64(kPrefsPayloadAttemptNumber, 0))
@@ -515,7 +522,7 @@
   EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlFailureCount, 0))
       .Times(AtLeast(1));
 
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
 
   SetupPayloadStateWith2Urls(
       "Hash8593", true, false, &payload_state, &response);
@@ -531,11 +538,12 @@
   EXPECT_EQ(0U, payload_state.GetUrlSwitchCount());
 }
 
-TEST(PayloadStateTest, PayloadAttemptNumberIncreasesOnSuccessfulDeltaDownload) {
+TEST_F(PayloadStateTest,
+       PayloadAttemptNumberIncreasesOnSuccessfulDeltaDownload) {
   OmahaResponse response;
   PayloadState payload_state;
-  FakeSystemState fake_system_state;
-  NiceMock<MockPrefs>* prefs = fake_system_state.mock_prefs();
+  FakeSystemState::Get()->set_prefs(nullptr);
+  auto* prefs = FakeSystemState::Get()->mock_prefs();
 
   EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AnyNumber());
   EXPECT_CALL(*prefs, SetInt64(kPrefsPayloadAttemptNumber, 0))
@@ -553,7 +561,7 @@
   EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlFailureCount, 0))
       .Times(AtLeast(1));
 
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
 
   SetupPayloadStateWith2Urls("Hash8593", true, true, &payload_state, &response);
 
@@ -568,12 +576,11 @@
   EXPECT_EQ(0U, payload_state.GetUrlSwitchCount());
 }
 
-TEST(PayloadStateTest, SetResponseResetsInvalidUrlIndex) {
+TEST_F(PayloadStateTest, SetResponseResetsInvalidUrlIndex) {
   OmahaResponse response;
   PayloadState payload_state;
-  FakeSystemState fake_system_state;
 
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
   SetupPayloadStateWith2Urls(
       "Hash4427", true, false, &payload_state, &response);
 
@@ -589,27 +596,26 @@
   EXPECT_EQ(1U, payload_state.GetUrlSwitchCount());
 
   // Now, simulate a corrupted url index on persisted store which gets
-  // loaded when update_engine restarts. Using a different prefs object
-  // so as to not bother accounting for the uninteresting calls above.
-  FakeSystemState fake_system_state2;
-  NiceMock<MockPrefs>* prefs2 = fake_system_state2.mock_prefs();
-  EXPECT_CALL(*prefs2, Exists(_)).WillRepeatedly(Return(true));
-  EXPECT_CALL(*prefs2, GetInt64(_, _)).Times(AtLeast(1));
-  EXPECT_CALL(*prefs2, GetInt64(kPrefsPayloadAttemptNumber, _))
+  // loaded when update_engine restarts.
+  FakeSystemState::Get()->set_prefs(nullptr);
+  auto* prefs = FakeSystemState::Get()->mock_prefs();
+  EXPECT_CALL(*prefs, Exists(_)).WillRepeatedly(Return(true));
+  EXPECT_CALL(*prefs, GetInt64(_, _)).Times(AtLeast(1));
+  EXPECT_CALL(*prefs, GetInt64(kPrefsPayloadAttemptNumber, _))
       .Times(AtLeast(1));
-  EXPECT_CALL(*prefs2, GetInt64(kPrefsFullPayloadAttemptNumber, _))
+  EXPECT_CALL(*prefs, GetInt64(kPrefsFullPayloadAttemptNumber, _))
       .Times(AtLeast(1));
-  EXPECT_CALL(*prefs2, GetInt64(kPrefsCurrentUrlIndex, _))
+  EXPECT_CALL(*prefs, GetInt64(kPrefsCurrentUrlIndex, _))
       .WillRepeatedly(DoAll(SetArgPointee<1>(2), Return(true)));
-  EXPECT_CALL(*prefs2, GetInt64(kPrefsCurrentUrlFailureCount, _))
+  EXPECT_CALL(*prefs, GetInt64(kPrefsCurrentUrlFailureCount, _))
       .Times(AtLeast(1));
-  EXPECT_CALL(*prefs2, GetInt64(kPrefsUrlSwitchCount, _)).Times(AtLeast(1));
+  EXPECT_CALL(*prefs, GetInt64(kPrefsUrlSwitchCount, _)).Times(AtLeast(1));
 
   // Note: This will be a different payload object, but the response should
   // have the same hash as before so as to not trivially reset because the
   // response was different. We want to specifically test that even if the
   // response is the same, we should reset the state if we find it corrupted.
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state2));
+  EXPECT_TRUE(payload_state.Initialize());
   SetupPayloadStateWith2Urls(
       "Hash4427", true, false, &payload_state, &response);
 
@@ -622,15 +628,14 @@
   EXPECT_EQ(0U, payload_state.GetUrlSwitchCount());
 }
 
-TEST(PayloadStateTest, NoBackoffInteractiveChecks) {
+TEST_F(PayloadStateTest, NoBackoffInteractiveChecks) {
   OmahaResponse response;
   PayloadState payload_state;
-  FakeSystemState fake_system_state;
-  OmahaRequestParams params(&fake_system_state);
-  params.Init("", "", true);  // interactive = True.
-  fake_system_state.set_request_params(&params);
+  OmahaRequestParams params;
+  params.Init("", "", {.interactive = true});
+  FakeSystemState::Get()->set_request_params(&params);
 
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
   SetupPayloadStateWith2Urls(
       "Hash6437", true, false, &payload_state, &response);
 
@@ -645,15 +650,14 @@
   EXPECT_FALSE(payload_state.ShouldBackoffDownload());
 }
 
-TEST(PayloadStateTest, NoBackoffForP2PUpdates) {
+TEST_F(PayloadStateTest, NoBackoffForP2PUpdates) {
   OmahaResponse response;
   PayloadState payload_state;
-  FakeSystemState fake_system_state;
-  OmahaRequestParams params(&fake_system_state);
-  params.Init("", "", false);  // interactive = False.
-  fake_system_state.set_request_params(&params);
+  OmahaRequestParams params;
+  params.Init("", "", {});
+  FakeSystemState::Get()->set_request_params(&params);
 
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
   SetupPayloadStateWith2Urls(
       "Hash6437", true, false, &payload_state, &response);
 
@@ -676,12 +680,11 @@
   EXPECT_TRUE(payload_state.ShouldBackoffDownload());
 }
 
-TEST(PayloadStateTest, NoBackoffForDeltaPayloads) {
+TEST_F(PayloadStateTest, NoBackoffForDeltaPayloads) {
   OmahaResponse response;
   PayloadState payload_state;
-  FakeSystemState fake_system_state;
 
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
   SetupPayloadStateWith2Urls("Hash6437", true, true, &payload_state, &response);
 
   // Simulate a successful download and see that we're ready to download
@@ -720,12 +723,11 @@
             backoff_expiry_time.ToInternalValue());
 }
 
-TEST(PayloadStateTest, BackoffPeriodsAreInCorrectRange) {
+TEST_F(PayloadStateTest, BackoffPeriodsAreInCorrectRange) {
   OmahaResponse response;
   PayloadState payload_state;
-  FakeSystemState fake_system_state;
 
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
   SetupPayloadStateWith2Urls(
       "Hash8939", true, false, &payload_state, &response);
 
@@ -741,13 +743,12 @@
   CheckPayloadBackoffState(&payload_state, 10, TimeDelta::FromDays(16));
 }
 
-TEST(PayloadStateTest, BackoffLogicCanBeDisabled) {
+TEST_F(PayloadStateTest, BackoffLogicCanBeDisabled) {
   OmahaResponse response;
   response.disable_payload_backoff = true;
   PayloadState payload_state;
-  FakeSystemState fake_system_state;
 
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
   SetupPayloadStateWith2Urls(
       "Hash8939", true, false, &payload_state, &response);
 
@@ -768,15 +769,14 @@
   EXPECT_FALSE(payload_state.ShouldBackoffDownload());
 }
 
-TEST(PayloadStateTest, BytesDownloadedMetricsGetAddedToCorrectSources) {
+TEST_F(PayloadStateTest, BytesDownloadedMetricsGetAddedToCorrectSources) {
   OmahaResponse response;
   response.disable_payload_backoff = true;
   PayloadState payload_state;
-  FakeSystemState fake_system_state;
   uint64_t https_total = 0;
   uint64_t http_total = 0;
 
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
   SetupPayloadStateWith2Urls(
       "Hash3286", true, false, &payload_state, &response);
   EXPECT_EQ(1, payload_state.GetNumResponsesSeen());
@@ -859,7 +859,7 @@
   EXPECT_EQ(p2p_total,
             payload_state.GetTotalBytesDownloaded(kDownloadSourceHttpPeer));
 
-  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
               ReportSuccessfulUpdateMetrics(
                   1, _, kPayloadTypeFull, _, _, 314, _, _, _, 3));
 
@@ -877,12 +877,11 @@
   EXPECT_EQ(0, payload_state.GetNumResponsesSeen());
 }
 
-TEST(PayloadStateTest, DownloadSourcesUsedIsCorrect) {
+TEST_F(PayloadStateTest, DownloadSourcesUsedIsCorrect) {
   OmahaResponse response;
   PayloadState payload_state;
-  FakeSystemState fake_system_state;
 
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
   SetupPayloadStateWith2Urls(
       "Hash3286", true, false, &payload_state, &response);
 
@@ -900,7 +899,7 @@
   int64_t total_bytes[kNumDownloadSources] = {};
   total_bytes[kDownloadSourceHttpServer] = num_bytes;
 
-  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
               ReportSuccessfulUpdateMetrics(
                   _,
                   _,
@@ -917,12 +916,11 @@
   payload_state.UpdateSucceeded();
 }
 
-TEST(PayloadStateTest, RestartingUpdateResetsMetrics) {
+TEST_F(PayloadStateTest, RestartingUpdateResetsMetrics) {
   OmahaResponse response;
-  FakeSystemState fake_system_state;
   PayloadState payload_state;
 
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
 
   // Set the first response.
   SetupPayloadStateWith2Urls(
@@ -947,25 +945,24 @@
             payload_state.GetTotalBytesDownloaded(kDownloadSourceHttpServer));
 }
 
-TEST(PayloadStateTest, NumRebootsIncrementsCorrectly) {
-  FakeSystemState fake_system_state;
-  PayloadState payload_state;
-
-  NiceMock<MockPrefs>* prefs = fake_system_state.mock_prefs();
+TEST_F(PayloadStateTest, NumRebootsIncrementsCorrectly) {
+  FakeSystemState::Get()->set_prefs(nullptr);
+  auto* prefs = FakeSystemState::Get()->mock_prefs();
   EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AtLeast(0));
   EXPECT_CALL(*prefs, SetInt64(kPrefsNumReboots, 1)).Times(AtLeast(1));
 
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  PayloadState payload_state;
+  EXPECT_TRUE(payload_state.Initialize());
 
   payload_state.UpdateRestarted();
   EXPECT_EQ(0U, payload_state.GetNumReboots());
 
-  fake_system_state.set_system_rebooted(true);
+  FakeSystemState::Get()->set_system_rebooted(true);
   payload_state.UpdateResumed();
   // Num reboots should be incremented because a system reboot was detected.
   EXPECT_EQ(1U, payload_state.GetNumReboots());
 
-  fake_system_state.set_system_rebooted(false);
+  FakeSystemState::Get()->set_system_rebooted(false);
   payload_state.UpdateResumed();
   // Num reboots should remain 1 because no reboot was detected.
   EXPECT_EQ(1U, payload_state.GetNumReboots());
@@ -975,13 +972,12 @@
   EXPECT_EQ(0U, payload_state.GetNumReboots());
 }
 
-TEST(PayloadStateTest, RollbackHappened) {
-  FakeSystemState fake_system_state;
+TEST_F(PayloadStateTest, RollbackHappened) {
+  FakeSystemState::Get()->set_powerwash_safe_prefs(nullptr);
+  auto* mock_powerwash_safe_prefs =
+      FakeSystemState::Get()->mock_powerwash_safe_prefs();
   PayloadState payload_state;
-
-  NiceMock<MockPrefs>* mock_powerwash_safe_prefs =
-      fake_system_state.mock_powerwash_safe_prefs();
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
 
   // Verify pre-conditions are good.
   EXPECT_FALSE(payload_state.GetRollbackHappened());
@@ -1006,23 +1002,23 @@
   EXPECT_TRUE(payload_state.GetRollbackHappened());
 }
 
-TEST(PayloadStateTest, RollbackVersion) {
-  FakeSystemState fake_system_state;
-  PayloadState payload_state;
+TEST_F(PayloadStateTest, RollbackVersion) {
+  FakeSystemState::Get()->set_powerwash_safe_prefs(nullptr);
+  auto* mock_powerwash_safe_prefs =
+      FakeSystemState::Get()->mock_powerwash_safe_prefs();
 
-  NiceMock<MockPrefs>* mock_powerwash_safe_prefs =
-      fake_system_state.mock_powerwash_safe_prefs();
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  // Mock out the OS version and make sure it's excluded correctly.
+  string rollback_version = "2345.0.0";
+  OmahaRequestParams params;
+  params.Init(rollback_version, "", {});
+  FakeSystemState::Get()->set_request_params(&params);
+
+  PayloadState payload_state;
+  EXPECT_TRUE(payload_state.Initialize());
 
   // Verify pre-conditions are good.
   EXPECT_TRUE(payload_state.GetRollbackVersion().empty());
 
-  // Mock out the os version and make sure it's blacklisted correctly.
-  string rollback_version = "2345.0.0";
-  OmahaRequestParams params(&fake_system_state);
-  params.Init(rollback_version, "", false);
-  fake_system_state.set_request_params(&params);
-
   EXPECT_CALL(*mock_powerwash_safe_prefs,
               SetString(kPrefsRollbackVersion, rollback_version));
   payload_state.Rollback();
@@ -1041,37 +1037,33 @@
 
   // Check that we report only UpdateEngine.Rollback.* metrics in
   // UpdateSucceeded().
-  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
               ReportRollbackMetrics(metrics::RollbackResult::kSuccess))
       .Times(1);
 
   payload_state.UpdateSucceeded();
 }
 
-TEST(PayloadStateTest, DurationsAreCorrect) {
+TEST_F(PayloadStateTest, DurationsAreCorrect) {
   OmahaResponse response;
   response.packages.resize(1);
-  PayloadState payload_state;
-  FakeSystemState fake_system_state;
-  FakeClock fake_clock;
-  FakePrefs fake_prefs;
 
   // Set the clock to a well-known time - 1 second on the wall-clock
   // and 2 seconds on the monotonic clock
-  fake_clock.SetWallclockTime(Time::FromInternalValue(1000000));
-  fake_clock.SetMonotonicTime(Time::FromInternalValue(2000000));
+  auto* fake_clock = FakeSystemState::Get()->fake_clock();
+  fake_clock->SetWallclockTime(Time::FromInternalValue(1000000));
+  fake_clock->SetMonotonicTime(Time::FromInternalValue(2000000));
 
-  fake_system_state.set_clock(&fake_clock);
-  fake_system_state.set_prefs(&fake_prefs);
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  PayloadState payload_state;
+  EXPECT_TRUE(payload_state.Initialize());
 
   // Check that durations are correct for a successful update where
   // time has advanced 7 seconds on the wall clock and 4 seconds on
   // the monotonic clock.
   SetupPayloadStateWith2Urls(
       "Hash8593", true, false, &payload_state, &response);
-  fake_clock.SetWallclockTime(Time::FromInternalValue(8000000));
-  fake_clock.SetMonotonicTime(Time::FromInternalValue(6000000));
+  fake_clock->SetWallclockTime(Time::FromInternalValue(8000000));
+  fake_clock->SetMonotonicTime(Time::FromInternalValue(6000000));
   payload_state.UpdateSucceeded();
   EXPECT_EQ(payload_state.GetUpdateDuration().InMicroseconds(), 7000000);
   EXPECT_EQ(payload_state.GetUpdateDurationUptime().InMicroseconds(), 4000000);
@@ -1084,8 +1076,8 @@
 
   // Advance time a bit (10 secs), simulate download progress and
   // check that durations are updated.
-  fake_clock.SetWallclockTime(Time::FromInternalValue(18000000));
-  fake_clock.SetMonotonicTime(Time::FromInternalValue(16000000));
+  fake_clock->SetWallclockTime(Time::FromInternalValue(18000000));
+  fake_clock->SetMonotonicTime(Time::FromInternalValue(16000000));
   payload_state.DownloadProgress(10);
   EXPECT_EQ(payload_state.GetUpdateDuration().InMicroseconds(), 10000000);
   EXPECT_EQ(payload_state.GetUpdateDurationUptime().InMicroseconds(), 10000000);
@@ -1093,9 +1085,9 @@
   // Now simulate a reboot by resetting monotonic time (to 5000) and
   // creating a new PayloadState object and check that we load the
   // durations correctly (e.g. they are the same as before).
-  fake_clock.SetMonotonicTime(Time::FromInternalValue(5000));
+  fake_clock->SetMonotonicTime(Time::FromInternalValue(5000));
   PayloadState payload_state2;
-  EXPECT_TRUE(payload_state2.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state2.Initialize());
   payload_state2.SetResponse(response);
   EXPECT_EQ(payload_state2.GetUpdateDuration().InMicroseconds(), 10000000);
   EXPECT_EQ(payload_state2.GetUpdateDurationUptime().InMicroseconds(),
@@ -1103,65 +1095,62 @@
 
   // Advance wall-clock by 7 seconds and monotonic clock by 6 seconds
   // and check that the durations are increased accordingly.
-  fake_clock.SetWallclockTime(Time::FromInternalValue(25000000));
-  fake_clock.SetMonotonicTime(Time::FromInternalValue(6005000));
+  fake_clock->SetWallclockTime(Time::FromInternalValue(25000000));
+  fake_clock->SetMonotonicTime(Time::FromInternalValue(6005000));
   payload_state2.UpdateSucceeded();
   EXPECT_EQ(payload_state2.GetUpdateDuration().InMicroseconds(), 17000000);
   EXPECT_EQ(payload_state2.GetUpdateDurationUptime().InMicroseconds(),
             16000000);
 }
 
-TEST(PayloadStateTest, RebootAfterSuccessfulUpdateTest) {
+TEST_F(PayloadStateTest, RebootAfterSuccessfulUpdateTest) {
   OmahaResponse response;
-  PayloadState payload_state;
-  FakeSystemState fake_system_state;
-  FakeClock fake_clock;
-  FakePrefs fake_prefs;
 
   // Set the clock to a well-known time (t = 30 seconds).
-  fake_clock.SetMonotonicTime(
+  auto* fake_clock = FakeSystemState::Get()->fake_clock();
+  fake_clock->SetMonotonicTime(
       Time::FromInternalValue(30 * Time::kMicrosecondsPerSecond));
 
-  fake_system_state.set_clock(&fake_clock);
-  fake_system_state.set_prefs(&fake_prefs);
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  PayloadState payload_state;
+  EXPECT_TRUE(payload_state.Initialize());
 
   // Make the update succeed.
   SetupPayloadStateWith2Urls(
       "Hash8593", true, false, &payload_state, &response);
   payload_state.UpdateSucceeded();
 
+  auto* fake_prefs = FakeSystemState::Get()->fake_prefs();
   // Check that the marker was written.
-  EXPECT_TRUE(fake_prefs.Exists(kPrefsSystemUpdatedMarker));
+  EXPECT_TRUE(fake_prefs->Exists(kPrefsSystemUpdatedMarker));
 
   // Now simulate a reboot and set the monotonic clock to a later point
   // (t = 500 seconds). We do this by using a new PayloadState object
   // and checking that it emits the right UMA metric with the right
   // value.
-  fake_clock.SetMonotonicTime(
+  fake_clock->SetMonotonicTime(
       Time::FromInternalValue(500 * Time::kMicrosecondsPerSecond));
   PayloadState payload_state2;
-  EXPECT_TRUE(payload_state2.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state2.Initialize());
 
   // Expect 500 - 30 seconds = 470 seconds ~= 7 min 50 sec
-  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
               ReportTimeToReboot(7));
-  fake_system_state.set_system_rebooted(true);
+  FakeSystemState::Get()->set_system_rebooted(true);
 
   payload_state2.UpdateEngineStarted();
 
   // Check that the marker was nuked.
-  EXPECT_FALSE(fake_prefs.Exists(kPrefsSystemUpdatedMarker));
+  EXPECT_FALSE(fake_prefs->Exists(kPrefsSystemUpdatedMarker));
 }
 
-TEST(PayloadStateTest, RestartAfterCrash) {
+TEST_F(PayloadStateTest, RestartAfterCrash) {
   PayloadState payload_state;
-  FakeSystemState fake_system_state;
   testing::StrictMock<MockMetricsReporter> mock_metrics_reporter;
-  fake_system_state.set_metrics_reporter(&mock_metrics_reporter);
-  NiceMock<MockPrefs>* prefs = fake_system_state.mock_prefs();
+  FakeSystemState::Get()->set_metrics_reporter(&mock_metrics_reporter);
+  FakeSystemState::Get()->set_prefs(nullptr);
+  auto* prefs = FakeSystemState::Get()->mock_prefs();
 
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
 
   // Only the |kPrefsAttemptInProgress| state variable should be read.
   EXPECT_CALL(*prefs, Exists(_)).Times(0);
@@ -1174,83 +1163,75 @@
   EXPECT_CALL(*prefs, GetBoolean(kPrefsAttemptInProgress, _));
 
   // Simulate an update_engine restart without a reboot.
-  fake_system_state.set_system_rebooted(false);
+  FakeSystemState::Get()->set_system_rebooted(false);
 
   payload_state.UpdateEngineStarted();
 }
 
-TEST(PayloadStateTest, AbnormalTerminationAttemptMetricsNoReporting) {
+TEST_F(PayloadStateTest, AbnormalTerminationAttemptMetricsNoReporting) {
   PayloadState payload_state;
-  FakeSystemState fake_system_state;
 
   // If there's no marker at startup, ensure we don't report a metric.
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
-  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+  EXPECT_TRUE(payload_state.Initialize());
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
               ReportAbnormallyTerminatedUpdateAttemptMetrics())
       .Times(0);
   payload_state.UpdateEngineStarted();
 }
 
-TEST(PayloadStateTest, AbnormalTerminationAttemptMetricsReported) {
-  PayloadState payload_state;
-  FakeSystemState fake_system_state;
-  FakePrefs fake_prefs;
-
+TEST_F(PayloadStateTest, AbnormalTerminationAttemptMetricsReported) {
   // If we have a marker at startup, ensure it's reported and the
   // marker is then cleared.
-  fake_system_state.set_prefs(&fake_prefs);
-  fake_prefs.SetBoolean(kPrefsAttemptInProgress, true);
+  auto* fake_prefs = FakeSystemState::Get()->fake_prefs();
+  fake_prefs->SetBoolean(kPrefsAttemptInProgress, true);
 
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  PayloadState payload_state;
+  EXPECT_TRUE(payload_state.Initialize());
 
-  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
               ReportAbnormallyTerminatedUpdateAttemptMetrics())
       .Times(1);
   payload_state.UpdateEngineStarted();
 
-  EXPECT_FALSE(fake_prefs.Exists(kPrefsAttemptInProgress));
+  EXPECT_FALSE(fake_prefs->Exists(kPrefsAttemptInProgress));
 }
 
-TEST(PayloadStateTest, AbnormalTerminationAttemptMetricsClearedOnSucceess) {
-  PayloadState payload_state;
-  FakeSystemState fake_system_state;
-  FakePrefs fake_prefs;
-
+TEST_F(PayloadStateTest, AbnormalTerminationAttemptMetricsClearedOnSucceess) {
   // Make sure the marker is written and cleared during an attempt and
   // also that we DO NOT emit the metric (since the attempt didn't end
   // abnormally).
-  fake_system_state.set_prefs(&fake_prefs);
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  PayloadState payload_state;
+  EXPECT_TRUE(payload_state.Initialize());
   OmahaResponse response;
   response.packages.resize(1);
   payload_state.SetResponse(response);
 
-  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
               ReportAbnormallyTerminatedUpdateAttemptMetrics())
       .Times(0);
 
+  auto* fake_prefs = FakeSystemState::Get()->fake_prefs();
   // Attempt not in progress, should be clear.
-  EXPECT_FALSE(fake_prefs.Exists(kPrefsAttemptInProgress));
+  EXPECT_FALSE(fake_prefs->Exists(kPrefsAttemptInProgress));
 
   payload_state.UpdateRestarted();
 
   // Attempt in progress, marker should be set.
-  EXPECT_TRUE(fake_prefs.Exists(kPrefsAttemptInProgress));
+  EXPECT_TRUE(fake_prefs->Exists(kPrefsAttemptInProgress));
 
   payload_state.UpdateSucceeded();
 
   // Attempt not in progress, should be clear.
-  EXPECT_FALSE(fake_prefs.Exists(kPrefsAttemptInProgress));
+  EXPECT_FALSE(fake_prefs->Exists(kPrefsAttemptInProgress));
 }
 
-TEST(PayloadStateTest, CandidateUrlsComputedCorrectly) {
+TEST_F(PayloadStateTest, CandidateUrlsComputedCorrectly) {
   OmahaResponse response;
-  FakeSystemState fake_system_state;
   PayloadState payload_state;
 
   policy::MockDevicePolicy disable_http_policy;
-  fake_system_state.set_device_policy(&disable_http_policy);
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  FakeSystemState::Get()->set_device_policy(&disable_http_policy);
+  EXPECT_TRUE(payload_state.Initialize());
 
   // Test with no device policy. Should default to allowing http.
   EXPECT_CALL(disable_http_policy, GetHttpDownloadsEnabled(_))
@@ -1293,7 +1274,7 @@
   // Now, pretend that the HTTP policy is turned on. We want to make sure
   // the new policy is honored.
   policy::MockDevicePolicy enable_http_policy;
-  fake_system_state.set_device_policy(&enable_http_policy);
+  FakeSystemState::Get()->set_device_policy(&enable_http_policy);
   EXPECT_CALL(enable_http_policy, GetHttpDownloadsEnabled(_))
       .WillRepeatedly(DoAll(SetArgPointee<0>(true), Return(true)));
 
@@ -1316,93 +1297,86 @@
   EXPECT_EQ(0U, payload_state.GetUrlFailureCount());
 }
 
-TEST(PayloadStateTest, PayloadTypeMetricWhenTypeIsDelta) {
+TEST_F(PayloadStateTest, PayloadTypeMetricWhenTypeIsDelta) {
   OmahaResponse response;
   PayloadState payload_state;
-  FakeSystemState fake_system_state;
 
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
   SetupPayloadStateWith2Urls("Hash6437", true, true, &payload_state, &response);
 
   // Simulate a successful download and update.
   payload_state.DownloadComplete();
-  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
               ReportSuccessfulUpdateMetrics(
                   _, _, kPayloadTypeDelta, _, _, _, _, _, _, _));
   payload_state.UpdateSucceeded();
 
   // Mock a request where the delta was disabled but Omaha sends a delta
   // anyway, and test again.
-  OmahaRequestParams params(&fake_system_state);
+  OmahaRequestParams params;
   params.set_delta_okay(false);
-  fake_system_state.set_request_params(&params);
+  FakeSystemState::Get()->set_request_params(&params);
 
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
   SetupPayloadStateWith2Urls("Hash6437", true, true, &payload_state, &response);
 
   payload_state.DownloadComplete();
 
-  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
               ReportSuccessfulUpdateMetrics(
                   _, _, kPayloadTypeDelta, _, _, _, _, _, _, _));
   payload_state.UpdateSucceeded();
 }
 
-TEST(PayloadStateTest, PayloadTypeMetricWhenTypeIsForcedFull) {
+TEST_F(PayloadStateTest, PayloadTypeMetricWhenTypeIsForcedFull) {
   OmahaResponse response;
   PayloadState payload_state;
-  FakeSystemState fake_system_state;
-
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
-  SetupPayloadStateWith2Urls(
-      "Hash6437", true, false, &payload_state, &response);
 
   // Mock a request where the delta was disabled.
-  OmahaRequestParams params(&fake_system_state);
+  OmahaRequestParams params;
   params.set_delta_okay(false);
-  fake_system_state.set_request_params(&params);
+  FakeSystemState::Get()->set_request_params(&params);
+
+  EXPECT_TRUE(payload_state.Initialize());
+  SetupPayloadStateWith2Urls(
+      "Hash6437", true, false, &payload_state, &response);
 
   // Simulate a successful download and update.
   payload_state.DownloadComplete();
 
-  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
               ReportSuccessfulUpdateMetrics(
                   _, _, kPayloadTypeForcedFull, _, _, _, _, _, _, _));
   payload_state.UpdateSucceeded();
 }
 
-TEST(PayloadStateTest, PayloadTypeMetricWhenTypeIsFull) {
+TEST_F(PayloadStateTest, PayloadTypeMetricWhenTypeIsFull) {
   OmahaResponse response;
   PayloadState payload_state;
-  FakeSystemState fake_system_state;
 
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
   SetupPayloadStateWith2Urls(
       "Hash6437", true, false, &payload_state, &response);
 
   // Mock a request where the delta is enabled, although the result is full.
-  OmahaRequestParams params(&fake_system_state);
+  OmahaRequestParams params;
   params.set_delta_okay(true);
-  fake_system_state.set_request_params(&params);
+  FakeSystemState::Get()->set_request_params(&params);
 
   // Simulate a successful download and update.
   payload_state.DownloadComplete();
 
-  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
               ReportSuccessfulUpdateMetrics(
                   _, _, kPayloadTypeFull, _, _, _, _, _, _, _));
   payload_state.UpdateSucceeded();
 }
 
-TEST(PayloadStateTest, RebootAfterUpdateFailedMetric) {
-  FakeSystemState fake_system_state;
+TEST_F(PayloadStateTest, RebootAfterUpdateFailedMetric) {
   OmahaResponse response;
   PayloadState payload_state;
-  FakePrefs fake_prefs;
-  fake_system_state.set_prefs(&fake_prefs);
-
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
   SetupPayloadStateWith2Urls(
       "Hash3141", true, false, &payload_state, &response);
 
@@ -1412,40 +1386,40 @@
   payload_state.ExpectRebootInNewVersion("Version:12345678");
 
   // Reboot into the same environment to get an UMA metric with a value of 1.
-  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
               ReportFailedUpdateCount(1));
   payload_state.ReportFailedBootIfNeeded();
-  Mock::VerifyAndClearExpectations(fake_system_state.mock_metrics_reporter());
+  Mock::VerifyAndClearExpectations(
+      FakeSystemState::Get()->mock_metrics_reporter());
 
   // Simulate a second update and reboot into the same environment, this should
   // send a value of 2.
   payload_state.ExpectRebootInNewVersion("Version:12345678");
 
-  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
               ReportFailedUpdateCount(2));
   payload_state.ReportFailedBootIfNeeded();
-  Mock::VerifyAndClearExpectations(fake_system_state.mock_metrics_reporter());
+  Mock::VerifyAndClearExpectations(
+      FakeSystemState::Get()->mock_metrics_reporter());
 
   // Simulate a third failed reboot to new version, but this time for a
   // different payload. This should send a value of 1 this time.
   payload_state.ExpectRebootInNewVersion("Version:3141592");
-  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
               ReportFailedUpdateCount(1));
   payload_state.ReportFailedBootIfNeeded();
-  Mock::VerifyAndClearExpectations(fake_system_state.mock_metrics_reporter());
+  Mock::VerifyAndClearExpectations(
+      FakeSystemState::Get()->mock_metrics_reporter());
 }
 
-TEST(PayloadStateTest, RebootAfterUpdateSucceed) {
-  FakeSystemState fake_system_state;
+TEST_F(PayloadStateTest, RebootAfterUpdateSucceed) {
   OmahaResponse response;
   PayloadState payload_state;
-  FakePrefs fake_prefs;
-  fake_system_state.set_prefs(&fake_prefs);
-
-  FakeBootControl* fake_boot_control = fake_system_state.fake_boot_control();
+  FakeBootControl* fake_boot_control =
+      FakeSystemState::Get()->fake_boot_control();
   fake_boot_control->SetCurrentSlot(0);
 
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
   SetupPayloadStateWith2Urls(
       "Hash3141", true, false, &payload_state, &response);
 
@@ -1457,7 +1431,7 @@
   // Change the BootDevice to a different one, no metric should be sent.
   fake_boot_control->SetCurrentSlot(1);
 
-  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
               ReportFailedUpdateCount(_))
       .Times(0);
   payload_state.ReportFailedBootIfNeeded();
@@ -1468,14 +1442,10 @@
   payload_state.ReportFailedBootIfNeeded();
 }
 
-TEST(PayloadStateTest, RebootAfterCanceledUpdate) {
-  FakeSystemState fake_system_state;
+TEST_F(PayloadStateTest, RebootAfterCanceledUpdate) {
   OmahaResponse response;
   PayloadState payload_state;
-  FakePrefs fake_prefs;
-
-  fake_system_state.set_prefs(&fake_prefs);
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
   SetupPayloadStateWith2Urls(
       "Hash3141", true, false, &payload_state, &response);
 
@@ -1484,7 +1454,7 @@
   payload_state.UpdateSucceeded();
   payload_state.ExpectRebootInNewVersion("Version:12345678");
 
-  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
               ReportFailedUpdateCount(_))
       .Times(0);
 
@@ -1495,15 +1465,11 @@
   payload_state.ReportFailedBootIfNeeded();
 }
 
-TEST(PayloadStateTest, UpdateSuccessWithWipedPrefs) {
-  FakeSystemState fake_system_state;
+TEST_F(PayloadStateTest, UpdateSuccessWithWipedPrefs) {
   PayloadState payload_state;
-  FakePrefs fake_prefs;
+  EXPECT_TRUE(payload_state.Initialize());
 
-  fake_system_state.set_prefs(&fake_prefs);
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
-
-  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
               ReportFailedUpdateCount(_))
       .Times(0);
 
@@ -1511,14 +1477,10 @@
   payload_state.ReportFailedBootIfNeeded();
 }
 
-TEST(PayloadStateTest, DisallowP2PAfterTooManyAttempts) {
+TEST_F(PayloadStateTest, DisallowP2PAfterTooManyAttempts) {
   OmahaResponse response;
   PayloadState payload_state;
-  FakeSystemState fake_system_state;
-  FakePrefs fake_prefs;
-  fake_system_state.set_prefs(&fake_prefs);
-
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
   SetupPayloadStateWith2Urls(
       "Hash8593", true, false, &payload_state, &response);
 
@@ -1532,22 +1494,17 @@
   EXPECT_FALSE(payload_state.P2PAttemptAllowed());
 }
 
-TEST(PayloadStateTest, DisallowP2PAfterDeadline) {
+TEST_F(PayloadStateTest, DisallowP2PAfterDeadline) {
   OmahaResponse response;
   PayloadState payload_state;
-  FakeSystemState fake_system_state;
-  FakeClock fake_clock;
-  FakePrefs fake_prefs;
-
-  fake_system_state.set_clock(&fake_clock);
-  fake_system_state.set_prefs(&fake_prefs);
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
   SetupPayloadStateWith2Urls(
       "Hash8593", true, false, &payload_state, &response);
 
   // Set the clock to 1 second.
   Time epoch = Time::FromInternalValue(1000000);
-  fake_clock.SetWallclockTime(epoch);
+  auto* fake_clock = FakeSystemState::Get()->fake_clock();
+  fake_clock->SetWallclockTime(epoch);
 
   // Do an attempt - this will set the timestamp.
   payload_state.P2PNewAttempt();
@@ -1559,7 +1516,7 @@
   EXPECT_TRUE(payload_state.P2PAttemptAllowed());
 
   // Set clock to half the deadline - this should work.
-  fake_clock.SetWallclockTime(
+  fake_clock->SetWallclockTime(
       epoch + TimeDelta::FromSeconds(kMaxP2PAttemptTimeSeconds) / 2);
   EXPECT_TRUE(payload_state.P2PAttemptAllowed());
 
@@ -1568,24 +1525,20 @@
   EXPECT_EQ(epoch, payload_state.GetP2PFirstAttemptTimestamp());
 
   // Set clock to _just_ before the deadline - this should work.
-  fake_clock.SetWallclockTime(
+  fake_clock->SetWallclockTime(
       epoch + TimeDelta::FromSeconds(kMaxP2PAttemptTimeSeconds - 1));
   EXPECT_TRUE(payload_state.P2PAttemptAllowed());
 
   // Set clock to _just_ after the deadline - this should not work.
-  fake_clock.SetWallclockTime(
+  fake_clock->SetWallclockTime(
       epoch + TimeDelta::FromSeconds(kMaxP2PAttemptTimeSeconds + 1));
   EXPECT_FALSE(payload_state.P2PAttemptAllowed());
 }
 
-TEST(PayloadStateTest, P2PStateVarsInitialValue) {
+TEST_F(PayloadStateTest, P2PStateVarsInitialValue) {
   OmahaResponse response;
   PayloadState payload_state;
-  FakeSystemState fake_system_state;
-  FakePrefs fake_prefs;
-
-  fake_system_state.set_prefs(&fake_prefs);
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
   SetupPayloadStateWith2Urls(
       "Hash8593", true, false, &payload_state, &response);
 
@@ -1594,21 +1547,16 @@
   EXPECT_EQ(0, payload_state.GetP2PNumAttempts());
 }
 
-TEST(PayloadStateTest, P2PStateVarsArePersisted) {
+TEST_F(PayloadStateTest, P2PStateVarsArePersisted) {
   OmahaResponse response;
   PayloadState payload_state;
-  FakeSystemState fake_system_state;
-  FakeClock fake_clock;
-  FakePrefs fake_prefs;
-  fake_system_state.set_clock(&fake_clock);
-  fake_system_state.set_prefs(&fake_prefs);
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
   SetupPayloadStateWith2Urls(
       "Hash8593", true, false, &payload_state, &response);
 
   // Set the clock to something known.
   Time time = Time::FromInternalValue(12345);
-  fake_clock.SetWallclockTime(time);
+  FakeSystemState::Get()->fake_clock()->SetWallclockTime(time);
 
   // New p2p attempt - as a side-effect this will update the p2p state vars.
   payload_state.P2PNewAttempt();
@@ -1618,27 +1566,21 @@
   // Now create a new PayloadState and check that it loads the state
   // vars correctly.
   PayloadState payload_state2;
-  EXPECT_TRUE(payload_state2.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state2.Initialize());
   EXPECT_EQ(1, payload_state2.GetP2PNumAttempts());
   EXPECT_EQ(time, payload_state2.GetP2PFirstAttemptTimestamp());
 }
 
-TEST(PayloadStateTest, P2PStateVarsAreClearedOnNewResponse) {
+TEST_F(PayloadStateTest, P2PStateVarsAreClearedOnNewResponse) {
   OmahaResponse response;
   PayloadState payload_state;
-  FakeSystemState fake_system_state;
-  FakeClock fake_clock;
-  FakePrefs fake_prefs;
-  fake_system_state.set_clock(&fake_clock);
-  fake_system_state.set_prefs(&fake_prefs);
-
-  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+  EXPECT_TRUE(payload_state.Initialize());
   SetupPayloadStateWith2Urls(
       "Hash8593", true, false, &payload_state, &response);
 
   // Set the clock to something known.
   Time time = Time::FromInternalValue(12345);
-  fake_clock.SetWallclockTime(time);
+  FakeSystemState::Get()->fake_clock()->SetWallclockTime(time);
 
   // New p2p attempt - as a side-effect this will update the p2p state vars.
   payload_state.P2PNewAttempt();
@@ -1655,4 +1597,162 @@
   EXPECT_EQ(null_time, payload_state.GetP2PFirstAttemptTimestamp());
 }
 
+TEST_F(PayloadStateTest, NextPayloadResetsUrlIndex) {
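+  // After switching to the next payload, the current URL should start again
+  // from that payload's first candidate URL.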
+  PayloadState payload_state;
+  StrictMock<MockExcluder> mock_excluder;
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetExcluder())
+      .WillOnce(Return(&mock_excluder));
+  EXPECT_TRUE(payload_state.Initialize());
+
+  OmahaResponse response;
+  response.packages.push_back(
+      {.payload_urls = {"http://test1a", "http://test2a"},
+       .size = 123456789,
+       .metadata_size = 58123,
+       .metadata_signature = "msign",
+       .hash = "hash"});
+  response.packages.push_back({.payload_urls = {"http://test1b"},
+                               .size = 123456789,
+                               .metadata_size = 58123,
+                               .metadata_signature = "msign",
+                               .hash = "hash"});
+  payload_state.SetResponse(response);
+
+  EXPECT_EQ(payload_state.GetCurrentUrl(), "http://test1a");
+  payload_state.IncrementUrlIndex();
+  EXPECT_EQ(payload_state.GetCurrentUrl(), "http://test2a");
+
+  EXPECT_TRUE(payload_state.NextPayload());
+  EXPECT_EQ(payload_state.GetCurrentUrl(), "http://test1b");
+}
+
+TEST_F(PayloadStateTest, ExcludeNoopForNonExcludables) {
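+  // A payload marked with can_exclude = false must never reach the excluder.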
+  PayloadState payload_state;
+  StrictMock<MockExcluder> mock_excluder;
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetExcluder())
+      .WillOnce(Return(&mock_excluder));
+  EXPECT_TRUE(payload_state.Initialize());
+
+  OmahaResponse response;
+  response.packages.push_back(
+      {.payload_urls = {"http://test1a", "http://test2a"},
+       .size = 123456789,
+       .metadata_size = 58123,
+       .metadata_signature = "msign",
+       .hash = "hash",
+       .can_exclude = false});
+  payload_state.SetResponse(response);
+
+  EXPECT_CALL(mock_excluder, Exclude(_)).Times(0);
+  payload_state.ExcludeCurrentPayload();
+}
+
+TEST_F(PayloadStateTest, ExcludeOnlyCanExcludables) {
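+  // A payload marked with can_exclude = true is handed to the excluder, keyed
+  // by the exclusion name derived from its first URL.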
+  PayloadState payload_state;
+  StrictMock<MockExcluder> mock_excluder;
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetExcluder())
+      .WillOnce(Return(&mock_excluder));
+  EXPECT_TRUE(payload_state.Initialize());
+
+  OmahaResponse response;
+  response.packages.push_back(
+      {.payload_urls = {"http://test1a", "http://test2a"},
+       .size = 123456789,
+       .metadata_size = 58123,
+       .metadata_signature = "msign",
+       .hash = "hash",
+       .can_exclude = true});
+  payload_state.SetResponse(response);
+
+  EXPECT_CALL(mock_excluder, Exclude(utils::GetExclusionName("http://test1a")))
+      .WillOnce(Return(true));
+  payload_state.ExcludeCurrentPayload();
+}
+
+TEST_F(PayloadStateTest, IncrementFailureExclusionTest) {
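+  // Failures on a non-critical payload trigger exclusion only once the
+  // per-URL failure allowance (max_failure_count_per_url) is exhausted; the
+  // critical payload is never excluded.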
+  PayloadState payload_state;
+  StrictMock<MockExcluder> mock_excluder;
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetExcluder())
+      .WillOnce(Return(&mock_excluder));
+  EXPECT_TRUE(payload_state.Initialize());
+
+  OmahaResponse response;
+  // Critical package.
+  response.packages.push_back(
+      {.payload_urls = {"http://crit-test1a", "http://crit-test2a"},
+       .size = 123456789,
+       .metadata_size = 58123,
+       .metadata_signature = "msign",
+       .hash = "hash",
+       .can_exclude = false});
+  // Non-critical package.
+  response.packages.push_back(
+      {.payload_urls = {"http://test1a", "http://test2a"},
+       .size = 123456789,
+       .metadata_size = 58123,
+       .metadata_signature = "msign",
+       .hash = "hash",
+       .can_exclude = true});
+  response.max_failure_count_per_url = 2;
+  payload_state.SetResponse(response);
+
+  // The critical package won't be excluded.
+  // Increment twice, as the failure count allowed per URL is set to 2.
+  payload_state.IncrementFailureCount();
+  payload_state.IncrementFailureCount();
+
+  EXPECT_TRUE(payload_state.NextPayload());
+
+  // The first failure increment should not trigger an exclusion.
+  payload_state.IncrementFailureCount();
+
+  // The second failure increment should trigger the exclusion.
+  EXPECT_CALL(mock_excluder, Exclude(utils::GetExclusionName("http://test1a")))
+      .WillOnce(Return(true));
+  payload_state.IncrementFailureCount();
+}
+
+TEST_F(PayloadStateTest, HaltExclusionPostPayloadExhaustion) {
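+  // Once all payloads have been exhausted, further exclusion requests become
+  // no-ops.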
+  PayloadState payload_state;
+  StrictMock<MockExcluder> mock_excluder;
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetExcluder())
+      .WillOnce(Return(&mock_excluder));
+  EXPECT_TRUE(payload_state.Initialize());
+
+  OmahaResponse response;
+  // Non-critical package.
+  response.packages.push_back(
+      {.payload_urls = {"http://test1a", "http://test2a"},
+       .size = 123456789,
+       .metadata_size = 58123,
+       .metadata_signature = "msign",
+       .hash = "hash",
+       .can_exclude = true});
+  payload_state.SetResponse(response);
+
+  // The excluder should be invoked when the payload is excluded.
+  EXPECT_CALL(mock_excluder, Exclude(utils::GetExclusionName("http://test1a")))
+      .WillOnce(Return(true));
+  payload_state.ExcludeCurrentPayload();
+
+  // No more payloads to go through.
+  EXPECT_FALSE(payload_state.NextPayload());
+
+  // The excluder should not be called, as all |Payload|s are exhausted.
+  payload_state.ExcludeCurrentPayload();
+}
+
+TEST_F(PayloadStateTest, NonInfinitePayloadIndexIncrement) {
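+  // With an empty response, repeated NextPayload() calls must not keep
+  // advancing the internal payload index.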
+  PayloadState payload_state;
+  EXPECT_TRUE(payload_state.Initialize());
+
+  payload_state.SetResponse({});
+
+  EXPECT_FALSE(payload_state.NextPayload());
+  int payload_index = payload_state.payload_index_;
+
+  EXPECT_FALSE(payload_state.NextPayload());
+  EXPECT_EQ(payload_index, payload_state.payload_index_);
+}
+
 }  // namespace chromeos_update_engine
diff --git a/common/platform_constants_chromeos.cc b/cros/platform_constants_chromeos.cc
similarity index 96%
rename from common/platform_constants_chromeos.cc
rename to cros/platform_constants_chromeos.cc
index fe94a45..5a5a521 100644
--- a/common/platform_constants_chromeos.cc
+++ b/cros/platform_constants_chromeos.cc
@@ -32,7 +32,6 @@
 const char kOmahaResponseDeadlineFile[] = "/tmp/update-check-response-deadline";
 // This directory is wiped during powerwash.
 const char kNonVolatileDirectory[] = "/var/lib/update_engine";
-const char kPostinstallMountOptions[] = "";
 
 }  // namespace constants
 }  // namespace chromeos_update_engine
diff --git a/power_manager_chromeos.cc b/cros/power_manager_chromeos.cc
similarity index 92%
rename from power_manager_chromeos.cc
rename to cros/power_manager_chromeos.cc
index 531d367..c1a2859 100644
--- a/power_manager_chromeos.cc
+++ b/cros/power_manager_chromeos.cc
@@ -14,14 +14,14 @@
 // limitations under the License.
 //
 
-#include "update_engine/power_manager_chromeos.h"
+#include "update_engine/cros/power_manager_chromeos.h"
 
 #include <memory>
 
 #include <power_manager/dbus-constants.h>
 #include <power_manager/dbus-proxies.h>
 
-#include "update_engine/dbus_connection.h"
+#include "update_engine/cros/dbus_connection.h"
 
 namespace chromeos_update_engine {
 
diff --git a/power_manager_chromeos.h b/cros/power_manager_chromeos.h
similarity index 83%
rename from power_manager_chromeos.h
rename to cros/power_manager_chromeos.h
index eeb14d8..8930508 100644
--- a/power_manager_chromeos.h
+++ b/cros/power_manager_chromeos.h
@@ -14,13 +14,13 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_POWER_MANAGER_CHROMEOS_H_
-#define UPDATE_ENGINE_POWER_MANAGER_CHROMEOS_H_
+#ifndef UPDATE_ENGINE_CROS_POWER_MANAGER_CHROMEOS_H_
+#define UPDATE_ENGINE_CROS_POWER_MANAGER_CHROMEOS_H_
 
 #include <base/macros.h>
 #include <power_manager/dbus-proxies.h>
 
-#include "update_engine/power_manager_interface.h"
+#include "update_engine/cros/power_manager_interface.h"
 
 namespace chromeos_update_engine {
 
@@ -41,4 +41,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_POWER_MANAGER_CHROMEOS_H_
+#endif  // UPDATE_ENGINE_CROS_POWER_MANAGER_CHROMEOS_H_
diff --git a/power_manager_interface.h b/cros/power_manager_interface.h
similarity index 87%
rename from power_manager_interface.h
rename to cros/power_manager_interface.h
index 8f77650..1f712d2 100644
--- a/power_manager_interface.h
+++ b/cros/power_manager_interface.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_POWER_MANAGER_INTERFACE_H_
-#define UPDATE_ENGINE_POWER_MANAGER_INTERFACE_H_
+#ifndef UPDATE_ENGINE_CROS_POWER_MANAGER_INTERFACE_H_
+#define UPDATE_ENGINE_CROS_POWER_MANAGER_INTERFACE_H_
 
 #include <memory>
 
@@ -44,4 +44,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_POWER_MANAGER_INTERFACE_H_
+#endif  // UPDATE_ENGINE_CROS_POWER_MANAGER_INTERFACE_H_
diff --git a/real_system_state.cc b/cros/real_system_state.cc
similarity index 69%
rename from real_system_state.cc
rename to cros/real_system_state.cc
index 2f18b4d..5f89b27 100644
--- a/real_system_state.cc
+++ b/cros/real_system_state.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/real_system_state.h"
+#include "update_engine/cros/real_system_state.h"
 
 #include <memory>
 #include <string>
@@ -25,37 +25,23 @@
 #include <base/location.h>
 #include <base/time/time.h>
 #include <brillo/message_loops/message_loop.h>
-#if USE_CHROME_KIOSK_APP
 #include <chromeos/dbus/service_constants.h>
-#endif  // USE_CHROME_KIOSK_APP
 
 #include "update_engine/common/boot_control.h"
 #include "update_engine/common/boot_control_stub.h"
 #include "update_engine/common/constants.h"
-#include "update_engine/common/dlcservice.h"
+#include "update_engine/common/dlcservice_interface.h"
 #include "update_engine/common/hardware.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/metrics_reporter_omaha.h"
-#include "update_engine/update_boot_flags_action.h"
+#include "update_engine/cros/metrics_reporter_omaha.h"
 #if USE_DBUS
-#include "update_engine/dbus_connection.h"
+#include "update_engine/cros/dbus_connection.h"
 #endif  // USE_DBUS
 #include "update_engine/update_manager/state_factory.h"
 
-using brillo::MessageLoop;
-
 namespace chromeos_update_engine {
 
-RealSystemState::~RealSystemState() {
-  // Prevent any DBus communication from UpdateAttempter when shutting down the
-  // daemon.
-  if (update_attempter_)
-    update_attempter_->ClearObservers();
-}
-
 bool RealSystemState::Initialize() {
-  metrics_reporter_.Initialize();
-
   boot_control_ = boot_control::CreateBootControl();
   if (!boot_control_) {
     LOG(WARNING) << "Unable to create BootControl instance, using stub "
@@ -69,15 +55,13 @@
     return false;
   }
 
-#if USE_CHROME_KIOSK_APP
   kiosk_app_proxy_.reset(new org::chromium::KioskAppServiceInterfaceProxy(
       DBusConnection::Get()->GetDBus(), chromeos::kKioskAppServiceName));
-#endif  // USE_CHROME_KIOSK_APP
 
   LOG_IF(INFO, !hardware_->IsNormalBootMode()) << "Booted in dev mode.";
   LOG_IF(INFO, !hardware_->IsOfficialBuild()) << "Booted non-official build.";
 
-  connection_manager_ = connection_manager::CreateConnectionManager(this);
+  connection_manager_ = connection_manager::CreateConnectionManager();
   if (!connection_manager_) {
     LOG(ERROR) << "Error initializing the ConnectionManagerInterface.";
     return false;
@@ -140,7 +124,7 @@
   // will be re-initialized before every request using the actual request
   // options. This initialization here pre-loads current channel and version, so
   // the DBus service can access it.
-  if (!request_params_.Init("", "", false)) {
+  if (!request_params_.Init("", "", {})) {
     LOG(WARNING) << "Ignoring OmahaRequestParams initialization error. Some "
                     "features might not work properly.";
   }
@@ -149,8 +133,7 @@
       new CertificateChecker(prefs_.get(), &openssl_wrapper_));
   certificate_checker_->Init();
 
-  update_attempter_.reset(
-      new UpdateAttempter(this, certificate_checker_.get()));
+  update_attempter_.reset(new UpdateAttempter(certificate_checker_.get()));
 
   // Initialize the UpdateAttempter before the UpdateManager.
   update_attempter_->Init();
@@ -158,19 +141,13 @@
   // Initialize the Update Manager using the default state factory.
   chromeos_update_manager::State* um_state =
       chromeos_update_manager::DefaultStateFactory(&policy_provider_,
-#if USE_CHROME_KIOSK_APP
-                                                   kiosk_app_proxy_.get(),
-#else
-                                                   nullptr,
-#endif  // USE_CHROME_KIOSK_APP
-                                                   this);
+                                                   kiosk_app_proxy_.get());
 
   if (!um_state) {
     LOG(ERROR) << "Failed to initialize the Update Manager.";
     return false;
   }
   update_manager_.reset(new chromeos_update_manager::UpdateManager(
-      &clock_,
       base::TimeDelta::FromSeconds(5),
       base::TimeDelta::FromHours(12),
       um_state));
@@ -178,25 +155,23 @@
   // The P2P Manager depends on the Update Manager for its initialization.
   p2p_manager_.reset(
       P2PManager::Construct(nullptr,
-                            &clock_,
                             update_manager_.get(),
                             "cros_au",
                             kMaxP2PFilesToKeep,
                             base::TimeDelta::FromDays(kMaxP2PFileAgeDays)));
 
-  if (!payload_state_.Initialize(this)) {
+  if (!payload_state_.Initialize()) {
     LOG(ERROR) << "Failed to initialize the payload state object.";
     return false;
   }
 
-  // For devices that are not rollback enabled (ie. consumer devices),
-  // initialize max kernel key version to 0xfffffffe, which is logically
-  // infinity.
-  if (policy_provider_.IsConsumerDevice()) {
+  // For images built for debugging purposes, like test images, initialize the
+  // max kernel key version to 0xfffffffe, which is logically infinity.
+  if (!hardware_->IsOfficialBuild()) {
     if (!hardware()->SetMaxKernelKeyRollforward(
             chromeos_update_manager::kRollforwardInfinity)) {
       LOG(ERROR) << "Failed to set kernel_max_rollforward to infinity for"
-                 << " consumer devices";
+                 << " device with test/dev image.";
     }
   }
 
@@ -204,43 +179,4 @@
   return true;
 }
 
-bool RealSystemState::StartUpdater() {
-  // Initiate update checks.
-  update_attempter_->ScheduleUpdates();
-
-  auto update_boot_flags_action =
-      std::make_unique<UpdateBootFlagsAction>(boot_control_.get());
-  processor_.EnqueueAction(std::move(update_boot_flags_action));
-  // Update boot flags after 45 seconds.
-  MessageLoop::current()->PostDelayedTask(
-      FROM_HERE,
-      base::Bind(&ActionProcessor::StartProcessing,
-                 base::Unretained(&processor_)),
-      base::TimeDelta::FromSeconds(45));
-
-  // Broadcast the update engine status on startup to ensure consistent system
-  // state on crashes.
-  MessageLoop::current()->PostTask(
-      FROM_HERE,
-      base::Bind(&UpdateAttempter::BroadcastStatus,
-                 base::Unretained(update_attempter_.get())));
-
-  // Run the UpdateEngineStarted() method on |update_attempter|.
-  MessageLoop::current()->PostTask(
-      FROM_HERE,
-      base::Bind(&UpdateAttempter::UpdateEngineStarted,
-                 base::Unretained(update_attempter_.get())));
-  return true;
-}
-
-void RealSystemState::AddObserver(ServiceObserverInterface* observer) {
-  CHECK(update_attempter_.get());
-  update_attempter_->AddObserver(observer);
-}
-
-void RealSystemState::RemoveObserver(ServiceObserverInterface* observer) {
-  CHECK(update_attempter_.get());
-  update_attempter_->RemoveObserver(observer);
-}
-
 }  // namespace chromeos_update_engine
diff --git a/cros/real_system_state.h b/cros/real_system_state.h
new file mode 100644
index 0000000..81a5e0e
--- /dev/null
+++ b/cros/real_system_state.h
@@ -0,0 +1,181 @@
+//
+// Copyright (C) 2013 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_CROS_REAL_SYSTEM_STATE_H_
+#define UPDATE_ENGINE_CROS_REAL_SYSTEM_STATE_H_
+
+#include "update_engine/common/system_state.h"
+
+#include <memory>
+#include <set>
+
+#include <policy/device_policy.h>
+#include <kiosk-app/dbus-proxies.h>
+
+#include "update_engine/certificate_checker.h"
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/clock.h"
+#include "update_engine/common/daemon_state_interface.h"
+#include "update_engine/common/dlcservice_interface.h"
+#include "update_engine/common/hardware_interface.h"
+#include "update_engine/common/metrics_reporter_interface.h"
+#include "update_engine/common/prefs.h"
+#include "update_engine/cros/connection_manager_interface.h"
+#include "update_engine/cros/metrics_reporter_omaha.h"
+#include "update_engine/cros/p2p_manager.h"
+#include "update_engine/cros/payload_state.h"
+#include "update_engine/cros/power_manager_interface.h"
+#include "update_engine/cros/update_attempter.h"
+#include "update_engine/update_manager/update_manager.h"
+
+namespace chromeos_update_engine {
+
+// A real implementation of the SystemStateInterface which is
+// used by the actual product code.
+class RealSystemState : public SystemState {
+ public:
+  // Constructs all system objects that do not require separate initialization;
+  // see Initialize() below for the remaining ones.
+  RealSystemState() = default;
+  ~RealSystemState() = default;
+
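+  // Installs this instance as the global SystemState singleton and runs
+  // Initialize(); fails fatally if an instance was already set or if
+  // initialization fails.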
+  static void SetInstance(RealSystemState* system_state) {
+    CHECK(g_pointer_ == nullptr) << "SystemState has been previously set.";
+    g_pointer_ = system_state;
+    LOG_IF(FATAL, !system_state->Initialize())
+        << "Failed to initialize system state.";
+  }
+
+  // SystemState overrides.
+  void set_device_policy(const policy::DevicePolicy* device_policy) override {
+    device_policy_ = device_policy;
+  }
+
+  const policy::DevicePolicy* device_policy() override {
+    return device_policy_;
+  }
+
+  BootControlInterface* boot_control() override { return boot_control_.get(); }
+
+  ClockInterface* clock() override { return &clock_; }
+
+  ConnectionManagerInterface* connection_manager() override {
+    return connection_manager_.get();
+  }
+
+  HardwareInterface* hardware() override { return hardware_.get(); }
+
+  MetricsReporterInterface* metrics_reporter() override {
+    return &metrics_reporter_;
+  }
+
+  PrefsInterface* prefs() override { return prefs_.get(); }
+
+  PrefsInterface* powerwash_safe_prefs() override {
+    return powerwash_safe_prefs_.get();
+  }
+
+  PayloadStateInterface* payload_state() override { return &payload_state_; }
+
+  UpdateAttempter* update_attempter() override {
+    return update_attempter_.get();
+  }
+
+  OmahaRequestParams* request_params() override { return &request_params_; }
+
+  P2PManager* p2p_manager() override { return p2p_manager_.get(); }
+
+  chromeos_update_manager::UpdateManager* update_manager() override {
+    return update_manager_.get();
+  }
+
+  PowerManagerInterface* power_manager() override {
+    return power_manager_.get();
+  }
+
+  bool system_rebooted() override { return system_rebooted_; }
+
+  DlcServiceInterface* dlcservice() override { return dlcservice_.get(); }
+
+ private:
+  // Initializes and sets up system objects that require initialization
+  // separate from construction. Returns |true| on success.
+  bool Initialize();
+
+  // Real DBus proxies using the DBus connection.
+  std::unique_ptr<org::chromium::KioskAppServiceInterfaceProxy>
+      kiosk_app_proxy_;
+
+  // Interface for the power manager.
+  std::unique_ptr<PowerManagerInterface> power_manager_;
+
+  // Interface for dlcservice.
+  std::unique_ptr<DlcServiceInterface> dlcservice_;
+
+  // Interface for the bootloader control.
+  std::unique_ptr<BootControlInterface> boot_control_;
+
+  // Interface for the clock.
+  Clock clock_;
+
+  // The latest device policy object from the policy provider.
+  const policy::DevicePolicy* device_policy_{nullptr};
+
+  // The connection manager object that makes download decisions depending on
+  // the current type of connection.
+  std::unique_ptr<ConnectionManagerInterface> connection_manager_;
+
+  // Interface for the hardware functions.
+  std::unique_ptr<HardwareInterface> hardware_;
+
+  // The Metrics reporter for reporting UMA stats.
+  MetricsReporterOmaha metrics_reporter_;
+
+  // Interface for persisted store.
+  std::unique_ptr<PrefsInterface> prefs_;
+
+  // Interface for persisted store that persists across powerwashes.
+  std::unique_ptr<PrefsInterface> powerwash_safe_prefs_;
+
+  // All state pertaining to payload state such as response, URL, backoff
+  // states.
+  PayloadState payload_state_;
+
+  // OpenSSLWrapper and CertificateChecker used for checking SSL certificates.
+  OpenSSLWrapper openssl_wrapper_;
+  std::unique_ptr<CertificateChecker> certificate_checker_;
+
+  // Pointer to the update attempter object.
+  std::unique_ptr<UpdateAttempter> update_attempter_;
+
+  // Common parameters for all Omaha requests.
+  OmahaRequestParams request_params_;
+
+  std::unique_ptr<P2PManager> p2p_manager_;
+
+  std::unique_ptr<chromeos_update_manager::UpdateManager> update_manager_;
+
+  policy::PolicyProvider policy_provider_;
+
+  // If true, this is the first instance of the update engine since the system
+  // rebooted. Important for tracking whether we are running an instance of
+  // the update engine on first boot or due to a crash/restart.
+  bool system_rebooted_{false};
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_CROS_REAL_SYSTEM_STATE_H_
diff --git a/cros/requisition_util.cc b/cros/requisition_util.cc
new file mode 100644
index 0000000..6296d0b
--- /dev/null
+++ b/cros/requisition_util.cc
@@ -0,0 +1,69 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/cros/requisition_util.h"
+
+#include <memory>
+#include <vector>
+
+#include <base/files/file_util.h>
+#include <base/json/json_file_value_serializer.h>
+#include <base/logging.h>
+#include <base/strings/string_util.h>
+
+#include "update_engine/common/subprocess.h"
+#include "update_engine/common/utils.h"
+
+using std::string;
+using std::vector;
+
+namespace {
+
+constexpr char kOemRequisitionKey[] = "oem_device_requisition";
+
+}  // namespace
+
+namespace chromeos_update_engine {
+
+string ReadDeviceRequisition(const base::FilePath& local_state) {
+  string requisition;
+  bool vpd_retval = utils::GetVpdValue(kOemRequisitionKey, &requisition);
+
+  // Some users manually convert non-CfM hardware at enrollment time, so the
+  // VPD value may be missing; check the Local State JSON as well.
+  if ((requisition.empty() || !vpd_retval) && base::PathExists(local_state)) {
+    int error_code;
+    std::string error_msg;
+    JSONFileValueDeserializer deserializer(local_state);
+    std::unique_ptr<base::Value> root =
+        deserializer.Deserialize(&error_code, &error_msg);
+    if (!root) {
+      if (error_code != 0) {
+        LOG(ERROR) << "Unable to deserialize Local State with exit code: "
+                   << error_code << " and error: " << error_msg;
+      }
+      return "";
+    }
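+    // The requisition is stored under enrollment.device_requisition.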
+    auto* path = root->FindPath({"enrollment", "device_requisition"});
+    if (!path || !path->is_string()) {
+      return "";
+    }
+    path->GetAsString(&requisition);
+  }
+  return requisition;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/cros/requisition_util.h b/cros/requisition_util.h
new file mode 100644
index 0000000..6ec4783
--- /dev/null
+++ b/cros/requisition_util.h
@@ -0,0 +1,32 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_CROS_REQUISITION_UTIL_H_
+#define UPDATE_ENGINE_CROS_REQUISITION_UTIL_H_
+
+#include <string>
+
+#include <base/files/file_path.h>
+
+namespace chromeos_update_engine {
+
+// Checks the VPD and Local State for the device's requisition and returns it,
+// or an empty string if the device has no requisition.
+std::string ReadDeviceRequisition(const base::FilePath& local_state);
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_CROS_REQUISITION_UTIL_H_
diff --git a/cros/requisition_util_unittest.cc b/cros/requisition_util_unittest.cc
new file mode 100644
index 0000000..269585e
--- /dev/null
+++ b/cros/requisition_util_unittest.cc
@@ -0,0 +1,94 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/cros/requisition_util.h"
+
+#include <string>
+
+#include <base/files/file_path.h>
+#include <base/files/file_util.h>
+#include <base/files/scoped_temp_dir.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/test_utils.h"
+
+using chromeos_update_engine::test_utils::WriteFileString;
+using std::string;
+
+namespace {
+
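+// A sample Local State with enrollment.device_requisition set to "remora".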
+const char kRemoraJSON[] =
+    "{\n"
+    "   \"the_list\": [ \"val1\", \"val2\" ],\n"
+    "   \"enrollment\": {\n"
+    "      \"autostart\": true,\n"
+    "      \"can_exit\": false,\n"
+    "      \"device_requisition\": \"remora\"\n"
+    "   },\n"
+    "   \"some_String\": \"1337\",\n"
+    "   \"some_int\": 42\n"
+    "}\n";
+
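+// A sample Local State with an empty enrollment.device_requisition.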
+const char kNoEnrollmentJSON[] =
+    "{\n"
+    "   \"the_list\": [ \"val1\", \"val2\" ],\n"
+    "   \"enrollment\": {\n"
+    "      \"autostart\": true,\n"
+    "      \"can_exit\": false,\n"
+    "      \"device_requisition\": \"\"\n"
+    "   },\n"
+    "   \"some_String\": \"1337\",\n"
+    "   \"some_int\": 42\n"
+    "}\n";
+}  // namespace
+
+namespace chromeos_update_engine {
+
+class RequisitionUtilTest : public ::testing::Test {
+ protected:
+  void SetUp() override { ASSERT_TRUE(root_dir_.CreateUniqueTempDir()); }
+
+  void WriteJsonToFile(const string& json) {
+    path_ =
+        base::FilePath(root_dir_.GetPath().value() + "/chronos/Local State");
+    ASSERT_TRUE(base::CreateDirectory(path_.DirName()));
+    ASSERT_TRUE(WriteFileString(path_.value(), json));
+  }
+
+  base::ScopedTempDir root_dir_;
+  base::FilePath path_;
+};
+
+TEST_F(RequisitionUtilTest, BadJsonReturnsEmpty) {
+  WriteJsonToFile("this isn't JSON");
+  EXPECT_EQ("", ReadDeviceRequisition(path_));
+}
+
+TEST_F(RequisitionUtilTest, NoFileReturnsEmpty) {
+  EXPECT_EQ("", ReadDeviceRequisition(path_));
+}
+
+TEST_F(RequisitionUtilTest, EnrollmentRequisition) {
+  WriteJsonToFile(kRemoraJSON);
+  EXPECT_EQ("remora", ReadDeviceRequisition(path_));
+}
+
+TEST_F(RequisitionUtilTest, BlankEnrollment) {
+  WriteJsonToFile(kNoEnrollmentJSON);
+  EXPECT_EQ("", ReadDeviceRequisition(path_));
+}
+
+}  // namespace chromeos_update_engine
diff --git a/shill_proxy.cc b/cros/shill_proxy.cc
similarity index 93%
rename from shill_proxy.cc
rename to cros/shill_proxy.cc
index d398bba..a3c8543 100644
--- a/shill_proxy.cc
+++ b/cros/shill_proxy.cc
@@ -14,9 +14,9 @@
 // limitations under the License.
 //
 
-#include "update_engine/shill_proxy.h"
+#include "update_engine/cros/shill_proxy.h"
 
-#include "update_engine/dbus_connection.h"
+#include "update_engine/cros/dbus_connection.h"
 
 using org::chromium::flimflam::ManagerProxy;
 using org::chromium::flimflam::ManagerProxyInterface;
diff --git a/shill_proxy.h b/cros/shill_proxy.h
similarity index 88%
rename from shill_proxy.h
rename to cros/shill_proxy.h
index 4b466c9..aff428a 100644
--- a/shill_proxy.h
+++ b/cros/shill_proxy.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_SHILL_PROXY_H_
-#define UPDATE_ENGINE_SHILL_PROXY_H_
+#ifndef UPDATE_ENGINE_CROS_SHILL_PROXY_H_
+#define UPDATE_ENGINE_CROS_SHILL_PROXY_H_
 
 #include <memory>
 #include <string>
@@ -25,7 +25,7 @@
 #include <dbus/object_path.h>
 #include <shill/dbus-proxies.h>
 
-#include "update_engine/shill_proxy_interface.h"
+#include "update_engine/cros/shill_proxy_interface.h"
 
 namespace chromeos_update_engine {
 
@@ -51,4 +51,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_SHILL_PROXY_H_
+#endif  // UPDATE_ENGINE_CROS_SHILL_PROXY_H_
diff --git a/shill_proxy_interface.h b/cros/shill_proxy_interface.h
similarity index 91%
rename from shill_proxy_interface.h
rename to cros/shill_proxy_interface.h
index 5f6b44e..19e81f3 100644
--- a/shill_proxy_interface.h
+++ b/cros/shill_proxy_interface.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_SHILL_PROXY_INTERFACE_H_
-#define UPDATE_ENGINE_SHILL_PROXY_INTERFACE_H_
+#ifndef UPDATE_ENGINE_CROS_SHILL_PROXY_INTERFACE_H_
+#define UPDATE_ENGINE_CROS_SHILL_PROXY_INTERFACE_H_
 
 #include <memory>
 #include <string>
@@ -53,4 +53,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_SHILL_PROXY_INTERFACE_H_
+#endif  // UPDATE_ENGINE_CROS_SHILL_PROXY_INTERFACE_H_
diff --git a/update_attempter.cc b/cros/update_attempter.cc
similarity index 69%
rename from update_attempter.cc
rename to cros/update_attempter.cc
index ee571db..e039480 100644
--- a/update_attempter.cc
+++ b/cros/update_attempter.cc
@@ -14,20 +14,24 @@
 // limitations under the License.
 //
 
-#include "update_engine/update_attempter.h"
+#include "update_engine/cros/update_attempter.h"
 
 #include <stdint.h>
 
 #include <algorithm>
+#include <map>
 #include <memory>
 #include <string>
+#include <unordered_set>
 #include <utility>
 #include <vector>
 
 #include <base/bind.h>
+#include <base/compiler_specific.h>
 #include <base/files/file_util.h>
 #include <base/logging.h>
 #include <base/rand_util.h>
+#include <base/strings/string_number_conversions.h>
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
 #include <base/time/time.h>
@@ -40,26 +44,29 @@
 
 #include "update_engine/certificate_checker.h"
 #include "update_engine/common/boot_control_interface.h"
-#include "update_engine/common/clock_interface.h"
 #include "update_engine/common/constants.h"
 #include "update_engine/common/dlcservice_interface.h"
+#include "update_engine/common/download_action.h"
+#include "update_engine/common/excluder_interface.h"
 #include "update_engine/common/hardware_interface.h"
+#include "update_engine/common/metrics_reporter_interface.h"
 #include "update_engine/common/platform_constants.h"
+#include "update_engine/common/prefs.h"
 #include "update_engine/common/prefs_interface.h"
 #include "update_engine/common/subprocess.h"
+#include "update_engine/common/system_state.h"
 #include "update_engine/common/utils.h"
+#include "update_engine/cros/download_action_chromeos.h"
+#include "update_engine/cros/omaha_request_action.h"
+#include "update_engine/cros/omaha_request_params.h"
+#include "update_engine/cros/omaha_response_handler_action.h"
+#include "update_engine/cros/omaha_utils.h"
+#include "update_engine/cros/p2p_manager.h"
+#include "update_engine/cros/payload_state_interface.h"
+#include "update_engine/cros/power_manager_interface.h"
 #include "update_engine/libcurl_http_fetcher.h"
-#include "update_engine/metrics_reporter_interface.h"
-#include "update_engine/omaha_request_action.h"
-#include "update_engine/omaha_request_params.h"
-#include "update_engine/omaha_response_handler_action.h"
-#include "update_engine/p2p_manager.h"
-#include "update_engine/payload_consumer/download_action.h"
 #include "update_engine/payload_consumer/filesystem_verifier_action.h"
 #include "update_engine/payload_consumer/postinstall_runner_action.h"
-#include "update_engine/payload_state_interface.h"
-#include "update_engine/power_manager_interface.h"
-#include "update_engine/system_state.h"
 #include "update_engine/update_boot_flags_action.h"
 #include "update_engine/update_manager/policy.h"
 #include "update_engine/update_manager/policy_utils.h"
@@ -68,6 +75,7 @@
 
 using base::Bind;
 using base::Callback;
+using base::FilePath;
 using base::Time;
 using base::TimeDelta;
 using base::TimeTicks;
@@ -77,6 +85,7 @@
 using chromeos_update_manager::Policy;
 using chromeos_update_manager::StagingCase;
 using chromeos_update_manager::UpdateCheckParams;
+using std::map;
 using std::string;
 using std::vector;
 using update_engine::UpdateAttemptFlags;
@@ -117,14 +126,16 @@
   return code;
 }
 
-UpdateAttempter::UpdateAttempter(SystemState* system_state,
-                                 CertificateChecker* cert_checker)
+UpdateAttempter::UpdateAttempter(CertificateChecker* cert_checker)
     : processor_(new ActionProcessor()),
-      system_state_(system_state),
       cert_checker_(cert_checker),
       is_install_(false) {}
 
 UpdateAttempter::~UpdateAttempter() {
+  // Prevent any DBus communication from UpdateAttempter when shutting down the
+  // daemon.
+  ClearObservers();
+
   // CertificateChecker might not be initialized in unittests.
   if (cert_checker_)
     cert_checker_->SetObserver(nullptr);
@@ -137,48 +148,78 @@
   // Pulling from the SystemState can only be done after construction, since
   // this is an aggregate of various objects (such as the UpdateAttempter),
   // which requires them all to be constructed prior to it being used.
-  prefs_ = system_state_->prefs();
-  omaha_request_params_ = system_state_->request_params();
+  prefs_ = SystemState::Get()->prefs();
+  omaha_request_params_ = SystemState::Get()->request_params();
 
   if (cert_checker_)
     cert_checker_->SetObserver(this);
 
   // In case of update_engine restart without a reboot we need to restore the
   // reboot needed state.
-  if (GetBootTimeAtUpdate(nullptr))
+  if (GetBootTimeAtUpdate(nullptr)) {
     status_ = UpdateStatus::UPDATED_NEED_REBOOT;
-  else
+  } else {
     status_ = UpdateStatus::IDLE;
+    prefs_->Delete(kPrefsLastFp, {kDlcPrefsSubDir});
+  }
 }
 
 bool UpdateAttempter::ScheduleUpdates() {
-  if (IsUpdateRunningOrScheduled())
+  if (IsBusyOrUpdateScheduled())
     return false;
 
   chromeos_update_manager::UpdateManager* const update_manager =
-      system_state_->update_manager();
+      SystemState::Get()->update_manager();
   CHECK(update_manager);
   Callback<void(EvalStatus, const UpdateCheckParams&)> callback =
       Bind(&UpdateAttempter::OnUpdateScheduled, base::Unretained(this));
   // We limit the async policy request to a reasonably short time, to avoid a
   // starvation due to a transient bug.
-  update_manager->AsyncPolicyRequest(callback, &Policy::UpdateCheckAllowed);
+  update_manager->AsyncPolicyRequestUpdateCheckAllowed(
+      callback, &Policy::UpdateCheckAllowed);
   waiting_for_scheduled_check_ = true;
   return true;
 }
 
+bool UpdateAttempter::StartUpdater() {
+  // Initiate update checks.
+  ScheduleUpdates();
+
+  auto update_boot_flags_action = std::make_unique<UpdateBootFlagsAction>(
+      SystemState::Get()->boot_control());
+  aux_processor_.EnqueueAction(std::move(update_boot_flags_action));
+  // Update boot flags after 45 seconds.
+  MessageLoop::current()->PostDelayedTask(
+      FROM_HERE,
+      base::Bind(&ActionProcessor::StartProcessing,
+                 base::Unretained(&aux_processor_)),
+      base::TimeDelta::FromSeconds(45));
+
+  // Broadcast the update engine status on startup to ensure consistent system
+  // state on crashes.
+  MessageLoop::current()->PostTask(
+      FROM_HERE,
+      base::Bind(&UpdateAttempter::BroadcastStatus, base::Unretained(this)));
+
+  MessageLoop::current()->PostTask(
+      FROM_HERE,
+      base::Bind(&UpdateAttempter::UpdateEngineStarted,
+                 base::Unretained(this)));
+  return true;
+}
+
 void UpdateAttempter::CertificateChecked(ServerToCheck server_to_check,
                                          CertificateCheckResult result) {
-  system_state_->metrics_reporter()->ReportCertificateCheckMetrics(
+  SystemState::Get()->metrics_reporter()->ReportCertificateCheckMetrics(
       server_to_check, result);
 }
 
 bool UpdateAttempter::CheckAndReportDailyMetrics() {
   int64_t stored_value;
-  Time now = system_state_->clock()->GetWallclockTime();
-  if (system_state_->prefs()->Exists(kPrefsDailyMetricsLastReportedAt) &&
-      system_state_->prefs()->GetInt64(kPrefsDailyMetricsLastReportedAt,
-                                       &stored_value)) {
+  Time now = SystemState::Get()->clock()->GetWallclockTime();
+  if (SystemState::Get()->prefs()->Exists(kPrefsDailyMetricsLastReportedAt) &&
+      SystemState::Get()->prefs()->GetInt64(kPrefsDailyMetricsLastReportedAt,
+                                            &stored_value)) {
     Time last_reported_at = Time::FromInternalValue(stored_value);
     TimeDelta time_reported_since = now - last_reported_at;
     if (time_reported_since.InSeconds() < 0) {
@@ -201,8 +242,8 @@
   }
 
   LOG(INFO) << "Reporting daily metrics.";
-  system_state_->prefs()->SetInt64(kPrefsDailyMetricsLastReportedAt,
-                                   now.ToInternalValue());
+  SystemState::Get()->prefs()->SetInt64(kPrefsDailyMetricsLastReportedAt,
+                                        now.ToInternalValue());
 
   ReportOSAge();
 
@@ -211,10 +252,6 @@
 
 void UpdateAttempter::ReportOSAge() {
   struct stat sb;
-
-  if (system_state_ == nullptr)
-    return;
-
   if (stat("/etc/lsb-release", &sb) != 0) {
     PLOG(ERROR) << "Error getting file status for /etc/lsb-release "
                 << "(Note: this may happen in some unit tests)";
@@ -222,7 +259,7 @@
   }
 
   Time lsb_release_timestamp = Time::FromTimeSpec(sb.st_ctim);
-  Time now = system_state_->clock()->GetWallclockTime();
+  Time now = SystemState::Get()->clock()->GetWallclockTime();
   TimeDelta age = now - lsb_release_timestamp;
   if (age.InSeconds() < 0) {
     LOG(ERROR) << "The OS age (" << utils::FormatTimeDelta(age)
@@ -231,16 +268,10 @@
     return;
   }
 
-  system_state_->metrics_reporter()->ReportDailyMetrics(age);
+  SystemState::Get()->metrics_reporter()->ReportDailyMetrics(age);
 }
 
-void UpdateAttempter::Update(const string& app_version,
-                             const string& omaha_url,
-                             const string& target_channel,
-                             const string& target_version_prefix,
-                             bool rollback_allowed,
-                             bool obey_proxies,
-                             bool interactive) {
+void UpdateAttempter::Update(const UpdateCheckParams& params) {
   // This is normally called frequently enough so it's appropriate to use as a
   // hook for reporting daily metrics.
   // TODO(garnold) This should be hooked to a separate (reliable and consistent)
@@ -256,8 +287,7 @@
     // not performing an update check because of this.
     LOG(INFO) << "Not updating b/c we already updated and we're waiting for "
               << "reboot, we'll ping Omaha instead";
-    system_state_->metrics_reporter()->ReportUpdateCheckMetrics(
-        system_state_,
+    SystemState::Get()->metrics_reporter()->ReportUpdateCheckMetrics(
         metrics::CheckResult::kRebootPending,
         metrics::CheckReaction::kUnset,
         metrics::DownloadErrorCode::kUnset);
@@ -269,17 +299,11 @@
     return;
   }
 
-  if (!CalculateUpdateParams(app_version,
-                             omaha_url,
-                             target_channel,
-                             target_version_prefix,
-                             rollback_allowed,
-                             obey_proxies,
-                             interactive)) {
+  if (!CalculateUpdateParams(params)) {
     return;
   }
 
-  BuildUpdateActions(interactive);
+  BuildUpdateActions(params.interactive);
 
   SetStatusAndNotify(UpdateStatus::CHECKING_FOR_UPDATE);
 
@@ -306,8 +330,8 @@
   else
     LOG(INFO) << "No device policies/settings present.";
 
-  system_state_->set_device_policy(device_policy);
-  system_state_->p2p_manager()->SetDevicePolicy(device_policy);
+  SystemState::Get()->set_device_policy(device_policy);
+  SystemState::Get()->p2p_manager()->SetDevicePolicy(device_policy);
 }
 
 void UpdateAttempter::CalculateP2PParams(bool interactive) {
@@ -320,37 +344,31 @@
   // (Why would a developer want to opt in? If they are working on the
   // update_engine or p2p codebases so they can actually test their code.)
 
-  if (system_state_ != nullptr) {
-    if (!system_state_->p2p_manager()->IsP2PEnabled()) {
-      LOG(INFO) << "p2p is not enabled - disallowing p2p for both"
-                << " downloading and sharing.";
+  if (!SystemState::Get()->p2p_manager()->IsP2PEnabled()) {
+    LOG(INFO) << "p2p is not enabled - disallowing p2p for both"
+              << " downloading and sharing.";
+  } else {
+    // Allow p2p for sharing, even in interactive checks.
+    use_p2p_for_sharing = true;
+    if (!interactive) {
+      LOG(INFO) << "Non-interactive check - allowing p2p for downloading";
+      use_p2p_for_downloading = true;
     } else {
-      // Allow p2p for sharing, even in interactive checks.
-      use_p2p_for_sharing = true;
-      if (!interactive) {
-        LOG(INFO) << "Non-interactive check - allowing p2p for downloading";
-        use_p2p_for_downloading = true;
-      } else {
-        LOG(INFO) << "Forcibly disabling use of p2p for downloading "
-                  << "since this update attempt is interactive.";
-      }
+      LOG(INFO) << "Forcibly disabling use of p2p for downloading "
+                << "since this update attempt is interactive.";
     }
   }
 
-  PayloadStateInterface* const payload_state = system_state_->payload_state();
+  PayloadStateInterface* const payload_state =
+      SystemState::Get()->payload_state();
   payload_state->SetUsingP2PForDownloading(use_p2p_for_downloading);
   payload_state->SetUsingP2PForSharing(use_p2p_for_sharing);
 }
 
-bool UpdateAttempter::CalculateUpdateParams(const string& app_version,
-                                            const string& omaha_url,
-                                            const string& target_channel,
-                                            const string& target_version_prefix,
-                                            bool rollback_allowed,
-                                            bool obey_proxies,
-                                            bool interactive) {
+bool UpdateAttempter::CalculateUpdateParams(const UpdateCheckParams& params) {
   http_response_code_ = 0;
-  PayloadStateInterface* const payload_state = system_state_->payload_state();
+  PayloadStateInterface* const payload_state =
+      SystemState::Get()->payload_state();
 
   // Refresh the policy before computing all the update parameters.
   RefreshDevicePolicy();
@@ -359,19 +377,13 @@
   // policy is available again.
   UpdateRollbackHappened();
 
-  // Update the target version prefix.
-  omaha_request_params_->set_target_version_prefix(target_version_prefix);
-
-  // Set whether rollback is allowed.
-  omaha_request_params_->set_rollback_allowed(rollback_allowed);
-
-  CalculateStagingParams(interactive);
+  CalculateStagingParams(params.interactive);
   // If staging_wait_time_ wasn't set, staging is off, use scattering instead.
   if (staging_wait_time_.InSeconds() == 0) {
-    CalculateScatteringParams(interactive);
+    CalculateScatteringParams(params.interactive);
   }
 
-  CalculateP2PParams(interactive);
+  CalculateP2PParams(params.interactive);
   if (payload_state->GetUsingP2PForDownloading() ||
       payload_state->GetUsingP2PForSharing()) {
     // OK, p2p is to be used - start it and perform housekeeping.
@@ -384,34 +396,18 @@
     }
   }
 
-  if (!omaha_request_params_->Init(app_version, omaha_url, interactive)) {
+  if (!omaha_request_params_->Init(
+          forced_app_version_, forced_omaha_url_, params)) {
     LOG(ERROR) << "Unable to initialize Omaha request params.";
     return false;
   }
 
-  // Set the target channel, if one was provided.
-  if (target_channel.empty()) {
-    LOG(INFO) << "No target channel mandated by policy.";
-  } else {
-    LOG(INFO) << "Setting target channel as mandated: " << target_channel;
-    // Pass in false for powerwash_allowed until we add it to the policy
-    // protobuf.
-    string error_message;
-    if (!omaha_request_params_->SetTargetChannel(
-            target_channel, false, &error_message)) {
-      LOG(ERROR) << "Setting the channel failed: " << error_message;
-    }
-
-    // Since this is the beginning of a new attempt, update the download
-    // channel. The download channel won't be updated until the next attempt,
-    // even if target channel changes meanwhile, so that how we'll know if we
-    // should cancel the current download attempt if there's such a change in
-    // target channel.
-    omaha_request_params_->UpdateDownloadChannel();
-  }
-  // Set the DLC module ID list.
-  omaha_request_params_->set_dlc_module_ids(dlc_module_ids_);
-  omaha_request_params_->set_is_install(is_install_);
+  // The function |CalculateDlcParams| makes use of the function |GetAppId| from
+  // |OmahaRequestParams|, so to ensure that the return from |GetAppId|
+  // doesn't change, no changes to the values |download_channel_|,
+  // |image_props_.product_id| and |image_props_.canary_product_id| from
+  // |omaha_request_params_| shall be made below this line.
+  CalculateDlcParams();
 
   LOG(INFO) << "target_version_prefix = "
             << omaha_request_params_->target_version_prefix()
@@ -434,7 +430,7 @@
             << payload_state->GetUsingP2PForSharing();
 
   obeying_proxies_ = true;
-  if (obey_proxies || proxy_manual_checks_ == 0) {
+  if (proxy_manual_checks_ == 0) {
     LOG(INFO) << "forced to obey proxies";
     // If forced to obey proxies, every 20th request will not use proxies
     proxy_manual_checks_++;
@@ -458,7 +454,8 @@
   // Take a copy of the old scatter value before we update it, as
   // we need to update the waiting period if this value changes.
   TimeDelta old_scatter_factor = scatter_factor_;
-  const policy::DevicePolicy* device_policy = system_state_->device_policy();
+  const policy::DevicePolicy* device_policy =
+      SystemState::Get()->device_policy();
   if (device_policy) {
     int64_t new_scatter_factor_in_secs = 0;
     device_policy->GetScatterFactorInSeconds(&new_scatter_factor_in_secs);
@@ -472,8 +469,8 @@
     LOG(INFO) << "Scattering disabled since scatter factor is set to 0";
   } else if (interactive) {
     LOG(INFO) << "Scattering disabled as this is an interactive update check";
-  } else if (system_state_->hardware()->IsOOBEEnabled() &&
-             !system_state_->hardware()->IsOOBEComplete(nullptr)) {
+  } else if (SystemState::Get()->hardware()->IsOOBEEnabled() &&
+             !SystemState::Get()->hardware()->IsOOBEComplete(nullptr)) {
     LOG(INFO) << "Scattering disabled since OOBE is enabled but not complete "
                  "yet";
   } else {
@@ -575,19 +572,19 @@
   // fails, we'll still be able to scatter based on our in-memory value.
   // The persistence only helps in ensuring a good overall distribution
   // across multiple devices if they tend to reboot too often.
-  system_state_->payload_state()->SetScatteringWaitPeriod(
+  SystemState::Get()->payload_state()->SetScatteringWaitPeriod(
       omaha_request_params_->waiting_period());
 }
 
 void UpdateAttempter::CalculateStagingParams(bool interactive) {
-  bool oobe_complete = system_state_->hardware()->IsOOBEEnabled() &&
-                       system_state_->hardware()->IsOOBEComplete(nullptr);
-  auto device_policy = system_state_->device_policy();
+  bool oobe_complete = SystemState::Get()->hardware()->IsOOBEEnabled() &&
+                       SystemState::Get()->hardware()->IsOOBEComplete(nullptr);
+  auto device_policy = SystemState::Get()->device_policy();
   StagingCase staging_case = StagingCase::kOff;
   if (device_policy && !interactive && oobe_complete) {
     staging_wait_time_ = omaha_request_params_->waiting_period();
     staging_case = CalculateStagingCase(
-        device_policy, prefs_, &staging_wait_time_, &staging_schedule_);
+        device_policy, &staging_wait_time_, &staging_schedule_);
   }
   switch (staging_case) {
     case StagingCase::kOff:
@@ -602,8 +599,10 @@
     case StagingCase::kNoSavedValue:
       prefs_->SetInt64(kPrefsWallClockStagingWaitPeriod,
                        staging_wait_time_.InDays());
+      FALLTHROUGH;
     case StagingCase::kSetStagingFromPref:
       omaha_request_params_->set_waiting_period(staging_wait_time_);
+      FALLTHROUGH;
     case StagingCase::kNoAction:
       // Staging is on, enable wallclock based wait so that its values get used.
       omaha_request_params_->set_wall_clock_based_wait_enabled(true);
@@ -618,61 +617,180 @@
   }
 }
 
+bool UpdateAttempter::ResetDlcPrefs(const string& dlc_id) {
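+  // Delete the ping-related prefs (active, last-active, last-rollcall) stored
+  // under this DLC's pref sub-directory.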
+  vector<string> failures;
+  for (auto& sub_key :
+       {kPrefsPingActive, kPrefsPingLastActive, kPrefsPingLastRollcall}) {
+    auto key = prefs_->CreateSubKey({kDlcPrefsSubDir, dlc_id, sub_key});
+    if (!prefs_->Delete(key))
+      failures.emplace_back(sub_key);
+  }
+  if (failures.size() != 0)
+    PLOG(ERROR) << "Failed to delete prefs (" << base::JoinString(failures, ",")
+                << " for DLC (" << dlc_id << ").";
+
+  return failures.size() == 0;
+}
+
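+// Writes |pref_value| to the platform-level |pref_key| or, when |payload_id|
+// maps to a DLC, to that DLC's pref sub-key.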
+void UpdateAttempter::SetPref(const string& pref_key,
+                              const string& pref_value,
+                              const string& payload_id) {
+  string dlc_id;
+  if (!omaha_request_params_->GetDlcId(payload_id, &dlc_id)) {
+    // Not a DLC ID; set the fingerprint in the pref for the platform ID.
+    prefs_->SetString(pref_key, pref_value);
+  } else {
+    // Set fingerprint in pref for DLC ID.
+    auto key = prefs_->CreateSubKey({kDlcPrefsSubDir, dlc_id, pref_key});
+    prefs_->SetString(key, pref_value);
+  }
+}
+
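+// Marks the DLC as active by writing its ping-active pref, or clears all of
+// its ping prefs when |is_active| is false.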
+bool UpdateAttempter::SetDlcActiveValue(bool is_active, const string& dlc_id) {
+  if (dlc_id.empty()) {
+    LOG(ERROR) << "Empty DLC ID passed.";
+    return false;
+  }
+  LOG(INFO) << "Set DLC (" << dlc_id << ") to "
+            << (is_active ? "Active" : "Inactive");
+  if (is_active) {
+    auto ping_active_key =
+        prefs_->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingActive});
+    if (!prefs_->SetInt64(ping_active_key, kPingActiveValue)) {
+      LOG(ERROR) << "Failed to set the value of ping metadata '"
+                 << kPrefsPingActive << "'.";
+      return false;
+    }
+  } else {
+    return ResetDlcPrefs(dlc_id);
+  }
+  return true;
+}
+
+int64_t UpdateAttempter::GetPingMetadata(const string& metadata_key) const {
+  // The first time a ping is sent, the metadata files containing the values
+  // sent back by the server still don't exist. A value of -1 is used to
+  // indicate this.
+  if (!SystemState::Get()->prefs()->Exists(metadata_key))
+    return kPingNeverPinged;
+
+  int64_t value;
+  if (SystemState::Get()->prefs()->GetInt64(metadata_key, &value))
+    return value;
+
+  // Return -2 when the file exists and there is a problem reading from it, or
+  // the value cannot be converted to an integer.
+  return kPingUnknownValue;
+}
+
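+// Builds the per-DLC app params: installs reset the ping prefs and mark the
+// DLC active; updates read the stored ping values and request a ping. The
+// result is handed to |omaha_request_params_|.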
+void UpdateAttempter::CalculateDlcParams() {
+  // Set the |dlc_ids_| only for an update. This is required to get the
+  // currently installed DLC(s).
+  if (!is_install_ &&
+      !SystemState::Get()->dlcservice()->GetDlcsToUpdate(&dlc_ids_)) {
+    LOG(INFO) << "Failed to retrieve DLC module IDs from dlcservice. Check the "
+                 "state of dlcservice, will not update DLC modules.";
+  }
+  map<string, OmahaRequestParams::AppParams> dlc_apps_params;
+  for (const auto& dlc_id : dlc_ids_) {
+    OmahaRequestParams::AppParams dlc_params{
+        .active_counting_type = OmahaRequestParams::kDateBased,
+        .name = dlc_id,
+        .send_ping = false};
+    if (is_install_) {
+      // In some cases, |SetDlcActiveValue| might fail to reset the DLC prefs
+      // when a DLC is uninstalled. To avoid having stale values from that
+      // scenario, we reset the metadata values on a new install request.
+      // Ignore failure to delete stale prefs.
+      ResetDlcPrefs(dlc_id);
+      SetDlcActiveValue(true, dlc_id);
+    } else {
+      // Only send the ping when the request is to update DLCs. When installing
+      // DLCs, we don't want to send the ping yet, since the DLCs might fail to
+      // install or might not really be active yet.
+      dlc_params.ping_active = kPingActiveValue;
+      auto ping_active_key =
+          prefs_->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingActive});
+      if (!prefs_->GetInt64(ping_active_key, &dlc_params.ping_active) ||
+          dlc_params.ping_active != kPingActiveValue) {
+        dlc_params.ping_active = kPingInactiveValue;
+      }
+      auto ping_last_active_key =
+          prefs_->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive});
+      dlc_params.ping_date_last_active = GetPingMetadata(ping_last_active_key);
+
+      auto ping_last_rollcall_key = prefs_->CreateSubKey(
+          {kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall});
+      dlc_params.ping_date_last_rollcall =
+          GetPingMetadata(ping_last_rollcall_key);
+
+      dlc_params.send_ping = true;
+    }
+    dlc_apps_params[omaha_request_params_->GetDlcAppId(dlc_id)] = dlc_params;
+  }
+  omaha_request_params_->set_dlc_apps_params(dlc_apps_params);
+  omaha_request_params_->set_is_install(is_install_);
+}
+
 void UpdateAttempter::BuildUpdateActions(bool interactive) {
   CHECK(!processor_->IsRunning());
   processor_->set_delegate(this);
 
+  // The session ID needs to be kept throughout the update flow. The value
+  // of the session ID will reset/update only when it is a new update flow.
+  session_id_ = base::GenerateGUID();
+
   // Actions:
   auto update_check_fetcher = std::make_unique<LibcurlHttpFetcher>(
-      GetProxyResolver(), system_state_->hardware());
+      GetProxyResolver(), SystemState::Get()->hardware());
   update_check_fetcher->set_server_to_check(ServerToCheck::kUpdate);
   // Try harder to connect to the network, esp when not interactive.
   // See comment in libcurl_http_fetcher.cc.
   update_check_fetcher->set_no_network_max_retries(interactive ? 1 : 3);
+  update_check_fetcher->set_is_update_check(true);
   auto update_check_action = std::make_unique<OmahaRequestAction>(
-      system_state_, nullptr, std::move(update_check_fetcher), false);
-  auto response_handler_action =
-      std::make_unique<OmahaResponseHandlerAction>(system_state_);
-  auto update_boot_flags_action =
-      std::make_unique<UpdateBootFlagsAction>(system_state_->boot_control());
+      nullptr, std::move(update_check_fetcher), false, session_id_);
+  auto response_handler_action = std::make_unique<OmahaResponseHandlerAction>();
+  auto update_boot_flags_action = std::make_unique<UpdateBootFlagsAction>(
+      SystemState::Get()->boot_control());
   auto download_started_action = std::make_unique<OmahaRequestAction>(
-      system_state_,
       new OmahaEvent(OmahaEvent::kTypeUpdateDownloadStarted),
       std::make_unique<LibcurlHttpFetcher>(GetProxyResolver(),
-                                           system_state_->hardware()),
-      false);
+                                           SystemState::Get()->hardware()),
+      false,
+      session_id_);
 
-  LibcurlHttpFetcher* download_fetcher =
-      new LibcurlHttpFetcher(GetProxyResolver(), system_state_->hardware());
+  LibcurlHttpFetcher* download_fetcher = new LibcurlHttpFetcher(
+      GetProxyResolver(), SystemState::Get()->hardware());
   download_fetcher->set_server_to_check(ServerToCheck::kDownload);
   if (interactive)
     download_fetcher->set_max_retry_count(kDownloadMaxRetryCountInteractive);
-  auto download_action =
-      std::make_unique<DownloadAction>(prefs_,
-                                       system_state_->boot_control(),
-                                       system_state_->hardware(),
-                                       system_state_,
-                                       download_fetcher,  // passes ownership
-                                       interactive);
+  download_fetcher->SetHeader(kXGoogleUpdateSessionId, session_id_);
+  auto download_action = std::make_unique<DownloadActionChromeos>(
+      prefs_,
+      SystemState::Get()->boot_control(),
+      SystemState::Get()->hardware(),
+      download_fetcher,  // passes ownership
+      interactive);
   download_action->set_delegate(this);
 
   auto download_finished_action = std::make_unique<OmahaRequestAction>(
-      system_state_,
       new OmahaEvent(OmahaEvent::kTypeUpdateDownloadFinished),
       std::make_unique<LibcurlHttpFetcher>(GetProxyResolver(),
-                                           system_state_->hardware()),
-      false);
-  auto filesystem_verifier_action =
-      std::make_unique<FilesystemVerifierAction>();
+                                           SystemState::Get()->hardware()),
+      false,
+      session_id_);
+  auto filesystem_verifier_action = std::make_unique<FilesystemVerifierAction>(
+      SystemState::Get()->boot_control()->GetDynamicPartitionControl());
   auto update_complete_action = std::make_unique<OmahaRequestAction>(
-      system_state_,
       new OmahaEvent(OmahaEvent::kTypeUpdateComplete),
       std::make_unique<LibcurlHttpFetcher>(GetProxyResolver(),
-                                           system_state_->hardware()),
-      false);
+                                           SystemState::Get()->hardware()),
+      false,
+      session_id_);
 
   auto postinstall_runner_action = std::make_unique<PostinstallRunnerAction>(
-      system_state_->boot_control(), system_state_->hardware());
+      SystemState::Get()->boot_control(), SystemState::Get()->hardware());
   postinstall_runner_action->set_delegate(this);
 
   // Bond them together. We have to use the leaf-types when calling
@@ -706,7 +824,8 @@
     // Enterprise-enrolled devices have an empty owner in their device policy.
     string owner;
     RefreshDevicePolicy();
-    const policy::DevicePolicy* device_policy = system_state_->device_policy();
+    const policy::DevicePolicy* device_policy =
+        SystemState::Get()->device_policy();
     if (device_policy && (!device_policy->GetOwner(&owner) || owner.empty())) {
       LOG(ERROR) << "Enterprise device detected. "
                  << "Cannot perform a powerwash for enterprise devices.";
@@ -717,7 +836,7 @@
   processor_->set_delegate(this);
 
   // Initialize the default request params.
-  if (!omaha_request_params_->Init("", "", true)) {
+  if (!omaha_request_params_->Init("", "", {.interactive = true})) {
     LOG(ERROR) << "Unable to initialize Omaha request params.";
     return false;
   }
@@ -725,26 +844,26 @@
   LOG(INFO) << "Setting rollback options.";
   install_plan_.reset(new InstallPlan());
   install_plan_->target_slot = GetRollbackSlot();
-  install_plan_->source_slot = system_state_->boot_control()->GetCurrentSlot();
+  install_plan_->source_slot =
+      SystemState::Get()->boot_control()->GetCurrentSlot();
 
-  TEST_AND_RETURN_FALSE(
-      install_plan_->LoadPartitionsFromSlots(system_state_->boot_control()));
+  TEST_AND_RETURN_FALSE(install_plan_->LoadPartitionsFromSlots(
+      SystemState::Get()->boot_control()));
   install_plan_->powerwash_required = powerwash;
 
-  LOG(INFO) << "Using this install plan:";
   install_plan_->Dump();
 
   auto install_plan_action =
       std::make_unique<InstallPlanAction>(*install_plan_);
   auto postinstall_runner_action = std::make_unique<PostinstallRunnerAction>(
-      system_state_->boot_control(), system_state_->hardware());
+      SystemState::Get()->boot_control(), SystemState::Get()->hardware());
   postinstall_runner_action->set_delegate(this);
   BondActions(install_plan_action.get(), postinstall_runner_action.get());
   processor_->EnqueueAction(std::move(install_plan_action));
   processor_->EnqueueAction(std::move(postinstall_runner_action));
 
   // Update the payload state for Rollback.
-  system_state_->payload_state()->Rollback();
+  SystemState::Get()->payload_state()->Rollback();
 
   SetStatusAndNotify(UpdateStatus::ATTEMPTING_ROLLBACK);
 
@@ -761,9 +880,10 @@
 
 BootControlInterface::Slot UpdateAttempter::GetRollbackSlot() const {
   LOG(INFO) << "UpdateAttempter::GetRollbackSlot";
-  const unsigned int num_slots = system_state_->boot_control()->GetNumSlots();
+  const unsigned int num_slots =
+      SystemState::Get()->boot_control()->GetNumSlots();
   const BootControlInterface::Slot current_slot =
-      system_state_->boot_control()->GetCurrentSlot();
+      SystemState::Get()->boot_control()->GetCurrentSlot();
 
   LOG(INFO) << "  Installed slots: " << num_slots;
   LOG(INFO) << "  Booted from slot: "
@@ -777,7 +897,7 @@
   vector<BootControlInterface::Slot> bootable_slots;
   for (BootControlInterface::Slot slot = 0; slot < num_slots; slot++) {
     if (slot != current_slot &&
-        system_state_->boot_control()->IsSlotBootable(slot)) {
+        SystemState::Get()->boot_control()->IsSlotBootable(slot)) {
       LOG(INFO) << "Found bootable slot "
                 << BootControlInterface::SlotName(slot);
       return slot;
@@ -790,19 +910,16 @@
 bool UpdateAttempter::CheckForUpdate(const string& app_version,
                                      const string& omaha_url,
                                      UpdateAttemptFlags flags) {
-  dlc_module_ids_.clear();
-  is_install_ = false;
-  bool interactive = !(flags & UpdateAttemptFlags::kFlagNonInteractive);
-
-  if (interactive && status_ != UpdateStatus::IDLE) {
-    // An update check is either in-progress, or an update has completed and the
-    // system is in UPDATED_NEED_REBOOT.  Either way, don't do an interactive
-    // update at this time
-    LOG(INFO) << "Refusing to do an interactive update with an update already "
-                 "in progress";
+  if (status_ != UpdateStatus::IDLE) {
+    LOG(INFO) << "Refusing to do an update as there is an "
+              << (is_install_ ? "install" : "update")
+              << " already in progress.";
     return false;
   }
 
+  bool interactive = !(flags & UpdateAttemptFlags::kFlagNonInteractive);
+  is_install_ = false;
+
   LOG(INFO) << "Forced update check requested.";
   forced_app_version_.clear();
   forced_omaha_url_.clear();
@@ -829,25 +946,33 @@
     // of the previously set ones.
     current_update_attempt_flags_ = flags;
     // Note: The caching for non-interactive update checks happens in
-    // OnUpdateScheduled().
+    // |OnUpdateScheduled()|.
   }
 
+  // |forced_update_pending_callback_| should always be set, but even in the
+  // case that it is not, we still return true indicating success because the
+  // scheduled periodic check will pick up these changes.
   if (forced_update_pending_callback_.get()) {
-    if (!system_state_->dlcservice()->GetInstalled(&dlc_module_ids_)) {
-      dlc_module_ids_.clear();
-    }
-    // Make sure that a scheduling request is made prior to calling the forced
-    // update pending callback.
+    // Always call |ScheduleUpdates()| before forcing an update. This is because
+    // we need an update to be scheduled for the
+    // |forced_update_pending_callback_| to have an effect. Here we don't need
+    // to care about the return value from |ScheduleUpdates()|.
     ScheduleUpdates();
     forced_update_pending_callback_->Run(true, interactive);
   }
-
   return true;
 }
 
-bool UpdateAttempter::CheckForInstall(const vector<string>& dlc_module_ids,
+bool UpdateAttempter::CheckForInstall(const vector<string>& dlc_ids,
                                       const string& omaha_url) {
-  dlc_module_ids_ = dlc_module_ids;
+  if (status_ != UpdateStatus::IDLE) {
+    LOG(INFO) << "Refusing to do an install as there is an "
+              << (is_install_ ? "install" : "update")
+              << " already in progress.";
+    return false;
+  }
+
+  dlc_ids_ = dlc_ids;
   is_install_ = true;
   forced_omaha_url_.clear();
 
@@ -857,35 +982,28 @@
   if (IsAnyUpdateSourceAllowed()) {
     forced_omaha_url_ = omaha_url;
   }
-  if (omaha_url == kScheduledAUTestURLRequest) {
-    forced_omaha_url_ = constants::kOmahaDefaultAUTestURL;
-  } else if (omaha_url == kAUTestURLRequest) {
+
+  if (omaha_url == kScheduledAUTestURLRequest ||
+      omaha_url == kAUTestURLRequest) {
     forced_omaha_url_ = constants::kOmahaDefaultAUTestURL;
   }
 
-  if (!ScheduleUpdates()) {
-    if (forced_update_pending_callback_.get()) {
-      // Make sure that a scheduling request is made prior to calling the forced
-      // update pending callback.
-      ScheduleUpdates();
-      forced_update_pending_callback_->Run(true, true);
-      return true;
-    }
-    return false;
+  // |forced_update_pending_callback_| should always be set, but even in the
+  // case that it is not, we still return true indicating success because the
+  // scheduled periodic check will pick up these changes.
+  if (forced_update_pending_callback_.get()) {
+    // Always call |ScheduleUpdates()| before forcing an update. This is because
+    // we need an update to be scheduled for the
+    // |forced_update_pending_callback_| to have an effect. Here we don't need
+    // to care about the return value from |ScheduleUpdates()|.
+    ScheduleUpdates();
+    forced_update_pending_callback_->Run(true, true);
   }
   return true;
 }
 
 bool UpdateAttempter::RebootIfNeeded() {
-#ifdef __ANDROID__
-  if (status_ != UpdateStatus::UPDATED_NEED_REBOOT) {
-    LOG(INFO) << "Reboot requested, but status is "
-              << UpdateStatusToString(status_) << ", so not rebooting.";
-    return false;
-  }
-#endif  // __ANDROID__
-
-  if (system_state_->power_manager()->RequestReboot())
+  if (SystemState::Get()->power_manager()->RequestReboot())
     return true;
 
   return RebootDirectly();
@@ -897,18 +1015,14 @@
     return;
   prefs_->SetString(kPrefsUpdateCompletedOnBootId, boot_id);
 
-  int64_t value = system_state_->clock()->GetBootTime().ToInternalValue();
+  int64_t value = SystemState::Get()->clock()->GetBootTime().ToInternalValue();
   prefs_->SetInt64(kPrefsUpdateCompletedBootTime, value);
 }
 
 bool UpdateAttempter::RebootDirectly() {
-  vector<string> command;
-  command.push_back("/sbin/shutdown");
-  command.push_back("-r");
-  command.push_back("now");
-  LOG(INFO) << "Running \"" << base::JoinString(command, " ") << "\"";
+  vector<string> command = {"/sbin/shutdown", "-r", "now"};
   int rc = 0;
-  Subprocess::SynchronousExec(command, &rc, nullptr);
+  Subprocess::SynchronousExec(command, &rc, nullptr, nullptr);
   return rc == 0;
 }
 
@@ -941,13 +1055,7 @@
     LOG(INFO) << "Update attempt flags in use = 0x" << std::hex
               << current_update_attempt_flags_;
 
-    Update(forced_app_version_,
-           forced_omaha_url_,
-           params.target_channel,
-           params.target_version_prefix,
-           params.rollback_allowed,
-           /*obey_proxies=*/false,
-           params.interactive);
+    Update(params);
     // Always clear the forced app_version and omaha_url after an update attempt
     // so the next update uses the defaults.
     forced_app_version_.clear();
@@ -963,45 +1071,37 @@
   // a bug that will most likely prevent further automatic update checks. It
   // seems better to crash in such cases and restart the update_engine daemon
   // into, hopefully, a known good state.
-  CHECK(IsUpdateRunningOrScheduled());
+  CHECK(IsBusyOrUpdateScheduled());
 }
 
 void UpdateAttempter::UpdateLastCheckedTime() {
-  last_checked_time_ = system_state_->clock()->GetWallclockTime().ToTimeT();
+  last_checked_time_ =
+      SystemState::Get()->clock()->GetWallclockTime().ToTimeT();
 }
 
 void UpdateAttempter::UpdateRollbackHappened() {
-  DCHECK(system_state_);
-  DCHECK(system_state_->payload_state());
+  DCHECK(SystemState::Get()->payload_state());
   DCHECK(policy_provider_);
-  if (system_state_->payload_state()->GetRollbackHappened() &&
+  if (SystemState::Get()->payload_state()->GetRollbackHappened() &&
       (policy_provider_->device_policy_is_loaded() ||
        policy_provider_->IsConsumerDevice())) {
     // Rollback happened, but we already went through OOBE and policy is
     // present or it's a consumer device.
-    system_state_->payload_state()->SetRollbackHappened(false);
+    SystemState::Get()->payload_state()->SetRollbackHappened(false);
   }
 }
 
-// Delegate methods:
-void UpdateAttempter::ProcessingDone(const ActionProcessor* processor,
-                                     ErrorCode code) {
-  LOG(INFO) << "Processing Done.";
-
+void UpdateAttempter::ProcessingDoneInternal(const ActionProcessor* processor,
+                                             ErrorCode code) {
   // Reset cpu shares back to normal.
   cpu_limiter_.StopLimiter();
 
-  // reset the state that's only valid for a single update pass
-  current_update_attempt_flags_ = UpdateAttemptFlags::kNone;
-
-  if (forced_update_pending_callback_.get())
-    // Clear prior interactive requests once the processor is done.
-    forced_update_pending_callback_->Run(false, false);
+  ResetInteractivityFlags();
 
   if (status_ == UpdateStatus::REPORTING_ERROR_EVENT) {
     LOG(INFO) << "Error event sent.";
 
-    // Inform scheduler of new status;
+    // Inform scheduler of new status.
     SetStatusAndNotify(UpdateStatus::IDLE);
     ScheduleUpdates();
 
@@ -1014,93 +1114,126 @@
 
   attempt_error_code_ = utils::GetBaseErrorCode(code);
 
-  if (code == ErrorCode::kSuccess) {
-    // For install operation, we do not mark update complete since we do not
-    // need reboot.
-    if (!is_install_)
-      WriteUpdateCompletedMarker();
-    ReportTimeToUpdateAppliedMetric();
-
-    prefs_->SetInt64(kPrefsDeltaUpdateFailures, 0);
-    prefs_->SetString(kPrefsPreviousVersion,
-                      omaha_request_params_->app_version());
-    DeltaPerformer::ResetUpdateProgress(prefs_, false);
-
-    system_state_->payload_state()->UpdateSucceeded();
-
-    // Since we're done with scattering fully at this point, this is the
-    // safest point delete the state files, as we're sure that the status is
-    // set to reboot (which means no more updates will be applied until reboot)
-    // This deletion is required for correctness as we want the next update
-    // check to re-create a new random number for the update check count.
-    // Similarly, we also delete the wall-clock-wait period that was persisted
-    // so that we start with a new random value for the next update check
-    // after reboot so that the same device is not favored or punished in any
-    // way.
-    prefs_->Delete(kPrefsUpdateCheckCount);
-    system_state_->payload_state()->SetScatteringWaitPeriod(TimeDelta());
-    system_state_->payload_state()->SetStagingWaitPeriod(TimeDelta());
-    prefs_->Delete(kPrefsUpdateFirstSeenAt);
-
-    if (is_install_) {
-      LOG(INFO) << "DLC successfully installed, no reboot needed.";
-      SetStatusAndNotify(UpdateStatus::IDLE);
-      ScheduleUpdates();
+  if (code != ErrorCode::kSuccess) {
+    if (ScheduleErrorEventAction()) {
       return;
     }
-
-    SetStatusAndNotify(UpdateStatus::UPDATED_NEED_REBOOT);
+    LOG(INFO) << "No update.";
+    SetStatusAndNotify(UpdateStatus::IDLE);
     ScheduleUpdates();
-    LOG(INFO) << "Update successfully applied, waiting to reboot.";
-
-    // |install_plan_| is null during rollback operations, and the stats don't
-    // make much sense then anyway.
-    if (install_plan_) {
-      // Generate an unique payload identifier.
-      string target_version_uid;
-      for (const auto& payload : install_plan_->payloads) {
-        target_version_uid +=
-            brillo::data_encoding::Base64Encode(payload.hash) + ":" +
-            payload.metadata_signature + ":";
-      }
-
-      // If we just downloaded a rollback image, we should preserve this fact
-      // over the following powerwash.
-      if (install_plan_->is_rollback) {
-        system_state_->payload_state()->SetRollbackHappened(true);
-        system_state_->metrics_reporter()->ReportEnterpriseRollbackMetrics(
-            /*success=*/true, install_plan_->version);
-      }
-
-      // Expect to reboot into the new version to send the proper metric during
-      // next boot.
-      system_state_->payload_state()->ExpectRebootInNewVersion(
-          target_version_uid);
-    } else {
-      // If we just finished a rollback, then we expect to have no Omaha
-      // response. Otherwise, it's an error.
-      if (system_state_->payload_state()->GetRollbackVersion().empty()) {
-        LOG(ERROR) << "Can't send metrics because there was no Omaha response";
-      }
-    }
     return;
   }
 
-  if (ScheduleErrorEventAction()) {
-    return;
+  ReportTimeToUpdateAppliedMetric();
+  prefs_->SetInt64(kPrefsDeltaUpdateFailures, 0);
+  prefs_->SetString(kPrefsPreviousVersion,
+                    omaha_request_params_->app_version());
+  DeltaPerformer::ResetUpdateProgress(prefs_, false);
+
+  SystemState::Get()->payload_state()->UpdateSucceeded();
+
+  // Since we're done with scattering fully at this point, this is the
+  // safest point to delete the state files, as we're sure that the status is
+  // set to reboot (which means no more updates will be applied until reboot).
+  // This deletion is required for correctness as we want the next update
+  // check to re-create a new random number for the update check count.
+  // Similarly, we also delete the wall-clock-wait period that was persisted
+  // so that we start with a new random value for the next update check
+  // after reboot so that the same device is not favored or punished in any
+  // way.
+  prefs_->Delete(kPrefsUpdateCheckCount);
+  SystemState::Get()->payload_state()->SetScatteringWaitPeriod(TimeDelta());
+  SystemState::Get()->payload_state()->SetStagingWaitPeriod(TimeDelta());
+  prefs_->Delete(kPrefsUpdateFirstSeenAt);
+
+  // Note: the code below this point should only run on |ErrorCode::kSuccess|.
+  if (is_install_) {
+    ProcessingDoneInstall(processor, code);
+  } else {
+    ProcessingDoneUpdate(processor, code);
   }
-  LOG(INFO) << "No update.";
+}
+
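+// Returns the IDs of the DLCs marked as updated in the Omaha app params for
+// this attempt.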
+vector<string> UpdateAttempter::GetSuccessfulDlcIds() {
+  vector<string> dlc_ids;
+  for (const auto& pr : omaha_request_params_->dlc_apps_params())
+    if (pr.second.updated)
+      dlc_ids.push_back(pr.second.name);
+  return dlc_ids;
+}
+
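+// On a successful install, notify dlcservice and return to IDLE; no reboot is
+// needed.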
+void UpdateAttempter::ProcessingDoneInstall(const ActionProcessor* processor,
+                                            ErrorCode code) {
+  if (!SystemState::Get()->dlcservice()->InstallCompleted(
+          GetSuccessfulDlcIds()))
+    LOG(WARNING) << "dlcservice didn't successfully handle install completion.";
   SetStatusAndNotify(UpdateStatus::IDLE);
   ScheduleUpdates();
+  LOG(INFO) << "DLC successfully installed, no reboot needed.";
+}
+
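+// On a successful update, record the completion marker, notify dlcservice and
+// move to UPDATED_NEED_REBOOT.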
+void UpdateAttempter::ProcessingDoneUpdate(const ActionProcessor* processor,
+                                           ErrorCode code) {
+  WriteUpdateCompletedMarker();
+
+  if (!SystemState::Get()->dlcservice()->UpdateCompleted(GetSuccessfulDlcIds()))
+    LOG(WARNING) << "dlcservice didn't successfully handle update completion.";
+  SetStatusAndNotify(UpdateStatus::UPDATED_NEED_REBOOT);
+  ScheduleUpdates();
+  LOG(INFO) << "Update successfully applied, waiting to reboot.";
+
+  // |install_plan_| is null during rollback operations, and the stats don't
+  // make much sense then anyway.
+  if (install_plan_) {
+    // Generate a unique payload identifier.
+    string target_version_uid;
+    for (const auto& payload : install_plan_->payloads) {
+      target_version_uid += brillo::data_encoding::Base64Encode(payload.hash) +
+                            ":" + payload.metadata_signature + ":";
+      // Set fingerprint value for updates only.
+      if (!is_install_)
+        SetPref(kPrefsLastFp, payload.fp, payload.app_id);
+    }
+
+    // If we just downloaded a rollback image, we should preserve this fact
+    // over the following powerwash.
+    if (install_plan_->is_rollback) {
+      SystemState::Get()->payload_state()->SetRollbackHappened(true);
+      SystemState::Get()->metrics_reporter()->ReportEnterpriseRollbackMetrics(
+          /*success=*/true, install_plan_->version);
+    }
+
+    // Expect to reboot into the new version to send the proper metric during
+    // next boot.
+    SystemState::Get()->payload_state()->ExpectRebootInNewVersion(
+        target_version_uid);
+  } else {
+    // If we just finished a rollback, then we expect to have no Omaha
+    // response. Otherwise, it's an error.
+    if (SystemState::Get()->payload_state()->GetRollbackVersion().empty()) {
+      LOG(ERROR) << "Can't send metrics because there was no Omaha response";
+    }
+  }
+}
+
+// Delegate methods:
+void UpdateAttempter::ProcessingDone(const ActionProcessor* processor,
+                                     ErrorCode code) {
+  LOG(INFO) << "Processing Done.";
+  ProcessingDoneInternal(processor, code);
+
+  // Note: perform any cleanup here for variables that need to be reset after
+  // a failure, error, update, or install.
+  is_install_ = false;
 }
 
 void UpdateAttempter::ProcessingStopped(const ActionProcessor* processor) {
   // Reset cpu shares back to normal.
   cpu_limiter_.StopLimiter();
   download_progress_ = 0.0;
-  if (forced_update_pending_callback_.get())
-    // Clear prior interactive requests once the processor is done.
-    forced_update_pending_callback_->Run(false, false);
+
+  ResetInteractivityFlags();
+
   SetStatusAndNotify(UpdateStatus::IDLE);
   ScheduleUpdates();
   error_event_.reset(nullptr);
@@ -1116,9 +1249,10 @@
   // actions (update download as well as the initial update check
   // actions).
   const string type = action->Type();
-  if (type == DownloadAction::StaticType()) {
+  if (type == DownloadActionChromeos::StaticType()) {
     download_progress_ = 0.0;
-    DownloadAction* download_action = static_cast<DownloadAction*>(action);
+    DownloadActionChromeos* download_action =
+        static_cast<DownloadActionChromeos*>(action);
     http_response_code_ = download_action->GetHTTPResponseCode();
   } else if (type == OmahaRequestAction::StaticType()) {
     OmahaRequestAction* omaha_request_action =
@@ -1168,7 +1302,6 @@
           new InstallPlan(omaha_response_handler_action->install_plan()));
       UpdateLastCheckedTime();
       new_version_ = install_plan_->version;
-      new_system_version_ = install_plan_->system_version;
       new_payload_size_ = 0;
       for (const auto& payload : install_plan_->payloads)
         new_payload_size_ += payload.size;
@@ -1195,6 +1328,7 @@
         case UpdateStatus::REPORTING_ERROR_EVENT:
         case UpdateStatus::ATTEMPTING_ROLLBACK:
         case UpdateStatus::DISABLED:
+        case UpdateStatus::CLEANUP_PREVIOUS_UPDATE:
           MarkDeltaUpdateFailure();
           break;
       }
@@ -1225,7 +1359,7 @@
                                     uint64_t total) {
   // The PayloadState keeps track of how many bytes were actually downloaded
   // from a given URL for the URL skipping logic.
-  system_state_->payload_state()->DownloadProgress(bytes_progressed);
+  SystemState::Get()->payload_state()->DownloadProgress(bytes_progressed);
 
   double progress = 0;
   if (total)
@@ -1239,7 +1373,7 @@
 }
 
 void UpdateAttempter::DownloadComplete() {
-  system_state_->payload_state()->DownloadComplete();
+  SystemState::Get()->payload_state()->DownloadComplete();
 }
 
 void UpdateAttempter::ProgressUpdate(double progress) {
@@ -1254,6 +1388,15 @@
   }
 }
 
+void UpdateAttempter::ResetInteractivityFlags() {
+  // Reset the state that's only valid for a single update pass.
+  current_update_attempt_flags_ = UpdateAttemptFlags::kNone;
+
+  if (forced_update_pending_callback_.get())
+    // Clear prior interactive requests once the processor is done.
+    forced_update_pending_callback_->Run(false, false);
+}
+
 bool UpdateAttempter::ResetStatus() {
   LOG(INFO) << "Attempting to reset state from "
             << UpdateStatusToString(status_) << " to UpdateStatus::IDLE";
@@ -1272,9 +1415,10 @@
       // UpdateStatus::UPDATED_NEED_REBOOT state.
       ret_value = prefs_->Delete(kPrefsUpdateCompletedOnBootId) && ret_value;
       ret_value = prefs_->Delete(kPrefsUpdateCompletedBootTime) && ret_value;
+      ret_value = prefs_->Delete(kPrefsLastFp, {kDlcPrefsSubDir}) && ret_value;
 
       // Update the boot flags so the current slot has higher priority.
-      BootControlInterface* boot_control = system_state_->boot_control();
+      BootControlInterface* boot_control = SystemState::Get()->boot_control();
       if (!boot_control->SetActiveBootSlot(boot_control->GetCurrentSlot()))
         ret_value = false;
 
@@ -1285,7 +1429,7 @@
         ret_value = false;
 
       // Notify the PayloadState that the successful payload was canceled.
-      system_state_->payload_state()->ResetUpdateStatus();
+      SystemState::Get()->payload_state()->ResetUpdateStatus();
 
       // The previous version is used to report back to omaha after reboot that
       // we actually rebooted into the new version from this "prev-version". We
@@ -1307,11 +1451,26 @@
   out_status->last_checked_time = last_checked_time_;
   out_status->status = status_;
   out_status->current_version = omaha_request_params_->app_version();
-  out_status->current_system_version = omaha_request_params_->system_version();
   out_status->progress = download_progress_;
   out_status->new_size_bytes = new_payload_size_;
   out_status->new_version = new_version_;
-  out_status->new_system_version = new_system_version_;
+  out_status->is_enterprise_rollback =
+      install_plan_ && install_plan_->is_rollback;
+  out_status->is_install = is_install_;
+
+  string str_eol_date;
+  if (SystemState::Get()->prefs()->Exists(kPrefsOmahaEolDate) &&
+      !SystemState::Get()->prefs()->GetString(kPrefsOmahaEolDate,
+                                              &str_eol_date))
+    LOG(ERROR) << "Failed to retrieve kPrefsOmahaEolDate pref.";
+  out_status->eol_date = StringToEolDate(str_eol_date);
+
+  // A powerwash will take place either if the install plan says it is required
+  // or if an enterprise rollback is happening.
+  out_status->will_powerwash_after_reboot =
+      install_plan_ &&
+      (install_plan_->powerwash_required || install_plan_->is_rollback);
+
   return true;
 }
 
@@ -1329,13 +1488,13 @@
 uint32_t UpdateAttempter::GetErrorCodeFlags() {
   uint32_t flags = 0;
 
-  if (!system_state_->hardware()->IsNormalBootMode())
+  if (!SystemState::Get()->hardware()->IsNormalBootMode())
     flags |= static_cast<uint32_t>(ErrorCode::kDevModeFlag);
 
   if (install_plan_ && install_plan_->is_resume)
     flags |= static_cast<uint32_t>(ErrorCode::kResumedFlag);
 
-  if (!system_state_->hardware()->IsOfficialBuild())
+  if (!SystemState::Get()->hardware()->IsOfficialBuild())
     flags |= static_cast<uint32_t>(ErrorCode::kTestImageFlag);
 
   if (!omaha_request_params_->IsUpdateUrlOfficial()) {
@@ -1348,7 +1507,7 @@
 bool UpdateAttempter::ShouldCancel(ErrorCode* cancel_reason) {
   // Check if the channel we're attempting to update to is the same as the
   // target channel currently chosen by the user.
-  OmahaRequestParams* params = system_state_->request_params();
+  OmahaRequestParams* params = SystemState::Get()->request_params();
   if (params->download_channel() != params->target_channel()) {
     LOG(ERROR) << "Aborting download as target channel: "
                << params->target_channel()
@@ -1406,22 +1565,22 @@
     return false;
 
   LOG(ERROR) << "Update failed.";
-  system_state_->payload_state()->UpdateFailed(error_event_->error_code);
+  SystemState::Get()->payload_state()->UpdateFailed(error_event_->error_code);
 
   // Send metrics if it was a rollback.
   if (install_plan_ && install_plan_->is_rollback) {
-    system_state_->metrics_reporter()->ReportEnterpriseRollbackMetrics(
+    SystemState::Get()->metrics_reporter()->ReportEnterpriseRollbackMetrics(
         /*success=*/false, install_plan_->version);
   }
 
   // Send it to Omaha.
   LOG(INFO) << "Reporting the error event";
   auto error_event_action = std::make_unique<OmahaRequestAction>(
-      system_state_,
       error_event_.release(),  // Pass ownership.
       std::make_unique<LibcurlHttpFetcher>(GetProxyResolver(),
-                                           system_state_->hardware()),
-      false);
+                                           SystemState::Get()->hardware()),
+      false,
+      session_id_);
   processor_->EnqueueAction(std::move(error_event_action));
   SetStatusAndNotify(UpdateStatus::REPORTING_ERROR_EVENT);
   processor_->StartProcessing();
@@ -1459,12 +1618,14 @@
 
 void UpdateAttempter::PingOmaha() {
   if (!processor_->IsRunning()) {
+    ResetInteractivityFlags();
+
     auto ping_action = std::make_unique<OmahaRequestAction>(
-        system_state_,
         nullptr,
         std::make_unique<LibcurlHttpFetcher>(GetProxyResolver(),
-                                             system_state_->hardware()),
-        true);
+                                             SystemState::Get()->hardware()),
+        true,
+        "" /* session_id */);
     processor_->set_delegate(nullptr);
     processor_->EnqueueAction(std::move(ping_action));
     // Call StartProcessing() synchronously here to avoid any race conditions
@@ -1545,9 +1706,9 @@
   // in case we rebooted because of a crash of the old version, so we
   // can do a proper crash report with correct information.
   // This must be done before calling
-  // system_state_->payload_state()->UpdateEngineStarted() since it will
+  // SystemState::Get()->payload_state()->UpdateEngineStarted() since it will
   // delete SystemUpdated marker file.
-  if (system_state_->system_rebooted() &&
+  if (SystemState::Get()->system_rebooted() &&
       prefs_->Exists(kPrefsSystemUpdatedMarker)) {
     if (!prefs_->GetString(kPrefsPreviousVersion, &prev_version_)) {
       // If we fail to get the version string, make sure it stays empty.
@@ -1555,18 +1716,38 @@
     }
   }
 
-  system_state_->payload_state()->UpdateEngineStarted();
+  MoveToPrefs({kPrefsLastRollCallPingDay, kPrefsLastActivePingDay});
+
+  SystemState::Get()->payload_state()->UpdateEngineStarted();
   StartP2PAtStartup();
+
+  excluder_ = CreateExcluder();
+}
+
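+// Copies each key from the powerwash-safe prefs into the regular prefs
+// (without overwriting existing values) and then deletes the powerwash-safe
+// copy, regardless of whether the migration succeeded.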
+void UpdateAttempter::MoveToPrefs(const vector<string>& keys) {
+  auto* powerwash_safe_prefs = SystemState::Get()->powerwash_safe_prefs();
+  for (const auto& key : keys) {
+    // Do not overwrite existing pref key with powerwash prefs.
+    if (!prefs_->Exists(key) && powerwash_safe_prefs->Exists(key)) {
+      string value;
+      if (!powerwash_safe_prefs->GetString(key, &value) ||
+          !prefs_->SetString(key, value)) {
+        PLOG(ERROR) << "Unable to add powerwash safe key " << key
+                    << " to prefs. Powerwash safe key will be deleted.";
+      }
+    }
+    // Delete keys regardless of operation success to preserve privacy.
+    powerwash_safe_prefs->Delete(key);
+  }
 }
 
 bool UpdateAttempter::StartP2PAtStartup() {
-  if (system_state_ == nullptr ||
-      !system_state_->p2p_manager()->IsP2PEnabled()) {
+  if (!SystemState::Get()->p2p_manager()->IsP2PEnabled()) {
     LOG(INFO) << "Not starting p2p at startup since it's not enabled.";
     return false;
   }
 
-  if (system_state_->p2p_manager()->CountSharedFiles() < 1) {
+  if (SystemState::Get()->p2p_manager()->CountSharedFiles() < 1) {
     LOG(INFO) << "Not starting p2p at startup since our application "
               << "is not sharing any files.";
     return false;
@@ -1576,22 +1757,19 @@
 }
 
 bool UpdateAttempter::StartP2PAndPerformHousekeeping() {
-  if (system_state_ == nullptr)
-    return false;
-
-  if (!system_state_->p2p_manager()->IsP2PEnabled()) {
+  if (!SystemState::Get()->p2p_manager()->IsP2PEnabled()) {
     LOG(INFO) << "Not starting p2p since it's not enabled.";
     return false;
   }
 
   LOG(INFO) << "Ensuring that p2p is running.";
-  if (!system_state_->p2p_manager()->EnsureP2PRunning()) {
+  if (!SystemState::Get()->p2p_manager()->EnsureP2PRunning()) {
     LOG(ERROR) << "Error starting p2p.";
     return false;
   }
 
   LOG(INFO) << "Performing p2p housekeeping.";
-  if (!system_state_->p2p_manager()->PerformHousekeeping()) {
+  if (!SystemState::Get()->p2p_manager()->PerformHousekeeping()) {
     LOG(ERROR) << "Error performing housekeeping for p2p.";
     return false;
   }
@@ -1626,7 +1804,7 @@
   return true;
 }
 
-bool UpdateAttempter::IsUpdateRunningOrScheduled() {
+bool UpdateAttempter::IsBusyOrUpdateScheduled() {
   return ((status_ != UpdateStatus::IDLE &&
            status_ != UpdateStatus::UPDATED_NEED_REBOOT) ||
           waiting_for_scheduled_check_);
@@ -1638,12 +1816,12 @@
   //  * The debugd dev features are accessible (i.e. in devmode with no owner).
   // This protects users running a base image, while still allowing a specific
   // window (gated by the debug dev features) where `cros flash` is usable.
-  if (!system_state_->hardware()->IsOfficialBuild()) {
+  if (!SystemState::Get()->hardware()->IsOfficialBuild()) {
     LOG(INFO) << "Non-official build; allowing any update source.";
     return true;
   }
 
-  if (system_state_->hardware()->AreDevFeaturesEnabled()) {
+  if (SystemState::Get()->hardware()->AreDevFeaturesEnabled()) {
     LOG(INFO) << "Developer features enabled; allowing custom update sources.";
     return true;
   }
@@ -1654,20 +1832,22 @@
 }
 
 void UpdateAttempter::ReportTimeToUpdateAppliedMetric() {
-  const policy::DevicePolicy* device_policy = system_state_->device_policy();
+  const policy::DevicePolicy* device_policy =
+      SystemState::Get()->device_policy();
   if (device_policy && device_policy->IsEnterpriseEnrolled()) {
     vector<policy::DevicePolicy::WeeklyTimeInterval> parsed_intervals;
     bool has_time_restrictions =
         device_policy->GetDisallowedTimeIntervals(&parsed_intervals);
 
     int64_t update_first_seen_at_int;
-    if (system_state_->prefs()->Exists(kPrefsUpdateFirstSeenAt)) {
-      if (system_state_->prefs()->GetInt64(kPrefsUpdateFirstSeenAt,
-                                           &update_first_seen_at_int)) {
+    if (SystemState::Get()->prefs()->Exists(kPrefsUpdateFirstSeenAt)) {
+      if (SystemState::Get()->prefs()->GetInt64(kPrefsUpdateFirstSeenAt,
+                                                &update_first_seen_at_int)) {
         TimeDelta update_delay =
-            system_state_->clock()->GetWallclockTime() -
+            SystemState::Get()->clock()->GetWallclockTime() -
             Time::FromInternalValue(update_first_seen_at_int);
-        system_state_->metrics_reporter()
+        SystemState::Get()
+            ->metrics_reporter()
             ->ReportEnterpriseUpdateSeenToDownloadDays(has_time_restrictions,
                                                        update_delay.InDays());
       }
diff --git a/update_attempter.h b/cros/update_attempter.h
similarity index 79%
rename from update_attempter.h
rename to cros/update_attempter.h
index c27f8a4..6010484 100644
--- a/update_attempter.h
+++ b/cros/update_attempter.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_UPDATE_ATTEMPTER_H_
-#define UPDATE_ENGINE_UPDATE_ATTEMPTER_H_
+#ifndef UPDATE_ENGINE_CROS_UPDATE_ATTEMPTER_H_
+#define UPDATE_ENGINE_CROS_UPDATE_ATTEMPTER_H_
 
 #include <time.h>
 
@@ -26,23 +26,25 @@
 #include <vector>
 
 #include <base/bind.h>
+#include <base/guid.h>
 #include <base/time/time.h>
 #include <gtest/gtest_prod.h>  // for FRIEND_TEST
 
-#if USE_CHROME_NETWORK_PROXY
-#include "update_engine/chrome_browser_proxy_resolver.h"
-#endif  // USE_CHROME_NETWORK_PROXY
 #include "update_engine/certificate_checker.h"
 #include "update_engine/client_library/include/update_engine/update_status.h"
 #include "update_engine/common/action_processor.h"
 #include "update_engine/common/cpu_limiter.h"
+#include "update_engine/common/daemon_state_interface.h"
+#include "update_engine/common/download_action.h"
+#include "update_engine/common/excluder_interface.h"
 #include "update_engine/common/proxy_resolver.h"
-#include "update_engine/omaha_request_params.h"
-#include "update_engine/omaha_response_handler_action.h"
-#include "update_engine/payload_consumer/download_action.h"
+#include "update_engine/common/service_observer_interface.h"
+#include "update_engine/common/system_state.h"
+#include "update_engine/cros/chrome_browser_proxy_resolver.h"
+#include "update_engine/cros/omaha_request_builder_xml.h"
+#include "update_engine/cros/omaha_request_params.h"
+#include "update_engine/cros/omaha_response_handler_action.h"
 #include "update_engine/payload_consumer/postinstall_runner_action.h"
-#include "update_engine/service_observer_interface.h"
-#include "update_engine/system_state.h"
 #include "update_engine/update_manager/policy.h"
 #include "update_engine/update_manager/staging_utils.h"
 #include "update_engine/update_manager/update_manager.h"
@@ -56,13 +58,14 @@
 class UpdateAttempter : public ActionProcessorDelegate,
                         public DownloadActionDelegate,
                         public CertificateChecker::Observer,
-                        public PostinstallRunnerAction::DelegateInterface {
+                        public PostinstallRunnerAction::DelegateInterface,
+                        public DaemonStateInterface {
  public:
   using UpdateStatus = update_engine::UpdateStatus;
   using UpdateAttemptFlags = update_engine::UpdateAttemptFlags;
   static const int kMaxDeltaUpdateFailures;
 
-  UpdateAttempter(SystemState* system_state, CertificateChecker* cert_checker);
+  explicit UpdateAttempter(CertificateChecker* cert_checker);
   ~UpdateAttempter() override;
 
   // Further initialization to be done post construction.
@@ -73,19 +76,8 @@
   virtual bool ScheduleUpdates();
 
   // Checks for update and, if a newer version is available, attempts to update
-  // the system. Non-empty |in_app_version| or |in_update_url| prevents
-  // automatic detection of the parameter.  |target_channel| denotes a
-  // policy-mandated channel we are updating to, if not empty. If |obey_proxies|
-  // is true, the update will likely respect Chrome's proxy setting. For
-  // security reasons, we may still not honor them. |interactive| should be true
-  // if this was called from the user (ie dbus).
-  virtual void Update(const std::string& app_version,
-                      const std::string& omaha_url,
-                      const std::string& target_channel,
-                      const std::string& target_version_prefix,
-                      bool rollback_allowed,
-                      bool obey_proxies,
-                      bool interactive);
+  // the system.
+  virtual void Update(const chromeos_update_manager::UpdateCheckParams& params);
 
   // ActionProcessorDelegate methods:
   void ProcessingDone(const ActionProcessor* processor,
@@ -137,7 +129,7 @@
                               UpdateAttemptFlags flags);
 
   // This is the version of CheckForUpdate called by AttemptInstall API.
-  virtual bool CheckForInstall(const std::vector<std::string>& dlc_module_ids,
+  virtual bool CheckForInstall(const std::vector<std::string>& dlc_ids,
                                const std::string& omaha_url);
 
   // This is the internal entry point for going through a rollback. This will
@@ -158,6 +150,9 @@
   // UPDATED_NEED_REBOOT. Returns true on success, false otherwise.
   bool RebootIfNeeded();
 
+  // Sets the DLC as active or inactive. See chromeos/common_service.h
+  virtual bool SetDlcActiveValue(bool is_active, const std::string& dlc_id);
+
   // DownloadActionDelegate methods:
   void BytesReceived(uint64_t bytes_progressed,
                      uint64_t bytes_received,
@@ -177,6 +172,9 @@
   // Called at update_engine startup to do various house-keeping.
   void UpdateEngineStarted();
 
+  // Returns the |Excluder| that is currently held onto.
+  virtual ExcluderInterface* GetExcluder() const { return excluder_.get(); }
+
   // Reloads the device policy from libbrillo. Note: This method doesn't
   // cause a real-time policy fetch from the policy server. It just reloads the
   // latest value that libbrillo has cached. libbrillo fetches the policies
@@ -221,15 +219,15 @@
   // 'cros flash' to function properly).
   bool IsAnyUpdateSourceAllowed() const;
 
-  // Add and remove a service observer.
-  void AddObserver(ServiceObserverInterface* observer) {
+  // |DaemonStateInterface| overrides.
+  bool StartUpdater() override;
+  void AddObserver(ServiceObserverInterface* observer) override {
     service_observers_.insert(observer);
   }
-  void RemoveObserver(ServiceObserverInterface* observer) {
+  void RemoveObserver(ServiceObserverInterface* observer) override {
     service_observers_.erase(observer);
   }
-
-  const std::set<ServiceObserverInterface*>& service_observers() {
+  const std::set<ServiceObserverInterface*>& service_observers() override {
     return service_observers_;
   }
 
@@ -245,38 +243,64 @@
   FRIEND_TEST(UpdateAttempterTest, ActionCompletedOmahaRequestTest);
   FRIEND_TEST(UpdateAttempterTest, BootTimeInUpdateMarkerFile);
   FRIEND_TEST(UpdateAttempterTest, BroadcastCompleteDownloadTest);
+  FRIEND_TEST(UpdateAttempterTest, CalculateDlcParamsInstallTest);
+  FRIEND_TEST(UpdateAttempterTest, CalculateDlcParamsNoPrefFilesTest);
+  FRIEND_TEST(UpdateAttempterTest, CalculateDlcParamsNonParseableValuesTest);
+  FRIEND_TEST(UpdateAttempterTest, CalculateDlcParamsValidValuesTest);
+  FRIEND_TEST(UpdateAttempterTest, CalculateDlcParamsRemoveStaleMetadata);
   FRIEND_TEST(UpdateAttempterTest, ChangeToDownloadingOnReceivedBytesTest);
+  FRIEND_TEST(UpdateAttempterTest, CheckForInstallNotIdleFails);
   FRIEND_TEST(UpdateAttempterTest, CheckForUpdateAUDlcTest);
   FRIEND_TEST(UpdateAttempterTest, CreatePendingErrorEventTest);
   FRIEND_TEST(UpdateAttempterTest, CreatePendingErrorEventResumedTest);
   FRIEND_TEST(UpdateAttempterTest, DisableDeltaUpdateIfNeededTest);
   FRIEND_TEST(UpdateAttempterTest, DownloadProgressAccumulationTest);
   FRIEND_TEST(UpdateAttempterTest, InstallSetsStatusIdle);
+  FRIEND_TEST(UpdateAttempterTest, IsEnterpriseRollbackInGetStatusTrue);
+  FRIEND_TEST(UpdateAttempterTest, IsEnterpriseRollbackInGetStatusFalse);
+  FRIEND_TEST(UpdateAttempterTest,
+              PowerwashInGetStatusTrueBecausePowerwashRequired);
+  FRIEND_TEST(UpdateAttempterTest, PowerwashInGetStatusTrueBecauseRollback);
   FRIEND_TEST(UpdateAttempterTest, MarkDeltaUpdateFailureTest);
   FRIEND_TEST(UpdateAttempterTest, PingOmahaTest);
+  FRIEND_TEST(UpdateAttempterTest, ProcessingDoneInstallError);
+  FRIEND_TEST(UpdateAttempterTest, ProcessingDoneUpdateError);
   FRIEND_TEST(UpdateAttempterTest, ReportDailyMetrics);
   FRIEND_TEST(UpdateAttempterTest, RollbackNotAllowed);
   FRIEND_TEST(UpdateAttempterTest, RollbackAfterInstall);
   FRIEND_TEST(UpdateAttempterTest, RollbackAllowed);
   FRIEND_TEST(UpdateAttempterTest, RollbackAllowedSetAndReset);
+  FRIEND_TEST(UpdateAttempterTest, ChannelDowngradeNoRollback);
+  FRIEND_TEST(UpdateAttempterTest, ChannelDowngradeRollback);
   FRIEND_TEST(UpdateAttempterTest, RollbackMetricsNotRollbackFailure);
   FRIEND_TEST(UpdateAttempterTest, RollbackMetricsNotRollbackSuccess);
   FRIEND_TEST(UpdateAttempterTest, RollbackMetricsRollbackFailure);
   FRIEND_TEST(UpdateAttempterTest, RollbackMetricsRollbackSuccess);
   FRIEND_TEST(UpdateAttempterTest, ScheduleErrorEventActionNoEventTest);
   FRIEND_TEST(UpdateAttempterTest, ScheduleErrorEventActionTest);
+  FRIEND_TEST(UpdateAttempterTest, SessionIdTestEnforceEmptyStrPingOmaha);
+  FRIEND_TEST(UpdateAttempterTest, SessionIdTestOnOmahaRequestActions);
   FRIEND_TEST(UpdateAttempterTest, SetRollbackHappenedNotRollback);
   FRIEND_TEST(UpdateAttempterTest, SetRollbackHappenedRollback);
+  FRIEND_TEST(UpdateAttempterTest, TargetChannelHintSetAndReset);
   FRIEND_TEST(UpdateAttempterTest, TargetVersionPrefixSetAndReset);
   FRIEND_TEST(UpdateAttempterTest, UpdateAfterInstall);
   FRIEND_TEST(UpdateAttempterTest, UpdateAttemptFlagsCachedAtUpdateStart);
   FRIEND_TEST(UpdateAttempterTest, UpdateDeferredByPolicyTest);
   FRIEND_TEST(UpdateAttempterTest, UpdateIsNotRunningWhenUpdateAvailable);
+  FRIEND_TEST(UpdateAttempterTest, GetSuccessfulDlcIds);
+  FRIEND_TEST(UpdateAttempterTest, QuickFixTokenWhenDeviceIsEnterpriseEnrolled);
+  FRIEND_TEST(UpdateAttempterTest, MoveToPrefs);
 
   // Returns the special flags to be added to ErrorCode values based on the
   // parameters used in the current update attempt.
   uint32_t GetErrorCodeFlags();
 
+  // Internal helpers for the |ProcessingDone()| delegate method.
+  void ProcessingDoneInternal(const ActionProcessor* processor, ErrorCode code);
+  void ProcessingDoneUpdate(const ActionProcessor* processor, ErrorCode code);
+  void ProcessingDoneInstall(const ActionProcessor* processor, ErrorCode code);
+
   // CertificateChecker::Observer method.
   // Report metrics about the certificate being checked.
   void CertificateChecked(ServerToCheck server_to_check,
@@ -317,10 +341,8 @@
   void MarkDeltaUpdateFailure();
 
   ProxyResolver* GetProxyResolver() {
-#if USE_CHROME_NETWORK_PROXY
     if (obeying_proxies_)
       return &chrome_proxy_resolver_;
-#endif  // USE_CHROME_NETWORK_PROXY
     return &direct_proxy_resolver_;
   }
 
@@ -334,18 +356,13 @@
   // Helper method of Update() to calculate the update-related parameters
   // from various sources and set the appropriate state. Please refer to
   // Update() method for the meaning of the parameters.
-  bool CalculateUpdateParams(const std::string& app_version,
-                             const std::string& omaha_url,
-                             const std::string& target_channel,
-                             const std::string& target_version_prefix,
-                             bool rollback_allowed,
-                             bool obey_proxies,
-                             bool interactive);
+  bool CalculateUpdateParams(
+      const chromeos_update_manager::UpdateCheckParams& params);
 
   // Calculates all the scattering related parameters (such as waiting period,
   // which type of scattering is enabled, etc.) and also updates/deletes
   // the corresponding prefs file used in scattering. Should be called
-  // only after the device policy has been loaded and set in the system_state_.
+  // only after the device policy has been loaded and set in the system state.
   void CalculateScatteringParams(bool interactive);
 
   // Sets a random value for the waiting period to wait for before downloading
@@ -371,6 +388,10 @@
   // on the |omaha_request_params_| object.
   void CalculateP2PParams(bool interactive);
 
+  // For each key, reads the value from powerwash-safe prefs and adds it to
+  // prefs if the key doesn't already exist. Then deletes the powerwash-safe
+  // keys.
+  void MoveToPrefs(const std::vector<std::string>& keys);
+
   // Starts P2P if it's enabled and there are files to actually share.
   // Called only at program startup. Returns true only if p2p was
   // started and housekeeping was performed.
@@ -398,8 +419,8 @@
   // policy is available again.
   void UpdateRollbackHappened();
 
-  // Returns whether an update is currently running or scheduled.
-  bool IsUpdateRunningOrScheduled();
+  // Returns whether an update is running, applied and awaiting reboot, or
+  // scheduled.
+  bool IsBusyOrUpdateScheduled();
 
   void CalculateStagingParams(bool interactive);
 
@@ -408,6 +429,32 @@
   // will only be reported for enterprise enrolled devices.
   void ReportTimeToUpdateAppliedMetric();
 
+  // Resets interactivity and forced update flags.
+  void ResetInteractivityFlags();
+
+  // Resets all the DLC prefs.
+  bool ResetDlcPrefs(const std::string& dlc_id);
+
+  // Sets the given pref key for the DLC and the platform.
+  void SetPref(const std::string& pref_key,
+               const std::string& pref_value,
+               const std::string& payload_id);
+
+  // Gets the integer value from the DLC metadata for |kPrefsPingLastActive|
+  // or |kPrefsPingLastRollcall|.
+  // The value is -2 when it cannot be read or is not numeric.
+  // The value is -1 the first time it is sent, which is when the metadata
+  // file doesn't exist.
+  int64_t GetPingMetadata(const std::string& metadata_key) const;
+
+  // Calculates the update parameters for DLCs. Sets the |dlc_ids_|
+  // parameter on the |omaha_request_params_| object.
+  void CalculateDlcParams();
+
+  // Returns the list of DLC IDs that were installed/updated, excluding the ones
+  // which had "noupdate" in the Omaha response.
+  std::vector<std::string> GetSuccessfulDlcIds();
+
   // Last status notification timestamp used for throttling. Use monotonic
   // TimeTicks to ensure that notifications are sent even if the system clock is
   // set back in the middle of an update.
@@ -415,15 +462,11 @@
 
   // Our two proxy resolvers
   DirectProxyResolver direct_proxy_resolver_;
-#if USE_CHROME_NETWORK_PROXY
   ChromeBrowserProxyResolver chrome_proxy_resolver_;
-#endif  // USE_CHROME_NETWORK_PROXY
 
   std::unique_ptr<ActionProcessor> processor_;
 
-  // External state of the system outside the update_engine process
-  // carved out separately to mock out easily in unit tests.
-  SystemState* system_state_;
+  ActionProcessor aux_processor_;
 
   // Pointer to the certificate checker instance to use.
   CertificateChecker* cert_checker_;
@@ -435,7 +478,7 @@
   std::unique_ptr<InstallPlan> install_plan_;
 
   // Pointer to the preferences store interface. This is just a cached
-  // copy of system_state->prefs() because it's used in many methods and
+  // copy of SystemState::Get()->prefs() because it's used in many methods and
   // is convenient this way.
   PrefsInterface* prefs_ = nullptr;
 
@@ -460,7 +503,6 @@
   int64_t last_checked_time_ = 0;
   std::string prev_version_;
   std::string new_version_ = "0.0.0.0";
-  std::string new_system_version_;
   uint64_t new_payload_size_ = 0;
   // Flags influencing all periodic update checks
   UpdateAttemptFlags update_attempt_flags_ = UpdateAttemptFlags::kNone;
@@ -509,7 +551,7 @@
   std::string forced_omaha_url_;
 
   // A list of DLC module IDs.
-  std::vector<std::string> dlc_module_ids_;
+  std::vector<std::string> dlc_ids_;
   // Whether the operation is install (write to the current slot not the
   // inactive slot).
   bool is_install_;
@@ -518,6 +560,12 @@
   base::TimeDelta staging_wait_time_;
   chromeos_update_manager::StagingSchedule staging_schedule_;
 
+  // This is the session ID used to track the update flow to Omaha.
+  std::string session_id_;
+
+  // Interface for excluder.
+  std::unique_ptr<ExcluderInterface> excluder_;
+
   DISALLOW_COPY_AND_ASSIGN(UpdateAttempter);
 };
 
@@ -530,4 +578,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_UPDATE_ATTEMPTER_H_
+#endif  // UPDATE_ENGINE_CROS_UPDATE_ATTEMPTER_H_
diff --git a/cros/update_attempter_unittest.cc b/cros/update_attempter_unittest.cc
new file mode 100644
index 0000000..a7f5236
--- /dev/null
+++ b/cros/update_attempter_unittest.cc
@@ -0,0 +1,2479 @@
+//
+// Copyright (C) 2012 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/cros/update_attempter.h"
+
+#include <stdint.h>
+
+#include <limits>
+#include <map>
+#include <memory>
+#include <string>
+#include <unordered_set>
+
+#include <base/files/file_util.h>
+#include <base/files/scoped_temp_dir.h>
+#include <base/task/single_thread_task_executor.h>
+#include <brillo/message_loops/base_message_loop.h>
+#include <brillo/message_loops/message_loop.h>
+#include <brillo/message_loops/message_loop_utils.h>
+#include <gtest/gtest.h>
+#include <policy/libpolicy.h>
+#include <policy/mock_device_policy.h>
+#include <policy/mock_libpolicy.h>
+
+#include "update_engine/common/constants.h"
+#include "update_engine/common/dlcservice_interface.h"
+#include "update_engine/common/mock_action.h"
+#include "update_engine/common/mock_action_processor.h"
+#include "update_engine/common/mock_http_fetcher.h"
+#include "update_engine/common/mock_service_observer.h"
+#include "update_engine/common/platform_constants.h"
+#include "update_engine/common/prefs.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/cros/download_action_chromeos.h"
+#include "update_engine/cros/fake_system_state.h"
+#include "update_engine/cros/mock_p2p_manager.h"
+#include "update_engine/cros/mock_payload_state.h"
+#include "update_engine/cros/omaha_utils.h"
+#include "update_engine/libcurl_http_fetcher.h"
+#include "update_engine/payload_consumer/filesystem_verifier_action.h"
+#include "update_engine/payload_consumer/install_plan.h"
+#include "update_engine/payload_consumer/payload_constants.h"
+#include "update_engine/payload_consumer/postinstall_runner_action.h"
+#include "update_engine/update_boot_flags_action.h"
+#include "update_engine/update_manager/mock_update_manager.h"
+
+using base::Time;
+using base::TimeDelta;
+using chromeos_update_manager::EvalStatus;
+using chromeos_update_manager::MockUpdateManager;
+using chromeos_update_manager::StagingSchedule;
+using chromeos_update_manager::UpdateCheckParams;
+using policy::DevicePolicy;
+using std::map;
+using std::string;
+using std::unique_ptr;
+using std::unordered_set;
+using std::vector;
+using testing::_;
+using testing::Contains;
+using testing::DoAll;
+using testing::ElementsAre;
+using testing::Field;
+using testing::InSequence;
+using testing::Invoke;
+using testing::Ne;
+using testing::NiceMock;
+using testing::Pointee;
+using testing::Property;
+using testing::Return;
+using testing::ReturnPointee;
+using testing::ReturnRef;
+using testing::SaveArg;
+using testing::SetArgPointee;
+using update_engine::UpdateAttemptFlags;
+using update_engine::UpdateEngineStatus;
+using update_engine::UpdateStatus;
+
+namespace chromeos_update_engine {
+
+namespace {
+
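+// A set of non-IDLE |UpdateStatus| values used by the tests below.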
+const UpdateStatus kNonIdleUpdateStatuses[] = {
+    UpdateStatus::CHECKING_FOR_UPDATE,
+    UpdateStatus::UPDATE_AVAILABLE,
+    UpdateStatus::DOWNLOADING,
+    UpdateStatus::VERIFYING,
+    UpdateStatus::FINALIZING,
+    UpdateStatus::UPDATED_NEED_REBOOT,
+    UpdateStatus::REPORTING_ERROR_EVENT,
+    UpdateStatus::ATTEMPTING_ROLLBACK,
+    UpdateStatus::DISABLED,
+    UpdateStatus::NEED_PERMISSION_TO_UPDATE,
+};
+
+struct CheckForUpdateTestParams {
+  // Setups + Inputs:
+  UpdateStatus status = UpdateStatus::IDLE;
+  string app_version = "fake_app_version";
+  string omaha_url = "fake_omaha_url";
+  UpdateAttemptFlags flags = UpdateAttemptFlags::kNone;
+  bool is_official_build = true;
+  bool are_dev_features_enabled = false;
+
+  // Expects:
+  string expected_forced_app_version = "";
+  string expected_forced_omaha_url = "";
+  bool should_schedule_updates_be_called = true;
+  bool expected_result = true;
+};
+
+struct OnUpdateScheduledTestParams {
+  // Setups + Inputs:
+  UpdateCheckParams params = {};
+  EvalStatus status = EvalStatus::kFailed;
+  // Expects:
+  UpdateStatus exit_status = UpdateStatus::IDLE;
+  bool should_schedule_updates_be_called = false;
+  bool should_update_be_called = false;
+};
+
+struct ProcessingDoneTestParams {
+  // Setups + Inputs:
+  bool is_install = false;
+  UpdateStatus status = UpdateStatus::CHECKING_FOR_UPDATE;
+  ActionProcessor* processor = nullptr;
+  ErrorCode code = ErrorCode::kSuccess;
+  map<string, OmahaRequestParams::AppParams> dlc_apps_params;
+
+  // Expects:
+  const bool kExpectedIsInstall = false;
+  bool should_schedule_updates_be_called = true;
+  UpdateStatus expected_exit_status = UpdateStatus::IDLE;
+  bool should_install_completed_be_called = false;
+  bool should_update_completed_be_called = false;
+  vector<string> args_to_install_completed;
+  vector<string> args_to_update_completed;
+};
+
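+// Mock |DlcServiceInterface| so the tests can verify the install/update
+// completion calls made by the attempter.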
+class MockDlcService : public DlcServiceInterface {
+ public:
+  MOCK_METHOD1(GetDlcsToUpdate, bool(vector<string>*));
+  MOCK_METHOD1(InstallCompleted, bool(const vector<string>&));
+  MOCK_METHOD1(UpdateCompleted, bool(const vector<string>&));
+};
+
+}  // namespace
+
+const char kRollbackVersion[] = "10575.39.2";
+
+// Test a subclass rather than the main class directly so that we can mock out
+// methods within the class. There are explicit unit tests for the mocked-out
+// methods.
+class UpdateAttempterUnderTest : public UpdateAttempter {
+ public:
+  UpdateAttempterUnderTest() : UpdateAttempter(nullptr) {}
+
+  void Update(const UpdateCheckParams& params) override {
+    update_called_ = true;
+    if (do_update_) {
+      UpdateAttempter::Update(params);
+      return;
+    }
+    LOG(INFO) << "[TEST] Update() disabled.";
+    status_ = UpdateStatus::CHECKING_FOR_UPDATE;
+  }
+
+  void DisableUpdate() { do_update_ = false; }
+
+  bool WasUpdateCalled() const { return update_called_; }
+
+  // Wrap the update scheduling method, allowing us to opt out of scheduled
+  // updates for testing purposes.
+  bool ScheduleUpdates() override {
+    schedule_updates_called_ = true;
+    if (do_schedule_updates_)
+      return UpdateAttempter::ScheduleUpdates();
+    LOG(INFO) << "[TEST] Update scheduling disabled.";
+    waiting_for_scheduled_check_ = true;
+    return true;
+  }
+
+  void DisableScheduleUpdates() { do_schedule_updates_ = false; }
+
+  // Indicates whether |ScheduleUpdates()| was called.
+  bool WasScheduleUpdatesCalled() const { return schedule_updates_called_; }
+
+  // Expose the following private members of |UpdateAttempter| for tests.
+  const string& forced_app_version() const { return forced_app_version_; }
+  const string& forced_omaha_url() const { return forced_omaha_url_; }
+
+  // Need to expose |waiting_for_scheduled_check_| for testing.
+  void SetWaitingForScheduledCheck(bool waiting) {
+    waiting_for_scheduled_check_ = waiting;
+  }
+
+ private:
+  // Used for overrides of |Update()|.
+  bool update_called_ = false;
+  bool do_update_ = true;
+
+  // Used for overrides of |ScheduleUpdates()|.
+  bool schedule_updates_called_ = false;
+  bool do_schedule_updates_ = true;
+};
+
+class UpdateAttempterTest : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    // Override system state members.
+    FakeSystemState::CreateInstance();
+    FakeSystemState::Get()->set_connection_manager(&mock_connection_manager);
+    FakeSystemState::Get()->set_update_attempter(&attempter_);
+    FakeSystemState::Get()->set_dlcservice(&mock_dlcservice_);
+    FakeSystemState::Get()->set_update_manager(&mock_update_manager_);
+    loop_.SetAsCurrent();
+
+    prefs_ = FakeSystemState::Get()->fake_prefs();
+    certificate_checker_.reset(
+        new CertificateChecker(prefs_, &openssl_wrapper_));
+    certificate_checker_->Init();
+
+    attempter_.set_forced_update_pending_callback(
+        new base::Callback<void(bool, bool)>(base::Bind([](bool, bool) {})));
+    // Finish initializing the attempter.
+    attempter_.Init();
+
+    EXPECT_EQ(0, attempter_.http_response_code_);
+    EXPECT_EQ(UpdateStatus::IDLE, attempter_.status_);
+    EXPECT_EQ(0.0, attempter_.download_progress_);
+    EXPECT_EQ(0, attempter_.last_checked_time_);
+    EXPECT_EQ("0.0.0.0", attempter_.new_version_);
+    EXPECT_EQ(0ULL, attempter_.new_payload_size_);
+    processor_ = new NiceMock<MockActionProcessor>();
+    attempter_.processor_.reset(processor_);  // Transfers ownership.
+
+    // Set up store/load semantics of P2P properties via the mock
+    // |PayloadState|.
+    actual_using_p2p_for_downloading_ = false;
+    EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(),
+                SetUsingP2PForDownloading(_))
+        .WillRepeatedly(SaveArg<0>(&actual_using_p2p_for_downloading_));
+    EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(),
+                GetUsingP2PForDownloading())
+        .WillRepeatedly(ReturnPointee(&actual_using_p2p_for_downloading_));
+    actual_using_p2p_for_sharing_ = false;
+    EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(),
+                SetUsingP2PForSharing(_))
+        .WillRepeatedly(SaveArg<0>(&actual_using_p2p_for_sharing_));
+    EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(),
+                GetUsingP2PForSharing())
+        .WillRepeatedly(ReturnPointee(&actual_using_p2p_for_sharing_));
+  }
+
+ public:
+  void ScheduleQuitMainLoop();
+
+  // Callbacks to run the different tests from the main loop.
+  void UpdateTestStart();
+  void UpdateTestVerify();
+  void RollbackTestStart(bool enterprise_rollback, bool valid_slot);
+  void RollbackTestVerify();
+  void PingOmahaTestStart();
+  void ReadScatterFactorFromPolicyTestStart();
+  void DecrementUpdateCheckCountTestStart();
+  void NoScatteringDoneDuringManualUpdateTestStart();
+  void P2PNotEnabledStart();
+  void P2PEnabledStart();
+  void P2PEnabledInteractiveStart();
+  void P2PEnabledStartingFailsStart();
+  void P2PEnabledHousekeepingFailsStart();
+  void SessionIdTestChange();
+  void SessionIdTestEnforceEmptyStrPingOmaha();
+  void SessionIdTestConsistencyInUpdateFlow();
+  void SessionIdTestInDownloadAction();
+  void ResetRollbackHappenedStart(bool is_consumer,
+                                  bool is_policy_available,
+                                  bool expected_reset);
+  // Staging related callbacks.
+  void SetUpStagingTest(const StagingSchedule& schedule);
+  void CheckStagingOff();
+  void StagingSetsPrefsAndTurnsOffScatteringStart();
+  void StagingOffIfInteractiveStart();
+  void StagingOffIfOobeStart();
+
+  bool actual_using_p2p_for_downloading() {
+    return actual_using_p2p_for_downloading_;
+  }
+  bool actual_using_p2p_for_sharing() { return actual_using_p2p_for_sharing_; }
+
+  // |CheckForUpdate()| related member functions.
+  void TestCheckForUpdate();
+
+  // |OnUpdateScheduled()| related member functions.
+  void TestOnUpdateScheduled();
+
+  // |ProcessingDone()| related member functions.
+  void TestProcessingDone();
+
+  base::SingleThreadTaskExecutor base_loop_{base::MessagePumpType::IO};
+  brillo::BaseMessageLoop loop_{base_loop_.task_runner()};
+
+  UpdateAttempterUnderTest attempter_;
+  OpenSSLWrapper openssl_wrapper_;
+  std::unique_ptr<CertificateChecker> certificate_checker_;
+  MockDlcService mock_dlcservice_;
+  MockUpdateManager mock_update_manager_;
+
+  NiceMock<MockActionProcessor>* processor_;
+  NiceMock<MockConnectionManager> mock_connection_manager;
+
+  FakePrefs* prefs_;
+
+  // |CheckForUpdate()| test params.
+  CheckForUpdateTestParams cfu_params_;
+
+  // |OnUpdateScheduled()| test params.
+  OnUpdateScheduledTestParams ous_params_;
+
+  // |ProcessingDone()| test params.
+  ProcessingDoneTestParams pd_params_;
+
+  bool actual_using_p2p_for_downloading_;
+  bool actual_using_p2p_for_sharing_;
+};
+
+void UpdateAttempterTest::TestCheckForUpdate() {
+  // Setup
+  attempter_.status_ = cfu_params_.status;
+  FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(
+      cfu_params_.is_official_build);
+  FakeSystemState::Get()->fake_hardware()->SetAreDevFeaturesEnabled(
+      cfu_params_.are_dev_features_enabled);
+
+  // Invocation
+  EXPECT_EQ(
+      cfu_params_.expected_result,
+      attempter_.CheckForUpdate(
+          cfu_params_.app_version, cfu_params_.omaha_url, cfu_params_.flags));
+
+  // Verify
+  EXPECT_EQ(cfu_params_.expected_forced_app_version,
+            attempter_.forced_app_version());
+  EXPECT_EQ(cfu_params_.expected_forced_omaha_url,
+            attempter_.forced_omaha_url());
+  EXPECT_EQ(cfu_params_.should_schedule_updates_be_called,
+            attempter_.WasScheduleUpdatesCalled());
+}
+
+void UpdateAttempterTest::TestProcessingDone() {
+  // Setup
+  attempter_.DisableScheduleUpdates();
+  attempter_.is_install_ = pd_params_.is_install;
+  attempter_.status_ = pd_params_.status;
+  attempter_.omaha_request_params_->set_dlc_apps_params(
+      pd_params_.dlc_apps_params);
+
+  // Expects
+  if (pd_params_.should_install_completed_be_called)
+    EXPECT_CALL(mock_dlcservice_,
+                InstallCompleted(pd_params_.args_to_install_completed))
+        .WillOnce(Return(true));
+  else
+    EXPECT_CALL(mock_dlcservice_, InstallCompleted(_)).Times(0);
+  if (pd_params_.should_update_completed_be_called)
+    EXPECT_CALL(mock_dlcservice_,
+                UpdateCompleted(pd_params_.args_to_update_completed))
+        .WillOnce(Return(true));
+  else
+    EXPECT_CALL(mock_dlcservice_, UpdateCompleted(_)).Times(0);
+
+  // Invocation
+  attempter_.ProcessingDone(pd_params_.processor, pd_params_.code);
+
+  // Verify
+  EXPECT_EQ(pd_params_.kExpectedIsInstall, attempter_.is_install_);
+  EXPECT_EQ(pd_params_.should_schedule_updates_be_called,
+            attempter_.WasScheduleUpdatesCalled());
+  EXPECT_EQ(pd_params_.expected_exit_status, attempter_.status_);
+}
+
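+// Posts a task that breaks out of the main message loop, letting the
+// asynchronous test callbacks terminate the test.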
+void UpdateAttempterTest::ScheduleQuitMainLoop() {
+  loop_.PostTask(
+      FROM_HERE,
+      base::Bind([](brillo::BaseMessageLoop* loop) { loop->BreakLoop(); },
+                 base::Unretained(&loop_)));
+}
+
+void UpdateAttempterTest::SessionIdTestChange() {
+  EXPECT_NE(UpdateStatus::UPDATED_NEED_REBOOT, attempter_.status());
+  const auto old_session_id = attempter_.session_id_;
+  attempter_.Update({});
+  EXPECT_NE(old_session_id, attempter_.session_id_);
+  ScheduleQuitMainLoop();
+}
+
+TEST_F(UpdateAttempterTest, SessionIdTestChange) {
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::SessionIdTestChange,
+                            base::Unretained(this)));
+  loop_.Run();
+}
+
+void UpdateAttempterTest::SessionIdTestEnforceEmptyStrPingOmaha() {
+  // The |session_id_| should not be changed and should remain as an empty
+  // string when |status_| is |UPDATED_NEED_REBOOT| (only for consistency)
+  // and |PingOmaha()| is called.
+  attempter_.DisableScheduleUpdates();
+  attempter_.status_ = UpdateStatus::UPDATED_NEED_REBOOT;
+  const auto old_session_id = attempter_.session_id_;
+  auto CheckIfEmptySessionId = [](AbstractAction* aa) {
+    if (aa->Type() == OmahaRequestAction::StaticType()) {
+      EXPECT_TRUE(static_cast<OmahaRequestAction*>(aa)->session_id_.empty());
+    }
+  };
+  EXPECT_CALL(*processor_, EnqueueAction(Pointee(_)))
+      .WillRepeatedly(Invoke(CheckIfEmptySessionId));
+  EXPECT_CALL(*processor_, StartProcessing());
+  attempter_.PingOmaha();
+  EXPECT_EQ(old_session_id, attempter_.session_id_);
+  EXPECT_EQ(UpdateStatus::UPDATED_NEED_REBOOT, attempter_.status_);
+  ScheduleQuitMainLoop();
+}
+
+TEST_F(UpdateAttempterTest, SessionIdTestEnforceEmptyStrPingOmaha) {
+  loop_.PostTask(
+      FROM_HERE,
+      base::Bind(&UpdateAttempterTest::SessionIdTestEnforceEmptyStrPingOmaha,
+                 base::Unretained(this)));
+  loop_.Run();
+}
+
+void UpdateAttempterTest::SessionIdTestConsistencyInUpdateFlow() {
+  // All session IDs passed into |OmahaRequestActions| should be enforced to
+  // have the same value in |BuildUpdateActions()|.
+  unordered_set<string> session_ids;
+  // Gather all the session IDs being passed to |OmahaRequestActions|.
+  auto CheckSessionId = [&session_ids](AbstractAction* aa) {
+    if (aa->Type() == OmahaRequestAction::StaticType())
+      session_ids.insert(static_cast<OmahaRequestAction*>(aa)->session_id_);
+  };
+  EXPECT_CALL(*processor_, EnqueueAction(Pointee(_)))
+      .WillRepeatedly(Invoke(CheckSessionId));
+  attempter_.BuildUpdateActions(false);
+  // Validate that all the session IDs are the same.
+  EXPECT_EQ(1, session_ids.size());
+  ScheduleQuitMainLoop();
+}
+
+TEST_F(UpdateAttempterTest, SessionIdTestConsistencyInUpdateFlow) {
+  loop_.PostTask(
+      FROM_HERE,
+      base::Bind(&UpdateAttempterTest::SessionIdTestConsistencyInUpdateFlow,
+                 base::Unretained(this)));
+  loop_.Run();
+}
+
+void UpdateAttempterTest::SessionIdTestInDownloadAction() {
+  // The session ID passed into |DownloadAction|'s |LibcurlHttpFetcher| should
+  // be enforced to be included in the HTTP header as X-Goog-Update-SessionId.
+  string header_value;
+  auto CheckSessionIdInDownloadAction = [&header_value](AbstractAction* aa) {
+    if (aa->Type() == DownloadActionChromeos::StaticType()) {
+      DownloadActionChromeos* da = static_cast<DownloadActionChromeos*>(aa);
+      EXPECT_TRUE(da->http_fetcher()->GetHeader(kXGoogleUpdateSessionId,
+                                                &header_value));
+    }
+  };
+  EXPECT_CALL(*processor_, EnqueueAction(Pointee(_)))
+      .WillRepeatedly(Invoke(CheckSessionIdInDownloadAction));
+  attempter_.BuildUpdateActions(false);
+  // Validate that X-Goog-Update-SessionId is set correctly in the HTTP
+  // header.
+  EXPECT_EQ(attempter_.session_id_, header_value);
+  ScheduleQuitMainLoop();
+}
+
+TEST_F(UpdateAttempterTest, SessionIdTestInDownloadAction) {
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::SessionIdTestInDownloadAction,
+                            base::Unretained(this)));
+  loop_.Run();
+}
+
+TEST_F(UpdateAttempterTest, ActionCompletedDownloadTest) {
+  unique_ptr<MockHttpFetcher> fetcher(new MockHttpFetcher("", 0, nullptr));
+  fetcher->FailTransfer(503);  // Sets the HTTP response code.
+  DownloadActionChromeos action(
+      prefs_, nullptr, nullptr, fetcher.release(), false /* interactive */);
+  attempter_.ActionCompleted(nullptr, &action, ErrorCode::kSuccess);
+  EXPECT_FALSE(prefs_->Exists(kPrefsDeltaUpdateFailures));
+  EXPECT_EQ(UpdateStatus::FINALIZING, attempter_.status());
+  EXPECT_EQ(0.0, attempter_.download_progress_);
+  ASSERT_EQ(nullptr, attempter_.error_event_.get());
+}
+
+TEST_F(UpdateAttempterTest, ActionCompletedErrorTest) {
+  MockAction action;
+  EXPECT_CALL(action, Type()).WillRepeatedly(Return("MockAction"));
+  attempter_.status_ = UpdateStatus::DOWNLOADING;
+  attempter_.ActionCompleted(nullptr, &action, ErrorCode::kError);
+  ASSERT_NE(nullptr, attempter_.error_event_.get());
+}
+
+TEST_F(UpdateAttempterTest, DownloadProgressAccumulationTest) {
+  // Simple test case, where all the values match (nothing was skipped)
+  uint64_t bytes_progressed_1 = 1024 * 1024;  // 1MB
+  uint64_t bytes_progressed_2 = 1024 * 1024;  // 1MB
+  uint64_t bytes_received_1 = bytes_progressed_1;
+  uint64_t bytes_received_2 = bytes_received_1 + bytes_progressed_2;
+  uint64_t bytes_total = 20 * 1024 * 1024;  // 20MB
+
+  double progress_1 =
+      static_cast<double>(bytes_received_1) / static_cast<double>(bytes_total);
+  double progress_2 =
+      static_cast<double>(bytes_received_2) / static_cast<double>(bytes_total);
+
+  EXPECT_EQ(0.0, attempter_.download_progress_);
+  // This is set via inspecting the InstallPlan payloads when the
+  // |OmahaResponseAction| is completed.
+  attempter_.new_payload_size_ = bytes_total;
+  NiceMock<MockServiceObserver> observer;
+  EXPECT_CALL(observer,
+              SendStatusUpdate(AllOf(
+                  Field(&UpdateEngineStatus::progress, progress_1),
+                  Field(&UpdateEngineStatus::status, UpdateStatus::DOWNLOADING),
+                  Field(&UpdateEngineStatus::new_size_bytes, bytes_total))));
+  EXPECT_CALL(observer,
+              SendStatusUpdate(AllOf(
+                  Field(&UpdateEngineStatus::progress, progress_2),
+                  Field(&UpdateEngineStatus::status, UpdateStatus::DOWNLOADING),
+                  Field(&UpdateEngineStatus::new_size_bytes, bytes_total))));
+  attempter_.AddObserver(&observer);
+  attempter_.BytesReceived(bytes_progressed_1, bytes_received_1, bytes_total);
+  EXPECT_EQ(progress_1, attempter_.download_progress_);
+  // This iteration validates that a later set of updates to the variables is
+  // properly handled (so that |GetStatus()| will return the same progress
+  // info as the callback is receiving).
+  attempter_.BytesReceived(bytes_progressed_2, bytes_received_2, bytes_total);
+  EXPECT_EQ(progress_2, attempter_.download_progress_);
+}
+
+TEST_F(UpdateAttempterTest, ChangeToDownloadingOnReceivedBytesTest) {
+  // The transition into |UpdateStatus::DOWNLOADING| happens when the
+  // first bytes are received.
+  uint64_t bytes_progressed = 1024 * 1024;    // 1MB
+  uint64_t bytes_received = 2 * 1024 * 1024;  // 2MB
+  uint64_t bytes_total = 20 * 1024 * 1024;    // 20MB
+  attempter_.status_ = UpdateStatus::CHECKING_FOR_UPDATE;
+  // This is set via inspecting the InstallPlan payloads when the
+  // |OmahaResponseAction| is completed.
+  attempter_.new_payload_size_ = bytes_total;
+  EXPECT_EQ(0.0, attempter_.download_progress_);
+  NiceMock<MockServiceObserver> observer;
+  EXPECT_CALL(observer,
+              SendStatusUpdate(AllOf(
+                  Field(&UpdateEngineStatus::status, UpdateStatus::DOWNLOADING),
+                  Field(&UpdateEngineStatus::new_size_bytes, bytes_total))));
+  attempter_.AddObserver(&observer);
+  attempter_.BytesReceived(bytes_progressed, bytes_received, bytes_total);
+  EXPECT_EQ(UpdateStatus::DOWNLOADING, attempter_.status_);
+}
+
+TEST_F(UpdateAttempterTest, BroadcastCompleteDownloadTest) {
+  // There is a special case to ensure that at 100% downloaded,
+  // |download_progress_| is updated and broadcast.
+  uint64_t bytes_progressed = 0;              // ignored
+  uint64_t bytes_received = 5 * 1024 * 1024;  // ignored
+  uint64_t bytes_total = 5 * 1024 * 1024;     // 5MB
+  attempter_.status_ = UpdateStatus::DOWNLOADING;
+  attempter_.new_payload_size_ = bytes_total;
+  EXPECT_EQ(0.0, attempter_.download_progress_);
+  NiceMock<MockServiceObserver> observer;
+  EXPECT_CALL(observer,
+              SendStatusUpdate(AllOf(
+                  Field(&UpdateEngineStatus::progress, 1.0),
+                  Field(&UpdateEngineStatus::status, UpdateStatus::DOWNLOADING),
+                  Field(&UpdateEngineStatus::new_size_bytes, bytes_total))));
+  attempter_.AddObserver(&observer);
+  attempter_.BytesReceived(bytes_progressed, bytes_received, bytes_total);
+  EXPECT_EQ(1.0, attempter_.download_progress_);
+}
+
+TEST_F(UpdateAttempterTest, ActionCompletedOmahaRequestTest) {
+  unique_ptr<MockHttpFetcher> fetcher(new MockHttpFetcher("", 0, nullptr));
+  fetcher->FailTransfer(500);  // Sets the HTTP response code.
+  OmahaRequestAction action(nullptr, std::move(fetcher), false, "");
+  ObjectCollectorAction<OmahaResponse> collector_action;
+  BondActions(&action, &collector_action);
+  OmahaResponse response;
+  response.poll_interval = 234;
+  action.SetOutputObject(response);
+  attempter_.ActionCompleted(nullptr, &action, ErrorCode::kSuccess);
+  EXPECT_FALSE(prefs_->Exists(kPrefsDeltaUpdateFailures));
+  EXPECT_EQ(500, attempter_.http_response_code());
+  EXPECT_EQ(UpdateStatus::IDLE, attempter_.status());
+  EXPECT_EQ(234U, attempter_.server_dictated_poll_interval_);
+  ASSERT_TRUE(attempter_.error_event_.get() == nullptr);
+}
+
+TEST_F(UpdateAttempterTest, ConstructWithUpdatedMarkerTest) {
+  string boot_id;
+  EXPECT_TRUE(utils::GetBootId(&boot_id));
+  FakeSystemState::Get()->fake_prefs()->SetString(kPrefsUpdateCompletedOnBootId,
+                                                  boot_id);
+  attempter_.Init();
+  EXPECT_EQ(UpdateStatus::UPDATED_NEED_REBOOT, attempter_.status());
+}
+
+TEST_F(UpdateAttempterTest, GetErrorCodeForActionTest) {
+  EXPECT_EQ(ErrorCode::kSuccess,
+            GetErrorCodeForAction(nullptr, ErrorCode::kSuccess));
+
+  OmahaRequestAction omaha_request_action(nullptr, nullptr, false, "");
+  EXPECT_EQ(ErrorCode::kOmahaRequestError,
+            GetErrorCodeForAction(&omaha_request_action, ErrorCode::kError));
+  OmahaResponseHandlerAction omaha_response_handler_action;
+  EXPECT_EQ(
+      ErrorCode::kOmahaResponseHandlerError,
+      GetErrorCodeForAction(&omaha_response_handler_action, ErrorCode::kError));
+  DynamicPartitionControlStub dynamic_control_stub;
+  FilesystemVerifierAction filesystem_verifier_action(&dynamic_control_stub);
+  EXPECT_EQ(
+      ErrorCode::kFilesystemVerifierError,
+      GetErrorCodeForAction(&filesystem_verifier_action, ErrorCode::kError));
+  PostinstallRunnerAction postinstall_runner_action(
+      FakeSystemState::Get()->fake_boot_control(),
+      FakeSystemState::Get()->fake_hardware());
+  EXPECT_EQ(
+      ErrorCode::kPostinstallRunnerError,
+      GetErrorCodeForAction(&postinstall_runner_action, ErrorCode::kError));
+  MockAction action_mock;
+  EXPECT_CALL(action_mock, Type()).WillOnce(Return("MockAction"));
+  EXPECT_EQ(ErrorCode::kError,
+            GetErrorCodeForAction(&action_mock, ErrorCode::kError));
+}
+
+TEST_F(UpdateAttempterTest, DisableDeltaUpdateIfNeededTest) {
+  attempter_.omaha_request_params_->set_delta_okay(true);
+  attempter_.DisableDeltaUpdateIfNeeded();
+  EXPECT_TRUE(attempter_.omaha_request_params_->delta_okay());
+  prefs_->SetInt64(kPrefsDeltaUpdateFailures,
+                   UpdateAttempter::kMaxDeltaUpdateFailures - 1);
+  attempter_.DisableDeltaUpdateIfNeeded();
+  EXPECT_TRUE(attempter_.omaha_request_params_->delta_okay());
+  prefs_->SetInt64(kPrefsDeltaUpdateFailures,
+                   UpdateAttempter::kMaxDeltaUpdateFailures);
+  attempter_.DisableDeltaUpdateIfNeeded();
+  EXPECT_FALSE(attempter_.omaha_request_params_->delta_okay());
+  attempter_.DisableDeltaUpdateIfNeeded();
+  EXPECT_FALSE(attempter_.omaha_request_params_->delta_okay());
+}
+
+TEST_F(UpdateAttempterTest, MarkDeltaUpdateFailureTest) {
+  attempter_.MarkDeltaUpdateFailure();
+
+  EXPECT_TRUE(prefs_->SetInt64(kPrefsDeltaUpdateFailures, -1));
+  attempter_.MarkDeltaUpdateFailure();
+  int64_t value = 0;
+  EXPECT_TRUE(prefs_->GetInt64(kPrefsDeltaUpdateFailures, &value));
+  EXPECT_EQ(value, 1);
+
+  attempter_.MarkDeltaUpdateFailure();
+  EXPECT_TRUE(prefs_->GetInt64(kPrefsDeltaUpdateFailures, &value));
+  EXPECT_EQ(value, 2);
+
+  EXPECT_TRUE(prefs_->SetInt64(kPrefsDeltaUpdateFailures,
+                               UpdateAttempter::kMaxDeltaUpdateFailures));
+  attempter_.MarkDeltaUpdateFailure();
+  EXPECT_TRUE(prefs_->GetInt64(kPrefsDeltaUpdateFailures, &value));
+  EXPECT_EQ(value, UpdateAttempter::kMaxDeltaUpdateFailures + 1);
+}
+
+TEST_F(UpdateAttempterTest, ScheduleErrorEventActionNoEventTest) {
+  EXPECT_CALL(*processor_, EnqueueAction(_)).Times(0);
+  EXPECT_CALL(*processor_, StartProcessing()).Times(0);
+  EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(), UpdateFailed(_))
+      .Times(0);
+  OmahaResponse response;
+  string url1 = "http://url1";
+  response.packages.push_back({.payload_urls = {url1, "https://url"}});
+  EXPECT_CALL(*(FakeSystemState::Get()->mock_payload_state()), GetCurrentUrl())
+      .WillRepeatedly(Return(url1));
+  FakeSystemState::Get()->mock_payload_state()->SetResponse(response);
+  attempter_.ScheduleErrorEventAction();
+  EXPECT_EQ(url1,
+            FakeSystemState::Get()->mock_payload_state()->GetCurrentUrl());
+}
+
+TEST_F(UpdateAttempterTest, ScheduleErrorEventActionTest) {
+  EXPECT_CALL(*processor_,
+              EnqueueAction(Pointee(Property(
+                  &AbstractAction::Type, OmahaRequestAction::StaticType()))));
+  EXPECT_CALL(*processor_, StartProcessing());
+  ErrorCode err = ErrorCode::kError;
+  EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(), UpdateFailed(err));
+  attempter_.error_event_.reset(new OmahaEvent(
+      OmahaEvent::kTypeUpdateComplete, OmahaEvent::kResultError, err));
+  attempter_.ScheduleErrorEventAction();
+  EXPECT_EQ(UpdateStatus::REPORTING_ERROR_EVENT, attempter_.status());
+}
+
+namespace {
+// Actions that will be built as part of an update check.
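+// |OmahaRequestAction| appears several times because, in addition to the
+// initial update check, separate Omaha requests are issued as event pings at
+// later stages (e.g. download started/finished, update complete).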
+vector<string> GetUpdateActionTypes() {
+  return {OmahaRequestAction::StaticType(),
+          OmahaResponseHandlerAction::StaticType(),
+          UpdateBootFlagsAction::StaticType(),
+          OmahaRequestAction::StaticType(),
+          DownloadActionChromeos::StaticType(),
+          OmahaRequestAction::StaticType(),
+          FilesystemVerifierAction::StaticType(),
+          PostinstallRunnerAction::StaticType(),
+          OmahaRequestAction::StaticType()};
+}
+
+// Actions that will be built as part of a user-initiated rollback.
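+// Rollback reuses the install machinery: an |InstallPlanAction| feeds a plan
+// targeting the rollback slot into the |PostinstallRunnerAction|.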
+vector<string> GetRollbackActionTypes() {
+  return {InstallPlanAction::StaticType(),
+          PostinstallRunnerAction::StaticType()};
+}
+
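+// Each entry pairs a number of days with the cumulative percentage of
+// devices expected to have been staged by that point.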
+const StagingSchedule kValidStagingSchedule = {
+    {4, 10}, {10, 40}, {19, 70}, {26, 100}};
+
+}  // namespace
+
+void UpdateAttempterTest::UpdateTestStart() {
+  attempter_.set_http_response_code(200);
+
+  // Expect that the device policy is loaded by the |UpdateAttempter| at some
+  // point by calling |RefreshDevicePolicy()|.
+  auto device_policy = std::make_unique<policy::MockDevicePolicy>();
+  EXPECT_CALL(*device_policy, LoadPolicy())
+      .Times(testing::AtLeast(1))
+      .WillRepeatedly(Return(true));
+  attempter_.policy_provider_.reset(
+      new policy::PolicyProvider(std::move(device_policy)));
+
+  {
+    InSequence s;
+    for (const auto& update_action_type : GetUpdateActionTypes()) {
+      EXPECT_CALL(*processor_,
+                  EnqueueAction(Pointee(
+                      Property(&AbstractAction::Type, update_action_type))));
+    }
+    EXPECT_CALL(*processor_, StartProcessing());
+  }
+
+  attempter_.Update({});
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::UpdateTestVerify,
+                            base::Unretained(this)));
+}
+
+void UpdateAttempterTest::UpdateTestVerify() {
+  EXPECT_EQ(0, attempter_.http_response_code());
+  EXPECT_EQ(&attempter_, processor_->delegate());
+  EXPECT_EQ(UpdateStatus::CHECKING_FOR_UPDATE, attempter_.status());
+  loop_.BreakLoop();
+}
+
+void UpdateAttempterTest::RollbackTestStart(bool enterprise_rollback,
+                                            bool valid_slot) {
+  // Create a device policy so that we can change settings.
+  auto device_policy = std::make_unique<policy::MockDevicePolicy>();
+  EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true));
+  FakeSystemState::Get()->set_device_policy(device_policy.get());
+  if (enterprise_rollback) {
+    // We return an empty owner as this is an enterprise-enrolled device.
+    EXPECT_CALL(*device_policy, GetOwner(_))
+        .WillRepeatedly(DoAll(SetArgPointee<0>(string("")), Return(true)));
+  } else {
+    // We return a fake owner as this is an owned consumer device.
+    EXPECT_CALL(*device_policy, GetOwner(_))
+        .WillRepeatedly(DoAll(SetArgPointee<0>(string("fake.mail@fake.com")),
+                              Return(true)));
+  }
+
+  attempter_.policy_provider_.reset(
+      new policy::PolicyProvider(std::move(device_policy)));
+
+  if (valid_slot) {
+    BootControlInterface::Slot rollback_slot = 1;
+    LOG(INFO) << "Test Mark Bootable: "
+              << BootControlInterface::SlotName(rollback_slot);
+    FakeSystemState::Get()->fake_boot_control()->SetSlotBootable(rollback_slot,
+                                                                 true);
+  }
+
+  bool is_rollback_allowed = false;
+
+  // We only allow rollback on devices that are not enterprise enrolled and
+  // which have a valid slot to roll back to.
+  if (!enterprise_rollback && valid_slot) {
+    is_rollback_allowed = true;
+  }
+
+  if (is_rollback_allowed) {
+    InSequence s;
+    for (const auto& rollback_action_type : GetRollbackActionTypes()) {
+      EXPECT_CALL(*processor_,
+                  EnqueueAction(Pointee(
+                      Property(&AbstractAction::Type, rollback_action_type))));
+    }
+    EXPECT_CALL(*processor_, StartProcessing());
+
+    EXPECT_TRUE(attempter_.Rollback(true));
+    loop_.PostTask(FROM_HERE,
+                   base::Bind(&UpdateAttempterTest::RollbackTestVerify,
+                              base::Unretained(this)));
+  } else {
+    EXPECT_FALSE(attempter_.Rollback(true));
+    loop_.BreakLoop();
+  }
+}
+
+void UpdateAttempterTest::RollbackTestVerify() {
+  // Verifies the actions that were enqueued.
+  EXPECT_EQ(&attempter_, processor_->delegate());
+  EXPECT_EQ(UpdateStatus::ATTEMPTING_ROLLBACK, attempter_.status());
+  EXPECT_EQ(0U, attempter_.install_plan_->partitions.size());
+  EXPECT_EQ(attempter_.install_plan_->powerwash_required, true);
+  loop_.BreakLoop();
+}
+
+TEST_F(UpdateAttempterTest, UpdateTest) {
+  UpdateTestStart();
+  loop_.Run();
+}
+
+TEST_F(UpdateAttempterTest, RollbackTest) {
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::RollbackTestStart,
+                            base::Unretained(this),
+                            false,
+                            true));
+  loop_.Run();
+}
+
+TEST_F(UpdateAttempterTest, InvalidSlotRollbackTest) {
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::RollbackTestStart,
+                            base::Unretained(this),
+                            false,
+                            false));
+  loop_.Run();
+}
+
+TEST_F(UpdateAttempterTest, EnterpriseRollbackTest) {
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::RollbackTestStart,
+                            base::Unretained(this),
+                            true,
+                            true));
+  loop_.Run();
+}
+
+void UpdateAttempterTest::PingOmahaTestStart() {
+  EXPECT_CALL(*processor_,
+              EnqueueAction(Pointee(Property(
+                  &AbstractAction::Type, OmahaRequestAction::StaticType()))));
+  EXPECT_CALL(*processor_, StartProcessing());
+  attempter_.PingOmaha();
+  ScheduleQuitMainLoop();
+}
+
+TEST_F(UpdateAttempterTest, PingOmahaTest) {
+  EXPECT_FALSE(attempter_.waiting_for_scheduled_check_);
+  EXPECT_FALSE(attempter_.WasScheduleUpdatesCalled());
+  // Disable scheduling of subsequent checks; we're using the |DefaultPolicy|
+  // in testing, which is more permissive than we want to handle here.
+  attempter_.DisableScheduleUpdates();
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::PingOmahaTestStart,
+                            base::Unretained(this)));
+  brillo::MessageLoopRunMaxIterations(&loop_, 100);
+  EXPECT_EQ(UpdateStatus::UPDATED_NEED_REBOOT, attempter_.status());
+  EXPECT_TRUE(attempter_.WasScheduleUpdatesCalled());
+}
+
+TEST_F(UpdateAttempterTest, CreatePendingErrorEventTest) {
+  MockAction action;
+  const ErrorCode kCode = ErrorCode::kDownloadTransferError;
+  attempter_.CreatePendingErrorEvent(&action, kCode);
+  ASSERT_NE(nullptr, attempter_.error_event_.get());
+  EXPECT_EQ(OmahaEvent::kTypeUpdateComplete, attempter_.error_event_->type);
+  EXPECT_EQ(OmahaEvent::kResultError, attempter_.error_event_->result);
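+  // The expected code below also carries |kTestOmahaUrlFlag|, presumably
+  // because the fixture's request params use a non-production Omaha URL.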
+  EXPECT_EQ(
+      static_cast<ErrorCode>(static_cast<int>(kCode) |
+                             static_cast<int>(ErrorCode::kTestOmahaUrlFlag)),
+      attempter_.error_event_->error_code);
+}
+
+TEST_F(UpdateAttempterTest, CreatePendingErrorEventResumedTest) {
+  attempter_.install_plan_.reset(new InstallPlan);
+  attempter_.install_plan_->is_resume = true;
+  MockAction action;
+  const ErrorCode kCode = ErrorCode::kInstallDeviceOpenError;
+  attempter_.CreatePendingErrorEvent(&action, kCode);
+  ASSERT_NE(nullptr, attempter_.error_event_.get());
+  EXPECT_EQ(OmahaEvent::kTypeUpdateComplete, attempter_.error_event_->type);
+  EXPECT_EQ(OmahaEvent::kResultError, attempter_.error_event_->result);
+  EXPECT_EQ(
+      static_cast<ErrorCode>(static_cast<int>(kCode) |
+                             static_cast<int>(ErrorCode::kResumedFlag) |
+                             static_cast<int>(ErrorCode::kTestOmahaUrlFlag)),
+      attempter_.error_event_->error_code);
+}
+
+TEST_F(UpdateAttempterTest, P2PNotStartedAtStartupWhenNotEnabled) {
+  MockP2PManager mock_p2p_manager;
+  FakeSystemState::Get()->set_p2p_manager(&mock_p2p_manager);
+  mock_p2p_manager.fake().SetP2PEnabled(false);
+  EXPECT_CALL(mock_p2p_manager, EnsureP2PRunning()).Times(0);
+  attempter_.UpdateEngineStarted();
+}
+
+TEST_F(UpdateAttempterTest, P2PNotStartedAtStartupWhenEnabledButNotSharing) {
+  MockP2PManager mock_p2p_manager;
+  FakeSystemState::Get()->set_p2p_manager(&mock_p2p_manager);
+  mock_p2p_manager.fake().SetP2PEnabled(true);
+  EXPECT_CALL(mock_p2p_manager, EnsureP2PRunning()).Times(0);
+  attempter_.UpdateEngineStarted();
+}
+
+TEST_F(UpdateAttempterTest, P2PStartedAtStartupWhenEnabledAndSharing) {
+  MockP2PManager mock_p2p_manager;
+  FakeSystemState::Get()->set_p2p_manager(&mock_p2p_manager);
+  mock_p2p_manager.fake().SetP2PEnabled(true);
+  mock_p2p_manager.fake().SetCountSharedFilesResult(1);
+  EXPECT_CALL(mock_p2p_manager, EnsureP2PRunning());
+  attempter_.UpdateEngineStarted();
+}
+
+TEST_F(UpdateAttempterTest, P2PNotEnabled) {
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::P2PNotEnabledStart,
+                            base::Unretained(this)));
+  loop_.Run();
+}
+
+void UpdateAttempterTest::P2PNotEnabledStart() {
+  // If P2P is not enabled, check that we do not attempt housekeeping
+  // and do not convey that P2P is to be used.
+  MockP2PManager mock_p2p_manager;
+  FakeSystemState::Get()->set_p2p_manager(&mock_p2p_manager);
+  mock_p2p_manager.fake().SetP2PEnabled(false);
+  EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()).Times(0);
+  attempter_.Update({});
+  EXPECT_FALSE(actual_using_p2p_for_downloading_);
+  EXPECT_FALSE(actual_using_p2p_for_sharing());
+  ScheduleQuitMainLoop();
+}
+
+TEST_F(UpdateAttempterTest, P2PEnabledStartingFails) {
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::P2PEnabledStartingFailsStart,
+                            base::Unretained(this)));
+  loop_.Run();
+}
+
+void UpdateAttempterTest::P2PEnabledStartingFailsStart() {
+  // If P2P is enabled but starting it fails, ensure we don't do any
+  // housekeeping and do not convey that P2P should be used.
+  MockP2PManager mock_p2p_manager;
+  FakeSystemState::Get()->set_p2p_manager(&mock_p2p_manager);
+  mock_p2p_manager.fake().SetP2PEnabled(true);
+  mock_p2p_manager.fake().SetEnsureP2PRunningResult(false);
+  mock_p2p_manager.fake().SetPerformHousekeepingResult(false);
+  EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()).Times(0);
+  attempter_.Update({});
+  EXPECT_FALSE(actual_using_p2p_for_downloading());
+  EXPECT_FALSE(actual_using_p2p_for_sharing());
+  ScheduleQuitMainLoop();
+}
+
+TEST_F(UpdateAttempterTest, P2PEnabledHousekeepingFails) {
+  loop_.PostTask(
+      FROM_HERE,
+      base::Bind(&UpdateAttempterTest::P2PEnabledHousekeepingFailsStart,
+                 base::Unretained(this)));
+  loop_.Run();
+}
+
+void UpdateAttempterTest::P2PEnabledHousekeepingFailsStart() {
+  // If P2P is enabled and starting it works but housekeeping fails, ensure
+  // we do not convey that P2P is to be used.
+  MockP2PManager mock_p2p_manager;
+  FakeSystemState::Get()->set_p2p_manager(&mock_p2p_manager);
+  mock_p2p_manager.fake().SetP2PEnabled(true);
+  mock_p2p_manager.fake().SetEnsureP2PRunningResult(true);
+  mock_p2p_manager.fake().SetPerformHousekeepingResult(false);
+  EXPECT_CALL(mock_p2p_manager, PerformHousekeeping());
+  attempter_.Update({});
+  EXPECT_FALSE(actual_using_p2p_for_downloading());
+  EXPECT_FALSE(actual_using_p2p_for_sharing());
+  ScheduleQuitMainLoop();
+}
+
+TEST_F(UpdateAttempterTest, P2PEnabled) {
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::P2PEnabledStart,
+                            base::Unretained(this)));
+  loop_.Run();
+}
+
+void UpdateAttempterTest::P2PEnabledStart() {
+  MockP2PManager mock_p2p_manager;
+  FakeSystemState::Get()->set_p2p_manager(&mock_p2p_manager);
+  // If P2P is enabled and starting it works, check that we performed
+  // housekeeping and that we convey P2P should be used.
+  mock_p2p_manager.fake().SetP2PEnabled(true);
+  mock_p2p_manager.fake().SetEnsureP2PRunningResult(true);
+  mock_p2p_manager.fake().SetPerformHousekeepingResult(true);
+  EXPECT_CALL(mock_p2p_manager, PerformHousekeeping());
+  attempter_.Update({});
+  EXPECT_TRUE(actual_using_p2p_for_downloading());
+  EXPECT_TRUE(actual_using_p2p_for_sharing());
+  ScheduleQuitMainLoop();
+}
+
+TEST_F(UpdateAttempterTest, P2PEnabledInteractive) {
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::P2PEnabledInteractiveStart,
+                            base::Unretained(this)));
+  loop_.Run();
+}
+
+void UpdateAttempterTest::P2PEnabledInteractiveStart() {
+  MockP2PManager mock_p2p_manager;
+  FakeSystemState::Get()->set_p2p_manager(&mock_p2p_manager);
+  // For an interactive check, if P2P is enabled and starting it
+  // works, check that we performed housekeeping and that we convey
+  // P2P should be used for sharing but NOT for downloading.
+  mock_p2p_manager.fake().SetP2PEnabled(true);
+  mock_p2p_manager.fake().SetEnsureP2PRunningResult(true);
+  mock_p2p_manager.fake().SetPerformHousekeepingResult(true);
+  EXPECT_CALL(mock_p2p_manager, PerformHousekeeping());
+  attempter_.Update({.interactive = true});
+  EXPECT_FALSE(actual_using_p2p_for_downloading());
+  EXPECT_TRUE(actual_using_p2p_for_sharing());
+  ScheduleQuitMainLoop();
+}
+
+TEST_F(UpdateAttempterTest, ReadScatterFactorFromPolicy) {
+  loop_.PostTask(
+      FROM_HERE,
+      base::Bind(&UpdateAttempterTest::ReadScatterFactorFromPolicyTestStart,
+                 base::Unretained(this)));
+  loop_.Run();
+}
+
+// Tests that the scatter_factor_in_seconds value is properly fetched
+// from the device policy.
+void UpdateAttempterTest::ReadScatterFactorFromPolicyTestStart() {
+  int64_t scatter_factor_in_seconds = 36000;
+
+  auto device_policy = std::make_unique<policy::MockDevicePolicy>();
+  EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true));
+  FakeSystemState::Get()->set_device_policy(device_policy.get());
+
+  EXPECT_CALL(*device_policy, GetScatterFactorInSeconds(_))
+      .WillRepeatedly(
+          DoAll(SetArgPointee<0>(scatter_factor_in_seconds), Return(true)));
+
+  attempter_.policy_provider_.reset(
+      new policy::PolicyProvider(std::move(device_policy)));
+
+  attempter_.Update({});
+  EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds());
+
+  ScheduleQuitMainLoop();
+}
+
+TEST_F(UpdateAttempterTest, DecrementUpdateCheckCountTest) {
+  loop_.PostTask(
+      FROM_HERE,
+      base::Bind(&UpdateAttempterTest::DecrementUpdateCheckCountTestStart,
+                 base::Unretained(this)));
+  loop_.Run();
+}
+
+void UpdateAttempterTest::DecrementUpdateCheckCountTestStart() {
+  // Tests that the scatter_factor_in_seconds value is properly fetched
+  // from the device policy and that the update check count is decremented
+  // when its value is > 0.
+  int64_t initial_value = 5;
+  auto* fake_prefs = FakeSystemState::Get()->fake_prefs();
+  FakeSystemState::Get()->fake_hardware()->SetIsOOBEComplete(Time::UnixEpoch());
+
+  EXPECT_TRUE(fake_prefs->SetInt64(kPrefsUpdateCheckCount, initial_value));
+
+  int64_t scatter_factor_in_seconds = 10;
+
+  auto device_policy = std::make_unique<policy::MockDevicePolicy>();
+  EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true));
+  FakeSystemState::Get()->set_device_policy(device_policy.get());
+
+  EXPECT_CALL(*device_policy, GetScatterFactorInSeconds(_))
+      .WillRepeatedly(
+          DoAll(SetArgPointee<0>(scatter_factor_in_seconds), Return(true)));
+
+  attempter_.policy_provider_.reset(
+      new policy::PolicyProvider(std::move(device_policy)));
+
+  attempter_.Update({});
+  EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds());
+
+  // Make sure the file still exists.
+  EXPECT_TRUE(fake_prefs->Exists(kPrefsUpdateCheckCount));
+
+  int64_t new_value;
+  EXPECT_TRUE(fake_prefs->GetInt64(kPrefsUpdateCheckCount, &new_value));
+  EXPECT_EQ(initial_value - 1, new_value);
+
+  EXPECT_TRUE(
+      attempter_.omaha_request_params_->update_check_count_wait_enabled());
+
+  // However, if the count is already 0, it's not decremented. Test that.
+  initial_value = 0;
+  EXPECT_TRUE(fake_prefs->SetInt64(kPrefsUpdateCheckCount, initial_value));
+  attempter_.Update({});
+  EXPECT_TRUE(fake_prefs->Exists(kPrefsUpdateCheckCount));
+  EXPECT_TRUE(fake_prefs->GetInt64(kPrefsUpdateCheckCount, &new_value));
+  EXPECT_EQ(initial_value, new_value);
+
+  ScheduleQuitMainLoop();
+}
+
+TEST_F(UpdateAttempterTest, NoScatteringDoneDuringManualUpdateTestStart) {
+  loop_.PostTask(
+      FROM_HERE,
+      base::Bind(
+          &UpdateAttempterTest::NoScatteringDoneDuringManualUpdateTestStart,
+          base::Unretained(this)));
+  loop_.Run();
+}
+
+void UpdateAttempterTest::NoScatteringDoneDuringManualUpdateTestStart() {
+  // Tests that no scattering logic is enabled if the update check
+  // is manually done (as opposed to a scheduled update check).
+  int64_t initial_value = 8;
+  auto* fake_prefs = FakeSystemState::Get()->fake_prefs();
+  FakeSystemState::Get()->fake_hardware()->SetIsOOBEComplete(Time::UnixEpoch());
+
+  EXPECT_TRUE(
+      fake_prefs->SetInt64(kPrefsWallClockScatteringWaitPeriod, initial_value));
+  EXPECT_TRUE(fake_prefs->SetInt64(kPrefsUpdateCheckCount, initial_value));
+
+  // Make sure scatter_factor is non-zero, as scattering is disabled
+  // otherwise.
+  int64_t scatter_factor_in_seconds = 50;
+
+  auto device_policy = std::make_unique<policy::MockDevicePolicy>();
+  EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true));
+  FakeSystemState::Get()->set_device_policy(device_policy.get());
+
+  EXPECT_CALL(*device_policy, GetScatterFactorInSeconds(_))
+      .WillRepeatedly(
+          DoAll(SetArgPointee<0>(scatter_factor_in_seconds), Return(true)));
+
+  attempter_.policy_provider_.reset(
+      new policy::PolicyProvider(std::move(device_policy)));
+
+  // Trigger an interactive check so we can test that scattering is disabled.
+  attempter_.Update({.interactive = true});
+  EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds());
+
+  // Make sure scattering is disabled for manual (i.e., user-initiated)
+  // update checks and all artifacts are removed.
+  EXPECT_FALSE(
+      attempter_.omaha_request_params_->wall_clock_based_wait_enabled());
+  EXPECT_FALSE(fake_prefs->Exists(kPrefsWallClockScatteringWaitPeriod));
+  EXPECT_EQ(0, attempter_.omaha_request_params_->waiting_period().InSeconds());
+  EXPECT_FALSE(
+      attempter_.omaha_request_params_->update_check_count_wait_enabled());
+  EXPECT_FALSE(fake_prefs->Exists(kPrefsUpdateCheckCount));
+
+  ScheduleQuitMainLoop();
+}
+
+void UpdateAttempterTest::SetUpStagingTest(const StagingSchedule& schedule) {
+  int64_t initial_value = 8;
+  EXPECT_TRUE(
+      prefs_->SetInt64(kPrefsWallClockScatteringWaitPeriod, initial_value));
+  EXPECT_TRUE(prefs_->SetInt64(kPrefsUpdateCheckCount, initial_value));
+  attempter_.scatter_factor_ = TimeDelta::FromSeconds(20);
+
+  auto device_policy = std::make_unique<policy::MockDevicePolicy>();
+  EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true));
+  FakeSystemState::Get()->set_device_policy(device_policy.get());
+  EXPECT_CALL(*device_policy, GetDeviceUpdateStagingSchedule(_))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(schedule), Return(true)));
+
+  attempter_.policy_provider_.reset(
+      new policy::PolicyProvider(std::move(device_policy)));
+}
+
+TEST_F(UpdateAttempterTest, StagingSetsPrefsAndTurnsOffScattering) {
+  loop_.PostTask(
+      FROM_HERE,
+      base::Bind(
+          &UpdateAttempterTest::StagingSetsPrefsAndTurnsOffScatteringStart,
+          base::Unretained(this)));
+  loop_.Run();
+}
+
+void UpdateAttempterTest::StagingSetsPrefsAndTurnsOffScatteringStart() {
+  // Tests that staging sets its prefs properly and turns off scattering.
+  FakeSystemState::Get()->fake_hardware()->SetIsOOBEComplete(Time::UnixEpoch());
+  SetUpStagingTest(kValidStagingSchedule);
+
+  attempter_.Update({});
+  auto* fake_prefs = FakeSystemState::Get()->fake_prefs();
+  // Check that prefs have the correct values.
+  int64_t update_count;
+  EXPECT_TRUE(fake_prefs->GetInt64(kPrefsUpdateCheckCount, &update_count));
+  int64_t waiting_time_days;
+  EXPECT_TRUE(fake_prefs->GetInt64(kPrefsWallClockStagingWaitPeriod,
+                                   &waiting_time_days));
+  EXPECT_GT(waiting_time_days, 0);
+  // Update count should have been decremented.
+  EXPECT_EQ(7, update_count);
+  // Check that Omaha parameters were updated correctly.
+  EXPECT_TRUE(
+      attempter_.omaha_request_params_->update_check_count_wait_enabled());
+  EXPECT_TRUE(
+      attempter_.omaha_request_params_->wall_clock_based_wait_enabled());
+  EXPECT_EQ(waiting_time_days,
+            attempter_.omaha_request_params_->waiting_period().InDays());
+  // Check class variables.
+  EXPECT_EQ(waiting_time_days, attempter_.staging_wait_time_.InDays());
+  EXPECT_EQ(kValidStagingSchedule, attempter_.staging_schedule_);
+  // Check that scattering is turned off.
+  EXPECT_EQ(0, attempter_.scatter_factor_.InSeconds());
+  EXPECT_FALSE(fake_prefs->Exists(kPrefsWallClockScatteringWaitPeriod));
+
+  ScheduleQuitMainLoop();
+}
+
+void UpdateAttempterTest::CheckStagingOff() {
+  // Check that all prefs were removed.
+  EXPECT_FALSE(prefs_->Exists(kPrefsUpdateCheckCount));
+  EXPECT_FALSE(prefs_->Exists(kPrefsWallClockScatteringWaitPeriod));
+  EXPECT_FALSE(prefs_->Exists(kPrefsWallClockStagingWaitPeriod));
+  // Check that the Omaha parameters have the correct value.
+  EXPECT_EQ(0, attempter_.omaha_request_params_->waiting_period().InDays());
+  EXPECT_EQ(attempter_.omaha_request_params_->waiting_period(),
+            attempter_.staging_wait_time_);
+  EXPECT_FALSE(
+      attempter_.omaha_request_params_->update_check_count_wait_enabled());
+  EXPECT_FALSE(
+      attempter_.omaha_request_params_->wall_clock_based_wait_enabled());
+  // Check that scattering is turned off too.
+  EXPECT_EQ(0, attempter_.scatter_factor_.InSeconds());
+}
+
+TEST_F(UpdateAttempterTest, StagingOffIfInteractive) {
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::StagingOffIfInteractiveStart,
+                            base::Unretained(this)));
+  loop_.Run();
+}
+
+void UpdateAttempterTest::StagingOffIfInteractiveStart() {
+  // Tests that staging is turned off when an interactive update is requested.
+  FakeSystemState::Get()->fake_hardware()->SetIsOOBEComplete(Time::UnixEpoch());
+  SetUpStagingTest(kValidStagingSchedule);
+
+  attempter_.Update({.interactive = true});
+  CheckStagingOff();
+
+  ScheduleQuitMainLoop();
+}
+
+TEST_F(UpdateAttempterTest, StagingOffIfOobe) {
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::StagingOffIfOobeStart,
+                            base::Unretained(this)));
+  loop_.Run();
+}
+
+void UpdateAttempterTest::StagingOffIfOobeStart() {
+  // Tests that staging is turned off if OOBE hasn't been completed.
+  FakeSystemState::Get()->fake_hardware()->SetIsOOBEEnabled(true);
+  FakeSystemState::Get()->fake_hardware()->UnsetIsOOBEComplete();
+  SetUpStagingTest(kValidStagingSchedule);
+
+  attempter_.Update({.interactive = true});
+  CheckStagingOff();
+
+  ScheduleQuitMainLoop();
+}
+
+// Checks that we only report daily metrics at most every 24 hours.
+TEST_F(UpdateAttempterTest, ReportDailyMetrics) {
+  auto* fake_clock = FakeSystemState::Get()->fake_clock();
+  Time epoch = Time::FromInternalValue(0);
+  fake_clock->SetWallclockTime(epoch);
+
+  // If there is no kPrefsDailyMetricsLastReportedAt state variable,
+  // we should report.
+  EXPECT_TRUE(attempter_.CheckAndReportDailyMetrics());
+  // We should not report again if no time has passed.
+  EXPECT_FALSE(attempter_.CheckAndReportDailyMetrics());
+
+  // We should not report if only 10 hours has passed.
+  fake_clock->SetWallclockTime(epoch + TimeDelta::FromHours(10));
+  EXPECT_FALSE(attempter_.CheckAndReportDailyMetrics());
+
+  // We should not report if only 24 hours - 1 sec has passed.
+  fake_clock->SetWallclockTime(epoch + TimeDelta::FromHours(24) -
+                               TimeDelta::FromSeconds(1));
+  EXPECT_FALSE(attempter_.CheckAndReportDailyMetrics());
+
+  // We should report if 24 hours has passed.
+  fake_clock->SetWallclockTime(epoch + TimeDelta::FromHours(24));
+  EXPECT_TRUE(attempter_.CheckAndReportDailyMetrics());
+
+  // But then we should not report again..
+  EXPECT_FALSE(attempter_.CheckAndReportDailyMetrics());
+
+  // .. until another 24 hours has passed
+  fake_clock->SetWallclockTime(epoch + TimeDelta::FromHours(47));
+  EXPECT_FALSE(attempter_.CheckAndReportDailyMetrics());
+  fake_clock->SetWallclockTime(epoch + TimeDelta::FromHours(48));
+  EXPECT_TRUE(attempter_.CheckAndReportDailyMetrics());
+  EXPECT_FALSE(attempter_.CheckAndReportDailyMetrics());
+
+  // .. and another 24 hours
+  fake_clock->SetWallclockTime(epoch + TimeDelta::FromHours(71));
+  EXPECT_FALSE(attempter_.CheckAndReportDailyMetrics());
+  fake_clock->SetWallclockTime(epoch + TimeDelta::FromHours(72));
+  EXPECT_TRUE(attempter_.CheckAndReportDailyMetrics());
+  EXPECT_FALSE(attempter_.CheckAndReportDailyMetrics());
+
+  // If the span between time of reporting and present time is
+  // negative, we report. This is in order to reset the timestamp and
+  // avoid an edge condition whereby a distant point in the future is
+  // in the state variable resulting in us never ever reporting again.
+  fake_clock->SetWallclockTime(epoch + TimeDelta::FromHours(71));
+  EXPECT_TRUE(attempter_.CheckAndReportDailyMetrics());
+  EXPECT_FALSE(attempter_.CheckAndReportDailyMetrics());
+
+  // In this case we should not report again until the clock reads
+  // 71 + 24 = 95. Check that.
+  fake_clock->SetWallclockTime(epoch + TimeDelta::FromHours(94));
+  EXPECT_FALSE(attempter_.CheckAndReportDailyMetrics());
+  fake_clock->SetWallclockTime(epoch + TimeDelta::FromHours(95));
+  EXPECT_TRUE(attempter_.CheckAndReportDailyMetrics());
+  EXPECT_FALSE(attempter_.CheckAndReportDailyMetrics());
+}
+
+TEST_F(UpdateAttempterTest, BootTimeInUpdateMarkerFile) {
+  FakeSystemState::Get()->fake_clock()->SetBootTime(Time::FromTimeT(42));
+  attempter_.Init();
+
+  Time boot_time;
+  EXPECT_FALSE(attempter_.GetBootTimeAtUpdate(&boot_time));
+
+  attempter_.WriteUpdateCompletedMarker();
+
+  EXPECT_TRUE(attempter_.GetBootTimeAtUpdate(&boot_time));
+  EXPECT_EQ(boot_time.ToTimeT(), 42);
+}
+
+TEST_F(UpdateAttempterTest, AnyUpdateSourceAllowedUnofficial) {
+  FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(false);
+  EXPECT_TRUE(attempter_.IsAnyUpdateSourceAllowed());
+}
+
+TEST_F(UpdateAttempterTest, AnyUpdateSourceAllowedOfficialDevmode) {
+  FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(true);
+  FakeSystemState::Get()->fake_hardware()->SetAreDevFeaturesEnabled(true);
+  EXPECT_TRUE(attempter_.IsAnyUpdateSourceAllowed());
+}
+
+TEST_F(UpdateAttempterTest, AnyUpdateSourceDisallowedOfficialNormal) {
+  FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(true);
+  FakeSystemState::Get()->fake_hardware()->SetAreDevFeaturesEnabled(false);
+  EXPECT_FALSE(attempter_.IsAnyUpdateSourceAllowed());
+}
+
+// TODO(kimjae): Follow testing pattern with params for |CheckForInstall()|.
+// When adding, remove older tests related to |CheckForInstall()|.
+TEST_F(UpdateAttempterTest, CheckForInstallNotIdleFails) {
+  for (const auto status : kNonIdleUpdateStatuses) {
+    // GIVEN a non-idle status.
+    attempter_.status_ = status;
+
+    EXPECT_FALSE(attempter_.CheckForInstall({}, ""));
+  }
+}
+
+TEST_F(UpdateAttempterTest, CheckForUpdateNotIdleFails) {
+  for (const auto status : kNonIdleUpdateStatuses) {
+    // GIVEN a non-idle status.
+    cfu_params_.status = status;
+
+    // THEN |ScheduleUpdates()| should not be called.
+    cfu_params_.should_schedule_updates_be_called = false;
+    // THEN result should indicate failure.
+    cfu_params_.expected_result = false;
+
+    TestCheckForUpdate();
+  }
+}
+
+TEST_F(UpdateAttempterTest, CheckForUpdateOfficalBuildClearsSource) {
+  // GIVEN an official build.
+
+  // THEN we expect forced app version + forced omaha url to be cleared.
+
+  TestCheckForUpdate();
+}
+
+TEST_F(UpdateAttempterTest, CheckForUpdateUnofficialBuildChangesSource) {
+  // GIVEN a nonofficial build with dev features enabled.
+  cfu_params_.is_official_build = false;
+  cfu_params_.are_dev_features_enabled = true;
+
+  // THEN the forced app version + forced omaha url changes based on input.
+  cfu_params_.expected_forced_app_version = cfu_params_.app_version;
+  cfu_params_.expected_forced_omaha_url = cfu_params_.omaha_url;
+
+  TestCheckForUpdate();
+}
+
+TEST_F(UpdateAttempterTest, CheckForUpdateOfficialBuildScheduledAUTest) {
+  // GIVEN a scheduled autest omaha url.
+  cfu_params_.omaha_url = "autest-scheduled";
+
+  // THEN forced app version is cleared.
+  // THEN forced omaha url changes to default constant.
+  cfu_params_.expected_forced_omaha_url = constants::kOmahaDefaultAUTestURL;
+
+  TestCheckForUpdate();
+}
+
+TEST_F(UpdateAttempterTest, CheckForUpdateUnofficialBuildScheduledAUTest) {
+  // GIVEN a scheduled autest omaha url.
+  cfu_params_.omaha_url = "autest-scheduled";
+  // GIVEN a nonofficial build with dev features enabled.
+  cfu_params_.is_official_build = false;
+  cfu_params_.are_dev_features_enabled = true;
+
+  // THEN forced app version changes based on input.
+  cfu_params_.expected_forced_app_version = cfu_params_.app_version;
+  // THEN forced omaha url changes to default constant.
+  cfu_params_.expected_forced_omaha_url = constants::kOmahaDefaultAUTestURL;
+
+  TestCheckForUpdate();
+}
+
+TEST_F(UpdateAttempterTest, CheckForUpdateOfficialBuildAUTest) {
+  // GIVEN an autest omaha url.
+  cfu_params_.omaha_url = "autest";
+
+  // THEN forced app version is cleared.
+  // THEN forced omaha url changes to default constant.
+  cfu_params_.expected_forced_omaha_url = constants::kOmahaDefaultAUTestURL;
+
+  TestCheckForUpdate();
+}
+
+TEST_F(UpdateAttempterTest, CheckForUpdateUnofficialBuildAUTest) {
+  // GIVEN an autest omaha url.
+  cfu_params_.omaha_url = "autest";
+  // GIVEN a nonofficial build with dev features enabled.
+  cfu_params_.is_official_build = false;
+  cfu_params_.are_dev_features_enabled = true;
+
+  // THEN forced app version changes based on input.
+  cfu_params_.expected_forced_app_version = cfu_params_.app_version;
+  // THEN forced omaha url changes to default constant.
+  cfu_params_.expected_forced_omaha_url = constants::kOmahaDefaultAUTestURL;
+
+  TestCheckForUpdate();
+}
+
+TEST_F(UpdateAttempterTest,
+       CheckForUpdateNonInteractiveOfficialBuildScheduledAUTest) {
+  // GIVEN a scheduled autest omaha url.
+  cfu_params_.omaha_url = "autest-scheduled";
+  // GIVEN a noninteractive update.
+  cfu_params_.flags = UpdateAttemptFlags::kFlagNonInteractive;
+
+  // THEN forced app version is cleared.
+  // THEN forced omaha url changes to default constant.
+  cfu_params_.expected_forced_omaha_url = constants::kOmahaDefaultAUTestURL;
+
+  TestCheckForUpdate();
+}
+
+TEST_F(UpdateAttempterTest,
+       CheckForUpdateNonInteractiveUnofficialBuildScheduledAUTest) {
+  // GIVEN a scheduled autest omaha url.
+  cfu_params_.omaha_url = "autest-scheduled";
+  // GIVEN a noninteractive update.
+  cfu_params_.flags = UpdateAttemptFlags::kFlagNonInteractive;
+  // GIVEN a nonofficial build with dev features enabled.
+  cfu_params_.is_official_build = false;
+  cfu_params_.are_dev_features_enabled = true;
+
+  // THEN forced app version changes based on input.
+  cfu_params_.expected_forced_app_version = cfu_params_.app_version;
+  // THEN forced omaha url changes to default constant.
+  cfu_params_.expected_forced_omaha_url = constants::kOmahaDefaultAUTestURL;
+
+  TestCheckForUpdate();
+}
+
+TEST_F(UpdateAttempterTest, CheckForUpdateNonInteractiveOfficialBuildAUTest) {
+  // GIVEN an autest omaha url.
+  cfu_params_.omaha_url = "autest";
+  // GIVEN a noninteractive update.
+  cfu_params_.flags = UpdateAttemptFlags::kFlagNonInteractive;
+
+  // THEN forced app version is cleared.
+  // THEN forced omaha url changes to default constant.
+  cfu_params_.expected_forced_omaha_url = constants::kOmahaDefaultAUTestURL;
+
+  TestCheckForUpdate();
+}
+
+TEST_F(UpdateAttempterTest, CheckForUpdateNonInteractiveUnofficialBuildAUTest) {
+  // GIVEN an autest omaha url.
+  cfu_params_.omaha_url = "autest";
+  // GIVEN a noninteractive update.
+  cfu_params_.flags = UpdateAttemptFlags::kFlagNonInteractive;
+  // GIVEN a nonofficial build with dev features enabled.
+  cfu_params_.is_official_build = false;
+  cfu_params_.are_dev_features_enabled = true;
+
+  // THEN forced app version changes based on input.
+  cfu_params_.expected_forced_app_version = cfu_params_.app_version;
+  // THEN forced omaha url changes to default constant.
+  cfu_params_.expected_forced_omaha_url = constants::kOmahaDefaultAUTestURL;
+
+  TestCheckForUpdate();
+}
+
+TEST_F(UpdateAttempterTest, CheckForUpdateMissingForcedCallback1) {
+  // GIVEN an official build.
+  // GIVEN forced callback is not set.
+  attempter_.set_forced_update_pending_callback(nullptr);
+
+  // THEN we expect forced app version + forced omaha url to be cleared.
+  // THEN |ScheduleUpdates()| should not be called.
+  cfu_params_.should_schedule_updates_be_called = false;
+
+  TestCheckForUpdate();
+}
+
+TEST_F(UpdateAttempterTest, CheckForUpdateMissingForcedCallback2) {
+  // GIVEN a nonofficial build with dev features enabled.
+  cfu_params_.is_official_build = false;
+  cfu_params_.are_dev_features_enabled = true;
+  // GIVEN forced callback is not set.
+  attempter_.set_forced_update_pending_callback(nullptr);
+
+  // THEN the forced app version + forced omaha url changes based on input.
+  cfu_params_.expected_forced_app_version = cfu_params_.app_version;
+  cfu_params_.expected_forced_omaha_url = cfu_params_.omaha_url;
+  // THEN |ScheduleUpdates()| should not be called.
+  cfu_params_.should_schedule_updates_be_called = false;
+
+  TestCheckForUpdate();
+}
+
+TEST_F(UpdateAttempterTest, CheckForInstallTest) {
+  FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(true);
+  FakeSystemState::Get()->fake_hardware()->SetAreDevFeaturesEnabled(false);
+  attempter_.CheckForInstall({}, "autest");
+  EXPECT_EQ(constants::kOmahaDefaultAUTestURL, attempter_.forced_omaha_url());
+
+  attempter_.CheckForInstall({}, "autest-scheduled");
+  EXPECT_EQ(constants::kOmahaDefaultAUTestURL, attempter_.forced_omaha_url());
+
+  attempter_.CheckForInstall({}, "http://omaha.phishing");
+  EXPECT_EQ("", attempter_.forced_omaha_url());
+}
+
+TEST_F(UpdateAttempterTest, InstallSetsStatusIdle) {
+  attempter_.CheckForInstall({}, "http://foo.bar");
+  attempter_.status_ = UpdateStatus::DOWNLOADING;
+  EXPECT_TRUE(attempter_.is_install_);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
+  UpdateEngineStatus status;
+  attempter_.GetStatus(&status);
+  // Should set status to idle after an install operation.
+  EXPECT_EQ(UpdateStatus::IDLE, status.status);
+}
+
+TEST_F(UpdateAttempterTest, RollbackAfterInstall) {
+  attempter_.is_install_ = true;
+  attempter_.Rollback(false);
+  EXPECT_FALSE(attempter_.is_install_);
+}
+
+TEST_F(UpdateAttempterTest, UpdateAfterInstall) {
+  attempter_.is_install_ = true;
+  attempter_.CheckForUpdate("", "", UpdateAttemptFlags::kNone);
+  EXPECT_FALSE(attempter_.is_install_);
+}
+
+TEST_F(UpdateAttempterTest, TargetVersionPrefixSetAndReset) {
+  UpdateCheckParams params;
+  attempter_.CalculateUpdateParams({.target_version_prefix = "1234"});
+  EXPECT_EQ("1234",
+            FakeSystemState::Get()->request_params()->target_version_prefix());
+
+  attempter_.CalculateUpdateParams({});
+  EXPECT_TRUE(FakeSystemState::Get()
+                  ->request_params()
+                  ->target_version_prefix()
+                  .empty());
+}
+
+TEST_F(UpdateAttempterTest, TargetChannelHintSetAndReset) {
+  attempter_.CalculateUpdateParams({.lts_tag = "hint"});
+  EXPECT_EQ("hint", FakeSystemState::Get()->request_params()->lts_tag());
+
+  attempter_.CalculateUpdateParams({});
+  EXPECT_TRUE(FakeSystemState::Get()->request_params()->lts_tag().empty());
+}
+
+TEST_F(UpdateAttempterTest, RollbackAllowedSetAndReset) {
+  attempter_.CalculateUpdateParams({
+      .target_version_prefix = "1234",
+      .rollback_allowed = true,
+      .rollback_allowed_milestones = 4,
+  });
+  EXPECT_TRUE(FakeSystemState::Get()->request_params()->rollback_allowed());
+  EXPECT_EQ(
+      4,
+      FakeSystemState::Get()->request_params()->rollback_allowed_milestones());
+
+  attempter_.CalculateUpdateParams({
+      .target_version_prefix = "1234",
+      .rollback_allowed_milestones = 4,
+  });
+  EXPECT_FALSE(FakeSystemState::Get()->request_params()->rollback_allowed());
+  EXPECT_EQ(
+      4,
+      FakeSystemState::Get()->request_params()->rollback_allowed_milestones());
+}
+
+TEST_F(UpdateAttempterTest, ChannelDowngradeNoRollback) {
+  base::ScopedTempDir tempdir;
+  ASSERT_TRUE(tempdir.CreateUniqueTempDir());
+  FakeSystemState::Get()->request_params()->set_root(tempdir.GetPath().value());
+  attempter_.CalculateUpdateParams({
+      .target_channel = "stable-channel",
+  });
+  EXPECT_FALSE(
+      FakeSystemState::Get()->request_params()->is_powerwash_allowed());
+}
+
+TEST_F(UpdateAttempterTest, ChannelDowngradeRollback) {
+  base::ScopedTempDir tempdir;
+  ASSERT_TRUE(tempdir.CreateUniqueTempDir());
+  FakeSystemState::Get()->request_params()->set_root(tempdir.GetPath().value());
+  attempter_.CalculateUpdateParams({
+      .rollback_on_channel_downgrade = true,
+      .target_channel = "stable-channel",
+  });
+  EXPECT_TRUE(FakeSystemState::Get()->request_params()->is_powerwash_allowed());
+}
+
+TEST_F(UpdateAttempterTest, UpdateDeferredByPolicyTest) {
+  // Construct an OmahaResponseHandlerAction that has processed an InstallPlan,
+  // but the update is being deferred by the Policy.
+  OmahaResponseHandlerAction response_action;
+  response_action.install_plan_.version = "a.b.c.d";
+  response_action.install_plan_.payloads.push_back(
+      {.size = 1234ULL, .type = InstallPayloadType::kFull});
+  // Inform the UpdateAttempter that the OmahaResponseHandlerAction has
+  // completed, with the deferred-update error code.
+  attempter_.ActionCompleted(
+      nullptr, &response_action, ErrorCode::kOmahaUpdateDeferredPerPolicy);
+  {
+    UpdateEngineStatus status;
+    attempter_.GetStatus(&status);
+    EXPECT_EQ(UpdateStatus::UPDATE_AVAILABLE, status.status);
+    EXPECT_TRUE(attempter_.install_plan_);
+    EXPECT_EQ(attempter_.install_plan_->version, status.new_version);
+    EXPECT_EQ(attempter_.install_plan_->payloads[0].size,
+              status.new_size_bytes);
+  }
+  // An "error" event should have been created to tell Omaha that the update is
+  // being deferred.
+  EXPECT_TRUE(nullptr != attempter_.error_event_);
+  EXPECT_EQ(OmahaEvent::kTypeUpdateComplete, attempter_.error_event_->type);
+  EXPECT_EQ(OmahaEvent::kResultUpdateDeferred, attempter_.error_event_->result);
+  ErrorCode expected_code = static_cast<ErrorCode>(
+      static_cast<int>(ErrorCode::kOmahaUpdateDeferredPerPolicy) |
+      static_cast<int>(ErrorCode::kTestOmahaUrlFlag));
+  EXPECT_EQ(expected_code, attempter_.error_event_->error_code);
+  // End the processing.
+  attempter_.ProcessingDone(nullptr, ErrorCode::kOmahaUpdateDeferredPerPolicy);
+  // Validate the state of the attempter.
+  {
+    UpdateEngineStatus status;
+    attempter_.GetStatus(&status);
+    EXPECT_EQ(UpdateStatus::REPORTING_ERROR_EVENT, status.status);
+    EXPECT_EQ(response_action.install_plan_.version, status.new_version);
+    EXPECT_EQ(response_action.install_plan_.payloads[0].size,
+              status.new_size_bytes);
+  }
+}
+
+TEST_F(UpdateAttempterTest, UpdateIsNotRunningWhenUpdateAvailable) {
+  // Default construction for |waiting_for_scheduled_check_| is false.
+  EXPECT_FALSE(attempter_.IsBusyOrUpdateScheduled());
+  // Verify that an in-progress update with UPDATE_AVAILABLE is running.
+  attempter_.status_ = UpdateStatus::UPDATE_AVAILABLE;
+  EXPECT_TRUE(attempter_.IsBusyOrUpdateScheduled());
+}
+
+TEST_F(UpdateAttempterTest, UpdateAttemptFlagsCachedAtUpdateStart) {
+  attempter_.SetUpdateAttemptFlags(UpdateAttemptFlags::kFlagRestrictDownload);
+
+  UpdateCheckParams params = {.updates_enabled = true};
+  attempter_.OnUpdateScheduled(EvalStatus::kSucceeded, params);
+
+  EXPECT_EQ(UpdateAttemptFlags::kFlagRestrictDownload,
+            attempter_.GetCurrentUpdateAttemptFlags());
+}
+
+TEST_F(UpdateAttempterTest, RollbackNotAllowed) {
+  UpdateCheckParams params = {.updates_enabled = true,
+                              .rollback_allowed = false};
+  attempter_.OnUpdateScheduled(EvalStatus::kSucceeded, params);
+  EXPECT_FALSE(FakeSystemState::Get()->request_params()->rollback_allowed());
+}
+
+TEST_F(UpdateAttempterTest, RollbackAllowed) {
+  UpdateCheckParams params = {.updates_enabled = true,
+                              .rollback_allowed = true};
+  attempter_.OnUpdateScheduled(EvalStatus::kSucceeded, params);
+  EXPECT_TRUE(FakeSystemState::Get()->request_params()->rollback_allowed());
+}
+
+TEST_F(UpdateAttempterTest, InteractiveUpdateUsesPassedRestrictions) {
+  attempter_.SetUpdateAttemptFlags(UpdateAttemptFlags::kFlagRestrictDownload);
+
+  attempter_.CheckForUpdate("", "", UpdateAttemptFlags::kNone);
+  EXPECT_EQ(UpdateAttemptFlags::kNone,
+            attempter_.GetCurrentUpdateAttemptFlags());
+}
+
+TEST_F(UpdateAttempterTest, NonInteractiveUpdateUsesSetRestrictions) {
+  attempter_.SetUpdateAttemptFlags(UpdateAttemptFlags::kNone);
+
+  // This tests that when CheckForUpdate() is called with the non-interactive
+  // flag set, it doesn't change the current UpdateAttemptFlags.
+  attempter_.CheckForUpdate("",
+                            "",
+                            UpdateAttemptFlags::kFlagNonInteractive |
+                                UpdateAttemptFlags::kFlagRestrictDownload);
+  EXPECT_EQ(UpdateAttemptFlags::kNone,
+            attempter_.GetCurrentUpdateAttemptFlags());
+}
+
+void UpdateAttempterTest::ResetRollbackHappenedStart(bool is_consumer,
+                                                     bool is_policy_loaded,
+                                                     bool expected_reset) {
+  EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(),
+              GetRollbackHappened())
+      .WillRepeatedly(Return(true));
+  auto mock_policy_provider =
+      std::make_unique<NiceMock<policy::MockPolicyProvider>>();
+  EXPECT_CALL(*mock_policy_provider, IsConsumerDevice())
+      .WillRepeatedly(Return(is_consumer));
+  EXPECT_CALL(*mock_policy_provider, device_policy_is_loaded())
+      .WillRepeatedly(Return(is_policy_loaded));
+  const policy::MockDevicePolicy device_policy;
+  EXPECT_CALL(*mock_policy_provider, GetDevicePolicy())
+      .WillRepeatedly(ReturnRef(device_policy));
+  EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(),
+              SetRollbackHappened(false))
+      .Times(expected_reset ? 1 : 0);
+  attempter_.policy_provider_ = std::move(mock_policy_provider);
+  attempter_.Update({});
+  ScheduleQuitMainLoop();
+}
+
+TEST_F(UpdateAttempterTest, ResetRollbackHappenedOobe) {
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::ResetRollbackHappenedStart,
+                            base::Unretained(this),
+                            /*is_consumer=*/false,
+                            /*is_policy_loaded=*/false,
+                            /*expected_reset=*/false));
+  loop_.Run();
+}
+
+TEST_F(UpdateAttempterTest, ResetRollbackHappenedConsumer) {
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::ResetRollbackHappenedStart,
+                            base::Unretained(this),
+                            /*is_consumer=*/true,
+                            /*is_policy_loaded=*/false,
+                            /*expected_reset=*/true));
+  loop_.Run();
+}
+
+TEST_F(UpdateAttempterTest, ResetRollbackHappenedEnterprise) {
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::ResetRollbackHappenedStart,
+                            base::Unretained(this),
+                            /*is_consumer=*/false,
+                            /*is_policy_loaded=*/true,
+                            /*expected_reset=*/true));
+  loop_.Run();
+}
+
+TEST_F(UpdateAttempterTest, SetRollbackHappenedRollback) {
+  attempter_.install_plan_.reset(new InstallPlan);
+  attempter_.install_plan_->is_rollback = true;
+
+  EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(),
+              SetRollbackHappened(true))
+      .Times(1);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
+}
+
+TEST_F(UpdateAttempterTest, SetRollbackHappenedNotRollback) {
+  attempter_.install_plan_.reset(new InstallPlan);
+  attempter_.install_plan_->is_rollback = false;
+
+  EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(),
+              SetRollbackHappened(true))
+      .Times(0);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
+}
+
+TEST_F(UpdateAttempterTest, RollbackMetricsRollbackSuccess) {
+  attempter_.install_plan_.reset(new InstallPlan);
+  attempter_.install_plan_->is_rollback = true;
+  attempter_.install_plan_->version = kRollbackVersion;
+
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
+              ReportEnterpriseRollbackMetrics(true, kRollbackVersion))
+      .Times(1);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
+}
+
+TEST_F(UpdateAttempterTest, RollbackMetricsNotRollbackSuccess) {
+  attempter_.install_plan_.reset(new InstallPlan);
+  attempter_.install_plan_->is_rollback = false;
+  attempter_.install_plan_->version = kRollbackVersion;
+
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
+              ReportEnterpriseRollbackMetrics(_, _))
+      .Times(0);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
+}
+
+TEST_F(UpdateAttempterTest, RollbackMetricsRollbackFailure) {
+  attempter_.install_plan_.reset(new InstallPlan);
+  attempter_.install_plan_->is_rollback = true;
+  attempter_.install_plan_->version = kRollbackVersion;
+
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
+              ReportEnterpriseRollbackMetrics(false, kRollbackVersion))
+      .Times(1);
+  MockAction action;
+  attempter_.CreatePendingErrorEvent(&action, ErrorCode::kRollbackNotPossible);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kRollbackNotPossible);
+}
+
+TEST_F(UpdateAttempterTest, RollbackMetricsNotRollbackFailure) {
+  attempter_.install_plan_.reset(new InstallPlan);
+  attempter_.install_plan_->is_rollback = false;
+  attempter_.install_plan_->version = kRollbackVersion;
+
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
+              ReportEnterpriseRollbackMetrics(_, _))
+      .Times(0);
+  MockAction action;
+  attempter_.CreatePendingErrorEvent(&action, ErrorCode::kRollbackNotPossible);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kRollbackNotPossible);
+}
+
+TEST_F(UpdateAttempterTest, TimeToUpdateAppliedMetricFailure) {
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
+              ReportEnterpriseUpdateSeenToDownloadDays(_, _))
+      .Times(0);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kOmahaUpdateDeferredPerPolicy);
+}
+
+TEST_F(UpdateAttempterTest, TimeToUpdateAppliedOnNonEnterprise) {
+  auto device_policy = std::make_unique<policy::MockDevicePolicy>();
+  FakeSystemState::Get()->set_device_policy(device_policy.get());
+  // Make device policy return that this is not enterprise enrolled.
+  EXPECT_CALL(*device_policy, IsEnterpriseEnrolled()).WillOnce(Return(false));
+
+  // Ensure that the metric is not recorded.
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
+              ReportEnterpriseUpdateSeenToDownloadDays(_, _))
+      .Times(0);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
+}
+
+TEST_F(UpdateAttempterTest,
+       TimeToUpdateAppliedWithTimeRestrictionMetricSuccess) {
+  constexpr int kDaysToUpdate = 15;
+  auto device_policy = std::make_unique<policy::MockDevicePolicy>();
+  FakeSystemState::Get()->set_device_policy(device_policy.get());
+  // Make device policy return that this is enterprise enrolled.
+  EXPECT_CALL(*device_policy, IsEnterpriseEnrolled()).WillOnce(Return(true));
+  // Pretend that there's a time restriction policy in place.
+  EXPECT_CALL(*device_policy, GetDisallowedTimeIntervals(_))
+      .WillOnce(Return(true));
+
+  Time update_first_seen_at = Time::Now();
+  FakeSystemState::Get()->fake_prefs()->SetInt64(
+      kPrefsUpdateFirstSeenAt, update_first_seen_at.ToInternalValue());
+
+  Time update_finished_at =
+      update_first_seen_at + TimeDelta::FromDays(kDaysToUpdate);
+  FakeSystemState::Get()->fake_clock()->SetWallclockTime(update_finished_at);
+
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
+              ReportEnterpriseUpdateSeenToDownloadDays(true, kDaysToUpdate))
+      .Times(1);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
+}
+
+TEST_F(UpdateAttempterTest,
+       TimeToUpdateAppliedWithoutTimeRestrictionMetricSuccess) {
+  constexpr int kDaysToUpdate = 15;
+  auto device_policy = std::make_unique<policy::MockDevicePolicy>();
+  FakeSystemState::Get()->set_device_policy(device_policy.get());
+  // Make device policy return that this is enterprise enrolled.
+  EXPECT_CALL(*device_policy, IsEnterpriseEnrolled()).WillOnce(Return(true));
+  // Pretend that there's no time restriction policy in place.
+  EXPECT_CALL(*device_policy, GetDisallowedTimeIntervals(_))
+      .WillOnce(Return(false));
+
+  Time update_first_seen_at = Time::Now();
+  FakeSystemState::Get()->fake_prefs()->SetInt64(
+      kPrefsUpdateFirstSeenAt, update_first_seen_at.ToInternalValue());
+
+  Time update_finished_at =
+      update_first_seen_at + TimeDelta::FromDays(kDaysToUpdate);
+  FakeSystemState::Get()->fake_clock()->SetWallclockTime(update_finished_at);
+
+  EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(),
+              ReportEnterpriseUpdateSeenToDownloadDays(false, kDaysToUpdate))
+      .Times(1);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
+}
+
+TEST_F(UpdateAttempterTest, ProcessingDoneUpdated) {
+  // GIVEN an update finished.
+
+  // THEN update_engine should call update completion.
+  pd_params_.should_update_completed_be_called = true;
+  // THEN need reboot since update applied.
+  pd_params_.expected_exit_status = UpdateStatus::UPDATED_NEED_REBOOT;
+  // THEN install indication should be false.
+
+  TestProcessingDone();
+}
+
+TEST_F(UpdateAttempterTest, ProcessingDoneUpdatedDlcFilter) {
+  // GIVEN an update finished.
+  // GIVEN DLC |AppParams| list.
+  auto dlc_1 = "dlc_1", dlc_2 = "dlc_2";
+  pd_params_.dlc_apps_params = {{dlc_1, {.name = dlc_1, .updated = false}},
+                                {dlc_2, {.name = dlc_2}}};
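+  // |dlc_1| is marked as not updated, so only |dlc_2| should be reported in
+  // the completion call below.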
+
+  // THEN update_engine should call update completion.
+  pd_params_.should_update_completed_be_called = true;
+  pd_params_.args_to_update_completed = {dlc_2};
+  // THEN need reboot since update applied.
+  pd_params_.expected_exit_status = UpdateStatus::UPDATED_NEED_REBOOT;
+  // THEN install indication should be false.
+
+  TestProcessingDone();
+}
+
+TEST_F(UpdateAttempterTest, ProcessingDoneInstalled) {
+  // GIVEN an install finished.
+  pd_params_.is_install = true;
+
+  // THEN update_engine should call install completion.
+  pd_params_.should_install_completed_be_called = true;
+  // THEN go idle.
+  // THEN install indication should be false.
+
+  TestProcessingDone();
+}
+
+TEST_F(UpdateAttempterTest, ProcessingDoneInstalledDlcFilter) {
+  // GIVEN an install finished.
+  pd_params_.is_install = true;
+  // GIVEN DLC |AppParams| list.
+  auto dlc_1 = "dlc_1", dlc_2 = "dlc_2";
+  pd_params_.dlc_apps_params = {{dlc_1, {.name = dlc_1, .updated = false}},
+                                {dlc_2, {.name = dlc_2}}};
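+  // As above, |dlc_1| is marked as not updated, so only |dlc_2| should show
+  // up in the install-completion call.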
+
+  // THEN update_engine should call install completion.
+  pd_params_.should_install_completed_be_called = true;
+  pd_params_.args_to_install_completed = {dlc_2};
+  // THEN go idle.
+  // THEN install indication should be false.
+
+  TestProcessingDone();
+}
+
+TEST_F(UpdateAttempterTest, ProcessingDoneInstallReportingError) {
+  // GIVEN an install finished.
+  pd_params_.is_install = true;
+  // GIVEN a reporting error occurred.
+  pd_params_.status = UpdateStatus::REPORTING_ERROR_EVENT;
+
+  // THEN update_engine should not call install completion.
+  // THEN go idle.
+  // THEN install indication should be false.
+
+  TestProcessingDone();
+}
+
+TEST_F(UpdateAttempterTest, ProcessingDoneNoUpdate) {
+  // GIVEN an update finished.
+  // GIVEN an action error occurred.
+  pd_params_.code = ErrorCode::kNoUpdate;
+
+  // THEN update_engine should not call update completion.
+  // THEN go idle.
+  // THEN install indication should be false.
+
+  TestProcessingDone();
+}
+
+TEST_F(UpdateAttempterTest, ProcessingDoneNoInstall) {
+  // GIVEN an install finished.
+  pd_params_.is_install = true;
+  // GIVEN an action error occurred.
+  pd_params_.code = ErrorCode::kNoUpdate;
+
+  // THEN update_engine should not call install completion.
+  // THEN go idle.
+  // THEN install indication should be false.
+
+  TestProcessingDone();
+}
+
+TEST_F(UpdateAttempterTest, ProcessingDoneUpdateError) {
+  // GIVEN an update finished.
+  // GIVEN an action error occurred.
+  pd_params_.code = ErrorCode::kError;
+  // GIVEN an event error is set.
+  attempter_.error_event_.reset(new OmahaEvent(OmahaEvent::kTypeUpdateComplete,
+                                               OmahaEvent::kResultError,
+                                               ErrorCode::kError));
+
+  // THEN indicate an error event.
+  pd_params_.expected_exit_status = UpdateStatus::REPORTING_ERROR_EVENT;
+  // THEN install indication should be false.
+
+  // THEN update_engine should not call update completion.
+  // THEN expect critical actions of |ScheduleErrorEventAction()|.
+  EXPECT_CALL(*processor_, EnqueueAction(Pointee(_))).Times(1);
+  EXPECT_CALL(*processor_, StartProcessing()).Times(1);
+  // THEN |ScheduleUpdates()| will be called on the next |ProcessingDone()|,
+  // so skip it here.
+  pd_params_.should_schedule_updates_be_called = false;
+
+  TestProcessingDone();
+}
+
+TEST_F(UpdateAttempterTest, ProcessingDoneInstallError) {
+  // GIVEN an install finished.
+  pd_params_.is_install = true;
+  // GIVEN an action error occurred.
+  pd_params_.code = ErrorCode::kError;
+  // GIVEN an event error is set.
+  attempter_.error_event_.reset(new OmahaEvent(OmahaEvent::kTypeUpdateComplete,
+                                               OmahaEvent::kResultError,
+                                               ErrorCode::kError));
+
+  // THEN indicate an error event.
+  pd_params_.expected_exit_status = UpdateStatus::REPORTING_ERROR_EVENT;
+  // THEN install indication should be false.
+
+  // THEN update_engine should not call install completion.
+  // THEN expect critical actions of |ScheduleErrorEventAction()|.
+  EXPECT_CALL(*processor_, EnqueueAction(Pointee(_))).Times(1);
+  EXPECT_CALL(*processor_, StartProcessing()).Times(1);
+  // THEN |ScheduleUpdates()| will be called on the next |ProcessingDone()|,
+  // so skip it here.
+  pd_params_.should_schedule_updates_be_called = false;
+
+  TestProcessingDone();
+}
+
+TEST_F(UpdateAttempterTest, QuickFixTokenWhenDeviceIsEnterpriseEnrolled) {
+  attempter_.CalculateUpdateParams({.quick_fix_build_token = "token"});
+  EXPECT_EQ("token",
+            FakeSystemState::Get()->request_params()->autoupdate_token());
+
+  attempter_.CalculateUpdateParams({});
+  EXPECT_TRUE(
+      FakeSystemState::Get()->request_params()->autoupdate_token().empty());
+}
+
+TEST_F(UpdateAttempterTest, ScheduleUpdateSpamHandlerTest) {
+  EXPECT_CALL(mock_update_manager_, AsyncPolicyRequestUpdateCheckAllowed(_, _))
+      .Times(1);
+  EXPECT_TRUE(attempter_.ScheduleUpdates());
+  // Now there is an update scheduled which means that all subsequent
+  // |ScheduleUpdates()| should fail.
+  EXPECT_FALSE(attempter_.ScheduleUpdates());
+  EXPECT_FALSE(attempter_.ScheduleUpdates());
+  EXPECT_FALSE(attempter_.ScheduleUpdates());
+}
+
+// Critical tests to always make sure that an update is scheduled. The
+// following unit tests cover the correctness of the interplay between
+// |UpdateAttempter| and |UpdateManager|. It also helps to keep in mind the
+// actions that happen in the flow when |UpdateAttempter| is called back on
+// |OnUpdateScheduled()| -> (various cases which lead to) -> |ProcessingDone()|.
+void UpdateAttempterTest::TestOnUpdateScheduled() {
+  // Setup
+  attempter_.SetWaitingForScheduledCheck(true);
+  attempter_.DisableUpdate();
+  attempter_.DisableScheduleUpdates();
+
+  // Invocation
+  attempter_.OnUpdateScheduled(ous_params_.status, ous_params_.params);
+
+  // Verify
+  EXPECT_EQ(ous_params_.exit_status, attempter_.status());
+  EXPECT_EQ(ous_params_.should_schedule_updates_be_called,
+            attempter_.WasScheduleUpdatesCalled());
+  EXPECT_EQ(ous_params_.should_update_be_called, attempter_.WasUpdateCalled());
+}
+
+TEST_F(UpdateAttempterTest, OnUpdatesScheduledFailed) {
+  // GIVEN failed status.
+
+  // THEN update should be scheduled.
+  ous_params_.should_schedule_updates_be_called = true;
+
+  TestOnUpdateScheduled();
+}
+
+TEST_F(UpdateAttempterTest, OnUpdatesScheduledAskMeAgainLater) {
+  // GIVEN ask me again later status.
+  ous_params_.status = EvalStatus::kAskMeAgainLater;
+
+  // THEN update should be scheduled.
+  ous_params_.should_schedule_updates_be_called = true;
+
+  TestOnUpdateScheduled();
+}
+
+TEST_F(UpdateAttempterTest, OnUpdatesScheduledContinue) {
+  // GIVEN continue status.
+  ous_params_.status = EvalStatus::kContinue;
+
+  // THEN update should be scheduled.
+  ous_params_.should_schedule_updates_be_called = true;
+
+  TestOnUpdateScheduled();
+}
+
+TEST_F(UpdateAttempterTest, OnUpdatesScheduledSucceededButUpdateDisabledFails) {
+  // GIVEN updates disabled.
+  ous_params_.params = {.updates_enabled = false};
+  // GIVEN succeeded status.
+  ous_params_.status = EvalStatus::kSucceeded;
+
+  // THEN update should not be scheduled.
+
+  TestOnUpdateScheduled();
+}
+
+TEST_F(UpdateAttempterTest, OnUpdatesScheduledSucceeded) {
+  // GIVEN updates enabled.
+  ous_params_.params = {.updates_enabled = true};
+  // GIVEN succeeded status.
+  ous_params_.status = EvalStatus::kSucceeded;
+
+  // THEN update should be called indicating status change.
+  ous_params_.exit_status = UpdateStatus::CHECKING_FOR_UPDATE;
+  ous_params_.should_update_be_called = true;
+
+  TestOnUpdateScheduled();
+}
+
+TEST_F(UpdateAttempterTest, IsEnterpriseRollbackInGetStatusDefault) {
+  UpdateEngineStatus status;
+  attempter_.GetStatus(&status);
+  EXPECT_FALSE(status.is_enterprise_rollback);
+}
+
+TEST_F(UpdateAttempterTest, IsEnterpriseRollbackInGetStatusFalse) {
+  attempter_.install_plan_.reset(new InstallPlan);
+  attempter_.install_plan_->is_rollback = false;
+
+  UpdateEngineStatus status;
+  attempter_.GetStatus(&status);
+  EXPECT_FALSE(status.is_enterprise_rollback);
+}
+
+TEST_F(UpdateAttempterTest, IsEnterpriseRollbackInGetStatusTrue) {
+  attempter_.install_plan_.reset(new InstallPlan);
+  attempter_.install_plan_->is_rollback = true;
+
+  UpdateEngineStatus status;
+  attempter_.GetStatus(&status);
+  EXPECT_TRUE(status.is_enterprise_rollback);
+}
+
+TEST_F(UpdateAttempterTest, PowerwashInGetStatusDefault) {
+  UpdateEngineStatus status;
+  attempter_.GetStatus(&status);
+  EXPECT_FALSE(status.will_powerwash_after_reboot);
+}
+
+TEST_F(UpdateAttempterTest, PowerwashInGetStatusTrueBecausePowerwashRequired) {
+  attempter_.install_plan_.reset(new InstallPlan);
+  attempter_.install_plan_->powerwash_required = true;
+
+  UpdateEngineStatus status;
+  attempter_.GetStatus(&status);
+  EXPECT_TRUE(status.will_powerwash_after_reboot);
+}
+
+TEST_F(UpdateAttempterTest, PowerwashInGetStatusTrueBecauseRollback) {
+  attempter_.install_plan_.reset(new InstallPlan);
+  attempter_.install_plan_->is_rollback = true;
+
+  UpdateEngineStatus status;
+  attempter_.GetStatus(&status);
+  EXPECT_TRUE(status.will_powerwash_after_reboot);
+}
+
+TEST_F(UpdateAttempterTest, FutureEolTest) {
+  EolDate eol_date = std::numeric_limits<int64_t>::max();
+  EXPECT_TRUE(prefs_->SetString(kPrefsOmahaEolDate, EolDateToString(eol_date)));
+  UpdateEngineStatus status;
+  attempter_.GetStatus(&status);
+  EXPECT_EQ(eol_date, status.eol_date);
+}
+
+TEST_F(UpdateAttempterTest, PastEolTest) {
+  EolDate eol_date = 1;
+  EXPECT_TRUE(prefs_->SetString(kPrefsOmahaEolDate, EolDateToString(eol_date)));
+  UpdateEngineStatus status;
+  attempter_.GetStatus(&status);
+  EXPECT_EQ(eol_date, status.eol_date);
+}
+
+TEST_F(UpdateAttempterTest, MissingEolTest) {
+  UpdateEngineStatus status;
+  attempter_.GetStatus(&status);
+  EXPECT_EQ(kEolDateInvalid, status.eol_date);
+}
+
+TEST_F(UpdateAttempterTest, CalculateDlcParamsInstallTest) {
+  string dlc_id = "dlc0";
+  attempter_.is_install_ = true;
+  attempter_.dlc_ids_ = {dlc_id};
+  attempter_.CalculateDlcParams();
+
+  OmahaRequestParams* params = FakeSystemState::Get()->request_params();
+  EXPECT_EQ(1, params->dlc_apps_params().count(params->GetDlcAppId(dlc_id)));
+  OmahaRequestParams::AppParams dlc_app_params =
+      params->dlc_apps_params().at(params->GetDlcAppId(dlc_id));
+  EXPECT_STREQ(dlc_id.c_str(), dlc_app_params.name.c_str());
+  EXPECT_EQ(false, dlc_app_params.send_ping);
+  // When the DLC gets installed, a ping is not sent, so we don't store the
+  // values sent by Omaha.
+  auto last_active_key = PrefsInterface::CreateSubKey(
+      {kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive});
+  EXPECT_FALSE(FakeSystemState::Get()->prefs()->Exists(last_active_key));
+  auto last_rollcall_key = PrefsInterface::CreateSubKey(
+      {kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall});
+  EXPECT_FALSE(FakeSystemState::Get()->prefs()->Exists(last_rollcall_key));
+}
+
+TEST_F(UpdateAttempterTest, CalculateDlcParamsNoPrefFilesTest) {
+  string dlc_id = "dlc0";
+  EXPECT_CALL(mock_dlcservice_, GetDlcsToUpdate(_))
+      .WillOnce(
+          DoAll(SetArgPointee<0>(std::vector<string>({dlc_id})), Return(true)));
+
+  attempter_.is_install_ = false;
+  attempter_.CalculateDlcParams();
+
+  OmahaRequestParams* params = FakeSystemState::Get()->request_params();
+  EXPECT_EQ(1, params->dlc_apps_params().count(params->GetDlcAppId(dlc_id)));
+  OmahaRequestParams::AppParams dlc_app_params =
+      params->dlc_apps_params().at(params->GetDlcAppId(dlc_id));
+  EXPECT_STREQ(dlc_id.c_str(), dlc_app_params.name.c_str());
+
+  EXPECT_EQ(true, dlc_app_params.send_ping);
+  EXPECT_EQ(0, dlc_app_params.ping_active);
+  EXPECT_EQ(-1, dlc_app_params.ping_date_last_active);
+  EXPECT_EQ(-1, dlc_app_params.ping_date_last_rollcall);
+}
+
+TEST_F(UpdateAttempterTest, CalculateDlcParamsNonParseableValuesTest) {
+  string dlc_id = "dlc0";
+  MemoryPrefs prefs;
+  FakeSystemState::Get()->set_prefs(&prefs);
+  EXPECT_CALL(mock_dlcservice_, GetDlcsToUpdate(_))
+      .WillOnce(
+          DoAll(SetArgPointee<0>(std::vector<string>({dlc_id})), Return(true)));
+
+  // Write non-numeric values in the metadata files.
+  auto active_key =
+      PrefsInterface::CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingActive});
+  auto last_active_key = PrefsInterface::CreateSubKey(
+      {kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive});
+  auto last_rollcall_key = PrefsInterface::CreateSubKey(
+      {kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall});
+  FakeSystemState::Get()->prefs()->SetString(active_key, "z2yz");
+  FakeSystemState::Get()->prefs()->SetString(last_active_key, "z2yz");
+  FakeSystemState::Get()->prefs()->SetString(last_rollcall_key, "z2yz");
+  attempter_.is_install_ = false;
+  attempter_.CalculateDlcParams();
+
+  OmahaRequestParams* params = FakeSystemState::Get()->request_params();
+  EXPECT_EQ(1, params->dlc_apps_params().count(params->GetDlcAppId(dlc_id)));
+  OmahaRequestParams::AppParams dlc_app_params =
+      params->dlc_apps_params().at(params->GetDlcAppId(dlc_id));
+  EXPECT_STREQ(dlc_id.c_str(), dlc_app_params.name.c_str());
+
+  EXPECT_EQ(true, dlc_app_params.send_ping);
+  EXPECT_EQ(0, dlc_app_params.ping_active);
+  EXPECT_EQ(-2, dlc_app_params.ping_date_last_active);
+  EXPECT_EQ(-2, dlc_app_params.ping_date_last_rollcall);
+}
+
+TEST_F(UpdateAttempterTest, CalculateDlcParamsValidValuesTest) {
+  string dlc_id = "dlc0";
+  EXPECT_CALL(mock_dlcservice_, GetDlcsToUpdate(_))
+      .WillOnce(
+          DoAll(SetArgPointee<0>(std::vector<string>({dlc_id})), Return(true)));
+
+  // Write numeric values in the metadata files.
+  auto active_key =
+      PrefsInterface::CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingActive});
+  auto last_active_key = PrefsInterface::CreateSubKey(
+      {kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive});
+  auto last_rollcall_key = PrefsInterface::CreateSubKey(
+      {kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall});
+
+  FakeSystemState::Get()->prefs()->SetInt64(active_key, 1);
+  FakeSystemState::Get()->prefs()->SetInt64(last_active_key, 78);
+  FakeSystemState::Get()->prefs()->SetInt64(last_rollcall_key, 99);
+  attempter_.is_install_ = false;
+  attempter_.CalculateDlcParams();
+
+  OmahaRequestParams* params = FakeSystemState::Get()->request_params();
+  EXPECT_EQ(1, params->dlc_apps_params().count(params->GetDlcAppId(dlc_id)));
+  OmahaRequestParams::AppParams dlc_app_params =
+      params->dlc_apps_params().at(params->GetDlcAppId(dlc_id));
+  EXPECT_STREQ(dlc_id.c_str(), dlc_app_params.name.c_str());
+
+  EXPECT_EQ(true, dlc_app_params.send_ping);
+  EXPECT_EQ(1, dlc_app_params.ping_active);
+  EXPECT_EQ(78, dlc_app_params.ping_date_last_active);
+  EXPECT_EQ(99, dlc_app_params.ping_date_last_rollcall);
+}
+
+TEST_F(UpdateAttempterTest, CalculateDlcParamsRemoveStaleMetadata) {
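+  // On install, stale ping metadata for the DLC should be cleared and the
+  // active value re-seeded to |kPingActiveValue|.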
+  string dlc_id = "dlc0";
+  auto active_key =
+      PrefsInterface::CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingActive});
+  auto last_active_key = PrefsInterface::CreateSubKey(
+      {kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive});
+  auto last_rollcall_key = PrefsInterface::CreateSubKey(
+      {kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall});
+  FakeSystemState::Get()->prefs()->SetInt64(active_key, kPingInactiveValue);
+  FakeSystemState::Get()->prefs()->SetInt64(last_active_key, 0);
+  FakeSystemState::Get()->prefs()->SetInt64(last_rollcall_key, 0);
+  EXPECT_TRUE(FakeSystemState::Get()->prefs()->Exists(active_key));
+  EXPECT_TRUE(FakeSystemState::Get()->prefs()->Exists(last_active_key));
+  EXPECT_TRUE(FakeSystemState::Get()->prefs()->Exists(last_rollcall_key));
+
+  attempter_.dlc_ids_ = {dlc_id};
+  attempter_.is_install_ = true;
+  attempter_.CalculateDlcParams();
+
+  EXPECT_FALSE(FakeSystemState::Get()->prefs()->Exists(last_active_key));
+  EXPECT_FALSE(FakeSystemState::Get()->prefs()->Exists(last_rollcall_key));
+  // Active key is set on install.
+  EXPECT_TRUE(FakeSystemState::Get()->prefs()->Exists(active_key));
+  int64_t temp_int;
+  EXPECT_TRUE(FakeSystemState::Get()->prefs()->GetInt64(active_key, &temp_int));
+  EXPECT_EQ(temp_int, kPingActiveValue);
+}
+
+TEST_F(UpdateAttempterTest, SetDlcActiveValue) {
+  string dlc_id = "dlc0";
+  attempter_.SetDlcActiveValue(true, dlc_id);
+  int64_t temp_int;
+  auto active_key =
+      PrefsInterface::CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingActive});
+  EXPECT_TRUE(FakeSystemState::Get()->prefs()->Exists(active_key));
+  EXPECT_TRUE(FakeSystemState::Get()->prefs()->GetInt64(active_key, &temp_int));
+  EXPECT_EQ(temp_int, kPingActiveValue);
+}
+
+TEST_F(UpdateAttempterTest, SetDlcInactive) {
+  string dlc_id = "dlc0";
+  auto sub_keys = {
+      kPrefsPingActive, kPrefsPingLastActive, kPrefsPingLastRollcall};
+  for (auto& sub_key : sub_keys) {
+    auto key = PrefsInterface::CreateSubKey({kDlcPrefsSubDir, dlc_id, sub_key});
+    FakeSystemState::Get()->prefs()->SetInt64(key, 1);
+    EXPECT_TRUE(FakeSystemState::Get()->prefs()->Exists(key));
+  }
+  attempter_.SetDlcActiveValue(false, dlc_id);
+  for (auto& sub_key : sub_keys) {
+    auto key = PrefsInterface::CreateSubKey({kDlcPrefsSubDir, dlc_id, sub_key});
+    EXPECT_FALSE(FakeSystemState::Get()->prefs()->Exists(key));
+  }
+}
+
+TEST_F(UpdateAttempterTest, GetSuccessfulDlcIds) {
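+  // Only DLCs that are still marked as updated (here, dlc_2) should be
+  // reported as successful.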
+  auto dlc_1 = "1", dlc_2 = "2", dlc_3 = "3";
+  attempter_.omaha_request_params_->set_dlc_apps_params(
+      {{dlc_1, {.name = dlc_1, .updated = false}},
+       {dlc_2, {.name = dlc_2}},
+       {dlc_3, {.name = dlc_3, .updated = false}}});
+  EXPECT_THAT(attempter_.GetSuccessfulDlcIds(), ElementsAre(dlc_2));
+}
+
+TEST_F(UpdateAttempterTest, MoveToPrefs) {
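+  // |MoveToPrefs()| should copy values from the powerwash-safe prefs into the
+  // regular prefs without overwriting existing entries, then delete the
+  // powerwash-safe copies.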
+  string key1 = kPrefsLastActivePingDay;
+  string key2 = kPrefsPingLastRollcall;
+
+  FakePrefs fake_prefs;
+  EXPECT_TRUE(fake_prefs.SetString(key2, "current-rollcall"));
+  FakeSystemState::Get()->set_prefs(&fake_prefs);
+
+  FakePrefs powerwash_safe_prefs;
+  EXPECT_TRUE(powerwash_safe_prefs.SetString(key1, "powerwash-last-active"));
+  EXPECT_TRUE(powerwash_safe_prefs.SetString(key2, "powerwash-last-rollcall"));
+  FakeSystemState::Get()->set_powerwash_safe_prefs(&powerwash_safe_prefs);
+
+  attempter_.Init();
+  attempter_.MoveToPrefs({key1, key2});
+
+  string pref_value_1;
+  fake_prefs.GetString(key1, &pref_value_1);
+  EXPECT_EQ(pref_value_1, "powerwash-last-active");
+  // Do not overwrite if value already exists.
+  string pref_value_2;
+  fake_prefs.GetString(key2, &pref_value_2);
+  EXPECT_EQ(pref_value_2, "current-rollcall");
+
+  // Make sure keys are deleted from powerwash safe prefs regardless of whether
+  // they are written to prefs.
+  EXPECT_FALSE(FakeSystemState::Get()->powerwash_safe_prefs()->Exists(key1));
+  EXPECT_FALSE(FakeSystemState::Get()->powerwash_safe_prefs()->Exists(key2));
+}
+
+}  // namespace chromeos_update_engine
diff --git a/update_engine_client.cc b/cros/update_engine_client.cc
similarity index 79%
rename from update_engine_client.cc
rename to cros/update_engine_client.cc
index 7446041..6f20f11 100644
--- a/update_engine_client.cc
+++ b/cros/update_engine_client.cc
@@ -26,26 +26,33 @@
 #include <base/command_line.h>
 #include <base/logging.h>
 #include <base/macros.h>
+#include <base/strings/string_number_conversions.h>
 #include <base/strings/string_split.h>
 #include <base/threading/platform_thread.h>
+#include <base/threading/thread_task_runner_handle.h>
 #include <brillo/daemons/daemon.h>
 #include <brillo/flag_helper.h>
+#include <brillo/key_value_store.h>
 
 #include "update_engine/client.h"
 #include "update_engine/common/error_code.h"
 #include "update_engine/common/error_code_utils.h"
-#include "update_engine/omaha_utils.h"
+#include "update_engine/cros/omaha_utils.h"
 #include "update_engine/status_update_handler.h"
 #include "update_engine/update_status.h"
 #include "update_engine/update_status_utils.h"
 
-using chromeos_update_engine::EolStatus;
+using brillo::KeyValueStore;
+using chromeos_update_engine::EolDate;
+using chromeos_update_engine::EolDateToString;
 using chromeos_update_engine::ErrorCode;
+using chromeos_update_engine::UpdateEngineStatusToString;
 using chromeos_update_engine::UpdateStatusToString;
 using chromeos_update_engine::utils::ErrorCodeToString;
 using std::string;
 using std::unique_ptr;
 using std::vector;
+using update_engine::UpdateEngineStatus;
 using update_engine::UpdateStatus;
 
 namespace {
@@ -80,7 +87,7 @@
 
     // We can't call QuitWithExitCode from OnInit(), so we delay the execution
     // of the ProcessFlags method after the Daemon initialization is done.
-    base::MessageLoop::current()->task_runner()->PostTask(
+    base::ThreadTaskRunnerHandle::Get()->PostTask(
         FROM_HERE,
         base::Bind(&UpdateEngineClient::ProcessFlagsAndExit,
                    base::Unretained(this)));
@@ -132,80 +139,47 @@
  public:
   ~WatchingStatusUpdateHandler() override = default;
 
-  void HandleStatusUpdate(int64_t last_checked_time,
-                          double progress,
-                          UpdateStatus current_operation,
-                          const string& new_version,
-                          int64_t new_size) override;
+  void HandleStatusUpdate(const UpdateEngineStatus& status) override;
 };
 
 void WatchingStatusUpdateHandler::HandleStatusUpdate(
-    int64_t last_checked_time,
-    double progress,
-    UpdateStatus current_operation,
-    const string& new_version,
-    int64_t new_size) {
-  LOG(INFO) << "Got status update:";
-  LOG(INFO) << "  last_checked_time: " << last_checked_time;
-  LOG(INFO) << "  progress: " << progress;
-  LOG(INFO) << "  current_operation: "
-            << UpdateStatusToString(current_operation);
-  LOG(INFO) << "  new_version: " << new_version;
-  LOG(INFO) << "  new_size: " << new_size;
+    const UpdateEngineStatus& status) {
+  LOG(INFO) << "Got status update: " << UpdateEngineStatusToString(status);
 }
 
 bool UpdateEngineClient::ShowStatus() {
-  int64_t last_checked_time = 0;
-  double progress = 0.0;
-  UpdateStatus current_op;
-  string new_version;
-  int64_t new_size = 0;
-
+  UpdateEngineStatus status;
   int retry_count = kShowStatusRetryCount;
   while (retry_count > 0) {
-    if (client_->GetStatus(&last_checked_time,
-                           &progress,
-                           &current_op,
-                           &new_version,
-                           &new_size)) {
+    if (client_->GetStatus(&status)) {
       break;
     }
     if (--retry_count == 0) {
       return false;
     }
-    LOG(WARNING) << "Will try " << retry_count << " more times!";
+    LOG(WARNING)
+        << "Failed to get the update_engine status. This can happen when the"
+           " update_engine is busy doing a heavy operation or if the"
+           " update-engine service is down. If it doesn't resolve, a restart of"
+           " the update-engine service is needed."
+           " Will try "
+        << retry_count << " more times!";
     base::PlatformThread::Sleep(
         base::TimeDelta::FromSeconds(kShowStatusRetryIntervalInSeconds));
   }
 
-  printf("LAST_CHECKED_TIME=%" PRIi64
-         "\nPROGRESS=%f\nCURRENT_OP=%s\n"
-         "NEW_VERSION=%s\nNEW_SIZE=%" PRIi64 "\n",
-         last_checked_time,
-         progress,
-         UpdateStatusToString(current_op),
-         new_version.c_str(),
-         new_size);
+  printf("%s", UpdateEngineStatusToString(status).c_str());
 
   return true;
 }
 
 int UpdateEngineClient::GetNeedReboot() {
-  int64_t last_checked_time = 0;
-  double progress = 0.0;
-  UpdateStatus current_op;
-  string new_version;
-  int64_t new_size = 0;
-
-  if (!client_->GetStatus(&last_checked_time,
-                          &progress,
-                          &current_op,
-                          &new_version,
-                          &new_size)) {
+  UpdateEngineStatus status;
+  if (!client_->GetStatus(&status)) {
     return 1;
   }
 
-  if (current_op == UpdateStatus::UPDATED_NEED_REBOOT) {
+  if (status.status == UpdateStatus::UPDATED_NEED_REBOOT) {
     return 0;
   }
 
@@ -220,35 +194,26 @@
 
   ~UpdateWaitHandler() override = default;
 
-  void HandleStatusUpdate(int64_t last_checked_time,
-                          double progress,
-                          UpdateStatus current_operation,
-                          const string& new_version,
-                          int64_t new_size) override;
+  void HandleStatusUpdate(const UpdateEngineStatus& status) override;
 
  private:
   bool exit_on_error_;
   update_engine::UpdateEngineClient* client_;
 };
 
-void UpdateWaitHandler::HandleStatusUpdate(int64_t /* last_checked_time */,
-                                           double /* progress */,
-                                           UpdateStatus current_operation,
-                                           const string& /* new_version */,
-                                           int64_t /* new_size */) {
-  if (exit_on_error_ && current_operation == UpdateStatus::IDLE) {
-    int last_attempt_error;
+void UpdateWaitHandler::HandleStatusUpdate(const UpdateEngineStatus& status) {
+  if (exit_on_error_ && status.status == UpdateStatus::IDLE) {
+    int last_attempt_error = static_cast<int>(ErrorCode::kSuccess);
     ErrorCode code = ErrorCode::kSuccess;
     if (client_ && client_->GetLastAttemptError(&last_attempt_error))
       code = static_cast<ErrorCode>(last_attempt_error);
 
     LOG(ERROR) << "Update failed, current operation is "
-               << UpdateStatusToString(current_operation)
-               << ", last error code is " << ErrorCodeToString(code) << "("
-               << last_attempt_error << ")";
+               << UpdateStatusToString(status.status) << ", last error code is "
+               << ErrorCodeToString(code) << "(" << last_attempt_error << ")";
     exit(1);
   }
-  if (current_operation == UpdateStatus::UPDATED_NEED_REBOOT) {
+  if (status.status == UpdateStatus::UPDATED_NEED_REBOOT) {
     LOG(INFO) << "Update succeeded -- reboot needed.";
     exit(0);
   }
@@ -321,8 +286,6 @@
               "Show the previous OS version used before the update reboot.");
   DEFINE_bool(last_attempt_error, false, "Show the last attempt error.");
   DEFINE_bool(eol_status, false, "Show the current end-of-life status.");
-  DEFINE_bool(install, false, "Requests an install.");
-  DEFINE_string(dlc_module_ids, "", "colon-separated list of DLC IDs.");
 
   // Boilerplate init commands.
   base::CommandLine::Init(argc_, argv_);
@@ -507,30 +470,6 @@
     }
   }
 
-  if (FLAGS_install) {
-    // Parse DLC module IDs.
-    vector<string> dlc_module_ids;
-    if (!FLAGS_dlc_module_ids.empty()) {
-      dlc_module_ids = base::SplitString(FLAGS_dlc_module_ids,
-                                         ":",
-                                         base::TRIM_WHITESPACE,
-                                         base::SPLIT_WANT_ALL);
-    }
-    if (dlc_module_ids.empty()) {
-      LOG(ERROR) << "dlc_module_ids is empty:" << FLAGS_dlc_module_ids;
-      return 1;
-    }
-    if (!client_->AttemptInstall(FLAGS_omaha_url, dlc_module_ids)) {
-      LOG(ERROR) << "AttemptInstall failed.";
-      return 1;
-    }
-    return 0;
-  } else if (!FLAGS_dlc_module_ids.empty()) {
-    LOG(ERROR) << "dlc_module_ids is not empty while install is not set:"
-               << FLAGS_dlc_module_ids;
-    return 1;
-  }
-
   // Initiate an update check, if necessary.
   if (do_update_request) {
     LOG_IF(WARNING, FLAGS_reboot) << "-reboot flag ignored.";
@@ -539,7 +478,7 @@
       app_version = "ForcedUpdate";
       LOG(INFO) << "Forcing an update by setting app_version to ForcedUpdate.";
     }
-    LOG(INFO) << "Initiating update check and install.";
+    LOG(INFO) << "Initiating update check.";
     if (!client_->AttemptUpdate(
             app_version, FLAGS_omaha_url, FLAGS_interactive)) {
       LOG(ERROR) << "Error checking for update.";
@@ -622,21 +561,26 @@
       LOG(ERROR) << "Error getting last attempt error.";
     } else {
       ErrorCode code = static_cast<ErrorCode>(last_attempt_error);
-      printf(
-          "ERROR_CODE=%i\n"
-          "ERROR_MESSAGE=%s\n",
-          last_attempt_error,
-          ErrorCodeToString(code).c_str());
+
+      KeyValueStore last_attempt_error_store;
+      last_attempt_error_store.SetString(
+          "ERROR_CODE", base::NumberToString(last_attempt_error));
+      last_attempt_error_store.SetString("ERROR_MESSAGE",
+                                         ErrorCodeToString(code));
+      printf("%s", last_attempt_error_store.SaveToString().c_str());
     }
   }
 
   if (FLAGS_eol_status) {
-    int eol_status;
-    if (!client_->GetEolStatus(&eol_status)) {
-      LOG(ERROR) << "Error getting the end-of-life status.";
+    UpdateEngineStatus status;
+    if (!client_->GetStatus(&status)) {
+      LOG(ERROR) << "Error GetStatus() for getting EOL info.";
     } else {
-      EolStatus eol_status_code = static_cast<EolStatus>(eol_status);
-      printf("EOL_STATUS=%s\n", EolStatusToString(eol_status_code));
+      EolDate eol_date_code = status.eol_date;
+
+      KeyValueStore eol_status_store;
+      eol_status_store.SetString("EOL_DATE", EolDateToString(eol_date_code));
+      printf("%s", eol_status_store.SaveToString().c_str());
     }
   }
 
diff --git a/daemon.cc b/daemon.cc
deleted file mode 100644
index d42344a..0000000
--- a/daemon.cc
+++ /dev/null
@@ -1,117 +0,0 @@
-//
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/daemon.h"
-
-#include <sysexits.h>
-
-#include <base/bind.h>
-#include <base/location.h>
-#if USE_BINDER
-#include <binderwrapper/binder_wrapper.h>
-#endif  // USE_BINDER
-
-#if USE_OMAHA
-#include "update_engine/real_system_state.h"
-#else  // !USE_OMAHA
-#include "update_engine/daemon_state_android.h"
-#endif  // USE_OMAHA
-
-namespace chromeos_update_engine {
-
-int UpdateEngineDaemon::OnInit() {
-  // Register the |subprocess_| singleton with this Daemon as the signal
-  // handler.
-  subprocess_.Init(this);
-
-  int exit_code = Daemon::OnInit();
-  if (exit_code != EX_OK)
-    return exit_code;
-
-#if USE_BINDER
-  android::BinderWrapper::Create();
-  binder_watcher_.Init();
-#endif  // USE_BINDER
-
-#if USE_OMAHA
-  // Initialize update engine global state but continue if something fails.
-  // TODO(deymo): Move the daemon_state_ initialization to a factory method
-  // avoiding the explicit re-usage of the |bus| instance, shared between
-  // D-Bus service and D-Bus client calls.
-  RealSystemState* real_system_state = new RealSystemState();
-  daemon_state_.reset(real_system_state);
-  LOG_IF(ERROR, !real_system_state->Initialize())
-      << "Failed to initialize system state.";
-#else  // !USE_OMAHA
-  DaemonStateAndroid* daemon_state_android = new DaemonStateAndroid();
-  daemon_state_.reset(daemon_state_android);
-  LOG_IF(ERROR, !daemon_state_android->Initialize())
-      << "Failed to initialize system state.";
-#endif  // USE_OMAHA
-
-#if USE_BINDER
-  // Create the Binder Service.
-#if USE_OMAHA
-  binder_service_ = new BinderUpdateEngineBrilloService{real_system_state};
-#else   // !USE_OMAHA
-  binder_service_ = new BinderUpdateEngineAndroidService{
-      daemon_state_android->service_delegate()};
-#endif  // USE_OMAHA
-  auto binder_wrapper = android::BinderWrapper::Get();
-  if (!binder_wrapper->RegisterService(binder_service_->ServiceName(),
-                                       binder_service_)) {
-    LOG(ERROR) << "Failed to register binder service.";
-  }
-
-  daemon_state_->AddObserver(binder_service_.get());
-#endif  // USE_BINDER
-
-#if USE_DBUS
-  // Create the DBus service.
-  dbus_adaptor_.reset(new UpdateEngineAdaptor(real_system_state));
-  daemon_state_->AddObserver(dbus_adaptor_.get());
-
-  dbus_adaptor_->RegisterAsync(base::Bind(&UpdateEngineDaemon::OnDBusRegistered,
-                                          base::Unretained(this)));
-  LOG(INFO) << "Waiting for DBus object to be registered.";
-#else   // !USE_DBUS
-  daemon_state_->StartUpdater();
-#endif  // USE_DBUS
-  return EX_OK;
-}
-
-#if USE_DBUS
-void UpdateEngineDaemon::OnDBusRegistered(bool succeeded) {
-  if (!succeeded) {
-    LOG(ERROR) << "Registering the UpdateEngineAdaptor";
-    QuitWithExitCode(1);
-    return;
-  }
-
-  // Take ownership of the service now that everything is initialized. We need
-  // to this now and not before to avoid exposing a well known DBus service
-  // path that doesn't have the service it is supposed to implement.
-  if (!dbus_adaptor_->RequestOwnership()) {
-    LOG(ERROR) << "Unable to take ownership of the DBus service, is there "
-               << "other update_engine daemon running?";
-    QuitWithExitCode(1);
-    return;
-  }
-  daemon_state_->StartUpdater();
-}
-#endif  // USE_DBUS
-
-}  // namespace chromeos_update_engine
diff --git a/daemon.h b/daemon.h
deleted file mode 100644
index c10bb28..0000000
--- a/daemon.h
+++ /dev/null
@@ -1,87 +0,0 @@
-//
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_DAEMON_H_
-#define UPDATE_ENGINE_DAEMON_H_
-
-#include <memory>
-#include <string>
-
-#if USE_BINDER
-#include <brillo/binder_watcher.h>
-#endif  // USE_BINDER
-#include <brillo/daemons/daemon.h>
-
-#if USE_BINDER
-#if USE_OMAHA
-#include "update_engine/binder_service_brillo.h"
-#else  // !USE_OMAHA
-#include "update_engine/binder_service_android.h"
-#endif  // USE_OMAHA
-#endif  // USE_BINDER
-#include "update_engine/common/subprocess.h"
-#include "update_engine/daemon_state_interface.h"
-#if USE_DBUS
-#include "update_engine/dbus_service.h"
-#endif  // USE_DBUS
-
-namespace chromeos_update_engine {
-
-class UpdateEngineDaemon : public brillo::Daemon {
- public:
-  UpdateEngineDaemon() = default;
-
- protected:
-  int OnInit() override;
-
- private:
-#if USE_DBUS
-  // Run from the main loop when the |dbus_adaptor_| object is registered. At
-  // this point we can request ownership of the DBus service name and continue
-  // initialization.
-  void OnDBusRegistered(bool succeeded);
-
-  // Main D-Bus service adaptor.
-  std::unique_ptr<UpdateEngineAdaptor> dbus_adaptor_;
-#endif  // USE_DBUS
-
-  // The Subprocess singleton class requires a brillo::MessageLoop in the
-  // current thread, so we need to initialize it from this class instead of
-  // the main() function.
-  Subprocess subprocess_;
-
-#if USE_BINDER
-  brillo::BinderWatcher binder_watcher_;
-#endif  // USE_BINDER
-
-#if USE_BINDER
-#if USE_OMAHA
-  android::sp<BinderUpdateEngineBrilloService> binder_service_;
-#else  // !USE_OMAHA
-  android::sp<BinderUpdateEngineAndroidService> binder_service_;
-#endif  // USE_OMAHA
-#endif  // USE_BINDER
-
-  // The daemon state with all the required daemon classes for the configured
-  // platform.
-  std::unique_ptr<DaemonStateInterface> daemon_state_;
-
-  DISALLOW_COPY_AND_ASSIGN(UpdateEngineDaemon);
-};
-
-}  // namespace chromeos_update_engine
-
-#endif  // UPDATE_ENGINE_DAEMON_H_
diff --git a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml
index f81d4ed..ac2f021 100644
--- a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml
+++ b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml
@@ -1,4 +1,19 @@
 <?xml version="1.0" encoding="utf-8" ?>
+<!--
+  Copyright (C) 2019 The Android Open Source Project
+
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+!-->
 <node name="/org/chromium/UpdateEngine">
   <interface name="org.chromium.UpdateEngineInterface">
     <annotation name="org.freedesktop.DBus.GLib.CSymbol"
@@ -20,7 +35,12 @@
       <arg type="i" name="flags" direction="in" />
     </method>
     <method name="AttemptInstall">
-      <arg type="s" name="dlc_request" direction="in" />
+      <arg type="s" name="omaha_url" direction="in" />
+      <arg type="as" name="dlc_ids" direction="in">
+        <tp:docstring>
+          The list of DLC IDs that need to be installed.
+        </tp:docstring>
+      </arg>
     </method>
     <method name="AttemptRollback">
       <arg type="b" name="powerwash" direction="in" />
@@ -30,12 +50,26 @@
     </method>
     <method name="ResetStatus">
     </method>
-    <method name="GetStatus">
-      <arg type="x" name="last_checked_time" direction="out" />
-      <arg type="d" name="progress" direction="out" />
-      <arg type="s" name="current_operation" direction="out" />
-      <arg type="s" name="new_version" direction="out" />
-      <arg type="x" name="new_size" direction="out" />
+    <method name="SetDlcActiveValue">
+      <arg type="b" name="is_active" direction="in">
+        <tp:docstring>
+          Whether the DLC is being set to active or inactive.
+        </tp:docstring>
+      </arg>
+      <arg type="s" name="dlc_id" direction="in">
+        <tp:docstring>
+          The ID of the DLC module that will be set to active/inactive.
+        </tp:docstring>
+      </arg>
+    </method>
+    <method name="GetStatusAdvanced">
+      <arg type="ay" name="status" direction="out">
+        <tp:docstring>
+          The current status serialized in a protobuf.
+        </tp:docstring>
+        <annotation name="org.chromium.DBus.Argument.ProtobufClass"
+                    value="update_engine::StatusResult"/>
+      </arg>
     </method>
     <method name="RebootIfNeeded">
     </method>
@@ -80,12 +114,14 @@
     <method name="GetDurationSinceUpdate">
       <arg type="x" name="usec_wallclock" direction="out" />
     </method>
-    <signal name="StatusUpdate">
-      <arg type="x" name="last_checked_time" />
-      <arg type="d" name="progress" />
-      <arg type="s" name="current_operation" />
-      <arg type="s" name="new_version" />
-      <arg type="x" name="new_size" />
+    <signal name="StatusUpdateAdvanced">
+      <arg type="ay" name="status" direction="out">
+        <tp:docstring>
+          The current status serialized in a protobuf.
+        </tp:docstring>
+        <annotation name="org.chromium.DBus.Argument.ProtobufClass"
+                    value="update_engine::StatusResult"/>
+      </arg>
     </signal>
     <method name="GetPrevVersion">
       <arg type="s" name="prev_version" direction="out" />
@@ -96,8 +132,5 @@
     <method name="GetLastAttemptError">
       <arg type="i" name="last_attempt_error" direction="out" />
     </method>
-    <method name="GetEolStatus">
-      <arg type="i" name="eol_status" direction="out" />
-    </method>
   </interface>
 </node>
diff --git a/dlcservice_chromeos.cc b/dlcservice_chromeos.cc
deleted file mode 100644
index e95f08f..0000000
--- a/dlcservice_chromeos.cc
+++ /dev/null
@@ -1,54 +0,0 @@
-//
-// Copyright (C) 2018 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/dlcservice_chromeos.h"
-
-#include <dlcservice/dbus-proxies.h>
-#include <dlcservice/proto_bindings/dlcservice.pb.h>
-
-#include "update_engine/dbus_connection.h"
-
-using std::string;
-using std::vector;
-
-namespace chromeos_update_engine {
-
-std::unique_ptr<DlcServiceInterface> CreateDlcService() {
-  return std::make_unique<DlcServiceChromeOS>();
-}
-
-bool DlcServiceChromeOS::GetInstalled(vector<string>* dlc_module_ids) {
-  if (!dlc_module_ids)
-    return false;
-  org::chromium::DlcServiceInterfaceProxy dlcservice_proxy(
-      DBusConnection::Get()->GetDBus());
-  string dlc_module_list_str;
-  if (!dlcservice_proxy.GetInstalled(&dlc_module_list_str, nullptr)) {
-    LOG(ERROR) << "dlcservice does not return installed DLC module list.";
-    return false;
-  }
-  dlcservice::DlcModuleList dlc_module_list;
-  if (!dlc_module_list.ParseFromString(dlc_module_list_str)) {
-    LOG(ERROR) << "Errors parsing DlcModuleList protobuf.";
-    return false;
-  }
-  for (const auto& dlc_module_info : dlc_module_list.dlc_module_infos()) {
-    dlc_module_ids->emplace_back(dlc_module_info.dlc_id());
-  }
-  return true;
-}
-
-}  // namespace chromeos_update_engine
diff --git a/dlcservice_chromeos.h b/dlcservice_chromeos.h
deleted file mode 100644
index 8d103c1..0000000
--- a/dlcservice_chromeos.h
+++ /dev/null
@@ -1,44 +0,0 @@
-//
-// Copyright (C) 2018 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_DLCSERVICE_CHROMEOS_H_
-#define UPDATE_ENGINE_DLCSERVICE_CHROMEOS_H_
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "update_engine/common/dlcservice_interface.h"
-
-namespace chromeos_update_engine {
-
-// The Chrome OS implementation of the DlcServiceInterface. This interface
-// interacts with dlcservice via D-Bus.
-class DlcServiceChromeOS : public DlcServiceInterface {
- public:
-  DlcServiceChromeOS() = default;
-  ~DlcServiceChromeOS() = default;
-
-  // BootControlInterface overrides.
-  bool GetInstalled(std::vector<std::string>* dlc_module_ids) override;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(DlcServiceChromeOS);
-};
-
-}  // namespace chromeos_update_engine
-
-#endif  // UPDATE_ENGINE_DLCSERVICE_CHROMEOS_H_
diff --git a/download_action.cc b/download_action.cc
new file mode 100644
index 0000000..62a8423
--- /dev/null
+++ b/download_action.cc
@@ -0,0 +1,281 @@
+//
+// Copyright (C) 2011 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/common/download_action.h"
+
+#include <errno.h>
+
+#include <algorithm>
+#include <string>
+
+#include <base/files/file_path.h>
+#include <base/metrics/statistics_recorder.h>
+#include <base/strings/stringprintf.h>
+
+#include "update_engine/common/action_pipe.h"
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/error_code_utils.h"
+#include "update_engine/common/multi_range_http_fetcher.h"
+#include "update_engine/common/prefs_interface.h"
+#include "update_engine/common/utils.h"
+
+using base::FilePath;
+using std::string;
+
+namespace chromeos_update_engine {
+
+DownloadAction::DownloadAction(PrefsInterface* prefs,
+                               BootControlInterface* boot_control,
+                               HardwareInterface* hardware,
+                               HttpFetcher* http_fetcher,
+                               bool interactive)
+    : prefs_(prefs),
+      boot_control_(boot_control),
+      hardware_(hardware),
+      http_fetcher_(new MultiRangeHttpFetcher(http_fetcher)),
+      interactive_(interactive),
+      code_(ErrorCode::kSuccess),
+      delegate_(nullptr) {}
+
+DownloadAction::~DownloadAction() {}
+
+void DownloadAction::PerformAction() {
+  http_fetcher_->set_delegate(this);
+
+  // Get the InstallPlan and read it
+  CHECK(HasInputObject());
+  install_plan_ = GetInputObject();
+  install_plan_.Dump();
+
+  bytes_received_ = 0;
+  bytes_received_previous_payloads_ = 0;
+  bytes_total_ = 0;
+  for (const auto& payload : install_plan_.payloads)
+    bytes_total_ += payload.size;
+
+  if (install_plan_.is_resume) {
+    int64_t payload_index = 0;
+    if (prefs_->GetInt64(kPrefsUpdateStatePayloadIndex, &payload_index) &&
+        static_cast<size_t>(payload_index) < install_plan_.payloads.size()) {
+      // Save the index for the resume payload before downloading any previous
+      // payload, otherwise it will be overwritten.
+      resume_payload_index_ = payload_index;
+      for (int i = 0; i < payload_index; i++)
+        install_plan_.payloads[i].already_applied = true;
+    }
+  }
+  CHECK_GE(install_plan_.payloads.size(), 1UL);
+  if (!payload_)
+    payload_ = &install_plan_.payloads[0];
+
+  LOG(INFO) << "Marking new slot as unbootable";
+  if (!boot_control_->MarkSlotUnbootable(install_plan_.target_slot)) {
+    LOG(WARNING) << "Unable to mark new slot "
+                 << BootControlInterface::SlotName(install_plan_.target_slot)
+                 << ". Proceeding with the update anyway.";
+  }
+
+  StartDownloading();
+}
+
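+// Feeds the manifest bytes cached in prefs to the DeltaPerformer. Returns
+// true only if the cached bytes have the expected size and parse as a valid
+// manifest; otherwise the caller falls back to fetching the manifest over
+// HTTP.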
+bool DownloadAction::LoadCachedManifest(int64_t manifest_size) {
+  std::string cached_manifest_bytes;
+  if (!prefs_->GetString(kPrefsManifestBytes, &cached_manifest_bytes) ||
+      cached_manifest_bytes.size() <= 0) {
+    LOG(INFO) << "Cached Manifest data not found";
+    return false;
+  }
+  if (static_cast<int64_t>(cached_manifest_bytes.size()) != manifest_size) {
+    LOG(WARNING) << "Cached metadata has unexpected size: "
+                 << cached_manifest_bytes.size() << " vs. " << manifest_size;
+    return false;
+  }
+
+  ErrorCode error;
+  const bool success =
+      delta_performer_->Write(
+          cached_manifest_bytes.data(), cached_manifest_bytes.size(), &error) &&
+      delta_performer_->IsManifestValid();
+  if (success) {
+    LOG(INFO) << "Successfully parsed cached manifest";
+  } else {
+    // If parsing the cached data failed, fall back to fetching it over HTTP.
+    LOG(WARNING) << "Cached manifest data failed to load, error code:"
+                 << static_cast<int>(error) << "," << error;
+  }
+  return success;
+}
+
+void DownloadAction::StartDownloading() {
+  download_active_ = true;
+  http_fetcher_->ClearRanges();
+
+  if (delta_performer_ != nullptr) {
+    LOG(INFO) << "Using writer for test.";
+  } else {
+    delta_performer_.reset(new DeltaPerformer(prefs_,
+                                              boot_control_,
+                                              hardware_,
+                                              delegate_,
+                                              &install_plan_,
+                                              payload_,
+                                              interactive_));
+  }
+
+  if (install_plan_.is_resume &&
+      payload_ == &install_plan_.payloads[resume_payload_index_]) {
+    // Resuming an update, so parse the cached manifest first.
+    int64_t manifest_metadata_size = 0;
+    int64_t manifest_signature_size = 0;
+    prefs_->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size);
+    prefs_->GetInt64(kPrefsManifestSignatureSize, &manifest_signature_size);
+
+    // TODO(zhangkelvin) Add unittest for success and fallback route
+    if (!LoadCachedManifest(manifest_metadata_size + manifest_signature_size)) {
+      if (delta_performer_) {
+        // Create a new DeltaPerformer to reset all its state
+        delta_performer_ = std::make_unique<DeltaPerformer>(prefs_,
+                                                            boot_control_,
+                                                            hardware_,
+                                                            delegate_,
+                                                            &install_plan_,
+                                                            payload_,
+                                                            interactive_);
+      }
+      http_fetcher_->AddRange(base_offset_,
+                              manifest_metadata_size + manifest_signature_size);
+    }
+
+    // If there are remaining unprocessed data blobs, fetch them. Be careful
+    // not to request data beyond the end of the payload to avoid 416 HTTP
+    // response error codes.
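+    // The payload blobs follow the manifest metadata and signature in the
+    // stream, so the resume point within the payload is the metadata size
+    // plus the signature size plus the next unprocessed data offset.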
+    int64_t next_data_offset = 0;
+    prefs_->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset);
+    uint64_t resume_offset =
+        manifest_metadata_size + manifest_signature_size + next_data_offset;
+    if (!payload_->size) {
+      http_fetcher_->AddRange(base_offset_ + resume_offset);
+    } else if (resume_offset < payload_->size) {
+      http_fetcher_->AddRange(base_offset_ + resume_offset,
+                              payload_->size - resume_offset);
+    }
+  } else {
+    if (payload_->size) {
+      http_fetcher_->AddRange(base_offset_, payload_->size);
+    } else {
+      // If no payload size is passed we assume we read until the end of the
+      // stream.
+      http_fetcher_->AddRange(base_offset_);
+    }
+  }
+
+  http_fetcher_->BeginTransfer(install_plan_.download_url);
+}
+
+void DownloadAction::SuspendAction() {
+  http_fetcher_->Pause();
+}
+
+void DownloadAction::ResumeAction() {
+  http_fetcher_->Unpause();
+}
+
+void DownloadAction::TerminateProcessing() {
+  if (delta_performer_) {
+    delta_performer_->Close();
+    delta_performer_.reset();
+  }
+  download_active_ = false;
+  // Terminates the transfer. The action is terminated, if necessary, when the
+  // TransferTerminated callback is received.
+  http_fetcher_->TerminateTransfer();
+}
+
+void DownloadAction::SeekToOffset(off_t offset) {
+  bytes_received_ = offset;
+}
+
+bool DownloadAction::ReceivedBytes(HttpFetcher* fetcher,
+                                   const void* bytes,
+                                   size_t length) {
+  bytes_received_ += length;
+  uint64_t bytes_downloaded_total =
+      bytes_received_previous_payloads_ + bytes_received_;
+  if (delegate_ && download_active_) {
+    delegate_->BytesReceived(length, bytes_downloaded_total, bytes_total_);
+  }
+  if (delta_performer_ && !delta_performer_->Write(bytes, length, &code_)) {
+    if (code_ != ErrorCode::kSuccess) {
+      LOG(ERROR) << "Error " << utils::ErrorCodeToString(code_) << " (" << code_
+                 << ") in DeltaPerformer's Write method when "
+                 << "processing the received payload -- Terminating processing";
+    }
+    // Don't tell the action processor that the action is complete until we get
+    // the TransferTerminated callback. Otherwise, this and the HTTP fetcher
+    // objects may get destroyed before all callbacks are complete.
+    TerminateProcessing();
+    return false;
+  }
+
+  return true;
+}
+
+void DownloadAction::TransferComplete(HttpFetcher* fetcher, bool successful) {
+  if (delta_performer_) {
+    LOG_IF(WARNING, delta_performer_->Close() != 0)
+        << "Error closing the writer.";
+  }
+  download_active_ = false;
+  ErrorCode code =
+      successful ? ErrorCode::kSuccess : ErrorCode::kDownloadTransferError;
+  if (code == ErrorCode::kSuccess) {
+    if (delta_performer_ && !payload_->already_applied)
+      code = delta_performer_->VerifyPayload(payload_->hash, payload_->size);
+    if (code == ErrorCode::kSuccess) {
+      CHECK_EQ(install_plan_.payloads.size(), 1UL);
+      // All payloads have been applied and verified.
+      if (delegate_)
+        delegate_->DownloadComplete();
+
+      // Log UpdateEngine.DownloadAction.* histograms to help diagnose
+      // long-blocking operations.
+      std::string histogram_output;
+      base::StatisticsRecorder::WriteGraph("UpdateEngine.DownloadAction.",
+                                           &histogram_output);
+      LOG(INFO) << histogram_output;
+    } else {
+      LOG(ERROR) << "Download of " << install_plan_.download_url
+                 << " failed due to payload verification error.";
+    }
+  }
+
+  // Pass the install plan to the output pipe if we're successful.
+  if (code == ErrorCode::kSuccess && HasOutputPipe())
+    SetOutputObject(install_plan_);
+  processor_->ActionComplete(this, code);
+}
+
+void DownloadAction::TransferTerminated(HttpFetcher* fetcher) {
+  if (code_ != ErrorCode::kSuccess) {
+    processor_->ActionComplete(this, code_);
+  } else if (payload_->already_applied) {
+    LOG(INFO) << "TransferTerminated with ErrorCode::kSuccess when the current "
+                 "payload has already applied, treating as TransferComplete.";
+    TransferComplete(fetcher, true);
+  }
+}
+
+}  // namespace chromeos_update_engine
diff --git a/download_action_android_unittest.cc b/download_action_android_unittest.cc
new file mode 100644
index 0000000..fef2d24
--- /dev/null
+++ b/download_action_android_unittest.cc
@@ -0,0 +1,180 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <unistd.h>
+#include <cstdint>
+#include <memory>
+
+#include <gmock/gmock.h>
+#include <gmock/gmock-actions.h>
+#include <gmock/gmock-function-mocker.h>
+#include <gmock/gmock-spec-builders.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/action_pipe.h"
+#include "update_engine/common/boot_control_stub.h"
+#include "update_engine/common/constants.h"
+#include "update_engine/common/download_action.h"
+#include "update_engine/common/fake_hardware.h"
+#include "update_engine/common/mock_action_processor.h"
+#include "update_engine/common/mock_http_fetcher.h"
+#include "update_engine/common/mock_prefs.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/install_plan.h"
+#include "update_engine/payload_consumer/payload_constants.h"
+#include "update_engine/payload_generator/annotated_operation.h"
+#include "update_engine/payload_generator/payload_file.h"
+#include "update_engine/payload_generator/payload_signer.h"
+
+namespace chromeos_update_engine {
+using testing::_;
+using testing::DoAll;
+using testing::Return;
+using testing::SetArgPointee;
+
+extern const char* kUnittestPrivateKeyPath;
+extern const char* kUnittestPublicKeyPath;
+
+class DownloadActionTest : public ::testing::Test {
+ public:
+  static constexpr int64_t METADATA_SIZE = 1024;
+  static constexpr int64_t SIGNATURE_SIZE = 256;
+  std::shared_ptr<ActionPipe<InstallPlan>> action_pipe{
+      new ActionPipe<InstallPlan>()};
+};
+
+TEST_F(DownloadActionTest, CacheManifestInvalid) {
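+  // The cached manifest bytes are just filler, so LoadCachedManifest() fails
+  // and DownloadAction falls back to fetching the manifest and payload
+  // through the HTTP fetcher; all of |data| should end up downloaded.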
+  std::string data(METADATA_SIZE + SIGNATURE_SIZE, '-');
+  MockPrefs prefs;
+  EXPECT_CALL(prefs, GetInt64(kPrefsUpdateStatePayloadIndex, _))
+      .WillRepeatedly(DoAll(SetArgPointee<1>(0L), Return(true)));
+  EXPECT_CALL(prefs, GetInt64(kPrefsManifestMetadataSize, _))
+      .WillRepeatedly(DoAll(SetArgPointee<1>(METADATA_SIZE), Return(true)));
+  EXPECT_CALL(prefs, GetInt64(kPrefsManifestSignatureSize, _))
+      .WillRepeatedly(DoAll(SetArgPointee<1>(SIGNATURE_SIZE), Return(true)));
+  EXPECT_CALL(prefs, GetInt64(kPrefsUpdateStateNextDataOffset, _))
+      .WillRepeatedly(DoAll(SetArgPointee<1>(0L), Return(true)));
+  EXPECT_CALL(prefs, GetString(kPrefsManifestBytes, _))
+      .WillRepeatedly(DoAll(SetArgPointee<1>(data), Return(true)));
+
+  BootControlStub boot_control;
+  MockHttpFetcher* http_fetcher =
+      new MockHttpFetcher(data.data(), data.size(), nullptr);
+  http_fetcher->set_delay(false);
+  InstallPlan install_plan;
+  auto& payload = install_plan.payloads.emplace_back();
+  install_plan.download_url = "http://fake_url.invalid";
+  payload.size = data.size();
+  payload.payload_urls.emplace_back("http://fake_url.invalid");
+  install_plan.is_resume = true;
+  action_pipe->set_contents(install_plan);
+
+  // Takes ownership of the passed-in HttpFetcher.
+  auto download_action = std::make_unique<DownloadAction>(
+      &prefs, &boot_control, nullptr, http_fetcher, false /* interactive */);
+  download_action->set_in_pipe(action_pipe);
+  MockActionProcessor mock_processor;
+  download_action->SetProcessor(&mock_processor);
+  download_action->PerformAction();
+  ASSERT_EQ(download_action->http_fetcher()->GetBytesDownloaded(), data.size());
+}
+
+TEST_F(DownloadActionTest, CacheManifestValid) {
+  // Create a valid manifest
+  PayloadGenerationConfig config;
+  config.version.major = kMaxSupportedMajorPayloadVersion;
+  config.version.minor = kMaxSupportedMinorPayloadVersion;
+
+  PayloadFile payload_file;
+  ASSERT_TRUE(payload_file.Init(config));
+  PartitionConfig partition_config{"system"};
+  ScopedTempFile partition_file("part-system-XXXXXX", true);
+  ftruncate(partition_file.fd(), 4096);
+  partition_config.size = 4096;
+  partition_config.path = partition_file.path();
+  ASSERT_TRUE(
+      payload_file.AddPartition(partition_config, partition_config, {}, {}, 0));
+  ScopedTempFile blob_file("Blob-XXXXXX");
+  ScopedTempFile manifest_file("Manifest-XXXXXX");
+  uint64_t metadata_size;
+  std::string private_key =
+      test_utils::GetBuildArtifactsPath(kUnittestPrivateKeyPath);
+  payload_file.WritePayload(
+      manifest_file.path(), blob_file.path(), private_key, &metadata_size);
+  uint64_t signature_blob_length = 0;
+  ASSERT_TRUE(PayloadSigner::SignatureBlobLength({private_key},
+                                                 &signature_blob_length));
+  std::string data;
+  ASSERT_TRUE(utils::ReadFile(manifest_file.path(), &data));
+  data.resize(metadata_size + signature_blob_length);
+
+  // Set up the prefs so that the manifest is cached.
+  MockPrefs prefs;
+  EXPECT_CALL(prefs, GetInt64(kPrefsUpdateStatePayloadIndex, _))
+      .WillRepeatedly(DoAll(SetArgPointee<1>(0L), Return(true)));
+  EXPECT_CALL(prefs, GetInt64(kPrefsManifestMetadataSize, _))
+      .WillRepeatedly(DoAll(SetArgPointee<1>(metadata_size), Return(true)));
+  EXPECT_CALL(prefs, GetInt64(kPrefsManifestSignatureSize, _))
+      .WillRepeatedly(
+          DoAll(SetArgPointee<1>(signature_blob_length), Return(true)));
+  EXPECT_CALL(prefs, GetInt64(kPrefsUpdateStateNextDataOffset, _))
+      .WillRepeatedly(DoAll(SetArgPointee<1>(0L), Return(true)));
+  EXPECT_CALL(prefs, GetString(kPrefsManifestBytes, _))
+      .WillRepeatedly(DoAll(SetArgPointee<1>(data), Return(true)));
+  EXPECT_CALL(prefs, GetInt64(kPrefsUpdateStateNextOperation, _))
+      .WillRepeatedly(DoAll(SetArgPointee<1>(0), Return(true)));
+  EXPECT_CALL(prefs, GetInt64(kPrefsUpdateStatePayloadIndex, _))
+      .WillRepeatedly(DoAll(SetArgPointee<1>(0), Return(true)));
+
+  BootControlStub boot_control;
+  MockHttpFetcher* http_fetcher =
+      new MockHttpFetcher(data.data(), data.size(), nullptr);
+  http_fetcher->set_delay(false);
+  InstallPlan install_plan;
+  auto& payload = install_plan.payloads.emplace_back();
+  install_plan.download_url = "http://fake_url.invalid";
+  payload.size = data.size();
+  payload.payload_urls.emplace_back("http://fake_url.invalid");
+  install_plan.is_resume = true;
+  auto& install_part = install_plan.partitions.emplace_back();
+  install_part.source_path = partition_file.path();
+  install_part.target_path = partition_file.path();
+  action_pipe->set_contents(install_plan);
+
+  FakeHardware hardware;
+  // Takes ownership of the passed-in HttpFetcher.
+  auto download_action = std::make_unique<DownloadAction>(
+      &prefs, &boot_control, &hardware, http_fetcher, false /* interactive */);
+
+  auto delta_performer = std::make_unique<DeltaPerformer>(&prefs,
+                                                          &boot_control,
+                                                          &hardware,
+                                                          nullptr,
+                                                          &install_plan,
+                                                          &payload,
+                                                          false);
+  delta_performer->set_public_key_path(kUnittestPublicKeyPath);
+  download_action->SetTestFileWriter(std::move(delta_performer));
+  download_action->set_in_pipe(action_pipe);
+  MockActionProcessor mock_processor;
+  download_action->SetProcessor(&mock_processor);
+  download_action->PerformAction();
+
+  // Manifest is cached, so no data should be downloaded from the HTTP fetcher.
+  ASSERT_EQ(download_action->http_fetcher()->GetBytesDownloaded(), 0UL);
+}
+}  // namespace chromeos_update_engine
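Both tests above rely on the same gMock idiom: stubbing a getter that reports its
result through an out-parameter with DoAll(SetArgPointee<1>(...), Return(true)).
Below is a minimal, self-contained sketch of that idiom against a hypothetical
KeyValueStore interface; the interface, mock class, and key name are
illustrative and not part of update_engine.

#include <cstdint>
#include <string>

#include <gmock/gmock.h>
#include <gtest/gtest.h>

using testing::_;
using testing::DoAll;
using testing::Return;
using testing::SetArgPointee;

// Hypothetical interface standing in for the prefs-style getter used above.
class KeyValueStore {
 public:
  virtual ~KeyValueStore() = default;
  virtual bool GetInt64(const std::string& key, int64_t* value) const = 0;
};

class MockKeyValueStore : public KeyValueStore {
 public:
  MOCK_METHOD(bool, GetInt64, (const std::string&, int64_t*), (const, override));
};

TEST(GMockIdiomSketch, SetArgPointeeFillsOutParam) {
  MockKeyValueStore store;
  // Every call writes 1024 into the out-parameter (argument #1) and reports
  // success, mirroring the prefs expectations in the tests above.
  EXPECT_CALL(store, GetInt64("manifest-metadata-size", _))
      .WillRepeatedly(DoAll(SetArgPointee<1>(1024), Return(true)));

  int64_t value = 0;
  ASSERT_TRUE(store.GetInt64("manifest-metadata-size", &value));
  EXPECT_EQ(value, 1024);
}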
diff --git a/hardware_android.cc b/hardware_android.cc
deleted file mode 100644
index 068468b..0000000
--- a/hardware_android.cc
+++ /dev/null
@@ -1,220 +0,0 @@
-//
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/hardware_android.h"
-
-#include <sys/types.h>
-
-#include <memory>
-
-#include <android-base/properties.h>
-#include <base/files/file_util.h>
-#include <bootloader_message/bootloader_message.h>
-
-#include "update_engine/common/hardware.h"
-#include "update_engine/common/platform_constants.h"
-
-using android::base::GetBoolProperty;
-using android::base::GetIntProperty;
-using android::base::GetProperty;
-using std::string;
-
-namespace chromeos_update_engine {
-
-namespace {
-
-// Android properties that identify the hardware and potentially non-updatable
-// parts of the bootloader (such as the bootloader version and the baseband
-// version).
-const char kPropBootBootloader[] = "ro.boot.bootloader";
-const char kPropBootBaseband[] = "ro.boot.baseband";
-const char kPropProductManufacturer[] = "ro.product.manufacturer";
-const char kPropBootHardwareSKU[] = "ro.boot.hardware.sku";
-const char kPropBootRevision[] = "ro.boot.revision";
-const char kPropBuildDateUTC[] = "ro.build.date.utc";
-
-}  // namespace
-
-namespace hardware {
-
-// Factory defined in hardware.h.
-std::unique_ptr<HardwareInterface> CreateHardware() {
-  return std::make_unique<HardwareAndroid>();
-}
-
-}  // namespace hardware
-
-// In Android there are normally three kinds of builds: eng, userdebug and user.
-// These builds target respectively a developer build, a debuggable version of
-// the final product and the pristine final product the end user will run.
-// Apart from the ro.build.type property name, they differ in the following
-// properties that characterize the builds:
-// * eng builds: ro.secure=0 and ro.debuggable=1
-// * userdebug builds: ro.secure=1 and ro.debuggable=1
-// * user builds: ro.secure=1 and ro.debuggable=0
-//
-// See IsOfficialBuild() and IsNormalMode() for the meaning of these options in
-// Android.
-
-bool HardwareAndroid::IsOfficialBuild() const {
-  // We run an official build iff ro.secure == 1, because we expect the build to
-  // behave like the end user product and check for updates. Note that while
-  // developers are able to build "official builds" by just running "make user",
-  // that will only result in a more restrictive environment. The important part
-  // is that we don't produce and push "non-official" builds to the end user.
-  //
-  // In case of a non-bool value, we take the most restrictive option and
-  // assume we are in an official-build.
-  return GetBoolProperty("ro.secure", true);
-}
-
-bool HardwareAndroid::IsNormalBootMode() const {
-  // We are running in "dev-mode" iff ro.debuggable == 1. In dev-mode the
-  // update_engine will allow extra developers options, such as providing a
-  // different update URL. In case of error, we assume the build is in
-  // normal-mode.
-  return !GetBoolProperty("ro.debuggable", false);
-}
-
-bool HardwareAndroid::AreDevFeaturesEnabled() const {
-  return !IsNormalBootMode();
-}
-
-bool HardwareAndroid::IsOOBEEnabled() const {
-  // No OOBE flow blocking updates for Android-based boards.
-  return false;
-}
-
-bool HardwareAndroid::IsOOBEComplete(base::Time* out_time_of_oobe) const {
-  LOG(WARNING) << "OOBE is not enabled but IsOOBEComplete() called.";
-  if (out_time_of_oobe)
-    *out_time_of_oobe = base::Time();
-  return true;
-}
-
-string HardwareAndroid::GetHardwareClass() const {
-  auto manufacturer = GetProperty(kPropProductManufacturer, "");
-  auto sku = GetProperty(kPropBootHardwareSKU, "");
-  auto revision = GetProperty(kPropBootRevision, "");
-
-  return manufacturer + ":" + sku + ":" + revision;
-}
-
-string HardwareAndroid::GetFirmwareVersion() const {
-  return GetProperty(kPropBootBootloader, "");
-}
-
-string HardwareAndroid::GetECVersion() const {
-  return GetProperty(kPropBootBaseband, "");
-}
-
-int HardwareAndroid::GetMinKernelKeyVersion() const {
-  LOG(WARNING) << "STUB: No Kernel key version is available.";
-  return -1;
-}
-
-int HardwareAndroid::GetMinFirmwareKeyVersion() const {
-  LOG(WARNING) << "STUB: No Firmware key version is available.";
-  return -1;
-}
-
-int HardwareAndroid::GetMaxFirmwareKeyRollforward() const {
-  LOG(WARNING) << "STUB: Getting firmware_max_rollforward is not supported.";
-  return -1;
-}
-
-bool HardwareAndroid::SetMaxFirmwareKeyRollforward(
-    int firmware_max_rollforward) {
-  LOG(WARNING) << "STUB: Setting firmware_max_rollforward is not supported.";
-  return false;
-}
-
-bool HardwareAndroid::SetMaxKernelKeyRollforward(int kernel_max_rollforward) {
-  LOG(WARNING) << "STUB: Setting kernel_max_rollforward is not supported.";
-  return false;
-}
-
-int HardwareAndroid::GetPowerwashCount() const {
-  LOG(WARNING) << "STUB: Assuming no factory reset was performed.";
-  return 0;
-}
-
-bool HardwareAndroid::SchedulePowerwash(bool is_rollback) {
-  LOG(INFO) << "Scheduling a powerwash to BCB.";
-  LOG_IF(WARNING, is_rollback) << "is_rollback was true but isn't supported.";
-  string err;
-  if (!update_bootloader_message({"--wipe_data", "--reason=wipe_data_from_ota"},
-                                 &err)) {
-    LOG(ERROR) << "Failed to update bootloader message: " << err;
-    return false;
-  }
-  return true;
-}
-
-bool HardwareAndroid::CancelPowerwash() {
-  string err;
-  if (!clear_bootloader_message(&err)) {
-    LOG(ERROR) << "Failed to clear bootloader message: " << err;
-    return false;
-  }
-  return true;
-}
-
-bool HardwareAndroid::GetNonVolatileDirectory(base::FilePath* path) const {
-  base::FilePath local_path(constants::kNonVolatileDirectory);
-  if (!base::PathExists(local_path)) {
-    LOG(ERROR) << "Non-volatile directory not found: " << local_path.value();
-    return false;
-  }
-  *path = local_path;
-  return true;
-}
-
-bool HardwareAndroid::GetPowerwashSafeDirectory(base::FilePath* path) const {
-  // On Android, we don't have a directory persisted across powerwash.
-  return false;
-}
-
-int64_t HardwareAndroid::GetBuildTimestamp() const {
-  return GetIntProperty<int64_t>(kPropBuildDateUTC, 0);
-}
-
-// Returns true if the device runs an userdebug build, and explicitly allows OTA
-// downgrade.
-bool HardwareAndroid::AllowDowngrade() const {
-  return GetBoolProperty("ro.ota.allow_downgrade", false) &&
-         GetBoolProperty("ro.debuggable", false);
-}
-
-bool HardwareAndroid::GetFirstActiveOmahaPingSent() const {
-  LOG(WARNING) << "STUB: Assuming first active omaha was never set.";
-  return false;
-}
-
-bool HardwareAndroid::SetFirstActiveOmahaPingSent() {
-  LOG(WARNING) << "STUB: Assuming first active omaha is set.";
-  // We will set it true, so its failure doesn't cause escalation.
-  return true;
-}
-
-void HardwareAndroid::SetWarmReset(bool warm_reset) {
-  constexpr char warm_reset_prop[] = "ota.warm_reset";
-  if (!android::base::SetProperty(warm_reset_prop, warm_reset ? "1" : "0")) {
-    LOG(WARNING) << "Failed to set prop " << warm_reset_prop;
-  }
-}
-
-}  // namespace chromeos_update_engine
diff --git a/image_properties_android.cc b/image_properties_android.cc
deleted file mode 100644
index 2d418b3..0000000
--- a/image_properties_android.cc
+++ /dev/null
@@ -1,246 +0,0 @@
-//
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/image_properties.h"
-
-#include <fcntl.h>
-
-#include <string>
-
-#include <android-base/properties.h>
-#include <base/logging.h>
-#include <base/strings/string_util.h>
-#include <bootloader_message/bootloader_message.h>
-#include <brillo/osrelease_reader.h>
-#include <brillo/strings/string_utils.h>
-
-#include "update_engine/common/boot_control_interface.h"
-#include "update_engine/common/constants.h"
-#include "update_engine/common/platform_constants.h"
-#include "update_engine/common/prefs_interface.h"
-#include "update_engine/common/utils.h"
-#include "update_engine/system_state.h"
-
-using android::base::GetProperty;
-using std::string;
-
-namespace chromeos_update_engine {
-
-namespace {
-
-// Build time properties name used in Android Things.
-const char kProductId[] = "product_id";
-const char kProductVersion[] = "product_version";
-const char kSystemId[] = "system_id";
-const char kSystemVersion[] = "system_version";
-
-// The path to the product_components file which stores the version of each
-// components in OEM partition.
-const char kProductComponentsPath[] = "/oem/os-release.d/product_components";
-
-// Prefs used to store the powerwash settings.
-const char kPrefsImgPropPowerwashAllowed[] = "img-prop-powerwash-allowed";
-
-// System properties that identifies the "board".
-const char kPropProductName[] = "ro.product.name";
-const char kPropBuildFingerprint[] = "ro.build.fingerprint";
-const char kPropBuildType[] = "ro.build.type";
-
-// Default channel from factory.prop
-const char kPropDefaultChannel[] = "ro.update.default_channel";
-
-// A prefix added to the path, used for testing.
-const char* root_prefix = nullptr;
-
-string GetStringWithDefault(const brillo::OsReleaseReader& osrelease,
-                            const string& key,
-                            const string& default_value) {
-  string result;
-  if (osrelease.GetString(key, &result))
-    return result;
-  LOG(INFO) << "Cannot load ImageProperty " << key << ", using default value "
-            << default_value;
-  return default_value;
-}
-
-// Open misc partition for read or write and output the fd in |out_fd|.
-bool OpenMisc(bool write, int* out_fd) {
-  string misc_device;
-  int flags = write ? O_WRONLY | O_SYNC : O_RDONLY;
-  if (root_prefix) {
-    // Use a file for unittest and create one if doesn't exist.
-    misc_device = base::FilePath(root_prefix).Append("misc").value();
-    if (write)
-      flags |= O_CREAT;
-  } else {
-    string err;
-    misc_device = get_bootloader_message_blk_device(&err);
-    if (misc_device.empty()) {
-      LOG(ERROR) << "Unable to get misc block device: " << err;
-      return false;
-    }
-  }
-
-  int fd = HANDLE_EINTR(open(misc_device.c_str(), flags, 0600));
-  if (fd < 0) {
-    PLOG(ERROR) << "Opening misc failed";
-    return false;
-  }
-  *out_fd = fd;
-  return true;
-}
-
-// The offset and size of the channel field in misc partition.
-constexpr size_t kChannelOffset =
-    BOOTLOADER_MESSAGE_OFFSET_IN_MISC +
-    offsetof(bootloader_message_ab, update_channel);
-constexpr size_t kChannelSize = sizeof(bootloader_message_ab::update_channel);
-
-// Read channel from misc partition to |out_channel|, return false if unable to
-// read misc or no channel is set in misc.
-bool ReadChannelFromMisc(string* out_channel) {
-  int fd;
-  TEST_AND_RETURN_FALSE(OpenMisc(false, &fd));
-  ScopedFdCloser fd_closer(&fd);
-  char channel[kChannelSize] = {0};
-  ssize_t bytes_read = 0;
-  if (!utils::PReadAll(
-          fd, channel, kChannelSize - 1, kChannelOffset, &bytes_read) ||
-      bytes_read != kChannelSize - 1) {
-    PLOG(ERROR) << "Reading update channel from misc failed";
-    return false;
-  }
-  if (channel[0] == '\0') {
-    LOG(INFO) << "No channel set in misc.";
-    return false;
-  }
-  if (!base::EndsWith(channel, "-channel", base::CompareCase::SENSITIVE)) {
-    LOG(ERROR) << "Channel " << channel << " doesn't end with -channel.";
-    return false;
-  }
-  out_channel->assign(channel);
-  return true;
-}
-
-// Write |in_channel| to misc partition, return false if failed to write.
-bool WriteChannelToMisc(const string& in_channel) {
-  int fd;
-  TEST_AND_RETURN_FALSE(OpenMisc(true, &fd));
-  ScopedFdCloser fd_closer(&fd);
-  if (in_channel.size() >= kChannelSize) {
-    LOG(ERROR) << "Channel name is too long: " << in_channel
-               << ", the maximum length is " << kChannelSize - 1;
-    return false;
-  }
-  char channel[kChannelSize] = {0};
-  memcpy(channel, in_channel.data(), in_channel.size());
-  if (!utils::PWriteAll(fd, channel, kChannelSize, kChannelOffset)) {
-    PLOG(ERROR) << "Writing update channel to misc failed";
-    return false;
-  }
-  return true;
-}
-
-string GetTargetChannel() {
-  string channel;
-  if (!ReadChannelFromMisc(&channel))
-    channel = GetProperty(kPropDefaultChannel, "stable-channel");
-  return channel;
-}
-}  // namespace
-
-namespace test {
-void SetImagePropertiesRootPrefix(const char* test_root_prefix) {
-  root_prefix = test_root_prefix;
-}
-}  // namespace test
-
-ImageProperties LoadImageProperties(SystemState* system_state) {
-  ImageProperties result;
-
-  brillo::OsReleaseReader osrelease;
-  if (root_prefix)
-    osrelease.LoadTestingOnly(base::FilePath(root_prefix));
-  else
-    osrelease.Load();
-  result.product_id =
-      GetStringWithDefault(osrelease, kProductId, "invalid-product");
-  result.system_id = GetStringWithDefault(
-      osrelease, kSystemId, "developer-boards:brillo-starter-board");
-  // Update the system id to match the prefix of product id for testing.
-  string prefix, not_used, system_id;
-  if (brillo::string_utils::SplitAtFirst(
-          result.product_id, ":", &prefix, &not_used, false) &&
-      brillo::string_utils::SplitAtFirst(
-          result.system_id, ":", &not_used, &system_id, false)) {
-    result.system_id = prefix + ":" + system_id;
-  }
-  result.canary_product_id = result.product_id;
-  result.version = GetStringWithDefault(osrelease, kProductVersion, "0.0.0.0");
-  result.system_version =
-      GetStringWithDefault(osrelease, kSystemVersion, "0.0.0.0");
-  // Can't read it with OsReleaseReader because it has multiple lines.
-  utils::ReadFile(kProductComponentsPath, &result.product_components);
-
-  result.board = GetProperty(kPropProductName, "brillo");
-  result.build_fingerprint = GetProperty(kPropBuildFingerprint, "none");
-  result.build_type = GetProperty(kPropBuildType, "");
-
-  // Android doesn't have channel information in system image, we try to read
-  // the channel of current slot from prefs and then fallback to use the
-  // persisted target channel as current channel.
-  string current_channel_key =
-      kPrefsChannelOnSlotPrefix +
-      std::to_string(system_state->boot_control()->GetCurrentSlot());
-  string current_channel;
-  if (!system_state->prefs()->Exists(current_channel_key) ||
-      !system_state->prefs()->GetString(current_channel_key, &current_channel))
-    current_channel = GetTargetChannel();
-  result.current_channel = current_channel;
-  result.allow_arbitrary_channels = true;
-
-  // Brillo only supports the official omaha URL.
-  result.omaha_url = constants::kOmahaDefaultProductionURL;
-
-  return result;
-}
-
-MutableImageProperties LoadMutableImageProperties(SystemState* system_state) {
-  MutableImageProperties result;
-  result.target_channel = GetTargetChannel();
-  if (!system_state->prefs()->GetBoolean(kPrefsImgPropPowerwashAllowed,
-                                         &result.is_powerwash_allowed)) {
-    result.is_powerwash_allowed = false;
-  }
-  return result;
-}
-
-bool StoreMutableImageProperties(SystemState* system_state,
-                                 const MutableImageProperties& properties) {
-  bool ret = true;
-  if (!WriteChannelToMisc(properties.target_channel))
-    ret = false;
-  if (!system_state->prefs()->SetBoolean(kPrefsImgPropPowerwashAllowed,
-                                         properties.is_powerwash_allowed))
-    ret = false;
-  return ret;
-}
-
-void LogImageProperties() {
-  // TODO(*): Implement this.
-}
-
-}  // namespace chromeos_update_engine
diff --git a/image_properties_android_unittest.cc b/image_properties_android_unittest.cc
deleted file mode 100644
index 607284a..0000000
--- a/image_properties_android_unittest.cc
+++ /dev/null
@@ -1,123 +0,0 @@
-//
-// Copyright (C) 2017 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/image_properties.h"
-
-#include <string>
-
-#include <base/files/file_util.h>
-#include <base/files/scoped_temp_dir.h>
-#include <gtest/gtest.h>
-
-#include "update_engine/common/constants.h"
-#include "update_engine/common/fake_prefs.h"
-#include "update_engine/common/test_utils.h"
-#include "update_engine/fake_system_state.h"
-
-using chromeos_update_engine::test_utils::WriteFileString;
-using std::string;
-
-namespace chromeos_update_engine {
-
-class ImagePropertiesTest : public ::testing::Test {
- protected:
-  void SetUp() override {
-    // Create a uniquely named test directory.
-    ASSERT_TRUE(tempdir_.CreateUniqueTempDir());
-    osrelease_dir_ = tempdir_.GetPath().Append("etc/os-release.d");
-    EXPECT_TRUE(base::CreateDirectory(osrelease_dir_));
-    test::SetImagePropertiesRootPrefix(tempdir_.GetPath().value().c_str());
-  }
-
-  void WriteOsRelease(const string& key, const string& value) {
-    ASSERT_TRUE(WriteFileString(osrelease_dir_.Append(key).value(), value));
-  }
-
-  void WriteChannel(const string& channel) {
-    string misc(2080, '\0');
-    misc += channel;
-    misc.resize(4096);
-    ASSERT_TRUE(
-        WriteFileString(tempdir_.GetPath().Append("misc").value(), misc));
-  }
-
-  FakeSystemState fake_system_state_;
-
-  base::ScopedTempDir tempdir_;
-  base::FilePath osrelease_dir_;
-};
-
-TEST_F(ImagePropertiesTest, SimpleTest) {
-  WriteOsRelease("product_id", "abc");
-  WriteOsRelease("system_id", "def");
-  WriteOsRelease("product_version", "1.2.3.4");
-  WriteOsRelease("system_version", "5.6.7.8");
-  ImageProperties props = LoadImageProperties(&fake_system_state_);
-  EXPECT_EQ("abc", props.product_id);
-  EXPECT_EQ("def", props.system_id);
-  EXPECT_EQ("1.2.3.4", props.version);
-  EXPECT_EQ("5.6.7.8", props.system_version);
-  EXPECT_EQ("stable-channel", props.current_channel);
-  EXPECT_EQ(constants::kOmahaDefaultProductionURL, props.omaha_url);
-}
-
-TEST_F(ImagePropertiesTest, IDPrefixTest) {
-  WriteOsRelease("product_id", "abc:def");
-  WriteOsRelease("system_id", "foo:bar");
-  ImageProperties props = LoadImageProperties(&fake_system_state_);
-  EXPECT_EQ("abc:def", props.product_id);
-  EXPECT_EQ("abc:bar", props.system_id);
-}
-
-TEST_F(ImagePropertiesTest, IDInvalidPrefixTest) {
-  WriteOsRelease("product_id", "def");
-  WriteOsRelease("system_id", "foo:bar");
-  ImageProperties props = LoadImageProperties(&fake_system_state_);
-  EXPECT_EQ("def", props.product_id);
-  EXPECT_EQ("foo:bar", props.system_id);
-
-  WriteOsRelease("product_id", "abc:def");
-  WriteOsRelease("system_id", "bar");
-  props = LoadImageProperties(&fake_system_state_);
-  EXPECT_EQ("abc:def", props.product_id);
-  EXPECT_EQ("bar", props.system_id);
-}
-
-TEST_F(ImagePropertiesTest, LoadChannelTest) {
-  WriteChannel("unittest-channel");
-  ImageProperties props = LoadImageProperties(&fake_system_state_);
-  EXPECT_EQ("unittest-channel", props.current_channel);
-}
-
-TEST_F(ImagePropertiesTest, DefaultStableChannelTest) {
-  WriteChannel("");
-  ImageProperties props = LoadImageProperties(&fake_system_state_);
-  EXPECT_EQ("stable-channel", props.current_channel);
-}
-
-TEST_F(ImagePropertiesTest, StoreLoadMutableChannelTest) {
-  FakePrefs prefs;
-  fake_system_state_.set_prefs(&prefs);
-  WriteChannel("previous-channel");
-  MutableImageProperties props;
-  props.target_channel = "new-channel";
-  EXPECT_TRUE(StoreMutableImageProperties(&fake_system_state_, props));
-  MutableImageProperties loaded_props =
-      LoadMutableImageProperties(&fake_system_state_);
-  EXPECT_EQ(props.target_channel, loaded_props.target_channel);
-}
-
-}  // namespace chromeos_update_engine
diff --git a/init/update-engine.conf b/init/update-engine.conf
index d3681db..36c89d7 100644
--- a/init/update-engine.conf
+++ b/init/update-engine.conf
@@ -25,6 +25,7 @@
 # The default is 10 failures every 5 seconds, but even if we crash early, it is
 # hard to catch that. So here we set the crash rate as 10 failures every 20
 # seconds which will include the default and more.
+respawn
 respawn limit 10 20
 
 expect fork
@@ -36,7 +37,17 @@
 # Put update_engine process in its own cgroup.
 # Default cpu.shares is 1024.
 post-start script
-  cgroup_dir="/sys/fs/cgroup/cpu/${UPSTART_JOB}"
-  mkdir -p "${cgroup_dir}"
-  echo $(status | cut -f 4 -d ' ') > "${cgroup_dir}/tasks"
+  pid=$(status | cut -f 4 -d ' ')
+
+  cgroup_cpu_dir="/sys/fs/cgroup/cpu/${UPSTART_JOB}"
+  mkdir -p "${cgroup_cpu_dir}"
+  echo ${pid} > "${cgroup_cpu_dir}/tasks"
+
+  # Assigns net_cls handle 1:1 to packets generated from update_engine. For
+  # routing and tagging purposes, that value will be redefined in
+  # patchpanel/routing_service.h.
+  cgroup_net_cls_dir="/sys/fs/cgroup/net_cls/${UPSTART_JOB}"
+  mkdir -p "${cgroup_net_cls_dir}"
+  echo ${pid} > "${cgroup_net_cls_dir}/tasks"
+  echo "0x10001" > "${cgroup_net_cls_dir}/net_cls.classid"
 end script
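For reference, the 0x10001 written to net_cls.classid above is the packed form
of the "handle 1:1" mentioned in the comment: the kernel's net_cls controller
encodes the handle as 0xAAAABBBB, with the major number in the upper 16 bits and
the minor number in the lower 16 bits. A small C++ illustration of that encoding
(illustrative only, not part of the patch):

#include <cstdint>
#include <cstdio>

// Pack a net_cls handle "major:minor" into the 0xAAAABBBB classid format.
constexpr uint32_t MakeNetClsClassId(uint16_t major, uint16_t minor) {
  return (static_cast<uint32_t>(major) << 16) | minor;
}

static_assert(MakeNetClsClassId(1, 1) == 0x10001, "handle 1:1 == 0x10001");

int main() {
  std::printf("0x%x\n", MakeNetClsClassId(1, 1));  // Prints 0x10001.
  return 0;
}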
diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc
index ce3475d..1599aac 100644
--- a/libcurl_http_fetcher.cc
+++ b/libcurl_http_fetcher.cc
@@ -16,6 +16,8 @@
 
 #include "update_engine/libcurl_http_fetcher.h"
 
+#include <netinet/in.h>
+#include <resolv.h>
 #include <sys/types.h>
 #include <unistd.h>
 
@@ -26,8 +28,10 @@
 #include <base/format_macros.h>
 #include <base/location.h>
 #include <base/logging.h>
+#include <base/strings/string_split.h>
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
+#include <base/threading/thread_task_runner_handle.h>
 
 #ifdef __ANDROID__
 #include <cutils/qtaguid.h>
@@ -75,18 +79,11 @@
 #ifdef __ANDROID__
   qtaguid_untagSocket(item);
 #endif  // __ANDROID__
+
   LibcurlHttpFetcher* fetcher = static_cast<LibcurlHttpFetcher*>(clientp);
   // Stop watching the socket before closing it.
-  for (size_t t = 0; t < arraysize(fetcher->fd_task_maps_); ++t) {
-    const auto fd_task_pair = fetcher->fd_task_maps_[t].find(item);
-    if (fd_task_pair != fetcher->fd_task_maps_[t].end()) {
-      if (!MessageLoop::current()->CancelTask(fd_task_pair->second)) {
-        LOG(WARNING) << "Error canceling the watch task "
-                     << fd_task_pair->second << " for "
-                     << (t ? "writing" : "reading") << " the fd " << item;
-      }
-      fetcher->fd_task_maps_[t].erase(item);
-    }
+  for (size_t t = 0; t < base::size(fetcher->fd_controller_maps_); ++t) {
+    fetcher->fd_controller_maps_[t].erase(item);
   }
 
   // Documentation for this callback says to return 0 on success or 1 on error.
@@ -269,11 +266,11 @@
     } else if (base::StartsWith(
                    url_, "https://", base::CompareCase::INSENSITIVE_ASCII)) {
       SetCurlOptionsForHttps();
-#if !USE_OMAHA
+#ifdef __ANDROID__
     } else if (base::StartsWith(
                    url_, "file://", base::CompareCase::INSENSITIVE_ASCII)) {
       SetCurlOptionsForFile();
-#endif
+#endif  // __ANDROID__
     } else {
       LOG(ERROR) << "Received invalid URI: " << url_;
       // Lock down to no protocol supported for the transfer.
@@ -305,6 +302,7 @@
   LOG(INFO) << "Setting up curl options for HTTPS";
   CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_SSL_VERIFYPEER, 1), CURLE_OK);
   CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_SSL_VERIFYHOST, 2), CURLE_OK);
+  CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_CAINFO, nullptr), CURLE_OK);
   CHECK_EQ(curl_easy_setopt(
                curl_handle_, CURLOPT_CAPATH, constants::kCACertificatesPath),
            CURLE_OK);
@@ -391,6 +389,37 @@
   extra_headers_[base::ToLowerASCII(header_name)] = header_line;
 }
 
+// Input: |header_name|. Output: |header_value|.
+// Example:
+//   extra_headers_ = { {"foo":"foo: 123"}, {"bar":"bar:"} }
+//   string tmp = "gibberish";
+//   Case 1:
+//     GetHeader("foo", &tmp) -> tmp = "123", return true.
+//   Case 2:
+//     GetHeader("bar", &tmp) -> tmp = "", return true.
+//   Case 3:
+//     GetHeader("moo", &tmp) -> tmp = "", return false.
+bool LibcurlHttpFetcher::GetHeader(const string& header_name,
+                                   string* header_value) const {
+  // Initially clear |header_value| to handle both success and failure without
+  // leaving |header_value| in an unclear state.
+  header_value->clear();
+  auto header_key = base::ToLowerASCII(header_name);
+  auto header_line_itr = extra_headers_.find(header_key);
+  // If the |header_name| was never set, indicate so by returning false.
+  if (header_line_itr == extra_headers_.end())
+    return false;
+  // |SetHeader()| verifies that |header_name| does not include ":", so finding
+  // the first index of ":" is a safe operation.
+  auto header_line = header_line_itr->second;
+  *header_value = header_line.substr(header_line.find(':') + 1);
+  // The following is necessary to remove the leading ' ' before the header
+  // value, which was added only if the |header_value| passed to |SetHeader()|
+  // was a non-empty string.
+  header_value->erase(0, 1);
+  return true;
+}
+
 void LibcurlHttpFetcher::CurlPerformOnce() {
   CHECK(transfer_in_progress_);
   int running_handles = 0;
@@ -406,6 +435,18 @@
     }
   }
 
+  // When retcode is not |CURLM_OK| at this point, libcurl has an internal error
+  // that it is less likely to recover from (libcurl bug, out-of-memory, etc.).
+  // In case of an update check, we send UMA metrics and log the error.
+  if (is_update_check_ &&
+      (retcode == CURLM_OUT_OF_MEMORY || retcode == CURLM_INTERNAL_ERROR)) {
+    auxiliary_error_code_ = ErrorCode::kInternalLibCurlError;
+    LOG(ERROR) << "curl_multi_perform is in an unrecoverable error condition: "
+               << retcode;
+  } else if (retcode != CURLM_OK) {
+    LOG(ERROR) << "curl_multi_perform returns error: " << retcode;
+  }
+
   // If the transfer completes while paused, we should ignore the failure once
   // the fetcher is unpaused.
   if (running_handles == 0 && transfer_paused_ && !ignore_failure_) {
@@ -417,6 +458,18 @@
     // There's either more work to do or we are paused, so we just keep the
     // file descriptors to watch up to date and exit, until we are done with the
     // work and we are not paused.
+    //
+    // When there's no |base::SingleThreadTaskRunner| on the current thread,
+    // it's not possible to watch file descriptors. Just poll again later. This
+    // usually happens when |brillo::FakeMessageLoop| is used.
+    if (!base::ThreadTaskRunnerHandle::IsSet()) {
+      MessageLoop::current()->PostDelayedTask(
+          FROM_HERE,
+          base::Bind(&LibcurlHttpFetcher::CurlPerformOnce,
+                     base::Unretained(this)),
+          TimeDelta::FromSeconds(1));
+      return;
+    }
     SetupMessageLoopSources();
     return;
   }
@@ -428,13 +481,35 @@
   if (http_response_code_) {
     LOG(INFO) << "HTTP response code: " << http_response_code_;
     no_network_retry_count_ = 0;
+    unresolved_host_state_machine_.UpdateState(false);
   } else {
     LOG(ERROR) << "Unable to get http response code.";
+    CURLcode curl_code = GetCurlCode();
+    LOG(ERROR) << "Return code for the transfer: " << curl_code;
+    if (curl_code == CURLE_COULDNT_RESOLVE_HOST) {
+      LOG(ERROR) << "libcurl can not resolve host.";
+      unresolved_host_state_machine_.UpdateState(true);
+      auxiliary_error_code_ = ErrorCode::kUnresolvedHostError;
+    }
   }
 
   // we're done!
   CleanUp();
 
+  if (unresolved_host_state_machine_.GetState() ==
+      UnresolvedHostStateMachine::State::kRetry) {
+    // Based on
+    // https://curl.haxx.se/docs/todo.html#updated_DNS_server_while_running,
+    // the update_engine process should call res_init() and unconditionally
+    // retry.
+    res_init();
+    no_network_max_retries_++;
+    LOG(INFO) << "Will retry after reloading resolv.conf because last attempt "
+                 "failed to resolve host.";
+  } else if (unresolved_host_state_machine_.GetState() ==
+             UnresolvedHostStateMachine::State::kRetriedSuccess) {
+    auxiliary_error_code_ = ErrorCode::kUnresolvedHostRecovered;
+  }
+
   // TODO(petkov): This temporary code tries to deal with the case where the
   // update engine performs an update check while the network is not ready
   // (e.g., right after resume). Longer term, we should check if the network
@@ -615,15 +690,15 @@
 
   // We should iterate through all file descriptors up to libcurl's fd_max or
   // the highest one we're tracking, whichever is larger.
-  for (size_t t = 0; t < arraysize(fd_task_maps_); ++t) {
-    if (!fd_task_maps_[t].empty())
-      fd_max = max(fd_max, fd_task_maps_[t].rbegin()->first);
+  for (size_t t = 0; t < base::size(fd_controller_maps_); ++t) {
+    if (!fd_controller_maps_[t].empty())
+      fd_max = max(fd_max, fd_controller_maps_[t].rbegin()->first);
   }
 
   // For each fd, if we're not tracking it, track it. If we are tracking it, but
   // libcurl doesn't care about it anymore, stop tracking it. After this loop,
-  // there should be exactly as many tasks scheduled in fd_task_maps_[0|1] as
-  // there are read/write fds that we're tracking.
+  // there should be exactly as many tasks scheduled in
+  // fd_controller_maps_[0|1] as there are read/write fds that we're tracking.
   for (int fd = 0; fd <= fd_max; ++fd) {
     // Note that fd_exc is unused in the current version of libcurl so is_exc
     // should always be false.
@@ -632,21 +707,14 @@
         is_exc || (FD_ISSET(fd, &fd_read) != 0),  // track 0 -- read
         is_exc || (FD_ISSET(fd, &fd_write) != 0)  // track 1 -- write
     };
-    MessageLoop::WatchMode watch_modes[2] = {
-        MessageLoop::WatchMode::kWatchRead,
-        MessageLoop::WatchMode::kWatchWrite,
-    };
 
-    for (size_t t = 0; t < arraysize(fd_task_maps_); ++t) {
-      auto fd_task_it = fd_task_maps_[t].find(fd);
-      bool tracked = fd_task_it != fd_task_maps_[t].end();
+    for (size_t t = 0; t < base::size(fd_controller_maps_); ++t) {
+      bool tracked =
+          fd_controller_maps_[t].find(fd) != fd_controller_maps_[t].end();
 
       if (!must_track[t]) {
         // If we have an outstanding io_channel, remove it.
-        if (tracked) {
-          MessageLoop::current()->CancelTask(fd_task_it->second);
-          fd_task_maps_[t].erase(fd_task_it);
-        }
+        fd_controller_maps_[t].erase(fd);
         continue;
       }
 
@@ -655,14 +723,21 @@
         continue;
 
       // Track a new fd.
-      fd_task_maps_[t][fd] = MessageLoop::current()->WatchFileDescriptor(
-          FROM_HERE,
-          fd,
-          watch_modes[t],
-          true,  // persistent
-          base::Bind(&LibcurlHttpFetcher::CurlPerformOnce,
-                     base::Unretained(this)));
-
+      switch (t) {
+        case 0:  // Read
+          fd_controller_maps_[t][fd] =
+              base::FileDescriptorWatcher::WatchReadable(
+                  fd,
+                  base::BindRepeating(&LibcurlHttpFetcher::CurlPerformOnce,
+                                      base::Unretained(this)));
+          break;
+        case 1:  // Write
+          fd_controller_maps_[t][fd] =
+              base::FileDescriptorWatcher::WatchWritable(
+                  fd,
+                  base::BindRepeating(&LibcurlHttpFetcher::CurlPerformOnce,
+                                      base::Unretained(this)));
+      }
       static int io_counter = 0;
       io_counter++;
       if (io_counter % 50 == 0) {
@@ -714,15 +789,8 @@
   MessageLoop::current()->CancelTask(timeout_id_);
   timeout_id_ = MessageLoop::kTaskIdNull;
 
-  for (size_t t = 0; t < arraysize(fd_task_maps_); ++t) {
-    for (const auto& fd_taks_pair : fd_task_maps_[t]) {
-      if (!MessageLoop::current()->CancelTask(fd_taks_pair.second)) {
-        LOG(WARNING) << "Error canceling the watch task " << fd_taks_pair.second
-                     << " for " << (t ? "writing" : "reading") << " the fd "
-                     << fd_taks_pair.first;
-      }
-    }
-    fd_task_maps_[t].clear();
+  for (size_t t = 0; t < base::size(fd_controller_maps_); ++t) {
+    fd_controller_maps_[t].clear();
   }
 
   if (curl_http_headers_) {
@@ -755,6 +823,66 @@
                                CURLINFO_RESPONSE_CODE,
                                &http_response_code) == CURLE_OK) {
     http_response_code_ = static_cast<int>(http_response_code);
+  } else {
+    LOG(ERROR) << "Unable to get http response code from curl_easy_getinfo";
+  }
+}
+
+CURLcode LibcurlHttpFetcher::GetCurlCode() {
+  CURLcode curl_code = CURLE_OK;
+  while (true) {
+    // Repeated calls to |curl_multi_info_read| will return a new struct each
+    // time, until a NULL is returned as a signal that there is no more to get
+    // at this point.
+    int msgs_in_queue;
+    CURLMsg* curl_msg =
+        curl_multi_info_read(curl_multi_handle_, &msgs_in_queue);
+    if (curl_msg == nullptr)
+      break;
+    // When |curl_msg->msg| is |CURLMSG_DONE|, a transfer of an easy handle is
+    // done, and |curl_msg->data| contains the return code for this transfer.
+    if (curl_msg->msg == CURLMSG_DONE) {
+      // Make sure |curl_multi_handle_| has one and only one easy handle
+      // |curl_handle_|.
+      CHECK_EQ(curl_handle_, curl_msg->easy_handle);
+      // Transfer return code reference:
+      // https://curl.haxx.se/libcurl/c/libcurl-errors.html
+      curl_code = curl_msg->data.result;
+    }
+  }
+
+  // Gets connection error if exists.
+  long connect_error = 0;  // NOLINT(runtime/int) - curl needs long.
+  CURLcode res =
+      curl_easy_getinfo(curl_handle_, CURLINFO_OS_ERRNO, &connect_error);
+  if (res == CURLE_OK && connect_error) {
+    LOG(ERROR) << "Connect error code from the OS: " << connect_error;
+  }
+
+  return curl_code;
+}
+
+void UnresolvedHostStateMachine::UpdateState(bool failed_to_resolve_host) {
+  switch (state_) {
+    case State::kInit:
+      if (failed_to_resolve_host) {
+        state_ = State::kRetry;
+      }
+      break;
+    case State::kRetry:
+      if (failed_to_resolve_host) {
+        state_ = State::kNotRetry;
+      } else {
+        state_ = State::kRetriedSuccess;
+      }
+      break;
+    case State::kNotRetry:
+      break;
+    case State::kRetriedSuccess:
+      break;
+    default:
+      NOTREACHED();
+      break;
   }
 }
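The GetHeader() added above simply reverses what SetHeader() stores
("name: value", keyed by the lower-cased name): it returns everything after the
first ':' minus the single space that is present only for non-empty values. A
standalone sketch of that round trip, using free functions and a plain std::map
instead of the fetcher class (the lower-casing of the key is omitted; names are
illustrative):

#include <cassert>
#include <map>
#include <string>

std::map<std::string, std::string> extra_headers;

// Stores "name: value", or "name:" when the value is empty.
void SetHeaderSketch(const std::string& name, const std::string& value) {
  extra_headers[name] = value.empty() ? name + ":" : name + ": " + value;
}

// Returns everything after the first ':', dropping the leading ' ' that is
// present only for non-empty values.
bool GetHeaderSketch(const std::string& name, std::string* value) {
  value->clear();
  auto it = extra_headers.find(name);
  if (it == extra_headers.end())
    return false;
  *value = it->second.substr(it->second.find(':') + 1);
  value->erase(0, 1);
  return true;
}

int main() {
  std::string out;
  SetHeaderSketch("foo", "123");
  assert(GetHeaderSketch("foo", &out) && out == "123");
  SetHeaderSketch("bar", "");
  assert(GetHeaderSketch("bar", &out) && out.empty());
  assert(!GetHeaderSketch("moo", &out) && out.empty());
  return 0;
}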
 
diff --git a/libcurl_http_fetcher.h b/libcurl_http_fetcher.h
index 25a2df3..4e91b69 100644
--- a/libcurl_http_fetcher.h
+++ b/libcurl_http_fetcher.h
@@ -24,6 +24,7 @@
 
 #include <curl/curl.h>
 
+#include <base/files/file_descriptor_watcher_posix.h>
 #include <base/logging.h>
 #include <base/macros.h>
 #include <brillo/message_loops/message_loop.h>
@@ -37,6 +38,48 @@
 
 namespace chromeos_update_engine {
 
+// |UnresolvedHostStateMachine| is a representation of the internal state
+// machine of |LibcurlHttpFetcher|.
+class UnresolvedHostStateMachine {
+ public:
+  UnresolvedHostStateMachine() = default;
+  enum class State {
+    kInit = 0,
+    kRetry = 1,
+    kRetriedSuccess = 2,
+    kNotRetry = 3,
+  };
+
+  State GetState() { return state_; }
+
+  // Updates the following internal state machine:
+  //
+  // |kInit|
+  //   |
+  //   |
+  //   \/
+  // (Try, host Unresolved)
+  //   |
+  //   |
+  //   \/
+  // |kRetry| --> (Retry, host resolved)
+  //   |                                  |
+  //   |                                  |
+  //   \/                                 \/
+  // (Retry, host Unresolved)    |kRetriedSuccess|
+  //   |
+  //   |
+  //   \/
+  // |kNotRetry|
+  //
+  void UpdateState(bool failed_to_resolve_host);
+
+ private:
+  State state_ = {State::kInit};
+
+  DISALLOW_COPY_AND_ASSIGN(UnresolvedHostStateMachine);
+};
+
 class LibcurlHttpFetcher : public HttpFetcher {
  public:
   LibcurlHttpFetcher(ProxyResolver* proxy_resolver,
@@ -61,6 +104,9 @@
   void SetHeader(const std::string& header_name,
                  const std::string& header_value) override;
 
+  bool GetHeader(const std::string& header_name,
+                 std::string* header_value) const override;
+
   // Suspend the transfer by calling curl_easy_pause(CURLPAUSE_ALL).
   void Pause() override;
 
@@ -85,6 +131,8 @@
     no_network_max_retries_ = retries;
   }
 
+  int get_no_network_max_retries() { return no_network_max_retries_; }
+
   void set_server_to_check(ServerToCheck server_to_check) {
     server_to_check_ = server_to_check;
   }
@@ -106,7 +154,13 @@
     max_retry_count_ = max_retry_count;
   }
 
+  void set_is_update_check(bool is_update_check) {
+    is_update_check_ = is_update_check;
+  }
+
  private:
+  FRIEND_TEST(LibcurlHttpFetcherTest, HostResolvedTest);
+
   // libcurl's CURLOPT_CLOSESOCKETFUNCTION callback function. Called when
   // closing a socket created with the CURLOPT_OPENSOCKETFUNCTION callback.
   static int LibcurlCloseSocketCallback(void* clientp, curl_socket_t item);
@@ -116,7 +170,10 @@
   void ProxiesResolved();
 
   // Asks libcurl for the http response code and stores it in the object.
-  void GetHttpResponseCode();
+  virtual void GetHttpResponseCode();
+
+  // Returns the last |CURLcode|.
+  CURLcode GetCurlCode();
 
   // Checks whether stored HTTP response is within the success range.
   inline bool IsHttpResponseSuccess() {
@@ -161,7 +218,7 @@
   }
 
   // Cleans up the following if they are non-null:
-  // curl(m) handles, fd_task_maps_, timeout_id_.
+  // curl(m) handles, fd_controller_maps_, timeout_id_.
   void CleanUp();
 
   // Force terminate the transfer. This will invoke the delegate's (if any)
@@ -198,7 +255,8 @@
   // the message loop. libcurl may open/close descriptors and switch their
   // directions so maintain two separate lists so that watch conditions can be
   // set appropriately.
-  std::map<int, brillo::MessageLoop::TaskId> fd_task_maps_[2];
+  std::map<int, std::unique_ptr<base::FileDescriptorWatcher::Controller>>
+      fd_controller_maps_[2];
 
   // The TaskId of the timer we're waiting on. kTaskIdNull if we are not waiting
   // on it.
@@ -265,6 +323,12 @@
   // ServerToCheck::kNone.
   ServerToCheck server_to_check_{ServerToCheck::kNone};
 
+  // True if this object is used for an update check.
+  bool is_update_check_{false};
+
+  // Internal state machine.
+  UnresolvedHostStateMachine unresolved_host_state_machine_;
+
   int low_speed_limit_bps_{kDownloadLowSpeedLimitBps};
   int low_speed_time_seconds_{kDownloadLowSpeedTimeSeconds};
   int connect_timeout_seconds_{kDownloadConnectTimeoutSeconds};
diff --git a/libcurl_http_fetcher_unittest.cc b/libcurl_http_fetcher_unittest.cc
new file mode 100644
index 0000000..5d67570
--- /dev/null
+++ b/libcurl_http_fetcher_unittest.cc
@@ -0,0 +1,205 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/libcurl_http_fetcher.h"
+
+#include <string>
+
+#include <brillo/message_loops/fake_message_loop.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/fake_hardware.h"
+#include "update_engine/common/mock_proxy_resolver.h"
+#include "update_engine/mock_libcurl_http_fetcher.h"
+
+using std::string;
+
+namespace chromeos_update_engine {
+
+namespace {
+constexpr char kHeaderName[] = "X-Goog-Test-Header";
+}  // namespace
+
+class LibcurlHttpFetcherTest : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    loop_.SetAsCurrent();
+    fake_hardware_.SetIsOfficialBuild(true);
+    fake_hardware_.SetIsOOBEEnabled(false);
+  }
+
+  brillo::FakeMessageLoop loop_{nullptr};
+  FakeHardware fake_hardware_;
+  MockLibcurlHttpFetcher libcurl_fetcher_{nullptr, &fake_hardware_};
+  UnresolvedHostStateMachine state_machine_;
+};
+
+TEST_F(LibcurlHttpFetcherTest, GetEmptyHeaderValueTest) {
+  const string header_value = "";
+  string actual_header_value;
+  libcurl_fetcher_.SetHeader(kHeaderName, header_value);
+  EXPECT_TRUE(libcurl_fetcher_.GetHeader(kHeaderName, &actual_header_value));
+  EXPECT_EQ("", actual_header_value);
+}
+
+TEST_F(LibcurlHttpFetcherTest, GetHeaderTest) {
+  const string header_value = "This-is-value 123";
+  string actual_header_value;
+  libcurl_fetcher_.SetHeader(kHeaderName, header_value);
+  EXPECT_TRUE(libcurl_fetcher_.GetHeader(kHeaderName, &actual_header_value));
+  EXPECT_EQ(header_value, actual_header_value);
+}
+
+TEST_F(LibcurlHttpFetcherTest, GetNonExistentHeaderValueTest) {
+  string actual_header_value;
+  // Skip the |SetHeader()| call.
+  EXPECT_FALSE(libcurl_fetcher_.GetHeader(kHeaderName, &actual_header_value));
+  // Even after a failed |GetHeader()|, check that the string passed by
+  // pointer was cleared to be empty.
+  EXPECT_EQ("", actual_header_value);
+}
+
+TEST_F(LibcurlHttpFetcherTest, GetHeaderEdgeCaseTest) {
+  const string header_value = "\a\b\t\v\f\r\\ edge:-case: \a\b\t\v\f\r\\";
+  string actual_header_value;
+  libcurl_fetcher_.SetHeader(kHeaderName, header_value);
+  EXPECT_TRUE(libcurl_fetcher_.GetHeader(kHeaderName, &actual_header_value));
+  EXPECT_EQ(header_value, actual_header_value);
+}
+
+TEST_F(LibcurlHttpFetcherTest, InvalidURLTest) {
+  int no_network_max_retries = 1;
+  libcurl_fetcher_.set_no_network_max_retries(no_network_max_retries);
+
+  libcurl_fetcher_.BeginTransfer("not-a-URL");
+  while (loop_.PendingTasks()) {
+    loop_.RunOnce(true);
+  }
+
+  EXPECT_EQ(libcurl_fetcher_.get_no_network_max_retries(),
+            no_network_max_retries);
+}
+
+TEST_F(LibcurlHttpFetcherTest, CouldNotResolveHostTest) {
+  int no_network_max_retries = 1;
+  libcurl_fetcher_.set_no_network_max_retries(no_network_max_retries);
+
+  libcurl_fetcher_.BeginTransfer("https://An-uNres0lvable-uRl.invalid");
+
+  // libcurl is slower on Android, so the handle may not finish within one
+  // cycle. Keep running the loop until it finishes. The original test didn't
+  // correctly handle the case where the libcurl fds need to be re-watched.
+  while (loop_.PendingTasks() &&
+         libcurl_fetcher_.GetAuxiliaryErrorCode() == ErrorCode::kSuccess) {
+    loop_.RunOnce(true);
+  }
+
+  EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(),
+            ErrorCode::kUnresolvedHostError);
+
+  while (loop_.PendingTasks()) {
+    loop_.RunOnce(true);
+  }
+  // The auxiliary error code should not have been changed.
+  EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(),
+            ErrorCode::kUnresolvedHostError);
+
+  // If libcurl fails to resolve the name, we call res_init() to reload
+  // resolv.conf and retry exactly once more. See crbug.com/982813 for details.
+  EXPECT_EQ(libcurl_fetcher_.get_no_network_max_retries(),
+            no_network_max_retries + 1);
+}
+
+TEST_F(LibcurlHttpFetcherTest, HostResolvedTest) {
+  int no_network_max_retries = 2;
+  libcurl_fetcher_.set_no_network_max_retries(no_network_max_retries);
+
+  // This test actually sends a request to the internet, but according to
+  // https://tools.ietf.org/html/rfc2606#section-2, .invalid domain names are
+  // reserved and guaranteed to be invalid. Ideally we should mock libcurl or
+  // reorganize LibcurlHttpFetcher so that the part that sends the request can
+  // be mocked easily.
+  // TODO(xiaochu): Refactor LibcurlHttpFetcher (and its related classes) so
+  // it's easier to mock the part that depends on internet connectivity.
+  libcurl_fetcher_.BeginTransfer("https://An-uNres0lvable-uRl.invalid");
+
+  // libcurl is slower on Android, so the handle may not finish within one
+  // cycle. Keep running the loop until it finishes. The original test didn't
+  // correctly handle the case where the libcurl fds need to be re-watched.
+  while (loop_.PendingTasks() &&
+         libcurl_fetcher_.GetAuxiliaryErrorCode() == ErrorCode::kSuccess) {
+    loop_.RunOnce(true);
+  }
+
+  EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(),
+            ErrorCode::kUnresolvedHostError);
+
+  // The second time, the host resolves with response code 200, but we set the
+  // download size to be smaller than the transfer size so it will retry again.
+  EXPECT_CALL(libcurl_fetcher_, GetHttpResponseCode())
+      .WillOnce(testing::Invoke(
+          [this]() { libcurl_fetcher_.http_response_code_ = 200; }))
+      .WillRepeatedly(testing::Invoke(
+          [this]() { libcurl_fetcher_.http_response_code_ = 0; }));
+  libcurl_fetcher_.transfer_size_ = 10;
+
+  // libcurl is slower on Android, so the handle may not finish within one
+  // cycle. Keep running the loop until it finishes. The original test didn't
+  // correctly handle the case where the libcurl fds need to be re-watched.
+  while (loop_.PendingTasks() && libcurl_fetcher_.GetAuxiliaryErrorCode() ==
+                                     ErrorCode::kUnresolvedHostError) {
+    loop_.RunOnce(true);
+  }
+
+  EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(),
+            ErrorCode::kUnresolvedHostRecovered);
+
+  while (loop_.PendingTasks()) {
+    loop_.RunOnce(true);
+  }
+  // The auxiliary error code should not have been changed.
+  EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(),
+            ErrorCode::kUnresolvedHostRecovered);
+
+  // If libcurl fails to resolve the name, we call res_init() to reload
+  // resolv.conf and retry exactly once more. See crbug.com/982813 for details.
+  EXPECT_EQ(libcurl_fetcher_.get_no_network_max_retries(),
+            no_network_max_retries + 1);
+}
+
+TEST_F(LibcurlHttpFetcherTest, HttpFetcherStateMachineRetryFailedTest) {
+  state_machine_.UpdateState(true);
+  state_machine_.UpdateState(true);
+  EXPECT_EQ(state_machine_.GetState(),
+            UnresolvedHostStateMachine::State::kNotRetry);
+}
+
+TEST_F(LibcurlHttpFetcherTest, HttpFetcherStateMachineRetrySucceedTest) {
+  state_machine_.UpdateState(true);
+  state_machine_.UpdateState(false);
+  EXPECT_EQ(state_machine_.GetState(),
+            UnresolvedHostStateMachine::State::kRetriedSuccess);
+}
+
+TEST_F(LibcurlHttpFetcherTest, HttpFetcherStateMachineNoRetryTest) {
+  state_machine_.UpdateState(false);
+  state_machine_.UpdateState(false);
+  EXPECT_EQ(state_machine_.GetState(),
+            UnresolvedHostStateMachine::State::kInit);
+}
+
+}  // namespace chromeos_update_engine
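The three state-machine tests above pin down the transition table of
UnresolvedHostStateMachine: a first unresolved-host failure moves kInit to
kRetry, a second failure gives up (kNotRetry), and a success after a failure
records the recovery (kRetriedSuccess). A compact restatement of those
transitions as a free function (a sketch mirroring UpdateState(), not the
production class):

#include <cassert>

enum class State { kInit, kRetry, kRetriedSuccess, kNotRetry };

// Returns the next state given whether the latest transfer failed to resolve
// the host.
State Update(State state, bool failed_to_resolve_host) {
  switch (state) {
    case State::kInit:
      return failed_to_resolve_host ? State::kRetry : State::kInit;
    case State::kRetry:
      return failed_to_resolve_host ? State::kNotRetry
                                    : State::kRetriedSuccess;
    case State::kNotRetry:        // Terminal: give up after two failures.
    case State::kRetriedSuccess:  // Terminal: the retry after res_init() worked.
      return state;
  }
  return state;
}

int main() {
  // Two unresolved-host failures in a row: do not retry again.
  assert(Update(Update(State::kInit, true), true) == State::kNotRetry);
  // A failure followed by a success: the retry recovered.
  assert(Update(Update(State::kInit, true), false) == State::kRetriedSuccess);
  // No failures: stay in kInit.
  assert(Update(Update(State::kInit, false), false) == State::kInit);
  return 0;
}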
diff --git a/main.cc b/main.cc
index 4377a15..a23a08b 100644
--- a/main.cc
+++ b/main.cc
@@ -23,10 +23,11 @@
 #include <base/logging.h>
 #include <brillo/flag_helper.h>
 
+#include "update_engine/common/daemon_base.h"
+#include "update_engine/common/logging.h"
+#include "update_engine/common/subprocess.h"
 #include "update_engine/common/terminator.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/daemon.h"
-#include "update_engine/logging.h"
 
 using std::string;
 
@@ -63,8 +64,8 @@
   // Done _after_ log file creation.
   umask(S_IRWXG | S_IRWXO);
 
-  chromeos_update_engine::UpdateEngineDaemon update_engine_daemon;
-  int exit_code = update_engine_daemon.Run();
+  auto daemon = chromeos_update_engine::DaemonBase::CreateInstance();
+  int exit_code = daemon->Run();
 
   chromeos_update_engine::Subprocess::Get().FlushBufferedLogsAtExit();
 
diff --git a/metrics_utils.cc b/metrics_utils.cc
index 9abc3ef..34da5a1 100644
--- a/metrics_utils.cc
+++ b/metrics_utils.cc
@@ -23,7 +23,6 @@
 #include "update_engine/common/clock_interface.h"
 #include "update_engine/common/constants.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/system_state.h"
 
 using base::Time;
 using base::TimeDelta;
@@ -95,6 +94,7 @@
     case ErrorCode::kPostinstallRunnerError:
     case ErrorCode::kPostinstallBootedFromFirmwareB:
     case ErrorCode::kPostinstallFirmwareRONotUpdatable:
+    case ErrorCode::kPostInstallMountError:
       return metrics::AttemptResult::kPostInstallFailed;
 
     case ErrorCode::kUserCanceled:
@@ -111,10 +111,6 @@
     case ErrorCode::kDownloadInvalidMetadataSignature:
     case ErrorCode::kOmahaResponseInvalid:
     case ErrorCode::kOmahaUpdateIgnoredPerPolicy:
-    // TODO(deymo): The next two items belong in their own category; they
-    // should not be counted as internal errors. b/27112092
-    case ErrorCode::kOmahaUpdateDeferredPerPolicy:
-    case ErrorCode::kNonCriticalUpdateInOOBE:
     case ErrorCode::kOmahaErrorInHTTPResponse:
     case ErrorCode::kDownloadMetadataSignatureMissingError:
     case ErrorCode::kOmahaUpdateDeferredForBackoff:
@@ -124,8 +120,13 @@
     case ErrorCode::kOmahaUpdateIgnoredOverCellular:
     case ErrorCode::kNoUpdate:
     case ErrorCode::kFirstActiveOmahaPingSentPersistenceError:
+    case ErrorCode::kPackageExcludedFromUpdate:
       return metrics::AttemptResult::kInternalError;
 
+    case ErrorCode::kOmahaUpdateDeferredPerPolicy:
+    case ErrorCode::kNonCriticalUpdateInOOBE:
+      return metrics::AttemptResult::kUpdateSkipped;
+
     // Special flags. These can't happen (we mask them out above) but
     // the compiler doesn't know that. Just break out so we can warn and
     // return |kInternalError|.
@@ -188,6 +189,7 @@
     case ErrorCode::kOmahaResponseHandlerError:
     case ErrorCode::kFilesystemCopierError:
     case ErrorCode::kPostinstallRunnerError:
+    case ErrorCode::kPostInstallMountError:
     case ErrorCode::kPayloadMismatchedType:
     case ErrorCode::kInstallDeviceOpenError:
     case ErrorCode::kKernelDeviceOpenError:
@@ -240,6 +242,7 @@
     case ErrorCode::kVerityCalculationError:
     case ErrorCode::kNotEnoughSpace:
     case ErrorCode::kDeviceCorrupted:
+    case ErrorCode::kPackageExcludedFromUpdate:
       break;
 
     // Special flags. These can't happen (we mask them out above) but
@@ -280,12 +283,6 @@
       else
         return metrics::ConnectionType::kWifi;
 
-    case ConnectionType::kWimax:
-      return metrics::ConnectionType::kWimax;
-
-    case ConnectionType::kBluetooth:
-      return metrics::ConnectionType::kBluetooth;
-
     case ConnectionType::kCellular:
       return metrics::ConnectionType::kCellular;
   }
@@ -297,48 +294,6 @@
   return metrics::ConnectionType::kUnknown;
 }
 
-bool WallclockDurationHelper(SystemState* system_state,
-                             const std::string& state_variable_key,
-                             TimeDelta* out_duration) {
-  bool ret = false;
-
-  Time now = system_state->clock()->GetWallclockTime();
-  int64_t stored_value;
-  if (system_state->prefs()->GetInt64(state_variable_key, &stored_value)) {
-    Time stored_time = Time::FromInternalValue(stored_value);
-    if (stored_time > now) {
-      LOG(ERROR) << "Stored time-stamp used for " << state_variable_key
-                 << " is in the future.";
-    } else {
-      *out_duration = now - stored_time;
-      ret = true;
-    }
-  }
-
-  if (!system_state->prefs()->SetInt64(state_variable_key,
-                                       now.ToInternalValue())) {
-    LOG(ERROR) << "Error storing time-stamp in " << state_variable_key;
-  }
-
-  return ret;
-}
-
-bool MonotonicDurationHelper(SystemState* system_state,
-                             int64_t* storage,
-                             TimeDelta* out_duration) {
-  bool ret = false;
-
-  Time now = system_state->clock()->GetMonotonicTime();
-  if (*storage != 0) {
-    Time stored_time = Time::FromInternalValue(*storage);
-    *out_duration = now - stored_time;
-    ret = true;
-  }
-  *storage = now.ToInternalValue();
-
-  return ret;
-}
-
 int64_t GetPersistedValue(const std::string& key, PrefsInterface* prefs) {
   CHECK(prefs);
   if (!prefs->Exists(key))
@@ -408,8 +363,7 @@
     return false;
 
   Time system_updated_at = Time::FromInternalValue(stored_value);
-  base::TimeDelta time_to_reboot =
-      clock->GetMonotonicTime() - system_updated_at;
+  TimeDelta time_to_reboot = clock->GetMonotonicTime() - system_updated_at;
   if (time_to_reboot.ToInternalValue() < 0) {
     LOG(ERROR) << "time_to_reboot is negative - system_updated_at: "
                << utils::ToString(system_updated_at);
diff --git a/metrics_utils.h b/metrics_utils.h
index 8f1aad1..3aac4e5 100644
--- a/metrics_utils.h
+++ b/metrics_utils.h
@@ -22,16 +22,14 @@
 #include <base/time/time.h>
 
 #include "update_engine/common/clock_interface.h"
+#include "update_engine/common/connection_utils.h"
 #include "update_engine/common/error_code.h"
+#include "update_engine/common/metrics_constants.h"
+#include "update_engine/common/metrics_reporter_interface.h"
 #include "update_engine/common/prefs_interface.h"
-#include "update_engine/connection_utils.h"
-#include "update_engine/metrics_constants.h"
-#include "update_engine/metrics_reporter_interface.h"
 
 namespace chromeos_update_engine {
 
-class SystemState;
-
 namespace metrics_utils {
 
 // Transforms a ErrorCode value into a metrics::DownloadErrorCode.
@@ -50,29 +48,6 @@
 metrics::ConnectionType GetConnectionType(ConnectionType type,
                                           ConnectionTethering tethering);
 
-// This function returns the duration on the wallclock since the last
-// time it was called for the same |state_variable_key| value.
-//
-// If the function returns |true|, the duration (always non-negative)
-// is returned in |out_duration|. If the function returns |false|
-// something went wrong or there was no previous measurement.
-bool WallclockDurationHelper(SystemState* system_state,
-                             const std::string& state_variable_key,
-                             base::TimeDelta* out_duration);
-
-// This function returns the duration on the monotonic clock since the
-// last time it was called for the same |storage| pointer.
-//
-// You should pass a pointer to a 64-bit integer in |storage| which
-// should be initialized to 0.
-//
-// If the function returns |true|, the duration (always non-negative)
-// is returned in |out_duration|. If the function returns |false|
-// something went wrong or there was no previous measurement.
-bool MonotonicDurationHelper(SystemState* system_state,
-                             int64_t* storage,
-                             base::TimeDelta* out_duration);
-
 // Returns the persisted value from prefs for the given key. It also
 // validates that the value returned is non-negative.
 int64_t GetPersistedValue(const std::string& key, PrefsInterface* prefs);
diff --git a/metrics_utils_unittest.cc b/metrics_utils_unittest.cc
index e7c4c26..93b48fb 100644
--- a/metrics_utils_unittest.cc
+++ b/metrics_utils_unittest.cc
@@ -18,10 +18,6 @@
 
 #include <gtest/gtest.h>
 
-#include "update_engine/common/fake_clock.h"
-#include "update_engine/common/fake_prefs.h"
-#include "update_engine/fake_system_state.h"
-
 namespace chromeos_update_engine {
 namespace metrics_utils {
 
@@ -41,12 +37,6 @@
   EXPECT_EQ(
       metrics::ConnectionType::kWifi,
       GetConnectionType(ConnectionType::kWifi, ConnectionTethering::kUnknown));
-  EXPECT_EQ(
-      metrics::ConnectionType::kWimax,
-      GetConnectionType(ConnectionType::kWimax, ConnectionTethering::kUnknown));
-  EXPECT_EQ(metrics::ConnectionType::kBluetooth,
-            GetConnectionType(ConnectionType::kBluetooth,
-                              ConnectionTethering::kUnknown));
   EXPECT_EQ(metrics::ConnectionType::kCellular,
             GetConnectionType(ConnectionType::kCellular,
                               ConnectionTethering::kUnknown));
@@ -80,116 +70,5 @@
       GetConnectionType(ConnectionType::kWifi, ConnectionTethering::kUnknown));
 }
 
-TEST(MetricsUtilsTest, WallclockDurationHelper) {
-  FakeSystemState fake_system_state;
-  FakeClock fake_clock;
-  base::TimeDelta duration;
-  const std::string state_variable_key = "test-prefs";
-  FakePrefs fake_prefs;
-
-  fake_system_state.set_clock(&fake_clock);
-  fake_system_state.set_prefs(&fake_prefs);
-
-  // Initialize wallclock to 1 sec.
-  fake_clock.SetWallclockTime(base::Time::FromInternalValue(1000000));
-
-  // First time called so no previous measurement available.
-  EXPECT_FALSE(metrics_utils::WallclockDurationHelper(
-      &fake_system_state, state_variable_key, &duration));
-
-  // Next time, we should get zero since the clock didn't advance.
-  EXPECT_TRUE(metrics_utils::WallclockDurationHelper(
-      &fake_system_state, state_variable_key, &duration));
-  EXPECT_EQ(duration.InSeconds(), 0);
-
-  // We can also call it as many times as we want without it being
-  // considered a failure.
-  EXPECT_TRUE(metrics_utils::WallclockDurationHelper(
-      &fake_system_state, state_variable_key, &duration));
-  EXPECT_EQ(duration.InSeconds(), 0);
-  EXPECT_TRUE(metrics_utils::WallclockDurationHelper(
-      &fake_system_state, state_variable_key, &duration));
-  EXPECT_EQ(duration.InSeconds(), 0);
-
-  // Advance the clock one second, then we should get 1 sec on the
-  // next call and 0 sec on the subsequent call.
-  fake_clock.SetWallclockTime(base::Time::FromInternalValue(2000000));
-  EXPECT_TRUE(metrics_utils::WallclockDurationHelper(
-      &fake_system_state, state_variable_key, &duration));
-  EXPECT_EQ(duration.InSeconds(), 1);
-  EXPECT_TRUE(metrics_utils::WallclockDurationHelper(
-      &fake_system_state, state_variable_key, &duration));
-  EXPECT_EQ(duration.InSeconds(), 0);
-
-  // Advance clock two seconds and we should get 2 sec and then 0 sec.
-  fake_clock.SetWallclockTime(base::Time::FromInternalValue(4000000));
-  EXPECT_TRUE(metrics_utils::WallclockDurationHelper(
-      &fake_system_state, state_variable_key, &duration));
-  EXPECT_EQ(duration.InSeconds(), 2);
-  EXPECT_TRUE(metrics_utils::WallclockDurationHelper(
-      &fake_system_state, state_variable_key, &duration));
-  EXPECT_EQ(duration.InSeconds(), 0);
-
-  // There's a possibility that the wallclock can go backwards (NTP
-  // adjustments, for example) so check that we properly handle this
-  // case.
-  fake_clock.SetWallclockTime(base::Time::FromInternalValue(3000000));
-  EXPECT_FALSE(metrics_utils::WallclockDurationHelper(
-      &fake_system_state, state_variable_key, &duration));
-  fake_clock.SetWallclockTime(base::Time::FromInternalValue(4000000));
-  EXPECT_TRUE(metrics_utils::WallclockDurationHelper(
-      &fake_system_state, state_variable_key, &duration));
-  EXPECT_EQ(duration.InSeconds(), 1);
-}
-
-TEST(MetricsUtilsTest, MonotonicDurationHelper) {
-  int64_t storage = 0;
-  FakeSystemState fake_system_state;
-  FakeClock fake_clock;
-  base::TimeDelta duration;
-
-  fake_system_state.set_clock(&fake_clock);
-
-  // Initialize monotonic clock to 1 sec.
-  fake_clock.SetMonotonicTime(base::Time::FromInternalValue(1000000));
-
-  // First time called so no previous measurement available.
-  EXPECT_FALSE(metrics_utils::MonotonicDurationHelper(
-      &fake_system_state, &storage, &duration));
-
-  // Next time, we should get zero since the clock didn't advance.
-  EXPECT_TRUE(metrics_utils::MonotonicDurationHelper(
-      &fake_system_state, &storage, &duration));
-  EXPECT_EQ(duration.InSeconds(), 0);
-
-  // We can also call it as many times as we want without it being
-  // considered a failure.
-  EXPECT_TRUE(metrics_utils::MonotonicDurationHelper(
-      &fake_system_state, &storage, &duration));
-  EXPECT_EQ(duration.InSeconds(), 0);
-  EXPECT_TRUE(metrics_utils::MonotonicDurationHelper(
-      &fake_system_state, &storage, &duration));
-  EXPECT_EQ(duration.InSeconds(), 0);
-
-  // Advance the clock one second, then we should get 1 sec on the
-  // next call and 0 sec on the subsequent call.
-  fake_clock.SetMonotonicTime(base::Time::FromInternalValue(2000000));
-  EXPECT_TRUE(metrics_utils::MonotonicDurationHelper(
-      &fake_system_state, &storage, &duration));
-  EXPECT_EQ(duration.InSeconds(), 1);
-  EXPECT_TRUE(metrics_utils::MonotonicDurationHelper(
-      &fake_system_state, &storage, &duration));
-  EXPECT_EQ(duration.InSeconds(), 0);
-
-  // Advance clock two seconds and we should get 2 sec and then 0 sec.
-  fake_clock.SetMonotonicTime(base::Time::FromInternalValue(4000000));
-  EXPECT_TRUE(metrics_utils::MonotonicDurationHelper(
-      &fake_system_state, &storage, &duration));
-  EXPECT_EQ(duration.InSeconds(), 2);
-  EXPECT_TRUE(metrics_utils::MonotonicDurationHelper(
-      &fake_system_state, &storage, &duration));
-  EXPECT_EQ(duration.InSeconds(), 0);
-}
-
 }  // namespace metrics_utils
 }  // namespace chromeos_update_engine
diff --git a/mock_boot_control_hal.h b/mock_boot_control_hal.h
deleted file mode 100644
index 4e9cb50..0000000
--- a/mock_boot_control_hal.h
+++ /dev/null
@@ -1,49 +0,0 @@
-//
-// Copyright (C) 2018 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include <android/hardware/boot/1.0/IBootControl.h>
-#include <stdint.h>
-
-#include <gmock/gmock.h>
-
-namespace chromeos_update_engine {
-
-class MockBootControlHal
-    : public ::android::hardware::boot::V1_0::IBootControl {
- public:
-  MOCK_METHOD0(getNumberSlots, ::android::hardware::Return<uint32_t>());
-  MOCK_METHOD0(getCurrentSlot, ::android::hardware::Return<uint32_t>());
-  MOCK_METHOD1(markBootSuccessful,
-               ::android::hardware::Return<void>(markBootSuccessful_cb));
-  MOCK_METHOD2(setActiveBootSlot,
-               ::android::hardware::Return<void>(uint32_t,
-                                                 setActiveBootSlot_cb));
-  MOCK_METHOD2(setSlotAsUnbootable,
-               ::android::hardware::Return<void>(uint32_t,
-                                                 setSlotAsUnbootable_cb));
-  MOCK_METHOD1(
-      isSlotBootable,
-      ::android::hardware::Return<::android::hardware::boot::V1_0::BoolResult>(
-          uint32_t));
-  MOCK_METHOD1(
-      isSlotMarkedSuccessful,
-      ::android::hardware::Return<::android::hardware::boot::V1_0::BoolResult>(
-          uint32_t));
-  MOCK_METHOD2(getSuffix,
-               ::android::hardware::Return<void>(uint32_t, getSuffix_cb));
-};
-
-}  // namespace chromeos_update_engine
diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h
deleted file mode 100644
index 1e4e5fd..0000000
--- a/mock_dynamic_partition_control.h
+++ /dev/null
@@ -1,110 +0,0 @@
-//
-// Copyright (C) 2018 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include <stdint.h>
-
-#include <memory>
-#include <set>
-#include <string>
-
-#include <gmock/gmock.h>
-
-#include "update_engine/common/boot_control_interface.h"
-#include "update_engine/common/dynamic_partition_control_interface.h"
-#include "update_engine/dynamic_partition_control_android.h"
-
-namespace chromeos_update_engine {
-
-class MockDynamicPartitionControl : public DynamicPartitionControlInterface {
- public:
-  MOCK_METHOD5(MapPartitionOnDeviceMapper,
-               bool(const std::string&,
-                    const std::string&,
-                    uint32_t,
-                    bool,
-                    std::string*));
-  MOCK_METHOD0(Cleanup, void());
-  MOCK_METHOD0(GetDynamicPartitionsFeatureFlag, FeatureFlag());
-  MOCK_METHOD5(
-      PreparePartitionsForUpdate,
-      bool(uint32_t, uint32_t, const DeltaArchiveManifest&, bool, uint64_t*));
-  MOCK_METHOD0(GetVirtualAbFeatureFlag, FeatureFlag());
-  MOCK_METHOD1(FinishUpdate, bool(bool));
-  MOCK_METHOD0(CleanupSuccessfulUpdate, ErrorCode());
-  MOCK_METHOD3(GetCleanupPreviousUpdateAction,
-               std::unique_ptr<AbstractAction>(
-                   BootControlInterface*,
-                   PrefsInterface*,
-                   CleanupPreviousUpdateActionDelegateInterface*));
-};
-
-class MockDynamicPartitionControlAndroid
-    : public DynamicPartitionControlAndroid {
- public:
-  MOCK_METHOD5(MapPartitionOnDeviceMapper,
-               bool(const std::string&,
-                    const std::string&,
-                    uint32_t,
-                    bool,
-                    std::string*));
-  MOCK_METHOD1(UnmapPartitionOnDeviceMapper, bool(const std::string&));
-  MOCK_METHOD0(Cleanup, void());
-  MOCK_METHOD1(DeviceExists, bool(const std::string&));
-  MOCK_METHOD1(GetState, ::android::dm::DmDeviceState(const std::string&));
-  MOCK_METHOD2(GetDmDevicePathByName, bool(const std::string&, std::string*));
-  MOCK_METHOD3(LoadMetadataBuilder,
-               std::unique_ptr<::android::fs_mgr::MetadataBuilder>(
-                   const std::string&, uint32_t, uint32_t));
-  MOCK_METHOD3(StoreMetadata,
-               bool(const std::string&,
-                    android::fs_mgr::MetadataBuilder*,
-                    uint32_t));
-  MOCK_METHOD1(GetDeviceDir, bool(std::string*));
-  MOCK_METHOD0(GetDynamicPartitionsFeatureFlag, FeatureFlag());
-  MOCK_METHOD1(GetSuperPartitionName, std::string(uint32_t));
-  MOCK_METHOD0(GetVirtualAbFeatureFlag, FeatureFlag());
-  MOCK_METHOD1(FinishUpdate, bool(bool));
-  MOCK_METHOD5(
-      GetSystemOtherPath,
-      bool(uint32_t, uint32_t, const std::string&, std::string*, bool*));
-  MOCK_METHOD2(EraseSystemOtherAvbFooter, bool(uint32_t, uint32_t));
-  MOCK_METHOD0(IsAvbEnabledOnSystemOther, std::optional<bool>());
-
-  void set_fake_mapped_devices(const std::set<std::string>& fake) override {
-    DynamicPartitionControlAndroid::set_fake_mapped_devices(fake);
-  }
-
-  bool RealGetSystemOtherPath(uint32_t source_slot,
-                              uint32_t target_slot,
-                              const std::string& partition_name_suffix,
-                              std::string* path,
-                              bool* should_unmap) {
-    return DynamicPartitionControlAndroid::GetSystemOtherPath(
-        source_slot, target_slot, partition_name_suffix, path, should_unmap);
-  }
-
-  bool RealEraseSystemOtherAvbFooter(uint32_t source_slot,
-                                     uint32_t target_slot) {
-    return DynamicPartitionControlAndroid::EraseSystemOtherAvbFooter(
-        source_slot, target_slot);
-  }
-
-  std::optional<bool> RealIsAvbEnabledInFstab(const std::string& path) {
-    return DynamicPartitionControlAndroid::IsAvbEnabledInFstab(path);
-  }
-};
-
-}  // namespace chromeos_update_engine
diff --git a/mock_libcurl_http_fetcher.h b/mock_libcurl_http_fetcher.h
new file mode 100644
index 0000000..a14f953
--- /dev/null
+++ b/mock_libcurl_http_fetcher.h
@@ -0,0 +1,37 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_MOCK_LIBCURL_HTTP_FETCHER_H_
+#define UPDATE_ENGINE_MOCK_LIBCURL_HTTP_FETCHER_H_
+
+#include <gmock/gmock.h>
+
+#include "update_engine/libcurl_http_fetcher.h"
+
+namespace chromeos_update_engine {
+
+class MockLibcurlHttpFetcher : public LibcurlHttpFetcher {
+ public:
+  MockLibcurlHttpFetcher(ProxyResolver* proxy_resolver,
+                         HardwareInterface* hardware)
+      : LibcurlHttpFetcher(proxy_resolver, hardware) {}
+
+  MOCK_METHOD0(GetHttpResponseCode, void());
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_MOCK_LIBCURL_HTTP_FETCHER_H_
diff --git a/omaha_request_action.cc b/omaha_request_action.cc
deleted file mode 100644
index fae9471..0000000
--- a/omaha_request_action.cc
+++ /dev/null
@@ -1,1997 +0,0 @@
-//
-// Copyright (C) 2012 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/omaha_request_action.h"
-
-#include <inttypes.h>
-
-#include <limits>
-#include <map>
-#include <sstream>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include <base/bind.h>
-#include <base/logging.h>
-#include <base/rand_util.h>
-#include <base/strings/string_number_conversions.h>
-#include <base/strings/string_split.h>
-#include <base/strings/string_util.h>
-#include <base/strings/stringprintf.h>
-#include <base/time/time.h>
-#include <brillo/key_value_store.h>
-#include <expat.h>
-#include <metrics/metrics_library.h>
-#include <policy/libpolicy.h>
-
-#include "update_engine/common/action_pipe.h"
-#include "update_engine/common/constants.h"
-#include "update_engine/common/hardware_interface.h"
-#include "update_engine/common/hash_calculator.h"
-#include "update_engine/common/platform_constants.h"
-#include "update_engine/common/prefs_interface.h"
-#include "update_engine/common/utils.h"
-#include "update_engine/connection_manager_interface.h"
-#include "update_engine/metrics_reporter_interface.h"
-#include "update_engine/metrics_utils.h"
-#include "update_engine/omaha_request_params.h"
-#include "update_engine/p2p_manager.h"
-#include "update_engine/payload_state_interface.h"
-
-using base::Time;
-using base::TimeDelta;
-using chromeos_update_manager::kRollforwardInfinity;
-using std::map;
-using std::numeric_limits;
-using std::string;
-using std::vector;
-
-namespace chromeos_update_engine {
-
-// List of custom attributes that we interpret in the Omaha response:
-constexpr char kAttrDeadline[] = "deadline";
-constexpr char kAttrDisableP2PForDownloading[] = "DisableP2PForDownloading";
-constexpr char kAttrDisableP2PForSharing[] = "DisableP2PForSharing";
-constexpr char kAttrDisablePayloadBackoff[] = "DisablePayloadBackoff";
-constexpr char kAttrVersion[] = "version";
-// Deprecated: "IsDelta"
-constexpr char kAttrIsDeltaPayload[] = "IsDeltaPayload";
-constexpr char kAttrMaxFailureCountPerUrl[] = "MaxFailureCountPerUrl";
-constexpr char kAttrMaxDaysToScatter[] = "MaxDaysToScatter";
-// Deprecated: "ManifestSignatureRsa"
-// Deprecated: "ManifestSize"
-constexpr char kAttrMetadataSignatureRsa[] = "MetadataSignatureRsa";
-constexpr char kAttrMetadataSize[] = "MetadataSize";
-constexpr char kAttrMoreInfo[] = "MoreInfo";
-constexpr char kAttrNoUpdate[] = "noupdate";
-// Deprecated: "NeedsAdmin"
-constexpr char kAttrPollInterval[] = "PollInterval";
-constexpr char kAttrPowerwash[] = "Powerwash";
-constexpr char kAttrPrompt[] = "Prompt";
-constexpr char kAttrPublicKeyRsa[] = "PublicKeyRsa";
-
-// List of attributes that we interpret in the Omaha response:
-constexpr char kAttrAppId[] = "appid";
-constexpr char kAttrCodeBase[] = "codebase";
-constexpr char kAttrCohort[] = "cohort";
-constexpr char kAttrCohortHint[] = "cohorthint";
-constexpr char kAttrCohortName[] = "cohortname";
-constexpr char kAttrElapsedDays[] = "elapsed_days";
-constexpr char kAttrElapsedSeconds[] = "elapsed_seconds";
-constexpr char kAttrEvent[] = "event";
-constexpr char kAttrHashSha256[] = "hash_sha256";
-// Deprecated: "hash"; Although we still need to pass it from the server for
-// backward compatibility.
-constexpr char kAttrName[] = "name";
-// Deprecated: "sha256"; Although we still need to pass it from the server for
-// backward compatibility.
-constexpr char kAttrSize[] = "size";
-constexpr char kAttrStatus[] = "status";
-
-// List of values that we interpret in the Omaha response:
-constexpr char kValPostInstall[] = "postinstall";
-constexpr char kValNoUpdate[] = "noupdate";
-
-constexpr char kOmahaUpdaterVersion[] = "0.1.0.0";
-
-// X-Goog-Update headers.
-constexpr char kXGoogleUpdateInteractivity[] = "X-Goog-Update-Interactivity";
-constexpr char kXGoogleUpdateAppId[] = "X-Goog-Update-AppId";
-constexpr char kXGoogleUpdateUpdater[] = "X-Goog-Update-Updater";
-
-// updatecheck attributes (without the underscore prefix).
-constexpr char kAttrEol[] = "eol";
-constexpr char kAttrRollback[] = "rollback";
-constexpr char kAttrFirmwareVersion[] = "firmware_version";
-constexpr char kAttrKernelVersion[] = "kernel_version";
-
-namespace {
-
-// Returns an XML ping element attribute assignment with attribute
-// |name| and value |ping_days| if |ping_days| has a value that needs
-// to be sent, or an empty string otherwise.
-string GetPingAttribute(const string& name, int ping_days) {
-  if (ping_days > 0 || ping_days == OmahaRequestAction::kNeverPinged)
-    return base::StringPrintf(" %s=\"%d\"", name.c_str(), ping_days);
-  return "";
-}
-
-// Returns an XML ping element if any of the elapsed days need to be
-// sent, or an empty string otherwise.
-string GetPingXml(int ping_active_days, int ping_roll_call_days) {
-  string ping_active = GetPingAttribute("a", ping_active_days);
-  string ping_roll_call = GetPingAttribute("r", ping_roll_call_days);
-  if (!ping_active.empty() || !ping_roll_call.empty()) {
-    return base::StringPrintf("        <ping active=\"1\"%s%s></ping>\n",
-                              ping_active.c_str(),
-                              ping_roll_call.c_str());
-  }
-  return "";
-}
-
-// Returns an XML that goes into the body of the <app> element of the Omaha
-// request based on the given parameters.
-string GetAppBody(const OmahaEvent* event,
-                  OmahaRequestParams* params,
-                  bool ping_only,
-                  bool include_ping,
-                  bool skip_updatecheck,
-                  int ping_active_days,
-                  int ping_roll_call_days,
-                  PrefsInterface* prefs) {
-  string app_body;
-  if (event == nullptr) {
-    if (include_ping)
-      app_body = GetPingXml(ping_active_days, ping_roll_call_days);
-    if (!ping_only) {
-      if (!skip_updatecheck) {
-        app_body += "        <updatecheck";
-        if (!params->target_version_prefix().empty()) {
-          app_body += base::StringPrintf(
-              " targetversionprefix=\"%s\"",
-              XmlEncodeWithDefault(params->target_version_prefix(), "")
-                  .c_str());
-          // Rollback requires target_version_prefix set.
-          if (params->rollback_allowed()) {
-            app_body += " rollback_allowed=\"true\"";
-          }
-        }
-        app_body += "></updatecheck>\n";
-      }
-
-      // If this is the first update check after a reboot following a previous
-      // update, generate an event containing the previous version number. If
-      // the previous version preference file doesn't exist, the event is still
-      // generated with a previous version of 0.0.0.0 -- this is relevant for
-      // older clients or new installs. The previous version event is not sent
-      // for ping-only requests because they come before the client has
-      // rebooted. The previous version event is also not sent if it was already
-      // sent for this new version with a previous updatecheck.
-      string prev_version;
-      if (!prefs->GetString(kPrefsPreviousVersion, &prev_version)) {
-        prev_version = "0.0.0.0";
-      }
-      // We only store a non-empty previous version value after a successful
-      // update in the previous boot. After reporting it back to the server,
-      // we clear the previous version value so it doesn't get reported again.
-      if (!prev_version.empty()) {
-        app_body += base::StringPrintf(
-            "        <event eventtype=\"%d\" eventresult=\"%d\" "
-            "previousversion=\"%s\"></event>\n",
-            OmahaEvent::kTypeRebootedAfterUpdate,
-            OmahaEvent::kResultSuccess,
-            XmlEncodeWithDefault(prev_version, "0.0.0.0").c_str());
-        LOG_IF(WARNING, !prefs->SetString(kPrefsPreviousVersion, ""))
-            << "Unable to reset the previous version.";
-      }
-    }
-  } else {
-    // The error code is an optional attribute so append it only if the result
-    // is not success.
-    string error_code;
-    if (event->result != OmahaEvent::kResultSuccess) {
-      error_code = base::StringPrintf(" errorcode=\"%d\"",
-                                      static_cast<int>(event->error_code));
-    }
-    app_body = base::StringPrintf(
-        "        <event eventtype=\"%d\" eventresult=\"%d\"%s></event>\n",
-        event->type,
-        event->result,
-        error_code.c_str());
-  }
-
-  return app_body;
-}
-
-// Returns the cohort* argument to include in the <app> tag for the passed
-// |arg_name| and |prefs_key|, if any. The return value is suitable to
-// concatenate to the list of arguments and includes a space at the end.
-string GetCohortArgXml(PrefsInterface* prefs,
-                       const string arg_name,
-                       const string prefs_key) {
-  // There's nothing wrong with not having a given cohort setting, so we check
-  // existence first to avoid the warning log message.
-  if (!prefs->Exists(prefs_key))
-    return "";
-  string cohort_value;
-  if (!prefs->GetString(prefs_key, &cohort_value) || cohort_value.empty())
-    return "";
-  // This is a sanity check to avoid sending a huge XML file back to Omaha due
-  // to a compromised stateful partition making the update check fail in low
-  // network environments even after a reboot.
-  if (cohort_value.size() > 1024) {
-    LOG(WARNING) << "The omaha cohort setting " << arg_name
-                 << " has a too big value, which must be an error or an "
-                    "attacker trying to inhibit updates.";
-    return "";
-  }
-
-  string escaped_xml_value;
-  if (!XmlEncode(cohort_value, &escaped_xml_value)) {
-    LOG(WARNING) << "The omaha cohort setting " << arg_name
-                 << " is ASCII-7 invalid, ignoring it.";
-    return "";
-  }
-
-  return base::StringPrintf(
-      "%s=\"%s\" ", arg_name.c_str(), escaped_xml_value.c_str());
-}
-
-struct OmahaAppData {
-  string id;
-  string version;
-  string product_components;
-};
-
-bool IsValidComponentID(const string& id) {
-  for (char c : id) {
-    if (!isalnum(c) && c != '-' && c != '_' && c != '.')
-      return false;
-  }
-  return true;
-}
-
-// Returns an XML that corresponds to the entire <app> node of the Omaha
-// request based on the given parameters.
-string GetAppXml(const OmahaEvent* event,
-                 OmahaRequestParams* params,
-                 const OmahaAppData& app_data,
-                 bool ping_only,
-                 bool include_ping,
-                 bool skip_updatecheck,
-                 int ping_active_days,
-                 int ping_roll_call_days,
-                 int install_date_in_days,
-                 SystemState* system_state) {
-  string app_body = GetAppBody(event,
-                               params,
-                               ping_only,
-                               include_ping,
-                               skip_updatecheck,
-                               ping_active_days,
-                               ping_roll_call_days,
-                               system_state->prefs());
-  string app_versions;
-
-  // If we are downgrading to a more stable channel and we are allowed to do
-  // powerwash, then pass 0.0.0.0 as the version. This is needed to get the
-  // highest-versioned payload on the destination channel.
-  if (params->ShouldPowerwash()) {
-    LOG(INFO) << "Passing OS version as 0.0.0.0 as we are set to powerwash "
-              << "on downgrading to the version in the more stable channel";
-    app_versions = "version=\"0.0.0.0\" from_version=\"" +
-                   XmlEncodeWithDefault(app_data.version, "0.0.0.0") + "\" ";
-  } else {
-    app_versions = "version=\"" +
-                   XmlEncodeWithDefault(app_data.version, "0.0.0.0") + "\" ";
-  }
-
-  string download_channel = params->download_channel();
-  string app_channels =
-      "track=\"" + XmlEncodeWithDefault(download_channel, "") + "\" ";
-  if (params->current_channel() != download_channel) {
-    app_channels += "from_track=\"" +
-                    XmlEncodeWithDefault(params->current_channel(), "") + "\" ";
-  }
-
-  string delta_okay_str = params->delta_okay() ? "true" : "false";
-
-  // If install_date_days is not set (e.g., its value is -1), don't
-  // include the attribute.
-  string install_date_in_days_str = "";
-  if (install_date_in_days >= 0) {
-    install_date_in_days_str =
-        base::StringPrintf("installdate=\"%d\" ", install_date_in_days);
-  }
-
-  string app_cohort_args;
-  app_cohort_args +=
-      GetCohortArgXml(system_state->prefs(), "cohort", kPrefsOmahaCohort);
-  app_cohort_args += GetCohortArgXml(
-      system_state->prefs(), "cohorthint", kPrefsOmahaCohortHint);
-  app_cohort_args += GetCohortArgXml(
-      system_state->prefs(), "cohortname", kPrefsOmahaCohortName);
-
-  string fingerprint_arg;
-  if (!params->os_build_fingerprint().empty()) {
-    fingerprint_arg = "fingerprint=\"" +
-                      XmlEncodeWithDefault(params->os_build_fingerprint(), "") +
-                      "\" ";
-  }
-
-  string buildtype_arg;
-  if (!params->os_build_type().empty()) {
-    buildtype_arg = "os_build_type=\"" +
-                    XmlEncodeWithDefault(params->os_build_type(), "") + "\" ";
-  }
-
-  string product_components_args;
-  if (!params->ShouldPowerwash() && !app_data.product_components.empty()) {
-    brillo::KeyValueStore store;
-    if (store.LoadFromString(app_data.product_components)) {
-      for (const string& key : store.GetKeys()) {
-        if (!IsValidComponentID(key)) {
-          LOG(ERROR) << "Invalid component id: " << key;
-          continue;
-        }
-        string version;
-        if (!store.GetString(key, &version)) {
-          LOG(ERROR) << "Failed to get version for " << key
-                     << " in product_components.";
-          continue;
-        }
-        product_components_args +=
-            base::StringPrintf("_%s.version=\"%s\" ",
-                               key.c_str(),
-                               XmlEncodeWithDefault(version, "").c_str());
-      }
-    } else {
-      LOG(ERROR) << "Failed to parse product_components:\n"
-                 << app_data.product_components;
-    }
-  }
-
-  // clang-format off
-  string app_xml = "    <app "
-      "appid=\"" + XmlEncodeWithDefault(app_data.id, "") + "\" " +
-      app_cohort_args +
-      app_versions +
-      app_channels +
-      product_components_args +
-      fingerprint_arg +
-      buildtype_arg +
-      "lang=\"" + XmlEncodeWithDefault(params->app_lang(), "en-US") + "\" " +
-      "board=\"" + XmlEncodeWithDefault(params->os_board(), "") + "\" " +
-      "hardware_class=\"" + XmlEncodeWithDefault(params->hwid(), "") + "\" " +
-      "delta_okay=\"" + delta_okay_str + "\" "
-      "fw_version=\"" + XmlEncodeWithDefault(params->fw_version(), "") + "\" " +
-      "ec_version=\"" + XmlEncodeWithDefault(params->ec_version(), "") + "\" " +
-      install_date_in_days_str +
-      ">\n" +
-         app_body +
-      "    </app>\n";
-  // clang-format on
-  return app_xml;
-}
-
-// Returns an XML that corresponds to the entire <os> node of the Omaha
-// request based on the given parameters.
-string GetOsXml(OmahaRequestParams* params) {
-  string os_xml =
-      "    <os "
-      "version=\"" +
-      XmlEncodeWithDefault(params->os_version(), "") + "\" " + "platform=\"" +
-      XmlEncodeWithDefault(params->os_platform(), "") + "\" " + "sp=\"" +
-      XmlEncodeWithDefault(params->os_sp(), "") +
-      "\">"
-      "</os>\n";
-  return os_xml;
-}
-
-// Returns an XML that corresponds to the entire Omaha request based on the
-// given parameters.
-string GetRequestXml(const OmahaEvent* event,
-                     OmahaRequestParams* params,
-                     bool ping_only,
-                     bool include_ping,
-                     int ping_active_days,
-                     int ping_roll_call_days,
-                     int install_date_in_days,
-                     SystemState* system_state) {
-  string os_xml = GetOsXml(params);
-  OmahaAppData product_app = {
-      .id = params->GetAppId(),
-      .version = params->app_version(),
-      .product_components = params->product_components()};
-  // Skips updatecheck for platform app in case of an install operation.
-  string app_xml = GetAppXml(event,
-                             params,
-                             product_app,
-                             ping_only,
-                             include_ping,
-                             params->is_install(), /* skip_updatecheck */
-                             ping_active_days,
-                             ping_roll_call_days,
-                             install_date_in_days,
-                             system_state);
-  if (!params->system_app_id().empty()) {
-    OmahaAppData system_app = {.id = params->system_app_id(),
-                               .version = params->system_version()};
-    app_xml += GetAppXml(event,
-                         params,
-                         system_app,
-                         ping_only,
-                         include_ping,
-                         false, /* skip_updatecheck */
-                         ping_active_days,
-                         ping_roll_call_days,
-                         install_date_in_days,
-                         system_state);
-  }
-  // Create APP ID according to |dlc_module_id| (appending the DLC module ID
-  // to the current AppID with an underscore).
-  for (const auto& dlc_module_id : params->dlc_module_ids()) {
-    OmahaAppData dlc_module_app = {
-        .id = params->GetAppId() + "_" + dlc_module_id,
-        .version = params->app_version()};
-    app_xml += GetAppXml(event,
-                         params,
-                         dlc_module_app,
-                         ping_only,
-                         include_ping,
-                         false, /* skip_updatecheck */
-                         ping_active_days,
-                         ping_roll_call_days,
-                         install_date_in_days,
-                         system_state);
-  }
-
-  string install_source = base::StringPrintf(
-      "installsource=\"%s\" ",
-      (params->interactive() ? "ondemandupdate" : "scheduler"));
-
-  string updater_version = XmlEncodeWithDefault(
-      base::StringPrintf(
-          "%s-%s", constants::kOmahaUpdaterID, kOmahaUpdaterVersion),
-      "");
-  string request_xml =
-      "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
-      "<request protocol=\"3.0\" " +
-      ("version=\"" + updater_version +
-       "\" "
-       "updaterversion=\"" +
-       updater_version + "\" " + install_source + "ismachine=\"1\">\n") +
-      os_xml + app_xml + "</request>\n";
-
-  return request_xml;
-}
-
-}  // namespace
-
-// Struct used for holding data obtained when parsing the XML.
-struct OmahaParserData {
-  explicit OmahaParserData(XML_Parser _xml_parser) : xml_parser(_xml_parser) {}
-
-  // Pointer to the expat XML_Parser object.
-  XML_Parser xml_parser;
-
-  // This is the state of the parser as it's processing the XML.
-  bool failed = false;
-  bool entity_decl = false;
-  string current_path;
-
-  // These are the values extracted from the XML.
-  string updatecheck_poll_interval;
-  map<string, string> updatecheck_attrs;
-  string daystart_elapsed_days;
-  string daystart_elapsed_seconds;
-
-  struct App {
-    string id;
-    vector<string> url_codebase;
-    string manifest_version;
-    map<string, string> action_postinstall_attrs;
-    string updatecheck_status;
-    string cohort;
-    string cohorthint;
-    string cohortname;
-    bool cohort_set = false;
-    bool cohorthint_set = false;
-    bool cohortname_set = false;
-
-    struct Package {
-      string name;
-      string size;
-      string hash;
-    };
-    vector<Package> packages;
-  };
-  vector<App> apps;
-};
-
-namespace {
-
-// Callback function invoked by expat.
-void ParserHandlerStart(void* user_data,
-                        const XML_Char* element,
-                        const XML_Char** attr) {
-  OmahaParserData* data = reinterpret_cast<OmahaParserData*>(user_data);
-
-  if (data->failed)
-    return;
-
-  data->current_path += string("/") + element;
-
-  map<string, string> attrs;
-  if (attr != nullptr) {
-    for (int n = 0; attr[n] != nullptr && attr[n + 1] != nullptr; n += 2) {
-      string key = attr[n];
-      string value = attr[n + 1];
-      attrs[key] = value;
-    }
-  }
-
-  if (data->current_path == "/response/app") {
-    OmahaParserData::App app;
-    if (attrs.find(kAttrAppId) != attrs.end()) {
-      app.id = attrs[kAttrAppId];
-    }
-    if (attrs.find(kAttrCohort) != attrs.end()) {
-      app.cohort_set = true;
-      app.cohort = attrs[kAttrCohort];
-    }
-    if (attrs.find(kAttrCohortHint) != attrs.end()) {
-      app.cohorthint_set = true;
-      app.cohorthint = attrs[kAttrCohortHint];
-    }
-    if (attrs.find(kAttrCohortName) != attrs.end()) {
-      app.cohortname_set = true;
-      app.cohortname = attrs[kAttrCohortName];
-    }
-    data->apps.push_back(std::move(app));
-  } else if (data->current_path == "/response/app/updatecheck") {
-    if (!data->apps.empty())
-      data->apps.back().updatecheck_status = attrs[kAttrStatus];
-    if (data->updatecheck_poll_interval.empty())
-      data->updatecheck_poll_interval = attrs[kAttrPollInterval];
-    // Omaha sends arbitrary key-value pairs as extra attributes starting with
-    // an underscore.
-    for (const auto& attr : attrs) {
-      if (!attr.first.empty() && attr.first[0] == '_')
-        data->updatecheck_attrs[attr.first.substr(1)] = attr.second;
-    }
-  } else if (data->current_path == "/response/daystart") {
-    // Get the install-date.
-    data->daystart_elapsed_days = attrs[kAttrElapsedDays];
-    data->daystart_elapsed_seconds = attrs[kAttrElapsedSeconds];
-  } else if (data->current_path == "/response/app/updatecheck/urls/url") {
-    // Look at all <url> elements.
-    if (!data->apps.empty())
-      data->apps.back().url_codebase.push_back(attrs[kAttrCodeBase]);
-  } else if (data->current_path ==
-             "/response/app/updatecheck/manifest/packages/package") {
-    // Look at all <package> elements.
-    if (!data->apps.empty())
-      data->apps.back().packages.push_back({.name = attrs[kAttrName],
-                                            .size = attrs[kAttrSize],
-                                            .hash = attrs[kAttrHashSha256]});
-  } else if (data->current_path == "/response/app/updatecheck/manifest") {
-    // Get the version.
-    if (!data->apps.empty())
-      data->apps.back().manifest_version = attrs[kAttrVersion];
-  } else if (data->current_path ==
-             "/response/app/updatecheck/manifest/actions/action") {
-    // We only care about the postinstall action.
-    if (attrs[kAttrEvent] == kValPostInstall && !data->apps.empty()) {
-      data->apps.back().action_postinstall_attrs = std::move(attrs);
-    }
-  }
-}
-
-// Callback function invoked by expat.
-void ParserHandlerEnd(void* user_data, const XML_Char* element) {
-  OmahaParserData* data = reinterpret_cast<OmahaParserData*>(user_data);
-  if (data->failed)
-    return;
-
-  const string path_suffix = string("/") + element;
-
-  if (!base::EndsWith(
-          data->current_path, path_suffix, base::CompareCase::SENSITIVE)) {
-    LOG(ERROR) << "Unexpected end element '" << element
-               << "' with current_path='" << data->current_path << "'";
-    data->failed = true;
-    return;
-  }
-  data->current_path.resize(data->current_path.size() - path_suffix.size());
-}
-
-// Callback function invoked by expat.
-//
-// This is called for entity declarations. Since Omaha is guaranteed
-// to never return any XML with entities, our course of action is to
-// just stop parsing. This avoids potential resource exhaustion
-// problems, a.k.a. the "billion laughs" attack. CVE-2013-0340.
-void ParserHandlerEntityDecl(void* user_data,
-                             const XML_Char* entity_name,
-                             int is_parameter_entity,
-                             const XML_Char* value,
-                             int value_length,
-                             const XML_Char* base,
-                             const XML_Char* system_id,
-                             const XML_Char* public_id,
-                             const XML_Char* notation_name) {
-  OmahaParserData* data = reinterpret_cast<OmahaParserData*>(user_data);
-
-  LOG(ERROR) << "XML entities are not supported. Aborting parsing.";
-  data->failed = true;
-  data->entity_decl = true;
-  XML_StopParser(data->xml_parser, false);
-}
-
-}  // namespace
-
-bool XmlEncode(const string& input, string* output) {
-  if (std::find_if(input.begin(), input.end(), [](const char c) {
-        return c & 0x80;
-      }) != input.end()) {
-    LOG(WARNING) << "Invalid ASCII-7 string passed to the XML encoder:";
-    utils::HexDumpString(input);
-    return false;
-  }
-  output->clear();
-  // We need at least input.size() space in the output, but the code below will
-  // handle it if we need more.
-  output->reserve(input.size());
-  for (char c : input) {
-    switch (c) {
-      case '\"':
-        output->append("&quot;");
-        break;
-      case '\'':
-        output->append("&apos;");
-        break;
-      case '&':
-        output->append("&amp;");
-        break;
-      case '<':
-        output->append("&lt;");
-        break;
-      case '>':
-        output->append("&gt;");
-        break;
-      default:
-        output->push_back(c);
-    }
-  }
-  return true;
-}
-
-string XmlEncodeWithDefault(const string& input, const string& default_value) {
-  string output;
-  if (XmlEncode(input, &output))
-    return output;
-  return default_value;
-}
-
-OmahaRequestAction::OmahaRequestAction(
-    SystemState* system_state,
-    OmahaEvent* event,
-    std::unique_ptr<HttpFetcher> http_fetcher,
-    bool ping_only)
-    : system_state_(system_state),
-      params_(system_state->request_params()),
-      event_(event),
-      http_fetcher_(std::move(http_fetcher)),
-      policy_provider_(std::make_unique<policy::PolicyProvider>()),
-      ping_only_(ping_only),
-      ping_active_days_(0),
-      ping_roll_call_days_(0) {
-  policy_provider_->Reload();
-}
-
-OmahaRequestAction::~OmahaRequestAction() {}
-
-// Calculates the value to use for the ping days parameter.
-int OmahaRequestAction::CalculatePingDays(const string& key) {
-  int days = kNeverPinged;
-  int64_t last_ping = 0;
-  if (system_state_->prefs()->GetInt64(key, &last_ping) && last_ping >= 0) {
-    days = (Time::Now() - Time::FromInternalValue(last_ping)).InDays();
-    if (days < 0) {
-      // If |days| is negative, then the system clock must have jumped
-      // back in time since the ping was sent. Mark the value so that
-      // it doesn't get sent to the server but we still update the
-      // last ping daystart preference. This way the next ping time
-      // will be correct, hopefully.
-      days = kPingTimeJump;
-      LOG(WARNING)
-          << "System clock jumped back in time. Resetting ping daystarts.";
-    }
-  }
-  return days;
-}
-
-void OmahaRequestAction::InitPingDays() {
-  // We send pings only along with update checks, not with events.
-  if (IsEvent()) {
-    return;
-  }
-  // TODO(petkov): Figure a way to distinguish active use pings
-  // vs. roll call pings. Currently, the two pings are identical. A
-  // fix needs to change this code as well as UpdateLastPingDays and ShouldPing.
-  ping_active_days_ = CalculatePingDays(kPrefsLastActivePingDay);
-  ping_roll_call_days_ = CalculatePingDays(kPrefsLastRollCallPingDay);
-}
-
-bool OmahaRequestAction::ShouldPing() const {
-  if (ping_active_days_ == OmahaRequestAction::kNeverPinged &&
-      ping_roll_call_days_ == OmahaRequestAction::kNeverPinged) {
-    int powerwash_count = system_state_->hardware()->GetPowerwashCount();
-    if (powerwash_count > 0) {
-      LOG(INFO) << "Not sending ping with a=-1 r=-1 to omaha because "
-                << "powerwash_count is " << powerwash_count;
-      return false;
-    }
-    if (system_state_->hardware()->GetFirstActiveOmahaPingSent()) {
-      LOG(INFO) << "Not sending ping with a=-1 r=-1 to omaha because "
-                << "the first_active_omaha_ping_sent is true";
-      return false;
-    }
-    return true;
-  }
-  return ping_active_days_ > 0 || ping_roll_call_days_ > 0;
-}
-
-// static
-int OmahaRequestAction::GetInstallDate(SystemState* system_state) {
-  PrefsInterface* prefs = system_state->prefs();
-  if (prefs == nullptr)
-    return -1;
-
-  // If we have the value stored on disk, just return it.
-  int64_t stored_value;
-  if (prefs->GetInt64(kPrefsInstallDateDays, &stored_value)) {
-    // Convert and sanity-check.
-    int install_date_days = static_cast<int>(stored_value);
-    if (install_date_days >= 0)
-      return install_date_days;
-    LOG(ERROR) << "Dropping stored Omaha InstallData since its value num_days="
-               << install_date_days << " looks suspicious.";
-    prefs->Delete(kPrefsInstallDateDays);
-  }
-
-  // Otherwise, if OOBE is not complete then do nothing and wait for
-  // ParseResponse() to call ParseInstallDate() and then
-  // PersistInstallDate() to set the kPrefsInstallDateDays state
-  // variable. Once that is done, we'll then report back in future
-  // Omaha requests.  This works exactly because OOBE triggers an
-  // update check.
-  //
-  // However, if OOBE is complete and the kPrefsInstallDateDays state
-  // variable is not set, there are two possibilities
-  //
-  //   1. The update check in OOBE failed so we never got a response
-  //      from Omaha (no network etc.); or
-  //
-  //   2. OOBE was done on an older version that didn't write to the
-  //      kPrefsInstallDateDays state variable.
-  //
-  // In both cases, we approximate the install date by simply
-  // inspecting the timestamp of when OOBE happened.
-
-  Time time_of_oobe;
-  if (!system_state->hardware()->IsOOBEEnabled() ||
-      !system_state->hardware()->IsOOBEComplete(&time_of_oobe)) {
-    LOG(INFO) << "Not generating Omaha InstallData as we have "
-              << "no prefs file and OOBE is not complete or not enabled.";
-    return -1;
-  }
-
-  int num_days;
-  if (!utils::ConvertToOmahaInstallDate(time_of_oobe, &num_days)) {
-    LOG(ERROR) << "Not generating Omaha InstallData from time of OOBE "
-               << "as its value '" << utils::ToString(time_of_oobe)
-               << "' looks suspicious.";
-    return -1;
-  }
-
-  // Persist this to disk, for future use.
-  if (!OmahaRequestAction::PersistInstallDate(
-          system_state, num_days, kProvisionedFromOOBEMarker))
-    return -1;
-
-  LOG(INFO) << "Set the Omaha InstallDate from OOBE time-stamp to " << num_days
-            << " days";
-
-  return num_days;
-}
-
-void OmahaRequestAction::PerformAction() {
-  http_fetcher_->set_delegate(this);
-  InitPingDays();
-  if (ping_only_ && !ShouldPing()) {
-    processor_->ActionComplete(this, ErrorCode::kSuccess);
-    return;
-  }
-
-  string request_post(GetRequestXml(event_.get(),
-                                    params_,
-                                    ping_only_,
-                                    ShouldPing(),  // include_ping
-                                    ping_active_days_,
-                                    ping_roll_call_days_,
-                                    GetInstallDate(system_state_),
-                                    system_state_));
-
-  // Set X-Goog-Update headers.
-  http_fetcher_->SetHeader(kXGoogleUpdateInteractivity,
-                           params_->interactive() ? "fg" : "bg");
-  http_fetcher_->SetHeader(kXGoogleUpdateAppId, params_->GetAppId());
-  http_fetcher_->SetHeader(
-      kXGoogleUpdateUpdater,
-      base::StringPrintf(
-          "%s-%s", constants::kOmahaUpdaterID, kOmahaUpdaterVersion));
-
-  http_fetcher_->SetPostData(
-      request_post.data(), request_post.size(), kHttpContentTypeTextXml);
-  LOG(INFO) << "Posting an Omaha request to " << params_->update_url();
-  LOG(INFO) << "Request: " << request_post;
-  http_fetcher_->BeginTransfer(params_->update_url());
-}
-
-void OmahaRequestAction::TerminateProcessing() {
-  http_fetcher_->TerminateTransfer();
-}
-
-// We just store the response in the buffer. Once we've received all bytes,
-// we'll look in the buffer and decide what to do.
-bool OmahaRequestAction::ReceivedBytes(HttpFetcher* fetcher,
-                                       const void* bytes,
-                                       size_t length) {
-  const uint8_t* byte_ptr = reinterpret_cast<const uint8_t*>(bytes);
-  response_buffer_.insert(response_buffer_.end(), byte_ptr, byte_ptr + length);
-  return true;
-}
-
-namespace {
-
-// Parses a 64-bit base-10 int from a string and returns it. Returns 0
-// on error. If the string contains "0", that's indistinguishable from
-// an error.
-off_t ParseInt(const string& str) {
-  off_t ret = 0;
-  int rc = sscanf(str.c_str(), "%" PRIi64, &ret);  // NOLINT(runtime/printf)
-  if (rc < 1) {
-    // failure
-    return 0;
-  }
-  return ret;
-}
-
-// Parses |str| and returns |true| if, and only if, its value is "true".
-bool ParseBool(const string& str) {
-  return str == "true";
-}
-
-// Update the last ping day preferences based on the server daystart
-// response. Returns true on success, false otherwise.
-bool UpdateLastPingDays(OmahaParserData* parser_data, PrefsInterface* prefs) {
-  int64_t elapsed_seconds = 0;
-  TEST_AND_RETURN_FALSE(base::StringToInt64(
-      parser_data->daystart_elapsed_seconds, &elapsed_seconds));
-  TEST_AND_RETURN_FALSE(elapsed_seconds >= 0);
-
-  // Remember the local time that matches the server's last midnight
-  // time.
-  Time daystart = Time::Now() - TimeDelta::FromSeconds(elapsed_seconds);
-  prefs->SetInt64(kPrefsLastActivePingDay, daystart.ToInternalValue());
-  prefs->SetInt64(kPrefsLastRollCallPingDay, daystart.ToInternalValue());
-  return true;
-}
-
-// Parses the package node in the given XML document and populates
-// |output_object| if valid. Returns true if we should continue the parsing;
-// false otherwise, in which case it sets an error code using |completer|.
-bool ParsePackage(OmahaParserData::App* app,
-                  OmahaResponse* output_object,
-                  ScopedActionCompleter* completer) {
-  if (app->updatecheck_status.empty() ||
-      app->updatecheck_status == kValNoUpdate) {
-    if (!app->packages.empty()) {
-      LOG(ERROR) << "No update in this <app> but <package> is not empty.";
-      completer->set_code(ErrorCode::kOmahaResponseInvalid);
-      return false;
-    }
-    return true;
-  }
-  if (app->packages.empty()) {
-    LOG(ERROR) << "Omaha Response has no packages";
-    completer->set_code(ErrorCode::kOmahaResponseInvalid);
-    return false;
-  }
-  if (app->url_codebase.empty()) {
-    LOG(ERROR) << "No Omaha Response URLs";
-    completer->set_code(ErrorCode::kOmahaResponseInvalid);
-    return false;
-  }
-  LOG(INFO) << "Found " << app->url_codebase.size() << " url(s)";
-  vector<string> metadata_sizes =
-      base::SplitString(app->action_postinstall_attrs[kAttrMetadataSize],
-                        ":",
-                        base::TRIM_WHITESPACE,
-                        base::SPLIT_WANT_ALL);
-  vector<string> metadata_signatures = base::SplitString(
-      app->action_postinstall_attrs[kAttrMetadataSignatureRsa],
-      ":",
-      base::TRIM_WHITESPACE,
-      base::SPLIT_WANT_ALL);
-  vector<string> is_delta_payloads =
-      base::SplitString(app->action_postinstall_attrs[kAttrIsDeltaPayload],
-                        ":",
-                        base::TRIM_WHITESPACE,
-                        base::SPLIT_WANT_ALL);
-  for (size_t i = 0; i < app->packages.size(); i++) {
-    const auto& package = app->packages[i];
-    if (package.name.empty()) {
-      LOG(ERROR) << "Omaha Response has empty package name";
-      completer->set_code(ErrorCode::kOmahaResponseInvalid);
-      return false;
-    }
-    LOG(INFO) << "Found package " << package.name;
-
-    OmahaResponse::Package out_package;
-    for (const string& codebase : app->url_codebase) {
-      if (codebase.empty()) {
-        LOG(ERROR) << "Omaha Response URL has empty codebase";
-        completer->set_code(ErrorCode::kOmahaResponseInvalid);
-        return false;
-      }
-      out_package.payload_urls.push_back(codebase + package.name);
-    }
-    // Parse the payload size.
-    base::StringToUint64(package.size, &out_package.size);
-    if (out_package.size <= 0) {
-      LOG(ERROR) << "Omaha Response has invalid payload size: " << package.size;
-      completer->set_code(ErrorCode::kOmahaResponseInvalid);
-      return false;
-    }
-    LOG(INFO) << "Payload size = " << out_package.size << " bytes";
-
-    if (i < metadata_sizes.size())
-      base::StringToUint64(metadata_sizes[i], &out_package.metadata_size);
-    LOG(INFO) << "Payload metadata size = " << out_package.metadata_size
-              << " bytes";
-
-    if (i < metadata_signatures.size())
-      out_package.metadata_signature = metadata_signatures[i];
-    LOG(INFO) << "Payload metadata signature = "
-              << out_package.metadata_signature;
-
-    out_package.hash = package.hash;
-    if (out_package.hash.empty()) {
-      LOG(ERROR) << "Omaha Response has empty hash_sha256 value";
-      completer->set_code(ErrorCode::kOmahaResponseInvalid);
-      return false;
-    }
-    LOG(INFO) << "Payload hash = " << out_package.hash;
-
-    if (i < is_delta_payloads.size())
-      out_package.is_delta = ParseBool(is_delta_payloads[i]);
-    LOG(INFO) << "Payload is delta = " << utils::ToString(out_package.is_delta);
-
-    output_object->packages.push_back(std::move(out_package));
-  }
-
-  return true;
-}
-
-// Parses the two key version strings kernel_version and firmware_version. If
-// the field is not present or cannot be parsed, the values default to 0xffff.
-void ParseRollbackVersions(OmahaParserData* parser_data,
-                           OmahaResponse* output_object) {
-  utils::ParseRollbackKeyVersion(
-      parser_data->updatecheck_attrs[kAttrFirmwareVersion],
-      &output_object->rollback_key_version.firmware_key,
-      &output_object->rollback_key_version.firmware);
-  utils::ParseRollbackKeyVersion(
-      parser_data->updatecheck_attrs[kAttrKernelVersion],
-      &output_object->rollback_key_version.kernel_key,
-      &output_object->rollback_key_version.kernel);
-}
-
-}  // namespace
-
-bool OmahaRequestAction::ParseResponse(OmahaParserData* parser_data,
-                                       OmahaResponse* output_object,
-                                       ScopedActionCompleter* completer) {
-  if (parser_data->apps.empty()) {
-    completer->set_code(ErrorCode::kOmahaResponseInvalid);
-    return false;
-  }
-  LOG(INFO) << "Found " << parser_data->apps.size() << " <app>.";
-
-  // chromium-os:37289: The PollInterval is not supported by Omaha server
-  // currently.  But still keeping this existing code in case we ever decide to
-  // slow down the request rate from the server-side. Note that the PollInterval
-  // is not persisted, so it has to be sent by the server on every response to
-  // guarantee that the scheduler uses this value (otherwise, if the device got
-  // rebooted after the last server-indicated value, it'll revert to the default
-  // value). Also kDefaultMaxUpdateChecks value for the scattering logic is
-  // based on the assumption that we perform an update check every hour so that
-  // the max value of 8 will roughly be equivalent to one work day. If we decide
-  // to use PollInterval permanently, we should update the
-  // max_update_checks_allowed to take PollInterval into account.  Note: The
-  // parsing for PollInterval happens even before parsing of the status because
-  // we may want to specify the PollInterval even when there's no update.
-  base::StringToInt(parser_data->updatecheck_poll_interval,
-                    &output_object->poll_interval);
-
-  // Check for the "elapsed_days" attribute in the "daystart"
-  // element. This is the number of days since Jan 1 2007, 0:00
-  // PST. If we don't have a persisted value of the Omaha InstallDate,
-  // we'll use it to calculate it and then persist it.
-  if (ParseInstallDate(parser_data, output_object) &&
-      !HasInstallDate(system_state_)) {
-    // Since output_object->install_date_days is never negative, the
-    // elapsed_days -> install-date calculation is reduced to simply
-    // rounding down to the nearest number divisible by 7.
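-    // For example, an elapsed_days of 23 is rounded down and persisted as 21,
-    // since 23 % 7 == 2 and 23 - 2 == 21.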
-    int remainder = output_object->install_date_days % 7;
-    int install_date_days_rounded =
-        output_object->install_date_days - remainder;
-    if (PersistInstallDate(system_state_,
-                           install_date_days_rounded,
-                           kProvisionedFromOmahaResponse)) {
-      LOG(INFO) << "Set the Omaha InstallDate from Omaha Response to "
-                << install_date_days_rounded << " days";
-    }
-  }
-
-  // We persist the cohorts sent by omaha even if the status is "noupdate".
-  for (const auto& app : parser_data->apps) {
-    if (app.id == params_->GetAppId()) {
-      if (app.cohort_set)
-        PersistCohortData(kPrefsOmahaCohort, app.cohort);
-      if (app.cohorthint_set)
-        PersistCohortData(kPrefsOmahaCohortHint, app.cohorthint);
-      if (app.cohortname_set)
-        PersistCohortData(kPrefsOmahaCohortName, app.cohortname);
-      break;
-    }
-  }
-
-  // Parse the updatecheck attributes.
-  PersistEolStatus(parser_data->updatecheck_attrs);
-  // Rollback-related updatecheck attributes.
-  // Defaults to false if attribute is not present.
-  output_object->is_rollback =
-      ParseBool(parser_data->updatecheck_attrs[kAttrRollback]);
-
-  // Parses the rollback versions of the current image. If the fields do not
-  // exist they default to 0xffff for the 4 key versions.
-  ParseRollbackVersions(parser_data, output_object);
-
-  if (!ParseStatus(parser_data, output_object, completer))
-    return false;
-
-  if (!ParseParams(parser_data, output_object, completer))
-    return false;
-
-  // Packages have to be parsed after Params now because ParseParams needs to
-  // make sure that the postinstall action exists.
-  for (auto& app : parser_data->apps)
-    if (!ParsePackage(&app, output_object, completer))
-      return false;
-
-  return true;
-}
-
-bool OmahaRequestAction::ParseStatus(OmahaParserData* parser_data,
-                                     OmahaResponse* output_object,
-                                     ScopedActionCompleter* completer) {
-  output_object->update_exists = false;
-  for (const auto& app : parser_data->apps) {
-    const string& status = app.updatecheck_status;
-    if (status == kValNoUpdate) {
-      // Don't update if any app has status="noupdate".
-      LOG(INFO) << "No update for <app> " << app.id;
-      output_object->update_exists = false;
-      break;
-    } else if (status == "ok") {
-      auto const& attr_no_update =
-          app.action_postinstall_attrs.find(kAttrNoUpdate);
-      if (attr_no_update != app.action_postinstall_attrs.end() &&
-          attr_no_update->second == "true") {
-        // noupdate="true" in postinstall attributes means it's an update to
-        // self, only update if there's at least one app really have update.
-        LOG(INFO) << "Update to self for <app> " << app.id;
-      } else {
-        LOG(INFO) << "Update for <app> " << app.id;
-        output_object->update_exists = true;
-      }
-    } else if (status.empty() && params_->is_install() &&
-               params_->GetAppId() == app.id) {
-      // Skip the platform app for an install operation.
-      LOG(INFO) << "No payload (and ignore) for <app> " << app.id;
-    } else {
-      LOG(ERROR) << "Unknown Omaha response status: " << status;
-      completer->set_code(ErrorCode::kOmahaResponseInvalid);
-      return false;
-    }
-  }
-  if (!output_object->update_exists) {
-    SetOutputObject(*output_object);
-    completer->set_code(ErrorCode::kSuccess);
-  }
-
-  return output_object->update_exists;
-}
-
-bool OmahaRequestAction::ParseParams(OmahaParserData* parser_data,
-                                     OmahaResponse* output_object,
-                                     ScopedActionCompleter* completer) {
-  map<string, string> attrs;
-  for (auto& app : parser_data->apps) {
-    if (app.id == params_->GetAppId()) {
-      // this is the app (potentially the only app)
-      output_object->version = app.manifest_version;
-    } else if (!params_->system_app_id().empty() &&
-               app.id == params_->system_app_id()) {
-      // this is the system app (this check is intentionally skipped if there is
-      // no system_app_id set)
-      output_object->system_version = app.manifest_version;
-    } else if (params_->is_install() &&
-               app.manifest_version != params_->app_version()) {
-      LOG(WARNING) << "An app has a different version (" << app.manifest_version
-                   << ") that is different than platform app version ("
-                   << params_->app_version() << ")";
-    }
-    if (!app.action_postinstall_attrs.empty() && attrs.empty()) {
-      attrs = app.action_postinstall_attrs;
-    }
-  }
-  if (params_->is_install()) {
-    LOG(INFO) << "Use request version for Install operation.";
-    output_object->version = params_->app_version();
-  }
-  if (output_object->version.empty()) {
-    LOG(ERROR) << "Omaha Response does not have version in manifest!";
-    completer->set_code(ErrorCode::kOmahaResponseInvalid);
-    return false;
-  }
-
-  LOG(INFO) << "Received omaha response to update to version "
-            << output_object->version;
-
-  if (attrs.empty()) {
-    LOG(ERROR) << "Omaha Response has no postinstall event action";
-    completer->set_code(ErrorCode::kOmahaResponseInvalid);
-    return false;
-  }
-
-  // Get the optional properties one by one.
-  output_object->more_info_url = attrs[kAttrMoreInfo];
-  output_object->prompt = ParseBool(attrs[kAttrPrompt]);
-  output_object->deadline = attrs[kAttrDeadline];
-  output_object->max_days_to_scatter = ParseInt(attrs[kAttrMaxDaysToScatter]);
-  output_object->disable_p2p_for_downloading =
-      ParseBool(attrs[kAttrDisableP2PForDownloading]);
-  output_object->disable_p2p_for_sharing =
-      ParseBool(attrs[kAttrDisableP2PForSharing]);
-  output_object->public_key_rsa = attrs[kAttrPublicKeyRsa];
-
-  string max = attrs[kAttrMaxFailureCountPerUrl];
-  if (!base::StringToUint(max, &output_object->max_failure_count_per_url))
-    output_object->max_failure_count_per_url = kDefaultMaxFailureCountPerUrl;
-
-  output_object->disable_payload_backoff =
-      ParseBool(attrs[kAttrDisablePayloadBackoff]);
-  output_object->powerwash_required = ParseBool(attrs[kAttrPowerwash]);
-
-  return true;
-}
-
-// If the transfer was successful, this uses expat to parse the response
-// and fill in the appropriate fields of the output object. Also, notifies
-// the processor that we're done.
-void OmahaRequestAction::TransferComplete(HttpFetcher* fetcher,
-                                          bool successful) {
-  ScopedActionCompleter completer(processor_, this);
-  string current_response(response_buffer_.begin(), response_buffer_.end());
-  LOG(INFO) << "Omaha request response: " << current_response;
-
-  PayloadStateInterface* const payload_state = system_state_->payload_state();
-
-  // Set the max kernel key version based on whether rollback is allowed.
-  SetMaxKernelKeyVersionForRollback();
-
-  // Events are best effort transactions -- assume they always succeed.
-  if (IsEvent()) {
-    CHECK(!HasOutputPipe()) << "No output pipe allowed for event requests.";
-    completer.set_code(ErrorCode::kSuccess);
-    return;
-  }
-
-  if (!successful) {
-    LOG(ERROR) << "Omaha request network transfer failed.";
-    int code = GetHTTPResponseCode();
-    // Makes sure we send sane error values.
-    if (code < 0 || code >= 1000) {
-      code = 999;
-    }
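-    // The resulting error code embeds the HTTP status, e.g. an HTTP 404
-    // response is reported as kOmahaRequestHTTPResponseBase + 404.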
-    completer.set_code(static_cast<ErrorCode>(
-        static_cast<int>(ErrorCode::kOmahaRequestHTTPResponseBase) + code));
-    return;
-  }
-
-  XML_Parser parser = XML_ParserCreate(nullptr);
-  OmahaParserData parser_data(parser);
-  XML_SetUserData(parser, &parser_data);
-  XML_SetElementHandler(parser, ParserHandlerStart, ParserHandlerEnd);
-  XML_SetEntityDeclHandler(parser, ParserHandlerEntityDecl);
-  XML_Status res =
-      XML_Parse(parser,
-                reinterpret_cast<const char*>(response_buffer_.data()),
-                response_buffer_.size(),
-                XML_TRUE);
-
-  if (res != XML_STATUS_OK || parser_data.failed) {
-    LOG(ERROR) << "Omaha response not valid XML: "
-               << XML_ErrorString(XML_GetErrorCode(parser)) << " at line "
-               << XML_GetCurrentLineNumber(parser) << " col "
-               << XML_GetCurrentColumnNumber(parser);
-    XML_ParserFree(parser);
-    ErrorCode error_code = ErrorCode::kOmahaRequestXMLParseError;
-    if (response_buffer_.empty()) {
-      error_code = ErrorCode::kOmahaRequestEmptyResponseError;
-    } else if (parser_data.entity_decl) {
-      error_code = ErrorCode::kOmahaRequestXMLHasEntityDecl;
-    }
-    completer.set_code(error_code);
-    return;
-  }
-  XML_ParserFree(parser);
-
-  // Update the last ping day preferences based on the server daystart response
-  // even if we didn't send a ping. Omaha always includes the daystart in the
-  // response, but log the error if it didn't.
-  LOG_IF(ERROR, !UpdateLastPingDays(&parser_data, system_state_->prefs()))
-      << "Failed to update the last ping day preferences!";
-
-  // Sets first_active_omaha_ping_sent to true (vpd in CrOS). We only do this if
-  // we have received a response from Omaha and if its value has never been set
-  // to true before. Failure of this function should be ignored. There should be
-  // no need to check if a=-1 has been sent because older devices have already
-  // sent their a=-1 in the past and we have to set first_active_omaha_ping_sent
-  // for future checks.
-  if (!system_state_->hardware()->GetFirstActiveOmahaPingSent()) {
-    if (!system_state_->hardware()->SetFirstActiveOmahaPingSent()) {
-      system_state_->metrics_reporter()->ReportInternalErrorCode(
-          ErrorCode::kFirstActiveOmahaPingSentPersistenceError);
-    }
-  }
-
-  if (!HasOutputPipe()) {
-    // Just set success to whether or not the http transfer succeeded,
-    // which must be true at this point in the code.
-    completer.set_code(ErrorCode::kSuccess);
-    return;
-  }
-
-  OmahaResponse output_object;
-  if (!ParseResponse(&parser_data, &output_object, &completer))
-    return;
-  output_object.update_exists = true;
-  SetOutputObject(output_object);
-
-  LoadOrPersistUpdateFirstSeenAtPref();
-
-  ErrorCode error = ErrorCode::kSuccess;
-  if (ShouldIgnoreUpdate(output_object, &error)) {
-    // No need to change output_object.update_exists here, since the value
-    // has been output to the pipe.
-    completer.set_code(error);
-    return;
-  }
-
-  // If Omaha says to disable p2p, respect that
-  if (output_object.disable_p2p_for_downloading) {
-    LOG(INFO) << "Forcibly disabling use of p2p for downloading as "
-              << "requested by Omaha.";
-    payload_state->SetUsingP2PForDownloading(false);
-  }
-  if (output_object.disable_p2p_for_sharing) {
-    LOG(INFO) << "Forcibly disabling use of p2p for sharing as "
-              << "requested by Omaha.";
-    payload_state->SetUsingP2PForSharing(false);
-  }
-
-  // Update the payload state with the current response. The payload state
-  // will automatically reset all stale state if this response is different
-  // from what's stored already. We are updating the payload state as late
-  // as possible in this method so that if a new release gets pushed and then
-  // pulled back due to some issue, we don't clear our internal state
-  // unnecessarily.
-  payload_state->SetResponse(output_object);
-
-  // It could be we've already exceeded the deadline for when p2p is
-  // allowed or that we've tried too many times with p2p. Check that.
-  if (payload_state->GetUsingP2PForDownloading()) {
-    payload_state->P2PNewAttempt();
-    if (!payload_state->P2PAttemptAllowed()) {
-      LOG(INFO) << "Forcibly disabling use of p2p for downloading because "
-                << "of previous failures when using p2p.";
-      payload_state->SetUsingP2PForDownloading(false);
-    }
-  }
-
-  // From here on, we'll complete stuff in CompleteProcessing() so
-  // disable |completer| since we'll create a new one in that
-  // function.
-  completer.set_should_complete(false);
-
-  // If we're allowed to use p2p for downloading we do not pay
-  // attention to wall-clock-based waiting if the URL is indeed
-  // available via p2p. Therefore, check if the file is available via
-  // p2p before deferring...
-  if (payload_state->GetUsingP2PForDownloading()) {
-    LookupPayloadViaP2P(output_object);
-  } else {
-    CompleteProcessing();
-  }
-}
-
-void OmahaRequestAction::CompleteProcessing() {
-  ScopedActionCompleter completer(processor_, this);
-  OmahaResponse& output_object = const_cast<OmahaResponse&>(GetOutputObject());
-  PayloadStateInterface* payload_state = system_state_->payload_state();
-
-  if (ShouldDeferDownload(&output_object)) {
-    output_object.update_exists = false;
-    LOG(INFO) << "Ignoring Omaha updates as updates are deferred by policy.";
-    completer.set_code(ErrorCode::kOmahaUpdateDeferredPerPolicy);
-    return;
-  }
-
-  if (payload_state->ShouldBackoffDownload()) {
-    output_object.update_exists = false;
-    LOG(INFO) << "Ignoring Omaha updates in order to backoff our retry "
-              << "attempts";
-    completer.set_code(ErrorCode::kOmahaUpdateDeferredForBackoff);
-    return;
-  }
-  completer.set_code(ErrorCode::kSuccess);
-}
-
-void OmahaRequestAction::OnLookupPayloadViaP2PCompleted(const string& url) {
-  LOG(INFO) << "Lookup complete, p2p-client returned URL '" << url << "'";
-  if (!url.empty()) {
-    system_state_->payload_state()->SetP2PUrl(url);
-  } else {
-    LOG(INFO) << "Forcibly disabling use of p2p for downloading "
-              << "because no suitable peer could be found.";
-    system_state_->payload_state()->SetUsingP2PForDownloading(false);
-  }
-  CompleteProcessing();
-}
-
-void OmahaRequestAction::LookupPayloadViaP2P(const OmahaResponse& response) {
-  // If the device is in the middle of an update, the state variables
-  // kPrefsUpdateStateNextDataOffset, kPrefsUpdateStateNextDataLength
-  // tracks the offset and length of the operation currently in
-  // progress. The offset is based from the end of the manifest which
-  // is kPrefsManifestMetadataSize bytes long.
-  //
-  // To make forward progress and avoid deadlocks, we need to find a
-  // peer that has at least the entire operation we're currently
-  // working on. Otherwise we may end up in a situation where two
-  // devices bounce back and forth downloading from each other,
-  // neither making any forward progress until one of them decides to
-  // stop using p2p (via the kMaxP2PAttempts and kMaxP2PAttemptTimeSeconds
-  // safeguards). See http://crbug.com/297170 for an example.
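-  // Put differently, a suitable peer must hold at least the manifest, its
-  // signature, and all payload data up to the end of the operation currently
-  // in progress; that total is the minimum_size computed below.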
-  size_t minimum_size = 0;
-  int64_t manifest_metadata_size = 0;
-  int64_t manifest_signature_size = 0;
-  int64_t next_data_offset = 0;
-  int64_t next_data_length = 0;
-  if (system_state_ &&
-      system_state_->prefs()->GetInt64(kPrefsManifestMetadataSize,
-                                       &manifest_metadata_size) &&
-      manifest_metadata_size != -1 &&
-      system_state_->prefs()->GetInt64(kPrefsManifestSignatureSize,
-                                       &manifest_signature_size) &&
-      manifest_signature_size != -1 &&
-      system_state_->prefs()->GetInt64(kPrefsUpdateStateNextDataOffset,
-                                       &next_data_offset) &&
-      next_data_offset != -1 &&
-      system_state_->prefs()->GetInt64(kPrefsUpdateStateNextDataLength,
-                                       &next_data_length)) {
-    minimum_size = manifest_metadata_size + manifest_signature_size +
-                   next_data_offset + next_data_length;
-  }
-
-  // TODO(senj): Fix P2P for multiple package.
-  brillo::Blob raw_hash;
-  if (!base::HexStringToBytes(response.packages[0].hash, &raw_hash))
-    return;
-  string file_id =
-      utils::CalculateP2PFileId(raw_hash, response.packages[0].size);
-  if (system_state_->p2p_manager()) {
-    LOG(INFO) << "Checking if payload is available via p2p, file_id=" << file_id
-              << " minimum_size=" << minimum_size;
-    system_state_->p2p_manager()->LookupUrlForFile(
-        file_id,
-        minimum_size,
-        TimeDelta::FromSeconds(kMaxP2PNetworkWaitTimeSeconds),
-        base::Bind(&OmahaRequestAction::OnLookupPayloadViaP2PCompleted,
-                   base::Unretained(this)));
-  }
-}
-
-bool OmahaRequestAction::ShouldDeferDownload(OmahaResponse* output_object) {
-  if (params_->interactive()) {
-    LOG(INFO) << "Not deferring download because update is interactive.";
-    return false;
-  }
-
-  // If we're using p2p to download _and_ we have a p2p URL, we never
-  // defer the download. This is because the download will always
-  // happen from a peer on the LAN and we've been waiting in line for
-  // our turn.
-  const PayloadStateInterface* payload_state = system_state_->payload_state();
-  if (payload_state->GetUsingP2PForDownloading() &&
-      !payload_state->GetP2PUrl().empty()) {
-    LOG(INFO) << "Download not deferred because download "
-              << "will happen from a local peer (via p2p).";
-    return false;
-  }
-
-  // We should defer the downloads only if we've first satisfied the
-  // wall-clock-based-waiting period and then the update-check-based waiting
-  // period, if required.
-  if (!params_->wall_clock_based_wait_enabled()) {
-    LOG(INFO) << "Wall-clock-based waiting period is not enabled,"
-              << " so no deferring needed.";
-    return false;
-  }
-
-  switch (IsWallClockBasedWaitingSatisfied(output_object)) {
-    case kWallClockWaitNotSatisfied:
-      // We haven't even satisfied the first condition, passing the
-      // wall-clock-based waiting period, so we should defer the downloads
-      // until that happens.
-      LOG(INFO) << "wall-clock-based-wait not satisfied.";
-      return true;
-
-    case kWallClockWaitDoneButUpdateCheckWaitRequired:
-      LOG(INFO) << "wall-clock-based-wait satisfied and "
-                << "update-check-based-wait required.";
-      return !IsUpdateCheckCountBasedWaitingSatisfied();
-
-    case kWallClockWaitDoneAndUpdateCheckWaitNotRequired:
-      // Wall-clock-based waiting period is satisfied, and it's determined
-      // that we do not need the update-check-based wait, so there is no need
-      // to defer downloads.
-      LOG(INFO) << "wall-clock-based-wait satisfied and "
-                << "update-check-based-wait is not required.";
-      return false;
-
-    default:
-      // Returning false for this default case so that we err on the side of
-      // downloading updates rather than deferring them in case of any bugs.
-      NOTREACHED();
-      return false;
-  }
-}
-
-OmahaRequestAction::WallClockWaitResult
-OmahaRequestAction::IsWallClockBasedWaitingSatisfied(
-    OmahaResponse* output_object) {
-  Time update_first_seen_at = LoadOrPersistUpdateFirstSeenAtPref();
-  if (update_first_seen_at == base::Time()) {
-    LOG(INFO) << "Not scattering as UpdateFirstSeenAt value cannot be read or "
-                 "persisted";
-    return kWallClockWaitDoneAndUpdateCheckWaitNotRequired;
-  }
-
-  TimeDelta elapsed_time =
-      system_state_->clock()->GetWallclockTime() - update_first_seen_at;
-  TimeDelta max_scatter_period =
-      TimeDelta::FromDays(output_object->max_days_to_scatter);
-  int64_t staging_wait_time_in_days = 0;
-  // Use staging and its default max value if staging is on.
-  if (system_state_->prefs()->GetInt64(kPrefsWallClockStagingWaitPeriod,
-                                       &staging_wait_time_in_days) &&
-      staging_wait_time_in_days > 0)
-    max_scatter_period = TimeDelta::FromDays(kMaxWaitTimeStagingInDays);
-
-  LOG(INFO) << "Waiting Period = "
-            << utils::FormatSecs(params_->waiting_period().InSeconds())
-            << ", Time Elapsed = "
-            << utils::FormatSecs(elapsed_time.InSeconds())
-            << ", MaxDaysToScatter = " << max_scatter_period.InDays();
-
-  if (!output_object->deadline.empty()) {
-    // The deadline is set for all rules which serve a delta update from a
-    // previous FSI, which means this update will be applied mostly in OOBE
-    // cases. For these cases, we shouldn't scatter so as to finish the OOBE
-    // quickly.
-    LOG(INFO) << "Not scattering as deadline flag is set";
-    return kWallClockWaitDoneAndUpdateCheckWaitNotRequired;
-  }
-
-  if (max_scatter_period.InDays() == 0) {
-    // This means the Omaha rule creator decides that this rule
-    // should not be scattered irrespective of the policy.
-    LOG(INFO) << "Not scattering as MaxDaysToScatter in rule is 0.";
-    return kWallClockWaitDoneAndUpdateCheckWaitNotRequired;
-  }
-
-  if (elapsed_time > max_scatter_period) {
-    // This means we've waited more than the upper-bound wait in the rule
-    // from the time we first saw a valid update available to us.
-    // This will prevent update starvation.
-    LOG(INFO) << "Not scattering as we're past the MaxDaysToScatter limit.";
-    return kWallClockWaitDoneAndUpdateCheckWaitNotRequired;
-  }
-
-  // This means we are required to participate in scattering.
-  // See if our turn has arrived now.
-  TimeDelta remaining_wait_time = params_->waiting_period() - elapsed_time;
-  if (remaining_wait_time.InSeconds() <= 0) {
-    // Yes, it's our turn now.
-    LOG(INFO) << "Successfully passed the wall-clock-based-wait.";
-
-    // But we can't download until the update-check-count-based wait is also
-    // satisfied, so mark it as required now if update checks are enabled.
-    return params_->update_check_count_wait_enabled()
-               ? kWallClockWaitDoneButUpdateCheckWaitRequired
-               : kWallClockWaitDoneAndUpdateCheckWaitNotRequired;
-  }
-
-  // Not our turn yet, so we have to wait until our turn to
-  // help scatter the downloads across all clients of the enterprise.
-  LOG(INFO) << "Update deferred for another "
-            << utils::FormatSecs(remaining_wait_time.InSeconds())
-            << " per policy.";
-  return kWallClockWaitNotSatisfied;
-}
-
-bool OmahaRequestAction::IsUpdateCheckCountBasedWaitingSatisfied() {
-  int64_t update_check_count_value;
-
-  if (system_state_->prefs()->Exists(kPrefsUpdateCheckCount)) {
-    if (!system_state_->prefs()->GetInt64(kPrefsUpdateCheckCount,
-                                          &update_check_count_value)) {
-      // We are unable to read the update check count from file for some reason.
-      // So let's proceed anyway so as to not stall the update.
-      LOG(ERROR) << "Unable to read update check count. "
-                 << "Skipping update-check-count-based-wait.";
-      return true;
-    }
-  } else {
-    // This file does not exist. This means we haven't started our update
-    // check count down yet, so this is the right time to start the count down.
-    update_check_count_value =
-        base::RandInt(params_->min_update_checks_needed(),
-                      params_->max_update_checks_allowed());
-
-    LOG(INFO) << "Randomly picked update check count value = "
-              << update_check_count_value;
-
-    // Write out the initial value of update_check_count_value.
-    if (!system_state_->prefs()->SetInt64(kPrefsUpdateCheckCount,
-                                          update_check_count_value)) {
-      // We weren't able to write the update check count file for some reason.
-      // So let's proceed anyway so as to not stall the update.
-      LOG(ERROR) << "Unable to write update check count. "
-                 << "Skipping update-check-count-based-wait.";
-      return true;
-    }
-  }
-
-  if (update_check_count_value == 0) {
-    LOG(INFO) << "Successfully passed the update-check-based-wait.";
-    return true;
-  }
-
-  if (update_check_count_value < 0 ||
-      update_check_count_value > params_->max_update_checks_allowed()) {
-    // We err on the side of skipping the scattering logic instead of
-    // preventing a machine from receiving any updates in case of any
-    // unexpected state.
-    LOG(ERROR) << "Invalid value for update check count detected. "
-               << "Skipping update-check-count-based-wait.";
-    return true;
-  }
-
-  // Legal value, we need to wait for more update checks to happen
-  // until this becomes 0.
-  LOG(INFO) << "Deferring Omaha updates for another "
-            << update_check_count_value << " update checks per policy";
-  return false;
-}
-
-// static
-bool OmahaRequestAction::ParseInstallDate(OmahaParserData* parser_data,
-                                          OmahaResponse* output_object) {
-  int64_t elapsed_days = 0;
-  if (!base::StringToInt64(parser_data->daystart_elapsed_days, &elapsed_days))
-    return false;
-
-  if (elapsed_days < 0)
-    return false;
-
-  output_object->install_date_days = elapsed_days;
-  return true;
-}
-
-// static
-bool OmahaRequestAction::HasInstallDate(SystemState* system_state) {
-  PrefsInterface* prefs = system_state->prefs();
-  if (prefs == nullptr)
-    return false;
-
-  return prefs->Exists(kPrefsInstallDateDays);
-}
-
-// static
-bool OmahaRequestAction::PersistInstallDate(
-    SystemState* system_state,
-    int install_date_days,
-    InstallDateProvisioningSource source) {
-  TEST_AND_RETURN_FALSE(install_date_days >= 0);
-
-  PrefsInterface* prefs = system_state->prefs();
-  if (prefs == nullptr)
-    return false;
-
-  if (!prefs->SetInt64(kPrefsInstallDateDays, install_date_days))
-    return false;
-
-  system_state->metrics_reporter()->ReportInstallDateProvisioningSource(
-      static_cast<int>(source),  // Sample.
-      kProvisionedMax);          // Maximum.
-  return true;
-}
-
-bool OmahaRequestAction::PersistCohortData(const string& prefs_key,
-                                           const string& new_value) {
-  if (new_value.empty() && system_state_->prefs()->Exists(prefs_key)) {
-    LOG(INFO) << "Removing stored " << prefs_key << " value.";
-    return system_state_->prefs()->Delete(prefs_key);
-  } else if (!new_value.empty()) {
-    LOG(INFO) << "Storing new setting " << prefs_key << " as " << new_value;
-    return system_state_->prefs()->SetString(prefs_key, new_value);
-  }
-  return true;
-}
-
-bool OmahaRequestAction::PersistEolStatus(const map<string, string>& attrs) {
-  auto eol_attr = attrs.find(kAttrEol);
-  if (eol_attr != attrs.end()) {
-    return system_state_->prefs()->SetString(kPrefsOmahaEolStatus,
-                                             eol_attr->second);
-  } else if (system_state_->prefs()->Exists(kPrefsOmahaEolStatus)) {
-    return system_state_->prefs()->Delete(kPrefsOmahaEolStatus);
-  }
-  return true;
-}
-
-void OmahaRequestAction::ActionCompleted(ErrorCode code) {
-  // We only want to report this on "update check".
-  if (ping_only_ || event_ != nullptr)
-    return;
-
-  metrics::CheckResult result = metrics::CheckResult::kUnset;
-  metrics::CheckReaction reaction = metrics::CheckReaction::kUnset;
-  metrics::DownloadErrorCode download_error_code =
-      metrics::DownloadErrorCode::kUnset;
-
-  // Regular update attempt.
-  switch (code) {
-    case ErrorCode::kSuccess:
-      // OK, we parsed the response successfully but that does not
-      // necessarily mean that an update is available.
-      if (HasOutputPipe()) {
-        const OmahaResponse& response = GetOutputObject();
-        if (response.update_exists) {
-          result = metrics::CheckResult::kUpdateAvailable;
-          reaction = metrics::CheckReaction::kUpdating;
-        } else {
-          result = metrics::CheckResult::kNoUpdateAvailable;
-        }
-      } else {
-        result = metrics::CheckResult::kNoUpdateAvailable;
-      }
-      break;
-
-    case ErrorCode::kOmahaUpdateIgnoredPerPolicy:
-    case ErrorCode::kOmahaUpdateIgnoredOverCellular:
-      result = metrics::CheckResult::kUpdateAvailable;
-      reaction = metrics::CheckReaction::kIgnored;
-      break;
-
-    case ErrorCode::kOmahaUpdateDeferredPerPolicy:
-      result = metrics::CheckResult::kUpdateAvailable;
-      reaction = metrics::CheckReaction::kDeferring;
-      break;
-
-    case ErrorCode::kOmahaUpdateDeferredForBackoff:
-      result = metrics::CheckResult::kUpdateAvailable;
-      reaction = metrics::CheckReaction::kBackingOff;
-      break;
-
-    default:
-      // We report two flavors of errors, "Download errors" and "Parsing
-      // errors". Try to convert to the former and if that doesn't work,
-      // we know it's the latter.
-      metrics::DownloadErrorCode tmp_error =
-          metrics_utils::GetDownloadErrorCode(code);
-      if (tmp_error != metrics::DownloadErrorCode::kInputMalformed) {
-        result = metrics::CheckResult::kDownloadError;
-        download_error_code = tmp_error;
-      } else {
-        result = metrics::CheckResult::kParsingError;
-      }
-      break;
-  }
-
-  system_state_->metrics_reporter()->ReportUpdateCheckMetrics(
-      system_state_, result, reaction, download_error_code);
-}
-
-bool OmahaRequestAction::ShouldIgnoreUpdate(const OmahaResponse& response,
-                                            ErrorCode* error) const {
-  // Note: policy decision to not update to a version we rolled back from.
-  string rollback_version =
-      system_state_->payload_state()->GetRollbackVersion();
-  if (!rollback_version.empty()) {
-    LOG(INFO) << "Detected previous rollback from version " << rollback_version;
-    if (rollback_version == response.version) {
-      LOG(INFO) << "Received version that we rolled back from. Ignoring.";
-      *error = ErrorCode::kOmahaUpdateIgnoredPerPolicy;
-      return true;
-    }
-  }
-
-  if (system_state_->hardware()->IsOOBEEnabled() &&
-      !system_state_->hardware()->IsOOBEComplete(nullptr) &&
-      (response.deadline.empty() ||
-       system_state_->payload_state()->GetRollbackHappened()) &&
-      params_->app_version() != "ForcedUpdate") {
-    LOG(INFO) << "Ignoring a non-critical Omaha update before OOBE completion.";
-    *error = ErrorCode::kNonCriticalUpdateInOOBE;
-    return true;
-  }
-
-  if (!IsUpdateAllowedOverCurrentConnection(error, response)) {
-    LOG(INFO) << "Update is not allowed over current connection.";
-    return true;
-  }
-
-  // Note: We could technically delete the UpdateFirstSeenAt state when we
-  // return true. If we do, it'll mean a device has to restart the
-  // UpdateFirstSeenAt and thus help scattering take effect when the AU is
-  // turned on again. On the other hand, it also increases the chance of update
-  // starvation if an admin turns AU on/off more frequently. We choose to err on
-  // the side of preventing starvation at the cost of not applying scattering in
-  // those cases.
-  return false;
-}
-
-bool OmahaRequestAction::IsUpdateAllowedOverCellularByPrefs(
-    const OmahaResponse& response) const {
-  PrefsInterface* prefs = system_state_->prefs();
-
-  if (!prefs) {
-    LOG(INFO) << "Disabling updates over cellular as the preferences are "
-                 "not available.";
-    return false;
-  }
-
-  bool is_allowed;
-
-  if (prefs->Exists(kPrefsUpdateOverCellularPermission) &&
-      prefs->GetBoolean(kPrefsUpdateOverCellularPermission, &is_allowed) &&
-      is_allowed) {
-    LOG(INFO) << "Allowing updates over cellular as permission preference is "
-                 "set to true.";
-    return true;
-  }
-
-  if (!prefs->Exists(kPrefsUpdateOverCellularTargetVersion) ||
-      !prefs->Exists(kPrefsUpdateOverCellularTargetSize)) {
-    LOG(INFO) << "Disabling updates over cellular as permission preference is "
-                 "set to false or does not exist while target does not exist.";
-    return false;
-  }
-
-  std::string target_version;
-  int64_t target_size;
-
-  if (!prefs->GetString(kPrefsUpdateOverCellularTargetVersion,
-                        &target_version) ||
-      !prefs->GetInt64(kPrefsUpdateOverCellularTargetSize, &target_size)) {
-    LOG(INFO) << "Disabling updates over cellular as the target version or "
-                 "size is not accessible.";
-    return false;
-  }
-
-  uint64_t total_packages_size = 0;
-  for (const auto& package : response.packages) {
-    total_packages_size += package.size;
-  }
-  if (target_version == response.version &&
-      static_cast<uint64_t>(target_size) == total_packages_size) {
-    LOG(INFO) << "Allowing updates over cellular as the target matches the"
-                 "omaha response.";
-    return true;
-  } else {
-    LOG(INFO) << "Disabling updates over cellular as the target does not"
-                 "match the omaha response.";
-    return false;
-  }
-}
-
-bool OmahaRequestAction::IsUpdateAllowedOverCurrentConnection(
-    ErrorCode* error, const OmahaResponse& response) const {
-  ConnectionType type;
-  ConnectionTethering tethering;
-  ConnectionManagerInterface* connection_manager =
-      system_state_->connection_manager();
-  if (!connection_manager->GetConnectionProperties(&type, &tethering)) {
-    LOG(INFO) << "We could not determine our connection type. "
-              << "Defaulting to allow updates.";
-    return true;
-  }
-
-  bool is_allowed = connection_manager->IsUpdateAllowedOver(type, tethering);
-  bool is_device_policy_set =
-      connection_manager->IsAllowedConnectionTypesForUpdateSet();
-  // Treat a tethered connection as if it were a cellular connection.
-  bool is_over_cellular = type == ConnectionType::kCellular ||
-                          tethering == ConnectionTethering::kConfirmed;
-
-  if (!is_over_cellular) {
-    // There's no need to further check user preferences as we are not over
-    // cellular connection.
-    if (!is_allowed)
-      *error = ErrorCode::kOmahaUpdateIgnoredPerPolicy;
-  } else if (is_device_policy_set) {
-    // There's no need to further check user preferences as the device policy
-    // is set regarding updates over cellular.
-    if (!is_allowed)
-      *error = ErrorCode::kOmahaUpdateIgnoredPerPolicy;
-  } else {
-    // Device policy is not set, so user preferences determine whether to
-    // allow updates over cellular.
-    is_allowed = IsUpdateAllowedOverCellularByPrefs(response);
-    if (!is_allowed)
-      *error = ErrorCode::kOmahaUpdateIgnoredOverCellular;
-  }
-
-  LOG(INFO) << "We are connected via "
-            << connection_utils::StringForConnectionType(type)
-            << ", Updates allowed: " << (is_allowed ? "Yes" : "No");
-  return is_allowed;
-}
-
-bool OmahaRequestAction::IsRollbackEnabled() const {
-  if (policy_provider_->IsConsumerDevice()) {
-    LOG(INFO) << "Rollback is not enabled for consumer devices.";
-    return false;
-  }
-
-  if (!policy_provider_->device_policy_is_loaded()) {
-    LOG(INFO) << "No device policy is loaded. Assuming rollback enabled.";
-    return true;
-  }
-
-  int allowed_milestones;
-  if (!policy_provider_->GetDevicePolicy().GetRollbackAllowedMilestones(
-          &allowed_milestones)) {
-    LOG(INFO) << "RollbackAllowedMilestones policy can't be read. "
-                 "Defaulting to rollback enabled.";
-    return true;
-  }
-
-  LOG(INFO) << "Rollback allows " << allowed_milestones << " milestones.";
-  return allowed_milestones > 0;
-}
-
-void OmahaRequestAction::SetMaxKernelKeyVersionForRollback() const {
-  int max_kernel_rollforward;
-  int min_kernel_version = system_state_->hardware()->GetMinKernelKeyVersion();
-  if (IsRollbackEnabled()) {
-    // If rollback is enabled, set the max kernel key version to the current
-    // kernel key version. This has the effect of freezing kernel key roll
-    // forwards.
-    //
-    // TODO(zentaro): This behavior is temporary, and ensures that no kernel
-    // key roll forward happens until the server side components of rollback
-    // are implemented. Future changes will allow the Omaha server to return
-    // the kernel key version from max_rollback_versions in the past. At that
-    // point the max kernel key version will be set to that value, creating a
-    // sliding window of versions that can be rolled back to.
-    LOG(INFO) << "Rollback is enabled. Setting kernel_max_rollforward to "
-              << min_kernel_version;
-    max_kernel_rollforward = min_kernel_version;
-  } else {
-    // For devices that are not rollback enabled (i.e. consumer devices), the
-    // max kernel key version is set to 0xfffffffe, which is logically
-    // infinity. This maintains the previous behavior that kernel key
-    // versions roll forward each time they are incremented.
-    LOG(INFO) << "Rollback is disabled. Setting kernel_max_rollforward to "
-              << kRollforwardInfinity;
-    max_kernel_rollforward = kRollforwardInfinity;
-  }
-
-  bool max_rollforward_set =
-      system_state_->hardware()->SetMaxKernelKeyRollforward(
-          max_kernel_rollforward);
-  if (!max_rollforward_set) {
-    LOG(ERROR) << "Failed to set kernel_max_rollforward";
-  }
-  // Report metrics
-  system_state_->metrics_reporter()->ReportKeyVersionMetrics(
-      min_kernel_version, max_kernel_rollforward, max_rollforward_set);
-}
-
-base::Time OmahaRequestAction::LoadOrPersistUpdateFirstSeenAtPref() const {
-  Time update_first_seen_at;
-  int64_t update_first_seen_at_int;
-  if (system_state_->prefs()->Exists(kPrefsUpdateFirstSeenAt)) {
-    if (system_state_->prefs()->GetInt64(kPrefsUpdateFirstSeenAt,
-                                         &update_first_seen_at_int)) {
-      // Note: This timestamp could be that of ANY update we saw in the past
-      // (not necessarily this particular update we're considering to apply)
-      // but never got to apply because of some reason (e.g. stop AU policy,
-      // updates being pulled out from Omaha, changes in target version prefix,
-      // new update being rolled out, etc.). But for the purposes of scattering
-      // it doesn't matter which update the timestamp corresponds to. i.e.
-      // the clock starts ticking the first time we see an update and we're
-      // ready to apply when the random wait period is satisfied relative to
-      // that first seen timestamp.
-      update_first_seen_at = Time::FromInternalValue(update_first_seen_at_int);
-      LOG(INFO) << "Using persisted value of UpdateFirstSeenAt: "
-                << utils::ToString(update_first_seen_at);
-    } else {
-      // This seems like an unexpected error where the persisted value exists
-      // but it's not readable for some reason.
-      LOG(INFO) << "UpdateFirstSeenAt value cannot be read";
-      return base::Time();
-    }
-  } else {
-    update_first_seen_at = system_state_->clock()->GetWallclockTime();
-    update_first_seen_at_int = update_first_seen_at.ToInternalValue();
-    if (system_state_->prefs()->SetInt64(kPrefsUpdateFirstSeenAt,
-                                         update_first_seen_at_int)) {
-      LOG(INFO) << "Persisted the new value for UpdateFirstSeenAt: "
-                << utils::ToString(update_first_seen_at);
-    } else {
-      // This seems like an unexpected error where the value cannot be
-      // persisted for some reason.
-      LOG(INFO) << "UpdateFirstSeenAt value "
-                << utils::ToString(update_first_seen_at)
-                << " cannot be persisted";
-      return base::Time();
-    }
-  }
-  return update_first_seen_at;
-}
-
-}  // namespace chromeos_update_engine
diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc
deleted file mode 100644
index 1786bcc..0000000
--- a/omaha_request_action_unittest.cc
+++ /dev/null
@@ -1,3147 +0,0 @@
-//
-// Copyright (C) 2012 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/omaha_request_action.h"
-
-#include <stdint.h>
-
-#include <memory>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include <base/bind.h>
-#include <base/files/file_util.h>
-#include <base/files/scoped_temp_dir.h>
-#include <base/memory/ptr_util.h>
-#include <base/strings/string_number_conversions.h>
-#include <base/strings/string_util.h>
-#include <base/strings/stringprintf.h>
-#include <base/time/time.h>
-#include <brillo/message_loops/fake_message_loop.h>
-#include <brillo/message_loops/message_loop.h>
-#include <brillo/message_loops/message_loop_utils.h>
-#include <gtest/gtest.h>
-#include <policy/libpolicy.h>
-#include <policy/mock_libpolicy.h>
-
-#include "update_engine/common/action_pipe.h"
-#include "update_engine/common/constants.h"
-#include "update_engine/common/fake_prefs.h"
-#include "update_engine/common/hash_calculator.h"
-#include "update_engine/common/mock_http_fetcher.h"
-#include "update_engine/common/platform_constants.h"
-#include "update_engine/common/prefs.h"
-#include "update_engine/common/test_utils.h"
-#include "update_engine/fake_system_state.h"
-#include "update_engine/metrics_reporter_interface.h"
-#include "update_engine/mock_connection_manager.h"
-#include "update_engine/mock_payload_state.h"
-#include "update_engine/omaha_request_params.h"
-#include "update_engine/update_manager/rollback_prefs.h"
-
-using base::Time;
-using base::TimeDelta;
-using chromeos_update_manager::kRollforwardInfinity;
-using std::string;
-using std::vector;
-using testing::_;
-using testing::AllOf;
-using testing::AnyNumber;
-using testing::DoAll;
-using testing::Ge;
-using testing::Le;
-using testing::NiceMock;
-using testing::Return;
-using testing::ReturnPointee;
-using testing::ReturnRef;
-using testing::SaveArg;
-using testing::SetArgPointee;
-
-namespace {
-
-static_assert(kRollforwardInfinity == 0xfffffffe,
-              "Don't change the value of kRollforward infinity unless its "
-              "size has been changed in firmware.");
-
-const char kCurrentVersion[] = "0.1.0.0";
-const char kTestAppId[] = "test-app-id";
-const char kTestAppId2[] = "test-app2-id";
-const char kTestAppIdSkipUpdatecheck[] = "test-app-id-skip-updatecheck";
-
-// This is a helper struct to allow unit tests to build an update response with
-// the values they care about.
-struct FakeUpdateResponse {
-  string GetRollbackVersionAttributes() const {
-    return (rollback ? " _rollback=\"true\"" : "") +
-           (!rollback_firmware_version.empty()
-                ? " _firmware_version=\"" + rollback_firmware_version + "\""
-                : "") +
-           (!rollback_kernel_version.empty()
-                ? " _kernel_version=\"" + rollback_kernel_version + "\""
-                : "");
-  }
-
-  string GetNoUpdateResponse() const {
-    string entity_str;
-    if (include_entity)
-      entity_str = "<!DOCTYPE response [<!ENTITY CrOS \"ChromeOS\">]>";
-    return "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" + entity_str +
-           "<response protocol=\"3.0\">"
-           "<daystart elapsed_seconds=\"100\"/>"
-           "<app appid=\"" +
-           app_id + "\" " +
-           (include_cohorts
-                ? "cohort=\"" + cohort + "\" cohorthint=\"" + cohorthint +
-                      "\" cohortname=\"" + cohortname + "\" "
-                : "") +
-           " status=\"ok\">"
-           "<ping status=\"ok\"/>"
-           "<updatecheck status=\"noupdate\"/></app>" +
-           (multi_app_no_update
-                ? "<app appid=\"" + app_id2 +
-                      "\"><updatecheck status=\"noupdate\"/></app>"
-                : "") +
-           "</response>";
-  }
-
-  string GetUpdateResponse() const {
-    return "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
-           "protocol=\"3.0\">"
-           "<daystart elapsed_seconds=\"100\"" +
-           (elapsed_days.empty() ? ""
-                                 : (" elapsed_days=\"" + elapsed_days + "\"")) +
-           "/>"
-           "<app appid=\"" +
-           app_id + "\" " +
-           (include_cohorts
-                ? "cohort=\"" + cohort + "\" cohorthint=\"" + cohorthint +
-                      "\" cohortname=\"" + cohortname + "\" "
-                : "") +
-           " status=\"ok\">"
-           "<ping status=\"ok\"/><updatecheck status=\"ok\"" +
-           GetRollbackVersionAttributes() + ">" + "<urls><url codebase=\"" +
-           codebase +
-           "\"/></urls>"
-           "<manifest version=\"" +
-           version +
-           "\">"
-           "<packages><package hash=\"not-used\" name=\"" +
-           filename + "\" size=\"" + base::Int64ToString(size) +
-           "\" hash_sha256=\"" + hash + "\"/>" +
-           (multi_package ? "<package name=\"package2\" size=\"222\" "
-                            "hash_sha256=\"hash2\"/>"
-                          : "") +
-           "</packages>"
-           "<actions><action event=\"postinstall\" MetadataSize=\"11" +
-           (multi_package ? ":22" : "") + "\" MoreInfo=\"" + more_info_url +
-           "\" Prompt=\"" + prompt +
-           "\" "
-           "IsDeltaPayload=\"true" +
-           (multi_package ? ":false" : "") +
-           "\" "
-           "MaxDaysToScatter=\"" +
-           max_days_to_scatter +
-           "\" "
-           "sha256=\"not-used\" " +
-           (deadline.empty() ? "" : ("deadline=\"" + deadline + "\" ")) +
-           (disable_p2p_for_downloading ? "DisableP2PForDownloading=\"true\" "
-                                        : "") +
-           (disable_p2p_for_sharing ? "DisableP2PForSharing=\"true\" " : "") +
-           (powerwash ? "Powerwash=\"true\" " : "") +
-           "/></actions></manifest></updatecheck></app>" +
-           (multi_app
-                ? "<app appid=\"" + app_id2 + "\"" +
-                      (include_cohorts ? " cohort=\"cohort2\"" : "") +
-                      "><updatecheck status=\"ok\"><urls><url codebase=\"" +
-                      codebase2 + "\"/></urls><manifest version=\"" + version2 +
-                      "\"><packages>"
-                      "<package name=\"package3\" size=\"333\" "
-                      "hash_sha256=\"hash3\"/></packages>"
-                      "<actions><action event=\"postinstall\" " +
-                      (multi_app_self_update
-                           ? "noupdate=\"true\" IsDeltaPayload=\"true\" "
-                           : "IsDeltaPayload=\"false\" ") +
-                      "MetadataSize=\"33\"/></actions>"
-                      "</manifest></updatecheck></app>"
-                : "") +
-           (multi_app_no_update
-                ? "<app><updatecheck status=\"noupdate\"/></app>"
-                : "") +
-           (multi_app_skip_updatecheck
-                ? "<app appid=\"" + app_id_skip_updatecheck + "\"></app>"
-                : "") +
-           "</response>";
-  }
-
-  // Return the payload URL, which is split in two fields in the XML response.
-  string GetPayloadUrl() { return codebase + filename; }
-
-  string app_id = kTestAppId;
-  string app_id2 = kTestAppId2;
-  string app_id_skip_updatecheck = kTestAppIdSkipUpdatecheck;
-  string current_version = kCurrentVersion;
-  string version = "1.2.3.4";
-  string version2 = "2.3.4.5";
-  string more_info_url = "http://more/info";
-  string prompt = "true";
-  string codebase = "http://code/base/";
-  string codebase2 = "http://code/base/2/";
-  string filename = "file.signed";
-  string hash = "4841534831323334";
-  uint64_t size = 123;
-  string deadline = "";
-  string max_days_to_scatter = "7";
-  string elapsed_days = "42";
-
-  // P2P setting defaults to allowed.
-  bool disable_p2p_for_downloading = false;
-  bool disable_p2p_for_sharing = false;
-
-  bool powerwash = false;
-
-  // Omaha cohorts settings.
-  bool include_cohorts = false;
-  string cohort = "";
-  string cohorthint = "";
-  string cohortname = "";
-
-  // Whether to include the CrOS <!ENTITY> in the XML response.
-  bool include_entity = false;
-
-  // Whether to include more than one app.
-  bool multi_app = false;
-  // Whether to include an app with noupdate="true".
-  bool multi_app_self_update = false;
-  // Whether to include an additional app with status="noupdate".
-  bool multi_app_no_update = false;
-  // Whether to include an additional app with no updatecheck tag.
-  bool multi_app_skip_updatecheck = false;
-  // Whether to include more than one package in an app.
-  bool multi_package = false;
-
-  // Whether the payload is a rollback.
-  bool rollback = false;
-  // The verified boot firmware key version for the rollback image.
-  string rollback_firmware_version = "";
-  // The verified boot kernel key version for the rollback image.
-  string rollback_kernel_version = "";
-};
-
-}  // namespace
-
-namespace chromeos_update_engine {
-
-class OmahaRequestActionTestProcessorDelegate : public ActionProcessorDelegate {
- public:
-  OmahaRequestActionTestProcessorDelegate()
-      : expected_code_(ErrorCode::kSuccess),
-        interactive_(false),
-        test_http_fetcher_headers_(false) {}
-  ~OmahaRequestActionTestProcessorDelegate() override = default;
-
-  void ProcessingDone(const ActionProcessor* processor,
-                      ErrorCode code) override {
-    brillo::MessageLoop::current()->BreakLoop();
-  }
-
-  void ActionCompleted(ActionProcessor* processor,
-                       AbstractAction* action,
-                       ErrorCode code) override {
-    // Make sure actions always succeed.
-    if (action->Type() == OmahaRequestAction::StaticType()) {
-      EXPECT_EQ(expected_code_, code);
-      // Check that the headers were set in the fetcher during the action. Note
-      // that we set this request as "interactive".
-      auto fetcher = static_cast<const MockHttpFetcher*>(
-          static_cast<OmahaRequestAction*>(action)->http_fetcher_.get());
-
-      if (test_http_fetcher_headers_) {
-        EXPECT_EQ(interactive_ ? "fg" : "bg",
-                  fetcher->GetHeader("X-Goog-Update-Interactivity"));
-        EXPECT_EQ(kTestAppId, fetcher->GetHeader("X-Goog-Update-AppId"));
-        EXPECT_NE("", fetcher->GetHeader("X-Goog-Update-Updater"));
-      }
-      post_data_ = fetcher->post_data();
-    } else if (action->Type() ==
-               ObjectCollectorAction<OmahaResponse>::StaticType()) {
-      EXPECT_EQ(ErrorCode::kSuccess, code);
-      auto collector_action =
-          static_cast<ObjectCollectorAction<OmahaResponse>*>(action);
-      omaha_response_.reset(new OmahaResponse(collector_action->object()));
-      EXPECT_TRUE(omaha_response_);
-    } else {
-      EXPECT_EQ(ErrorCode::kSuccess, code);
-    }
-  }
-  ErrorCode expected_code_;
-  brillo::Blob post_data_;
-  bool interactive_;
-  bool test_http_fetcher_headers_;
-  std::unique_ptr<OmahaResponse> omaha_response_;
-};
-
-class OmahaRequestActionTest : public ::testing::Test {
- protected:
-  void SetUp() override {
-    request_params_.set_os_sp("service_pack");
-    request_params_.set_os_board("x86-generic");
-    request_params_.set_app_id(kTestAppId);
-    request_params_.set_app_version(kCurrentVersion);
-    request_params_.set_app_lang("en-US");
-    request_params_.set_current_channel("unittest");
-    request_params_.set_target_channel("unittest");
-    request_params_.set_hwid("OEM MODEL 09235 7471");
-    request_params_.set_fw_version("ChromeOSFirmware.1.0");
-    request_params_.set_ec_version("0X0A1");
-    request_params_.set_delta_okay(true);
-    request_params_.set_interactive(false);
-    request_params_.set_update_url("http://url");
-    request_params_.set_target_version_prefix("");
-    request_params_.set_rollback_allowed(false);
-    request_params_.set_is_powerwash_allowed(false);
-    request_params_.set_is_install(false);
-    request_params_.set_dlc_module_ids({});
-
-    fake_system_state_.set_request_params(&request_params_);
-    fake_system_state_.set_prefs(&fake_prefs_);
-  }
-
-  // Returns true iff an output response was obtained from the
-  // OmahaRequestAction. |prefs| may be null, in which case a local MockPrefs
-  // is used. |payload_state| may be null, in which case a local mock is used.
-  // |p2p_manager| may be null, in which case a local mock is used.
-  // |connection_manager| may be null, in which case a local mock is used.
-  // |out_response| may be null. If |fail_http_response_code| is non-negative,
-  // the transfer will fail with that code. |ping_only| is passed through to the
-  // OmahaRequestAction constructor. |out_post_data| may be null; if non-null,
-  // the post-data received by the mock HttpFetcher is returned.
-  //
-  // The |expected_check_result|, |expected_check_reaction| and
-  // |expected_error_code| parameters are for checking expectations
-  // about reporting UpdateEngine.Check.{Result,Reaction,DownloadError}
-  // UMA statistics. Use the appropriate ::kUnset value to specify that
-  // the given metric should not be reported.
-  bool TestUpdateCheck(const string& http_response,
-                       int fail_http_response_code,
-                       bool ping_only,
-                       bool is_consumer_device,
-                       int rollback_allowed_milestones,
-                       bool is_policy_loaded,
-                       ErrorCode expected_code,
-                       metrics::CheckResult expected_check_result,
-                       metrics::CheckReaction expected_check_reaction,
-                       metrics::DownloadErrorCode expected_download_error_code,
-                       OmahaResponse* out_response,
-                       brillo::Blob* out_post_data);
-
-  // Overload of TestUpdateCheck that does not supply |is_consumer_device| or
-  // |rollback_allowed_milestones| which are only required for rollback tests.
-  bool TestUpdateCheck(const string& http_response,
-                       int fail_http_response_code,
-                       bool ping_only,
-                       ErrorCode expected_code,
-                       metrics::CheckResult expected_check_result,
-                       metrics::CheckReaction expected_check_reaction,
-                       metrics::DownloadErrorCode expected_download_error_code,
-                       OmahaResponse* out_response,
-                       brillo::Blob* out_post_data);
-
-  void TestRollbackCheck(bool is_consumer_device,
-                         int rollback_allowed_milestones,
-                         bool is_policy_loaded,
-                         OmahaResponse* out_response);
-
-  void TestEvent(OmahaEvent* event,
-                 const string& http_response,
-                 brillo::Blob* out_post_data);
-
-  // Runs and checks a ping test. |ping_only| indicates whether it should send
-  // only a ping or also an updatecheck.
-  void PingTest(bool ping_only);
-
-  // InstallDate test helper function.
-  bool InstallDateParseHelper(const string& elapsed_days,
-                              OmahaResponse* response);
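-  // Illustrative usage (a sketch, not one of the original tests; the
-  // |install_date_days| field name is assumed from OmahaResponse):
-  //
-  //   OmahaResponse response;
-  //   EXPECT_TRUE(InstallDateParseHelper("42", &response));
-  //   EXPECT_EQ(42, response.install_date_days);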
-
-  // P2P test helper function.
-  void P2PTest(bool initial_allow_p2p_for_downloading,
-               bool initial_allow_p2p_for_sharing,
-               bool omaha_disable_p2p_for_downloading,
-               bool omaha_disable_p2p_for_sharing,
-               bool payload_state_allow_p2p_attempt,
-               bool expect_p2p_client_lookup,
-               const string& p2p_client_result_url,
-               bool expected_allow_p2p_for_downloading,
-               bool expected_allow_p2p_for_sharing,
-               const string& expected_p2p_url);
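-  //
-  // Illustrative call (a sketch only; the boolean values and the lookup URL
-  // are made up for illustration, but the argument order follows the
-  // declaration above):
-  //
-  //   P2PTest(true,                   // initial_allow_p2p_for_downloading
-  //           true,                   // initial_allow_p2p_for_sharing
-  //           false,                  // omaha_disable_p2p_for_downloading
-  //           false,                  // omaha_disable_p2p_for_sharing
-  //           true,                   // payload_state_allow_p2p_attempt
-  //           true,                   // expect_p2p_client_lookup
-  //           "http://1.3.5.7/p2p",   // p2p_client_result_url
-  //           true,                   // expected_allow_p2p_for_downloading
-  //           true,                   // expected_allow_p2p_for_sharing
-  //           "http://1.3.5.7/p2p");  // expected_p2p_url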
-
-  FakeSystemState fake_system_state_;
-  FakeUpdateResponse fake_update_response_;
-  // Used by all tests.
-  OmahaRequestParams request_params_{&fake_system_state_};
-
-  FakePrefs fake_prefs_;
-
-  OmahaRequestActionTestProcessorDelegate delegate_;
-
-  bool test_http_fetcher_headers_{false};
-};
-
-bool OmahaRequestActionTest::TestUpdateCheck(
-    const string& http_response,
-    int fail_http_response_code,
-    bool ping_only,
-    bool is_consumer_device,
-    int rollback_allowed_milestones,
-    bool is_policy_loaded,
-    ErrorCode expected_code,
-    metrics::CheckResult expected_check_result,
-    metrics::CheckReaction expected_check_reaction,
-    metrics::DownloadErrorCode expected_download_error_code,
-    OmahaResponse* out_response,
-    brillo::Blob* out_post_data) {
-  brillo::FakeMessageLoop loop(nullptr);
-  loop.SetAsCurrent();
-  auto fetcher = std::make_unique<MockHttpFetcher>(
-      http_response.data(), http_response.size(), nullptr);
-  if (fail_http_response_code >= 0) {
-    fetcher->FailTransfer(fail_http_response_code);
-  }
-  // This check ensures that tests which don't use the default request_params_
-  // have also updated fake_system_state_ accordingly.
-  EXPECT_EQ(&request_params_, fake_system_state_.request_params());
-
-  auto omaha_request_action = std::make_unique<OmahaRequestAction>(
-      &fake_system_state_, nullptr, std::move(fetcher), ping_only);
-
-  auto mock_policy_provider =
-      std::make_unique<NiceMock<policy::MockPolicyProvider>>();
-  EXPECT_CALL(*mock_policy_provider, IsConsumerDevice())
-      .WillRepeatedly(Return(is_consumer_device));
-
-  EXPECT_CALL(*mock_policy_provider, device_policy_is_loaded())
-      .WillRepeatedly(Return(is_policy_loaded));
-
-  const policy::MockDevicePolicy device_policy;
-  const bool get_allowed_milestone_succeeds = rollback_allowed_milestones >= 0;
-  EXPECT_CALL(device_policy, GetRollbackAllowedMilestones(_))
-      .WillRepeatedly(DoAll(SetArgPointee<0>(rollback_allowed_milestones),
-                            Return(get_allowed_milestone_succeeds)));
-
-  EXPECT_CALL(*mock_policy_provider, GetDevicePolicy())
-      .WillRepeatedly(ReturnRef(device_policy));
-  omaha_request_action->policy_provider_ = std::move(mock_policy_provider);
-
-  delegate_.expected_code_ = expected_code;
-  delegate_.interactive_ = request_params_.interactive();
-  delegate_.test_http_fetcher_headers_ = test_http_fetcher_headers_;
-  ActionProcessor processor;
-  processor.set_delegate(&delegate_);
-
-  auto collector_action =
-      std::make_unique<ObjectCollectorAction<OmahaResponse>>();
-  BondActions(omaha_request_action.get(), collector_action.get());
-  processor.EnqueueAction(std::move(omaha_request_action));
-  processor.EnqueueAction(std::move(collector_action));
-
-  EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
-              ReportUpdateCheckMetrics(_, _, _, _))
-      .Times(AnyNumber());
-
-  EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
-              ReportUpdateCheckMetrics(_,
-                                       expected_check_result,
-                                       expected_check_reaction,
-                                       expected_download_error_code))
-      .Times(ping_only ? 0 : 1);
-
-  loop.PostTask(base::Bind(
-      [](ActionProcessor* processor) { processor->StartProcessing(); },
-      base::Unretained(&processor)));
-  loop.Run();
-  EXPECT_FALSE(loop.PendingTasks());
-  if (delegate_.omaha_response_ && out_response)
-    *out_response = *delegate_.omaha_response_;
-  if (out_post_data)
-    *out_post_data = delegate_.post_data_;
-  return delegate_.omaha_response_ != nullptr;
-}
-
-bool OmahaRequestActionTest::TestUpdateCheck(
-    const string& http_response,
-    int fail_http_response_code,
-    bool ping_only,
-    ErrorCode expected_code,
-    metrics::CheckResult expected_check_result,
-    metrics::CheckReaction expected_check_reaction,
-    metrics::DownloadErrorCode expected_download_error_code,
-    OmahaResponse* out_response,
-    brillo::Blob* out_post_data) {
-  return TestUpdateCheck(http_response,
-                         fail_http_response_code,
-                         ping_only,
-                         true,   // is_consumer_device
-                         0,      // rollback_allowed_milestones
-                         false,  // is_policy_loaded
-                         expected_code,
-                         expected_check_result,
-                         expected_check_reaction,
-                         expected_download_error_code,
-                         out_response,
-                         out_post_data);
-}
-
-void OmahaRequestActionTest::TestRollbackCheck(bool is_consumer_device,
-                                               int rollback_allowed_milestones,
-                                               bool is_policy_loaded,
-                                               OmahaResponse* out_response) {
-  fake_update_response_.deadline = "20101020";
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              is_consumer_device,
-                              rollback_allowed_milestones,
-                              is_policy_loaded,
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              out_response,
-                              nullptr));
-  ASSERT_TRUE(out_response->update_exists);
-}
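-
-// Illustrative usage of TestRollbackCheck (a sketch, not part of the original
-// tests; the argument values are hypothetical): rollback tests pass the
-// policy knobs explicitly and then inspect rollback-related fields of
-// |response|.
-//
-//   OmahaResponse response;
-//   TestRollbackCheck(false /* is_consumer_device */,
-//                     3 /* rollback_allowed_milestones */,
-//                     true /* is_policy_loaded */,
-//                     &response);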
-
-// Tests Event requests -- they should always succeed. |out_post_data| may be
-// null; if non-null, the post-data received by the mock HttpFetcher is
-// returned.
-void OmahaRequestActionTest::TestEvent(OmahaEvent* event,
-                                       const string& http_response,
-                                       brillo::Blob* out_post_data) {
-  brillo::FakeMessageLoop loop(nullptr);
-  loop.SetAsCurrent();
-
-  auto action = std::make_unique<OmahaRequestAction>(
-      &fake_system_state_,
-      event,
-      std::make_unique<MockHttpFetcher>(
-          http_response.data(), http_response.size(), nullptr),
-      false);
-  ActionProcessor processor;
-  processor.set_delegate(&delegate_);
-  processor.EnqueueAction(std::move(action));
-
-  loop.PostTask(base::Bind(
-      [](ActionProcessor* processor) { processor->StartProcessing(); },
-      base::Unretained(&processor)));
-  loop.Run();
-  EXPECT_FALSE(loop.PendingTasks());
-
-  if (out_post_data)
-    *out_post_data = delegate_.post_data_;
-}
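-
-// Illustrative usage of TestEvent (a sketch, not part of the original tests;
-// the chosen event type and the substring check are assumptions for
-// illustration, and the action is assumed to take ownership of the event):
-//
-//   brillo::Blob post_data;
-//   TestEvent(new OmahaEvent(OmahaEvent::kTypeUpdateDownloadStarted),
-//             "invalid xml>",
-//             &post_data);
-//   const string post_str(post_data.begin(), post_data.end());
-//   EXPECT_NE(string::npos, post_str.find("<event eventtype="));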
-
-TEST_F(OmahaRequestActionTest, RejectEntities) {
-  OmahaResponse response;
-  fake_update_response_.include_entity = true;
-  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
-                               -1,
-                               false,  // ping_only
-                               ErrorCode::kOmahaRequestXMLHasEntityDecl,
-                               metrics::CheckResult::kParsingError,
-                               metrics::CheckReaction::kUnset,
-                               metrics::DownloadErrorCode::kUnset,
-                               &response,
-                               nullptr));
-  EXPECT_FALSE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest, NoUpdateTest) {
-  OmahaResponse response;
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kNoUpdateAvailable,
-                              metrics::CheckReaction::kUnset,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_FALSE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest, MultiAppNoUpdateTest) {
-  OmahaResponse response;
-  fake_update_response_.multi_app_no_update = true;
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kNoUpdateAvailable,
-                              metrics::CheckReaction::kUnset,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_FALSE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest, MultiAppNoPartialUpdateTest) {
-  OmahaResponse response;
-  fake_update_response_.multi_app_no_update = true;
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kNoUpdateAvailable,
-                              metrics::CheckReaction::kUnset,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_FALSE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest, NoSelfUpdateTest) {
-  OmahaResponse response;
-  ASSERT_TRUE(TestUpdateCheck(
-      "<response><app><updatecheck status=\"ok\"><manifest><actions><action "
-      "event=\"postinstall\" noupdate=\"true\"/></actions>"
-      "</manifest></updatecheck></app></response>",
-      -1,
-      false,  // ping_only
-      ErrorCode::kSuccess,
-      metrics::CheckResult::kNoUpdateAvailable,
-      metrics::CheckReaction::kUnset,
-      metrics::DownloadErrorCode::kUnset,
-      &response,
-      nullptr));
-  EXPECT_FALSE(response.update_exists);
-}
-
-// Test that all the values in the response are parsed in a normal update
-// response.
-TEST_F(OmahaRequestActionTest, ValidUpdateTest) {
-  OmahaResponse response;
-  fake_update_response_.deadline = "20101020";
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_TRUE(response.update_exists);
-  EXPECT_EQ(fake_update_response_.version, response.version);
-  EXPECT_EQ("", response.system_version);
-  EXPECT_EQ(fake_update_response_.GetPayloadUrl(),
-            response.packages[0].payload_urls[0]);
-  EXPECT_EQ(fake_update_response_.more_info_url, response.more_info_url);
-  EXPECT_EQ(fake_update_response_.hash, response.packages[0].hash);
-  EXPECT_EQ(fake_update_response_.size, response.packages[0].size);
-  EXPECT_EQ(true, response.packages[0].is_delta);
-  EXPECT_EQ(fake_update_response_.prompt == "true", response.prompt);
-  EXPECT_EQ(fake_update_response_.deadline, response.deadline);
-  EXPECT_FALSE(response.powerwash_required);
-  // Omaha cohort attributes are not set in the response, so they should not be
-  // persisted.
-  EXPECT_FALSE(fake_prefs_.Exists(kPrefsOmahaCohort));
-  EXPECT_FALSE(fake_prefs_.Exists(kPrefsOmahaCohortHint));
-  EXPECT_FALSE(fake_prefs_.Exists(kPrefsOmahaCohortName));
-}
-
-TEST_F(OmahaRequestActionTest, MultiPackageUpdateTest) {
-  OmahaResponse response;
-  fake_update_response_.multi_package = true;
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_TRUE(response.update_exists);
-  EXPECT_EQ(fake_update_response_.version, response.version);
-  EXPECT_EQ(fake_update_response_.GetPayloadUrl(),
-            response.packages[0].payload_urls[0]);
-  EXPECT_EQ(fake_update_response_.codebase + "package2",
-            response.packages[1].payload_urls[0]);
-  EXPECT_EQ(fake_update_response_.hash, response.packages[0].hash);
-  EXPECT_EQ(fake_update_response_.size, response.packages[0].size);
-  EXPECT_EQ(true, response.packages[0].is_delta);
-  EXPECT_EQ(11u, response.packages[0].metadata_size);
-  ASSERT_EQ(2u, response.packages.size());
-  EXPECT_EQ(string("hash2"), response.packages[1].hash);
-  EXPECT_EQ(222u, response.packages[1].size);
-  EXPECT_EQ(22u, response.packages[1].metadata_size);
-  EXPECT_EQ(false, response.packages[1].is_delta);
-}
-
-TEST_F(OmahaRequestActionTest, MultiAppUpdateTest) {
-  OmahaResponse response;
-  fake_update_response_.multi_app = true;
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_TRUE(response.update_exists);
-  EXPECT_EQ(fake_update_response_.version, response.version);
-  EXPECT_EQ(fake_update_response_.GetPayloadUrl(),
-            response.packages[0].payload_urls[0]);
-  EXPECT_EQ(fake_update_response_.codebase2 + "package3",
-            response.packages[1].payload_urls[0]);
-  EXPECT_EQ(fake_update_response_.hash, response.packages[0].hash);
-  EXPECT_EQ(fake_update_response_.size, response.packages[0].size);
-  EXPECT_EQ(11u, response.packages[0].metadata_size);
-  EXPECT_EQ(true, response.packages[0].is_delta);
-  ASSERT_EQ(2u, response.packages.size());
-  EXPECT_EQ(string("hash3"), response.packages[1].hash);
-  EXPECT_EQ(333u, response.packages[1].size);
-  EXPECT_EQ(33u, response.packages[1].metadata_size);
-  EXPECT_EQ(false, response.packages[1].is_delta);
-}
-
-TEST_F(OmahaRequestActionTest, MultiAppAndSystemUpdateTest) {
-  OmahaResponse response;
-  fake_update_response_.multi_app = true;
-  // Trigger the matching of the app and system versions.
-  request_params_.set_system_app_id(fake_update_response_.app_id2);
-
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_TRUE(response.update_exists);
-  EXPECT_EQ(fake_update_response_.version, response.version);
-  EXPECT_EQ(fake_update_response_.version2, response.system_version);
-  EXPECT_EQ(fake_update_response_.GetPayloadUrl(),
-            response.packages[0].payload_urls[0]);
-  EXPECT_EQ(fake_update_response_.codebase2 + "package3",
-            response.packages[1].payload_urls[0]);
-  EXPECT_EQ(fake_update_response_.hash, response.packages[0].hash);
-  EXPECT_EQ(fake_update_response_.size, response.packages[0].size);
-  EXPECT_EQ(11u, response.packages[0].metadata_size);
-  EXPECT_EQ(true, response.packages[0].is_delta);
-  ASSERT_EQ(2u, response.packages.size());
-  EXPECT_EQ(string("hash3"), response.packages[1].hash);
-  EXPECT_EQ(333u, response.packages[1].size);
-  EXPECT_EQ(33u, response.packages[1].metadata_size);
-  EXPECT_EQ(false, response.packages[1].is_delta);
-}
-
-TEST_F(OmahaRequestActionTest, MultiAppPartialUpdateTest) {
-  OmahaResponse response;
-  fake_update_response_.multi_app = true;
-  fake_update_response_.multi_app_self_update = true;
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_TRUE(response.update_exists);
-  EXPECT_EQ(fake_update_response_.version, response.version);
-  EXPECT_EQ("", response.system_version);
-  EXPECT_EQ(fake_update_response_.GetPayloadUrl(),
-            response.packages[0].payload_urls[0]);
-  EXPECT_EQ(fake_update_response_.hash, response.packages[0].hash);
-  EXPECT_EQ(fake_update_response_.size, response.packages[0].size);
-  EXPECT_EQ(11u, response.packages[0].metadata_size);
-  ASSERT_EQ(2u, response.packages.size());
-  EXPECT_EQ(string("hash3"), response.packages[1].hash);
-  EXPECT_EQ(333u, response.packages[1].size);
-  EXPECT_EQ(33u, response.packages[1].metadata_size);
-  EXPECT_EQ(true, response.packages[1].is_delta);
-}
-
-TEST_F(OmahaRequestActionTest, MultiAppMultiPackageUpdateTest) {
-  OmahaResponse response;
-  fake_update_response_.multi_app = true;
-  fake_update_response_.multi_package = true;
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_TRUE(response.update_exists);
-  EXPECT_EQ(fake_update_response_.version, response.version);
-  EXPECT_EQ("", response.system_version);
-  EXPECT_EQ(fake_update_response_.GetPayloadUrl(),
-            response.packages[0].payload_urls[0]);
-  EXPECT_EQ(fake_update_response_.codebase + "package2",
-            response.packages[1].payload_urls[0]);
-  EXPECT_EQ(fake_update_response_.codebase2 + "package3",
-            response.packages[2].payload_urls[0]);
-  EXPECT_EQ(fake_update_response_.hash, response.packages[0].hash);
-  EXPECT_EQ(fake_update_response_.size, response.packages[0].size);
-  EXPECT_EQ(11u, response.packages[0].metadata_size);
-  EXPECT_EQ(true, response.packages[0].is_delta);
-  ASSERT_EQ(3u, response.packages.size());
-  EXPECT_EQ(string("hash2"), response.packages[1].hash);
-  EXPECT_EQ(222u, response.packages[1].size);
-  EXPECT_EQ(22u, response.packages[1].metadata_size);
-  EXPECT_EQ(false, response.packages[1].is_delta);
-  EXPECT_EQ(string("hash3"), response.packages[2].hash);
-  EXPECT_EQ(333u, response.packages[2].size);
-  EXPECT_EQ(33u, response.packages[2].metadata_size);
-  EXPECT_EQ(false, response.packages[2].is_delta);
-}
-
-TEST_F(OmahaRequestActionTest, PowerwashTest) {
-  OmahaResponse response;
-  fake_update_response_.powerwash = true;
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_TRUE(response.update_exists);
-  EXPECT_TRUE(response.powerwash_required);
-}
-
-TEST_F(OmahaRequestActionTest, ExtraHeadersSentInteractiveTest) {
-  OmahaResponse response;
-  request_params_.set_interactive(true);
-  test_http_fetcher_headers_ = true;
-  ASSERT_FALSE(TestUpdateCheck("invalid xml>",
-                               -1,
-                               false,  // ping_only
-                               ErrorCode::kOmahaRequestXMLParseError,
-                               metrics::CheckResult::kParsingError,
-                               metrics::CheckReaction::kUnset,
-                               metrics::DownloadErrorCode::kUnset,
-                               &response,
-                               nullptr));
-  EXPECT_FALSE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest, ExtraHeadersSentNoInteractiveTest) {
-  OmahaResponse response;
-  request_params_.set_interactive(false);
-  test_http_fetcher_headers_ = true;
-  ASSERT_FALSE(TestUpdateCheck("invalid xml>",
-                               -1,
-                               false,  // ping_only
-                               ErrorCode::kOmahaRequestXMLParseError,
-                               metrics::CheckResult::kParsingError,
-                               metrics::CheckReaction::kUnset,
-                               metrics::DownloadErrorCode::kUnset,
-                               &response,
-                               nullptr));
-  EXPECT_FALSE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest, ValidUpdateBlockedByConnection) {
-  OmahaResponse response;
-  // Set up a connection manager that doesn't allow a valid update over
-  // the current ethernet connection.
-  MockConnectionManager mock_cm;
-  fake_system_state_.set_connection_manager(&mock_cm);
-
-  EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
-      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kEthernet),
-                            SetArgPointee<1>(ConnectionTethering::kUnknown),
-                            Return(true)));
-  EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kEthernet, _))
-      .WillRepeatedly(Return(false));
-
-  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                               -1,
-                               false,  // ping_only
-                               ErrorCode::kOmahaUpdateIgnoredPerPolicy,
-                               metrics::CheckResult::kUpdateAvailable,
-                               metrics::CheckReaction::kIgnored,
-                               metrics::DownloadErrorCode::kUnset,
-                               &response,
-                               nullptr));
-  EXPECT_FALSE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest, ValidUpdateOverCellularAllowedByDevicePolicy) {
-  // Tests that an update over cellular is allowed when the device policy
-  // permits it.
-  OmahaResponse response;
-  MockConnectionManager mock_cm;
-
-  fake_system_state_.set_connection_manager(&mock_cm);
-
-  EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
-      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular),
-                            SetArgPointee<1>(ConnectionTethering::kUnknown),
-                            Return(true)));
-  EXPECT_CALL(mock_cm, IsAllowedConnectionTypesForUpdateSet())
-      .WillRepeatedly(Return(true));
-  EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _))
-      .WillRepeatedly(Return(true));
-
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_TRUE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest, ValidUpdateOverCellularBlockedByDevicePolicy) {
-  // Tests that an update over cellular is blocked when the device policy
-  // forbids it.
-  OmahaResponse response;
-  MockConnectionManager mock_cm;
-
-  fake_system_state_.set_connection_manager(&mock_cm);
-
-  EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
-      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular),
-                            SetArgPointee<1>(ConnectionTethering::kUnknown),
-                            Return(true)));
-  EXPECT_CALL(mock_cm, IsAllowedConnectionTypesForUpdateSet())
-      .WillRepeatedly(Return(true));
-  EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _))
-      .WillRepeatedly(Return(false));
-
-  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                               -1,
-                               false,  // ping_only
-                               ErrorCode::kOmahaUpdateIgnoredPerPolicy,
-                               metrics::CheckResult::kUpdateAvailable,
-                               metrics::CheckReaction::kIgnored,
-                               metrics::DownloadErrorCode::kUnset,
-                               &response,
-                               nullptr));
-  EXPECT_FALSE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest,
-       ValidUpdateOverCellularAllowedByUserPermissionTrue) {
-  // Tests that, when no device policy is set, an update over cellular is
-  // allowed because the user permission for updates over cellular is true.
-  OmahaResponse response;
-  MockConnectionManager mock_cm;
-
-  fake_prefs_.SetBoolean(kPrefsUpdateOverCellularPermission, true);
-  fake_system_state_.set_connection_manager(&mock_cm);
-
-  EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
-      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular),
-                            SetArgPointee<1>(ConnectionTethering::kUnknown),
-                            Return(true)));
-  EXPECT_CALL(mock_cm, IsAllowedConnectionTypesForUpdateSet())
-      .WillRepeatedly(Return(false));
-  EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _))
-      .WillRepeatedly(Return(true));
-
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_TRUE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest,
-       ValidUpdateOverCellularBlockedByUpdateTargetNotMatch) {
-  // Tests that, when no device policy is set and the permission for updates
-  // over cellular is false or missing, the update is blocked because the
-  // stored update target does not match the Omaha response.
-  OmahaResponse response;
-  MockConnectionManager mock_cm;
-  // A version different from the one in the Omaha response.
-  string diff_version = "99.99.99";
-  // A size different from the one in the Omaha response.
-  int64_t diff_size = 999;
-
-  fake_prefs_.SetString(kPrefsUpdateOverCellularTargetVersion, diff_version);
-  fake_prefs_.SetInt64(kPrefsUpdateOverCellularTargetSize, diff_size);
-  // Cellular (3G) is the only connection type allowed in this test.
-  fake_system_state_.set_connection_manager(&mock_cm);
-
-  EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
-      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular),
-                            SetArgPointee<1>(ConnectionTethering::kUnknown),
-                            Return(true)));
-  EXPECT_CALL(mock_cm, IsAllowedConnectionTypesForUpdateSet())
-      .WillRepeatedly(Return(false));
-  EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _))
-      .WillRepeatedly(Return(true));
-
-  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                               -1,
-                               false,  // ping_only
-                               ErrorCode::kOmahaUpdateIgnoredOverCellular,
-                               metrics::CheckResult::kUpdateAvailable,
-                               metrics::CheckReaction::kIgnored,
-                               metrics::DownloadErrorCode::kUnset,
-                               &response,
-                               nullptr));
-  EXPECT_FALSE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest,
-       ValidUpdateOverCellularAllowedByUpdateTargetMatch) {
-  // Tests that, when no device policy is set and the permission for updates
-  // over cellular is false or missing, the update is allowed because the
-  // stored update target matches the Omaha response.
-  OmahaResponse response;
-  MockConnectionManager mock_cm;
-  // The same version as in the Omaha response.
-  string new_version = fake_update_response_.version;
-  // The same size as in the Omaha response.
-  int64_t new_size = fake_update_response_.size;
-
-  fake_prefs_.SetString(kPrefsUpdateOverCellularTargetVersion, new_version);
-  fake_prefs_.SetInt64(kPrefsUpdateOverCellularTargetSize, new_size);
-  fake_system_state_.set_connection_manager(&mock_cm);
-
-  EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
-      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular),
-                            SetArgPointee<1>(ConnectionTethering::kUnknown),
-                            Return(true)));
-  EXPECT_CALL(mock_cm, IsAllowedConnectionTypesForUpdateSet())
-      .WillRepeatedly(Return(false));
-  EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _))
-      .WillRepeatedly(Return(true));
-
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_TRUE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest, ValidUpdateBlockedByRollback) {
-  string rollback_version = "1234.0.0";
-  OmahaResponse response;
-
-  MockPayloadState mock_payload_state;
-  fake_system_state_.set_payload_state(&mock_payload_state);
-
-  EXPECT_CALL(mock_payload_state, GetRollbackVersion())
-      .WillRepeatedly(Return(rollback_version));
-
-  fake_update_response_.version = rollback_version;
-  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                               -1,
-                               false,  // ping_only
-                               ErrorCode::kOmahaUpdateIgnoredPerPolicy,
-                               metrics::CheckResult::kUpdateAvailable,
-                               metrics::CheckReaction::kIgnored,
-                               metrics::DownloadErrorCode::kUnset,
-                               &response,
-                               nullptr));
-  EXPECT_FALSE(response.update_exists);
-}
-
-// Verify that update checks called during OOBE will not try to download an
-// update if the response doesn't include the deadline field.
-TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBE) {
-  OmahaResponse response;
-  fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
-
-  // TODO(senj): Set a better default value for metrics::CheckResult in
-  // OmahaRequestAction::ActionCompleted.
-  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                               -1,
-                               false,  // ping_only
-                               ErrorCode::kNonCriticalUpdateInOOBE,
-                               metrics::CheckResult::kParsingError,
-                               metrics::CheckReaction::kUnset,
-                               metrics::DownloadErrorCode::kUnset,
-                               &response,
-                               nullptr));
-  EXPECT_FALSE(response.update_exists);
-}
-
-// Verify that the IsOOBEComplete() value is ignored when the OOBE flow is not
-// enabled.
-TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBEDisabled) {
-  OmahaResponse response;
-  fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
-  fake_system_state_.fake_hardware()->SetIsOOBEEnabled(false);
-
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_TRUE(response.update_exists);
-}
-
-// Verify that update checks called during OOBE will still try to download an
-// update if the response includes the deadline field.
-TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBEDeadlineSet) {
-  OmahaResponse response;
-  fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
-  fake_update_response_.deadline = "20101020";
-
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_TRUE(response.update_exists);
-}
-
-// Verify that update checks called during OOBE will not try to download an
-// update if a rollback happened, even when the response includes the deadline
-// field.
-TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBERollback) {
-  OmahaResponse response;
-  fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
-  fake_update_response_.deadline = "20101020";
-  EXPECT_CALL(*(fake_system_state_.mock_payload_state()), GetRollbackHappened())
-      .WillOnce(Return(true));
-
-  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                               -1,
-                               false,  // ping_only
-                               ErrorCode::kNonCriticalUpdateInOOBE,
-                               metrics::CheckResult::kParsingError,
-                               metrics::CheckReaction::kUnset,
-                               metrics::DownloadErrorCode::kUnset,
-                               &response,
-                               nullptr));
-  EXPECT_FALSE(response.update_exists);
-}
-
-// Verify that non-critical updates are skipped by reporting the
-// kNonCriticalUpdateInOOBE error code when attempted over a cellular network,
-// i.e. when the update would need user permission. Note that reporting the
-// kOmahaUpdateIgnoredOverCellular error in this case might cause undesired UX
-// in OOBE (warning the user about an update that will be skipped).
-TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesInOOBEOverCellular) {
-  OmahaResponse response;
-  fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
-
-  MockConnectionManager mock_cm;
-  fake_system_state_.set_connection_manager(&mock_cm);
-
-  EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
-      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular),
-                            SetArgPointee<1>(ConnectionTethering::kUnknown),
-                            Return(true)));
-  EXPECT_CALL(mock_cm, IsAllowedConnectionTypesForUpdateSet())
-      .WillRepeatedly(Return(false));
-
-  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                               -1,
-                               false,  // ping_only
-                               ErrorCode::kNonCriticalUpdateInOOBE,
-                               metrics::CheckResult::kParsingError,
-                               metrics::CheckReaction::kUnset,
-                               metrics::DownloadErrorCode::kUnset,
-                               &response,
-                               nullptr));
-  EXPECT_FALSE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest, WallClockBasedWaitAloneCausesScattering) {
-  OmahaResponse response;
-  request_params_.set_wall_clock_based_wait_enabled(true);
-  request_params_.set_update_check_count_wait_enabled(false);
-  request_params_.set_waiting_period(TimeDelta::FromDays(2));
-
-  fake_system_state_.fake_clock()->SetWallclockTime(Time::Now());
-
-  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                               -1,
-                               false,  // ping_only
-                               ErrorCode::kOmahaUpdateDeferredPerPolicy,
-                               metrics::CheckResult::kUpdateAvailable,
-                               metrics::CheckReaction::kDeferring,
-                               metrics::DownloadErrorCode::kUnset,
-                               &response,
-                               nullptr));
-  EXPECT_FALSE(response.update_exists);
-
-  // Verify that an interactive check is not deferred.
-  request_params_.set_interactive(true);
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_TRUE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest, NoWallClockBasedWaitCausesNoScattering) {
-  OmahaResponse response;
-  request_params_.set_wall_clock_based_wait_enabled(false);
-  request_params_.set_waiting_period(TimeDelta::FromDays(2));
-  request_params_.set_update_check_count_wait_enabled(true);
-  request_params_.set_min_update_checks_needed(1);
-  request_params_.set_max_update_checks_allowed(8);
-
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_TRUE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest, ZeroMaxDaysToScatterCausesNoScattering) {
-  OmahaResponse response;
-  request_params_.set_wall_clock_based_wait_enabled(true);
-  request_params_.set_waiting_period(TimeDelta::FromDays(2));
-  request_params_.set_update_check_count_wait_enabled(true);
-  request_params_.set_min_update_checks_needed(1);
-  request_params_.set_max_update_checks_allowed(8);
-
-  fake_update_response_.max_days_to_scatter = "0";
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_TRUE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest, ZeroUpdateCheckCountCausesNoScattering) {
-  OmahaResponse response;
-  request_params_.set_wall_clock_based_wait_enabled(true);
-  request_params_.set_waiting_period(TimeDelta());
-  request_params_.set_update_check_count_wait_enabled(true);
-  request_params_.set_min_update_checks_needed(0);
-  request_params_.set_max_update_checks_allowed(0);
-
-  fake_system_state_.fake_clock()->SetWallclockTime(Time::Now());
-
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-
-  int64_t count;
-  ASSERT_TRUE(fake_prefs_.GetInt64(kPrefsUpdateCheckCount, &count));
-  ASSERT_EQ(count, 0);
-  EXPECT_TRUE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest, NonZeroUpdateCheckCountCausesScattering) {
-  OmahaResponse response;
-  request_params_.set_wall_clock_based_wait_enabled(true);
-  request_params_.set_waiting_period(TimeDelta());
-  request_params_.set_update_check_count_wait_enabled(true);
-  request_params_.set_min_update_checks_needed(1);
-  request_params_.set_max_update_checks_allowed(8);
-
-  fake_system_state_.fake_clock()->SetWallclockTime(Time::Now());
-
-  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                               -1,
-                               false,  // ping_only
-                               ErrorCode::kOmahaUpdateDeferredPerPolicy,
-                               metrics::CheckResult::kUpdateAvailable,
-                               metrics::CheckReaction::kDeferring,
-                               metrics::DownloadErrorCode::kUnset,
-                               &response,
-                               nullptr));
-
-  int64_t count;
-  ASSERT_TRUE(fake_prefs_.GetInt64(kPrefsUpdateCheckCount, &count));
-  ASSERT_GT(count, 0);
-  EXPECT_FALSE(response.update_exists);
-
-  // Verify that an interactive check is not deferred.
-  request_params_.set_interactive(true);
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_TRUE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest, ExistingUpdateCheckCountCausesScattering) {
-  OmahaResponse response;
-  request_params_.set_wall_clock_based_wait_enabled(true);
-  request_params_.set_waiting_period(TimeDelta());
-  request_params_.set_update_check_count_wait_enabled(true);
-  request_params_.set_min_update_checks_needed(1);
-  request_params_.set_max_update_checks_allowed(8);
-
-  fake_system_state_.fake_clock()->SetWallclockTime(Time::Now());
-
-  ASSERT_TRUE(fake_prefs_.SetInt64(kPrefsUpdateCheckCount, 5));
-
-  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                               -1,
-                               false,  // ping_only
-                               ErrorCode::kOmahaUpdateDeferredPerPolicy,
-                               metrics::CheckResult::kUpdateAvailable,
-                               metrics::CheckReaction::kDeferring,
-                               metrics::DownloadErrorCode::kUnset,
-                               &response,
-                               nullptr));
-
-  int64_t count;
-  ASSERT_TRUE(fake_prefs_.GetInt64(kPrefsUpdateCheckCount, &count));
-  // The count remains the same, as the decrementing happens in
-  // update_attempter, which this test doesn't exercise.
-  ASSERT_EQ(count, 5);
-  EXPECT_FALSE(response.update_exists);
-
-  // Verify that an interactive check is not deferred.
-  request_params_.set_interactive(true);
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_TRUE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest, StagingTurnedOnCausesScattering) {
-  // If staging is on, the value for max days to scatter should be ignored, and
-  // staging's scatter value should be used.
-  OmahaResponse response;
-  request_params_.set_wall_clock_based_wait_enabled(true);
-  request_params_.set_waiting_period(TimeDelta::FromDays(6));
-  request_params_.set_update_check_count_wait_enabled(false);
-
-  fake_system_state_.fake_clock()->SetWallclockTime(Time::Now());
-
-  ASSERT_TRUE(fake_prefs_.SetInt64(kPrefsWallClockStagingWaitPeriod, 6));
-  // This should not prevent scattering due to staging.
-  fake_update_response_.max_days_to_scatter = "0";
-  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                               -1,
-                               false,  // ping_only
-                               ErrorCode::kOmahaUpdateDeferredPerPolicy,
-                               metrics::CheckResult::kUpdateAvailable,
-                               metrics::CheckReaction::kDeferring,
-                               metrics::DownloadErrorCode::kUnset,
-                               &response,
-                               nullptr));
-  EXPECT_FALSE(response.update_exists);
-
-  // Interactive updates should not be affected.
-  request_params_.set_interactive(true);
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_TRUE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest, CohortsArePersisted) {
-  OmahaResponse response;
-  fake_update_response_.include_cohorts = true;
-  fake_update_response_.cohort = "s/154454/8479665";
-  fake_update_response_.cohorthint = "please-put-me-on-beta";
-  fake_update_response_.cohortname = "stable";
-
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-
-  string value;
-  EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohort, &value));
-  EXPECT_EQ(fake_update_response_.cohort, value);
-
-  EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohortHint, &value));
-  EXPECT_EQ(fake_update_response_.cohorthint, value);
-
-  EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohortName, &value));
-  EXPECT_EQ(fake_update_response_.cohortname, value);
-}
-
-TEST_F(OmahaRequestActionTest, CohortsAreUpdated) {
-  OmahaResponse response;
-  EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohort, "old_value"));
-  EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohortHint, "old_hint"));
-  EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohortName, "old_name"));
-  fake_update_response_.include_cohorts = true;
-  fake_update_response_.cohort = "s/154454/8479665";
-  fake_update_response_.cohorthint = "please-put-me-on-beta";
-  fake_update_response_.cohortname = "";
-
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-
-  string value;
-  EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohort, &value));
-  EXPECT_EQ(fake_update_response_.cohort, value);
-
-  EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohortHint, &value));
-  EXPECT_EQ(fake_update_response_.cohorthint, value);
-
-  EXPECT_FALSE(fake_prefs_.GetString(kPrefsOmahaCohortName, &value));
-}
-
-TEST_F(OmahaRequestActionTest, CohortsAreNotModifiedWhenMissing) {
-  OmahaResponse response;
-  EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohort, "old_value"));
-
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-
-  string value;
-  EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohort, &value));
-  EXPECT_EQ("old_value", value);
-
-  EXPECT_FALSE(fake_prefs_.GetString(kPrefsOmahaCohortHint, &value));
-  EXPECT_FALSE(fake_prefs_.GetString(kPrefsOmahaCohortName, &value));
-}
-
-TEST_F(OmahaRequestActionTest, CohortsArePersistedWhenNoUpdate) {
-  OmahaResponse response;
-  fake_update_response_.include_cohorts = true;
-  fake_update_response_.cohort = "s/154454/8479665";
-  fake_update_response_.cohorthint = "please-put-me-on-beta";
-  fake_update_response_.cohortname = "stable";
-
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kNoUpdateAvailable,
-                              metrics::CheckReaction::kUnset,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-
-  string value;
-  EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohort, &value));
-  EXPECT_EQ(fake_update_response_.cohort, value);
-
-  EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohortHint, &value));
-  EXPECT_EQ(fake_update_response_.cohorthint, value);
-
-  EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohortName, &value));
-  EXPECT_EQ(fake_update_response_.cohortname, value);
-}
-
-TEST_F(OmahaRequestActionTest, MultiAppCohortTest) {
-  OmahaResponse response;
-  fake_update_response_.multi_app = true;
-  fake_update_response_.include_cohorts = true;
-  fake_update_response_.cohort = "s/154454/8479665";
-  fake_update_response_.cohorthint = "please-put-me-on-beta";
-  fake_update_response_.cohortname = "stable";
-
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-
-  string value;
-  EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohort, &value));
-  EXPECT_EQ(fake_update_response_.cohort, value);
-
-  EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohortHint, &value));
-  EXPECT_EQ(fake_update_response_.cohorthint, value);
-
-  EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohortName, &value));
-  EXPECT_EQ(fake_update_response_.cohortname, value);
-}
-
-TEST_F(OmahaRequestActionTest, NoOutputPipeTest) {
-  const string http_response(fake_update_response_.GetNoUpdateResponse());
-
-  brillo::FakeMessageLoop loop(nullptr);
-  loop.SetAsCurrent();
-
-  auto action = std::make_unique<OmahaRequestAction>(
-      &fake_system_state_,
-      nullptr,
-      std::make_unique<MockHttpFetcher>(
-          http_response.data(), http_response.size(), nullptr),
-      false);
-  ActionProcessor processor;
-  processor.set_delegate(&delegate_);
-  processor.EnqueueAction(std::move(action));
-
-  loop.PostTask(base::Bind(
-      [](ActionProcessor* processor) { processor->StartProcessing(); },
-      base::Unretained(&processor)));
-  loop.Run();
-  EXPECT_FALSE(loop.PendingTasks());
-  EXPECT_FALSE(processor.IsRunning());
-}
-
-TEST_F(OmahaRequestActionTest, InvalidXmlTest) {
-  OmahaResponse response;
-  ASSERT_FALSE(TestUpdateCheck("invalid xml>",
-                               -1,
-                               false,  // ping_only
-                               ErrorCode::kOmahaRequestXMLParseError,
-                               metrics::CheckResult::kParsingError,
-                               metrics::CheckReaction::kUnset,
-                               metrics::DownloadErrorCode::kUnset,
-                               &response,
-                               nullptr));
-  EXPECT_FALSE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest, EmptyResponseTest) {
-  OmahaResponse response;
-  ASSERT_FALSE(TestUpdateCheck("",
-                               -1,
-                               false,  // ping_only
-                               ErrorCode::kOmahaRequestEmptyResponseError,
-                               metrics::CheckResult::kParsingError,
-                               metrics::CheckReaction::kUnset,
-                               metrics::DownloadErrorCode::kUnset,
-                               &response,
-                               nullptr));
-  EXPECT_FALSE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest, MissingStatusTest) {
-  OmahaResponse response;
-  ASSERT_FALSE(TestUpdateCheck(
-      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response protocol=\"3.0\">"
-      "<daystart elapsed_seconds=\"100\"/>"
-      "<app appid=\"foo\" status=\"ok\">"
-      "<ping status=\"ok\"/>"
-      "<updatecheck/></app></response>",
-      -1,
-      false,  // ping_only
-      ErrorCode::kOmahaResponseInvalid,
-      metrics::CheckResult::kParsingError,
-      metrics::CheckReaction::kUnset,
-      metrics::DownloadErrorCode::kUnset,
-      &response,
-      nullptr));
-  EXPECT_FALSE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest, InvalidStatusTest) {
-  OmahaResponse response;
-  ASSERT_FALSE(TestUpdateCheck(
-      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response protocol=\"3.0\">"
-      "<daystart elapsed_seconds=\"100\"/>"
-      "<app appid=\"foo\" status=\"ok\">"
-      "<ping status=\"ok\"/>"
-      "<updatecheck status=\"InvalidStatusTest\"/></app></response>",
-      -1,
-      false,  // ping_only
-      ErrorCode::kOmahaResponseInvalid,
-      metrics::CheckResult::kParsingError,
-      metrics::CheckReaction::kUnset,
-      metrics::DownloadErrorCode::kUnset,
-      &response,
-      nullptr));
-  EXPECT_FALSE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest, MissingNodesetTest) {
-  OmahaResponse response;
-  ASSERT_FALSE(TestUpdateCheck(
-      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response protocol=\"3.0\">"
-      "<daystart elapsed_seconds=\"100\"/>"
-      "<app appid=\"foo\" status=\"ok\">"
-      "<ping status=\"ok\"/>"
-      "</app></response>",
-      -1,
-      false,  // ping_only
-      ErrorCode::kOmahaResponseInvalid,
-      metrics::CheckResult::kParsingError,
-      metrics::CheckReaction::kUnset,
-      metrics::DownloadErrorCode::kUnset,
-      &response,
-      nullptr));
-  EXPECT_FALSE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest, MissingFieldTest) {
-  string input_response =
-      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response protocol=\"3.0\">"
-      "<daystart elapsed_seconds=\"100\"/>"
-      // the appid needs to match that in the request params
-      "<app appid=\"" +
-      fake_update_response_.app_id +
-      "\" status=\"ok\">"
-      "<updatecheck status=\"ok\">"
-      "<urls><url codebase=\"http://missing/field/test/\"/></urls>"
-      "<manifest version=\"10.2.3.4\">"
-      "<packages><package hash=\"not-used\" name=\"f\" "
-      "size=\"587\" hash_sha256=\"lkq34j5345\"/></packages>"
-      "<actions><action event=\"postinstall\" "
-      "Prompt=\"false\" "
-      "IsDeltaPayload=\"false\" "
-      "sha256=\"not-used\" "
-      "/></actions></manifest></updatecheck></app></response>";
-  LOG(INFO) << "Input Response = " << input_response;
-
-  OmahaResponse response;
-  ASSERT_TRUE(TestUpdateCheck(input_response,
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_TRUE(response.update_exists);
-  EXPECT_EQ("10.2.3.4", response.version);
-  EXPECT_EQ("http://missing/field/test/f",
-            response.packages[0].payload_urls[0]);
-  EXPECT_EQ("", response.more_info_url);
-  EXPECT_EQ("lkq34j5345", response.packages[0].hash);
-  EXPECT_EQ(587u, response.packages[0].size);
-  EXPECT_FALSE(response.prompt);
-  EXPECT_TRUE(response.deadline.empty());
-}
-
-namespace {
-class TerminateEarlyTestProcessorDelegate : public ActionProcessorDelegate {
- public:
-  void ProcessingStopped(const ActionProcessor* processor) {
-    brillo::MessageLoop::current()->BreakLoop();
-  }
-};
-
-void TerminateTransferTestStarter(ActionProcessor* processor) {
-  processor->StartProcessing();
-  CHECK(processor->IsRunning());
-  processor->StopProcessing();
-}
-}  // namespace
-
-TEST_F(OmahaRequestActionTest, TerminateTransferTest) {
-  brillo::FakeMessageLoop loop(nullptr);
-  loop.SetAsCurrent();
-
-  string http_response("doesn't matter");
-  auto action = std::make_unique<OmahaRequestAction>(
-      &fake_system_state_,
-      nullptr,
-      std::make_unique<MockHttpFetcher>(
-          http_response.data(), http_response.size(), nullptr),
-      false);
-  TerminateEarlyTestProcessorDelegate delegate;
-  ActionProcessor processor;
-  processor.set_delegate(&delegate);
-  processor.EnqueueAction(std::move(action));
-
-  loop.PostTask(base::Bind(&TerminateTransferTestStarter, &processor));
-  loop.Run();
-  EXPECT_FALSE(loop.PendingTasks());
-}
-
-TEST_F(OmahaRequestActionTest, XmlEncodeTest) {
-  string output;
-  EXPECT_TRUE(XmlEncode("ab", &output));
-  EXPECT_EQ("ab", output);
-  EXPECT_TRUE(XmlEncode("a<b", &output));
-  EXPECT_EQ("a&lt;b", output);
-  EXPECT_TRUE(XmlEncode("<&>\"\'\\", &output));
-  EXPECT_EQ("&lt;&amp;&gt;&quot;&apos;\\", output);
-  EXPECT_TRUE(XmlEncode("&lt;&amp;&gt;", &output));
-  EXPECT_EQ("&amp;lt;&amp;amp;&amp;gt;", output);
-  // Check that unterminated UTF-8 strings are handled properly.
-  EXPECT_FALSE(XmlEncode("\xc2", &output));
-  // Fail on chars outside the 7-bit ASCII range.
-  EXPECT_FALSE(XmlEncode("This is an 'n' with a tilde: \xc3\xb1", &output));
-}
-
-TEST_F(OmahaRequestActionTest, XmlEncodeWithDefaultTest) {
-  EXPECT_EQ("&lt;&amp;&gt;", XmlEncodeWithDefault("<&>", "something else"));
-  EXPECT_EQ("<not escaped>", XmlEncodeWithDefault("\xc2", "<not escaped>"));
-}
-
-TEST_F(OmahaRequestActionTest, XmlEncodeIsUsedForParams) {
-  brillo::Blob post_data;
-
-  // Make sure XML encoding is being applied to the params.
-  request_params_.set_os_sp("testtheservice_pack>");
-  request_params_.set_os_board("x86 generic<id");
-  request_params_.set_current_channel("unittest_track&lt;");
-  request_params_.set_target_channel("unittest_track&lt;");
-  request_params_.set_hwid("<OEM MODEL>");
-  fake_prefs_.SetString(kPrefsOmahaCohort, "evil\nstring");
-  fake_prefs_.SetString(kPrefsOmahaCohortHint, "evil&string\\");
-  fake_prefs_.SetString(
-      kPrefsOmahaCohortName,
-      base::JoinString(vector<string>(100, "My spoon is too big."), " "));
-  OmahaResponse response;
-  ASSERT_FALSE(TestUpdateCheck("invalid xml>",
-                               -1,
-                               false,  // ping_only
-                               ErrorCode::kOmahaRequestXMLParseError,
-                               metrics::CheckResult::kParsingError,
-                               metrics::CheckReaction::kUnset,
-                               metrics::DownloadErrorCode::kUnset,
-                               &response,
-                               &post_data));
-  // convert post_data to string
-  string post_str(post_data.begin(), post_data.end());
-  EXPECT_NE(string::npos, post_str.find("testtheservice_pack&gt;"));
-  EXPECT_EQ(string::npos, post_str.find("testtheservice_pack>"));
-  EXPECT_NE(string::npos, post_str.find("x86 generic&lt;id"));
-  EXPECT_EQ(string::npos, post_str.find("x86 generic<id"));
-  EXPECT_NE(string::npos, post_str.find("unittest_track&amp;lt;"));
-  EXPECT_EQ(string::npos, post_str.find("unittest_track&lt;"));
-  EXPECT_NE(string::npos, post_str.find("&lt;OEM MODEL&gt;"));
-  EXPECT_EQ(string::npos, post_str.find("<OEM MODEL>"));
-  EXPECT_NE(string::npos, post_str.find("cohort=\"evil\nstring\""));
-  EXPECT_EQ(string::npos, post_str.find("cohorthint=\"evil&string\\\""));
-  EXPECT_NE(string::npos, post_str.find("cohorthint=\"evil&amp;string\\\""));
-  // Values from Prefs that are too big are removed from the XML instead of
-  // being encoded.
-  EXPECT_EQ(string::npos, post_str.find("cohortname="));
-}
-
-TEST_F(OmahaRequestActionTest, XmlDecodeTest) {
-  OmahaResponse response;
-  fake_update_response_.deadline = "&lt;20110101";
-  fake_update_response_.more_info_url = "testthe&lt;url";
-  fake_update_response_.codebase = "testthe&amp;codebase/";
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-
-  EXPECT_EQ("testthe<url", response.more_info_url);
-  EXPECT_EQ("testthe&codebase/file.signed",
-            response.packages[0].payload_urls[0]);
-  EXPECT_EQ("<20110101", response.deadline);
-}
-
-TEST_F(OmahaRequestActionTest, ParseIntTest) {
-  OmahaResponse response;
-  // overflows int32_t:
-  fake_update_response_.size = 123123123123123ull;
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-
-  EXPECT_EQ(fake_update_response_.size, response.packages[0].size);
-}
-
-TEST_F(OmahaRequestActionTest, FormatUpdateCheckOutputTest) {
-  brillo::Blob post_data;
-  NiceMock<MockPrefs> prefs;
-  fake_system_state_.set_prefs(&prefs);
-
-  EXPECT_CALL(prefs, GetString(kPrefsPreviousVersion, _))
-      .WillOnce(DoAll(SetArgPointee<1>(string("")), Return(true)));
-  // An existing but empty previous version means that we didn't reboot to a
-  // new update, so there is no need to update the previous version.
-  EXPECT_CALL(prefs, SetString(kPrefsPreviousVersion, _)).Times(0);
-  ASSERT_FALSE(TestUpdateCheck("invalid xml>",
-                               -1,
-                               false,  // ping_only
-                               ErrorCode::kOmahaRequestXMLParseError,
-                               metrics::CheckResult::kParsingError,
-                               metrics::CheckReaction::kUnset,
-                               metrics::DownloadErrorCode::kUnset,
-                               nullptr,  // response
-                               &post_data));
-  // convert post_data to string
-  string post_str(post_data.begin(), post_data.end());
-  EXPECT_NE(
-      post_str.find("        <ping active=\"1\" a=\"-1\" r=\"-1\"></ping>\n"
-                    "        <updatecheck></updatecheck>\n"),
-      string::npos);
-  EXPECT_NE(post_str.find("hardware_class=\"OEM MODEL 09235 7471\""),
-            string::npos);
-  EXPECT_NE(post_str.find("fw_version=\"ChromeOSFirmware.1.0\""), string::npos);
-  EXPECT_NE(post_str.find("ec_version=\"0X0A1\""), string::npos);
-  // No <event> tag should be sent if we didn't reboot to an update.
-  EXPECT_EQ(post_str.find("<event"), string::npos);
-}
-
-TEST_F(OmahaRequestActionTest, FormatSuccessEventOutputTest) {
-  brillo::Blob post_data;
-  TestEvent(new OmahaEvent(OmahaEvent::kTypeUpdateDownloadStarted),
-            "invalid xml>",
-            &post_data);
-  // convert post_data to string
-  string post_str(post_data.begin(), post_data.end());
-  string expected_event = base::StringPrintf(
-      "        <event eventtype=\"%d\" eventresult=\"%d\"></event>\n",
-      OmahaEvent::kTypeUpdateDownloadStarted,
-      OmahaEvent::kResultSuccess);
-  EXPECT_NE(post_str.find(expected_event), string::npos);
-  EXPECT_EQ(post_str.find("ping"), string::npos);
-  EXPECT_EQ(post_str.find("updatecheck"), string::npos);
-}
-
-TEST_F(OmahaRequestActionTest, FormatErrorEventOutputTest) {
-  brillo::Blob post_data;
-  TestEvent(new OmahaEvent(OmahaEvent::kTypeDownloadComplete,
-                           OmahaEvent::kResultError,
-                           ErrorCode::kError),
-            "invalid xml>",
-            &post_data);
-  // convert post_data to string
-  string post_str(post_data.begin(), post_data.end());
-  string expected_event = base::StringPrintf(
-      "        <event eventtype=\"%d\" eventresult=\"%d\" "
-      "errorcode=\"%d\"></event>\n",
-      OmahaEvent::kTypeDownloadComplete,
-      OmahaEvent::kResultError,
-      static_cast<int>(ErrorCode::kError));
-  EXPECT_NE(post_str.find(expected_event), string::npos);
-  EXPECT_EQ(post_str.find("updatecheck"), string::npos);
-}
-
-TEST_F(OmahaRequestActionTest, IsEventTest) {
-  string http_response("doesn't matter");
-  OmahaRequestAction update_check_action(
-      &fake_system_state_,
-      nullptr,
-      std::make_unique<MockHttpFetcher>(
-          http_response.data(), http_response.size(), nullptr),
-      false);
-  EXPECT_FALSE(update_check_action.IsEvent());
-
-  OmahaRequestAction event_action(
-      &fake_system_state_,
-      new OmahaEvent(OmahaEvent::kTypeUpdateComplete),
-      std::make_unique<MockHttpFetcher>(
-          http_response.data(), http_response.size(), nullptr),
-      false);
-  EXPECT_TRUE(event_action.IsEvent());
-}
-
-TEST_F(OmahaRequestActionTest, FormatDeltaOkayOutputTest) {
-  for (int i = 0; i < 2; i++) {
-    bool delta_okay = i == 1;
-    const char* delta_okay_str = delta_okay ? "true" : "false";
-    brillo::Blob post_data;
-
-    request_params_.set_delta_okay(delta_okay);
-
-    ASSERT_FALSE(TestUpdateCheck("invalid xml>",
-                                 -1,
-                                 false,  // ping_only
-                                 ErrorCode::kOmahaRequestXMLParseError,
-                                 metrics::CheckResult::kParsingError,
-                                 metrics::CheckReaction::kUnset,
-                                 metrics::DownloadErrorCode::kUnset,
-                                 nullptr,
-                                 &post_data));
-    // convert post_data to string
-    string post_str(post_data.begin(), post_data.end());
-    EXPECT_NE(
-        post_str.find(base::StringPrintf(" delta_okay=\"%s\"", delta_okay_str)),
-        string::npos)
-        << "i = " << i;
-  }
-}
-
-TEST_F(OmahaRequestActionTest, FormatInteractiveOutputTest) {
-  for (int i = 0; i < 2; i++) {
-    bool interactive = i == 1;
-    const char* interactive_str = interactive ? "ondemandupdate" : "scheduler";
-    brillo::Blob post_data;
-    FakeSystemState fake_system_state;
-
-    request_params_.set_interactive(interactive);
-
-    ASSERT_FALSE(TestUpdateCheck("invalid xml>",
-                                 -1,
-                                 false,  // ping_only
-                                 ErrorCode::kOmahaRequestXMLParseError,
-                                 metrics::CheckResult::kParsingError,
-                                 metrics::CheckReaction::kUnset,
-                                 metrics::DownloadErrorCode::kUnset,
-                                 nullptr,
-                                 &post_data));
-    // convert post_data to string
-    string post_str(post_data.begin(), post_data.end());
-    EXPECT_NE(post_str.find(
-                  base::StringPrintf("installsource=\"%s\"", interactive_str)),
-              string::npos)
-        << "i = " << i;
-  }
-}
-
-TEST_F(OmahaRequestActionTest, FormatTargetVersionPrefixOutputTest) {
-  for (int i = 0; i < 2; i++) {
-    bool target_version_set = i == 1;
-    const char* target_version_prefix = target_version_set ? "10032." : "";
-    brillo::Blob post_data;
-    FakeSystemState fake_system_state;
-
-    request_params_.set_target_version_prefix(target_version_prefix);
-
-    ASSERT_FALSE(TestUpdateCheck("invalid xml>",
-                                 -1,
-                                 false,  // ping_only
-                                 ErrorCode::kOmahaRequestXMLParseError,
-                                 metrics::CheckResult::kParsingError,
-                                 metrics::CheckReaction::kUnset,
-                                 metrics::DownloadErrorCode::kUnset,
-                                 nullptr,
-                                 &post_data));
-    // convert post_data to string
-    string post_str(post_data.begin(), post_data.end());
-    if (target_version_set) {
-      EXPECT_NE(post_str.find("<updatecheck targetversionprefix=\"10032.\">"),
-                string::npos)
-          << "i = " << i;
-    } else {
-      EXPECT_EQ(post_str.find("targetversionprefix"), string::npos)
-          << "i = " << i;
-    }
-  }
-}
-
-TEST_F(OmahaRequestActionTest, FormatRollbackAllowedOutputTest) {
-  for (int i = 0; i < 4; i++) {
-    bool rollback_allowed = i / 2 == 0;
-    bool target_version_set = i % 2 == 0;
-    brillo::Blob post_data;
-    FakeSystemState fake_system_state;
-
-    request_params_.set_target_version_prefix(target_version_set ? "10032."
-                                                                 : "");
-    request_params_.set_rollback_allowed(rollback_allowed);
-
-    ASSERT_FALSE(TestUpdateCheck("invalid xml>",
-                                 -1,
-                                 false,  // ping_only
-                                 ErrorCode::kOmahaRequestXMLParseError,
-                                 metrics::CheckResult::kParsingError,
-                                 metrics::CheckReaction::kUnset,
-                                 metrics::DownloadErrorCode::kUnset,
-                                 nullptr,
-                                 &post_data));
-    // convert post_data to string
-    string post_str(post_data.begin(), post_data.end());
-    if (rollback_allowed && target_version_set) {
-      EXPECT_NE(post_str.find("rollback_allowed=\"true\""), string::npos)
-          << "i = " << i;
-    } else {
-      EXPECT_EQ(post_str.find("rollback_allowed"), string::npos) << "i = " << i;
-    }
-  }
-}
-
-TEST_F(OmahaRequestActionTest, OmahaEventTest) {
-  OmahaEvent default_event;
-  EXPECT_EQ(OmahaEvent::kTypeUnknown, default_event.type);
-  EXPECT_EQ(OmahaEvent::kResultError, default_event.result);
-  EXPECT_EQ(ErrorCode::kError, default_event.error_code);
-
-  OmahaEvent success_event(OmahaEvent::kTypeUpdateDownloadStarted);
-  EXPECT_EQ(OmahaEvent::kTypeUpdateDownloadStarted, success_event.type);
-  EXPECT_EQ(OmahaEvent::kResultSuccess, success_event.result);
-  EXPECT_EQ(ErrorCode::kSuccess, success_event.error_code);
-
-  OmahaEvent error_event(OmahaEvent::kTypeUpdateDownloadFinished,
-                         OmahaEvent::kResultError,
-                         ErrorCode::kError);
-  EXPECT_EQ(OmahaEvent::kTypeUpdateDownloadFinished, error_event.type);
-  EXPECT_EQ(OmahaEvent::kResultError, error_event.result);
-  EXPECT_EQ(ErrorCode::kError, error_event.error_code);
-}
-
-void OmahaRequestActionTest::PingTest(bool ping_only) {
-  NiceMock<MockPrefs> prefs;
-  fake_system_state_.set_prefs(&prefs);
-  EXPECT_CALL(prefs, GetInt64(kPrefsMetricsCheckLastReportingTime, _))
-      .Times(AnyNumber());
-  EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber());
-  // Add a few hours to the day difference to test no rounding, etc.
-  int64_t five_days_ago =
-      (Time::Now() - TimeDelta::FromHours(5 * 24 + 13)).ToInternalValue();
-  int64_t six_days_ago =
-      (Time::Now() - TimeDelta::FromHours(6 * 24 + 11)).ToInternalValue();
-  EXPECT_CALL(prefs, GetInt64(kPrefsInstallDateDays, _))
-      .WillOnce(DoAll(SetArgPointee<1>(0), Return(true)));
-  EXPECT_CALL(prefs, GetInt64(kPrefsLastActivePingDay, _))
-      .WillOnce(DoAll(SetArgPointee<1>(six_days_ago), Return(true)));
-  EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
-      .WillOnce(DoAll(SetArgPointee<1>(five_days_ago), Return(true)));
-  brillo::Blob post_data;
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
-                              -1,
-                              ping_only,
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kNoUpdateAvailable,
-                              metrics::CheckReaction::kUnset,
-                              metrics::DownloadErrorCode::kUnset,
-                              nullptr,
-                              &post_data));
-  string post_str(post_data.begin(), post_data.end());
-  EXPECT_NE(post_str.find("<ping active=\"1\" a=\"6\" r=\"5\"></ping>"),
-            string::npos);
-  if (ping_only) {
-    EXPECT_EQ(post_str.find("updatecheck"), string::npos);
-    EXPECT_EQ(post_str.find("previousversion"), string::npos);
-  } else {
-    EXPECT_NE(post_str.find("updatecheck"), string::npos);
-    EXPECT_NE(post_str.find("previousversion"), string::npos);
-  }
-}
-
-TEST_F(OmahaRequestActionTest, PingTestSendOnlyAPing) {
-  PingTest(true /* ping_only */);
-}
-
-TEST_F(OmahaRequestActionTest, PingTestSendAlsoAnUpdateCheck) {
-  PingTest(false /* ping_only */);
-}
-
-TEST_F(OmahaRequestActionTest, ActivePingTest) {
-  NiceMock<MockPrefs> prefs;
-  fake_system_state_.set_prefs(&prefs);
-  EXPECT_CALL(prefs, GetInt64(kPrefsMetricsCheckLastReportingTime, _))
-      .Times(AnyNumber());
-  EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber());
-  int64_t three_days_ago =
-      (Time::Now() - TimeDelta::FromHours(3 * 24 + 12)).ToInternalValue();
-  int64_t now = Time::Now().ToInternalValue();
-  EXPECT_CALL(prefs, GetInt64(kPrefsInstallDateDays, _))
-      .WillOnce(DoAll(SetArgPointee<1>(0), Return(true)));
-  EXPECT_CALL(prefs, GetInt64(kPrefsLastActivePingDay, _))
-      .WillOnce(DoAll(SetArgPointee<1>(three_days_ago), Return(true)));
-  EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
-      .WillOnce(DoAll(SetArgPointee<1>(now), Return(true)));
-  brillo::Blob post_data;
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kNoUpdateAvailable,
-                              metrics::CheckReaction::kUnset,
-                              metrics::DownloadErrorCode::kUnset,
-                              nullptr,
-                              &post_data));
-  string post_str(post_data.begin(), post_data.end());
-  EXPECT_NE(post_str.find("<ping active=\"1\" a=\"3\"></ping>"), string::npos);
-}
-
-TEST_F(OmahaRequestActionTest, RollCallPingTest) {
-  NiceMock<MockPrefs> prefs;
-  fake_system_state_.set_prefs(&prefs);
-  EXPECT_CALL(prefs, GetInt64(kPrefsMetricsCheckLastReportingTime, _))
-      .Times(AnyNumber());
-  EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber());
-  int64_t four_days_ago =
-      (Time::Now() - TimeDelta::FromHours(4 * 24)).ToInternalValue();
-  int64_t now = Time::Now().ToInternalValue();
-  EXPECT_CALL(prefs, GetInt64(kPrefsInstallDateDays, _))
-      .WillOnce(DoAll(SetArgPointee<1>(0), Return(true)));
-  EXPECT_CALL(prefs, GetInt64(kPrefsLastActivePingDay, _))
-      .WillOnce(DoAll(SetArgPointee<1>(now), Return(true)));
-  EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
-      .WillOnce(DoAll(SetArgPointee<1>(four_days_ago), Return(true)));
-  brillo::Blob post_data;
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kNoUpdateAvailable,
-                              metrics::CheckReaction::kUnset,
-                              metrics::DownloadErrorCode::kUnset,
-                              nullptr,
-                              &post_data));
-  string post_str(post_data.begin(), post_data.end());
-  EXPECT_NE(post_str.find("<ping active=\"1\" r=\"4\"></ping>\n"),
-            string::npos);
-}
-
-TEST_F(OmahaRequestActionTest, NoPingTest) {
-  NiceMock<MockPrefs> prefs;
-  fake_system_state_.set_prefs(&prefs);
-  EXPECT_CALL(prefs, GetInt64(kPrefsMetricsCheckLastReportingTime, _))
-      .Times(AnyNumber());
-  EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber());
-  int64_t one_hour_ago =
-      (Time::Now() - TimeDelta::FromHours(1)).ToInternalValue();
-  EXPECT_CALL(prefs, GetInt64(kPrefsInstallDateDays, _))
-      .WillOnce(DoAll(SetArgPointee<1>(0), Return(true)));
-  EXPECT_CALL(prefs, GetInt64(kPrefsLastActivePingDay, _))
-      .WillOnce(DoAll(SetArgPointee<1>(one_hour_ago), Return(true)));
-  EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
-      .WillOnce(DoAll(SetArgPointee<1>(one_hour_ago), Return(true)));
-  // kPrefsLastActivePingDay and kPrefsLastRollCallPingDay are set even if we
-  // didn't send a ping.
-  EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _))
-      .WillOnce(Return(true));
-  EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _))
-      .WillOnce(Return(true));
-  brillo::Blob post_data;
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kNoUpdateAvailable,
-                              metrics::CheckReaction::kUnset,
-                              metrics::DownloadErrorCode::kUnset,
-                              nullptr,
-                              &post_data));
-  string post_str(post_data.begin(), post_data.end());
-  EXPECT_EQ(post_str.find("ping"), string::npos);
-}
-
-TEST_F(OmahaRequestActionTest, IgnoreEmptyPingTest) {
-  // This test ensures that we ignore empty ping-only requests.
-  NiceMock<MockPrefs> prefs;
-  fake_system_state_.set_prefs(&prefs);
-  int64_t now = Time::Now().ToInternalValue();
-  EXPECT_CALL(prefs, GetInt64(kPrefsLastActivePingDay, _))
-      .WillOnce(DoAll(SetArgPointee<1>(now), Return(true)));
-  EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
-      .WillOnce(DoAll(SetArgPointee<1>(now), Return(true)));
-  EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _)).Times(0);
-  EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _)).Times(0);
-  brillo::Blob post_data;
-  EXPECT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
-                              -1,
-                              true,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUnset,
-                              metrics::CheckReaction::kUnset,
-                              metrics::DownloadErrorCode::kUnset,
-                              nullptr,
-                              &post_data));
-  EXPECT_EQ(0U, post_data.size());
-}
-
-TEST_F(OmahaRequestActionTest, BackInTimePingTest) {
-  NiceMock<MockPrefs> prefs;
-  fake_system_state_.set_prefs(&prefs);
-  EXPECT_CALL(prefs, GetInt64(kPrefsMetricsCheckLastReportingTime, _))
-      .Times(AnyNumber());
-  EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber());
-  int64_t future =
-      (Time::Now() + TimeDelta::FromHours(3 * 24 + 4)).ToInternalValue();
-  EXPECT_CALL(prefs, GetInt64(kPrefsInstallDateDays, _))
-      .WillOnce(DoAll(SetArgPointee<1>(0), Return(true)));
-  EXPECT_CALL(prefs, GetInt64(kPrefsLastActivePingDay, _))
-      .WillOnce(DoAll(SetArgPointee<1>(future), Return(true)));
-  EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
-      .WillOnce(DoAll(SetArgPointee<1>(future), Return(true)));
-  EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _))
-      .WillOnce(Return(true));
-  EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _))
-      .WillOnce(Return(true));
-  brillo::Blob post_data;
-  ASSERT_TRUE(
-      TestUpdateCheck("<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
-                      "protocol=\"3.0\"><daystart elapsed_seconds=\"100\"/>"
-                      "<app appid=\"foo\" status=\"ok\"><ping status=\"ok\"/>"
-                      "<updatecheck status=\"noupdate\"/></app></response>",
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kNoUpdateAvailable,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      nullptr,
-                      &post_data));
-  string post_str(post_data.begin(), post_data.end());
-  EXPECT_EQ(post_str.find("ping"), string::npos);
-}
-
-TEST_F(OmahaRequestActionTest, LastPingDayUpdateTest) {
-  // This test checks that the action updates the last ping day to now
-  // minus 200 seconds with a slack of 5 seconds. Therefore, the test
-  // may fail if it runs for longer than 5 seconds. It shouldn't run
-  // that long though.
-  int64_t midnight =
-      (Time::Now() - TimeDelta::FromSeconds(200)).ToInternalValue();
-  int64_t midnight_slack =
-      (Time::Now() - TimeDelta::FromSeconds(195)).ToInternalValue();
-  NiceMock<MockPrefs> prefs;
-  fake_system_state_.set_prefs(&prefs);
-  EXPECT_CALL(prefs, GetInt64(_, _)).Times(AnyNumber());
-  EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber());
-  EXPECT_CALL(prefs,
-              SetInt64(kPrefsLastActivePingDay,
-                       AllOf(Ge(midnight), Le(midnight_slack))))
-      .WillOnce(Return(true));
-  EXPECT_CALL(prefs,
-              SetInt64(kPrefsLastRollCallPingDay,
-                       AllOf(Ge(midnight), Le(midnight_slack))))
-      .WillOnce(Return(true));
-  ASSERT_TRUE(
-      TestUpdateCheck("<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
-                      "protocol=\"3.0\"><daystart elapsed_seconds=\"200\"/>"
-                      "<app appid=\"foo\" status=\"ok\"><ping status=\"ok\"/>"
-                      "<updatecheck status=\"noupdate\"/></app></response>",
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kNoUpdateAvailable,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      nullptr,
-                      nullptr));
-}
-
-TEST_F(OmahaRequestActionTest, NoElapsedSecondsTest) {
-  NiceMock<MockPrefs> prefs;
-  fake_system_state_.set_prefs(&prefs);
-  EXPECT_CALL(prefs, GetInt64(_, _)).Times(AnyNumber());
-  EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber());
-  EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _)).Times(0);
-  EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _)).Times(0);
-  ASSERT_TRUE(
-      TestUpdateCheck("<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
-                      "protocol=\"3.0\"><daystart blah=\"200\"/>"
-                      "<app appid=\"foo\" status=\"ok\"><ping status=\"ok\"/>"
-                      "<updatecheck status=\"noupdate\"/></app></response>",
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kNoUpdateAvailable,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      nullptr,
-                      nullptr));
-}
-
-TEST_F(OmahaRequestActionTest, BadElapsedSecondsTest) {
-  NiceMock<MockPrefs> prefs;
-  fake_system_state_.set_prefs(&prefs);
-  EXPECT_CALL(prefs, GetInt64(_, _)).Times(AnyNumber());
-  EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber());
-  EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _)).Times(0);
-  EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _)).Times(0);
-  ASSERT_TRUE(
-      TestUpdateCheck("<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
-                      "protocol=\"3.0\"><daystart elapsed_seconds=\"x\"/>"
-                      "<app appid=\"foo\" status=\"ok\"><ping status=\"ok\"/>"
-                      "<updatecheck status=\"noupdate\"/></app></response>",
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kNoUpdateAvailable,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      nullptr,
-                      nullptr));
-}
-
-TEST_F(OmahaRequestActionTest, ParseUpdateCheckAttributesTest) {
-  // Test that the "eol" flags is only parsed from the "_eol" attribute and not
-  // the "eol" attribute.
-  ASSERT_TRUE(
-      TestUpdateCheck("<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
-                      "protocol=\"3.0\"><app appid=\"foo\" status=\"ok\">"
-                      "<ping status=\"ok\"/><updatecheck status=\"noupdate\" "
-                      "_eol=\"security-only\" eol=\"eol\" _foo=\"bar\"/>"
-                      "</app></response>",
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kNoUpdateAvailable,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      nullptr,
-                      nullptr));
-  string eol_pref;
-  EXPECT_TRUE(
-      fake_system_state_.prefs()->GetString(kPrefsOmahaEolStatus, &eol_pref));
-  // Note that the eol="eol" attribute should be ignored and the _eol should be
-  // used instead.
-  EXPECT_EQ("security-only", eol_pref);
-}
-
-TEST_F(OmahaRequestActionTest, NoUniqueIDTest) {
-  brillo::Blob post_data;
-  ASSERT_FALSE(TestUpdateCheck("invalid xml>",
-                               -1,
-                               false,  // ping_only
-                               ErrorCode::kOmahaRequestXMLParseError,
-                               metrics::CheckResult::kParsingError,
-                               metrics::CheckReaction::kUnset,
-                               metrics::DownloadErrorCode::kUnset,
-                               nullptr,  // response
-                               &post_data));
-  // convert post_data to string
-  string post_str(post_data.begin(), post_data.end());
-  EXPECT_EQ(post_str.find("machineid="), string::npos);
-  EXPECT_EQ(post_str.find("userid="), string::npos);
-}
-
-TEST_F(OmahaRequestActionTest, NetworkFailureTest) {
-  OmahaResponse response;
-  const int http_error_code =
-      static_cast<int>(ErrorCode::kOmahaRequestHTTPResponseBase) + 501;
-  ASSERT_FALSE(TestUpdateCheck("",
-                               501,
-                               false,  // ping_only
-                               static_cast<ErrorCode>(http_error_code),
-                               metrics::CheckResult::kDownloadError,
-                               metrics::CheckReaction::kUnset,
-                               static_cast<metrics::DownloadErrorCode>(501),
-                               &response,
-                               nullptr));
-  EXPECT_FALSE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest, NetworkFailureBadHTTPCodeTest) {
-  OmahaResponse response;
-  const int http_error_code =
-      static_cast<int>(ErrorCode::kOmahaRequestHTTPResponseBase) + 999;
-  ASSERT_FALSE(TestUpdateCheck("",
-                               1500,
-                               false,  // ping_only
-                               static_cast<ErrorCode>(http_error_code),
-                               metrics::CheckResult::kDownloadError,
-                               metrics::CheckReaction::kUnset,
-                               metrics::DownloadErrorCode::kHttpStatusOther,
-                               &response,
-                               nullptr));
-  EXPECT_FALSE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest, TestUpdateFirstSeenAtGetsPersistedFirstTime) {
-  OmahaResponse response;
-  request_params_.set_wall_clock_based_wait_enabled(true);
-  request_params_.set_waiting_period(TimeDelta::FromDays(1));
-  request_params_.set_update_check_count_wait_enabled(false);
-
-  Time arbitrary_date;
-  ASSERT_TRUE(Time::FromString("6/4/1989", &arbitrary_date));
-  fake_system_state_.fake_clock()->SetWallclockTime(arbitrary_date);
-  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                               -1,
-                               false,  // ping_only
-                               ErrorCode::kOmahaUpdateDeferredPerPolicy,
-                               metrics::CheckResult::kUpdateAvailable,
-                               metrics::CheckReaction::kDeferring,
-                               metrics::DownloadErrorCode::kUnset,
-                               &response,
-                               nullptr));
-
-  int64_t timestamp = 0;
-  ASSERT_TRUE(fake_prefs_.GetInt64(kPrefsUpdateFirstSeenAt, &timestamp));
-  EXPECT_EQ(arbitrary_date.ToInternalValue(), timestamp);
-  EXPECT_FALSE(response.update_exists);
-
-  // Verify that we don't defer if this is an interactive check.
-  request_params_.set_interactive(true);
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_TRUE(response.update_exists);
-}
-
-TEST_F(OmahaRequestActionTest, TestUpdateFirstSeenAtGetsUsedIfAlreadyPresent) {
-  OmahaResponse response;
-  request_params_.set_wall_clock_based_wait_enabled(true);
-  request_params_.set_waiting_period(TimeDelta::FromDays(1));
-  request_params_.set_update_check_count_wait_enabled(false);
-
-  Time t1, t2;
-  ASSERT_TRUE(Time::FromString("1/1/2012", &t1));
-  ASSERT_TRUE(Time::FromString("1/3/2012", &t2));
-  ASSERT_TRUE(
-      fake_prefs_.SetInt64(kPrefsUpdateFirstSeenAt, t1.ToInternalValue()));
-  fake_system_state_.fake_clock()->SetWallclockTime(t2);
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-
-  EXPECT_TRUE(response.update_exists);
-
-  // Make sure the timestamp t1 is unchanged, showing that it was reused.
-  int64_t timestamp = 0;
-  ASSERT_TRUE(fake_prefs_.GetInt64(kPrefsUpdateFirstSeenAt, &timestamp));
-  ASSERT_TRUE(timestamp == t1.ToInternalValue());
-}
-
-TEST_F(OmahaRequestActionTest, TestChangingToMoreStableChannel) {
-  // Create a uniquely named test directory.
-  base::ScopedTempDir tempdir;
-  ASSERT_TRUE(tempdir.CreateUniqueTempDir());
-
-  brillo::Blob post_data;
-  request_params_.set_root(tempdir.GetPath().value());
-  request_params_.set_app_id("{22222222-2222-2222-2222-222222222222}");
-  request_params_.set_app_version("1.2.3.4");
-  request_params_.set_product_components("o.bundle=1");
-  request_params_.set_current_channel("canary-channel");
-  EXPECT_TRUE(
-      request_params_.SetTargetChannel("stable-channel", true, nullptr));
-  request_params_.UpdateDownloadChannel();
-  EXPECT_TRUE(request_params_.ShouldPowerwash());
-  ASSERT_FALSE(TestUpdateCheck("invalid xml>",
-                               -1,
-                               false,  // ping_only
-                               ErrorCode::kOmahaRequestXMLParseError,
-                               metrics::CheckResult::kParsingError,
-                               metrics::CheckReaction::kUnset,
-                               metrics::DownloadErrorCode::kUnset,
-                               nullptr,  // response
-                               &post_data));
-  // convert post_data to string
-  string post_str(post_data.begin(), post_data.end());
-  EXPECT_NE(
-      string::npos,
-      post_str.find("appid=\"{22222222-2222-2222-2222-222222222222}\" "
-                    "version=\"0.0.0.0\" from_version=\"1.2.3.4\" "
-                    "track=\"stable-channel\" from_track=\"canary-channel\" "));
-  EXPECT_EQ(string::npos, post_str.find("o.bundle"));
-}
-
-TEST_F(OmahaRequestActionTest, TestChangingToLessStableChannel) {
-  // Create a uniquely named test directory.
-  base::ScopedTempDir tempdir;
-  ASSERT_TRUE(tempdir.CreateUniqueTempDir());
-
-  brillo::Blob post_data;
-  request_params_.set_root(tempdir.GetPath().value());
-  request_params_.set_app_id("{11111111-1111-1111-1111-111111111111}");
-  request_params_.set_app_version("5.6.7.8");
-  request_params_.set_product_components("o.bundle=1");
-  request_params_.set_current_channel("stable-channel");
-  EXPECT_TRUE(
-      request_params_.SetTargetChannel("canary-channel", false, nullptr));
-  request_params_.UpdateDownloadChannel();
-  EXPECT_FALSE(request_params_.ShouldPowerwash());
-  ASSERT_FALSE(TestUpdateCheck("invalid xml>",
-                               -1,
-                               false,  // ping_only
-                               ErrorCode::kOmahaRequestXMLParseError,
-                               metrics::CheckResult::kParsingError,
-                               metrics::CheckReaction::kUnset,
-                               metrics::DownloadErrorCode::kUnset,
-                               nullptr,  // response
-                               &post_data));
-  // Convert post_data to string.
-  string post_str(post_data.begin(), post_data.end());
-  EXPECT_NE(
-      string::npos,
-      post_str.find("appid=\"{11111111-1111-1111-1111-111111111111}\" "
-                    "version=\"5.6.7.8\" "
-                    "track=\"canary-channel\" from_track=\"stable-channel\""));
-  EXPECT_EQ(string::npos, post_str.find("from_version"));
-  EXPECT_NE(string::npos, post_str.find("o.bundle.version=\"1\""));
-}
-
-// Checks that the initial ping with a=-1 r=-1 is not sent when the device
-// was powerwashed.
-TEST_F(OmahaRequestActionTest, PingWhenPowerwashed) {
-  fake_prefs_.SetString(kPrefsPreviousVersion, "");
-
-  // Flag that the device was powerwashed in the past.
-  fake_system_state_.fake_hardware()->SetPowerwashCount(1);
-
-  brillo::Blob post_data;
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kNoUpdateAvailable,
-                              metrics::CheckReaction::kUnset,
-                              metrics::DownloadErrorCode::kUnset,
-                              nullptr,
-                              &post_data));
-  // We shouldn't send a ping in this case since powerwash > 0.
-  string post_str(post_data.begin(), post_data.end());
-  EXPECT_EQ(string::npos, post_str.find("<ping"));
-}
-
-// Checks that the initial ping with a=-1 r=-1 is not sent when the device's
-// first_active_omaha_ping_sent flag is set.
-TEST_F(OmahaRequestActionTest, PingWhenFirstActiveOmahaPingIsSent) {
-  fake_prefs_.SetString(kPrefsPreviousVersion, "");
-
-  // Flag that the device was not powerwashed in the past.
-  fake_system_state_.fake_hardware()->SetPowerwashCount(0);
-
-  // Flag that the device has sent first active ping in the past.
-  fake_system_state_.fake_hardware()->SetFirstActiveOmahaPingSent();
-
-  brillo::Blob post_data;
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kNoUpdateAvailable,
-                              metrics::CheckReaction::kUnset,
-                              metrics::DownloadErrorCode::kUnset,
-                              nullptr,
-                              &post_data));
-  // We shouldn't send a ping in this case since
-  // first_active_omaha_ping_sent is true.
-  string post_str(post_data.begin(), post_data.end());
-  EXPECT_EQ(string::npos, post_str.find("<ping"));
-}
-
-// Checks that the event 54 is sent on a reboot to a new update.
-TEST_F(OmahaRequestActionTest, RebootAfterUpdateEvent) {
-  // Flag that the device was updated in a previous boot.
-  fake_prefs_.SetString(kPrefsPreviousVersion, "1.2.3.4");
-
-  brillo::Blob post_data;
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kNoUpdateAvailable,
-                              metrics::CheckReaction::kUnset,
-                              metrics::DownloadErrorCode::kUnset,
-                              nullptr,
-                              &post_data));
-  string post_str(post_data.begin(), post_data.end());
-
-  // An event 54 is included and has the right version.
-  EXPECT_NE(
-      string::npos,
-      post_str.find(base::StringPrintf("<event eventtype=\"%d\"",
-                                       OmahaEvent::kTypeRebootedAfterUpdate)));
-  EXPECT_NE(string::npos,
-            post_str.find("previousversion=\"1.2.3.4\"></event>"));
-
-  // The previous version pref should still exist but have been cleared.
-  EXPECT_TRUE(fake_prefs_.Exists(kPrefsPreviousVersion));
-  string prev_version;
-  EXPECT_TRUE(fake_prefs_.GetString(kPrefsPreviousVersion, &prev_version));
-  EXPECT_TRUE(prev_version.empty());
-}
-
-void OmahaRequestActionTest::P2PTest(bool initial_allow_p2p_for_downloading,
-                                     bool initial_allow_p2p_for_sharing,
-                                     bool omaha_disable_p2p_for_downloading,
-                                     bool omaha_disable_p2p_for_sharing,
-                                     bool payload_state_allow_p2p_attempt,
-                                     bool expect_p2p_client_lookup,
-                                     const string& p2p_client_result_url,
-                                     bool expected_allow_p2p_for_downloading,
-                                     bool expected_allow_p2p_for_sharing,
-                                     const string& expected_p2p_url) {
-  OmahaResponse response;
-  bool actual_allow_p2p_for_downloading = initial_allow_p2p_for_downloading;
-  bool actual_allow_p2p_for_sharing = initial_allow_p2p_for_sharing;
-  string actual_p2p_url;
-
-  MockPayloadState mock_payload_state;
-  fake_system_state_.set_payload_state(&mock_payload_state);
-  EXPECT_CALL(mock_payload_state, P2PAttemptAllowed())
-      .WillRepeatedly(Return(payload_state_allow_p2p_attempt));
-  EXPECT_CALL(mock_payload_state, GetUsingP2PForDownloading())
-      .WillRepeatedly(ReturnPointee(&actual_allow_p2p_for_downloading));
-  EXPECT_CALL(mock_payload_state, GetUsingP2PForSharing())
-      .WillRepeatedly(ReturnPointee(&actual_allow_p2p_for_sharing));
-  EXPECT_CALL(mock_payload_state, SetUsingP2PForDownloading(_))
-      .WillRepeatedly(SaveArg<0>(&actual_allow_p2p_for_downloading));
-  EXPECT_CALL(mock_payload_state, SetUsingP2PForSharing(_))
-      .WillRepeatedly(SaveArg<0>(&actual_allow_p2p_for_sharing));
-  EXPECT_CALL(mock_payload_state, SetP2PUrl(_))
-      .WillRepeatedly(SaveArg<0>(&actual_p2p_url));
-
-  MockP2PManager mock_p2p_manager;
-  fake_system_state_.set_p2p_manager(&mock_p2p_manager);
-  mock_p2p_manager.fake().SetLookupUrlForFileResult(p2p_client_result_url);
-
-  TimeDelta timeout = TimeDelta::FromSeconds(kMaxP2PNetworkWaitTimeSeconds);
-  EXPECT_CALL(mock_p2p_manager, LookupUrlForFile(_, _, timeout, _))
-      .Times(expect_p2p_client_lookup ? 1 : 0);
-
-  fake_update_response_.disable_p2p_for_downloading =
-      omaha_disable_p2p_for_downloading;
-  fake_update_response_.disable_p2p_for_sharing = omaha_disable_p2p_for_sharing;
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_TRUE(response.update_exists);
-
-  EXPECT_EQ(omaha_disable_p2p_for_downloading,
-            response.disable_p2p_for_downloading);
-  EXPECT_EQ(omaha_disable_p2p_for_sharing, response.disable_p2p_for_sharing);
-
-  EXPECT_EQ(expected_allow_p2p_for_downloading,
-            actual_allow_p2p_for_downloading);
-  EXPECT_EQ(expected_allow_p2p_for_sharing, actual_allow_p2p_for_sharing);
-  EXPECT_EQ(expected_p2p_url, actual_p2p_url);
-}
-
-TEST_F(OmahaRequestActionTest, P2PWithPeer) {
-  P2PTest(true,                   // initial_allow_p2p_for_downloading
-          true,                   // initial_allow_p2p_for_sharing
-          false,                  // omaha_disable_p2p_for_downloading
-          false,                  // omaha_disable_p2p_for_sharing
-          true,                   // payload_state_allow_p2p_attempt
-          true,                   // expect_p2p_client_lookup
-          "http://1.3.5.7/p2p",   // p2p_client_result_url
-          true,                   // expected_allow_p2p_for_downloading
-          true,                   // expected_allow_p2p_for_sharing
-          "http://1.3.5.7/p2p");  // expected_p2p_url
-}
-
-TEST_F(OmahaRequestActionTest, P2PWithoutPeer) {
-  P2PTest(true,   // initial_allow_p2p_for_downloading
-          true,   // initial_allow_p2p_for_sharing
-          false,  // omaha_disable_p2p_for_downloading
-          false,  // omaha_disable_p2p_for_sharing
-          true,   // payload_state_allow_p2p_attempt
-          true,   // expect_p2p_client_lookup
-          "",     // p2p_client_result_url
-          false,  // expected_allow_p2p_for_downloading
-          true,   // expected_allow_p2p_for_sharing
-          "");    // expected_p2p_url
-}
-
-TEST_F(OmahaRequestActionTest, P2PDownloadNotAllowed) {
-  P2PTest(false,    // initial_allow_p2p_for_downloading
-          true,     // initial_allow_p2p_for_sharing
-          false,    // omaha_disable_p2p_for_downloading
-          false,    // omaha_disable_p2p_for_sharing
-          true,     // payload_state_allow_p2p_attempt
-          false,    // expect_p2p_client_lookup
-          "unset",  // p2p_client_result_url
-          false,    // expected_allow_p2p_for_downloading
-          true,     // expected_allow_p2p_for_sharing
-          "");      // expected_p2p_url
-}
-
-TEST_F(OmahaRequestActionTest, P2PWithPeerDownloadDisabledByOmaha) {
-  P2PTest(true,     // initial_allow_p2p_for_downloading
-          true,     // initial_allow_p2p_for_sharing
-          true,     // omaha_disable_p2p_for_downloading
-          false,    // omaha_disable_p2p_for_sharing
-          true,     // payload_state_allow_p2p_attempt
-          false,    // expect_p2p_client_lookup
-          "unset",  // p2p_client_result_url
-          false,    // expected_allow_p2p_for_downloading
-          true,     // expected_allow_p2p_for_sharing
-          "");      // expected_p2p_url
-}
-
-TEST_F(OmahaRequestActionTest, P2PWithPeerSharingDisabledByOmaha) {
-  P2PTest(true,                   // initial_allow_p2p_for_downloading
-          true,                   // initial_allow_p2p_for_sharing
-          false,                  // omaha_disable_p2p_for_downloading
-          true,                   // omaha_disable_p2p_for_sharing
-          true,                   // payload_state_allow_p2p_attempt
-          true,                   // expect_p2p_client_lookup
-          "http://1.3.5.7/p2p",   // p2p_client_result_url
-          true,                   // expected_allow_p2p_for_downloading
-          false,                  // expected_allow_p2p_for_sharing
-          "http://1.3.5.7/p2p");  // expected_p2p_url
-}
-
-TEST_F(OmahaRequestActionTest, P2PWithPeerBothDisabledByOmaha) {
-  P2PTest(true,     // initial_allow_p2p_for_downloading
-          true,     // initial_allow_p2p_for_sharing
-          true,     // omaha_disable_p2p_for_downloading
-          true,     // omaha_disable_p2p_for_sharing
-          true,     // payload_state_allow_p2p_attempt
-          false,    // expect_p2p_client_lookup
-          "unset",  // p2p_client_result_url
-          false,    // expected_allow_p2p_for_downloading
-          false,    // expected_allow_p2p_for_sharing
-          "");      // expected_p2p_url
-}
-
-bool OmahaRequestActionTest::InstallDateParseHelper(const string& elapsed_days,
-                                                    OmahaResponse* response) {
-  fake_update_response_.elapsed_days = elapsed_days;
-  return TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                         -1,
-                         false,  // ping_only
-                         ErrorCode::kSuccess,
-                         metrics::CheckResult::kUpdateAvailable,
-                         metrics::CheckReaction::kUpdating,
-                         metrics::DownloadErrorCode::kUnset,
-                         response,
-                         nullptr);
-}
-
-TEST_F(OmahaRequestActionTest, ParseInstallDateFromResponse) {
-  OmahaResponse response;
-
-  // Simulate a successful update check that happens during OOBE.  The
-  // deadline in the response is needed to force the update attempt to
-  // occur; responses without a deadline seen during OOBE will normally
-  // return ErrorCode::kNonCriticalUpdateInOOBE.
-  fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
-  fake_update_response_.deadline = "20101020";
-
-  // Check that we parse elapsed_days in the Omaha Response correctly
-  // and that the kPrefsInstallDateDays pref is written.
-  EXPECT_FALSE(fake_prefs_.Exists(kPrefsInstallDateDays));
-  EXPECT_TRUE(InstallDateParseHelper("42", &response));
-  EXPECT_TRUE(response.update_exists);
-  EXPECT_EQ(42, response.install_date_days);
-  EXPECT_TRUE(fake_prefs_.Exists(kPrefsInstallDateDays));
-  int64_t prefs_days;
-  EXPECT_TRUE(fake_prefs_.GetInt64(kPrefsInstallDateDays, &prefs_days));
-  EXPECT_EQ(prefs_days, 42);
-
-  // If there already is a value set, we shouldn't do anything.
-  EXPECT_TRUE(InstallDateParseHelper("7", &response));
-  EXPECT_TRUE(response.update_exists);
-  EXPECT_EQ(7, response.install_date_days);
-  EXPECT_TRUE(fake_prefs_.GetInt64(kPrefsInstallDateDays, &prefs_days));
-  EXPECT_EQ(prefs_days, 42);
-
-  // Note that elapsed_days is not necessarily divisible by 7 so check
-  // that we round down correctly when populating kPrefsInstallDateDays.
-  EXPECT_TRUE(fake_prefs_.Delete(kPrefsInstallDateDays));
-  EXPECT_TRUE(InstallDateParseHelper("23", &response));
-  EXPECT_TRUE(response.update_exists);
-  EXPECT_EQ(23, response.install_date_days);
-  EXPECT_TRUE(fake_prefs_.GetInt64(kPrefsInstallDateDays, &prefs_days));
-  EXPECT_EQ(prefs_days, 21);
-
-  // Check that we correctly handle elapsed_days not being included in
-  // the Omaha Response.
-  EXPECT_TRUE(InstallDateParseHelper("", &response));
-  EXPECT_TRUE(response.update_exists);
-  EXPECT_EQ(-1, response.install_date_days);
-}
-
-// If there are no prefs and OOBE is not complete, we should not
-// report anything to Omaha.
-TEST_F(OmahaRequestActionTest, GetInstallDateWhenNoPrefsNorOOBE) {
-  fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
-  EXPECT_EQ(OmahaRequestAction::GetInstallDate(&fake_system_state_), -1);
-  EXPECT_FALSE(fake_prefs_.Exists(kPrefsInstallDateDays));
-}
-
-// If OOBE is complete and happened on a valid date (e.g. after Jan
-// 1 2007 0:00 PST), that date should be used and written to
-// prefs. However, first try with an invalid date and check that we
-// do nothing.
-TEST_F(OmahaRequestActionTest, GetInstallDateWhenOOBECompletedWithInvalidDate) {
-  Time oobe_date = Time::FromTimeT(42);  // Dec 31, 1969 16:00:42 PST.
-  fake_system_state_.fake_hardware()->SetIsOOBEComplete(oobe_date);
-  EXPECT_EQ(OmahaRequestAction::GetInstallDate(&fake_system_state_), -1);
-  EXPECT_FALSE(fake_prefs_.Exists(kPrefsInstallDateDays));
-}
-
-// Then check with a valid date. The date Jan 20, 2007 0:00 PST
-// should yield an InstallDate of 14.
-TEST_F(OmahaRequestActionTest, GetInstallDateWhenOOBECompletedWithValidDate) {
-  Time oobe_date = Time::FromTimeT(1169280000);  // Jan 20, 2007 0:00 PST.
-  fake_system_state_.fake_hardware()->SetIsOOBEComplete(oobe_date);
-  EXPECT_EQ(OmahaRequestAction::GetInstallDate(&fake_system_state_), 14);
-  EXPECT_TRUE(fake_prefs_.Exists(kPrefsInstallDateDays));
-
-  int64_t prefs_days;
-  EXPECT_TRUE(fake_prefs_.GetInt64(kPrefsInstallDateDays, &prefs_days));
-  EXPECT_EQ(prefs_days, 14);
-}
-
-// Now that we have a valid date in prefs, check that we keep using
-// that even if OOBE date reports something else. The date Jan 30,
-// 2007 0:00 PST should yield an InstallDate of 28... but since
-// there's a prefs file, we should still get 14.
-TEST_F(OmahaRequestActionTest, GetInstallDateWhenOOBECompletedDateChanges) {
-  // Set a valid date in the prefs first.
-  EXPECT_TRUE(fake_prefs_.SetInt64(kPrefsInstallDateDays, 14));
-
-  Time oobe_date = Time::FromTimeT(1170144000);  // Jan 30, 2007 0:00 PST.
-  fake_system_state_.fake_hardware()->SetIsOOBEComplete(oobe_date);
-  EXPECT_EQ(OmahaRequestAction::GetInstallDate(&fake_system_state_), 14);
-
-  int64_t prefs_days;
-  EXPECT_TRUE(fake_prefs_.GetInt64(kPrefsInstallDateDays, &prefs_days));
-  EXPECT_EQ(prefs_days, 14);
-
-  // If we delete the prefs file, we should get 28 days.
-  EXPECT_TRUE(fake_prefs_.Delete(kPrefsInstallDateDays));
-  EXPECT_EQ(OmahaRequestAction::GetInstallDate(&fake_system_state_), 28);
-  EXPECT_TRUE(fake_prefs_.GetInt64(kPrefsInstallDateDays, &prefs_days));
-  EXPECT_EQ(prefs_days, 28);
-}
-
-// Verifies that a device that has no device policy and is not a consumer
-// device sets the max kernel key version to the current version,
-// i.e. the same behavior as if rollback is enabled.
-TEST_F(OmahaRequestActionTest, NoPolicyEnterpriseDevicesSetMaxRollback) {
-  FakeHardware* fake_hw = fake_system_state_.fake_hardware();
-
-  // Set up and verify some initial default values for the kernel TPM
-  // values that control verified boot and rollback.
-  const int min_kernel_version = 4;
-  fake_hw->SetMinKernelKeyVersion(min_kernel_version);
-  fake_hw->SetMaxKernelKeyRollforward(kRollforwardInfinity);
-  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
-  EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward());
-
-  EXPECT_CALL(
-      *fake_system_state_.mock_metrics_reporter(),
-      ReportKeyVersionMetrics(min_kernel_version, min_kernel_version, true))
-      .Times(1);
-
-  OmahaResponse response;
-  TestRollbackCheck(false /* is_consumer_device */,
-                    3 /* rollback_allowed_milestones */,
-                    false /* is_policy_loaded */,
-                    &response);
-
-  // Verify that kernel_max_rollforward was set to the current minimum
-  // kernel key version. This has the effect of freezing roll-forwards
-  // indefinitely, which holds the rollback window open until a future
-  // change is able to move it forward relative to the configured window.
-  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
-  EXPECT_EQ(min_kernel_version, fake_hw->GetMaxKernelKeyRollforward());
-}
-
-// Verifies that a consumer device with no device policy leaves the
-// max kernel key rollforward at logical infinity, i.e. the same
-// behavior as if rollback is disabled.
-TEST_F(OmahaRequestActionTest, NoPolicyConsumerDevicesSetMaxRollback) {
-  FakeHardware* fake_hw = fake_system_state_.fake_hardware();
-
-  // Set up and verify some initial default values for the kernel TPM
-  // values that control verified boot and rollback.
-  const int min_kernel_version = 3;
-  fake_hw->SetMinKernelKeyVersion(min_kernel_version);
-  fake_hw->SetMaxKernelKeyRollforward(kRollforwardInfinity);
-  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
-  EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward());
-
-  EXPECT_CALL(
-      *fake_system_state_.mock_metrics_reporter(),
-      ReportKeyVersionMetrics(min_kernel_version, kRollforwardInfinity, true))
-      .Times(1);
-
-  OmahaResponse response;
-  TestRollbackCheck(true /* is_consumer_device */,
-                    3 /* rollback_allowed_milestones */,
-                    false /* is_policy_loaded */,
-                    &response);
-
-  // Verify that, with rollback disabled, kernel_max_rollforward was
-  // set to logical infinity. This is the expected behavior for
-  // consumer devices and matches the existing behavior prior to the
-  // rollback features.
-  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
-  EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward());
-}
-
-// Verifies that a device with rollback enabled sets kernel_max_rollforward
-// in the TPM to prevent roll forward.
-TEST_F(OmahaRequestActionTest, RollbackEnabledDevicesSetMaxRollback) {
-  FakeHardware* fake_hw = fake_system_state_.fake_hardware();
-
-  // Set up and verify some initial default values for the kernel TPM
-  // values that control verified boot and rollback.
-  const int allowed_milestones = 4;
-  const int min_kernel_version = 3;
-  fake_hw->SetMinKernelKeyVersion(min_kernel_version);
-  fake_hw->SetMaxKernelKeyRollforward(kRollforwardInfinity);
-  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
-  EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward());
-
-  EXPECT_CALL(
-      *fake_system_state_.mock_metrics_reporter(),
-      ReportKeyVersionMetrics(min_kernel_version, min_kernel_version, true))
-      .Times(1);
-
-  OmahaResponse response;
-  TestRollbackCheck(false /* is_consumer_device */,
-                    allowed_milestones,
-                    true /* is_policy_loaded */,
-                    &response);
-
-  // Verify that, with rollback enabled, kernel_max_rollforward was
-  // set to the current minimum kernel key version. This has the
-  // effect of freezing roll-forwards indefinitely, which holds the
-  // rollback window open until a future change is able to move it
-  // forward relative to the configured window.
-  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
-  EXPECT_EQ(min_kernel_version, fake_hw->GetMaxKernelKeyRollforward());
-}
-
-// Verifies that a device with rollback disabled sets kernel_max_rollforward
-// in the TPM to logical infinity, to allow roll forward.
-TEST_F(OmahaRequestActionTest, RollbackDisabledDevicesSetMaxRollback) {
-  FakeHardware* fake_hw = fake_system_state_.fake_hardware();
-
-  // Set up and verify some initial default values for the kernel TPM
-  // values that control verified boot and rollback.
-  const int allowed_milestones = 0;
-  const int min_kernel_version = 3;
-  fake_hw->SetMinKernelKeyVersion(min_kernel_version);
-  fake_hw->SetMaxKernelKeyRollforward(kRollforwardInfinity);
-  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
-  EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward());
-
-  EXPECT_CALL(
-      *fake_system_state_.mock_metrics_reporter(),
-      ReportKeyVersionMetrics(min_kernel_version, kRollforwardInfinity, true))
-      .Times(1);
-
-  OmahaResponse response;
-  TestRollbackCheck(false /* is_consumer_device */,
-                    allowed_milestones,
-                    true /* is_policy_loaded */,
-                    &response);
-
-  // Verify that, with rollback disabled, kernel_max_rollforward was
-  // set to logical infinity.
-  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
-  EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward());
-}
-
-TEST_F(OmahaRequestActionTest, RollbackResponseParsedNoEntries) {
-  OmahaResponse response;
-  fake_update_response_.rollback = true;
-  TestRollbackCheck(false /* is_consumer_device */,
-                    4 /* rollback_allowed_milestones */,
-                    true /* is_policy_loaded */,
-                    &response);
-  EXPECT_TRUE(response.is_rollback);
-}
-
-TEST_F(OmahaRequestActionTest, RollbackResponseValidVersionsParsed) {
-  OmahaResponse response;
-  fake_update_response_.rollback_firmware_version = "1.2";
-  fake_update_response_.rollback_kernel_version = "3.4";
-  fake_update_response_.rollback = true;
-  TestRollbackCheck(false /* is_consumer_device */,
-                    4 /* rollback_allowed_milestones */,
-                    true /* is_policy_loaded */,
-                    &response);
-  EXPECT_TRUE(response.is_rollback);
-  EXPECT_EQ(1, response.rollback_key_version.firmware_key);
-  EXPECT_EQ(2, response.rollback_key_version.firmware);
-  EXPECT_EQ(3, response.rollback_key_version.kernel_key);
-  EXPECT_EQ(4, response.rollback_key_version.kernel);
-}
-
-TEST_F(OmahaRequestActionTest,
-       TestUpdateFirstSeenAtPrefPersistedIfUpdateExists) {
-  FakeClock fake_clock;
-  Time now = Time::Now();
-  fake_clock.SetWallclockTime(now);
-  fake_system_state_.set_clock(&fake_clock);
-
-  OmahaResponse response;
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_TRUE(response.update_exists);
-  EXPECT_TRUE(fake_prefs_.Exists(kPrefsUpdateFirstSeenAt));
-
-  int64_t stored_first_seen_at_time;
-  EXPECT_TRUE(fake_prefs_.GetInt64(kPrefsUpdateFirstSeenAt,
-                                   &stored_first_seen_at_time));
-  EXPECT_EQ(now.ToInternalValue(), stored_first_seen_at_time);
-}
-
-TEST_F(OmahaRequestActionTest,
-       TestUpdateFirstSeenAtPrefNotPersistedIfUpdateFails) {
-  FakeClock fake_clock;
-  Time now = Time::Now();
-  fake_clock.SetWallclockTime(now);
-  fake_system_state_.set_clock(&fake_clock);
-
-  OmahaResponse response;
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kNoUpdateAvailable,
-                              metrics::CheckReaction::kUnset,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_FALSE(response.update_exists);
-  EXPECT_FALSE(fake_prefs_.Exists(kPrefsUpdateFirstSeenAt));
-}
-
-TEST_F(OmahaRequestActionTest, InstallTest) {
-  OmahaResponse response;
-  request_params_.set_is_install(true);
-  request_params_.set_dlc_module_ids({"dlc_no_0", "dlc_no_1"});
-  brillo::Blob post_data;
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              true,   // is_consumer_device
-                              0,      // rollback_allowed_milestones
-                              false,  // is_policy_loaded
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              &post_data));
-  // Convert post_data to string.
-  string post_str(post_data.begin(), post_data.end());
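-  // Each DLC should be addressed in the request as
-  // "<platform appid>_<dlc id>", e.g. appid="..._dlc_no_0", in addition to
-  // the plain platform appid checked further below.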
-  for (const auto& dlc_module_id : request_params_.dlc_module_ids()) {
-    EXPECT_NE(string::npos,
-              post_str.find("appid=\"" + fake_update_response_.app_id + "_" +
-                            dlc_module_id + "\""));
-  }
-  EXPECT_NE(string::npos,
-            post_str.find("appid=\"" + fake_update_response_.app_id + "\""));
-
-  // Count the number of updatecheck tags in the request body.
-  int updatecheck_count = 0;
-  size_t pos = 0;
-  while ((pos = post_str.find("<updatecheck", pos)) != string::npos) {
-    updatecheck_count++;
-    pos++;
-  }
-  EXPECT_EQ(request_params_.dlc_module_ids().size(), updatecheck_count);
-}
-
-TEST_F(OmahaRequestActionTest, InstallMissingPlatformVersionTest) {
-  fake_update_response_.multi_app_skip_updatecheck = true;
-  fake_update_response_.multi_app_no_update = false;
-  request_params_.set_is_install(true);
-  request_params_.set_dlc_module_ids({"dlc_no_0", "dlc_no_1"});
-  request_params_.set_app_id(fake_update_response_.app_id_skip_updatecheck);
-  OmahaResponse response;
-  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
-                              -1,
-                              false,  // ping_only
-                              ErrorCode::kSuccess,
-                              metrics::CheckResult::kUpdateAvailable,
-                              metrics::CheckReaction::kUpdating,
-                              metrics::DownloadErrorCode::kUnset,
-                              &response,
-                              nullptr));
-  EXPECT_TRUE(response.update_exists);
-  EXPECT_EQ(fake_update_response_.current_version, response.version);
-}
-
-}  // namespace chromeos_update_engine
diff --git a/omaha_response_handler_action.cc b/omaha_response_handler_action.cc
deleted file mode 100644
index ab41b84..0000000
--- a/omaha_response_handler_action.cc
+++ /dev/null
@@ -1,247 +0,0 @@
-//
-// Copyright (C) 2011 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/omaha_response_handler_action.h"
-
-#include <limits>
-#include <string>
-
-#include <base/logging.h>
-#include <base/strings/string_number_conversions.h>
-#include <policy/device_policy.h>
-
-#include "update_engine/common/constants.h"
-#include "update_engine/common/hardware_interface.h"
-#include "update_engine/common/prefs_interface.h"
-#include "update_engine/common/utils.h"
-#include "update_engine/connection_manager_interface.h"
-#include "update_engine/omaha_request_params.h"
-#include "update_engine/payload_consumer/delta_performer.h"
-#include "update_engine/payload_state_interface.h"
-#include "update_engine/update_manager/policy.h"
-#include "update_engine/update_manager/update_manager.h"
-
-using chromeos_update_manager::Policy;
-using chromeos_update_manager::UpdateManager;
-using std::numeric_limits;
-using std::string;
-
-namespace chromeos_update_engine {
-
-OmahaResponseHandlerAction::OmahaResponseHandlerAction(
-    SystemState* system_state)
-    : system_state_(system_state),
-      deadline_file_(constants::kOmahaResponseDeadlineFile) {}
-
-void OmahaResponseHandlerAction::PerformAction() {
-  CHECK(HasInputObject());
-  ScopedActionCompleter completer(processor_, this);
-  const OmahaResponse& response = GetInputObject();
-  if (!response.update_exists) {
-    LOG(INFO) << "There are no updates. Aborting.";
-    completer.set_code(ErrorCode::kNoUpdate);
-    return;
-  }
-
-  // All decisions as to which URL should be used have already been made, so
-  // use the current URL as the download URL.
-  string current_url = system_state_->payload_state()->GetCurrentUrl();
-  if (current_url.empty()) {
-    // This shouldn't happen as we should always supply the HTTPS backup URL.
-    // Handling this anyway, just in case.
-    LOG(ERROR) << "There are no suitable URLs in the response to use.";
-    completer.set_code(ErrorCode::kOmahaResponseInvalid);
-    return;
-  }
-
-  // This is the URL of the first package, not of all packages.
-  install_plan_.download_url = current_url;
-  install_plan_.version = response.version;
-  install_plan_.system_version = response.system_version;
-
-  OmahaRequestParams* const params = system_state_->request_params();
-  PayloadStateInterface* const payload_state = system_state_->payload_state();
-
-  // If we're using p2p to download and there is a local peer, use it.
-  if (payload_state->GetUsingP2PForDownloading() &&
-      !payload_state->GetP2PUrl().empty()) {
-    LOG(INFO) << "Replacing URL " << install_plan_.download_url
-              << " with local URL " << payload_state->GetP2PUrl()
-              << " since p2p is enabled.";
-    install_plan_.download_url = payload_state->GetP2PUrl();
-    payload_state->SetUsingP2PForDownloading(true);
-  }
-
-  // Fill in the other properties based on the response.
-  string update_check_response_hash;
-  for (const auto& package : response.packages) {
-    brillo::Blob raw_hash;
-    if (!base::HexStringToBytes(package.hash, &raw_hash)) {
-      LOG(ERROR) << "Failed to convert payload hash from hex string to bytes: "
-                 << package.hash;
-      completer.set_code(ErrorCode::kOmahaResponseInvalid);
-      return;
-    }
-    install_plan_.payloads.push_back(
-        {.size = package.size,
-         .metadata_size = package.metadata_size,
-         .metadata_signature = package.metadata_signature,
-         .hash = raw_hash,
-         .type = package.is_delta ? InstallPayloadType::kDelta
-                                  : InstallPayloadType::kFull});
-    update_check_response_hash += package.hash + ":";
-  }
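-  // The concatenated hash string (e.g. "<hash1>:<hash2>:" for a two-package
-  // response) identifies this particular set of payloads; it is compared
-  // against the persisted kPrefsUpdateCheckResponseHash to decide whether an
-  // interrupted download of the same payloads can be resumed.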
-  install_plan_.public_key_rsa = response.public_key_rsa;
-  install_plan_.hash_checks_mandatory = AreHashChecksMandatory(response);
-  install_plan_.is_resume = DeltaPerformer::CanResumeUpdate(
-      system_state_->prefs(), update_check_response_hash);
-  if (install_plan_.is_resume) {
-    payload_state->UpdateResumed();
-  } else {
-    payload_state->UpdateRestarted();
-    LOG_IF(WARNING,
-           !DeltaPerformer::ResetUpdateProgress(system_state_->prefs(), false))
-        << "Unable to reset the update progress.";
-    LOG_IF(WARNING,
-           !system_state_->prefs()->SetString(kPrefsUpdateCheckResponseHash,
-                                              update_check_response_hash))
-        << "Unable to save the update check response hash.";
-  }
-
-  if (params->is_install()) {
-    install_plan_.target_slot = system_state_->boot_control()->GetCurrentSlot();
-    install_plan_.source_slot = BootControlInterface::kInvalidSlot;
-  } else {
-    install_plan_.source_slot = system_state_->boot_control()->GetCurrentSlot();
-    install_plan_.target_slot = install_plan_.source_slot == 0 ? 1 : 0;
-  }
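-  // For example, on an A/B device currently booted from slot 0, a regular
-  // update targets slot 1, while an install writes to the running slot 0 and
-  // has no source slot.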
-
-  // The Omaha response doesn't include the channel name for this image, so we
-  // use the download_channel we used during the request to tag the target
-  // slot. This will be used on the next boot to know which channel the image
-  // was downloaded from.
-  string current_channel_key =
-      kPrefsChannelOnSlotPrefix + std::to_string(install_plan_.target_slot);
-  system_state_->prefs()->SetString(current_channel_key,
-                                    params->download_channel());
-
-  // Check whether the device is able to boot up the returned rollback image.
-  if (response.is_rollback) {
-    if (!params->rollback_allowed()) {
-      LOG(ERROR) << "Received rollback image but rollback is not allowed.";
-      completer.set_code(ErrorCode::kOmahaResponseInvalid);
-      return;
-    }
-    auto min_kernel_key_version = static_cast<uint32_t>(
-        system_state_->hardware()->GetMinKernelKeyVersion());
-    auto min_firmware_key_version = static_cast<uint32_t>(
-        system_state_->hardware()->GetMinFirmwareKeyVersion());
-    uint32_t kernel_key_version =
-        static_cast<uint32_t>(response.rollback_key_version.kernel_key) << 16 |
-        static_cast<uint32_t>(response.rollback_key_version.kernel);
-    uint32_t firmware_key_version =
-        static_cast<uint32_t>(response.rollback_key_version.firmware_key)
-            << 16 |
-        static_cast<uint32_t>(response.rollback_key_version.firmware);
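-    // Both values pack the 16-bit key version into the high half-word and
-    // the 16-bit version into the low one, e.g. kernel_key = 1 and
-    // kernel = 2 yield 0x00010002, matching the encoding used for the
-    // minimum versions reported by the hardware interface.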
-
-    // Don't attempt a rollback if the versions are incompatible or the
-    // target image does not specify the version information.
-    if (kernel_key_version == numeric_limits<uint32_t>::max() ||
-        firmware_key_version == numeric_limits<uint32_t>::max() ||
-        kernel_key_version < min_kernel_key_version ||
-        firmware_key_version < min_firmware_key_version) {
-      LOG(ERROR) << "Device won't be able to boot up the rollback image.";
-      completer.set_code(ErrorCode::kRollbackNotPossible);
-      return;
-    }
-    install_plan_.is_rollback = true;
-  }
-
-  if (response.powerwash_required || params->ShouldPowerwash())
-    install_plan_.powerwash_required = true;
-
-  TEST_AND_RETURN(HasOutputPipe());
-  if (HasOutputPipe())
-    SetOutputObject(install_plan_);
-  LOG(INFO) << "Using this install plan:";
-  install_plan_.Dump();
-
-  // Send the deadline data (if any) to Chrome through a file. This is a pretty
-  // hacky solution but should be OK for now.
-  //
-  // TODO(petkov): Re-architect this to avoid communication through a
-  // file. Ideally, we would include this information in D-Bus's GetStatus
-  // method and UpdateStatus signal. A potential issue is that update_engine may
-  // be unresponsive during an update download.
-  if (!deadline_file_.empty()) {
-    if (payload_state->GetRollbackHappened()) {
-      // Don't do a forced update if a rollback has happened since the last
-      // update check where policy was present.
-      LOG(INFO) << "Not forcing update because a rollback happened.";
-      utils::WriteFile(deadline_file_.c_str(), nullptr, 0);
-    } else {
-      utils::WriteFile(deadline_file_.c_str(),
-                       response.deadline.data(),
-                       response.deadline.size());
-    }
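-    // 0644: owner read/write, group/other read-only, so that Chrome can read
-    // the deadline file written by update_engine.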
-    chmod(deadline_file_.c_str(), S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
-  }
-
-  // Check the generated install-plan with the Policy to confirm that
-  // it can be applied at this time (or at all).
-  UpdateManager* const update_manager = system_state_->update_manager();
-  CHECK(update_manager);
-  auto ec = ErrorCode::kSuccess;
-  update_manager->PolicyRequest(
-      &Policy::UpdateCanBeApplied, &ec, &install_plan_);
-  completer.set_code(ec);
-}
-
-bool OmahaResponseHandlerAction::AreHashChecksMandatory(
-    const OmahaResponse& response) {
-  // We sometimes need to waive the hash checks in order to download from
-  // sources that don't provide hashes, such as a dev server.
-  // At this point UpdateAttempter::IsAnyUpdateSourceAllowed() has already been
-  // checked, so an unofficial update URL won't get this far unless it's OK to
-  // use without a hash. Additionally, we want to always waive hash checks on
-  // unofficial builds (i.e. dev/test images).
-  // The end result is this:
-  //  * Base image:
-  //    - Official URLs require a hash.
-  //    - Unofficial URLs only get this far if the IsAnyUpdateSourceAllowed()
-  //      devmode/debugd checks pass, in which case the hash is waived.
-  //  * Dev/test image:
-  //    - Any URL is allowed through with no hash checking.
-  if (!system_state_->request_params()->IsUpdateUrlOfficial() ||
-      !system_state_->hardware()->IsOfficialBuild()) {
-    // Still do a hash check if a public key is included.
-    if (!response.public_key_rsa.empty()) {
-      // The autoupdate_CatchBadSignatures test checks for this string
-      // in log-files. Keep in sync.
-      LOG(INFO) << "Mandating payload hash checks since Omaha Response "
-                << "for unofficial build includes public RSA key.";
-      return true;
-    } else {
-      LOG(INFO) << "Waiving payload hash checks for unofficial update URL.";
-      return false;
-    }
-  }
-
-  LOG(INFO) << "Mandating hash checks for official URL on official build.";
-  return true;
-}
-
-}  // namespace chromeos_update_engine
diff --git a/omaha_response_handler_action_unittest.cc b/omaha_response_handler_action_unittest.cc
deleted file mode 100644
index b47040b..0000000
--- a/omaha_response_handler_action_unittest.cc
+++ /dev/null
@@ -1,688 +0,0 @@
-//
-// Copyright (C) 2011 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/omaha_response_handler_action.h"
-
-#include <memory>
-#include <string>
-#include <utility>
-
-#include <base/files/file_util.h>
-#include <base/files/scoped_temp_dir.h>
-#include <brillo/message_loops/fake_message_loop.h>
-#include <gtest/gtest.h>
-
-#include "update_engine/common/constants.h"
-#include "update_engine/common/platform_constants.h"
-#include "update_engine/common/test_utils.h"
-#include "update_engine/common/utils.h"
-#include "update_engine/fake_system_state.h"
-#include "update_engine/mock_payload_state.h"
-#include "update_engine/payload_consumer/payload_constants.h"
-#include "update_engine/update_manager/mock_policy.h"
-
-using chromeos_update_engine::test_utils::System;
-using chromeos_update_engine::test_utils::WriteFileString;
-using chromeos_update_manager::EvalStatus;
-using chromeos_update_manager::FakeUpdateManager;
-using chromeos_update_manager::MockPolicy;
-using std::string;
-using testing::_;
-using testing::DoAll;
-using testing::Return;
-using testing::SetArgPointee;
-
-namespace chromeos_update_engine {
-
-class OmahaResponseHandlerActionProcessorDelegate
-    : public ActionProcessorDelegate {
- public:
-  OmahaResponseHandlerActionProcessorDelegate()
-      : code_(ErrorCode::kError), code_set_(false) {}
-  void ActionCompleted(ActionProcessor* processor,
-                       AbstractAction* action,
-                       ErrorCode code) {
-    if (action->Type() == OmahaResponseHandlerAction::StaticType()) {
-      auto response_handler_action =
-          static_cast<OmahaResponseHandlerAction*>(action);
-      code_ = code;
-      code_set_ = true;
-      response_handler_action_install_plan_.reset(
-          new InstallPlan(response_handler_action->install_plan_));
-    } else if (action->Type() ==
-               ObjectCollectorAction<InstallPlan>::StaticType()) {
-      auto collector_action =
-          static_cast<ObjectCollectorAction<InstallPlan>*>(action);
-      collector_action_install_plan_.reset(
-          new InstallPlan(collector_action->object()));
-    }
-  }
-  ErrorCode code_;
-  bool code_set_;
-  std::unique_ptr<InstallPlan> collector_action_install_plan_;
-  std::unique_ptr<InstallPlan> response_handler_action_install_plan_;
-};
-
-class OmahaResponseHandlerActionTest : public ::testing::Test {
- protected:
-  void SetUp() override {
-    FakeBootControl* fake_boot_control = fake_system_state_.fake_boot_control();
-    fake_boot_control->SetPartitionDevice(kPartitionNameKernel, 0, "/dev/sdz2");
-    fake_boot_control->SetPartitionDevice(kPartitionNameRoot, 0, "/dev/sdz3");
-    fake_boot_control->SetPartitionDevice(kPartitionNameKernel, 1, "/dev/sdz4");
-    fake_boot_control->SetPartitionDevice(kPartitionNameRoot, 1, "/dev/sdz5");
-  }
-
-  // Return true iff the OmahaResponseHandlerAction succeeded.
-  // If out is non-null, it's set with the install plan produced by the action.
-  bool DoTest(const OmahaResponse& in,
-              const string& deadline_file,
-              InstallPlan* out);
-
-  // Delegate passed to the ActionProcessor.
-  OmahaResponseHandlerActionProcessorDelegate delegate_;
-
-  // Captures the action's result code, for tests that need to directly verify
-  // it in non-success cases.
-  ErrorCode action_result_code_;
-
-  FakeSystemState fake_system_state_;
-  // "Hash+"
-  const brillo::Blob expected_hash_ = {0x48, 0x61, 0x73, 0x68, 0x2b};
-};
-
-namespace {
-const char* const kLongName =
-    "very_long_name_and_no_slashes-very_long_name_and_no_slashes"
-    "very_long_name_and_no_slashes-very_long_name_and_no_slashes"
-    "very_long_name_and_no_slashes-very_long_name_and_no_slashes"
-    "very_long_name_and_no_slashes-very_long_name_and_no_slashes"
-    "very_long_name_and_no_slashes-very_long_name_and_no_slashes"
-    "very_long_name_and_no_slashes-very_long_name_and_no_slashes"
-    "very_long_name_and_no_slashes-very_long_name_and_no_slashes"
-    "-the_update_a.b.c.d_DELTA_.tgz";
-const char* const kBadVersion = "don't update me";
-const char* const kPayloadHashHex = "486173682b";
-}  // namespace
-
-bool OmahaResponseHandlerActionTest::DoTest(const OmahaResponse& in,
-                                            const string& test_deadline_file,
-                                            InstallPlan* out) {
-  brillo::FakeMessageLoop loop(nullptr);
-  loop.SetAsCurrent();
-  ActionProcessor processor;
-  processor.set_delegate(&delegate_);
-
-  auto feeder_action = std::make_unique<ObjectFeederAction<OmahaResponse>>();
-  feeder_action->set_obj(in);
-  if (in.update_exists && in.version != kBadVersion) {
-    string expected_hash;
-    for (const auto& package : in.packages)
-      expected_hash += package.hash + ":";
-    EXPECT_CALL(*(fake_system_state_.mock_prefs()),
-                SetString(kPrefsUpdateCheckResponseHash, expected_hash))
-        .WillOnce(Return(true));
-
-    int slot =
-        fake_system_state_.request_params()->is_install()
-            ? fake_system_state_.fake_boot_control()->GetCurrentSlot()
-            : 1 - fake_system_state_.fake_boot_control()->GetCurrentSlot();
-    string key = kPrefsChannelOnSlotPrefix + std::to_string(slot);
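-    // Mirror the action's slot selection: an install writes the channel pref
-    // for the current slot, while an update writes it for the other slot.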
-    EXPECT_CALL(*(fake_system_state_.mock_prefs()), SetString(key, testing::_))
-        .WillOnce(Return(true));
-  }
-
-  string current_url = in.packages.size() ? in.packages[0].payload_urls[0] : "";
-  EXPECT_CALL(*(fake_system_state_.mock_payload_state()), GetCurrentUrl())
-      .WillRepeatedly(Return(current_url));
-
-  auto response_handler_action =
-      std::make_unique<OmahaResponseHandlerAction>(&fake_system_state_);
-  if (!test_deadline_file.empty())
-    response_handler_action->deadline_file_ = test_deadline_file;
-
-  auto collector_action =
-      std::make_unique<ObjectCollectorAction<InstallPlan>>();
-
-  BondActions(feeder_action.get(), response_handler_action.get());
-  BondActions(response_handler_action.get(), collector_action.get());
-  processor.EnqueueAction(std::move(feeder_action));
-  processor.EnqueueAction(std::move(response_handler_action));
-  processor.EnqueueAction(std::move(collector_action));
-  processor.StartProcessing();
-  EXPECT_TRUE(!processor.IsRunning())
-      << "Update test to handle non-async actions";
-
-  if (out && delegate_.collector_action_install_plan_)
-    *out = *delegate_.collector_action_install_plan_;
-
-  EXPECT_TRUE(delegate_.code_set_);
-  action_result_code_ = delegate_.code_;
-  return delegate_.code_ == ErrorCode::kSuccess;
-}
-
-TEST_F(OmahaResponseHandlerActionTest, SimpleTest) {
-  test_utils::ScopedTempFile test_deadline_file(
-      "omaha_response_handler_action_unittest-XXXXXX");
-  {
-    OmahaResponse in;
-    in.update_exists = true;
-    in.version = "a.b.c.d";
-    in.packages.push_back(
-        {.payload_urls = {"http://foo/the_update_a.b.c.d.tgz"},
-         .size = 12,
-         .hash = kPayloadHashHex});
-    in.more_info_url = "http://more/info";
-    in.prompt = false;
-    in.deadline = "20101020";
-    InstallPlan install_plan;
-    EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan));
-    EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
-    EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
-    EXPECT_EQ(1U, install_plan.target_slot);
-    string deadline;
-    EXPECT_TRUE(utils::ReadFile(test_deadline_file.path(), &deadline));
-    EXPECT_EQ("20101020", deadline);
-    struct stat deadline_stat;
-    EXPECT_EQ(0, stat(test_deadline_file.path().c_str(), &deadline_stat));
-    EXPECT_EQ(
-        static_cast<mode_t>(S_IFREG | S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH),
-        deadline_stat.st_mode);
-    EXPECT_EQ(in.version, install_plan.version);
-  }
-  {
-    OmahaResponse in;
-    in.update_exists = true;
-    in.version = "a.b.c.d";
-    in.packages.push_back(
-        {.payload_urls = {"http://foo/the_update_a.b.c.d.tgz"},
-         .size = 12,
-         .hash = kPayloadHashHex});
-    in.more_info_url = "http://more/info";
-    in.prompt = true;
-    InstallPlan install_plan;
-    // Set the other slot as current.
-    fake_system_state_.fake_boot_control()->SetCurrentSlot(1);
-    EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan));
-    EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
-    EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
-    EXPECT_EQ(0U, install_plan.target_slot);
-    string deadline;
-    EXPECT_TRUE(utils::ReadFile(test_deadline_file.path(), &deadline) &&
-                deadline.empty());
-    EXPECT_EQ(in.version, install_plan.version);
-  }
-  {
-    OmahaResponse in;
-    in.update_exists = true;
-    in.version = "a.b.c.d";
-    in.packages.push_back(
-        {.payload_urls = {kLongName}, .size = 12, .hash = kPayloadHashHex});
-    in.more_info_url = "http://more/info";
-    in.prompt = true;
-    in.deadline = "some-deadline";
-    InstallPlan install_plan;
-    fake_system_state_.fake_boot_control()->SetCurrentSlot(0);
-    // Because rollback happened, the deadline shouldn't be written into the
-    // file.
-    EXPECT_CALL(*(fake_system_state_.mock_payload_state()),
-                GetRollbackHappened())
-        .WillOnce(Return(true));
-    EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan));
-    EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
-    EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
-    EXPECT_EQ(1U, install_plan.target_slot);
-    string deadline;
-    EXPECT_TRUE(utils::ReadFile(test_deadline_file.path(), &deadline));
-    EXPECT_TRUE(deadline.empty());
-    EXPECT_EQ(in.version, install_plan.version);
-  }
-  {
-    OmahaResponse in;
-    in.update_exists = true;
-    in.version = "a.b.c.d";
-    in.packages.push_back(
-        {.payload_urls = {kLongName}, .size = 12, .hash = kPayloadHashHex});
-    in.more_info_url = "http://more/info";
-    in.prompt = true;
-    in.deadline = "some-deadline";
-    InstallPlan install_plan;
-    fake_system_state_.fake_boot_control()->SetCurrentSlot(0);
-    EXPECT_CALL(*(fake_system_state_.mock_payload_state()),
-                GetRollbackHappened())
-        .WillOnce(Return(false));
-    EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan));
-    EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
-    EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
-    EXPECT_EQ(1U, install_plan.target_slot);
-    string deadline;
-    EXPECT_TRUE(utils::ReadFile(test_deadline_file.path(), &deadline));
-    EXPECT_EQ("some-deadline", deadline);
-    EXPECT_EQ(in.version, install_plan.version);
-  }
-}
-
-TEST_F(OmahaResponseHandlerActionTest, NoUpdatesTest) {
-  OmahaResponse in;
-  in.update_exists = false;
-  InstallPlan install_plan;
-  EXPECT_FALSE(DoTest(in, "", &install_plan));
-  EXPECT_TRUE(install_plan.partitions.empty());
-}
-
-TEST_F(OmahaResponseHandlerActionTest, InstallTest) {
-  OmahaResponse in;
-  in.update_exists = true;
-  in.version = "a.b.c.d";
-  in.packages.push_back(
-      {.payload_urls = {kLongName}, .size = 1, .hash = kPayloadHashHex});
-  in.packages.push_back(
-      {.payload_urls = {kLongName}, .size = 2, .hash = kPayloadHashHex});
-  in.more_info_url = "http://more/info";
-
-  OmahaRequestParams params(&fake_system_state_);
-  params.set_is_install(true);
-
-  fake_system_state_.set_request_params(&params);
-  InstallPlan install_plan;
-  EXPECT_TRUE(DoTest(in, "", &install_plan));
-  EXPECT_EQ(install_plan.source_slot, UINT_MAX);
-}
-
-TEST_F(OmahaResponseHandlerActionTest, MultiPackageTest) {
-  OmahaResponse in;
-  in.update_exists = true;
-  in.version = "a.b.c.d";
-  in.packages.push_back({.payload_urls = {"http://package/1"},
-                         .size = 1,
-                         .hash = kPayloadHashHex});
-  in.packages.push_back({.payload_urls = {"http://package/2"},
-                         .size = 2,
-                         .hash = kPayloadHashHex});
-  in.more_info_url = "http://more/info";
-  InstallPlan install_plan;
-  EXPECT_TRUE(DoTest(in, "", &install_plan));
-  EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
-  EXPECT_EQ(2u, install_plan.payloads.size());
-  EXPECT_EQ(in.packages[0].size, install_plan.payloads[0].size);
-  EXPECT_EQ(in.packages[1].size, install_plan.payloads[1].size);
-  EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
-  EXPECT_EQ(expected_hash_, install_plan.payloads[1].hash);
-  EXPECT_EQ(in.version, install_plan.version);
-}
-
-TEST_F(OmahaResponseHandlerActionTest, HashChecksForHttpTest) {
-  OmahaResponse in;
-  in.update_exists = true;
-  in.version = "a.b.c.d";
-  in.packages.push_back(
-      {.payload_urls = {"http://test.should/need/hash.checks.signed"},
-       .size = 12,
-       .hash = kPayloadHashHex});
-  in.more_info_url = "http://more/info";
-  // Hash checks are always skipped for non-official update URLs, so mark the
-  // update URL as official to exercise the mandatory-hash-check path.
-  EXPECT_CALL(*(fake_system_state_.mock_request_params()),
-              IsUpdateUrlOfficial())
-      .WillRepeatedly(Return(true));
-  InstallPlan install_plan;
-  EXPECT_TRUE(DoTest(in, "", &install_plan));
-  EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
-  EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
-  EXPECT_TRUE(install_plan.hash_checks_mandatory);
-  EXPECT_EQ(in.version, install_plan.version);
-}
-
-TEST_F(OmahaResponseHandlerActionTest, HashChecksForUnofficialUpdateUrl) {
-  OmahaResponse in;
-  in.update_exists = true;
-  in.version = "a.b.c.d";
-  in.packages.push_back(
-      {.payload_urls = {"http://url.normally/needs/hash.checks.signed"},
-       .size = 12,
-       .hash = kPayloadHashHex});
-  in.more_info_url = "http://more/info";
-  EXPECT_CALL(*(fake_system_state_.mock_request_params()),
-              IsUpdateUrlOfficial())
-      .WillRepeatedly(Return(false));
-  InstallPlan install_plan;
-  EXPECT_TRUE(DoTest(in, "", &install_plan));
-  EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
-  EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
-  EXPECT_FALSE(install_plan.hash_checks_mandatory);
-  EXPECT_EQ(in.version, install_plan.version);
-}
-
-TEST_F(OmahaResponseHandlerActionTest,
-       HashChecksForOfficialUrlUnofficialBuildTest) {
-  // Official URLs for unofficial builds (dev/test images) don't require a
-  // hash.
-  OmahaResponse in;
-  in.update_exists = true;
-  in.version = "a.b.c.d";
-  in.packages.push_back(
-      {.payload_urls = {"http://url.normally/needs/hash.checks.signed"},
-       .size = 12,
-       .hash = kPayloadHashHex});
-  in.more_info_url = "http://more/info";
-  EXPECT_CALL(*(fake_system_state_.mock_request_params()),
-              IsUpdateUrlOfficial())
-      .WillRepeatedly(Return(true));
-  fake_system_state_.fake_hardware()->SetIsOfficialBuild(false);
-  InstallPlan install_plan;
-  EXPECT_TRUE(DoTest(in, "", &install_plan));
-  EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
-  EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
-  EXPECT_FALSE(install_plan.hash_checks_mandatory);
-  EXPECT_EQ(in.version, install_plan.version);
-}
-
-TEST_F(OmahaResponseHandlerActionTest, HashChecksForHttpsTest) {
-  OmahaResponse in;
-  in.update_exists = true;
-  in.version = "a.b.c.d";
-  in.packages.push_back(
-      {.payload_urls = {"https://test.should/need/hash.checks.signed"},
-       .size = 12,
-       .hash = kPayloadHashHex});
-  in.more_info_url = "http://more/info";
-  EXPECT_CALL(*(fake_system_state_.mock_request_params()),
-              IsUpdateUrlOfficial())
-      .WillRepeatedly(Return(true));
-  InstallPlan install_plan;
-  EXPECT_TRUE(DoTest(in, "", &install_plan));
-  EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
-  EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
-  EXPECT_TRUE(install_plan.hash_checks_mandatory);
-  EXPECT_EQ(in.version, install_plan.version);
-}
-
-TEST_F(OmahaResponseHandlerActionTest, HashChecksForBothHttpAndHttpsTest) {
-  OmahaResponse in;
-  in.update_exists = true;
-  in.version = "a.b.c.d";
-  in.packages.push_back(
-      {.payload_urls = {"http://test.should.still/need/hash.checks",
-                        "https://test.should.still/need/hash.checks"},
-       .size = 12,
-       .hash = kPayloadHashHex});
-  in.more_info_url = "http://more/info";
-  EXPECT_CALL(*(fake_system_state_.mock_request_params()),
-              IsUpdateUrlOfficial())
-      .WillRepeatedly(Return(true));
-  InstallPlan install_plan;
-  EXPECT_TRUE(DoTest(in, "", &install_plan));
-  EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
-  EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
-  EXPECT_TRUE(install_plan.hash_checks_mandatory);
-  EXPECT_EQ(in.version, install_plan.version);
-}
-
-TEST_F(OmahaResponseHandlerActionTest, ChangeToMoreStableChannelTest) {
-  OmahaResponse in;
-  in.update_exists = true;
-  in.version = "a.b.c.d";
-  in.packages.push_back({.payload_urls = {"https://MoreStableChannelTest"},
-                         .size = 1,
-                         .hash = kPayloadHashHex});
-  in.more_info_url = "http://more/info";
-
-  // Create a uniquely named test directory.
-  base::ScopedTempDir tempdir;
-  ASSERT_TRUE(tempdir.CreateUniqueTempDir());
-
-  OmahaRequestParams params(&fake_system_state_);
-  fake_system_state_.fake_hardware()->SetIsOfficialBuild(false);
-  params.set_root(tempdir.GetPath().value());
-  params.set_current_channel("canary-channel");
-  // The ImageProperties in Android uses prefs to store MutableImageProperties.
-#ifdef __ANDROID__
-  EXPECT_CALL(*fake_system_state_.mock_prefs(), SetBoolean(_, true))
-      .WillOnce(Return(true));
-#endif  // __ANDROID__
-  EXPECT_TRUE(params.SetTargetChannel("stable-channel", true, nullptr));
-  params.UpdateDownloadChannel();
-  EXPECT_TRUE(params.ShouldPowerwash());
-
-  fake_system_state_.set_request_params(&params);
-  InstallPlan install_plan;
-  EXPECT_TRUE(DoTest(in, "", &install_plan));
-  EXPECT_TRUE(install_plan.powerwash_required);
-}
-
-TEST_F(OmahaResponseHandlerActionTest, ChangeToLessStableChannelTest) {
-  OmahaResponse in;
-  in.update_exists = true;
-  in.version = "a.b.c.d";
-  in.packages.push_back({.payload_urls = {"https://LessStableChannelTest"},
-                         .size = 15,
-                         .hash = kPayloadHashHex});
-  in.more_info_url = "http://more/info";
-
-  // Create a uniquely named test directory.
-  base::ScopedTempDir tempdir;
-  ASSERT_TRUE(tempdir.CreateUniqueTempDir());
-
-  OmahaRequestParams params(&fake_system_state_);
-  fake_system_state_.fake_hardware()->SetIsOfficialBuild(false);
-  params.set_root(tempdir.GetPath().value());
-  params.set_current_channel("stable-channel");
-  // The ImageProperties in Android uses prefs to store MutableImageProperties.
-#ifdef __ANDROID__
-  EXPECT_CALL(*fake_system_state_.mock_prefs(), SetBoolean(_, false))
-      .WillOnce(Return(true));
-#endif  // __ANDROID__
-  EXPECT_TRUE(params.SetTargetChannel("canary-channel", false, nullptr));
-  params.UpdateDownloadChannel();
-  EXPECT_FALSE(params.ShouldPowerwash());
-
-  fake_system_state_.set_request_params(&params);
-  InstallPlan install_plan;
-  EXPECT_TRUE(DoTest(in, "", &install_plan));
-  EXPECT_FALSE(install_plan.powerwash_required);
-}
-
-TEST_F(OmahaResponseHandlerActionTest, P2PUrlIsUsedAndHashChecksMandatory) {
-  OmahaResponse in;
-  in.update_exists = true;
-  in.version = "a.b.c.d";
-  in.packages.push_back(
-      {.payload_urls = {"https://would.not/cause/hash/checks"},
-       .size = 12,
-       .hash = kPayloadHashHex});
-  in.more_info_url = "http://more/info";
-
-  OmahaRequestParams params(&fake_system_state_);
-  // We're using a real OmahaRequestParams object here so we can't mock
-  // IsUpdateUrlOfficial(), but setting the update URL to the AutoUpdate test
-  // server will cause IsUpdateUrlOfficial() to return true.
-  params.set_update_url(constants::kOmahaDefaultAUTestURL);
-  fake_system_state_.set_request_params(&params);
-
-  EXPECT_CALL(*fake_system_state_.mock_payload_state(),
-              SetUsingP2PForDownloading(true));
-
-  string p2p_url = "http://9.8.7.6/p2p";
-  EXPECT_CALL(*fake_system_state_.mock_payload_state(), GetP2PUrl())
-      .WillRepeatedly(Return(p2p_url));
-  EXPECT_CALL(*fake_system_state_.mock_payload_state(),
-              GetUsingP2PForDownloading())
-      .WillRepeatedly(Return(true));
-
-  InstallPlan install_plan;
-  EXPECT_TRUE(DoTest(in, "", &install_plan));
-  EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
-  EXPECT_EQ(p2p_url, install_plan.download_url);
-  EXPECT_TRUE(install_plan.hash_checks_mandatory);
-}
-
-TEST_F(OmahaResponseHandlerActionTest, RollbackTest) {
-  OmahaResponse in;
-  in.update_exists = true;
-  in.packages.push_back({.payload_urls = {"https://RollbackTest"},
-                         .size = 1,
-                         .hash = kPayloadHashHex});
-  in.is_rollback = true;
-  in.rollback_key_version.kernel_key = 1;
-  in.rollback_key_version.kernel = 2;
-  in.rollback_key_version.firmware_key = 3;
-  in.rollback_key_version.firmware = 4;
-
-  fake_system_state_.fake_hardware()->SetMinKernelKeyVersion(0x00010002);
-  fake_system_state_.fake_hardware()->SetMinFirmwareKeyVersion(0x00030004);
-
-  OmahaRequestParams params(&fake_system_state_);
-  params.set_rollback_allowed(true);
-
-  fake_system_state_.set_request_params(&params);
-  InstallPlan install_plan;
-  EXPECT_TRUE(DoTest(in, "", &install_plan));
-  EXPECT_TRUE(install_plan.is_rollback);
-}
-
-TEST_F(OmahaResponseHandlerActionTest, RollbackKernelVersionErrorTest) {
-  OmahaResponse in;
-  in.update_exists = true;
-  in.packages.push_back({.payload_urls = {"https://RollbackTest"},
-                         .size = 1,
-                         .hash = kPayloadHashHex});
-  in.is_rollback = true;
-  in.rollback_key_version.kernel_key = 1;
-  in.rollback_key_version.kernel = 1;  // This is lower than the minimum.
-  in.rollback_key_version.firmware_key = 3;
-  in.rollback_key_version.firmware = 4;
-
-  fake_system_state_.fake_hardware()->SetMinKernelKeyVersion(0x00010002);
-  fake_system_state_.fake_hardware()->SetMinFirmwareKeyVersion(0x00030004);
-
-  OmahaRequestParams params(&fake_system_state_);
-  params.set_rollback_allowed(true);
-
-  fake_system_state_.set_request_params(&params);
-  InstallPlan install_plan;
-  EXPECT_FALSE(DoTest(in, "", &install_plan));
-}
-
-TEST_F(OmahaResponseHandlerActionTest, RollbackFirmwareVersionErrorTest) {
-  OmahaResponse in;
-  in.update_exists = true;
-  in.packages.push_back({.payload_urls = {"https://RollbackTest"},
-                         .size = 1,
-                         .hash = kPayloadHashHex});
-  in.is_rollback = true;
-  in.rollback_key_version.kernel_key = 1;
-  in.rollback_key_version.kernel = 2;
-  in.rollback_key_version.firmware_key = 3;
-  in.rollback_key_version.firmware = 3;  // This is lower than the minimum.
-
-  fake_system_state_.fake_hardware()->SetMinKernelKeyVersion(0x00010002);
-  fake_system_state_.fake_hardware()->SetMinFirmwareKeyVersion(0x00030004);
-
-  OmahaRequestParams params(&fake_system_state_);
-  params.set_rollback_allowed(true);
-
-  fake_system_state_.set_request_params(&params);
-  InstallPlan install_plan;
-  EXPECT_FALSE(DoTest(in, "", &install_plan));
-}
-
-TEST_F(OmahaResponseHandlerActionTest, RollbackNotRollbackTest) {
-  OmahaResponse in;
-  in.update_exists = true;
-  in.packages.push_back({.payload_urls = {"https://RollbackTest"},
-                         .size = 1,
-                         .hash = kPayloadHashHex});
-  in.is_rollback = false;
-
-  OmahaRequestParams params(&fake_system_state_);
-  params.set_rollback_allowed(true);
-
-  fake_system_state_.set_request_params(&params);
-  InstallPlan install_plan;
-  EXPECT_TRUE(DoTest(in, "", &install_plan));
-  EXPECT_FALSE(install_plan.is_rollback);
-}
-
-TEST_F(OmahaResponseHandlerActionTest, RollbackNotAllowedTest) {
-  OmahaResponse in;
-  in.update_exists = true;
-  in.packages.push_back({.payload_urls = {"https://RollbackTest"},
-                         .size = 1,
-                         .hash = kPayloadHashHex});
-  in.is_rollback = true;
-
-  OmahaRequestParams params(&fake_system_state_);
-  params.set_rollback_allowed(false);
-
-  fake_system_state_.set_request_params(&params);
-  InstallPlan install_plan;
-  EXPECT_FALSE(DoTest(in, "", &install_plan));
-}
-
-TEST_F(OmahaResponseHandlerActionTest, SystemVersionTest) {
-  OmahaResponse in;
-  in.update_exists = true;
-  in.version = "a.b.c.d";
-  in.system_version = "b.c.d.e";
-  in.packages.push_back({.payload_urls = {"http://package/1"},
-                         .size = 1,
-                         .hash = kPayloadHashHex});
-  in.packages.push_back({.payload_urls = {"http://package/2"},
-                         .size = 2,
-                         .hash = kPayloadHashHex});
-  in.more_info_url = "http://more/info";
-  InstallPlan install_plan;
-  EXPECT_TRUE(DoTest(in, "", &install_plan));
-  EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
-  EXPECT_EQ(2u, install_plan.payloads.size());
-  EXPECT_EQ(in.packages[0].size, install_plan.payloads[0].size);
-  EXPECT_EQ(in.packages[1].size, install_plan.payloads[1].size);
-  EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
-  EXPECT_EQ(expected_hash_, install_plan.payloads[1].hash);
-  EXPECT_EQ(in.version, install_plan.version);
-  EXPECT_EQ(in.system_version, install_plan.system_version);
-}
-
-TEST_F(OmahaResponseHandlerActionTest, TestDeferredByPolicy) {
-  OmahaResponse in;
-  in.update_exists = true;
-  in.version = "a.b.c.d";
-  in.packages.push_back({.payload_urls = {"http://foo/the_update_a.b.c.d.tgz"},
-                         .size = 12,
-                         .hash = kPayloadHashHex});
-  // Set up the UpdateManager to disallow the update.
-  FakeClock fake_clock;
-  MockPolicy* mock_policy = new MockPolicy(&fake_clock);
-  FakeUpdateManager* fake_update_manager =
-      fake_system_state_.fake_update_manager();
-  fake_update_manager->set_policy(mock_policy);
-  EXPECT_CALL(*mock_policy, UpdateCanBeApplied(_, _, _, _, _))
-      .WillOnce(
-          DoAll(SetArgPointee<3>(ErrorCode::kOmahaUpdateDeferredPerPolicy),
-                Return(EvalStatus::kSucceeded)));
-  // Perform the Action. It should "fail" with kOmahaUpdateDeferredPerPolicy.
-  InstallPlan install_plan;
-  EXPECT_FALSE(DoTest(in, "", &install_plan));
-  EXPECT_EQ(ErrorCode::kOmahaUpdateDeferredPerPolicy, action_result_code_);
-  // Verify that DoTest() didn't set the output install plan.
-  EXPECT_EQ("", install_plan.version);
-  // Now verify the InstallPlan that was generated.
-  install_plan = *delegate_.response_handler_action_install_plan_;
-  EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
-  EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
-  EXPECT_EQ(1U, install_plan.target_slot);
-  EXPECT_EQ(in.version, install_plan.version);
-}
-
-}  // namespace chromeos_update_engine
diff --git a/omaha_utils.cc b/omaha_utils.cc
deleted file mode 100644
index 6bd7525..0000000
--- a/omaha_utils.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-//
-// Copyright (C) 2016 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/omaha_utils.h"
-
-#include <base/logging.h>
-
-namespace chromeos_update_engine {
-
-namespace {
-
-// The possible string values for the end-of-life status.
-const char kEolStatusSupported[] = "supported";
-const char kEolStatusSecurityOnly[] = "security-only";
-const char kEolStatusEol[] = "eol";
-
-}  // namespace
-
-const char* EolStatusToString(EolStatus eol_status) {
-  switch (eol_status) {
-    case EolStatus::kSupported:
-      return kEolStatusSupported;
-    case EolStatus::kSecurityOnly:
-      return kEolStatusSecurityOnly;
-    case EolStatus::kEol:
-      return kEolStatusEol;
-  }
-  // Only reached if an invalid number is cast to |EolStatus|.
-  LOG(WARNING) << "Invalid EolStatus value: " << static_cast<int>(eol_status);
-  return kEolStatusSupported;
-}
-
-EolStatus StringToEolStatus(const std::string& eol_status) {
-  if (eol_status == kEolStatusSupported || eol_status.empty())
-    return EolStatus::kSupported;
-  if (eol_status == kEolStatusSecurityOnly)
-    return EolStatus::kSecurityOnly;
-  if (eol_status == kEolStatusEol)
-    return EolStatus::kEol;
-  LOG(WARNING) << "Invalid end-of-life attribute: " << eol_status;
-  return EolStatus::kSupported;
-}
-
-}  // namespace chromeos_update_engine
diff --git a/omaha_utils.h b/omaha_utils.h
deleted file mode 100644
index 8614540..0000000
--- a/omaha_utils.h
+++ /dev/null
@@ -1,40 +0,0 @@
-//
-// Copyright (C) 2016 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_OMAHA_UTILS_H_
-#define UPDATE_ENGINE_OMAHA_UTILS_H_
-
-#include <string>
-
-namespace chromeos_update_engine {
-
-// The end-of-life status of the device.
-enum class EolStatus {
-  kSupported = 0,
-  kSecurityOnly,
-  kEol,
-};
-
-// Returns the string representation of the |eol_status|.
-const char* EolStatusToString(EolStatus eol_status);
-
-// Converts the end-of-life status string to an EolStatus numeric value. In case
-// of an invalid string, the default "supported" value will be used instead.
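-// For example, StringToEolStatus("security-only") returns
-// EolStatus::kSecurityOnly, while an unrecognized string falls back to
-// EolStatus::kSupported.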
-EolStatus StringToEolStatus(const std::string& eol_status);
-
-}  // namespace chromeos_update_engine
-
-#endif  // UPDATE_ENGINE_OMAHA_UTILS_H_
diff --git a/parcelable_update_engine_status.cc b/parcelable_update_engine_status.cc
deleted file mode 100644
index 8a2dbeb..0000000
--- a/parcelable_update_engine_status.cc
+++ /dev/null
@@ -1,122 +0,0 @@
-//
-// Copyright (C) 2016 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/parcelable_update_engine_status.h"
-#include "update_engine/update_status_utils.h"
-
-#include <binder/Parcel.h>
-
-using update_engine::UpdateEngineStatus;
-
-namespace android {
-namespace brillo {
-
-ParcelableUpdateEngineStatus::ParcelableUpdateEngineStatus(
-    const UpdateEngineStatus& status)
-    : last_checked_time_(status.last_checked_time),
-      current_operation_(
-          chromeos_update_engine::UpdateStatusToString(status.status)),
-      progress_(status.progress),
-      current_version_(String16{status.current_version.c_str()}),
-      current_system_version_(String16{status.current_system_version.c_str()}),
-      new_size_(status.new_size_bytes),
-      new_version_(String16{status.new_version.c_str()}),
-      new_system_version_(String16{status.new_system_version.c_str()}) {}
-
-status_t ParcelableUpdateEngineStatus::writeToParcel(Parcel* parcel) const {
-  status_t status;
-
-  status = parcel->writeInt64(last_checked_time_);
-  if (status != OK) {
-    return status;
-  }
-
-  status = parcel->writeString16(current_operation_);
-  if (status != OK) {
-    return status;
-  }
-
-  status = parcel->writeDouble(progress_);
-  if (status != OK) {
-    return status;
-  }
-
-  status = parcel->writeString16(current_version_);
-  if (status != OK) {
-    return status;
-  }
-
-  status = parcel->writeString16(current_system_version_);
-  if (status != OK) {
-    return status;
-  }
-
-  status = parcel->writeInt64(new_size_);
-  if (status != OK) {
-    return status;
-  }
-
-  status = parcel->writeString16(new_version_);
-  if (status != OK) {
-    return status;
-  }
-
-  return parcel->writeString16(new_system_version_);
-}
-
-status_t ParcelableUpdateEngineStatus::readFromParcel(const Parcel* parcel) {
-  status_t status;
-
-  status = parcel->readInt64(&last_checked_time_);
-  if (status != OK) {
-    return status;
-  }
-
-  status = parcel->readString16(&current_operation_);
-  if (status != OK) {
-    return status;
-  }
-
-  status = parcel->readDouble(&progress_);
-  if (status != OK) {
-    return status;
-  }
-
-  status = parcel->readString16(&current_version_);
-  if (status != OK) {
-    return status;
-  }
-
-  status = parcel->readString16(&current_system_version_);
-  if (status != OK) {
-    return status;
-  }
-
-  status = parcel->readInt64(&new_size_);
-  if (status != OK) {
-    return status;
-  }
-
-  status = parcel->readString16(&new_version_);
-  if (status != OK) {
-    return status;
-  }
-
-  return parcel->readString16(&new_system_version_);
-}
-
-}  // namespace brillo
-}  // namespace android
diff --git a/parcelable_update_engine_status.h b/parcelable_update_engine_status.h
deleted file mode 100644
index 3feac76..0000000
--- a/parcelable_update_engine_status.h
+++ /dev/null
@@ -1,63 +0,0 @@
-//
-// Copyright (C) 2016 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_PARCELABLE_UPDATE_ENGINE_STATUS_H_
-#define UPDATE_ENGINE_PARCELABLE_UPDATE_ENGINE_STATUS_H_
-
-#include <binder/Parcelable.h>
-#include <utils/String16.h>
-
-#include "update_engine/client_library/include/update_engine/update_status.h"
-
-namespace android {
-namespace brillo {
-
-// Parcelable object containing the current status of update engine, to be sent
-// over binder to clients from the server.
-class ParcelableUpdateEngineStatus : public Parcelable {
- public:
-  ParcelableUpdateEngineStatus() = default;
-  explicit ParcelableUpdateEngineStatus(
-      const update_engine::UpdateEngineStatus& status);
-  virtual ~ParcelableUpdateEngineStatus() = default;
-
-  status_t writeToParcel(Parcel* parcel) const override;
-  status_t readFromParcel(const Parcel* parcel) override;
-
-  // This list is kept in the Parcelable serialization order.
-
-  // When the update_engine last checked for updates (seconds since unix Epoch)
-  int64_t last_checked_time_;
-  // The current status/operation of the update_engine.
-  android::String16 current_operation_;
-  // The current progress (0.0f-1.0f).
-  double progress_;
-  // The current product version.
-  android::String16 current_version_;
-  // The current system version.
-  android::String16 current_system_version_;
-  // The size of the update (bytes).  This is int64_t for java compatibility.
-  int64_t new_size_;
-  // The new product version.
-  android::String16 new_version_;
-  // The new system version, if there is one (empty, otherwise).
-  android::String16 new_system_version_;
-};
-
-}  // namespace brillo
-}  // namespace android
-
-#endif  // UPDATE_ENGINE_PARCELABLE_UPDATE_ENGINE_STATUS_H_
diff --git a/parcelable_update_engine_status_unittest.cc b/parcelable_update_engine_status_unittest.cc
deleted file mode 100644
index 20decb6..0000000
--- a/parcelable_update_engine_status_unittest.cc
+++ /dev/null
@@ -1,92 +0,0 @@
-//
-// Copyright (C) 2017 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/parcelable_update_engine_status.h"
-#include "update_engine/update_status_utils.h"
-
-#include <binder/Parcel.h>
-#include <gtest/gtest.h>
-
-using android::Parcel;
-using android::status_t;
-using android::String16;
-using android::brillo::ParcelableUpdateEngineStatus;
-using update_engine::UpdateEngineStatus;
-using update_engine::UpdateStatus;
-
-TEST(ParcelableUpdateEngineStatusTest, TestCreationFromUpdateEngineStatus) {
-  // This test creates an object and verifies that all the UpdateEngineStatus
-  // values are properly reflected in the Parcelable version of the class.
-
-  UpdateEngineStatus ue_status = {123456789,
-                                  UpdateStatus::DOWNLOADING,
-                                  "0.1.2.3",
-                                  "1.2.3.4",
-                                  0.5f,
-                                  34567,
-                                  "2.3.4.5",
-                                  "3.4.5.6"};
-  ParcelableUpdateEngineStatus parcelable_status(ue_status);
-  EXPECT_EQ(ue_status.last_checked_time, parcelable_status.last_checked_time_);
-  EXPECT_EQ(
-      String16{chromeos_update_engine::UpdateStatusToString(ue_status.status)},
-      parcelable_status.current_operation_);
-  EXPECT_EQ(String16{ue_status.current_version.c_str()},
-            parcelable_status.current_version_);
-  EXPECT_EQ(String16{ue_status.current_system_version.c_str()},
-            parcelable_status.current_system_version_);
-  EXPECT_EQ(ue_status.progress, parcelable_status.progress_);
-  EXPECT_EQ(static_cast<int64_t>(ue_status.new_size_bytes),
-            parcelable_status.new_size_);
-  EXPECT_EQ(String16{ue_status.new_version.c_str()},
-            parcelable_status.new_version_);
-  EXPECT_EQ(String16{ue_status.new_system_version.c_str()},
-            parcelable_status.new_system_version_);
-}
-
-TEST(ParcelableUpdateEngineStatusTest, TestParceling) {
-  // This tests the writeToParcel and readFromParcel methods for being correctly
-  // matched.
-  UpdateEngineStatus ue_status = {123456789,
-                                  UpdateStatus::DOWNLOADING,
-                                  "0.1.2.3",
-                                  "1.2.3.4",
-                                  0.5f,
-                                  34567,
-                                  "2.3.4.5",
-                                  "3.4.5.6"};
-  ParcelableUpdateEngineStatus source_status(ue_status);
-  Parcel parcel_source, parcel_target;
-  status_t status = source_status.writeToParcel(&parcel_source);
-  EXPECT_EQ(::android::OK, status);
-  size_t parcel_len = parcel_source.dataSize();
-  status = parcel_target.setData(parcel_source.data(), parcel_len);
-  EXPECT_EQ(::android::OK, status);
-  ParcelableUpdateEngineStatus target_status;
-  status = target_status.readFromParcel(&parcel_target);
-  EXPECT_EQ(::android::OK, status);
-
-  EXPECT_EQ(source_status.last_checked_time_, target_status.last_checked_time_);
-  EXPECT_EQ(source_status.current_operation_, target_status.current_operation_);
-  EXPECT_EQ(source_status.current_version_, target_status.current_version_);
-  EXPECT_EQ(source_status.current_system_version_,
-            target_status.current_system_version_);
-  EXPECT_EQ(source_status.progress_, target_status.progress_);
-  EXPECT_EQ(source_status.new_size_, target_status.new_size_);
-  EXPECT_EQ(source_status.new_version_, target_status.new_version_);
-  EXPECT_EQ(source_status.new_system_version_,
-            target_status.new_system_version_);
-}
diff --git a/payload_consumer/bzip_extent_writer.cc b/payload_consumer/bzip_extent_writer.cc
index 0c25c71..26fdc5f 100644
--- a/payload_consumer/bzip_extent_writer.cc
+++ b/payload_consumer/bzip_extent_writer.cc
@@ -29,8 +29,7 @@
   TEST_AND_RETURN(input_buffer_.empty());
 }
 
-bool BzipExtentWriter::Init(FileDescriptorPtr fd,
-                            const RepeatedPtrField<Extent>& extents,
+bool BzipExtentWriter::Init(const RepeatedPtrField<Extent>& extents,
                             uint32_t block_size) {
   // Init bzip2 stream
   int rc = BZ2_bzDecompressInit(&stream_,
@@ -39,7 +38,7 @@
 
   TEST_AND_RETURN_FALSE(rc == BZ_OK);
 
-  return next_->Init(fd, extents, block_size);
+  return next_->Init(extents, block_size);
 }
 
 bool BzipExtentWriter::Write(const void* bytes, size_t count) {
diff --git a/payload_consumer/bzip_extent_writer.h b/payload_consumer/bzip_extent_writer.h
index ec181a7..38c041a 100644
--- a/payload_consumer/bzip_extent_writer.h
+++ b/payload_consumer/bzip_extent_writer.h
@@ -40,8 +40,7 @@
   }
   ~BzipExtentWriter() override;
 
-  bool Init(FileDescriptorPtr fd,
-            const google::protobuf::RepeatedPtrField<Extent>& extents,
+  bool Init(const google::protobuf::RepeatedPtrField<Extent>& extents,
             uint32_t block_size) override;
   bool Write(const void* bytes, size_t count) override;
 
diff --git a/payload_consumer/bzip_extent_writer_unittest.cc b/payload_consumer/bzip_extent_writer_unittest.cc
index 125e1e5..c93545a 100644
--- a/payload_consumer/bzip_extent_writer_unittest.cc
+++ b/payload_consumer/bzip_extent_writer_unittest.cc
@@ -29,7 +29,6 @@
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_generator/extent_ranges.h"
 
-using google::protobuf::RepeatedPtrField;
 using std::min;
 using std::string;
 using std::vector;
@@ -49,7 +48,7 @@
   void TearDown() override { fd_->Close(); }
 
   FileDescriptorPtr fd_;
-  test_utils::ScopedTempFile temp_file_{"BzipExtentWriterTest-file.XXXXXX"};
+  ScopedTempFile temp_file_{"BzipExtentWriterTest-file.XXXXXX"};
 };
 
 TEST_F(BzipExtentWriterTest, SimpleTest) {
@@ -64,9 +63,8 @@
       0x22, 0x9c, 0x28, 0x48, 0x66, 0x61, 0xb8, 0xea, 0x00,
   };
 
-  BzipExtentWriter bzip_writer(std::make_unique<DirectExtentWriter>());
-  EXPECT_TRUE(
-      bzip_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
+  BzipExtentWriter bzip_writer(std::make_unique<DirectExtentWriter>(fd_));
+  EXPECT_TRUE(bzip_writer.Init({extents.begin(), extents.end()}, kBlockSize));
   EXPECT_TRUE(bzip_writer.Write(test, sizeof(test)));
 
   brillo::Blob buf;
@@ -97,9 +95,8 @@
 
   vector<Extent> extents = {ExtentForBytes(kBlockSize, 0, kDecompressedLength)};
 
-  BzipExtentWriter bzip_writer(std::make_unique<DirectExtentWriter>());
-  EXPECT_TRUE(
-      bzip_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
+  BzipExtentWriter bzip_writer(std::make_unique<DirectExtentWriter>(fd_));
+  EXPECT_TRUE(bzip_writer.Init({extents.begin(), extents.end()}, kBlockSize));
 
   brillo::Blob original_compressed_data = compressed_data;
   for (brillo::Blob::size_type i = 0; i < compressed_data.size();
diff --git a/payload_consumer/cached_file_descriptor_unittest.cc b/payload_consumer/cached_file_descriptor_unittest.cc
index d2965fc..b64420a 100644
--- a/payload_consumer/cached_file_descriptor_unittest.cc
+++ b/payload_consumer/cached_file_descriptor_unittest.cc
@@ -73,7 +73,7 @@
 
  protected:
   FileDescriptorPtr fd_{new EintrSafeFileDescriptor};
-  test_utils::ScopedTempFile temp_file_{"CachedFileDescriptor-file.XXXXXX"};
+  ScopedTempFile temp_file_{"CachedFileDescriptor-file.XXXXXX"};
   int value_{1};
   FileDescriptorPtr cfd_;
 };
diff --git a/payload_consumer/certificate_parser_stub.cc b/payload_consumer/certificate_parser_stub.cc
index 95fd6e8..a365ab8 100644
--- a/payload_consumer/certificate_parser_stub.cc
+++ b/payload_consumer/certificate_parser_stub.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include <payload_consumer/certificate_parser_stub.h>
+#include "update_engine/payload_consumer/certificate_parser_stub.h"
 
 namespace chromeos_update_engine {
 bool CertificateParserStub::ReadPublicKeysFromCertificates(
diff --git a/payload_consumer/certificate_parser_stub.h b/payload_consumer/certificate_parser_stub.h
index f4f8825..a51c2c6 100644
--- a/payload_consumer/certificate_parser_stub.h
+++ b/payload_consumer/certificate_parser_stub.h
@@ -23,7 +23,7 @@
 
 #include <base/macros.h>
 
-#include "payload_consumer/certificate_parser_interface.h"
+#include "update_engine/payload_consumer/certificate_parser_interface.h"
 
 namespace chromeos_update_engine {
 class CertificateParserStub : public CertificateParserInterface {
diff --git a/payload_consumer/cow_writer_file_descriptor.cc b/payload_consumer/cow_writer_file_descriptor.cc
new file mode 100644
index 0000000..2de6664
--- /dev/null
+++ b/payload_consumer/cow_writer_file_descriptor.cc
@@ -0,0 +1,151 @@
+//
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/cow_writer_file_descriptor.h"
+
+#include <memory>
+#include <utility>
+
+#include <base/logging.h>
+
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/file_descriptor.h"
+
+namespace chromeos_update_engine {
+CowWriterFileDescriptor::CowWriterFileDescriptor(
+    std::unique_ptr<android::snapshot::ISnapshotWriter> cow_writer)
+    : cow_writer_(std::move(cow_writer)),
+      cow_reader_(cow_writer_->OpenReader()) {
+  CHECK_NE(cow_writer_, nullptr);
+  CHECK_NE(cow_reader_, nullptr);
+}
+
+bool CowWriterFileDescriptor::Open(const char* path, int flags, mode_t mode) {
+  LOG(ERROR) << "CowWriterFileDescriptor doesn't support Open()";
+  return false;
+}
+bool CowWriterFileDescriptor::Open(const char* path, int flags) {
+  LOG(ERROR) << "CowWriterFileDescriptor doesn't support Open()";
+  return false;
+}
+
+ssize_t CowWriterFileDescriptor::Read(void* buf, size_t count) {
+  if (dirty_) {
+    // OK, CowReader provides a snapshot view of what the cow contains. Which
+    // means any writes happened after opening a CowReader isn't visible to
+    // that CowReader. Therefore, we re-open CowReader whenever we attempt a
+    // read after write. This does incur an overhead everytime you read after
+    // write.
+    // The usage of |dirty_| flag to coordinate re-open is a very coarse grained
+    // checked. This implementation has suboptimal performance. For better
+    // performance, keep track of blocks which are overwritten, and only re-open
+    // if reading a dirty block.
+    // TODO(b/173432386) Implement finer grained dirty checks
+    const auto offset = cow_reader_->Seek(0, SEEK_CUR);
+    cow_reader_.reset();
+    if (!cow_writer_->Finalize()) {
+      LOG(ERROR) << "Failed to Finalize() cow writer";
+      return -1;
+    }
+    cow_reader_ = cow_writer_->OpenReader();
+    if (cow_reader_ == nullptr) {
+      LOG(ERROR) << "Failed to re-open cow reader after writing to COW";
+      return -1;
+    }
+    const auto pos = cow_reader_->Seek(offset, SEEK_SET);
+    if (pos != offset) {
+      LOG(ERROR) << "Failed to seek to previous position after re-opening cow "
+                    "reader, expected "
+                 << offset << " actual: " << pos;
+      return -1;
+    }
+    dirty_ = false;
+  }
+  return cow_reader_->Read(buf, count);
+}
+
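+// Writes are forwarded to the underlying CowWriter as raw blocks. The block
+// index is derived from the reader's current byte offset, which must be
+// block-aligned.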
+ssize_t CowWriterFileDescriptor::Write(const void* buf, size_t count) {
+  auto offset = cow_reader_->Seek(0, SEEK_CUR);
+  CHECK_EQ(offset % cow_writer_->options().block_size, 0);
+  auto success = cow_writer_->AddRawBlocks(
+      offset / cow_writer_->options().block_size, buf, count);
+  if (success) {
+    if (cow_reader_->Seek(count, SEEK_CUR) < 0) {
+      return -1;
+    }
+    dirty_ = true;
+    return count;
+  }
+  return -1;
+}
+
+off64_t CowWriterFileDescriptor::Seek(const off64_t offset, int whence) {
+  return cow_reader_->Seek(offset, whence);
+}
+
+uint64_t CowWriterFileDescriptor::BlockDevSize() {
+  LOG(ERROR) << "CowWriterFileDescriptor doesn't support BlockDevSize()";
+  return 0;
+}
+
+bool CowWriterFileDescriptor::BlkIoctl(int request,
+                                       uint64_t start,
+                                       uint64_t length,
+                                       int* result) {
+  LOG(ERROR) << "CowWriterFileDescriptor doesn't support BlkIoctl()";
+  return false;
+}
+
+bool CowWriterFileDescriptor::Flush() {
+  // CowWriter already flushes automatically, so there is nothing to do here.
+  return true;
+}
+
+bool CowWriterFileDescriptor::Close() {
+  if (cow_writer_) {
+    // b/186196758
+    // When calling InitializeAppend(kEndOfInstall), the SnapshotWriter only
+    // reads up to the given label, but OpenReader() completely disregards the
+    // resume label and reads all ops. Therefore, update_engine sees the
+    // verity data. However, when calling SnapshotWriter::Finalize(), data
+    // after the resume label is discarded, so the verity data is gone. To
+    // prevent phantom reads, don't call Finalize() unless we actually wrote
+    // something.
+    if (dirty_) {
+      TEST_AND_RETURN_FALSE(cow_writer_->Finalize());
+    }
+    cow_writer_ = nullptr;
+  }
+  if (cow_reader_) {
+    TEST_AND_RETURN_FALSE(cow_reader_->Close());
+    cow_reader_ = nullptr;
+  }
+  return true;
+}
+
+bool CowWriterFileDescriptor::IsSettingErrno() {
+  return false;
+}
+
+bool CowWriterFileDescriptor::IsOpen() {
+  return cow_writer_ != nullptr && cow_reader_ != nullptr;
+}
+
+CowWriterFileDescriptor::~CowWriterFileDescriptor() {
+  Close();
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/cow_writer_file_descriptor.h b/payload_consumer/cow_writer_file_descriptor.h
new file mode 100644
index 0000000..5d9ffc6
--- /dev/null
+++ b/payload_consumer/cow_writer_file_descriptor.h
@@ -0,0 +1,66 @@
+//
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_COW_WRITER_FILE_DESCRIPTOR_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_COW_WRITER_FILE_DESCRIPTOR_H_
+
+#include <cstdint>
+#include <memory>
+
+#include <libsnapshot/snapshot_writer.h>
+
+#include "update_engine/payload_consumer/file_descriptor.h"
+
+namespace chromeos_update_engine {
+
+// A readable/writable FileDescriptor class. This is a simple wrapper around
+// CowWriter. It is only intended to be used by FileSystemVerifierAction for
+// writing FEC. Writes must be block-aligned (4096 bytes) or they will fail.
+class CowWriterFileDescriptor final : public FileDescriptor {
+ public:
+  explicit CowWriterFileDescriptor(
+      std::unique_ptr<android::snapshot::ISnapshotWriter> cow_writer);
+  ~CowWriterFileDescriptor();
+
+  bool Open(const char* path, int flags, mode_t mode) override;
+  bool Open(const char* path, int flags) override;
+
+  ssize_t Read(void* buf, size_t count) override;
+
+  // |count| must be block-aligned, and the current offset of this fd must
+  // also be block-aligned.
+  ssize_t Write(const void* buf, size_t count) override;
+
+  off64_t Seek(off64_t offset, int whence) override;
+
+  uint64_t BlockDevSize() override;
+
+  bool BlkIoctl(int request,
+                uint64_t start,
+                uint64_t length,
+                int* result) override;
+
+  bool Flush() override;
+
+  bool Close() override;
+
+  bool IsSettingErrno() override;
+
+  bool IsOpen() override;
+
+ private:
+  std::unique_ptr<android::snapshot::ISnapshotWriter> cow_writer_;
+  FileDescriptorPtr cow_reader_;
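+  // Set to true after a successful Write(); cleared by Read() once the
+  // CowReader has been re-opened to observe the newly written data.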
+  bool dirty_ = false;
+};
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_PAYLOAD_CONSUMER_COW_WRITER_FILE_DESCRIPTOR_H_
diff --git a/payload_consumer/cow_writer_file_descriptor_unittest.cc b/payload_consumer/cow_writer_file_descriptor_unittest.cc
new file mode 100644
index 0000000..c596e3b
--- /dev/null
+++ b/payload_consumer/cow_writer_file_descriptor_unittest.cc
@@ -0,0 +1,120 @@
+//
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/cow_writer_file_descriptor.h"
+
+#include <cstring>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include <android-base/unique_fd.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <libsnapshot/snapshot_writer.h>
+
+#include "update_engine/common/utils.h"
+
+namespace chromeos_update_engine {
+constexpr size_t BLOCK_SIZE = 4096;
+constexpr size_t PARTITION_SIZE = BLOCK_SIZE * 10;
+
+using android::base::unique_fd;
+using android::snapshot::CompressedSnapshotWriter;
+using android::snapshot::CowOptions;
+using android::snapshot::ISnapshotWriter;
+
+class CowWriterFileDescriptorUnittest : public ::testing::Test {
+ public:
+  void SetUp() override {
+    ASSERT_EQ(ftruncate64(cow_device_file_.fd(), PARTITION_SIZE), 0)
+        << "Failed to truncate cow_device file to " << PARTITION_SIZE << ": "
+        << strerror(errno);
+    ASSERT_EQ(ftruncate64(cow_source_file_.fd(), PARTITION_SIZE), 0)
+        << "Failed to truncate cow_source file to " << PARTITION_SIZE << ": "
+        << strerror(errno);
+  }
+
+  std::unique_ptr<CompressedSnapshotWriter> GetCowWriter() {
+    const CowOptions options{.block_size = BLOCK_SIZE, .compression = "gz"};
+    auto snapshot_writer = std::make_unique<CompressedSnapshotWriter>(options);
+    int fd = open(cow_device_file_.path().c_str(), O_RDWR);
+    EXPECT_NE(fd, -1);
+    EXPECT_TRUE(snapshot_writer->SetCowDevice(unique_fd{fd}));
+    snapshot_writer->SetSourceDevice(cow_source_file_.path());
+    return snapshot_writer;
+  }
+  CowWriterFileDescriptor GetCowFd() {
+    auto cow_writer = GetCowWriter();
+    return CowWriterFileDescriptor{std::move(cow_writer)};
+  }
+
+  ScopedTempFile cow_source_file_{"cow_source.XXXXXX", true};
+  ScopedTempFile cow_device_file_{"cow_device.XXXXXX", true};
+};
+
+TEST_F(CowWriterFileDescriptorUnittest, ReadAfterWrite) {
+  std::vector<unsigned char> buffer;
+  buffer.resize(BLOCK_SIZE);
+  std::fill(buffer.begin(), buffer.end(), 234);
+
+  std::vector<unsigned char> verity_data;
+  verity_data.resize(BLOCK_SIZE);
+  std::fill(verity_data.begin(), verity_data.end(), 0xAA);
+
+  auto cow_writer = GetCowWriter();
+  ASSERT_TRUE(cow_writer->Initialize());
+
+  // Simulate Writing InstallOp data
+  ASSERT_TRUE(cow_writer->AddRawBlocks(0, buffer.data(), buffer.size()));
+  ASSERT_TRUE(cow_writer->AddZeroBlocks(1, 2));
+  ASSERT_TRUE(cow_writer->AddCopy(3, 1));
+  // Fake label to simulate "end of install"
+  ASSERT_TRUE(cow_writer->AddLabel(23));
+  ASSERT_TRUE(
+      cow_writer->AddRawBlocks(4, verity_data.data(), verity_data.size()));
+  ASSERT_TRUE(cow_writer->Finalize());
+
+  cow_writer = GetCowWriter();
+  ASSERT_NE(nullptr, cow_writer);
+  ASSERT_TRUE(cow_writer->InitializeAppend(23));
+  auto cow_fd =
+      std::make_unique<CowWriterFileDescriptor>(std::move(cow_writer));
+
+  ASSERT_EQ((ssize_t)BLOCK_SIZE * 4, cow_fd->Seek(BLOCK_SIZE * 4, SEEK_SET));
+  std::vector<unsigned char> read_back(4096);
+  ASSERT_EQ((ssize_t)read_back.size(),
+            cow_fd->Read(read_back.data(), read_back.size()));
+  ASSERT_EQ(verity_data, read_back);
+
+  // Since we didn't write anything to this instance of cow_fd, the destructor
+  // should not call Finalize(), as Finalize() drops ops after the resume
+  // label, causing subsequent reads to fail.
+  cow_writer = GetCowWriter();
+  ASSERT_NE(nullptr, cow_writer);
+  ASSERT_TRUE(cow_writer->InitializeAppend(23));
+  cow_fd = std::make_unique<CowWriterFileDescriptor>(std::move(cow_writer));
+
+  ASSERT_EQ((ssize_t)BLOCK_SIZE * 4, cow_fd->Seek(BLOCK_SIZE * 4, SEEK_SET));
+  ASSERT_EQ((ssize_t)read_back.size(),
+            cow_fd->Read(read_back.data(), read_back.size()));
+  ASSERT_EQ(verity_data, read_back)
+      << "Could not read verity data afeter InitializeAppend() => Read() => "
+         "InitializeAppend() sequence. If no writes happened while CowWriterFd "
+         "is open, Finalize() should not be called.";
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index 4c4ff04..a57169b 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -23,6 +23,7 @@
 #include <cstring>
 #include <map>
 #include <memory>
+#include <set>
 #include <string>
 #include <utility>
 #include <vector>
@@ -40,24 +41,26 @@
 #include <puffin/puffpatch.h>
 
 #include "update_engine/common/constants.h"
+#include "update_engine/common/download_action.h"
+#include "update_engine/common/error_code.h"
+#include "update_engine/common/error_code_utils.h"
 #include "update_engine/common/hardware_interface.h"
 #include "update_engine/common/prefs_interface.h"
 #include "update_engine/common/subprocess.h"
 #include "update_engine/common/terminator.h"
+#include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/bzip_extent_writer.h"
 #include "update_engine/payload_consumer/cached_file_descriptor.h"
 #include "update_engine/payload_consumer/certificate_parser_interface.h"
-#include "update_engine/payload_consumer/download_action.h"
 #include "update_engine/payload_consumer/extent_reader.h"
 #include "update_engine/payload_consumer/extent_writer.h"
+#include "update_engine/payload_consumer/partition_update_generator_interface.h"
+#include "update_engine/payload_consumer/partition_writer.h"
 #if USE_FEC
 #include "update_engine/payload_consumer/fec_file_descriptor.h"
 #endif  // USE_FEC
 #include "update_engine/payload_consumer/file_descriptor_utils.h"
 #include "update_engine/payload_consumer/mount_history.h"
-#if USE_MTD
-#include "update_engine/payload_consumer/mtd_file_descriptor.h"
-#endif  // USE_MTD
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_consumer/payload_verifier.h"
 #include "update_engine/payload_consumer/xz_extent_writer.h"
@@ -77,103 +80,6 @@
 namespace {
 const int kUpdateStateOperationInvalid = -1;
 const int kMaxResumedUpdateFailures = 10;
-#if USE_MTD
-const int kUbiVolumeAttachTimeout = 5 * 60;
-#endif
-
-const uint64_t kCacheSize = 1024 * 1024;  // 1MB
-
-FileDescriptorPtr CreateFileDescriptor(const char* path) {
-  FileDescriptorPtr ret;
-#if USE_MTD
-  if (strstr(path, "/dev/ubi") == path) {
-    if (!UbiFileDescriptor::IsUbi(path)) {
-      // The volume might not have been attached at boot time.
-      int volume_no;
-      if (utils::SplitPartitionName(path, nullptr, &volume_no)) {
-        utils::TryAttachingUbiVolume(volume_no, kUbiVolumeAttachTimeout);
-      }
-    }
-    if (UbiFileDescriptor::IsUbi(path)) {
-      LOG(INFO) << path << " is a UBI device.";
-      ret.reset(new UbiFileDescriptor);
-    }
-  } else if (MtdFileDescriptor::IsMtd(path)) {
-    LOG(INFO) << path << " is an MTD device.";
-    ret.reset(new MtdFileDescriptor);
-  } else {
-    LOG(INFO) << path << " is not an MTD nor a UBI device.";
-#endif
-    ret.reset(new EintrSafeFileDescriptor);
-#if USE_MTD
-  }
-#endif
-  return ret;
-}
-
-// Opens path for read/write. On success returns an open FileDescriptor
-// and sets *err to 0. On failure, sets *err to errno and returns nullptr.
-FileDescriptorPtr OpenFile(const char* path,
-                           int mode,
-                           bool cache_writes,
-                           int* err) {
-  // Try to mark the block device read-only based on the mode. Ignore any
-  // failure since this won't work when passing regular files.
-  bool read_only = (mode & O_ACCMODE) == O_RDONLY;
-  utils::SetBlockDeviceReadOnly(path, read_only);
-
-  FileDescriptorPtr fd = CreateFileDescriptor(path);
-  if (cache_writes && !read_only) {
-    fd = FileDescriptorPtr(new CachedFileDescriptor(fd, kCacheSize));
-    LOG(INFO) << "Caching writes.";
-  }
-#if USE_MTD
-  // On NAND devices, we can either read, or write, but not both. So here we
-  // use O_WRONLY.
-  if (UbiFileDescriptor::IsUbi(path) || MtdFileDescriptor::IsMtd(path)) {
-    mode = O_WRONLY;
-  }
-#endif
-  if (!fd->Open(path, mode, 000)) {
-    *err = errno;
-    PLOG(ERROR) << "Unable to open file " << path;
-    return nullptr;
-  }
-  *err = 0;
-  return fd;
-}
-
-// Discard the tail of the block device referenced by |fd|, from the offset
-// |data_size| until the end of the block device. Returns whether the data was
-// discarded.
-bool DiscardPartitionTail(const FileDescriptorPtr& fd, uint64_t data_size) {
-  uint64_t part_size = fd->BlockDevSize();
-  if (!part_size || part_size <= data_size)
-    return false;
-
-  struct blkioctl_request {
-    int number;
-    const char* name;
-  };
-  const vector<blkioctl_request> blkioctl_requests = {
-      {BLKDISCARD, "BLKDISCARD"},
-      {BLKSECDISCARD, "BLKSECDISCARD"},
-#ifdef BLKZEROOUT
-      {BLKZEROOUT, "BLKZEROOUT"},
-#endif
-  };
-  for (const auto& req : blkioctl_requests) {
-    int error = 0;
-    if (fd->BlkIoctl(req.number, data_size, part_size - data_size, &error) &&
-        error == 0) {
-      return true;
-    }
-    LOG(WARNING) << "Error discarding the last "
-                 << (part_size - data_size) / 1024 << " KiB using ioctl("
-                 << req.name << ")";
-  }
-  return false;
-}
 
 }  // namespace
 
@@ -292,12 +198,9 @@
   if (op_result)
     return true;
 
-  size_t partition_first_op_num =
-      current_partition_ ? acc_num_operations_[current_partition_ - 1] : 0;
   LOG(ERROR) << "Failed to perform " << op_type_name << " operation "
              << next_operation_num_ << ", which is the operation "
-             << next_operation_num_ - partition_first_op_num
-             << " in partition \""
+             << GetPartitionOperationNum() << " in partition \""
              << partitions_[current_partition_].partition_name() << "\"";
   if (*error == ErrorCode::kSuccess)
     *error = ErrorCode::kDownloadOperationExecutionError;
@@ -319,33 +222,12 @@
 }
 
 int DeltaPerformer::CloseCurrentPartition() {
-  int err = 0;
-  if (source_fd_ && !source_fd_->Close()) {
-    err = errno;
-    PLOG(ERROR) << "Error closing source partition";
-    if (!err)
-      err = 1;
+  if (!partition_writer_) {
+    return 0;
   }
-  source_fd_.reset();
-  if (source_ecc_fd_ && !source_ecc_fd_->Close()) {
-    err = errno;
-    PLOG(ERROR) << "Error closing ECC source partition";
-    if (!err)
-      err = 1;
-  }
-  source_ecc_fd_.reset();
-  source_ecc_open_failure_ = false;
-  source_path_.clear();
-
-  if (target_fd_ && !target_fd_->Close()) {
-    err = errno;
-    PLOG(ERROR) << "Error closing target partition";
-    if (!err)
-      err = 1;
-  }
-  target_fd_.reset();
-  target_path_.clear();
-  return -err;
+  int err = partition_writer_->Close();
+  partition_writer_ = nullptr;
+  return err;
 }
 
 bool DeltaPerformer::OpenCurrentPartition() {
@@ -357,91 +239,29 @@
       install_plan_->partitions.size() - partitions_.size();
   const InstallPlan::Partition& install_part =
       install_plan_->partitions[num_previous_partitions + current_partition_];
-  // Open source fds if we have a delta payload with minor version >= 2.
-  if (payload_->type == InstallPayloadType::kDelta &&
-      GetMinorVersion() != kInPlaceMinorPayloadVersion &&
-      // With dynamic partitions we could create a new partition in a
-      // delta payload, and we shouldn't open source partition in that case.
-      install_part.source_size > 0) {
-    source_path_ = install_part.source_path;
-    int err;
-    source_fd_ = OpenFile(source_path_.c_str(), O_RDONLY, false, &err);
-    if (!source_fd_) {
-      LOG(ERROR) << "Unable to open source partition "
-                 << partition.partition_name() << " on slot "
-                 << BootControlInterface::SlotName(install_plan_->source_slot)
-                 << ", file " << source_path_;
-      return false;
-    }
-  }
+  auto dynamic_control = boot_control_->GetDynamicPartitionControl();
+  partition_writer_ = CreatePartitionWriter(
+      partition,
+      install_part,
+      dynamic_control,
+      block_size_,
+      interactive_,
+      IsDynamicPartition(install_part.name, install_plan_->target_slot));
+  // Open source fds if we have a delta payload, or for partitions in the
+  // partial update.
+  bool source_may_exist = manifest_.partial_update() ||
+                          payload_->type == InstallPayloadType::kDelta;
+  const size_t partition_operation_num = GetPartitionOperationNum();
 
-  target_path_ = install_part.target_path;
-  int err;
-
-  int flags = O_RDWR;
-  if (!interactive_)
-    flags |= O_DSYNC;
-
-  LOG(INFO) << "Opening " << target_path_ << " partition with"
-            << (interactive_ ? "out" : "") << " O_DSYNC";
-
-  target_fd_ = OpenFile(target_path_.c_str(), flags, true, &err);
-  if (!target_fd_) {
-    LOG(ERROR) << "Unable to open target partition "
-               << partition.partition_name() << " on slot "
-               << BootControlInterface::SlotName(install_plan_->target_slot)
-               << ", file " << target_path_;
-    return false;
-  }
-
-  LOG(INFO) << "Applying " << partition.operations().size()
-            << " operations to partition \"" << partition.partition_name()
-            << "\"";
-
-  // Discard the end of the partition, but ignore failures.
-  DiscardPartitionTail(target_fd_, install_part.target_size);
-
+  TEST_AND_RETURN_FALSE(partition_writer_->Init(
+      install_plan_, source_may_exist, partition_operation_num));
+  CheckpointUpdateProgress(true);
   return true;
 }
 
-bool DeltaPerformer::OpenCurrentECCPartition() {
-  if (source_ecc_fd_)
-    return true;
-
-  if (source_ecc_open_failure_)
-    return false;
-
-  if (current_partition_ >= partitions_.size())
-    return false;
-
-  // No support for ECC in minor version 1 or full payloads.
-  if (payload_->type == InstallPayloadType::kFull ||
-      GetMinorVersion() == kInPlaceMinorPayloadVersion)
-    return false;
-
-#if USE_FEC
-  const PartitionUpdate& partition = partitions_[current_partition_];
-  size_t num_previous_partitions =
-      install_plan_->partitions.size() - partitions_.size();
-  const InstallPlan::Partition& install_part =
-      install_plan_->partitions[num_previous_partitions + current_partition_];
-  string path = install_part.source_path;
-  FileDescriptorPtr fd(new FecFileDescriptor());
-  if (!fd->Open(path.c_str(), O_RDONLY, 0)) {
-    PLOG(ERROR) << "Unable to open ECC source partition "
-                << partition.partition_name() << " on slot "
-                << BootControlInterface::SlotName(install_plan_->source_slot)
-                << ", file " << path;
-    source_ecc_open_failure_ = true;
-    return false;
-  }
-  source_ecc_fd_ = fd;
-#else
-  // No support for ECC compiled.
-  source_ecc_open_failure_ = true;
-#endif  // USE_FEC
-
-  return !source_ecc_open_failure_;
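+// Returns the index of the current operation within the current partition,
+// i.e. |next_operation_num_| minus the number of operations accumulated in
+// all previous partitions.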
+size_t DeltaPerformer::GetPartitionOperationNum() {
+  return next_operation_num_ -
+         (current_partition_ ? acc_num_operations_[current_partition_ - 1] : 0);
 }
 
 namespace {
@@ -465,15 +285,6 @@
 
 }  // namespace
 
-uint32_t DeltaPerformer::GetMinorVersion() const {
-  if (manifest_.has_minor_version()) {
-    return manifest_.minor_version();
-  }
-  return payload_->type == InstallPayloadType::kDelta
-             ? kMaxSupportedMinorPayloadVersion
-             : kFullPayloadMinorVersion;
-}
-
 bool DeltaPerformer::IsHeaderParsed() const {
   return metadata_size_ != 0;
 }
@@ -505,6 +316,21 @@
         return MetadataParseResult::kError;
       }
     }
+
+    // Check that |metadata_signature_size_| and |metadata_size_| are not very
+    // big numbers. This is necessary since |update_engine| needs to write
+    // these values into the buffer before being able to use them, and if an
+    // attacker sets these values to a very big number, the buffer will
+    // overflow and |update_engine| will crash. A simple way of solving this is
+    // to check that the sum of both values is smaller than the payload itself.
+    if (metadata_size_ + metadata_signature_size_ > payload_->size) {
+      LOG(ERROR) << "The size of the metadata_size(" << metadata_size_ << ")"
+                 << " or metadata signature(" << metadata_signature_size_ << ")"
+                 << " is greater than the size of the payload"
+                 << "(" << payload_->size << ")";
+      *error = ErrorCode::kDownloadInvalidMetadataSize;
+      return MetadataParseResult::kError;
+    }
   }
 
   // Now that we have validated the metadata size, we should wait for the full
@@ -527,6 +353,7 @@
                  << "Trusting metadata size in payload = " << metadata_size_;
   }
 
+  // NOLINTNEXTLINE(whitespace/braces)
   auto [payload_verifier, perform_verification] = CreatePayloadVerifier();
   if (!payload_verifier) {
     LOG(ERROR) << "Failed to create payload verifier.";
@@ -567,7 +394,7 @@
 #define OP_DURATION_HISTOGRAM(_op_name, _start_time)                         \
   LOCAL_HISTOGRAM_CUSTOM_TIMES(                                              \
       "UpdateEngine.DownloadAction.InstallOperation::" _op_name ".Duration", \
-      base::TimeTicks::Now() - _start_time,                                  \
+      (base::TimeTicks::Now() - _start_time),                                \
       base::TimeDelta::FromMilliseconds(10),                                 \
       base::TimeDelta::FromMinutes(5),                                       \
       20);
@@ -608,6 +435,10 @@
     if ((*error = ValidateManifest()) != ErrorCode::kSuccess)
       return false;
     manifest_valid_ = true;
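+    // On a fresh (non-resume) update, persist the raw manifest bytes in prefs.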
+    if (!install_plan_->is_resume) {
+      auto begin = reinterpret_cast<const char*>(buffer_.data());
+      prefs_->SetString(kPrefsManifestBytes, {begin, buffer_.size()});
+    }
 
     // Clear the download buffer.
     DiscardBuffer(false, metadata_size_);
@@ -667,6 +498,9 @@
     // We know there are more operations to perform because we didn't reach the
     // |num_total_operations_| limit yet.
     if (next_operation_num_ >= acc_num_operations_[current_partition_]) {
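+      // All install operations for this partition have been applied; notify
+      // the partition writer before the partition is closed.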
+      if (partition_writer_) {
+        TEST_AND_RETURN_FALSE(partition_writer_->FinishedInstallOps());
+      }
       CloseCurrentPartition();
       // Skip until there are operations for current_partition_.
       while (next_operation_num_ >= acc_num_operations_[current_partition_]) {
@@ -677,12 +511,9 @@
         return false;
       }
     }
-    const size_t partition_operation_num =
-        next_operation_num_ -
-        (current_partition_ ? acc_num_operations_[current_partition_ - 1] : 0);
 
     const InstallOperation& op =
-        partitions_[current_partition_].operations(partition_operation_num);
+        partitions_[current_partition_].operations(GetPartitionOperationNum());
 
     CopyDataToBuffer(&c_bytes, &count, op.data_length());
 
@@ -690,27 +521,24 @@
     if (!CanPerformInstallOperation(op))
       return true;
 
-    // Validate the operation only if the metadata signature is present.
-    // Otherwise, keep the old behavior. This serves as a knob to disable
-    // the validation logic in case we find some regression after rollout.
-    // NOTE: If hash checks are mandatory and if metadata_signature is empty,
-    // we would have already failed in ParsePayloadMetadata method and thus not
-    // even be here. So no need to handle that case again here.
-    if (!payload_->metadata_signature.empty()) {
-      // Note: Validate must be called only if CanPerformInstallOperation is
-      // called. Otherwise, we might be failing operations before even if there
-      // isn't sufficient data to compute the proper hash.
-      *error = ValidateOperationHash(op);
-      if (*error != ErrorCode::kSuccess) {
-        if (install_plan_->hash_checks_mandatory) {
-          LOG(ERROR) << "Mandatory operation hash check failed";
-          return false;
-        }
-
-        // For non-mandatory cases, just send a UMA stat.
-        LOG(WARNING) << "Ignoring operation validation errors";
-        *error = ErrorCode::kSuccess;
+    // Validate the operation unconditionally. This helps prevent the
+    // exploitation of vulnerabilities in the patching libraries, e.g. bspatch.
+    // The hash of the patch data for a given operation is embedded in the
+    // payload metadata, and thus has been verified against the public key on
+    // the device.
+    // Note: Validate must be called only if CanPerformInstallOperation is
+    // called. Otherwise, we might fail operations even before there is
+    // sufficient data to compute the proper hash.
+    *error = ValidateOperationHash(op);
+    if (*error != ErrorCode::kSuccess) {
+      if (install_plan_->hash_checks_mandatory) {
+        LOG(ERROR) << "Mandatory operation hash check failed";
+        return false;
       }
+
+      // For non-mandatory cases, just send a UMA stat.
+      LOG(WARNING) << "Ignoring operation validation errors";
+      *error = ErrorCode::kSuccess;
     }
 
     // Makes sure we unblock exit when this operation completes.
@@ -732,14 +560,6 @@
         op_result = PerformZeroOrDiscardOperation(op);
         OP_DURATION_HISTOGRAM("ZERO_OR_DISCARD", op_start_time);
         break;
-      case InstallOperation::MOVE:
-        op_result = PerformMoveOperation(op);
-        OP_DURATION_HISTOGRAM("MOVE", op_start_time);
-        break;
-      case InstallOperation::BSDIFF:
-        op_result = PerformBsdiffOperation(op);
-        OP_DURATION_HISTOGRAM("BSDIFF", op_start_time);
-        break;
       case InstallOperation::SOURCE_COPY:
         op_result = PerformSourceCopyOperation(op, error);
         OP_DURATION_HISTOGRAM("SOURCE_COPY", op_start_time);
@@ -759,19 +579,19 @@
     if (!HandleOpResult(op_result, InstallOperationTypeName(op.type()), error))
       return false;
 
-    if (!target_fd_->Flush()) {
-      return false;
-    }
-
     next_operation_num_++;
     UpdateOverallProgress(false, "Completed ");
     CheckpointUpdateProgress(false);
   }
 
-  // In major version 2, we don't add dummy operation to the payload.
+  if (partition_writer_) {
+    TEST_AND_RETURN_FALSE(partition_writer_->FinishedInstallOps());
+  }
+  CloseCurrentPartition();
+
+  // In major version 2, we don't add an unused operation to the payload.
   // If we already extracted the signature we should skip this step.
-  if (major_payload_version_ == kBrilloMajorPayloadVersion &&
-      manifest_.has_signatures_offset() && manifest_.has_signatures_size() &&
+  if (manifest_.has_signatures_offset() && manifest_.has_signatures_size() &&
       signatures_message_data_.empty()) {
     if (manifest_.signatures_offset() != buffer_offset_) {
       LOG(ERROR) << "Payload signatures offset points to blob offset "
@@ -806,49 +626,65 @@
 }
 
 bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) {
-  if (major_payload_version_ == kBrilloMajorPayloadVersion) {
-    partitions_.clear();
-    for (const PartitionUpdate& partition : manifest_.partitions()) {
-      partitions_.push_back(partition);
-    }
-  } else if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
-    LOG(INFO) << "Converting update information from old format.";
-    PartitionUpdate root_part;
-    root_part.set_partition_name(kPartitionNameRoot);
-#ifdef __ANDROID__
-    LOG(WARNING) << "Legacy payload major version provided to an Android "
-                    "build. Assuming no post-install. Please use major version "
-                    "2 or newer.";
-    root_part.set_run_postinstall(false);
-#else
-    root_part.set_run_postinstall(true);
-#endif  // __ANDROID__
-    if (manifest_.has_old_rootfs_info()) {
-      *root_part.mutable_old_partition_info() = manifest_.old_rootfs_info();
-      manifest_.clear_old_rootfs_info();
-    }
-    if (manifest_.has_new_rootfs_info()) {
-      *root_part.mutable_new_partition_info() = manifest_.new_rootfs_info();
-      manifest_.clear_new_rootfs_info();
-    }
-    *root_part.mutable_operations() = manifest_.install_operations();
-    manifest_.clear_install_operations();
-    partitions_.push_back(std::move(root_part));
+  partitions_.clear();
+  for (const PartitionUpdate& partition : manifest_.partitions()) {
+    partitions_.push_back(partition);
+  }
 
-    PartitionUpdate kern_part;
-    kern_part.set_partition_name(kPartitionNameKernel);
-    kern_part.set_run_postinstall(false);
-    if (manifest_.has_old_kernel_info()) {
-      *kern_part.mutable_old_partition_info() = manifest_.old_kernel_info();
-      manifest_.clear_old_kernel_info();
+  // For VAB and partial updates, the partition preparation will copy the
+  // dynamic partitions metadata to the target metadata slot, and rename the
+  // slot suffix of the partitions in the metadata.
+  if (install_plan_->target_slot != BootControlInterface::kInvalidSlot) {
+    uint64_t required_size = 0;
+    if (!PreparePartitionsForUpdate(&required_size)) {
+      if (required_size > 0) {
+        *error = ErrorCode::kNotEnoughSpace;
+      } else {
+        *error = ErrorCode::kInstallDeviceOpenError;
+      }
+      return false;
     }
-    if (manifest_.has_new_kernel_info()) {
-      *kern_part.mutable_new_partition_info() = manifest_.new_kernel_info();
-      manifest_.clear_new_kernel_info();
+  }
+
+  // Partitions in manifest are no longer needed after preparing partitions.
+  manifest_.clear_partitions();
+  // TODO(xunchang) TBD: allow partial update only on devices with dynamic
+  // partition.
+  if (manifest_.partial_update()) {
+    std::set<std::string> touched_partitions;
+    for (const auto& partition_update : partitions_) {
+      touched_partitions.insert(partition_update.partition_name());
     }
-    *kern_part.mutable_operations() = manifest_.kernel_install_operations();
-    manifest_.clear_kernel_install_operations();
-    partitions_.push_back(std::move(kern_part));
+
+    auto generator = partition_update_generator::Create(boot_control_,
+                                                        manifest_.block_size());
+    std::vector<PartitionUpdate> untouched_static_partitions;
+    TEST_AND_RETURN_FALSE(
+        generator->GenerateOperationsForPartitionsNotInPayload(
+            install_plan_->source_slot,
+            install_plan_->target_slot,
+            touched_partitions,
+            &untouched_static_partitions));
+    partitions_.insert(partitions_.end(),
+                       untouched_static_partitions.begin(),
+                       untouched_static_partitions.end());
+
+    // Save the untouched dynamic partitions in install plan.
+    std::vector<std::string> dynamic_partitions;
+    if (!boot_control_->GetDynamicPartitionControl()
+             ->ListDynamicPartitionsForSlot(install_plan_->source_slot,
+                                            boot_control_->GetCurrentSlot(),
+                                            &dynamic_partitions)) {
+      LOG(ERROR) << "Failed to load dynamic partitions from slot "
+                 << install_plan_->source_slot;
+      return false;
+    }
+    install_plan_->untouched_dynamic_partitions.clear();
+    for (const auto& name : dynamic_partitions) {
+      if (touched_partitions.find(name) == touched_partitions.end()) {
+        install_plan_->untouched_dynamic_partitions.push_back(name);
+      }
+    }
   }
 
   // Fill in the InstallPlan::partitions based on the partitions from the
@@ -924,22 +760,9 @@
     install_plan_->partitions.push_back(install_part);
   }
 
-  if (install_plan_->target_slot != BootControlInterface::kInvalidSlot) {
-    uint64_t required_size = 0;
-    if (!PreparePartitionsForUpdate(&required_size)) {
-      if (required_size > 0) {
-        *error = ErrorCode::kNotEnoughSpace;
-      } else {
-        *error = ErrorCode::kInstallDeviceOpenError;
-      }
-      return false;
-    }
-  }
-
-  if (major_payload_version_ == kBrilloMajorPayloadVersion) {
-    manifest_.clear_partitions();
-  }
-
+  // TODO(xunchang) only need to load the partitions for those in the payload.
+  // We have already loaded the others once when generating SOURCE_COPY
+  // operations.
   if (!install_plan_->LoadPartitionsFromSlots(boot_control_)) {
     LOG(ERROR) << "Unable to determine all the partition devices.";
     *error = ErrorCode::kInstallDeviceOpenError;
@@ -984,6 +807,7 @@
   } else {
     LOG(INFO) << "Preparing partitions for new update. last hash = "
               << last_hash << ", new hash = " << update_check_response_hash;
+    ResetUpdateProgress(prefs, false);
   }
 
   if (!boot_control->GetDynamicPartitionControl()->PreparePartitionsForUpdate(
@@ -1028,30 +852,10 @@
 
   // Since we delete data off the beginning of the buffer as we use it,
   // the data we need should be exactly at the beginning of the buffer.
-  TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
   TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
 
-  // Extract the signature message if it's in this operation.
-  if (ExtractSignatureMessageFromOperation(operation)) {
-    // If this is dummy replace operation, we ignore it after extracting the
-    // signature.
-    DiscardBuffer(true, 0);
-    return true;
-  }
-
-  // Setup the ExtentWriter stack based on the operation type.
-  std::unique_ptr<ExtentWriter> writer = std::make_unique<DirectExtentWriter>();
-
-  if (operation.type() == InstallOperation::REPLACE_BZ) {
-    writer.reset(new BzipExtentWriter(std::move(writer)));
-  } else if (operation.type() == InstallOperation::REPLACE_XZ) {
-    writer.reset(new XzExtentWriter(std::move(writer)));
-  }
-
-  TEST_AND_RETURN_FALSE(
-      writer->Init(target_fd_, operation.dst_extents(), block_size_));
-  TEST_AND_RETURN_FALSE(writer->Write(buffer_.data(), operation.data_length()));
-
+  TEST_AND_RETURN_FALSE(partition_writer_->PerformReplaceOperation(
+      operation, buffer_.data(), buffer_.size()));
   // Update buffer
   DiscardBuffer(true, buffer_.size());
   return true;
@@ -1066,92 +870,13 @@
   TEST_AND_RETURN_FALSE(!operation.has_data_offset());
   TEST_AND_RETURN_FALSE(!operation.has_data_length());
 
-#ifdef BLKZEROOUT
-  bool attempt_ioctl = true;
-  int request =
-      (operation.type() == InstallOperation::ZERO ? BLKZEROOUT : BLKDISCARD);
-#else   // !defined(BLKZEROOUT)
-  bool attempt_ioctl = false;
-  int request = 0;
-#endif  // !defined(BLKZEROOUT)
-
-  brillo::Blob zeros;
-  for (const Extent& extent : operation.dst_extents()) {
-    const uint64_t start = extent.start_block() * block_size_;
-    const uint64_t length = extent.num_blocks() * block_size_;
-    if (attempt_ioctl) {
-      int result = 0;
-      if (target_fd_->BlkIoctl(request, start, length, &result) && result == 0)
-        continue;
-      attempt_ioctl = false;
-    }
-    // In case of failure, we fall back to writing 0 to the selected region.
-    zeros.resize(16 * block_size_);
-    for (uint64_t offset = 0; offset < length; offset += zeros.size()) {
-      uint64_t chunk_length =
-          min(length - offset, static_cast<uint64_t>(zeros.size()));
-      TEST_AND_RETURN_FALSE(utils::PWriteAll(
-          target_fd_, zeros.data(), chunk_length, start + offset));
-    }
-  }
-  return true;
+  return partition_writer_->PerformZeroOrDiscardOperation(operation);
 }
 
-bool DeltaPerformer::PerformMoveOperation(const InstallOperation& operation) {
-  // Calculate buffer size. Note, this function doesn't do a sliding
-  // window to copy in case the source and destination blocks overlap.
-  // If we wanted to do a sliding window, we could program the server
-  // to generate deltas that effectively did a sliding window.
-
-  uint64_t blocks_to_read = 0;
-  for (int i = 0; i < operation.src_extents_size(); i++)
-    blocks_to_read += operation.src_extents(i).num_blocks();
-
-  uint64_t blocks_to_write = 0;
-  for (int i = 0; i < operation.dst_extents_size(); i++)
-    blocks_to_write += operation.dst_extents(i).num_blocks();
-
-  DCHECK_EQ(blocks_to_write, blocks_to_read);
-  brillo::Blob buf(blocks_to_write * block_size_);
-
-  // Read in bytes.
-  ssize_t bytes_read = 0;
-  for (int i = 0; i < operation.src_extents_size(); i++) {
-    ssize_t bytes_read_this_iteration = 0;
-    const Extent& extent = operation.src_extents(i);
-    const size_t bytes = extent.num_blocks() * block_size_;
-    TEST_AND_RETURN_FALSE(extent.start_block() != kSparseHole);
-    TEST_AND_RETURN_FALSE(utils::PReadAll(target_fd_,
-                                          &buf[bytes_read],
-                                          bytes,
-                                          extent.start_block() * block_size_,
-                                          &bytes_read_this_iteration));
-    TEST_AND_RETURN_FALSE(bytes_read_this_iteration ==
-                          static_cast<ssize_t>(bytes));
-    bytes_read += bytes_read_this_iteration;
-  }
-
-  // Write bytes out.
-  ssize_t bytes_written = 0;
-  for (int i = 0; i < operation.dst_extents_size(); i++) {
-    const Extent& extent = operation.dst_extents(i);
-    const size_t bytes = extent.num_blocks() * block_size_;
-    TEST_AND_RETURN_FALSE(extent.start_block() != kSparseHole);
-    TEST_AND_RETURN_FALSE(utils::PWriteAll(target_fd_,
-                                           &buf[bytes_written],
-                                           bytes,
-                                           extent.start_block() * block_size_));
-    bytes_written += bytes;
-  }
-  DCHECK_EQ(bytes_written, bytes_read);
-  DCHECK_EQ(bytes_written, static_cast<ssize_t>(buf.size()));
-  return true;
-}
-
-bool DeltaPerformer::ValidateSourceHash(const brillo::Blob& calculated_hash,
-                                        const InstallOperation& operation,
-                                        const FileDescriptorPtr source_fd,
-                                        ErrorCode* error) {
+bool PartitionWriter::ValidateSourceHash(const brillo::Blob& calculated_hash,
+                                         const InstallOperation& operation,
+                                         const FileDescriptorPtr source_fd,
+                                         ErrorCode* error) {
   brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
                                     operation.src_sha256_hash().end());
   if (calculated_hash != expected_source_hash) {
@@ -1192,169 +917,7 @@
     TEST_AND_RETURN_FALSE(operation.src_length() % block_size_ == 0);
   if (operation.has_dst_length())
     TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
-
-  TEST_AND_RETURN_FALSE(source_fd_ != nullptr);
-
-  // The device may optimize the SOURCE_COPY operation.
-  // Being this a device-specific optimization let DynamicPartitionController
-  // decide it the operation should be skipped.
-  const PartitionUpdate& partition = partitions_[current_partition_];
-  const auto& partition_control = boot_control_->GetDynamicPartitionControl();
-
-  InstallOperation buf;
-  bool should_optimize = partition_control->OptimizeOperation(
-      partition.partition_name(), operation, &buf);
-  const InstallOperation& optimized = should_optimize ? buf : operation;
-
-  if (operation.has_src_sha256_hash()) {
-    bool read_ok;
-    brillo::Blob source_hash;
-    brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
-                                      operation.src_sha256_hash().end());
-
-    // We fall back to use the error corrected device if the hash of the raw
-    // device doesn't match or there was an error reading the source partition.
-    // Note that this code will also fall back if writing the target partition
-    // fails.
-    if (should_optimize) {
-      // Hash operation.src_extents(), then copy optimized.src_extents to
-      // optimized.dst_extents.
-      read_ok =
-          fd_utils::ReadAndHashExtents(
-              source_fd_, operation.src_extents(), block_size_, &source_hash) &&
-          fd_utils::CopyAndHashExtents(source_fd_,
-                                       optimized.src_extents(),
-                                       target_fd_,
-                                       optimized.dst_extents(),
-                                       block_size_,
-                                       nullptr /* skip hashing */);
-    } else {
-      read_ok = fd_utils::CopyAndHashExtents(source_fd_,
-                                             operation.src_extents(),
-                                             target_fd_,
-                                             operation.dst_extents(),
-                                             block_size_,
-                                             &source_hash);
-    }
-    if (read_ok && expected_source_hash == source_hash)
-      return true;
-
-    if (!OpenCurrentECCPartition()) {
-      // The following function call will return false since the source hash
-      // mismatches, but we still want to call it so it prints the appropriate
-      // log message.
-      return ValidateSourceHash(source_hash, operation, source_fd_, error);
-    }
-
-    LOG(WARNING) << "Source hash from RAW device mismatched: found "
-                 << base::HexEncode(source_hash.data(), source_hash.size())
-                 << ", expected "
-                 << base::HexEncode(expected_source_hash.data(),
-                                    expected_source_hash.size());
-
-    if (should_optimize) {
-      TEST_AND_RETURN_FALSE(fd_utils::ReadAndHashExtents(
-          source_ecc_fd_, operation.src_extents(), block_size_, &source_hash));
-      TEST_AND_RETURN_FALSE(
-          fd_utils::CopyAndHashExtents(source_ecc_fd_,
-                                       optimized.src_extents(),
-                                       target_fd_,
-                                       optimized.dst_extents(),
-                                       block_size_,
-                                       nullptr /* skip hashing */));
-    } else {
-      TEST_AND_RETURN_FALSE(
-          fd_utils::CopyAndHashExtents(source_ecc_fd_,
-                                       operation.src_extents(),
-                                       target_fd_,
-                                       operation.dst_extents(),
-                                       block_size_,
-                                       &source_hash));
-    }
-    TEST_AND_RETURN_FALSE(
-        ValidateSourceHash(source_hash, operation, source_ecc_fd_, error));
-    // At this point reading from the the error corrected device worked, but
-    // reading from the raw device failed, so this is considered a recovered
-    // failure.
-    source_ecc_recovered_failures_++;
-  } else {
-    // When the operation doesn't include a source hash, we attempt the error
-    // corrected device first since we can't verify the block in the raw device
-    // at this point, but we fall back to the raw device since the error
-    // corrected device can be shorter or not available.
-
-    if (OpenCurrentECCPartition() &&
-        fd_utils::CopyAndHashExtents(source_ecc_fd_,
-                                     optimized.src_extents(),
-                                     target_fd_,
-                                     optimized.dst_extents(),
-                                     block_size_,
-                                     nullptr)) {
-      return true;
-    }
-    TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_fd_,
-                                                       optimized.src_extents(),
-                                                       target_fd_,
-                                                       optimized.dst_extents(),
-                                                       block_size_,
-                                                       nullptr));
-  }
-  return true;
-}
-
-FileDescriptorPtr DeltaPerformer::ChooseSourceFD(
-    const InstallOperation& operation, ErrorCode* error) {
-  if (source_fd_ == nullptr) {
-    LOG(ERROR) << "ChooseSourceFD fail: source_fd_ == nullptr";
-    return nullptr;
-  }
-
-  if (!operation.has_src_sha256_hash()) {
-    // When the operation doesn't include a source hash, we attempt the error
-    // corrected device first since we can't verify the block in the raw device
-    // at this point, but we first need to make sure all extents are readable
-    // since the error corrected device can be shorter or not available.
-    if (OpenCurrentECCPartition() &&
-        fd_utils::ReadAndHashExtents(
-            source_ecc_fd_, operation.src_extents(), block_size_, nullptr)) {
-      return source_ecc_fd_;
-    }
-    return source_fd_;
-  }
-
-  brillo::Blob source_hash;
-  brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
-                                    operation.src_sha256_hash().end());
-  if (fd_utils::ReadAndHashExtents(
-          source_fd_, operation.src_extents(), block_size_, &source_hash) &&
-      source_hash == expected_source_hash) {
-    return source_fd_;
-  }
-  // We fall back to use the error corrected device if the hash of the raw
-  // device doesn't match or there was an error reading the source partition.
-  if (!OpenCurrentECCPartition()) {
-    // The following function call will return false since the source hash
-    // mismatches, but we still want to call it so it prints the appropriate
-    // log message.
-    ValidateSourceHash(source_hash, operation, source_fd_, error);
-    return nullptr;
-  }
-  LOG(WARNING) << "Source hash from RAW device mismatched: found "
-               << base::HexEncode(source_hash.data(), source_hash.size())
-               << ", expected "
-               << base::HexEncode(expected_source_hash.data(),
-                                  expected_source_hash.size());
-
-  if (fd_utils::ReadAndHashExtents(
-          source_ecc_fd_, operation.src_extents(), block_size_, &source_hash) &&
-      ValidateSourceHash(source_hash, operation, source_ecc_fd_, error)) {
-    // At this point reading from the the error corrected device worked, but
-    // reading from the raw device failed, so this is considered a recovered
-    // failure.
-    source_ecc_recovered_failures_++;
-    return source_ecc_fd_;
-  }
-  return nullptr;
+  return partition_writer_->PerformSourceCopyOperation(operation, error);
 }
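With this change DeltaPerformer no longer touches the partition file descriptors for data operations; it validates the operation and forwards it to the per-partition writer. A minimal standalone sketch of that delegation shape, using illustrative stand-in types rather than the real update_engine classes:

// Minimal sketch of the delegation pattern: the performer validates and
// dispatches, all partition I/O lives behind a writer interface. These types
// are illustrative stand-ins, not the real update_engine classes.
#include <iostream>
#include <memory>

enum class OpType { kZero, kSourceCopy };
struct Op { OpType type; };

class Writer {
 public:
  virtual ~Writer() = default;
  virtual bool PerformZeroOrDiscardOperation(const Op& op) = 0;
  virtual bool PerformSourceCopyOperation(const Op& op) = 0;
};

class LoggingWriter : public Writer {
 public:
  bool PerformZeroOrDiscardOperation(const Op&) override {
    std::cout << "zero/discard handled by writer\n";
    return true;
  }
  bool PerformSourceCopyOperation(const Op&) override {
    std::cout << "source copy handled by writer\n";
    return true;
  }
};

class Performer {
 public:
  explicit Performer(std::unique_ptr<Writer> writer)
      : writer_(std::move(writer)) {}

  // The performer only dispatches; all partition I/O lives in the writer.
  bool PerformOperation(const Op& op) {
    switch (op.type) {
      case OpType::kZero:
        return writer_->PerformZeroOrDiscardOperation(op);
      case OpType::kSourceCopy:
        return writer_->PerformSourceCopyOperation(op);
    }
    return false;
  }

 private:
  std::unique_ptr<Writer> writer_;
};

int main() {
  Performer performer(std::make_unique<LoggingWriter>());
  return performer.PerformOperation({OpType::kSourceCopy}) ? 0 : 1;
}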
 
 bool DeltaPerformer::ExtentsToBsdiffPositionsString(
@@ -1379,110 +942,6 @@
   return true;
 }
 
-bool DeltaPerformer::PerformBsdiffOperation(const InstallOperation& operation) {
-  // Since we delete data off the beginning of the buffer as we use it,
-  // the data we need should be exactly at the beginning of the buffer.
-  TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
-  TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
-
-  string input_positions;
-  TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.src_extents(),
-                                                       block_size_,
-                                                       operation.src_length(),
-                                                       &input_positions));
-  string output_positions;
-  TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.dst_extents(),
-                                                       block_size_,
-                                                       operation.dst_length(),
-                                                       &output_positions));
-
-  TEST_AND_RETURN_FALSE(bsdiff::bspatch(target_path_.c_str(),
-                                        target_path_.c_str(),
-                                        buffer_.data(),
-                                        buffer_.size(),
-                                        input_positions.c_str(),
-                                        output_positions.c_str()) == 0);
-  DiscardBuffer(true, buffer_.size());
-
-  if (operation.dst_length() % block_size_) {
-    // Zero out rest of final block.
-    // TODO(adlr): build this into bspatch; it's more efficient that way.
-    const Extent& last_extent =
-        operation.dst_extents(operation.dst_extents_size() - 1);
-    const uint64_t end_byte =
-        (last_extent.start_block() + last_extent.num_blocks()) * block_size_;
-    const uint64_t begin_byte =
-        end_byte - (block_size_ - operation.dst_length() % block_size_);
-    brillo::Blob zeros(end_byte - begin_byte);
-    TEST_AND_RETURN_FALSE(utils::PWriteAll(
-        target_fd_, zeros.data(), end_byte - begin_byte, begin_byte));
-  }
-  return true;
-}
-
-namespace {
-
-class BsdiffExtentFile : public bsdiff::FileInterface {
- public:
-  BsdiffExtentFile(std::unique_ptr<ExtentReader> reader, size_t size)
-      : BsdiffExtentFile(std::move(reader), nullptr, size) {}
-  BsdiffExtentFile(std::unique_ptr<ExtentWriter> writer, size_t size)
-      : BsdiffExtentFile(nullptr, std::move(writer), size) {}
-
-  ~BsdiffExtentFile() override = default;
-
-  bool Read(void* buf, size_t count, size_t* bytes_read) override {
-    TEST_AND_RETURN_FALSE(reader_->Read(buf, count));
-    *bytes_read = count;
-    offset_ += count;
-    return true;
-  }
-
-  bool Write(const void* buf, size_t count, size_t* bytes_written) override {
-    TEST_AND_RETURN_FALSE(writer_->Write(buf, count));
-    *bytes_written = count;
-    offset_ += count;
-    return true;
-  }
-
-  bool Seek(off_t pos) override {
-    if (reader_ != nullptr) {
-      TEST_AND_RETURN_FALSE(reader_->Seek(pos));
-      offset_ = pos;
-    } else {
-      // For writes technically there should be no change of position, or it
-      // should be equivalent of current offset.
-      TEST_AND_RETURN_FALSE(offset_ == static_cast<uint64_t>(pos));
-    }
-    return true;
-  }
-
-  bool Close() override { return true; }
-
-  bool GetSize(uint64_t* size) override {
-    *size = size_;
-    return true;
-  }
-
- private:
-  BsdiffExtentFile(std::unique_ptr<ExtentReader> reader,
-                   std::unique_ptr<ExtentWriter> writer,
-                   size_t size)
-      : reader_(std::move(reader)),
-        writer_(std::move(writer)),
-        size_(size),
-        offset_(0) {}
-
-  std::unique_ptr<ExtentReader> reader_;
-  std::unique_ptr<ExtentWriter> writer_;
-  uint64_t size_;
-  uint64_t offset_;
-
-  DISALLOW_COPY_AND_ASSIGN(BsdiffExtentFile);
-};
-
-}  // namespace
-
 bool DeltaPerformer::PerformSourceBsdiffOperation(
     const InstallOperation& operation, ErrorCode* error) {
   // Since we delete data off the beginning of the buffer as we use it,
@@ -1494,153 +953,24 @@
   if (operation.has_dst_length())
     TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
 
-  FileDescriptorPtr source_fd = ChooseSourceFD(operation, error);
-  TEST_AND_RETURN_FALSE(source_fd != nullptr);
-
-  auto reader = std::make_unique<DirectExtentReader>();
-  TEST_AND_RETURN_FALSE(
-      reader->Init(source_fd, operation.src_extents(), block_size_));
-  auto src_file = std::make_unique<BsdiffExtentFile>(
-      std::move(reader),
-      utils::BlocksInExtents(operation.src_extents()) * block_size_);
-
-  auto writer = std::make_unique<DirectExtentWriter>();
-  TEST_AND_RETURN_FALSE(
-      writer->Init(target_fd_, operation.dst_extents(), block_size_));
-  auto dst_file = std::make_unique<BsdiffExtentFile>(
-      std::move(writer),
-      utils::BlocksInExtents(operation.dst_extents()) * block_size_);
-
-  TEST_AND_RETURN_FALSE(bsdiff::bspatch(std::move(src_file),
-                                        std::move(dst_file),
-                                        buffer_.data(),
-                                        buffer_.size()) == 0);
+  TEST_AND_RETURN_FALSE(partition_writer_->PerformSourceBsdiffOperation(
+      operation, error, buffer_.data(), buffer_.size()));
   DiscardBuffer(true, buffer_.size());
   return true;
 }
 
-namespace {
-
-// A class to be passed to |puffpatch| for reading from |source_fd_| and writing
-// into |target_fd_|.
-class PuffinExtentStream : public puffin::StreamInterface {
- public:
-  // Constructor for creating a stream for reading from an |ExtentReader|.
-  PuffinExtentStream(std::unique_ptr<ExtentReader> reader, uint64_t size)
-      : PuffinExtentStream(std::move(reader), nullptr, size) {}
-
-  // Constructor for creating a stream for writing to an |ExtentWriter|.
-  PuffinExtentStream(std::unique_ptr<ExtentWriter> writer, uint64_t size)
-      : PuffinExtentStream(nullptr, std::move(writer), size) {}
-
-  ~PuffinExtentStream() override = default;
-
-  bool GetSize(uint64_t* size) const override {
-    *size = size_;
-    return true;
-  }
-
-  bool GetOffset(uint64_t* offset) const override {
-    *offset = offset_;
-    return true;
-  }
-
-  bool Seek(uint64_t offset) override {
-    if (is_read_) {
-      TEST_AND_RETURN_FALSE(reader_->Seek(offset));
-      offset_ = offset;
-    } else {
-      // For writes technically there should be no change of position, or it
-      // should equivalent of current offset.
-      TEST_AND_RETURN_FALSE(offset_ == offset);
-    }
-    return true;
-  }
-
-  bool Read(void* buffer, size_t count) override {
-    TEST_AND_RETURN_FALSE(is_read_);
-    TEST_AND_RETURN_FALSE(reader_->Read(buffer, count));
-    offset_ += count;
-    return true;
-  }
-
-  bool Write(const void* buffer, size_t count) override {
-    TEST_AND_RETURN_FALSE(!is_read_);
-    TEST_AND_RETURN_FALSE(writer_->Write(buffer, count));
-    offset_ += count;
-    return true;
-  }
-
-  bool Close() override { return true; }
-
- private:
-  PuffinExtentStream(std::unique_ptr<ExtentReader> reader,
-                     std::unique_ptr<ExtentWriter> writer,
-                     uint64_t size)
-      : reader_(std::move(reader)),
-        writer_(std::move(writer)),
-        size_(size),
-        offset_(0),
-        is_read_(reader_ ? true : false) {}
-
-  std::unique_ptr<ExtentReader> reader_;
-  std::unique_ptr<ExtentWriter> writer_;
-  uint64_t size_;
-  uint64_t offset_;
-  bool is_read_;
-
-  DISALLOW_COPY_AND_ASSIGN(PuffinExtentStream);
-};
-
-}  // namespace
-
 bool DeltaPerformer::PerformPuffDiffOperation(const InstallOperation& operation,
                                               ErrorCode* error) {
   // Since we delete data off the beginning of the buffer as we use it,
   // the data we need should be exactly at the beginning of the buffer.
   TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
   TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
-
-  FileDescriptorPtr source_fd = ChooseSourceFD(operation, error);
-  TEST_AND_RETURN_FALSE(source_fd != nullptr);
-
-  auto reader = std::make_unique<DirectExtentReader>();
-  TEST_AND_RETURN_FALSE(
-      reader->Init(source_fd, operation.src_extents(), block_size_));
-  puffin::UniqueStreamPtr src_stream(new PuffinExtentStream(
-      std::move(reader),
-      utils::BlocksInExtents(operation.src_extents()) * block_size_));
-
-  auto writer = std::make_unique<DirectExtentWriter>();
-  TEST_AND_RETURN_FALSE(
-      writer->Init(target_fd_, operation.dst_extents(), block_size_));
-  puffin::UniqueStreamPtr dst_stream(new PuffinExtentStream(
-      std::move(writer),
-      utils::BlocksInExtents(operation.dst_extents()) * block_size_));
-
-  const size_t kMaxCacheSize = 5 * 1024 * 1024;  // Total 5MB cache.
-  TEST_AND_RETURN_FALSE(puffin::PuffPatch(std::move(src_stream),
-                                          std::move(dst_stream),
-                                          buffer_.data(),
-                                          buffer_.size(),
-                                          kMaxCacheSize));
+  TEST_AND_RETURN_FALSE(partition_writer_->PerformPuffDiffOperation(
+      operation, error, buffer_.data(), buffer_.size()));
   DiscardBuffer(true, buffer_.size());
   return true;
 }
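The blob-based operations above (bsdiff and puffdiff, like REPLACE) all rely on the same bookkeeping: the operation's data blob must sit at the head of buffer_, and after the operation is applied the consumed bytes are discarded and buffer_offset_ advances. A small self-contained sketch of that invariant, with illustrative member names rather than the real DeltaPerformer internals:

// Sketch of the streaming-buffer invariant used by the blob-based operations:
// each operation's data must start at the head of the buffer, and applying it
// consumes exactly data_length bytes, advancing the global offset.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

struct BlobOp {
  uint64_t data_offset;  // Offset of this op's blob in the payload stream.
  uint64_t data_length;  // Size of the blob.
};

class BufferState {
 public:
  void Feed(const std::vector<uint8_t>& chunk) {
    buffer_.insert(buffer_.end(), chunk.begin(), chunk.end());
  }

  // Mirrors the TEST_AND_RETURN_FALSE checks above: the data we need must be
  // exactly at the beginning of the buffer.
  bool CanApply(const BlobOp& op) const {
    return buffer_offset_ == op.data_offset &&
           buffer_.size() >= op.data_length;
  }

  // Mirrors DiscardBuffer(true /* do_advance_offset */, ...).
  void Discard(uint64_t used) {
    buffer_.erase(buffer_.begin(),
                  buffer_.begin() + static_cast<std::ptrdiff_t>(used));
    buffer_offset_ += used;
  }

 private:
  std::vector<uint8_t> buffer_;
  uint64_t buffer_offset_ = 0;
};

int main() {
  BufferState state;
  state.Feed(std::vector<uint8_t>(16, 0xab));

  BlobOp op{/*data_offset=*/0, /*data_length=*/16};
  assert(state.CanApply(op));
  state.Discard(op.data_length);

  BlobOp next{/*data_offset=*/16, /*data_length=*/8};
  assert(!state.CanApply(next));  // Nothing fed yet for the next blob.
  return 0;
}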
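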
 
-bool DeltaPerformer::ExtractSignatureMessageFromOperation(
-    const InstallOperation& operation) {
-  if (operation.type() != InstallOperation::REPLACE ||
-      !manifest_.has_signatures_offset() ||
-      manifest_.signatures_offset() != operation.data_offset()) {
-    return false;
-  }
-  TEST_AND_RETURN_FALSE(manifest_.has_signatures_size() &&
-                        manifest_.signatures_size() == operation.data_length());
-  TEST_AND_RETURN_FALSE(ExtractSignatureMessage());
-  return true;
-}
-
 bool DeltaPerformer::ExtractSignatureMessage() {
   TEST_AND_RETURN_FALSE(signatures_message_data_.empty());
   TEST_AND_RETURN_FALSE(buffer_offset_ == manifest_.signatures_offset());
@@ -1648,20 +978,6 @@
   signatures_message_data_.assign(
       buffer_.begin(), buffer_.begin() + manifest_.signatures_size());
 
-  // Save the signature blob because if the update is interrupted after the
-  // download phase we don't go through this path anymore. Some alternatives to
-  // consider:
-  //
-  // 1. On resume, re-download the signature blob from the server and re-verify
-  // it.
-  //
-  // 2. Verify the signature as soon as it's received and don't checkpoint the
-  // blob and the signed sha-256 context.
-  LOG_IF(WARNING,
-         !prefs_->SetString(kPrefsUpdateStateSignatureBlob,
-                            signatures_message_data_))
-      << "Unable to store the signature blob.";
-
   LOG(INFO) << "Extracted signature data of size "
             << manifest_.signatures_size() << " at "
             << manifest_.signatures_offset();
@@ -1676,8 +992,8 @@
     return utils::ReadFile(public_key_path_, out_public_key);
   }
 
-  // If this is an official build then we are not allowed to use public key from
-  // Omaha response.
+  // If this is an official build then we are not allowed to use the public
+  // key from the Omaha response.
   if (!hardware_->IsOfficialBuild() && !install_plan_->public_key_rsa.empty()) {
     LOG(INFO) << "Verifying using public key from Omaha response.";
     return brillo::data_encoding::Base64Decode(install_plan_->public_key_rsa,
@@ -1710,19 +1026,21 @@
 }
 
 ErrorCode DeltaPerformer::ValidateManifest() {
-  // Perform assorted checks to sanity check the manifest, make sure it
+  // Perform assorted checks to validate the manifest, make sure it
   // matches data from other sources, and that it is a supported version.
-
-  bool has_old_fields =
-      (manifest_.has_old_kernel_info() || manifest_.has_old_rootfs_info());
-  for (const PartitionUpdate& partition : manifest_.partitions()) {
-    has_old_fields = has_old_fields || partition.has_old_partition_info();
-  }
+  bool has_old_fields = std::any_of(manifest_.partitions().begin(),
+                                    manifest_.partitions().end(),
+                                    [](const PartitionUpdate& partition) {
+                                      return partition.has_old_partition_info();
+                                    });
 
   // The presence of an old partition hash is the sole indicator for a delta
-  // update.
+  // update. Also, always treat the partial update as delta so that we can
+  // perform the minor version check correctly.
   InstallPayloadType actual_payload_type =
-      has_old_fields ? InstallPayloadType::kDelta : InstallPayloadType::kFull;
+      (has_old_fields || manifest_.partial_update())
+          ? InstallPayloadType::kDelta
+          : InstallPayloadType::kFull;
 
   if (payload_->type == InstallPayloadType::kUnknown) {
     LOG(INFO) << "Detected a '"
@@ -1737,8 +1055,8 @@
                << "' payload.";
     return ErrorCode::kPayloadMismatchedType;
   }
-
   // Check that the minor version is compatible.
+  // TODO(xunchang) increment minor version & add check for partial update
   if (actual_payload_type == InstallPayloadType::kFull) {
     if (manifest_.minor_version() != kFullPayloadMinorVersion) {
       LOG(ERROR) << "Manifest contains minor version "
@@ -1759,43 +1077,114 @@
     }
   }
 
-  if (major_payload_version_ != kChromeOSMajorPayloadVersion) {
-    if (manifest_.has_old_rootfs_info() || manifest_.has_new_rootfs_info() ||
-        manifest_.has_old_kernel_info() || manifest_.has_new_kernel_info() ||
-        manifest_.install_operations_size() != 0 ||
-        manifest_.kernel_install_operations_size() != 0) {
-      LOG(ERROR) << "Manifest contains deprecated field only supported in "
-                 << "major payload version 1, but the payload major version is "
-                 << major_payload_version_;
-      return ErrorCode::kPayloadMismatchedType;
+  ErrorCode error_code = CheckTimestampError();
+  if (error_code != ErrorCode::kSuccess) {
+    if (error_code == ErrorCode::kPayloadTimestampError) {
+      if (!hardware_->AllowDowngrade()) {
+        return ErrorCode::kPayloadTimestampError;
+      }
+      LOG(INFO) << "The current OS build allows downgrade, continuing to apply"
+                   " the payload with an older timestamp.";
+    } else {
+      LOG(ERROR) << "Timestamp check returned "
+                 << utils::ErrorCodeToString(error_code);
+      return error_code;
     }
   }
 
+  // TODO(crbug.com/37661) we should be adding more and more manifest checks,
+  // such as partition boundaries, etc.
+
+  return ErrorCode::kSuccess;
+}
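The payload-type inference above now treats the manifest as a delta when either any partition carries old-partition info or the manifest is flagged as a partial update. A tiny sketch of that rule, with plain structs standing in for the protobuf types:

// Sketch of the payload-type inference in ValidateManifest, using plain
// structs in place of the protobuf manifest.
#include <algorithm>
#include <cassert>
#include <vector>

enum class PayloadType { kFull, kDelta };

struct Partition {
  bool has_old_partition_info;
};

struct Manifest {
  std::vector<Partition> partitions;
  bool partial_update;
};

// A payload is a delta if any partition carries old-partition info, or if the
// manifest is flagged as a partial update.
PayloadType InferPayloadType(const Manifest& manifest) {
  bool has_old_fields = std::any_of(
      manifest.partitions.begin(), manifest.partitions.end(),
      [](const Partition& p) { return p.has_old_partition_info; });
  return (has_old_fields || manifest.partial_update) ? PayloadType::kDelta
                                                     : PayloadType::kFull;
}

int main() {
  assert(InferPayloadType({{{false}, {false}}, false}) == PayloadType::kFull);
  assert(InferPayloadType({{{false}, {true}}, false}) == PayloadType::kDelta);
  assert(InferPayloadType({{}, true}) == PayloadType::kDelta);
  return 0;
}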
+
+ErrorCode DeltaPerformer::CheckTimestampError() const {
+  bool is_partial_update =
+      manifest_.has_partial_update() && manifest_.partial_update();
+  const auto& partitions = manifest_.partitions();
+
+  // Check the version field for a given PartitionUpdate object. If a
+  // downgrade is detected, |downgrade_detected| is set. Returns kSuccess or
+  // kPayloadTimestampError if the caller should continue checking the next
+  // partition, or another error code if it should exit early due to errors.
+  auto&& timestamp_valid = [this](const PartitionUpdate& partition,
+                                  bool allow_empty_version,
+                                  bool* downgrade_detected) -> ErrorCode {
+    const auto& partition_name = partition.partition_name();
+    if (!partition.has_version()) {
+      if (hardware_->GetVersionForLogging(partition_name).empty()) {
+        LOG(INFO) << partition_name << " doesn't have a version, skipping "
+                  << "downgrade check.";
+        return ErrorCode::kSuccess;
+      }
+
+      if (allow_empty_version) {
+        return ErrorCode::kSuccess;
+      }
+      LOG(ERROR)
+          << "PartitionUpdate " << partition_name
+          << " doesn't have a version field. Not allowed in partial updates.";
+      return ErrorCode::kDownloadManifestParseError;
+    }
+
+    auto error_code =
+        hardware_->IsPartitionUpdateValid(partition_name, partition.version());
+    switch (error_code) {
+      case ErrorCode::kSuccess:
+        break;
+      case ErrorCode::kPayloadTimestampError:
+        *downgrade_detected = true;
+        LOG(WARNING) << "PartitionUpdate " << partition_name
+                     << " has an older version than partition on device.";
+        break;
+      default:
+        LOG(ERROR) << "IsPartitionUpdateValid(" << partition_name
+                   << ") returned" << utils::ErrorCodeToString(error_code);
+        break;
+    }
+    return error_code;
+  };
+
+  bool downgrade_detected = false;
+
+  if (is_partial_update) {
+    // For partial updates, all partitions MUST have valid timestamps, but
+    // max_timestamp can be empty.
+    for (const auto& partition : partitions) {
+      auto error_code = timestamp_valid(
+          partition, false /* allow_empty_version */, &downgrade_detected);
+      if (error_code != ErrorCode::kSuccess &&
+          error_code != ErrorCode::kPayloadTimestampError) {
+        return error_code;
+      }
+    }
+    if (downgrade_detected) {
+      return ErrorCode::kPayloadTimestampError;
+    }
+    return ErrorCode::kSuccess;
+  }
+
+  // For non-partial updates, check max_timestamp first.
   if (manifest_.max_timestamp() < hardware_->GetBuildTimestamp()) {
     LOG(ERROR) << "The current OS build timestamp ("
                << hardware_->GetBuildTimestamp()
                << ") is newer than the maximum timestamp in the manifest ("
                << manifest_.max_timestamp() << ")";
-    if (!hardware_->AllowDowngrade()) {
-      return ErrorCode::kPayloadTimestampError;
-    }
-    LOG(INFO) << "The current OS build allows downgrade, continuing to apply"
-                 " the payload with an older timestamp.";
+    return ErrorCode::kPayloadTimestampError;
   }
-
-  if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
-    if (manifest_.has_dynamic_partition_metadata()) {
-      LOG(ERROR)
-          << "Should not contain dynamic_partition_metadata for major version "
-          << kChromeOSMajorPayloadVersion
-          << ". Please use major version 2 or above.";
-      return ErrorCode::kPayloadMismatchedType;
+  // Otherwise... partitions can have empty timestamps.
+  for (const auto& partition : partitions) {
+    auto error_code = timestamp_valid(
+        partition, true /* allow_empty_version */, &downgrade_detected);
+    if (error_code != ErrorCode::kSuccess &&
+        error_code != ErrorCode::kPayloadTimestampError) {
+      return error_code;
     }
   }
-
-  // TODO(garnold) we should be adding more and more manifest checks, such as
-  // partition boundaries etc (see chromium-os:37661).
-
+  if (downgrade_detected) {
+    return ErrorCode::kPayloadTimestampError;
+  }
   return ErrorCode::kSuccess;
 }
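CheckTimestampError accumulates the downgrade result across all partitions and only reports kPayloadTimestampError at the end, while any other error aborts immediately. A compact sketch of that accumulate-then-decide flow, with stand-in enum and struct names:

// Compact sketch of the accumulate-then-decide flow in CheckTimestampError:
// hard errors abort immediately, downgrades are only reported after every
// partition has been inspected.
#include <cassert>
#include <string>
#include <vector>

enum class Code { kSuccess, kTimestampError, kManifestParseError };

struct PartitionCheck {
  std::string name;
  Code result;  // Outcome of the per-partition version check.
};

Code CheckAll(const std::vector<PartitionCheck>& checks) {
  bool downgrade_detected = false;
  for (const auto& check : checks) {
    if (check.result == Code::kTimestampError) {
      downgrade_detected = true;  // Remember, but keep checking the rest.
      continue;
    }
    if (check.result != Code::kSuccess) {
      return check.result;  // Hard errors short-circuit.
    }
  }
  return downgrade_detected ? Code::kTimestampError : Code::kSuccess;
}

int main() {
  assert(CheckAll({{"system", Code::kSuccess}}) == Code::kSuccess);
  assert(CheckAll({{"system", Code::kTimestampError},
                   {"vendor", Code::kSuccess}}) == Code::kTimestampError);
  assert(CheckAll({{"system", Code::kTimestampError},
                   {"vendor", Code::kManifestParseError}}) ==
         Code::kManifestParseError);
  return 0;
}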
 
@@ -1803,10 +1192,11 @@
     const InstallOperation& operation) {
   if (!operation.data_sha256_hash().size()) {
     if (!operation.data_length()) {
-      // Operations that do not have any data blob won't have any operation hash
-      // either. So, these operations are always considered validated since the
-      // metadata that contains all the non-data-blob portions of the operation
-      // has already been validated. This is true for both HTTP and HTTPS cases.
+      // Operations that do not have any data blob won't have any operation
+      // hash either. So, these operations are always considered validated
+      // since the metadata that contains all the non-data-blob portions of
+      // the operation has already been validated. This is true for both HTTP
+      // and HTTPS cases.
       return ErrorCode::kSuccess;
     }
 
@@ -1815,7 +1205,7 @@
     // corresponding update should have been produced with the operation
     // hashes. So if it happens it means either we've turned operation hash
     // generation off in DeltaDiffGenerator or it's a regression of some sort.
-    // One caveat though: The last operation is a dummy signature operation
+    // One caveat though: The last operation is an unused signature operation
     // that doesn't have a hash at the time the manifest is created. So we
     // should not complaint about that operation. This operation can be
     // recognized by the fact that it's offset is mentioned in the manifest.
@@ -1890,12 +1280,7 @@
       ErrorCode::kPayloadHashMismatchError,
       payload_hash_calculator_.raw_hash() == update_check_response_hash);
 
-  TEST_AND_RETURN_VAL(ErrorCode::kSignedDeltaPayloadExpectedError,
-                      !signatures_message_data_.empty());
-  brillo::Blob hash_data = signed_hash_calculator_.raw_hash();
-  TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
-                      hash_data.size() == kSHA256Size);
-
+  // NOLINTNEXTLINE(whitespace/braces)
   auto [payload_verifier, perform_verification] = CreatePayloadVerifier();
   if (!perform_verification) {
     LOG(WARNING) << "Not verifying signed delta payload -- missing public key.";
@@ -1905,6 +1290,13 @@
     LOG(ERROR) << "Failed to create the payload verifier.";
     return ErrorCode::kDownloadPayloadPubKeyVerificationError;
   }
+
+  TEST_AND_RETURN_VAL(ErrorCode::kSignedDeltaPayloadExpectedError,
+                      !signatures_message_data_.empty());
+  brillo::Blob hash_data = signed_hash_calculator_.raw_hash();
+  TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
+                      hash_data.size() == kSHA256Size);
+
   if (!payload_verifier->VerifySignature(signatures_message_data_, hash_data)) {
     // The autoupdate_CatchBadSignatures test checks for this string
     // in log-files. Keep in sync.
@@ -1944,13 +1336,13 @@
     return false;
 
   int64_t resumed_update_failures;
-  // Note that storing this value is optional, but if it is there it should not
-  // be more than the limit.
+  // Note that storing this value is optional, but if it is there it should
+  // not be more than the limit.
   if (prefs->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures) &&
       resumed_update_failures > kMaxResumedUpdateFailures)
     return false;
 
-  // Sanity check the rest.
+  // Validate the rest.
   int64_t next_data_offset = -1;
   if (!(prefs->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset) &&
         next_data_offset >= 0))
@@ -2001,18 +1393,38 @@
   return true;
 }
 
-bool DeltaPerformer::CheckpointUpdateProgress(bool force) {
+bool DeltaPerformer::ShouldCheckpoint() {
   base::TimeTicks curr_time = base::TimeTicks::Now();
-  if (force || curr_time > update_checkpoint_time_) {
+  if (curr_time > update_checkpoint_time_) {
     update_checkpoint_time_ = curr_time + update_checkpoint_wait_;
-  } else {
+    return true;
+  }
+  return false;
+}
+
+bool DeltaPerformer::CheckpointUpdateProgress(bool force) {
+  if (!force && !ShouldCheckpoint()) {
     return false;
   }
-
   Terminator::set_exit_blocked(true);
-  if (last_updated_buffer_offset_ != buffer_offset_) {
+  if (last_updated_operation_num_ != next_operation_num_ || force) {
     // Resets the progress in case we die in the middle of the state update.
     ResetUpdateProgress(prefs_, true);
+    if (!signatures_message_data_.empty()) {
+      // Save the signature blob because if the update is interrupted after the
+      // download phase we don't go through this path anymore. Some alternatives
+      // to consider:
+      //
+      // 1. On resume, re-download the signature blob from the server and
+      // re-verify it.
+      //
+      // 2. Verify the signature as soon as it's received and don't checkpoint
+      // the blob and the signed sha-256 context.
+      LOG_IF(WARNING,
+             !prefs_->SetString(kPrefsUpdateStateSignatureBlob,
+                                signatures_message_data_))
+          << "Unable to store the signature blob.";
+    }
     TEST_AND_RETURN_FALSE(prefs_->SetString(
         kPrefsUpdateStateSHA256Context, payload_hash_calculator_.GetContext()));
     TEST_AND_RETURN_FALSE(
@@ -2020,12 +1432,13 @@
                           signed_hash_calculator_.GetContext()));
     TEST_AND_RETURN_FALSE(
         prefs_->SetInt64(kPrefsUpdateStateNextDataOffset, buffer_offset_));
-    last_updated_buffer_offset_ = buffer_offset_;
+    last_updated_operation_num_ = next_operation_num_;
 
     if (next_operation_num_ < num_total_operations_) {
       size_t partition_index = current_partition_;
-      while (next_operation_num_ >= acc_num_operations_[partition_index])
+      while (next_operation_num_ >= acc_num_operations_[partition_index]) {
         partition_index++;
+      }
       const size_t partition_operation_num =
           next_operation_num_ -
           (partition_index ? acc_num_operations_[partition_index - 1] : 0);
@@ -2037,6 +1450,14 @@
       TEST_AND_RETURN_FALSE(
           prefs_->SetInt64(kPrefsUpdateStateNextDataLength, 0));
     }
+    if (partition_writer_) {
+      partition_writer_->CheckpointUpdateProgress(GetPartitionOperationNum());
+    } else {
+      CHECK_EQ(next_operation_num_, num_total_operations_)
+          << "Partition writer is null, we are expected to finish all "
+             "operations: "
+          << next_operation_num_ << "/" << num_total_operations_;
+    }
   }
   TEST_AND_RETURN_FALSE(
       prefs_->SetInt64(kPrefsUpdateStateNextOperation, next_operation_num_));
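Checkpointing is now keyed off the operation index, and the per-partition operation number passed to the partition writer is derived from acc_num_operations_, the cumulative per-partition operation counts. A short worked example of that mapping, with made-up counts (the real code starts the search at current_partition_ rather than 0):

// Worked example of the acc_num_operations_ mapping used when checkpointing:
// a global operation index is translated to (partition index, operation index
// within that partition). The counts below are made up for illustration.
#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

// Assumes next_operation_num is smaller than the last cumulative count.
std::pair<size_t, size_t> MapOperation(const std::vector<size_t>& acc_num_ops,
                                       size_t next_operation_num) {
  size_t partition_index = 0;
  while (next_operation_num >= acc_num_ops[partition_index]) {
    partition_index++;
  }
  const size_t partition_operation_num =
      next_operation_num -
      (partition_index ? acc_num_ops[partition_index - 1] : 0);
  return {partition_index, partition_operation_num};
}

int main() {
  // Three partitions with 10, 15 and 15 operations -> cumulative {10, 25, 40}.
  const std::vector<size_t> acc = {10, 25, 40};
  assert(MapOperation(acc, 0) == std::make_pair(size_t{0}, size_t{0}));
  assert(MapOperation(acc, 9) == std::make_pair(size_t{0}, size_t{9}));
  assert(MapOperation(acc, 10) == std::make_pair(size_t{1}, size_t{0}));
  assert(MapOperation(acc, 27) == std::make_pair(size_t{2}, size_t{2}));
  return 0;
}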
@@ -2104,4 +1525,26 @@
   return true;
 }
 
+bool DeltaPerformer::IsDynamicPartition(const std::string& part_name,
+                                        uint32_t slot) {
+  return boot_control_->GetDynamicPartitionControl()->IsDynamicPartition(
+      part_name, slot);
+}
+
+std::unique_ptr<PartitionWriter> DeltaPerformer::CreatePartitionWriter(
+    const PartitionUpdate& partition_update,
+    const InstallPlan::Partition& install_part,
+    DynamicPartitionControlInterface* dynamic_control,
+    size_t block_size,
+    bool is_interactive,
+    bool is_dynamic_partition) {
+  return partition_writer::CreatePartitionWriter(
+      partition_update,
+      install_part,
+      dynamic_control,
+      block_size_,
+      interactive_,
+      IsDynamicPartition(install_part.name, install_plan_->target_slot));
+}
+
 }  // namespace chromeos_update_engine
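The header changes that follow expose CreatePartitionWriter and ShouldCheckpoint as protected virtuals so tests can substitute deterministic behavior. A self-contained sketch of that "protected virtual seam" pattern, with made-up class names rather than update_engine code:

// Production code calls the virtual hooks; a test subclass overrides them to
// force deterministic behaviour. Class names are invented for this sketch.
#include <iostream>
#include <memory>
#include <string>

class Writer {
 public:
  virtual ~Writer() = default;
  virtual std::string Name() const { return "real-writer"; }
};

class Performer {
 public:
  virtual ~Performer() = default;

  bool Run() {
    auto writer = CreateWriter();
    std::cout << "using " << writer->Name() << "\n";
    if (ShouldCheckpoint()) {
      std::cout << "checkpointing progress\n";
    }
    return true;
  }

 protected:
  // Overridable seams, mirroring CreatePartitionWriter / ShouldCheckpoint.
  virtual std::unique_ptr<Writer> CreateWriter() {
    return std::make_unique<Writer>();
  }
  virtual bool ShouldCheckpoint() { return false; }  // Time-based in production.
};

class FakeWriter : public Writer {
 public:
  std::string Name() const override { return "fake-writer"; }
};

class TestPerformer : public Performer {
 protected:
  std::unique_ptr<Writer> CreateWriter() override {
    return std::make_unique<FakeWriter>();
  }
  bool ShouldCheckpoint() override { return true; }  // Always checkpoint in tests.
};

int main() {
  TestPerformer performer;
  return performer.Run() ? 0 : 1;
}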
diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h
index 01fcc5c..c54316b 100644
--- a/payload_consumer/delta_performer.h
+++ b/payload_consumer/delta_performer.h
@@ -35,6 +35,7 @@
 #include "update_engine/payload_consumer/file_descriptor.h"
 #include "update_engine/payload_consumer/file_writer.h"
 #include "update_engine/payload_consumer/install_plan.h"
+#include "update_engine/payload_consumer/partition_writer.h"
 #include "update_engine/payload_consumer/payload_metadata.h"
 #include "update_engine/payload_consumer/payload_verifier.h"
 #include "update_engine/update_metadata.pb.h"
@@ -48,7 +49,6 @@
 
 // This class performs the actions in a delta update synchronously. The delta
 // update itself should be passed in in chunks as it is received.
-
 class DeltaPerformer : public FileWriter {
  public:
   // Defines the granularity of progress logging in terms of how many "completed
@@ -78,7 +78,9 @@
         download_delegate_(download_delegate),
         install_plan_(install_plan),
         payload_(payload),
-        interactive_(interactive) {}
+        interactive_(interactive) {
+    CHECK(install_plan_);
+  }
 
   // FileWriter's Write implementation where caller doesn't care about
   // error codes.
@@ -100,10 +102,6 @@
   // work. Returns whether the required file descriptors were successfully open.
   bool OpenCurrentPartition();
 
-  // Attempt to open the error-corrected device for the current partition.
-  // Returns whether the operation succeeded.
-  bool OpenCurrentECCPartition();
-
   // Closes the current partition file descriptors if open. Returns 0 on success
   // or -errno on error.
   int CloseCurrentPartition();
@@ -172,9 +170,11 @@
   // Return true if header parsing is finished and no errors occurred.
   bool IsHeaderParsed() const;
 
-  // Returns the delta minor version. If this value is defined in the manifest,
-  // it returns that value, otherwise it returns the default value.
-  uint32_t GetMinorVersion() const;
+  // Checkpoints the update progress into persistent storage to allow this
+  // update attempt to be resumed after reboot.
+  // If |force| is false, checkpoint may be throttled.
+  // Exposed for testing purposes.
+  bool CheckpointUpdateProgress(bool force);
 
   // Compare |calculated_hash| with source hash in |operation|, return false and
   // dump hash and set |error| if don't match.
@@ -202,14 +202,33 @@
       const std::string& update_check_response_hash,
       uint64_t* required_size);
 
+ protected:
+  // Exposed as virtual for testing purposes.
+  virtual std::unique_ptr<PartitionWriter> CreatePartitionWriter(
+      const PartitionUpdate& partition_update,
+      const InstallPlan::Partition& install_part,
+      DynamicPartitionControlInterface* dynamic_control,
+      size_t block_size,
+      bool is_interactive,
+      bool is_dynamic_partition);
+
+  // Returns true if enough time has passed and a checkpoint should be saved.
+  // Exposed for unittest purposes.
+  virtual bool ShouldCheckpoint();
+
  private:
   friend class DeltaPerformerTest;
   friend class DeltaPerformerIntegrationTest;
   FRIEND_TEST(DeltaPerformerTest, BrilloMetadataSignatureSizeTest);
   FRIEND_TEST(DeltaPerformerTest, BrilloParsePayloadMetadataTest);
-  FRIEND_TEST(DeltaPerformerTest, ChooseSourceFDTest);
   FRIEND_TEST(DeltaPerformerTest, UsePublicKeyFromResponse);
 
+  // Obtain the operation index within the current partition. If all
+  // operations for the current partition are finished, return the number of
+  // operations. This is mostly intended to be used by
+  // CheckpointUpdateProgress, where the partition writer needs to know the
+  // current operation number to properly checkpoint the update.
+  size_t GetPartitionOperationNum();
+
   // Parse and move the update instructions of all partitions into our local
   // |partitions_| variable based on the version of the payload. Requires the
   // manifest to be parsed and valid.
@@ -254,8 +273,6 @@
   // set even if it fails.
   bool PerformReplaceOperation(const InstallOperation& operation);
   bool PerformZeroOrDiscardOperation(const InstallOperation& operation);
-  bool PerformMoveOperation(const InstallOperation& operation);
-  bool PerformBsdiffOperation(const InstallOperation& operation);
   bool PerformSourceCopyOperation(const InstallOperation& operation,
                                   ErrorCode* error);
   bool PerformSourceBsdiffOperation(const InstallOperation& operation,
@@ -263,18 +280,6 @@
   bool PerformPuffDiffOperation(const InstallOperation& operation,
                                 ErrorCode* error);
 
-  // For a given operation, choose the source fd to be used (raw device or error
-  // correction device) based on the source operation hash.
-  // Returns nullptr if the source hash mismatch cannot be corrected, and set
-  // the |error| accordingly.
-  FileDescriptorPtr ChooseSourceFD(const InstallOperation& operation,
-                                   ErrorCode* error);
-
-  // Extracts the payload signature message from the blob on the |operation| if
-  // the offset matches the one specified by the manifest. Returns whether the
-  // signature was extracted.
-  bool ExtractSignatureMessageFromOperation(const InstallOperation& operation);
-
   // Extracts the payload signature message from the current |buffer_| if the
   // offset matches the one specified by the manifest. Returns whether the
   // signature was extracted.
@@ -287,11 +292,6 @@
   // accordingly.
   void DiscardBuffer(bool do_advance_offset, size_t signed_hash_buffer_size);
 
-  // Checkpoints the update progress into persistent storage to allow this
-  // update attempt to be resumed after reboot.
-  // If |force| is false, checkpoint may be throttled.
-  bool CheckpointUpdateProgress(bool force);
-
   // Primes the required update state. Returns true if the update state was
   // successfully initialized to a saved resume state or if the update is a new
   // update. Returns false otherwise.
@@ -315,6 +315,18 @@
   // Also see comment for the static PreparePartitionsForUpdate().
   bool PreparePartitionsForUpdate(uint64_t* required_size);
 
+  // Check if the current manifest contains timestamp errors.
+  // Return:
+  // - kSuccess if the update is valid.
+  // - kPayloadTimestampError if a downgrade is detected.
+  // - kDownloadManifestParseError if a partition in a partial update is
+  //   missing its version field.
+  // - Other error values if the source of error is known, or kError for
+  //   a generic error on the device.
+  ErrorCode CheckTimestampError() const;
+
+  // Check if partition `part_name` is a dynamic partition.
+  bool IsDynamicPartition(const std::string& part_name, uint32_t slot);
+
   // Update Engine preference store.
   PrefsInterface* prefs_;
 
@@ -332,34 +344,6 @@
   // Pointer to the current payload in install_plan_.payloads.
   InstallPlan::Payload* payload_{nullptr};
 
-  // File descriptor of the source partition. Only set while updating a
-  // partition when using a delta payload.
-  FileDescriptorPtr source_fd_{nullptr};
-
-  // File descriptor of the error corrected source partition. Only set while
-  // updating partition using a delta payload for a partition where error
-  // correction is available. The size of the error corrected device is smaller
-  // than the underlying raw device, since it doesn't include the error
-  // correction blocks.
-  FileDescriptorPtr source_ecc_fd_{nullptr};
-
-  // The total number of operations that failed source hash verification but
-  // passed after falling back to the error-corrected |source_ecc_fd_| device.
-  uint64_t source_ecc_recovered_failures_{0};
-
-  // Whether opening the current partition as an error-corrected device failed.
-  // Used to avoid re-opening the same source partition if it is not actually
-  // error corrected.
-  bool source_ecc_open_failure_{false};
-
-  // File descriptor of the target partition. Only set while performing the
-  // operations of a given partition.
-  FileDescriptorPtr target_fd_{nullptr};
-
-  // Paths the |source_fd_| and |target_fd_| refer to.
-  std::string source_path_;
-  std::string target_path_;
-
   PayloadMetadata payload_metadata_;
 
   // Parsed manifest. Set after enough bytes to parse the manifest were
@@ -380,28 +364,28 @@
   // otherwise 0.
   size_t num_total_operations_{0};
 
-  // The list of partitions to update as found in the manifest major version 2.
-  // When parsing an older manifest format, the information is converted over to
-  // this format instead.
+  // The list of partitions to update as found in the manifest major
+  // version 2. When parsing an older manifest format, the information is
+  // converted over to this format instead.
   std::vector<PartitionUpdate> partitions_;
 
   // Index in the list of partitions (|partitions_| member) of the current
   // partition being processed.
   size_t current_partition_{0};
 
-  // Index of the next operation to perform in the manifest. The index is linear
-  // on the total number of operation on the manifest.
+  // Index of the next operation to perform in the manifest. The index is
+  // linear on the total number of operation on the manifest.
   size_t next_operation_num_{0};
 
   // A buffer used for accumulating downloaded data. Initially, it stores the
-  // payload metadata; once that's downloaded and parsed, it stores data for the
-  // next update operation.
+  // payload metadata; once that's downloaded and parsed, it stores data for
+  // the next update operation.
   brillo::Blob buffer_;
   // Offset of buffer_ in the binary blobs section of the update.
   uint64_t buffer_offset_{0};
 
-  // Last |buffer_offset_| value updated as part of the progress update.
-  uint64_t last_updated_buffer_offset_{std::numeric_limits<uint64_t>::max()};
+  // Last |next_operation_num_| value updated as part of the progress update.
+  uint64_t last_updated_operation_num_{std::numeric_limits<uint64_t>::max()};
 
   // The block size (parsed from the manifest).
   uint32_t block_size_{0};
@@ -437,8 +421,9 @@
   // If |true|, the update is user initiated (vs. periodic update checks).
   bool interactive_{false};
 
-  // The timeout after which we should force emitting a progress log (constant),
-  // and the actual point in time for the next forced log to be emitted.
+  // The timeout after which we should force emitting a progress log
+  // (constant), and the actual point in time for the next forced log to be
+  // emitted.
   const base::TimeDelta forced_progress_log_wait_{
       base::TimeDelta::FromSeconds(kProgressLogTimeoutSeconds)};
   base::TimeTicks forced_progress_log_time_;
@@ -449,6 +434,8 @@
       base::TimeDelta::FromSeconds(kCheckpointFrequencySeconds)};
   base::TimeTicks update_checkpoint_time_;
 
+  std::unique_ptr<PartitionWriter> partition_writer_;
+
   DISALLOW_COPY_AND_ASSIGN(DeltaPerformer);
 };
 
diff --git a/payload_consumer/delta_performer_fuzzer.cc b/payload_consumer/delta_performer_fuzzer.cc
new file mode 100644
index 0000000..0ce5081
--- /dev/null
+++ b/payload_consumer/delta_performer_fuzzer.cc
@@ -0,0 +1,105 @@
+//
+// Copyright 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <string>
+
+#include <base/logging.h>
+#include <fuzzer/FuzzedDataProvider.h>
+
+#include "update_engine/common/download_action.h"
+#include "update_engine/common/fake_boot_control.h"
+#include "update_engine/common/fake_hardware.h"
+#include "update_engine/common/prefs.h"
+#include "update_engine/payload_consumer/delta_performer.h"
+#include "update_engine/payload_consumer/install_plan.h"
+
+namespace chromeos_update_engine {
+
+class FakeDownloadActionDelegate : public DownloadActionDelegate {
+ public:
+  FakeDownloadActionDelegate() = default;
+  ~FakeDownloadActionDelegate() = default;
+
+  // DownloadActionDelegate overrides;
+  void BytesReceived(uint64_t bytes_progressed,
+                     uint64_t bytes_received,
+                     uint64_t total) override{};
+
+  bool ShouldCancel(ErrorCode* cancel_reason) override { return false; };
+
+  void DownloadComplete() override{};
+
+  DISALLOW_COPY_AND_ASSIGN(FakeDownloadActionDelegate);
+};
+
+void FuzzDeltaPerformer(const uint8_t* data, size_t size) {
+  MemoryPrefs prefs;
+  FakeBootControl boot_control;
+  FakeHardware hardware;
+  FakeDownloadActionDelegate download_action_delegate;
+
+  FuzzedDataProvider data_provider(data, size);
+
+  InstallPlan install_plan{
+      .target_slot = 1,
+      .partitions = {InstallPlan::Partition{
+          .source_path = "/dev/zero",
+          .source_size = 4096,
+          .target_path = "/dev/null",
+          .target_size = 4096,
+      }},
+      .hash_checks_mandatory = true,
+  };
+
+  InstallPlan::Payload payload{
+      .size = data_provider.ConsumeIntegralInRange<uint64_t>(0, 10000),
+      .metadata_size = data_provider.ConsumeIntegralInRange<uint64_t>(0, 1000),
+      .hash = data_provider.ConsumeBytes<uint8_t>(32),
+      .type = static_cast<InstallPayloadType>(
+          data_provider.ConsumeIntegralInRange(0, 3)),
+      .already_applied = data_provider.ConsumeBool(),
+  };
+
+  DeltaPerformer performer(&prefs,
+                           &boot_control,
+                           &hardware,
+                           &download_action_delegate,
+                           &install_plan,
+                           &payload,
+                           data_provider.ConsumeBool());
+  do {
+    auto chunk_size = data_provider.ConsumeIntegralInRange<size_t>(0, 100);
+    auto data = data_provider.ConsumeBytes<uint8_t>(chunk_size);
+    if (!performer.Write(data.data(), data.size()))
+      break;
+  } while (data_provider.remaining_bytes() > 0);
+}
+
+}  // namespace chromeos_update_engine
+
+class Environment {
+ public:
+  Environment() { logging::SetMinLogLevel(logging::LOG_FATAL); }
+};
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+  if (size > 1000000) {
+    return 0;
+  }
+
+  static Environment env;
+  chromeos_update_engine::FuzzDeltaPerformer(data, size);
+  return 0;
+}
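For local debugging it can help to replay a saved input through the same entry point without the libFuzzer runtime. A hypothetical standalone driver, not part of this change, could look like the following:

// Hypothetical driver for replaying a saved fuzzer input through
// LLVMFuzzerTestOneInput without the libFuzzer runtime; a local-debugging
// convenience sketch only.
#include <cstddef>
#include <cstdint>
#include <fstream>
#include <iostream>
#include <iterator>
#include <vector>

extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size);

int main(int argc, char** argv) {
  if (argc != 2) {
    std::cerr << "usage: " << argv[0] << " <input-file>\n";
    return 1;
  }
  std::ifstream input(argv[1], std::ios::binary);
  if (!input) {
    std::cerr << "failed to open " << argv[1] << "\n";
    return 1;
  }
  // Read the whole file and hand it to the fuzz entry point once.
  std::vector<uint8_t> data((std::istreambuf_iterator<char>(input)),
                            std::istreambuf_iterator<char>());
  return LLVMFuzzerTestOneInput(data.data(), data.size());
}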
diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc
index 4797137..4fab975 100644
--- a/payload_consumer/delta_performer_integration_test.cc
+++ b/payload_consumer/delta_performer_integration_test.cc
@@ -25,8 +25,10 @@
 
 #include <base/files/file_path.h>
 #include <base/files/file_util.h>
+#include <base/stl_util.h>
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
+#include <gmock/gmock-matchers.h>
 #include <google/protobuf/repeated_field.h>
 #include <gtest/gtest.h>
 #include <openssl/pem.h>
@@ -34,10 +36,13 @@
 #include "update_engine/common/constants.h"
 #include "update_engine/common/fake_boot_control.h"
 #include "update_engine/common/fake_hardware.h"
+#include "update_engine/common/fake_prefs.h"
+#include "update_engine/common/hardware_interface.h"
+#include "update_engine/common/mock_download_action.h"
 #include "update_engine/common/mock_prefs.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/mock_download_action.h"
+#include "update_engine/payload_consumer/install_plan.h"
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_consumer/payload_metadata.h"
 #include "update_engine/payload_consumer/payload_verifier.h"
@@ -47,13 +52,18 @@
 
 namespace chromeos_update_engine {
 
+using std::list;
 using std::string;
+using std::unique_ptr;
 using std::vector;
 using test_utils::GetBuildArtifactsPath;
 using test_utils::kRandomString;
 using test_utils::ScopedLoopMounter;
 using test_utils::System;
 using testing::_;
+using testing::IsEmpty;
+using testing::NiceMock;
+using testing::Not;
 using testing::Return;
 
 extern const char* kUnittestPrivateKeyPath;
@@ -71,21 +81,24 @@
 
 namespace {
 struct DeltaState {
-  string a_img;
-  string b_img;
-  string result_img;
+  unique_ptr<ScopedTempFile> a_img;
+  unique_ptr<ScopedTempFile> b_img;
+  unique_ptr<ScopedTempFile> result_img;
   size_t image_size;
 
-  string delta_path;
+  unique_ptr<ScopedTempFile> delta_file;
+  // The in-memory copy of delta file.
+  brillo::Blob delta;
   uint64_t metadata_size;
+  uint32_t metadata_signature_size;
 
-  string old_kernel;
+  unique_ptr<ScopedTempFile> old_kernel;
   brillo::Blob old_kernel_data;
 
-  string new_kernel;
+  unique_ptr<ScopedTempFile> new_kernel;
   brillo::Blob new_kernel_data;
 
-  string result_kernel;
+  unique_ptr<ScopedTempFile> result_kernel;
   brillo::Blob result_kernel_data;
   size_t kernel_size;
 
@@ -93,9 +106,6 @@
   // the DeltaPerformer.
   InstallPlan install_plan;
 
-  // The in-memory copy of delta file.
-  brillo::Blob delta;
-
   // Mock and fake instances used by the delta performer.
   FakeBootControl fake_boot_control_;
   FakeHardware fake_hardware_;
@@ -122,7 +132,41 @@
 
 }  // namespace
 
-class DeltaPerformerIntegrationTest : public ::testing::Test {};
+class DeltaPerformerIntegrationTest : public ::testing::Test {
+ public:
+  void RunManifestValidation(const DeltaArchiveManifest& manifest,
+                             uint64_t major_version,
+                             ErrorCode expected) {
+    FakePrefs prefs;
+    InstallPlan::Payload payload;
+    InstallPlan install_plan;
+    DeltaPerformer performer{&prefs,
+                             nullptr,
+                             &fake_hardware_,
+                             nullptr,
+                             &install_plan,
+                             &payload,
+                             false /* interactive */};
+    // Delta performer will treat the manifest as a kDelta payload
+    // if it's a partial update.
+    payload.type = manifest.partial_update() ? InstallPayloadType::kDelta
+                                             : InstallPayloadType::kFull;
+
+    // The Manifest we are validating.
+    performer.manifest_.CopyFrom(manifest);
+    performer.major_payload_version_ = major_version;
+
+    EXPECT_EQ(expected, performer.ValidateManifest());
+  }
+  void AddPartition(DeltaArchiveManifest* manifest,
+                    string name,
+                    int timestamp) {
+    auto& partition = *manifest->add_partitions();
+    partition.set_version(std::to_string(timestamp));
+    partition.set_partition_name(name);
+  }
+  FakeHardware fake_hardware_;
+};
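A hypothetical test built on the RunManifestValidation and AddPartition helpers above might look like the following. The test name, version values, major version 2, and the expected kSuccess are assumptions (they presume FakeHardware's defaults accept the given timestamps and partition versions); this is not one of the tests added by this change.

// Hypothetical usage of the fixture helpers above; values and the expected
// result are assumptions about FakeHardware defaults, for illustration only.
TEST_F(DeltaPerformerIntegrationTest, HypotheticalFullManifestValidation) {
  DeltaArchiveManifest manifest;
  manifest.set_minor_version(kFullPayloadMinorVersion);
  manifest.set_max_timestamp(1);  // Assumed >= FakeHardware's build timestamp.
  AddPartition(&manifest, "system", 1);
  AddPartition(&manifest, "product", 1);

  // Major payload version 2 is assumed to be supported here.
  RunManifestValidation(manifest, 2, ErrorCode::kSuccess);
}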
 
 static void CompareFilesByBlock(const string& a_file,
                                 const string& b_file,
@@ -187,15 +231,18 @@
   size_t signature_size;
   ASSERT_TRUE(PayloadSigner::GetMaximumSignatureSize(private_key_path,
                                                      &signature_size));
-  brillo::Blob hash;
+  brillo::Blob metadata_hash, payload_hash;
   ASSERT_TRUE(PayloadSigner::HashPayloadForSigning(
-      payload_path, {signature_size}, &hash, nullptr));
-  brillo::Blob signature;
-  ASSERT_TRUE(PayloadSigner::SignHash(hash, private_key_path, &signature));
+      payload_path, {signature_size}, &payload_hash, &metadata_hash));
+  brillo::Blob metadata_signature, payload_signature;
+  ASSERT_TRUE(PayloadSigner::SignHash(
+      payload_hash, private_key_path, &payload_signature));
+  ASSERT_TRUE(PayloadSigner::SignHash(
+      metadata_hash, private_key_path, &metadata_signature));
   ASSERT_TRUE(PayloadSigner::AddSignatureToPayload(payload_path,
                                                    {signature_size},
-                                                   {signature},
-                                                   {},
+                                                   {payload_signature},
+                                                   {metadata_signature},
                                                    payload_path,
                                                    out_metadata_size));
   EXPECT_TRUE(PayloadSigner::VerifySignedPayload(
@@ -216,39 +263,56 @@
   }
   string signature_size_string = base::JoinString(signature_size_strings, ":");
 
-  test_utils::ScopedTempFile hash_file("hash.XXXXXX");
+  ScopedTempFile hash_file("hash.XXXXXX"), metadata_hash_file("hash.XXXXXX");
   string delta_generator_path = GetBuildArtifactsPath("delta_generator");
   ASSERT_EQ(0,
             System(base::StringPrintf(
-                "%s -in_file=%s -signature_size=%s -out_hash_file=%s",
+                "%s -in_file=%s -signature_size=%s -out_hash_file=%s "
+                "-out_metadata_hash_file=%s",
                 delta_generator_path.c_str(),
                 payload_path.c_str(),
                 signature_size_string.c_str(),
-                hash_file.path().c_str())));
+                hash_file.path().c_str(),
+                metadata_hash_file.path().c_str())));
 
   // Sign the hash with all private keys.
-  vector<test_utils::ScopedTempFile> sig_files;
-  vector<string> sig_file_paths;
+  list<ScopedTempFile> sig_files, metadata_sig_files;
+  vector<string> sig_file_paths, metadata_sig_file_paths;
   for (const auto& key_path : private_key_paths) {
     brillo::Blob hash, signature;
     ASSERT_TRUE(utils::ReadFile(hash_file.path(), &hash));
     ASSERT_TRUE(PayloadSigner::SignHash(hash, key_path, &signature));
 
-    test_utils::ScopedTempFile sig_file("signature.XXXXXX");
-    ASSERT_TRUE(test_utils::WriteFileVector(sig_file.path(), signature));
-    sig_file_paths.push_back(sig_file.path());
-    sig_files.push_back(std::move(sig_file));
+    sig_files.emplace_back("signature.XXXXXX");
+    ASSERT_TRUE(
+        test_utils::WriteFileVector(sig_files.back().path(), signature));
+    sig_file_paths.push_back(sig_files.back().path());
+
+    brillo::Blob metadata_hash, metadata_signature;
+    ASSERT_TRUE(utils::ReadFile(metadata_hash_file.path(), &metadata_hash));
+    ASSERT_TRUE(
+        PayloadSigner::SignHash(metadata_hash, key_path, &metadata_signature));
+
+    metadata_sig_files.emplace_back("metadata_signature.XXXXXX");
+    ASSERT_TRUE(test_utils::WriteFileVector(metadata_sig_files.back().path(),
+                                            metadata_signature));
+    metadata_sig_file_paths.push_back(metadata_sig_files.back().path());
   }
   string sig_files_string = base::JoinString(sig_file_paths, ":");
+  string metadata_sig_files_string =
+      base::JoinString(metadata_sig_file_paths, ":");
 
   // Add the signature to the payload.
   ASSERT_EQ(0,
             System(base::StringPrintf("%s --signature_size=%s -in_file=%s "
-                                      "-payload_signature_file=%s -out_file=%s",
+                                      "-payload_signature_file=%s "
+                                      "-metadata_signature_file=%s "
+                                      "-out_file=%s",
                                       delta_generator_path.c_str(),
                                       signature_size_string.c_str(),
                                       payload_path.c_str(),
                                       sig_files_string.c_str(),
+                                      metadata_sig_files_string.c_str(),
                                       payload_path.c_str())));
 
   int verify_result = System(base::StringPrintf("%s -in_file=%s -public_key=%s",
@@ -314,7 +378,7 @@
         GetBuildArtifactsPath(kUnittestPrivateKey2Path));
   }
 
-  std::string public_key;
+  string public_key;
   if (signature_test == kSignatureGeneratedShellRotateCl2) {
     public_key = GetBuildArtifactsPath(kUnittestPublicKey2Path);
   } else if (signature_test == kSignatureGeneratedShellECKey) {
@@ -330,49 +394,27 @@
 
 static void GenerateDeltaFile(bool full_kernel,
                               bool full_rootfs,
-                              bool noop,
                               ssize_t chunk_size,
                               SignatureTest signature_test,
                               DeltaState* state,
                               uint32_t minor_version) {
-  EXPECT_TRUE(utils::MakeTempFile("a_img.XXXXXX", &state->a_img, nullptr));
-  EXPECT_TRUE(utils::MakeTempFile("b_img.XXXXXX", &state->b_img, nullptr));
+  state->a_img.reset(new ScopedTempFile("a_img.XXXXXX"));
+  state->b_img.reset(new ScopedTempFile("b_img.XXXXXX"));
 
   // result_img is used in minor version 2. Instead of applying the update
   // in-place on A, we apply it to a new image, result_img.
-  EXPECT_TRUE(
-      utils::MakeTempFile("result_img.XXXXXX", &state->result_img, nullptr));
+  state->result_img.reset(new ScopedTempFile("result_img.XXXXXX"));
 
   EXPECT_TRUE(
       base::CopyFile(GetBuildArtifactsPath().Append("gen/disk_ext2_4k.img"),
-                     base::FilePath(state->a_img)));
+                     base::FilePath(state->a_img->path())));
 
-  state->image_size = utils::FileSize(state->a_img);
-
-  // Create ImageInfo A & B
-  ImageInfo old_image_info;
-  ImageInfo new_image_info;
-
-  if (!full_rootfs) {
-    old_image_info.set_channel("src-channel");
-    old_image_info.set_board("src-board");
-    old_image_info.set_version("src-version");
-    old_image_info.set_key("src-key");
-    old_image_info.set_build_channel("src-build-channel");
-    old_image_info.set_build_version("src-build-version");
-  }
-
-  new_image_info.set_channel("test-channel");
-  new_image_info.set_board("test-board");
-  new_image_info.set_version("test-version");
-  new_image_info.set_key("test-key");
-  new_image_info.set_build_channel("test-build-channel");
-  new_image_info.set_build_version("test-build-version");
+  state->image_size = utils::FileSize(state->a_img->path());
 
   // Make some changes to the A image.
   {
     string a_mnt;
-    ScopedLoopMounter b_mounter(state->a_img, &a_mnt, 0);
+    ScopedLoopMounter b_mounter(state->a_img->path(), &a_mnt, 0);
 
     brillo::Blob hardtocompress;
     while (hardtocompress.size() < 3 * kBlockSize) {
@@ -407,32 +449,29 @@
                          ones.size()));
   }
 
-  if (noop) {
-    EXPECT_TRUE(base::CopyFile(base::FilePath(state->a_img),
-                               base::FilePath(state->b_img)));
-    old_image_info = new_image_info;
-  } else {
-    if (minor_version == kSourceMinorPayloadVersion) {
-      // Create a result image with image_size bytes of garbage.
-      brillo::Blob ones(state->image_size, 0xff);
-      EXPECT_TRUE(utils::WriteFile(
-          state->result_img.c_str(), ones.data(), ones.size()));
-      EXPECT_EQ(utils::FileSize(state->a_img),
-                utils::FileSize(state->result_img));
-    }
+  // Create a result image with image_size bytes of garbage.
+  brillo::Blob ones(state->image_size, 0xff);
+  EXPECT_TRUE(utils::WriteFile(
+      state->result_img->path().c_str(), ones.data(), ones.size()));
+  EXPECT_EQ(utils::FileSize(state->a_img->path()),
+            utils::FileSize(state->result_img->path()));
 
-    EXPECT_TRUE(
-        base::CopyFile(GetBuildArtifactsPath().Append("gen/disk_ext2_4k.img"),
-                       base::FilePath(state->b_img)));
-
+  EXPECT_TRUE(
+      base::CopyFile(GetBuildArtifactsPath().Append("gen/disk_ext2_4k.img"),
+                     base::FilePath(state->b_img->path())));
+  {
     // Make some changes to the B image.
     string b_mnt;
-    ScopedLoopMounter b_mounter(state->b_img, &b_mnt, 0);
+    ScopedLoopMounter b_mounter(state->b_img->path(), &b_mnt, 0);
     base::FilePath mnt_path(b_mnt);
 
     EXPECT_TRUE(base::CopyFile(mnt_path.Append("regular-small"),
                                mnt_path.Append("regular-small2")));
+#if BASE_VER < 800000
     EXPECT_TRUE(base::DeleteFile(mnt_path.Append("regular-small"), false));
+#else
+    EXPECT_TRUE(base::DeleteFile(mnt_path.Append("regular-small")));
+#endif
     EXPECT_TRUE(base::Move(mnt_path.Append("regular-small2"),
                            mnt_path.Append("regular-small")));
     EXPECT_TRUE(
@@ -459,7 +498,11 @@
     EXPECT_TRUE(base::Move(mnt_path.Append("tmp"),
                            mnt_path.Append("link-hard-regular-16k")));
 
+#if BASE_VER < 800000
     EXPECT_TRUE(base::DeleteFile(mnt_path.Append("link-short_symlink"), false));
+#else
+    EXPECT_TRUE(base::DeleteFile(mnt_path.Append("link-short_symlink")));
+#endif
     EXPECT_TRUE(test_utils::WriteFileString(
         mnt_path.Append("link-short_symlink").value(), "foobar"));
 
@@ -475,18 +518,9 @@
         hardtocompress.size()));
   }
 
-  string old_kernel;
-  EXPECT_TRUE(
-      utils::MakeTempFile("old_kernel.XXXXXX", &state->old_kernel, nullptr));
-
-  string new_kernel;
-  EXPECT_TRUE(
-      utils::MakeTempFile("new_kernel.XXXXXX", &state->new_kernel, nullptr));
-
-  string result_kernel;
-  EXPECT_TRUE(utils::MakeTempFile(
-      "result_kernel.XXXXXX", &state->result_kernel, nullptr));
-
+  state->old_kernel.reset(new ScopedTempFile("old_kernel.XXXXXX"));
+  state->new_kernel.reset(new ScopedTempFile("new_kernel.XXXXXX"));
+  state->result_kernel.reset(new ScopedTempFile("result_kernel.XXXXXX"));
   state->kernel_size = kDefaultKernelSize;
   state->old_kernel_data.resize(kDefaultKernelSize);
   state->new_kernel_data.resize(state->old_kernel_data.size());
@@ -499,23 +533,18 @@
   std::copy(
       std::begin(kNewData), std::end(kNewData), state->new_kernel_data.begin());
 
-  if (noop) {
-    state->old_kernel_data = state->new_kernel_data;
-  }
-
   // Write kernels to disk
-  EXPECT_TRUE(utils::WriteFile(state->old_kernel.c_str(),
+  EXPECT_TRUE(utils::WriteFile(state->old_kernel->path().c_str(),
                                state->old_kernel_data.data(),
                                state->old_kernel_data.size()));
-  EXPECT_TRUE(utils::WriteFile(state->new_kernel.c_str(),
+  EXPECT_TRUE(utils::WriteFile(state->new_kernel->path().c_str(),
                                state->new_kernel_data.data(),
                                state->new_kernel_data.size()));
-  EXPECT_TRUE(utils::WriteFile(state->result_kernel.c_str(),
+  EXPECT_TRUE(utils::WriteFile(state->result_kernel->path().c_str(),
                                state->result_kernel_data.data(),
                                state->result_kernel_data.size()));
 
-  EXPECT_TRUE(utils::MakeTempFile("delta.XXXXXX", &state->delta_path, nullptr));
-  LOG(INFO) << "delta path: " << state->delta_path;
+  state->delta_file.reset(new ScopedTempFile("delta.XXXXXX"));
   {
     const string private_key =
         signature_test == kSignatureGenerator
@@ -526,15 +555,15 @@
     payload_config.is_delta = !full_rootfs;
     payload_config.hard_chunk_size = chunk_size;
     payload_config.rootfs_partition_size = kRootFSPartitionSize;
-    payload_config.version.major = kChromeOSMajorPayloadVersion;
+    payload_config.version.major = kBrilloMajorPayloadVersion;
     payload_config.version.minor = minor_version;
     if (!full_rootfs) {
       payload_config.source.partitions.emplace_back(kPartitionNameRoot);
       payload_config.source.partitions.emplace_back(kPartitionNameKernel);
-      payload_config.source.partitions.front().path = state->a_img;
+      payload_config.source.partitions.front().path = state->a_img->path();
       if (!full_kernel)
-        payload_config.source.partitions.back().path = state->old_kernel;
-      payload_config.source.image_info = old_image_info;
+        payload_config.source.partitions.back().path =
+            state->old_kernel->path();
       EXPECT_TRUE(payload_config.source.LoadImageSize());
       for (PartitionConfig& part : payload_config.source.partitions)
         EXPECT_TRUE(part.OpenFilesystem());
@@ -544,29 +573,30 @@
         payload_config.hard_chunk_size = 1024 * 1024;
     }
     payload_config.target.partitions.emplace_back(kPartitionNameRoot);
-    payload_config.target.partitions.back().path = state->b_img;
+    payload_config.target.partitions.back().path = state->b_img->path();
     payload_config.target.partitions.emplace_back(kPartitionNameKernel);
-    payload_config.target.partitions.back().path = state->new_kernel;
-    payload_config.target.image_info = new_image_info;
+    payload_config.target.partitions.back().path = state->new_kernel->path();
     EXPECT_TRUE(payload_config.target.LoadImageSize());
     for (PartitionConfig& part : payload_config.target.partitions)
       EXPECT_TRUE(part.OpenFilesystem());
 
     EXPECT_TRUE(payload_config.Validate());
-    EXPECT_TRUE(GenerateUpdatePayloadFile(
-        payload_config, state->delta_path, private_key, &state->metadata_size));
+    EXPECT_TRUE(GenerateUpdatePayloadFile(payload_config,
+                                          state->delta_file->path(),
+                                          private_key,
+                                          &state->metadata_size));
   }
   // Extend the "partitions" holding the file system a bit.
   EXPECT_EQ(0,
-            HANDLE_EINTR(truncate(state->a_img.c_str(),
+            HANDLE_EINTR(truncate(state->a_img->path().c_str(),
                                   state->image_size + 1024 * 1024)));
   EXPECT_EQ(static_cast<off_t>(state->image_size + 1024 * 1024),
-            utils::FileSize(state->a_img));
+            utils::FileSize(state->a_img->path()));
   EXPECT_EQ(0,
-            HANDLE_EINTR(truncate(state->b_img.c_str(),
+            HANDLE_EINTR(truncate(state->b_img->path().c_str(),
                                   state->image_size + 1024 * 1024)));
   EXPECT_EQ(static_cast<off_t>(state->image_size + 1024 * 1024),
-            utils::FileSize(state->b_img));
+            utils::FileSize(state->b_img->path()));
 
   if (signature_test == kSignatureGeneratedPlaceholder ||
       signature_test == kSignatureGeneratedPlaceholderMismatch) {
@@ -575,13 +605,13 @@
         GetBuildArtifactsPath(kUnittestPrivateKeyPath), &signature_size));
     LOG(INFO) << "Inserting placeholder signature.";
     ASSERT_TRUE(InsertSignaturePlaceholder(
-        signature_size, state->delta_path, &state->metadata_size));
+        signature_size, state->delta_file->path(), &state->metadata_size));
 
     if (signature_test == kSignatureGeneratedPlaceholderMismatch) {
       signature_size -= 1;
       LOG(INFO) << "Inserting mismatched placeholder signature.";
       ASSERT_FALSE(InsertSignaturePlaceholder(
-          signature_size, state->delta_path, &state->metadata_size));
+          signature_size, state->delta_file->path(), &state->metadata_size));
       return;
     }
   }
@@ -593,19 +623,18 @@
     // reflect the new size after adding the signature operation to the
     // manifest.
     LOG(INFO) << "Signing payload.";
-    SignGeneratedPayload(state->delta_path, &state->metadata_size);
+    SignGeneratedPayload(state->delta_file->path(), &state->metadata_size);
   } else if (signature_test == kSignatureGeneratedShell ||
              signature_test == kSignatureGeneratedShellECKey ||
              signature_test == kSignatureGeneratedShellBadKey ||
              signature_test == kSignatureGeneratedShellRotateCl1 ||
              signature_test == kSignatureGeneratedShellRotateCl2) {
-    SignGeneratedShellPayload(signature_test, state->delta_path);
+    SignGeneratedShellPayload(signature_test, state->delta_file->path());
   }
 }
 
 static void ApplyDeltaFile(bool full_kernel,
                            bool full_rootfs,
-                           bool noop,
                            SignatureTest signature_test,
                            DeltaState* state,
                            bool hash_checks_mandatory,
@@ -614,11 +643,14 @@
                            uint32_t minor_version) {
   // Check the metadata.
   {
-    EXPECT_TRUE(utils::ReadFile(state->delta_path, &state->delta));
+    EXPECT_TRUE(utils::ReadFile(state->delta_file->path(), &state->delta));
     PayloadMetadata payload_metadata;
     EXPECT_TRUE(payload_metadata.ParsePayloadHeader(state->delta));
     state->metadata_size = payload_metadata.GetMetadataSize();
     LOG(INFO) << "Metadata size: " << state->metadata_size;
+    state->metadata_signature_size =
+        payload_metadata.GetMetadataSignatureSize();
+    LOG(INFO) << "Metadata signature size: " << state->metadata_signature_size;
 
     DeltaArchiveManifest manifest;
     EXPECT_TRUE(payload_metadata.GetManifest(state->delta, &manifest));
@@ -630,7 +662,8 @@
       EXPECT_TRUE(manifest.has_signatures_size());
       Signatures sigs_message;
       EXPECT_TRUE(sigs_message.ParseFromArray(
-          &state->delta[state->metadata_size + manifest.signatures_offset()],
+          &state->delta[state->metadata_size + state->metadata_signature_size +
+                        manifest.signatures_offset()],
           manifest.signatures_size()));
       if (signature_test == kSignatureGeneratedShellRotateCl1 ||
           signature_test == kSignatureGeneratedShellRotateCl2)
@@ -653,68 +686,63 @@
       EXPECT_FALSE(signature.data().empty());
     }
 
-    if (noop) {
-      EXPECT_EQ(0, manifest.install_operations_size());
-      EXPECT_EQ(1, manifest.kernel_install_operations_size());
-    }
-
+    // TODO(ahassani): Make |DeltaState| hold a list of partitions instead of
+    // hardcoded kernel/rootfs members so it's cleaner and the following code
+    // can be turned into a helper function.
+    const auto& kernel_part = *std::find_if(
+        manifest.partitions().begin(),
+        manifest.partitions().end(),
+        [](const PartitionUpdate& partition) {
+          return partition.partition_name() == kPartitionNameKernel;
+        });
     if (full_kernel) {
-      EXPECT_FALSE(manifest.has_old_kernel_info());
+      EXPECT_FALSE(kernel_part.has_old_partition_info());
     } else {
       EXPECT_EQ(state->old_kernel_data.size(),
-                manifest.old_kernel_info().size());
-      EXPECT_FALSE(manifest.old_kernel_info().hash().empty());
+                kernel_part.old_partition_info().size());
+      EXPECT_FALSE(kernel_part.old_partition_info().hash().empty());
     }
+    EXPECT_EQ(state->new_kernel_data.size(),
+              kernel_part.new_partition_info().size());
+    EXPECT_FALSE(kernel_part.new_partition_info().hash().empty());
 
-    EXPECT_EQ(manifest.new_image_info().channel(), "test-channel");
-    EXPECT_EQ(manifest.new_image_info().board(), "test-board");
-    EXPECT_EQ(manifest.new_image_info().version(), "test-version");
-    EXPECT_EQ(manifest.new_image_info().key(), "test-key");
-    EXPECT_EQ(manifest.new_image_info().build_channel(), "test-build-channel");
-    EXPECT_EQ(manifest.new_image_info().build_version(), "test-build-version");
-
-    if (!full_rootfs) {
-      if (noop) {
-        EXPECT_EQ(manifest.old_image_info().channel(), "test-channel");
-        EXPECT_EQ(manifest.old_image_info().board(), "test-board");
-        EXPECT_EQ(manifest.old_image_info().version(), "test-version");
-        EXPECT_EQ(manifest.old_image_info().key(), "test-key");
-        EXPECT_EQ(manifest.old_image_info().build_channel(),
-                  "test-build-channel");
-        EXPECT_EQ(manifest.old_image_info().build_version(),
-                  "test-build-version");
-      } else {
-        EXPECT_EQ(manifest.old_image_info().channel(), "src-channel");
-        EXPECT_EQ(manifest.old_image_info().board(), "src-board");
-        EXPECT_EQ(manifest.old_image_info().version(), "src-version");
-        EXPECT_EQ(manifest.old_image_info().key(), "src-key");
-        EXPECT_EQ(manifest.old_image_info().build_channel(),
-                  "src-build-channel");
-        EXPECT_EQ(manifest.old_image_info().build_version(),
-                  "src-build-version");
-      }
-    }
-
+    const auto& rootfs_part =
+        *std::find_if(manifest.partitions().begin(),
+                      manifest.partitions().end(),
+                      [](const PartitionUpdate& partition) {
+                        return partition.partition_name() == kPartitionNameRoot;
+                      });
     if (full_rootfs) {
-      EXPECT_FALSE(manifest.has_old_rootfs_info());
-      EXPECT_FALSE(manifest.has_old_image_info());
-      EXPECT_TRUE(manifest.has_new_image_info());
+      EXPECT_FALSE(rootfs_part.has_old_partition_info());
     } else {
-      EXPECT_EQ(state->image_size, manifest.old_rootfs_info().size());
-      EXPECT_FALSE(manifest.old_rootfs_info().hash().empty());
+      EXPECT_FALSE(rootfs_part.old_partition_info().hash().empty());
     }
-
-    EXPECT_EQ(state->new_kernel_data.size(), manifest.new_kernel_info().size());
-    EXPECT_EQ(state->image_size, manifest.new_rootfs_info().size());
-
-    EXPECT_FALSE(manifest.new_kernel_info().hash().empty());
-    EXPECT_FALSE(manifest.new_rootfs_info().hash().empty());
+    EXPECT_FALSE(rootfs_part.new_partition_info().hash().empty());
   }
 
-  MockPrefs prefs;
+  NiceMock<MockPrefs> prefs;
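+  // Report success by default for the prefs reads/writes below; the
+  // EXPECT_CALLs further down take precedence for the calls they match.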
+  ON_CALL(prefs, SetInt64(kPrefsManifestMetadataSize, -1))
+      .WillByDefault(Return(true));
+  ON_CALL(prefs, SetInt64(kPrefsUpdateCheckResponseHash, -1))
+      .WillByDefault(Return(true));
+  ON_CALL(prefs, GetString(kPrefsUpdateCheckResponseHash, _))
+      .WillByDefault(Return(true));
+  ON_CALL(prefs, GetString(kPrefsDynamicPartitionMetadataUpdated, _))
+      .WillByDefault(Return(true));
+
+  // Set default expectations to ignore uninteresting calls to
+  // SetString/SetInt64. When starting an update, the delta performer might
+  // reset update checkpoints, which results in a lot of calls with an empty
+  // string or integer -1. Ignore these.
+  EXPECT_CALL(prefs, SetString(_, IsEmpty())).WillRepeatedly(Return(true));
+  EXPECT_CALL(prefs, SetInt64(_, -1)).WillRepeatedly(Return(true));
+  EXPECT_CALL(prefs, SetInt64(_, 0)).WillRepeatedly(Return(true));
+
   EXPECT_CALL(prefs, SetInt64(kPrefsManifestMetadataSize, state->metadata_size))
       .WillOnce(Return(true));
-  EXPECT_CALL(prefs, SetInt64(kPrefsManifestSignatureSize, 0))
+  EXPECT_CALL(
+      prefs,
+      SetInt64(kPrefsManifestSignatureSize, state->metadata_signature_size))
       .WillOnce(Return(true));
   EXPECT_CALL(prefs, SetInt64(kPrefsUpdateStateNextOperation, _))
       .WillRepeatedly(Return(true));
@@ -730,9 +758,15 @@
       .WillRepeatedly(Return(true));
   EXPECT_CALL(prefs, SetString(kPrefsDynamicPartitionMetadataUpdated, _))
       .WillRepeatedly(Return(true));
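+  // The manifest bytes persisted under kPrefsManifestBytes include both the
+  // payload metadata and its metadata signature, hence the combined size
+  // matcher.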
+  EXPECT_CALL(prefs,
+              SetString(kPrefsManifestBytes,
+                        testing::SizeIs(state->metadata_signature_size +
+                                        state->metadata_size)))
+      .WillRepeatedly(Return(true));
   if (op_hash_test == kValidOperationData && signature_test != kSignatureNone) {
-    EXPECT_CALL(prefs, SetString(kPrefsUpdateStateSignatureBlob, _))
-        .WillOnce(Return(true));
+    EXPECT_CALL(prefs,
+                SetString(kPrefsUpdateStateSignatureBlob, Not(IsEmpty())))
+        .WillRepeatedly(Return(true));
   }
 
   EXPECT_CALL(state->mock_delegate_, ShouldCancel(_))
@@ -741,7 +775,8 @@
   // Update the A image in place.
   InstallPlan* install_plan = &state->install_plan;
   install_plan->hash_checks_mandatory = hash_checks_mandatory;
-  install_plan->payloads = {{.metadata_size = state->metadata_size,
+  install_plan->payloads = {{.size = state->delta.size(),
+                             .metadata_size = state->metadata_size,
                              .type = (full_kernel && full_rootfs)
                                          ? InstallPayloadType::kFull
                                          : InstallPayloadType::kDelta}};
@@ -779,34 +814,26 @@
   (*performer)->set_public_key_path(public_key_path);
   (*performer)->set_update_certificates_path("");
 
-  EXPECT_EQ(static_cast<off_t>(state->image_size),
-            HashCalculator::RawHashOfFile(
-                state->a_img, state->image_size, &root_part.source_hash));
+  EXPECT_EQ(
+      static_cast<off_t>(state->image_size),
+      HashCalculator::RawHashOfFile(
+          state->a_img->path(), state->image_size, &root_part.source_hash));
   EXPECT_TRUE(HashCalculator::RawHashOfData(state->old_kernel_data,
                                             &kernel_part.source_hash));
 
   // The partitions should be empty before DeltaPerformer.
   install_plan->partitions.clear();
 
-  // With minor version 2, we want the target to be the new image, result_img,
-  // but with version 1, we want to update A in place.
-  string target_root, target_kernel;
-  if (minor_version == kSourceMinorPayloadVersion) {
-    target_root = state->result_img;
-    target_kernel = state->result_kernel;
-  } else {
-    target_root = state->a_img;
-    target_kernel = state->old_kernel;
-  }
-
   state->fake_boot_control_.SetPartitionDevice(
-      kPartitionNameRoot, install_plan->source_slot, state->a_img);
+      kPartitionNameRoot, install_plan->source_slot, state->a_img->path());
+  state->fake_boot_control_.SetPartitionDevice(kPartitionNameKernel,
+                                               install_plan->source_slot,
+                                               state->old_kernel->path());
   state->fake_boot_control_.SetPartitionDevice(
-      kPartitionNameKernel, install_plan->source_slot, state->old_kernel);
-  state->fake_boot_control_.SetPartitionDevice(
-      kPartitionNameRoot, install_plan->target_slot, target_root);
-  state->fake_boot_control_.SetPartitionDevice(
-      kPartitionNameKernel, install_plan->target_slot, target_kernel);
+      kPartitionNameRoot, install_plan->target_slot, state->result_img->path());
+  state->fake_boot_control_.SetPartitionDevice(kPartitionNameKernel,
+                                               install_plan->target_slot,
+                                               state->result_kernel->path());
 
   ErrorCode expected_error, actual_error;
   bool continue_writing;
@@ -885,21 +912,16 @@
     return;
   }
 
-  brillo::Blob updated_kernel_partition;
-  if (minor_version == kSourceMinorPayloadVersion) {
-    CompareFilesByBlock(
-        state->result_kernel, state->new_kernel, state->kernel_size);
-    CompareFilesByBlock(state->result_img, state->b_img, state->image_size);
-    EXPECT_TRUE(
-        utils::ReadFile(state->result_kernel, &updated_kernel_partition));
-  } else {
-    CompareFilesByBlock(
-        state->old_kernel, state->new_kernel, state->kernel_size);
-    CompareFilesByBlock(state->a_img, state->b_img, state->image_size);
-    EXPECT_TRUE(utils::ReadFile(state->old_kernel, &updated_kernel_partition));
-  }
+  CompareFilesByBlock(state->result_kernel->path(),
+                      state->new_kernel->path(),
+                      state->kernel_size);
+  CompareFilesByBlock(
+      state->result_img->path(), state->b_img->path(), state->image_size);
 
-  ASSERT_GE(updated_kernel_partition.size(), arraysize(kNewData));
+  brillo::Blob updated_kernel_partition;
+  EXPECT_TRUE(
+      utils::ReadFile(state->result_kernel->path(), &updated_kernel_partition));
+  ASSERT_GE(updated_kernel_partition.size(), base::size(kNewData));
   EXPECT_TRUE(std::equal(std::begin(kNewData),
                          std::end(kNewData),
                          updated_kernel_partition.begin()));
@@ -917,9 +939,10 @@
 
   EXPECT_EQ(state->image_size, partitions[0].target_size);
   brillo::Blob expected_new_rootfs_hash;
-  EXPECT_EQ(static_cast<off_t>(state->image_size),
-            HashCalculator::RawHashOfFile(
-                state->b_img, state->image_size, &expected_new_rootfs_hash));
+  EXPECT_EQ(
+      static_cast<off_t>(state->image_size),
+      HashCalculator::RawHashOfFile(
+          state->b_img->path(), state->image_size, &expected_new_rootfs_hash));
   EXPECT_EQ(expected_new_rootfs_hash, partitions[0].target_hash);
 }
 
@@ -944,7 +967,6 @@
 
 void DoSmallImageTest(bool full_kernel,
                       bool full_rootfs,
-                      bool noop,
                       ssize_t chunk_size,
                       SignatureTest signature_test,
                       bool hash_checks_mandatory,
@@ -953,22 +975,13 @@
   DeltaPerformer* performer = nullptr;
   GenerateDeltaFile(full_kernel,
                     full_rootfs,
-                    noop,
                     chunk_size,
                     signature_test,
                     &state,
                     minor_version);
 
-  ScopedPathUnlinker a_img_unlinker(state.a_img);
-  ScopedPathUnlinker b_img_unlinker(state.b_img);
-  ScopedPathUnlinker new_img_unlinker(state.result_img);
-  ScopedPathUnlinker delta_unlinker(state.delta_path);
-  ScopedPathUnlinker old_kernel_unlinker(state.old_kernel);
-  ScopedPathUnlinker new_kernel_unlinker(state.new_kernel);
-  ScopedPathUnlinker result_kernel_unlinker(state.result_kernel);
   ApplyDeltaFile(full_kernel,
                  full_rootfs,
-                 noop,
                  signature_test,
                  &state,
                  hash_checks_mandatory,
@@ -983,17 +996,10 @@
                                  bool hash_checks_mandatory) {
   DeltaState state;
   uint64_t minor_version = kFullPayloadMinorVersion;
-  GenerateDeltaFile(
-      true, true, false, -1, kSignatureGenerated, &state, minor_version);
-  ScopedPathUnlinker a_img_unlinker(state.a_img);
-  ScopedPathUnlinker b_img_unlinker(state.b_img);
-  ScopedPathUnlinker delta_unlinker(state.delta_path);
-  ScopedPathUnlinker old_kernel_unlinker(state.old_kernel);
-  ScopedPathUnlinker new_kernel_unlinker(state.new_kernel);
+  GenerateDeltaFile(true, true, -1, kSignatureGenerated, &state, minor_version);
   DeltaPerformer* performer = nullptr;
   ApplyDeltaFile(true,
                  true,
-                 false,
                  kSignatureGenerated,
                  &state,
                  hash_checks_mandatory,
@@ -1003,166 +1009,242 @@
   delete performer;
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageTest) {
-  DoSmallImageTest(false,
-                   false,
-                   false,
-                   -1,
-                   kSignatureGenerator,
-                   false,
-                   kInPlaceMinorPayloadVersion);
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootSmallImageTest) {
+  DoSmallImageTest(
+      false, false, -1, kSignatureGenerator, false, kSourceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest,
-     RunAsRootSmallImageSignaturePlaceholderTest) {
+TEST_F(DeltaPerformerIntegrationTest,
+       RunAsRootSmallImageSignaturePlaceholderTest) {
   DoSmallImageTest(false,
                    false,
-                   false,
                    -1,
                    kSignatureGeneratedPlaceholder,
                    false,
-                   kInPlaceMinorPayloadVersion);
+                   kSourceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest,
-     RunAsRootSmallImageSignaturePlaceholderMismatchTest) {
+TEST_F(DeltaPerformerIntegrationTest,
+       RunAsRootSmallImageSignaturePlaceholderMismatchTest) {
   DeltaState state;
   GenerateDeltaFile(false,
                     false,
-                    false,
                     -1,
                     kSignatureGeneratedPlaceholderMismatch,
                     &state,
-                    kInPlaceMinorPayloadVersion);
+                    kSourceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageChunksTest) {
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootSmallImageChunksTest) {
   DoSmallImageTest(false,
                    false,
-                   false,
                    kBlockSize,
                    kSignatureGenerator,
                    false,
-                   kInPlaceMinorPayloadVersion);
-}
-
-TEST(DeltaPerformerIntegrationTest, RunAsRootFullKernelSmallImageTest) {
-  DoSmallImageTest(true,
-                   false,
-                   false,
-                   -1,
-                   kSignatureGenerator,
-                   false,
-                   kInPlaceMinorPayloadVersion);
-}
-
-TEST(DeltaPerformerIntegrationTest, RunAsRootFullSmallImageTest) {
-  DoSmallImageTest(true,
-                   true,
-                   false,
-                   -1,
-                   kSignatureGenerator,
-                   true,
-                   kFullPayloadMinorVersion);
-}
-
-TEST(DeltaPerformerIntegrationTest, RunAsRootNoopSmallImageTest) {
-  DoSmallImageTest(false,
-                   false,
-                   true,
-                   -1,
-                   kSignatureGenerator,
-                   false,
-                   kInPlaceMinorPayloadVersion);
-}
-
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignNoneTest) {
-  DoSmallImageTest(false,
-                   false,
-                   false,
-                   -1,
-                   kSignatureNone,
-                   false,
-                   kInPlaceMinorPayloadVersion);
-}
-
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedTest) {
-  DoSmallImageTest(false,
-                   false,
-                   false,
-                   -1,
-                   kSignatureGenerated,
-                   true,
-                   kInPlaceMinorPayloadVersion);
-}
-
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedShellTest) {
-  DoSmallImageTest(false,
-                   false,
-                   false,
-                   -1,
-                   kSignatureGeneratedShell,
-                   false,
-                   kInPlaceMinorPayloadVersion);
-}
-
-TEST(DeltaPerformerIntegrationTest,
-     RunAsRootSmallImageSignGeneratedShellECKeyTest) {
-  DoSmallImageTest(false,
-                   false,
-                   false,
-                   -1,
-                   kSignatureGeneratedShellECKey,
-                   false,
-                   kInPlaceMinorPayloadVersion);
-}
-
-TEST(DeltaPerformerIntegrationTest,
-     RunAsRootSmallImageSignGeneratedShellBadKeyTest) {
-  DoSmallImageTest(false,
-                   false,
-                   false,
-                   -1,
-                   kSignatureGeneratedShellBadKey,
-                   false,
-                   kInPlaceMinorPayloadVersion);
-}
-
-TEST(DeltaPerformerIntegrationTest,
-     RunAsRootSmallImageSignGeneratedShellRotateCl1Test) {
-  DoSmallImageTest(false,
-                   false,
-                   false,
-                   -1,
-                   kSignatureGeneratedShellRotateCl1,
-                   false,
-                   kInPlaceMinorPayloadVersion);
-}
-
-TEST(DeltaPerformerIntegrationTest,
-     RunAsRootSmallImageSignGeneratedShellRotateCl2Test) {
-  DoSmallImageTest(false,
-                   false,
-                   false,
-                   -1,
-                   kSignatureGeneratedShellRotateCl2,
-                   false,
-                   kInPlaceMinorPayloadVersion);
-}
-
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSourceOpsTest) {
-  DoSmallImageTest(false,
-                   false,
-                   false,
-                   -1,
-                   kSignatureGenerator,
-                   false,
                    kSourceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest,
-     RunAsRootMandatoryOperationHashMismatchTest) {
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootFullKernelSmallImageTest) {
+  DoSmallImageTest(
+      true, false, -1, kSignatureGenerator, false, kSourceMinorPayloadVersion);
+}
+
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootFullSmallImageTest) {
+  DoSmallImageTest(
+      true, true, -1, kSignatureGenerator, true, kFullPayloadMinorVersion);
+}
+
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignNoneTest) {
+  DoSmallImageTest(
+      false, false, -1, kSignatureNone, false, kSourceMinorPayloadVersion);
+}
+
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedTest) {
+  DoSmallImageTest(
+      false, false, -1, kSignatureGenerated, true, kSourceMinorPayloadVersion);
+}
+
+TEST_F(DeltaPerformerIntegrationTest,
+       RunAsRootSmallImageSignGeneratedShellTest) {
+  DoSmallImageTest(false,
+                   false,
+                   -1,
+                   kSignatureGeneratedShell,
+                   false,
+                   kSourceMinorPayloadVersion);
+}
+
+TEST_F(DeltaPerformerIntegrationTest,
+       RunAsRootSmallImageSignGeneratedShellECKeyTest) {
+  DoSmallImageTest(false,
+                   false,
+                   -1,
+                   kSignatureGeneratedShellECKey,
+                   false,
+                   kSourceMinorPayloadVersion);
+}
+
+TEST_F(DeltaPerformerIntegrationTest,
+       RunAsRootSmallImageSignGeneratedShellBadKeyTest) {
+  DoSmallImageTest(false,
+                   false,
+                   -1,
+                   kSignatureGeneratedShellBadKey,
+                   false,
+                   kSourceMinorPayloadVersion);
+}
+
+TEST_F(DeltaPerformerIntegrationTest,
+       RunAsRootSmallImageSignGeneratedShellRotateCl1Test) {
+  DoSmallImageTest(false,
+                   false,
+                   -1,
+                   kSignatureGeneratedShellRotateCl1,
+                   false,
+                   kSourceMinorPayloadVersion);
+}
+
+TEST_F(DeltaPerformerIntegrationTest,
+       RunAsRootSmallImageSignGeneratedShellRotateCl2Test) {
+  DoSmallImageTest(false,
+                   false,
+                   -1,
+                   kSignatureGeneratedShellRotateCl2,
+                   false,
+                   kSourceMinorPayloadVersion);
+}
+
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootSmallImageSourceOpsTest) {
+  DoSmallImageTest(
+      false, false, -1, kSignatureGenerator, false, kSourceMinorPayloadVersion);
+}
+
+TEST_F(DeltaPerformerIntegrationTest,
+       RunAsRootMandatoryOperationHashMismatchTest) {
   DoOperationHashMismatchTest(kInvalidOperationData, true);
 }
 
+TEST_F(DeltaPerformerIntegrationTest, ValidatePerPartitionTimestampSuccess) {
+  // The Manifest we are validating.
+  DeltaArchiveManifest manifest;
+
+  fake_hardware_.SetVersion("system", "5");
+  fake_hardware_.SetVersion("product", "99");
+  fake_hardware_.SetBuildTimestamp(1);
+
+  manifest.set_minor_version(kFullPayloadMinorVersion);
+  manifest.set_max_timestamp(2);
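+  // Partition timestamps 10 and 100 are newer than the installed "5" and
+  // "99", so per-partition validation should pass.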
+  AddPartition(&manifest, "system", 10);
+  AddPartition(&manifest, "product", 100);
+
+  RunManifestValidation(
+      manifest, kMaxSupportedMajorPayloadVersion, ErrorCode::kSuccess);
+}
+
+TEST_F(DeltaPerformerIntegrationTest, ValidatePerPartitionTimestampFailure) {
+  // The Manifest we are validating.
+  DeltaArchiveManifest manifest;
+
+  fake_hardware_.SetVersion("system", "5");
+  fake_hardware_.SetVersion("product", "99");
+  fake_hardware_.SetBuildTimestamp(1);
+
+  manifest.set_minor_version(kFullPayloadMinorVersion);
+  manifest.set_max_timestamp(2);
+  AddPartition(&manifest, "system", 10);
+  AddPartition(&manifest, "product", 98);
+
+  RunManifestValidation(manifest,
+                        kMaxSupportedMajorPayloadVersion,
+                        ErrorCode::kPayloadTimestampError);
+}
+
+TEST_F(DeltaPerformerIntegrationTest,
+       ValidatePerPartitionTimestampMissingTimestamp) {
+  // The Manifest we are validating.
+  DeltaArchiveManifest manifest;
+
+  fake_hardware_.SetVersion("system", "5");
+  fake_hardware_.SetVersion("product", "99");
+  fake_hardware_.SetBuildTimestamp(1);
+
+  manifest.set_minor_version(kFullPayloadMinorVersion);
+  manifest.set_max_timestamp(2);
+  AddPartition(&manifest, "system", 10);
+  {
+    auto& partition = *manifest.add_partitions();
+    // For complete updates, a missing timestamp should not trigger a
+    // timestamp error.
+    partition.set_partition_name("product");
+  }
+
+  RunManifestValidation(
+      manifest, kMaxSupportedMajorPayloadVersion, ErrorCode::kSuccess);
+}
+
+TEST_F(DeltaPerformerIntegrationTest,
+       ValidatePerPartitionTimestampPartialUpdatePass) {
+  fake_hardware_.SetVersion("system", "5");
+  fake_hardware_.SetVersion("product", "99");
+
+  DeltaArchiveManifest manifest;
+  manifest.set_minor_version(kPartialUpdateMinorPayloadVersion);
+  manifest.set_partial_update(true);
+  AddPartition(&manifest, "product", 100);
+  RunManifestValidation(
+      manifest, kMaxSupportedMajorPayloadVersion, ErrorCode::kSuccess);
+}
+
+TEST_F(DeltaPerformerIntegrationTest,
+       ValidatePerPartitionTimestampPartialUpdateDowngrade) {
+  fake_hardware_.SetVersion("system", "5");
+  fake_hardware_.SetVersion("product", "99");
+
+  DeltaArchiveManifest manifest;
+  manifest.set_minor_version(kPartialUpdateMinorPayloadVersion);
+  manifest.set_partial_update(true);
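+  // Downgrading "product" from "99" to 98 must be rejected even for partial
+  // updates.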
+  AddPartition(&manifest, "product", 98);
+  RunManifestValidation(manifest,
+                        kMaxSupportedMajorPayloadVersion,
+                        ErrorCode::kPayloadTimestampError);
+}
+
+TEST_F(DeltaPerformerIntegrationTest,
+       ValidatePerPartitionTimestampPartialUpdateMissingVersion) {
+  fake_hardware_.SetVersion("system", "5");
+  fake_hardware_.SetVersion("product", "99");
+
+  DeltaArchiveManifest manifest;
+  manifest.set_minor_version(kPartialUpdateMinorPayloadVersion);
+  manifest.set_partial_update(true);
+  {
+    auto& partition = *manifest.add_partitions();
+    // For partial updates, a missing timestamp should trigger an error.
+    partition.set_partition_name("product");
+    // has_version() == false.
+  }
+  RunManifestValidation(manifest,
+                        kMaxSupportedMajorPayloadVersion,
+                        ErrorCode::kDownloadManifestParseError);
+}
+
+TEST_F(DeltaPerformerIntegrationTest,
+       ValidatePerPartitionTimestampPartialUpdateEmptyVersion) {
+  fake_hardware_.SetVersion("system", "5");
+  fake_hardware_.SetVersion("product", "99");
+
+  DeltaArchiveManifest manifest;
+  manifest.set_minor_version(kPartialUpdateMinorPayloadVersion);
+  manifest.set_partial_update(true);
+  {
+    auto& partition = *manifest.add_partitions();
+    // For partial updates, an invalid timestamp should trigger an error.
+    partition.set_partition_name("product");
+    partition.set_version("something");
+  }
+  RunManifestValidation(manifest,
+                        kMaxSupportedMajorPayloadVersion,
+                        ErrorCode::kDownloadManifestParseError);
+}
+
 }  // namespace chromeos_update_engine
diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc
index e9022ba..840ecf6 100644
--- a/payload_consumer/delta_performer_unittest.cc
+++ b/payload_consumer/delta_performer_unittest.cc
@@ -20,6 +20,8 @@
 #include <inttypes.h>
 #include <time.h>
 
+#include <algorithm>
+#include <map>
 #include <memory>
 #include <string>
 #include <vector>
@@ -27,22 +29,29 @@
 #include <base/files/file_path.h>
 #include <base/files/file_util.h>
 #include <base/files/scoped_temp_dir.h>
+#include <base/stl_util.h>
 #include <base/strings/string_number_conversions.h>
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
+#include <brillo/secure_blob.h>
 #include <gmock/gmock.h>
 #include <google/protobuf/repeated_field.h>
 #include <gtest/gtest.h>
 
 #include "update_engine/common/constants.h"
+#include "update_engine/common/error_code.h"
 #include "update_engine/common/fake_boot_control.h"
 #include "update_engine/common/fake_hardware.h"
 #include "update_engine/common/fake_prefs.h"
+#include "update_engine/common/hardware_interface.h"
+#include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/mock_download_action.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/fake_file_descriptor.h"
-#include "update_engine/payload_consumer/mock_download_action.h"
+#include "update_engine/payload_consumer/mock_partition_writer.h"
 #include "update_engine/payload_consumer/payload_constants.h"
+#include "update_engine/payload_consumer/payload_metadata.h"
 #include "update_engine/payload_generator/bzip.h"
 #include "update_engine/payload_generator/extent_ranges.h"
 #include "update_engine/payload_generator/payload_file.h"
@@ -55,8 +64,9 @@
 using std::vector;
 using test_utils::GetBuildArtifactsPath;
 using test_utils::kRandomString;
-using test_utils::System;
 using testing::_;
+using testing::Return;
+using ::testing::Sequence;
 
 extern const char* kUnittestPrivateKeyPath;
 extern const char* kUnittestPublicKeyPath;
@@ -198,7 +208,7 @@
                                uint64_t major_version,
                                uint32_t minor_version,
                                PartitionConfig* old_part = nullptr) {
-    test_utils::ScopedTempFile blob_file("Blob-XXXXXX");
+    ScopedTempFile blob_file("Blob-XXXXXX");
     EXPECT_TRUE(test_utils::WriteFileVector(blob_file.path(), blob_data));
 
     PayloadGenerationConfig config;
@@ -224,15 +234,15 @@
     new_part.path = "/dev/zero";
     new_part.size = 1234;
 
-    payload.AddPartition(*old_part, new_part, aops);
+    payload.AddPartition(*old_part, new_part, aops, {}, 0);
 
     // We include a kernel partition without operations.
     old_part->name = kPartitionNameKernel;
     new_part.name = kPartitionNameKernel;
     new_part.size = 0;
-    payload.AddPartition(*old_part, new_part, {});
+    payload.AddPartition(*old_part, new_part, {}, {}, 0);
 
-    test_utils::ScopedTempFile payload_file("Payload-XXXXXX");
+    ScopedTempFile payload_file("Payload-XXXXXX");
     string private_key =
         sign_payload ? GetBuildArtifactsPath(kUnittestPrivateKeyPath) : "";
     EXPECT_TRUE(payload.WritePayload(payload_file.path(),
@@ -271,7 +281,14 @@
                             const string& source_path,
                             bool expect_success) {
     return ApplyPayloadToData(
-        payload_data, source_path, brillo::Blob(), expect_success);
+        &performer_, payload_data, source_path, brillo::Blob(), expect_success);
+  }
+  brillo::Blob ApplyPayloadToData(const brillo::Blob& payload_data,
+                                  const string& source_path,
+                                  const brillo::Blob& target_data,
+                                  bool expect_success) {
+    return ApplyPayloadToData(
+        &performer_, payload_data, source_path, target_data, expect_success);
   }
 
   // Apply the payload provided in |payload_data| reading from the |source_path|
@@ -279,13 +296,15 @@
   // new target file are set to |target_data| before applying the payload.
   // Expect result of performer_.Write() to be |expect_success|.
   // Returns the result of the payload application.
-  brillo::Blob ApplyPayloadToData(const brillo::Blob& payload_data,
+  brillo::Blob ApplyPayloadToData(DeltaPerformer* delta_performer,
+                                  const brillo::Blob& payload_data,
                                   const string& source_path,
                                   const brillo::Blob& target_data,
                                   bool expect_success) {
-    test_utils::ScopedTempFile new_part("Partition-XXXXXX");
+    ScopedTempFile new_part("Partition-XXXXXX");
     EXPECT_TRUE(test_utils::WriteFileVector(new_part.path(), target_data));
 
+    payload_.size = payload_data.size();
     // We installed the operations only in the rootfs partition, but the
     // delta performer needs to access all the partitions.
     fake_boot_control_.SetPartitionDevice(
@@ -298,7 +317,7 @@
         kPartitionNameKernel, install_plan_.source_slot, "/dev/null");
 
     EXPECT_EQ(expect_success,
-              performer_.Write(payload_data.data(), payload_data.size()));
+              delta_performer->Write(payload_data.data(), payload_data.size()));
     EXPECT_EQ(0, performer_.Close());
 
     brillo::Blob partition_data;
@@ -318,14 +337,17 @@
 
     // Set a valid magic string and version number 1.
     EXPECT_TRUE(performer_.Write("CrAU", 4));
-    uint64_t version = htobe64(kChromeOSMajorPayloadVersion);
+    uint64_t version = htobe64(kBrilloMajorPayloadVersion);
     EXPECT_TRUE(performer_.Write(&version, 8));
 
     payload_.metadata_size = expected_metadata_size;
+    payload_.size = actual_metadata_size + 1;
     ErrorCode error_code;
-    // When filling in size in manifest, exclude the size of the 20-byte header.
-    uint64_t size_in_manifest = htobe64(actual_metadata_size - 20);
-    bool result = performer_.Write(&size_in_manifest, 8, &error_code);
+    // When filling in the size in the manifest, exclude the 24-byte header.
+    uint64_t size_in_manifest = htobe64(actual_metadata_size - 24);
+    performer_.Write(&size_in_manifest, 8, &error_code);
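+    // The Brillo (major version 2) payload header also carries a 4-byte
+    // metadata signature size field; write a dummy value for it.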
+    auto signature_size = htobe64(10);
+    bool result = performer_.Write(&signature_size, 4, &error_code);
     if (expected_metadata_size == actual_metadata_size ||
         !hash_checks_mandatory) {
       EXPECT_TRUE(result);
@@ -337,7 +359,7 @@
     EXPECT_LT(performer_.Close(), 0);
   }
 
-  // Generates a valid delta file but tests the delta performer by suppling
+  // Generates a valid delta file but tests the delta performer by supplying
   // different metadata signatures as per metadata_signature_test flag and
   // sees if the result of the parsing are as per hash_checks_mandatory flag.
   void DoMetadataSignatureTest(MetadataSignatureTest metadata_signature_test,
@@ -347,9 +369,10 @@
     brillo::Blob payload = GeneratePayload(brillo::Blob(),
                                            vector<AnnotatedOperation>(),
                                            sign_payload,
-                                           kChromeOSMajorPayloadVersion,
+                                           kBrilloMajorPayloadVersion,
                                            kFullPayloadMinorVersion);
 
+    payload_.size = payload.size();
     LOG(INFO) << "Payload size: " << payload.size();
 
     install_plan_.hash_checks_mandatory = hash_checks_mandatory;
@@ -361,6 +384,9 @@
     switch (metadata_signature_test) {
       case kEmptyMetadataSignature:
         payload_.metadata_signature.clear();
+        // Zero out the 4-byte metadata signature size field in the header of
+        // the signed payload.
+        std::fill(
+            std::next(payload.begin(), 20), std::next(payload.begin(), 24), 0);
         expected_result = MetadataParseResult::kError;
         expected_error = ErrorCode::kDownloadMetadataSignatureMissingError;
         break;
@@ -406,23 +432,6 @@
     EXPECT_EQ(payload_.metadata_size, performer_.metadata_size_);
   }
 
-  // Helper function to pretend that the ECC file descriptor was already opened.
-  // Returns a pointer to the created file descriptor.
-  FakeFileDescriptor* SetFakeECCFile(size_t size) {
-    EXPECT_FALSE(performer_.source_ecc_fd_) << "source_ecc_fd_ already open.";
-    FakeFileDescriptor* ret = new FakeFileDescriptor();
-    fake_ecc_fd_.reset(ret);
-    // Call open to simulate it was already opened.
-    ret->Open("", 0);
-    ret->SetFileSize(size);
-    performer_.source_ecc_fd_ = fake_ecc_fd_;
-    return ret;
-  }
-
-  uint64_t GetSourceEccRecoveredFailures() const {
-    return performer_.source_ecc_recovered_failures_;
-  }
-
   FakePrefs prefs_;
   InstallPlan install_plan_;
   InstallPlan::Payload payload_;
@@ -455,7 +464,7 @@
   brillo::Blob payload_data = GeneratePayload(expected_data,
                                               aops,
                                               false,
-                                              kChromeOSMajorPayloadVersion,
+                                              kBrilloMajorPayloadVersion,
                                               kFullPayloadMinorVersion);
 
   EXPECT_EQ(expected_data, ApplyPayload(payload_data, "/dev/null", true));
@@ -477,7 +486,7 @@
   brillo::Blob payload_data = GeneratePayload(expected_data,
                                               aops,
                                               false,
-                                              kChromeOSMajorPayloadVersion,
+                                              kBrilloMajorPayloadVersion,
                                               kFullPayloadMinorVersion);
 
   testing::Mock::VerifyAndClearExpectations(&mock_delegate_);
@@ -579,7 +588,7 @@
   EXPECT_TRUE(HashCalculator::RawHashOfData(expected_data, &src_hash));
   aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size());
 
-  test_utils::ScopedTempFile source("Source-XXXXXX");
+  ScopedTempFile source("Source-XXXXXX");
   EXPECT_TRUE(test_utils::WriteFileVector(source.path(), expected_data));
 
   PartitionConfig old_part(kPartitionNameRoot);
@@ -607,7 +616,7 @@
   EXPECT_TRUE(HashCalculator::RawHashOfData(src, &src_hash));
   aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size());
 
-  test_utils::ScopedTempFile source("Source-XXXXXX");
+  ScopedTempFile source("Source-XXXXXX");
   EXPECT_TRUE(test_utils::WriteFileVector(source.path(), src));
 
   PartitionConfig old_part(kPartitionNameRoot);
@@ -635,7 +644,7 @@
   EXPECT_TRUE(HashCalculator::RawHashOfData(expected_data, &src_hash));
   aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size());
 
-  test_utils::ScopedTempFile source("Source-XXXXXX");
+  ScopedTempFile source("Source-XXXXXX");
   EXPECT_TRUE(test_utils::WriteFileVector(source.path(), actual_data));
 
   PartitionConfig old_part(kPartitionNameRoot);
@@ -648,103 +657,14 @@
   EXPECT_EQ(actual_data, ApplyPayload(payload_data, source.path(), false));
 }
 
-// Test that the error-corrected file descriptor is used to read the partition
-// since the source partition doesn't match the operation hash.
-TEST_F(DeltaPerformerTest, ErrorCorrectionSourceCopyFallbackTest) {
-  constexpr size_t kCopyOperationSize = 4 * 4096;
-  test_utils::ScopedTempFile source("Source-XXXXXX");
-  // Write invalid data to the source image, which doesn't match the expected
-  // hash.
-  brillo::Blob invalid_data(kCopyOperationSize, 0x55);
-  EXPECT_TRUE(test_utils::WriteFileVector(source.path(), invalid_data));
-
-  // Setup the fec file descriptor as the fake stream, which matches
-  // |expected_data|.
-  FakeFileDescriptor* fake_fec = SetFakeECCFile(kCopyOperationSize);
-  brillo::Blob expected_data = FakeFileDescriptorData(kCopyOperationSize);
-
-  PartitionConfig old_part(kPartitionNameRoot);
-  old_part.path = source.path();
-  old_part.size = invalid_data.size();
-
-  brillo::Blob payload_data =
-      GenerateSourceCopyPayload(expected_data, true, &old_part);
-  EXPECT_EQ(expected_data, ApplyPayload(payload_data, source.path(), true));
-  // Verify that the fake_fec was actually used.
-  EXPECT_EQ(1U, fake_fec->GetReadOps().size());
-  EXPECT_EQ(1U, GetSourceEccRecoveredFailures());
-}
-
-// Test that the error-corrected file descriptor is used to read a partition
-// when no hash is available for SOURCE_COPY but it falls back to the normal
-// file descriptor when the size of the error corrected one is too small.
-TEST_F(DeltaPerformerTest, ErrorCorrectionSourceCopyWhenNoHashFallbackTest) {
-  constexpr size_t kCopyOperationSize = 4 * 4096;
-  test_utils::ScopedTempFile source("Source-XXXXXX");
-  // Setup the source path with the right expected data.
-  brillo::Blob expected_data = FakeFileDescriptorData(kCopyOperationSize);
-  EXPECT_TRUE(test_utils::WriteFileVector(source.path(), expected_data));
-
-  // Setup the fec file descriptor as the fake stream, with smaller data than
-  // the expected.
-  FakeFileDescriptor* fake_fec = SetFakeECCFile(kCopyOperationSize / 2);
-
-  PartitionConfig old_part(kPartitionNameRoot);
-  old_part.path = source.path();
-  old_part.size = expected_data.size();
-
-  // The payload operation doesn't include an operation hash.
-  brillo::Blob payload_data =
-      GenerateSourceCopyPayload(expected_data, false, &old_part);
-  EXPECT_EQ(expected_data, ApplyPayload(payload_data, source.path(), true));
-  // Verify that the fake_fec was attempted to be used. Since the file
-  // descriptor is shorter it can actually do more than one read to realize it
-  // reached the EOF.
-  EXPECT_LE(1U, fake_fec->GetReadOps().size());
-  // This fallback doesn't count as an error-corrected operation since the
-  // operation hash was not available.
-  EXPECT_EQ(0U, GetSourceEccRecoveredFailures());
-}
-
-TEST_F(DeltaPerformerTest, ChooseSourceFDTest) {
-  constexpr size_t kSourceSize = 4 * 4096;
-  test_utils::ScopedTempFile source("Source-XXXXXX");
-  // Write invalid data to the source image, which doesn't match the expected
-  // hash.
-  brillo::Blob invalid_data(kSourceSize, 0x55);
-  EXPECT_TRUE(test_utils::WriteFileVector(source.path(), invalid_data));
-
-  performer_.source_fd_ = std::make_shared<EintrSafeFileDescriptor>();
-  performer_.source_fd_->Open(source.path().c_str(), O_RDONLY);
-  performer_.block_size_ = 4096;
-
-  // Setup the fec file descriptor as the fake stream, which matches
-  // |expected_data|.
-  FakeFileDescriptor* fake_fec = SetFakeECCFile(kSourceSize);
-  brillo::Blob expected_data = FakeFileDescriptorData(kSourceSize);
-
-  InstallOperation op;
-  *(op.add_src_extents()) = ExtentForRange(0, kSourceSize / 4096);
-  brillo::Blob src_hash;
-  EXPECT_TRUE(HashCalculator::RawHashOfData(expected_data, &src_hash));
-  op.set_src_sha256_hash(src_hash.data(), src_hash.size());
-
-  ErrorCode error = ErrorCode::kSuccess;
-  EXPECT_EQ(performer_.source_ecc_fd_, performer_.ChooseSourceFD(op, &error));
-  EXPECT_EQ(ErrorCode::kSuccess, error);
-  // Verify that the fake_fec was actually used.
-  EXPECT_EQ(1U, fake_fec->GetReadOps().size());
-  EXPECT_EQ(1U, GetSourceEccRecoveredFailures());
-}
-
 TEST_F(DeltaPerformerTest, ExtentsToByteStringTest) {
   uint64_t test[] = {1, 1, 4, 2, 0, 1};
-  static_assert(arraysize(test) % 2 == 0, "Array size uneven");
+  static_assert(base::size(test) % 2 == 0, "Array size uneven");
   const uint64_t block_size = 4096;
   const uint64_t file_length = 4 * block_size - 13;
 
   google::protobuf::RepeatedPtrField<Extent> extents;
-  for (size_t i = 0; i < arraysize(test); i += 2) {
+  for (size_t i = 0; i < base::size(test); i += 2) {
     *(extents.Add()) = ExtentForRange(test[i], test[i + 1]);
   }
 
@@ -758,27 +678,32 @@
 TEST_F(DeltaPerformerTest, ValidateManifestFullGoodTest) {
   // The Manifest we are validating.
   DeltaArchiveManifest manifest;
-  manifest.mutable_new_kernel_info();
-  manifest.mutable_new_rootfs_info();
+  for (const auto& part_name : {"kernel", "rootfs"}) {
+    auto part = manifest.add_partitions();
+    part->set_partition_name(part_name);
+    part->mutable_new_partition_info();
+  }
   manifest.set_minor_version(kFullPayloadMinorVersion);
 
   RunManifestValidation(manifest,
-                        kChromeOSMajorPayloadVersion,
+                        kBrilloMajorPayloadVersion,
                         InstallPayloadType::kFull,
                         ErrorCode::kSuccess);
 }
 
-TEST_F(DeltaPerformerTest, ValidateManifestDeltaGoodTest) {
+TEST_F(DeltaPerformerTest, ValidateManifestDeltaMaxGoodTest) {
   // The Manifest we are validating.
   DeltaArchiveManifest manifest;
-  manifest.mutable_old_kernel_info();
-  manifest.mutable_old_rootfs_info();
-  manifest.mutable_new_kernel_info();
-  manifest.mutable_new_rootfs_info();
+  for (const auto& part_name : {"kernel", "rootfs"}) {
+    auto part = manifest.add_partitions();
+    part->set_partition_name(part_name);
+    part->mutable_old_partition_info();
+    part->mutable_new_partition_info();
+  }
   manifest.set_minor_version(kMaxSupportedMinorPayloadVersion);
 
   RunManifestValidation(manifest,
-                        kChromeOSMajorPayloadVersion,
+                        kBrilloMajorPayloadVersion,
                         InstallPayloadType::kDelta,
                         ErrorCode::kSuccess);
 }
@@ -786,14 +711,16 @@
 TEST_F(DeltaPerformerTest, ValidateManifestDeltaMinGoodTest) {
   // The Manifest we are validating.
   DeltaArchiveManifest manifest;
-  manifest.mutable_old_kernel_info();
-  manifest.mutable_old_rootfs_info();
-  manifest.mutable_new_kernel_info();
-  manifest.mutable_new_rootfs_info();
+  for (const auto& part_name : {"kernel", "rootfs"}) {
+    auto part = manifest.add_partitions();
+    part->set_partition_name(part_name);
+    part->mutable_old_partition_info();
+    part->mutable_new_partition_info();
+  }
   manifest.set_minor_version(kMinSupportedMinorPayloadVersion);
 
   RunManifestValidation(manifest,
-                        kChromeOSMajorPayloadVersion,
+                        kBrilloMajorPayloadVersion,
                         InstallPayloadType::kDelta,
                         ErrorCode::kSuccess);
 }
@@ -811,9 +738,11 @@
 TEST_F(DeltaPerformerTest, ValidateManifestDeltaUnsetMinorVersion) {
   // The Manifest we are validating.
   DeltaArchiveManifest manifest;
-  // Add an empty old_rootfs_info() to trick the DeltaPerformer into think that
-  // this is a delta payload manifest with a missing minor version.
-  manifest.mutable_old_rootfs_info();
+  // Add an empty rootfs partition info to trick the DeltaPerformer into
+  // thinking this is a delta payload manifest with a missing minor version.
+  auto rootfs = manifest.add_partitions();
+  rootfs->set_partition_name("rootfs");
+  rootfs->mutable_old_partition_info();
 
   RunManifestValidation(manifest,
                         kMaxSupportedMajorPayloadVersion,
@@ -824,27 +753,15 @@
 TEST_F(DeltaPerformerTest, ValidateManifestFullOldKernelTest) {
   // The Manifest we are validating.
   DeltaArchiveManifest manifest;
-  manifest.mutable_old_kernel_info();
-  manifest.mutable_new_kernel_info();
-  manifest.mutable_new_rootfs_info();
-  manifest.set_minor_version(kMaxSupportedMinorPayloadVersion);
-
+  for (const auto& part_name : {"kernel", "rootfs"}) {
+    auto part = manifest.add_partitions();
+    part->set_partition_name(part_name);
+    part->mutable_old_partition_info();
+    part->mutable_new_partition_info();
+  }
+  manifest.mutable_partitions(0)->clear_old_partition_info();
   RunManifestValidation(manifest,
-                        kChromeOSMajorPayloadVersion,
-                        InstallPayloadType::kFull,
-                        ErrorCode::kPayloadMismatchedType);
-}
-
-TEST_F(DeltaPerformerTest, ValidateManifestFullOldRootfsTest) {
-  // The Manifest we are validating.
-  DeltaArchiveManifest manifest;
-  manifest.mutable_old_rootfs_info();
-  manifest.mutable_new_kernel_info();
-  manifest.mutable_new_rootfs_info();
-  manifest.set_minor_version(kMaxSupportedMinorPayloadVersion);
-
-  RunManifestValidation(manifest,
-                        kChromeOSMajorPayloadVersion,
+                        kBrilloMajorPayloadVersion,
                         InstallPayloadType::kFull,
                         ErrorCode::kPayloadMismatchedType);
 }
@@ -869,8 +786,8 @@
 
   // Generate a bad version number.
   manifest.set_minor_version(kMaxSupportedMinorPayloadVersion + 10000);
-  // Mark the manifest as a delta payload by setting old_rootfs_info.
-  manifest.mutable_old_rootfs_info();
+  // Mark the manifest as a delta payload by setting |old_partition_info|.
+  manifest.add_partitions()->mutable_old_partition_info();
 
   RunManifestValidation(manifest,
                         kMaxSupportedMajorPayloadVersion,
@@ -892,20 +809,50 @@
                         ErrorCode::kPayloadTimestampError);
 }
 
+TEST_F(DeltaPerformerTest, ValidatePerPartitionTimestampSuccess) {
+  // The Manifest we are validating.
+  DeltaArchiveManifest manifest;
+
+  manifest.set_minor_version(kFullPayloadMinorVersion);
+  manifest.set_max_timestamp(2);
+  fake_hardware_.SetBuildTimestamp(1);
+  auto& partition = *manifest.add_partitions();
+  partition.set_version("10");
+  partition.set_partition_name("system");
+  fake_hardware_.SetVersion("system", "5");
+
+  RunManifestValidation(manifest,
+                        kMaxSupportedMajorPayloadVersion,
+                        InstallPayloadType::kFull,
+                        ErrorCode::kSuccess);
+}
+
 TEST_F(DeltaPerformerTest, BrilloMetadataSignatureSizeTest) {
   unsigned int seed = time(nullptr);
   EXPECT_TRUE(performer_.Write(kDeltaMagic, sizeof(kDeltaMagic)));
 
   uint64_t major_version = htobe64(kBrilloMajorPayloadVersion);
-  EXPECT_TRUE(performer_.Write(&major_version, 8));
+  EXPECT_TRUE(
+      performer_.Write(&major_version, PayloadMetadata::kDeltaVersionSize));
 
   uint64_t manifest_size = rand_r(&seed) % 256;
-  uint64_t manifest_size_be = htobe64(manifest_size);
-  EXPECT_TRUE(performer_.Write(&manifest_size_be, 8));
-
   uint32_t metadata_signature_size = rand_r(&seed) % 256;
+
+  // The payload size has to be bigger than |metadata_size| and
+  // |metadata_signature_size|.
+  payload_.size = PayloadMetadata::kDeltaManifestSizeOffset +
+                  PayloadMetadata::kDeltaManifestSizeSize +
+                  PayloadMetadata::kDeltaMetadataSignatureSizeSize +
+                  manifest_size + metadata_signature_size + 1;
+
+  uint64_t manifest_size_be = htobe64(manifest_size);
+  EXPECT_TRUE(performer_.Write(&manifest_size_be,
+                               PayloadMetadata::kDeltaManifestSizeSize));
+
   uint32_t metadata_signature_size_be = htobe32(metadata_signature_size);
-  EXPECT_TRUE(performer_.Write(&metadata_signature_size_be, 4));
+  EXPECT_TRUE(
+      performer_.Write(&metadata_signature_size_be,
+                       PayloadMetadata::kDeltaMetadataSignatureSizeSize));
 
   EXPECT_LT(performer_.Close(), 0);
 
@@ -915,10 +862,74 @@
   EXPECT_EQ(metadata_signature_size, performer_.metadata_signature_size_);
 }
 
+TEST_F(DeltaPerformerTest, BrilloMetadataSizeNOKTest) {
+  unsigned int seed = time(nullptr);
+  EXPECT_TRUE(performer_.Write(kDeltaMagic, sizeof(kDeltaMagic)));
+
+  uint64_t major_version = htobe64(kBrilloMajorPayloadVersion);
+  EXPECT_TRUE(
+      performer_.Write(&major_version, PayloadMetadata::kDeltaVersionSize));
+
+  uint64_t manifest_size = UINT64_MAX - 600;  // Subtract to avoid wrap around.
+  uint64_t manifest_offset = PayloadMetadata::kDeltaManifestSizeOffset +
+                             PayloadMetadata::kDeltaManifestSizeSize +
+                             PayloadMetadata::kDeltaMetadataSignatureSizeSize;
+  payload_.metadata_size = manifest_offset + manifest_size;
+  uint32_t metadata_signature_size = rand_r(&seed) % 256;
+
+  // The payload size is greater than the payload header but smaller than
+  // |metadata_size| + |metadata_signature_size|.
+  payload_.size = manifest_offset + metadata_signature_size + 1;
+
+  uint64_t manifest_size_be = htobe64(manifest_size);
+  EXPECT_TRUE(performer_.Write(&manifest_size_be,
+                               PayloadMetadata::kDeltaManifestSizeSize));
+  uint32_t metadata_signature_size_be = htobe32(metadata_signature_size);
+
+  ErrorCode error;
+  EXPECT_FALSE(
+      performer_.Write(&metadata_signature_size_be,
+                       PayloadMetadata::kDeltaMetadataSignatureSizeSize + 1,
+                       &error));
+
+  EXPECT_EQ(ErrorCode::kDownloadInvalidMetadataSize, error);
+}
+
+TEST_F(DeltaPerformerTest, BrilloMetadataSignatureSizeNOKTest) {
+  unsigned int seed = time(nullptr);
+  EXPECT_TRUE(performer_.Write(kDeltaMagic, sizeof(kDeltaMagic)));
+
+  uint64_t major_version = htobe64(kBrilloMajorPayloadVersion);
+  EXPECT_TRUE(
+      performer_.Write(&major_version, PayloadMetadata::kDeltaVersionSize));
+
+  uint64_t manifest_size = rand_r(&seed) % 256;
+  // Subtract from UINT32_MAX to avoid wrap around.
+  uint32_t metadata_signature_size = UINT32_MAX - 600;
+
+  // The payload size is greater than |manifest_size| but smaller than
+  // |metadata_signature_size|.
+  payload_.size = manifest_size + 1;
+
+  uint64_t manifest_size_be = htobe64(manifest_size);
+  EXPECT_TRUE(performer_.Write(&manifest_size_be,
+                               PayloadMetadata::kDeltaManifestSizeSize));
+
+  uint32_t metadata_signature_size_be = htobe32(metadata_signature_size);
+  ErrorCode error;
+  EXPECT_FALSE(
+      performer_.Write(&metadata_signature_size_be,
+                       PayloadMetadata::kDeltaMetadataSignatureSizeSize + 1,
+                       &error));
+
+  EXPECT_EQ(ErrorCode::kDownloadInvalidMetadataSize, error);
+}
+
 TEST_F(DeltaPerformerTest, BrilloParsePayloadMetadataTest) {
   brillo::Blob payload_data = GeneratePayload(
       {}, {}, true, kBrilloMajorPayloadVersion, kSourceMinorPayloadVersion);
   install_plan_.hash_checks_mandatory = true;
+  payload_.size = payload_data.size();
   ErrorCode error;
   EXPECT_EQ(MetadataParseResult::kSuccess,
             performer_.ParsePayloadMetadata(payload_data, &error));
@@ -1073,4 +1084,117 @@
   EXPECT_EQ(kMaxSupportedMajorPayloadVersion, major_version);
 }
 
+TEST_F(DeltaPerformerTest, FullPayloadCanResumeTest) {
+  payload_.type = InstallPayloadType::kFull;
+  brillo::Blob expected_data =
+      brillo::Blob(std::begin(kRandomString), std::end(kRandomString));
+  expected_data.resize(4096);  // block size
+  vector<AnnotatedOperation> aops;
+  AnnotatedOperation aop;
+  *(aop.op.add_dst_extents()) = ExtentForRange(0, 1);
+  aop.op.set_data_offset(0);
+  aop.op.set_data_length(expected_data.size());
+  aop.op.set_type(InstallOperation::REPLACE);
+  aops.push_back(aop);
+
+  brillo::Blob payload_data = GeneratePayload(expected_data,
+                                              aops,
+                                              false,
+                                              kBrilloMajorPayloadVersion,
+                                              kFullPayloadMinorVersion);
+
+  ASSERT_EQ(expected_data, ApplyPayload(payload_data, "/dev/null", true));
+  performer_.CheckpointUpdateProgress(true);
+  const std::string payload_id = "12345";
+  prefs_.SetString(kPrefsUpdateCheckResponseHash, payload_id);
+  ASSERT_TRUE(DeltaPerformer::CanResumeUpdate(&prefs_, payload_id));
+}
+
+class TestDeltaPerformer : public DeltaPerformer {
+ public:
+  using DeltaPerformer::DeltaPerformer;
+
+  std::unique_ptr<PartitionWriter> CreatePartitionWriter(
+      const PartitionUpdate& partition_update,
+      const InstallPlan::Partition& install_part,
+      DynamicPartitionControlInterface* dynamic_control,
+      size_t block_size,
+      bool is_interactive,
+      bool is_dynamic_partition) {
+    LOG(INFO) << __FUNCTION__ << ": " << install_part.name;
+    auto node = partition_writers_.extract(install_part.name);
+    return std::move(node.mapped());
+  }
+
+  bool ShouldCheckpoint() override { return true; }
+
+  std::map<std::string, std::unique_ptr<MockPartitionWriter>>
+      partition_writers_;
+};
+
+namespace {
+AnnotatedOperation GetSourceCopyOp(uint32_t src_block,
+                                   uint32_t dst_block,
+                                   const void* data,
+                                   size_t length) {
+  AnnotatedOperation aop;
+  *(aop.op.add_src_extents()) = ExtentForRange(0, 1);
+  *(aop.op.add_dst_extents()) = ExtentForRange(0, 1);
+  aop.op.set_type(InstallOperation::SOURCE_COPY);
+  brillo::Blob src_hash;
+  HashCalculator::RawHashOfBytes(data, length, &src_hash);
+  aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size());
+  return aop;
+}
+}  // namespace
+
+TEST_F(DeltaPerformerTest, SetNextOpIndex) {
+  TestDeltaPerformer delta_performer{&prefs_,
+                                     &fake_boot_control_,
+                                     &fake_hardware_,
+                                     &mock_delegate_,
+                                     &install_plan_,
+                                     &payload_,
+                                     false};
+  brillo::Blob expected_data(std::begin(kRandomString),
+                             std::end(kRandomString));
+  expected_data.resize(4096 * 2);  // Two 4096-byte blocks.
+  AnnotatedOperation aop;
+
+  ScopedTempFile source("Source-XXXXXX");
+  EXPECT_TRUE(test_utils::WriteFileVector(source.path(), expected_data));
+
+  PartitionConfig old_part(kPartitionNameRoot);
+  old_part.path = source.path();
+  old_part.size = expected_data.size();
+
+  delta_performer.partition_writers_[kPartitionNameRoot] =
+      std::make_unique<MockPartitionWriter>();
+  auto& writer1 = *delta_performer.partition_writers_[kPartitionNameRoot];
+
+  Sequence seq;
+  std::vector<size_t> indices;
+  EXPECT_CALL(writer1, CheckpointUpdateProgress(_))
+      .WillRepeatedly(
+          [&indices](size_t index) mutable { indices.emplace_back(index); });
+  EXPECT_CALL(writer1, Init(_, true, _)).Times(1).WillOnce(Return(true));
+  EXPECT_CALL(writer1, PerformSourceCopyOperation(_, _))
+      .Times(2)
+      .WillRepeatedly(Return(true));
+
+  brillo::Blob payload_data = GeneratePayload(
+      brillo::Blob(),
+      {GetSourceCopyOp(0, 0, expected_data.data(), 4096),
+       GetSourceCopyOp(1, 1, expected_data.data() + 4096, 4096)},
+      false,
+      &old_part);
+
+  ApplyPayloadToData(&delta_performer, payload_data, source.path(), {}, true);
+  ASSERT_TRUE(std::is_sorted(indices.begin(), indices.end()));
+  ASSERT_GT(indices.size(), 0UL);
+
+  // Should be equal to the number of operations.
+  ASSERT_EQ(indices[indices.size() - 1], 2UL);
+}
+
 }  // namespace chromeos_update_engine
diff --git a/payload_consumer/extent_reader.cc b/payload_consumer/extent_reader.cc
index ad983ae..3c7329d 100644
--- a/payload_consumer/extent_reader.cc
+++ b/payload_consumer/extent_reader.cc
@@ -77,7 +77,7 @@
         std::min(count - bytes_read, cur_extent_bytes_left);
 
     ssize_t out_bytes_read;
-    TEST_AND_RETURN_FALSE(utils::PReadAll(
+    TEST_AND_RETURN_FALSE(utils::ReadAll(
         fd_,
         bytes + bytes_read,
         bytes_to_read,
diff --git a/payload_consumer/extent_reader_unittest.cc b/payload_consumer/extent_reader_unittest.cc
index b7059bc..686f14d 100644
--- a/payload_consumer/extent_reader_unittest.cc
+++ b/payload_consumer/extent_reader_unittest.cc
@@ -72,7 +72,7 @@
   }
 
   FileDescriptorPtr fd_;
-  test_utils::ScopedTempFile temp_file_{"ExtentReaderTest-file.XXXXXX"};
+  ScopedTempFile temp_file_{"ExtentReaderTest-file.XXXXXX"};
   brillo::Blob sample_;
 };
 
diff --git a/payload_consumer/extent_writer.h b/payload_consumer/extent_writer.h
index 9e53561..8b1b532 100644
--- a/payload_consumer/extent_writer.h
+++ b/payload_consumer/extent_writer.h
@@ -38,8 +38,7 @@
   virtual ~ExtentWriter() = default;
 
   // Returns true on success.
-  virtual bool Init(FileDescriptorPtr fd,
-                    const google::protobuf::RepeatedPtrField<Extent>& extents,
+  virtual bool Init(const google::protobuf::RepeatedPtrField<Extent>& extents,
                     uint32_t block_size) = 0;
 
   // Returns true on success.
@@ -51,13 +50,11 @@
 
 class DirectExtentWriter : public ExtentWriter {
  public:
-  DirectExtentWriter() = default;
+  explicit DirectExtentWriter(FileDescriptorPtr fd) : fd_(fd) {}
   ~DirectExtentWriter() override = default;
 
-  bool Init(FileDescriptorPtr fd,
-            const google::protobuf::RepeatedPtrField<Extent>& extents,
+  bool Init(const google::protobuf::RepeatedPtrField<Extent>& extents,
             uint32_t block_size) override {
-    fd_ = fd;
     block_size_ = block_size;
     extents_ = extents;
     cur_extent_ = extents_.begin();
diff --git a/payload_consumer/extent_writer_unittest.cc b/payload_consumer/extent_writer_unittest.cc
index aef856b..5c67d3e 100644
--- a/payload_consumer/extent_writer_unittest.cc
+++ b/payload_consumer/extent_writer_unittest.cc
@@ -59,15 +59,14 @@
   void WriteAlignedExtents(size_t chunk_size, size_t first_chunk_size);
 
   FileDescriptorPtr fd_;
-  test_utils::ScopedTempFile temp_file_{"ExtentWriterTest-file.XXXXXX"};
+  ScopedTempFile temp_file_{"ExtentWriterTest-file.XXXXXX"};
 };
 
 TEST_F(ExtentWriterTest, SimpleTest) {
   vector<Extent> extents = {ExtentForRange(1, 1)};
   const string bytes = "1234";
-  DirectExtentWriter direct_writer;
-  EXPECT_TRUE(
-      direct_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
+  DirectExtentWriter direct_writer{fd_};
+  EXPECT_TRUE(direct_writer.Init({extents.begin(), extents.end()}, kBlockSize));
   EXPECT_TRUE(direct_writer.Write(bytes.data(), bytes.size()));
 
   EXPECT_EQ(static_cast<off_t>(kBlockSize + bytes.size()),
@@ -84,9 +83,8 @@
 
 TEST_F(ExtentWriterTest, ZeroLengthTest) {
   vector<Extent> extents = {ExtentForRange(1, 1)};
-  DirectExtentWriter direct_writer;
-  EXPECT_TRUE(
-      direct_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
+  DirectExtentWriter direct_writer{fd_};
+  EXPECT_TRUE(direct_writer.Init({extents.begin(), extents.end()}, kBlockSize));
   EXPECT_TRUE(direct_writer.Write(nullptr, 0));
 }
 
@@ -109,9 +107,8 @@
   brillo::Blob data(kBlockSize * 3);
   test_utils::FillWithData(&data);
 
-  DirectExtentWriter direct_writer;
-  EXPECT_TRUE(
-      direct_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
+  DirectExtentWriter direct_writer{fd_};
+  EXPECT_TRUE(direct_writer.Init({extents.begin(), extents.end()}, kBlockSize));
 
   size_t bytes_written = 0;
   while (bytes_written < data.size()) {
@@ -150,9 +147,8 @@
   brillo::Blob data(17);
   test_utils::FillWithData(&data);
 
-  DirectExtentWriter direct_writer;
-  EXPECT_TRUE(
-      direct_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
+  DirectExtentWriter direct_writer{fd_};
+  EXPECT_TRUE(direct_writer.Init({extents.begin(), extents.end()}, kBlockSize));
 
   size_t bytes_written = 0;
   while (bytes_written < (block_count * kBlockSize)) {
diff --git a/payload_consumer/fake_extent_writer.h b/payload_consumer/fake_extent_writer.h
index 7b2b7ac..680b1b3 100644
--- a/payload_consumer/fake_extent_writer.h
+++ b/payload_consumer/fake_extent_writer.h
@@ -33,8 +33,7 @@
   ~FakeExtentWriter() override = default;
 
   // ExtentWriter overrides.
-  bool Init(FileDescriptorPtr /* fd */,
-            const google::protobuf::RepeatedPtrField<Extent>& /* extents */,
+  bool Init(const google::protobuf::RepeatedPtrField<Extent>& /* extents */,
             uint32_t /* block_size */) override {
     init_called_ = true;
     return true;
diff --git a/payload_consumer/fec_file_descriptor.cc b/payload_consumer/fec_file_descriptor.cc
index de22cf3..3fee196 100644
--- a/payload_consumer/fec_file_descriptor.cc
+++ b/payload_consumer/fec_file_descriptor.cc
@@ -16,6 +16,8 @@
 
 #include "update_engine/payload_consumer/fec_file_descriptor.h"
 
+#include <base/logging.h>
+
 namespace chromeos_update_engine {
 
 bool FecFileDescriptor::Open(const char* path, int flags) {
diff --git a/payload_consumer/file_descriptor.cc b/payload_consumer/file_descriptor.cc
index 1de615c..da76327 100644
--- a/payload_consumer/file_descriptor.cc
+++ b/payload_consumer/file_descriptor.cc
@@ -21,6 +21,7 @@
 #include <sys/ioctl.h>
 #include <sys/stat.h>
 #include <sys/types.h>
+#include <unistd.h>
 
 #include <base/posix/eintr_wrapper.h>
 
@@ -28,6 +29,12 @@
 
 namespace chromeos_update_engine {
 
+EintrSafeFileDescriptor::~EintrSafeFileDescriptor() {
+  if (IsOpen()) {
+    Close();
+  }
+}
+
 bool EintrSafeFileDescriptor::Open(const char* path, int flags, mode_t mode) {
   CHECK_EQ(fd_, -1);
   return ((fd_ = HANDLE_EINTR(open(path, flags, mode))) >= 0);
@@ -125,11 +132,19 @@
 
 bool EintrSafeFileDescriptor::Flush() {
   CHECK_GE(fd_, 0);
+  // delta_performer typically opens with |O_DSYNC| (except in interactive
+  // settings), so this fsync() is a best-effort flush and its result is
+  // intentionally ignored.
+  fsync(fd_);
   return true;
 }
 
 bool EintrSafeFileDescriptor::Close() {
-  CHECK_GE(fd_, 0);
+  if (fd_ < 0) {
+    return false;
+  }
+  // https://stackoverflow.com/questions/705454/does-linux-guarantee-the-contents-of-a-file-is-flushed-to-disc-after-close
+  // |close()| doesn't imply |fsync()|, so we need to call it manually.
+  fsync(fd_);
   if (IGNORE_EINTR(close(fd_)))
     return false;
   fd_ = -1;
diff --git a/payload_consumer/file_descriptor.h b/payload_consumer/file_descriptor.h
index 55f76c6..faebcc1 100644
--- a/payload_consumer/file_descriptor.h
+++ b/payload_consumer/file_descriptor.h
@@ -21,7 +21,7 @@
 #include <sys/types.h>
 #include <memory>
 
-#include <base/logging.h>
+#include <base/macros.h>
 
 // Abstraction for managing opening, reading, writing and closing of file
 // descriptors. This includes an abstract class and one standard implementation
@@ -111,6 +111,7 @@
 class EintrSafeFileDescriptor : public FileDescriptor {
  public:
   EintrSafeFileDescriptor() : fd_(-1) {}
+  ~EintrSafeFileDescriptor();
 
   // Interface methods.
   bool Open(const char* path, int flags, mode_t mode) override;
diff --git a/payload_consumer/file_descriptor_utils.cc b/payload_consumer/file_descriptor_utils.cc
index 846cbd7..9a6a601 100644
--- a/payload_consumer/file_descriptor_utils.cc
+++ b/payload_consumer/file_descriptor_utils.cc
@@ -82,8 +82,8 @@
                         const RepeatedPtrField<Extent>& tgt_extents,
                         uint64_t block_size,
                         brillo::Blob* hash_out) {
-  DirectExtentWriter writer;
-  TEST_AND_RETURN_FALSE(writer.Init(target, tgt_extents, block_size));
+  DirectExtentWriter writer{target};
+  TEST_AND_RETURN_FALSE(writer.Init(tgt_extents, block_size));
   TEST_AND_RETURN_FALSE(utils::BlocksInExtents(src_extents) ==
                         utils::BlocksInExtents(tgt_extents));
   TEST_AND_RETURN_FALSE(
diff --git a/payload_consumer/file_descriptor_utils_unittest.cc b/payload_consumer/file_descriptor_utils_unittest.cc
index 48e610f..478893d 100644
--- a/payload_consumer/file_descriptor_utils_unittest.cc
+++ b/payload_consumer/file_descriptor_utils_unittest.cc
@@ -52,14 +52,13 @@
 class FileDescriptorUtilsTest : public ::testing::Test {
  protected:
   void SetUp() override {
-    EXPECT_TRUE(utils::MakeTempFile("fd_tgt.XXXXXX", &tgt_path_, nullptr));
-    EXPECT_TRUE(target_->Open(tgt_path_.c_str(), O_RDWR));
+    EXPECT_TRUE(target_->Open(tgt_file_.path().c_str(), O_RDWR));
   }
 
   // Check that the |target_| file contains |expected_contents|.
   void ExpectTarget(const std::string& expected_contents) {
     std::string target_contents;
-    EXPECT_TRUE(utils::ReadFile(tgt_path_, &target_contents));
+    EXPECT_TRUE(utils::ReadFile(tgt_file_.path(), &target_contents));
     EXPECT_EQ(expected_contents.size(), target_contents.size());
     if (target_contents != expected_contents) {
       ADD_FAILURE() << "Contents don't match.";
@@ -70,8 +69,7 @@
     }
   }
 
-  // Path to the target temporary file.
-  std::string tgt_path_;
+  ScopedTempFile tgt_file_{"fd_tgt.XXXXXX"};
 
   // Source and target file descriptor used for testing the tools.
   FakeFileDescriptor* fake_source_{new FakeFileDescriptor()};
diff --git a/payload_consumer/file_writer_unittest.cc b/payload_consumer/file_writer_unittest.cc
index 59cfe2b..3b959f3 100644
--- a/payload_consumer/file_writer_unittest.cc
+++ b/payload_consumer/file_writer_unittest.cc
@@ -35,8 +35,7 @@
 class FileWriterTest : public ::testing::Test {};
 
 TEST(FileWriterTest, SimpleTest) {
-  // Create a uniquely named file for testing.
-  test_utils::ScopedTempFile file("FileWriterTest-XXXXXX");
+  ScopedTempFile file("FileWriterTest-XXXXXX");
   DirectFileWriter file_writer;
   EXPECT_EQ(0,
             file_writer.Open(file.path().c_str(),
@@ -60,7 +59,7 @@
 
 TEST(FileWriterTest, WriteErrorTest) {
   // Create a uniquely named file for testing.
-  test_utils::ScopedTempFile file("FileWriterTest-XXXXXX");
+  ScopedTempFile file("FileWriterTest-XXXXXX");
   DirectFileWriter file_writer;
   EXPECT_EQ(0,
             file_writer.Open(file.path().c_str(),
diff --git a/payload_consumer/filesystem_verifier_action.cc b/payload_consumer/filesystem_verifier_action.cc
index 36e5a35..22c8e0b 100644
--- a/payload_consumer/filesystem_verifier_action.cc
+++ b/payload_consumer/filesystem_verifier_action.cc
@@ -20,24 +20,65 @@
 #include <fcntl.h>
 #include <sys/stat.h>
 #include <sys/types.h>
+#include <unistd.h>
 
 #include <algorithm>
 #include <cstdlib>
+#include <memory>
 #include <string>
+#include <utility>
 
 #include <base/bind.h>
+#include <base/strings/string_util.h>
 #include <brillo/data_encoding.h>
+#include <brillo/message_loops/message_loop.h>
+#include <brillo/secure_blob.h>
 #include <brillo/streams/file_stream.h>
 
+#include "common/error_code.h"
+#include "payload_generator/delta_diff_generator.h"
 #include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/file_descriptor.h"
 
 using brillo::data_encoding::Base64Encode;
 using std::string;
 
+// On a partition with verity enabled, we expect to see the following format:
+// ===================================================
+//              Normal Filesystem Data
+// (this should take most of the space, like over 90%)
+// ===================================================
+//                  Hash tree
+//         ~0.8% (e.g. 16M for 2GB image)
+// ===================================================
+//                  FEC data
+//                    ~0.8%
+// ===================================================
+//                   Footer
+//                     4K
+// ===================================================
+
+// For OTA that doesn't do on device verity computation, hash tree and fec data
+// are written during DownloadAction as a regular InstallOp, so no special
+// handling needed, we can just read the entire partition in 1 go.
+
+// Verity enabled case: Only Normal FS data is written during download action.
+// When hashing the entire partition, we will need to build the hash tree,
+// write it to disk, then build the FEC data and write it to disk. Therefore,
+// it is important that we finish writing the hash tree before we attempt to
+// read & hash it. The same principle applies to the FEC data.
+
+// |verity_writer_| handles building and writing of FEC/hash tree; we just
+// need to be careful when reading. Specifically, we must stop at the
+// beginning of the hash tree, let |verity_writer_| write both the hash tree
+// and FEC, and then continue reading the remaining part of the partition.
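+
+// A minimal sketch (illustrative only, not referenced by the code below) of
+// how the end of the plain filesystem data follows from the layout described
+// above: hashing stops at whichever of the hash tree or FEC region comes
+// first, and falls back to the full partition size when neither is present.
+static inline uint64_t SketchFilesystemDataEnd(uint64_t partition_size,
+                                               uint64_t hash_tree_offset,
+                                               uint64_t fec_offset) {
+  if (hash_tree_offset != 0)
+    return hash_tree_offset;  // The hash tree precedes the FEC data.
+  if (fec_offset != 0)
+    return fec_offset;
+  return partition_size;
+}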
+
 namespace chromeos_update_engine {
 
 namespace {
 const off_t kReadFileBufferSize = 128 * 1024;
+constexpr float kVerityProgressPercent = 0.6;
 }  // namespace
 
 void FilesystemVerifierAction::PerformAction() {
@@ -57,7 +98,7 @@
     abort_action_completer.set_code(ErrorCode::kSuccess);
     return;
   }
-
+  install_plan_.Dump();
   StartPartitionHashing();
   abort_action_completer.set_should_complete(false);
 }
@@ -68,162 +109,318 @@
 }
 
 void FilesystemVerifierAction::Cleanup(ErrorCode code) {
-  src_stream_.reset();
+  partition_fd_.reset();
   // This memory is not used anymore.
   buffer_.clear();
 
+  // If we didn't write verity, the partitions were mapped. Release resources now.
+  if (!install_plan_.write_verity &&
+      dynamic_control_->UpdateUsesSnapshotCompression()) {
+    LOG(INFO) << "Not writing verity and VABC is enabled, unmapping all "
+                 "partitions";
+    dynamic_control_->UnmapAllPartitions();
+  }
+
   if (cancelled_)
     return;
   if (code == ErrorCode::kSuccess && HasOutputPipe())
     SetOutputObject(install_plan_);
+  UpdateProgress(1.0);
   processor_->ActionComplete(this, code);
 }
 
+void FilesystemVerifierAction::UpdateProgress(double progress) {
+  if (delegate_ != nullptr) {
+    delegate_->OnVerifyProgressUpdate(progress);
+  }
+}
+
+void FilesystemVerifierAction::UpdatePartitionProgress(double progress) {
+  // We don't take the size of each partition into account; every partition
+  // has the same weight on the progress bar.
+  // TODO(b/186087589): Take sizes of each partition into account.
+  UpdateProgress((progress + partition_index_) /
+                 install_plan_.partitions.size());
+}
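+
+// Worked example of the scaling above (illustrative numbers only): with four
+// partitions, being 50% done with the partition at index 1 reports
+// (0.5 + 1) / 4 = 0.375 overall.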
+
+bool FilesystemVerifierAction::InitializeFdVABC(bool should_write_verity) {
+  const InstallPlan::Partition& partition =
+      install_plan_.partitions[partition_index_];
+
+  if (!should_write_verity) {
+    // In VABC, we cannot map/unmap partitions without closing ALL fds first.
+    // Since this function might be called inside a ScheduledTask, the closure
+    // might hold a copy of |partition_fd_| while executing this function,
+    // which means that even if we call |partition_fd_.reset()| here, the
+    // underlying fd might not be closed until we return. This is
+    // unacceptable; we need to close |partition_fd_| right away (see the
+    // sketch after this function).
+    if (partition_fd_) {
+      partition_fd_->Close();
+      partition_fd_.reset();
+    }
+    // In VABC, if we are not writing verity, just map all partitions and
+    // read using a regular fd on |postinstall_mount_device|. All reads will
+    // go through snapuserd, which provides a consistent view: the device
+    // will use snapuserd to read the partitions during boot.
+    // b/186196758
+    // Call UnmapAllPartitions() first, because if we wrote verity before,
+    // those writes won't be visible to a previously opened snapuserd daemon.
+    // To ensure that we see the most up-to-date data from the partitions,
+    // call Unmap() and then Map() to re-spin the daemon.
+    dynamic_control_->UnmapAllPartitions();
+    dynamic_control_->MapAllPartitions();
+    return InitializeFd(partition.readonly_target_path);
+  }
+  partition_fd_ =
+      dynamic_control_->OpenCowFd(partition.name, partition.source_path, true);
+  if (!partition_fd_) {
+    LOG(ERROR) << "OpenCowReader(" << partition.name << ", "
+               << partition.source_path << ") failed.";
+    return false;
+  }
+  partition_size_ = partition.target_size;
+  return true;
+}
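+
+// A minimal sketch of the hazard noted in InitializeFdVABC() above, assuming
+// FileDescriptorPtr has shared-pointer semantics (which is what the comment
+// implies): reset() alone does not close the descriptor while another copy is
+// alive, so Close() has to be called explicitly.
+//
+//   FileDescriptorPtr held_by_task = partition_fd_;  // copy captured by a task
+//   partition_fd_.reset();   // drops one reference; the fd stays open
+//   held_by_task->Close();   // only an explicit Close() releases the fd now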
+
+bool FilesystemVerifierAction::InitializeFd(const std::string& part_path) {
+  partition_fd_ = FileDescriptorPtr(new EintrSafeFileDescriptor());
+  const bool write_verity = ShouldWriteVerity();
+  int flags = write_verity ? O_RDWR : O_RDONLY;
+  if (!utils::SetBlockDeviceReadOnly(part_path, !write_verity)) {
+    LOG(WARNING) << "Failed to set block device " << part_path << " as "
+                 << (write_verity ? "writable" : "readonly");
+  }
+  if (!partition_fd_->Open(part_path.c_str(), flags)) {
+    LOG(ERROR) << "Unable to open " << part_path << " for reading.";
+    return false;
+  }
+  return true;
+}
+
+void FilesystemVerifierAction::WriteVerityAndHashPartition(
+    FileDescriptorPtr fd,
+    const off64_t start_offset,
+    const off64_t end_offset,
+    void* buffer,
+    const size_t buffer_size) {
+  if (start_offset >= end_offset) {
+    LOG_IF(WARNING, start_offset > end_offset)
+        << "start_offset is greater than end_offset : " << start_offset << " > "
+        << end_offset;
+    if (!verity_writer_->Finalize(fd, fd)) {
+      LOG(ERROR) << "Failed to write verity data";
+      Cleanup(ErrorCode::kVerityCalculationError);
+      return;
+    }
+    if (dynamic_control_->UpdateUsesSnapshotCompression()) {
+      // Spin up snapuserd to read fs.
+      if (!InitializeFdVABC(false)) {
+        LOG(ERROR) << "Failed to map all partitions";
+        Cleanup(ErrorCode::kFilesystemVerifierError);
+        return;
+      }
+    }
+    HashPartition(partition_fd_, 0, partition_size_, buffer, buffer_size);
+    return;
+  }
+  const auto cur_offset = fd->Seek(start_offset, SEEK_SET);
+  if (cur_offset != start_offset) {
+    PLOG(ERROR) << "Failed to seek to offset: " << start_offset;
+    Cleanup(ErrorCode::kVerityCalculationError);
+    return;
+  }
+  const auto read_size =
+      std::min<size_t>(buffer_size, end_offset - start_offset);
+  const auto bytes_read = fd->Read(buffer, read_size);
+  if (bytes_read < 0 || static_cast<size_t>(bytes_read) != read_size) {
+    PLOG(ERROR) << "Failed to read offset " << start_offset << " expected "
+                << read_size << " bytes, actual: " << bytes_read;
+    Cleanup(ErrorCode::kVerityCalculationError);
+    return;
+  }
+  if (!verity_writer_->Update(
+          start_offset, static_cast<const uint8_t*>(buffer), read_size)) {
+    LOG(ERROR) << "VerityWriter::Update() failed";
+    Cleanup(ErrorCode::kVerityCalculationError);
+    return;
+  }
+  UpdatePartitionProgress((start_offset + bytes_read) * 1.0f / partition_size_ *
+                          kVerityProgressPercent);
+  CHECK(pending_task_id_.PostTask(
+      FROM_HERE,
+      base::BindOnce(&FilesystemVerifierAction::WriteVerityAndHashPartition,
+                     base::Unretained(this),
+                     fd,
+                     start_offset + bytes_read,
+                     end_offset,
+                     buffer,
+                     buffer_size)));
+}
+
+void FilesystemVerifierAction::HashPartition(FileDescriptorPtr fd,
+                                             const off64_t start_offset,
+                                             const off64_t end_offset,
+                                             void* buffer,
+                                             const size_t buffer_size) {
+  if (start_offset >= end_offset) {
+    LOG_IF(WARNING, start_offset > end_offset)
+        << "start_offset is greater than end_offset : " << start_offset << " > "
+        << end_offset;
+    FinishPartitionHashing();
+    return;
+  }
+  const auto cur_offset = fd->Seek(start_offset, SEEK_SET);
+  if (cur_offset != start_offset) {
+    PLOG(ERROR) << "Failed to seek to offset: " << start_offset;
+    Cleanup(ErrorCode::kFilesystemVerifierError);
+    return;
+  }
+  const auto read_size =
+      std::min<size_t>(buffer_size, end_offset - start_offset);
+  const auto bytes_read = fd->Read(buffer, read_size);
+  if (bytes_read < 0 || static_cast<size_t>(bytes_read) != read_size) {
+    PLOG(ERROR) << "Failed to read offset " << start_offset << " expected "
+                << read_size << " bytes, actual: " << bytes_read;
+    Cleanup(ErrorCode::kFilesystemVerifierError);
+    return;
+  }
+  if (!hasher_->Update(buffer, read_size)) {
+    LOG(ERROR) << "Hasher updated failed on offset" << start_offset;
+    Cleanup(ErrorCode::kFilesystemVerifierError);
+    return;
+  }
+  const auto progress = (start_offset + bytes_read) * 1.0f / partition_size_;
+  UpdatePartitionProgress(progress * (1 - kVerityProgressPercent) +
+                          kVerityProgressPercent);
+  CHECK(pending_task_id_.PostTask(
+      FROM_HERE,
+      base::BindOnce(&FilesystemVerifierAction::HashPartition,
+                     base::Unretained(this),
+                     fd,
+                     start_offset + bytes_read,
+                     end_offset,
+                     buffer,
+                     buffer_size)));
+}
+
 void FilesystemVerifierAction::StartPartitionHashing() {
   if (partition_index_ == install_plan_.partitions.size()) {
+    if (!install_plan_.untouched_dynamic_partitions.empty()) {
+      LOG(INFO) << "Verifying extents of untouched dynamic partitions ["
+                << base::JoinString(install_plan_.untouched_dynamic_partitions,
+                                    ", ")
+                << "]";
+      if (!dynamic_control_->VerifyExtentsForUntouchedPartitions(
+              install_plan_.source_slot,
+              install_plan_.target_slot,
+              install_plan_.untouched_dynamic_partitions)) {
+        Cleanup(ErrorCode::kFilesystemVerifierError);
+        return;
+      }
+    }
+
     Cleanup(ErrorCode::kSuccess);
     return;
   }
   const InstallPlan::Partition& partition =
       install_plan_.partitions[partition_index_];
-
-  string part_path;
-  switch (verifier_step_) {
-    case VerifierStep::kVerifySourceHash:
-      part_path = partition.source_path;
-      partition_size_ = partition.source_size;
-      break;
-    case VerifierStep::kVerifyTargetHash:
-      part_path = partition.target_path;
-      partition_size_ = partition.target_size;
-      break;
-  }
-
-  if (part_path.empty()) {
-    if (partition_size_ == 0) {
-      LOG(INFO) << "Skip hashing partition " << partition_index_ << " ("
-                << partition.name << ") because size is 0.";
-      partition_index_++;
-      StartPartitionHashing();
-      return;
-    }
-    LOG(ERROR) << "Cannot hash partition " << partition_index_ << " ("
-               << partition.name
-               << ") because its device path cannot be determined.";
-    Cleanup(ErrorCode::kFilesystemVerifierError);
-    return;
-  }
+  const auto& part_path = GetPartitionPath();
+  partition_size_ = GetPartitionSize();
 
   LOG(INFO) << "Hashing partition " << partition_index_ << " ("
             << partition.name << ") on device " << part_path;
-
-  brillo::ErrorPtr error;
-  src_stream_ =
-      brillo::FileStream::Open(base::FilePath(part_path),
-                               brillo::Stream::AccessMode::READ,
-                               brillo::FileStream::Disposition::OPEN_EXISTING,
-                               &error);
-
-  if (!src_stream_) {
-    LOG(ERROR) << "Unable to open " << part_path << " for reading";
+  auto success = false;
+  if (IsVABC(partition)) {
+    success = InitializeFdVABC(ShouldWriteVerity());
+  } else {
+    if (part_path.empty()) {
+      if (partition_size_ == 0) {
+        LOG(INFO) << "Skip hashing partition " << partition_index_ << " ("
+                  << partition.name << ") because size is 0.";
+        partition_index_++;
+        StartPartitionHashing();
+        return;
+      }
+      LOG(ERROR) << "Cannot hash partition " << partition_index_ << " ("
+                 << partition.name
+                 << ") because its device path cannot be determined.";
+      Cleanup(ErrorCode::kFilesystemVerifierError);
+      return;
+    }
+    success = InitializeFd(part_path);
+  }
+  if (!success) {
     Cleanup(ErrorCode::kFilesystemVerifierError);
     return;
   }
-
   buffer_.resize(kReadFileBufferSize);
   hasher_ = std::make_unique<HashCalculator>();
 
   offset_ = 0;
-  if (verifier_step_ == VerifierStep::kVerifyTargetHash &&
-      install_plan_.write_verity) {
+  filesystem_data_end_ = partition_size_;
+  CHECK_LE(partition.hash_tree_offset, partition.fec_offset)
+      << " Hash tree is expected to come before FEC data";
+  if (partition.hash_tree_offset != 0) {
+    filesystem_data_end_ = partition.hash_tree_offset;
+  } else if (partition.fec_offset != 0) {
+    filesystem_data_end_ = partition.fec_offset;
+  }
+  if (ShouldWriteVerity()) {
+    LOG(INFO) << "Verity writes enabled on partition " << partition.name;
     if (!verity_writer_->Init(partition)) {
+      LOG(INFO) << "Verity writes enabled on partition " << partition.name;
       Cleanup(ErrorCode::kVerityCalculationError);
       return;
     }
+    WriteVerityAndHashPartition(
+        partition_fd_, 0, filesystem_data_end_, buffer_.data(), buffer_.size());
+  } else {
+    LOG(INFO) << "Verity writes disabled on partition " << partition.name;
+    HashPartition(
+        partition_fd_, 0, partition_size_, buffer_.data(), buffer_.size());
   }
-
-  // Start the first read.
-  ScheduleRead();
 }
 
-void FilesystemVerifierAction::ScheduleRead() {
+bool FilesystemVerifierAction::IsVABC(
+    const InstallPlan::Partition& partition) const {
+  return dynamic_control_->UpdateUsesSnapshotCompression() &&
+         verifier_step_ == VerifierStep::kVerifyTargetHash &&
+         dynamic_control_->IsDynamicPartition(partition.name,
+                                              install_plan_.target_slot);
+}
+
+const std::string& FilesystemVerifierAction::GetPartitionPath() const {
   const InstallPlan::Partition& partition =
       install_plan_.partitions[partition_index_];
-
-  // We can only start reading anything past |hash_tree_offset| after we have
-  // already read all the data blocks that the hash tree covers. The same
-  // applies to FEC.
-  uint64_t read_end = partition_size_;
-  if (partition.hash_tree_size != 0 &&
-      offset_ < partition.hash_tree_data_offset + partition.hash_tree_data_size)
-    read_end = std::min(read_end, partition.hash_tree_offset);
-  if (partition.fec_size != 0 &&
-      offset_ < partition.fec_data_offset + partition.fec_data_size)
-    read_end = std::min(read_end, partition.fec_offset);
-  size_t bytes_to_read =
-      std::min(static_cast<uint64_t>(buffer_.size()), read_end - offset_);
-  if (!bytes_to_read) {
-    FinishPartitionHashing();
-    return;
-  }
-
-  bool read_async_ok = src_stream_->ReadAsync(
-      buffer_.data(),
-      bytes_to_read,
-      base::Bind(&FilesystemVerifierAction::OnReadDoneCallback,
-                 base::Unretained(this)),
-      base::Bind(&FilesystemVerifierAction::OnReadErrorCallback,
-                 base::Unretained(this)),
-      nullptr);
-
-  if (!read_async_ok) {
-    LOG(ERROR) << "Unable to schedule an asynchronous read from the stream.";
-    Cleanup(ErrorCode::kError);
+  switch (verifier_step_) {
+    case VerifierStep::kVerifySourceHash:
+      return partition.source_path;
+    case VerifierStep::kVerifyTargetHash:
+      if (IsVABC(partition)) {
+        return partition.readonly_target_path;
+      } else {
+        return partition.target_path;
+      }
   }
 }
 
-void FilesystemVerifierAction::OnReadDoneCallback(size_t bytes_read) {
-  if (cancelled_) {
-    Cleanup(ErrorCode::kError);
-    return;
+size_t FilesystemVerifierAction::GetPartitionSize() const {
+  const InstallPlan::Partition& partition =
+      install_plan_.partitions[partition_index_];
+  switch (verifier_step_) {
+    case VerifierStep::kVerifySourceHash:
+      return partition.source_size;
+    case VerifierStep::kVerifyTargetHash:
+      return partition.target_size;
   }
-
-  if (bytes_read == 0) {
-    LOG(ERROR) << "Failed to read the remaining " << partition_size_ - offset_
-               << " bytes from partition "
-               << install_plan_.partitions[partition_index_].name;
-    Cleanup(ErrorCode::kFilesystemVerifierError);
-    return;
-  }
-
-  if (!hasher_->Update(buffer_.data(), bytes_read)) {
-    LOG(ERROR) << "Unable to update the hash.";
-    Cleanup(ErrorCode::kError);
-    return;
-  }
-
-  if (verifier_step_ == VerifierStep::kVerifyTargetHash &&
-      install_plan_.write_verity) {
-    if (!verity_writer_->Update(offset_, buffer_.data(), bytes_read)) {
-      Cleanup(ErrorCode::kVerityCalculationError);
-      return;
-    }
-  }
-
-  offset_ += bytes_read;
-
-  if (offset_ == partition_size_) {
-    FinishPartitionHashing();
-    return;
-  }
-
-  ScheduleRead();
 }
 
-void FilesystemVerifierAction::OnReadErrorCallback(const brillo::Error* error) {
-  // TODO(deymo): Transform the read-error into an specific ErrorCode.
-  LOG(ERROR) << "Asynchronous read failed.";
-  Cleanup(ErrorCode::kError);
+bool FilesystemVerifierAction::ShouldWriteVerity() {
+  const InstallPlan::Partition& partition =
+      install_plan_.partitions[partition_index_];
+  return verifier_step_ == VerifierStep::kVerifyTargetHash &&
+         install_plan_.write_verity &&
+         (partition.hash_tree_size > 0 || partition.fec_size > 0);
 }
 
 void FilesystemVerifierAction::FinishPartitionHashing() {
@@ -249,8 +446,8 @@
         }
         // If we have not verified source partition yet, now that the target
         // partition does not match, and it's not a full payload, we need to
-        // switch to kVerifySourceHash step to check if it's because the source
-        // partition does not match either.
+        // switch to kVerifySourceHash step to check if it's because the
+        // source partition does not match either.
         verifier_step_ = VerifierStep::kVerifySourceHash;
       } else {
         partition_index_++;
@@ -286,17 +483,20 @@
       }
       // The action will skip kVerifySourceHash step if target partition hash
       // matches, if we are in this step, it means target hash does not match,
-      // and now that the source partition hash matches, we should set the error
-      // code to reflect the error in target partition.
-      // We only need to verify the source partition which the target hash does
-      // not match, the rest of the partitions don't matter.
+      // and now that the source partition hash matches, we should set the
+      // error code to reflect the error in target partition. We only need to
+      // verify the source partition which the target hash does not match, the
+      // rest of the partitions don't matter.
       Cleanup(ErrorCode::kNewRootfsVerificationError);
       return;
   }
   // Start hashing the next partition, if any.
   hasher_.reset();
   buffer_.clear();
-  src_stream_->CloseBlocking(nullptr);
+  if (partition_fd_) {
+    partition_fd_->Close();
+    partition_fd_.reset();
+  }
   StartPartitionHashing();
 }
 
diff --git a/payload_consumer/filesystem_verifier_action.h b/payload_consumer/filesystem_verifier_action.h
index 83d6668..850abda 100644
--- a/payload_consumer/filesystem_verifier_action.h
+++ b/payload_consumer/filesystem_verifier_action.h
@@ -22,12 +22,15 @@
 
 #include <memory>
 #include <string>
+#include <utility>
 #include <vector>
 
-#include <brillo/streams/stream.h>
+#include <brillo/message_loops/message_loop.h>
 
 #include "update_engine/common/action.h"
 #include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/scoped_task_id.h"
+#include "update_engine/payload_consumer/file_descriptor.h"
 #include "update_engine/payload_consumer/install_plan.h"
 #include "update_engine/payload_consumer/verity_writer_interface.h"
 
@@ -49,32 +52,62 @@
   kVerifySourceHash,
 };
 
+class FilesystemVerifyDelegate {
+ public:
+  virtual ~FilesystemVerifyDelegate() = default;
+  virtual void OnVerifyProgressUpdate(double progress) = 0;
+};
+
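+// A minimal usage sketch (illustrative only): an observer implements the
+// interface above and registers itself on the action via set_delegate().
+//
+//   class LoggingVerifyDelegate : public FilesystemVerifyDelegate {
+//    public:
+//     void OnVerifyProgressUpdate(double progress) override {
+//       LOG(INFO) << "Filesystem verification progress: " << progress;
+//     }
+//   };
+//
+//   LoggingVerifyDelegate delegate;
+//   filesystem_verifier_action.set_delegate(&delegate);
+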
 class FilesystemVerifierAction : public InstallPlanAction {
  public:
-  FilesystemVerifierAction()
-      : verity_writer_(verity_writer::CreateVerityWriter()) {}
+  explicit FilesystemVerifierAction(
+      DynamicPartitionControlInterface* dynamic_control)
+      : verity_writer_(verity_writer::CreateVerityWriter()),
+        dynamic_control_(dynamic_control) {
+    CHECK(dynamic_control_);
+  }
+
   ~FilesystemVerifierAction() override = default;
 
   void PerformAction() override;
   void TerminateProcessing() override;
 
+  // Used for listening to progress updates
+  void set_delegate(FilesystemVerifyDelegate* delegate) {
+    this->delegate_ = delegate;
+  }
+  [[nodiscard]] FilesystemVerifyDelegate* get_delegate() const {
+    return this->delegate_;
+  }
+
   // Debugging/logging
   static std::string StaticType() { return "FilesystemVerifierAction"; }
   std::string Type() const override { return StaticType(); }
 
  private:
   friend class FilesystemVerifierActionTestDelegate;
+  void WriteVerityAndHashPartition(FileDescriptorPtr fd,
+                                   const off64_t start_offset,
+                                   const off64_t end_offset,
+                                   void* buffer,
+                                   const size_t buffer_size);
+  void HashPartition(FileDescriptorPtr fd,
+                     const off64_t start_offset,
+                     const off64_t end_offset,
+                     void* buffer,
+                     const size_t buffer_size);
+
+  // Returns true if we need to write verity bytes.
+  bool ShouldWriteVerity();
   // Starts the hashing of the current partition. If there aren't any partitions
   // remaining to be hashed, it finishes the action.
   void StartPartitionHashing();
 
-  // Schedules the asynchronous read of the filesystem.
-  void ScheduleRead();
+  const std::string& GetPartitionPath() const;
 
-  // Called from the main loop when a single read from |src_stream_| succeeds or
-  // fails, calling OnReadDoneCallback() and OnReadErrorCallback() respectively.
-  void OnReadDoneCallback(size_t bytes_read);
-  void OnReadErrorCallback(const brillo::Error* error);
+  bool IsVABC(const InstallPlan::Partition& partition) const;
+
+  size_t GetPartitionSize() const;
 
   // When the read is done, finalize the hash checking of the current partition
   // and continue checking the next one.
@@ -85,6 +118,17 @@
   // true if TerminateProcessing() was called.
   void Cleanup(ErrorCode code);
 
+  // Invokes the delegate callback to report progress, if a delegate is set.
+  void UpdateProgress(double progress);
+
+  // Updates progress of current partition. |progress| should be in range [0,
+  // 1], and it will be scaled appropriately with # of partitions.
+  void UpdatePartitionProgress(double progress);
+
+  // Initialize |partition_fd_| for the current partition.
+  bool InitializeFd(const std::string& part_path);
+  bool InitializeFdVABC(bool should_write_verity);
+
   // The type of the partition that we are verifying.
   VerifierStep verifier_step_ = VerifierStep::kVerifyTargetHash;
 
@@ -92,23 +136,24 @@
   // being hashed.
   size_t partition_index_{0};
 
-  // If not null, the FileStream used to read from the device.
-  brillo::StreamPtr src_stream_;
+  // If not null, the FileDescriptor used to read from the device.
+  // The verity writer might attempt to write to this fd, if verity is enabled.
+  FileDescriptorPtr partition_fd_;
 
   // Buffer for storing data we read.
   brillo::Blob buffer_;
 
   bool cancelled_{false};  // true if the action has been cancelled.
 
-  // The install plan we're passed in via the input pipe.
-  InstallPlan install_plan_;
-
   // Calculates the hash of the data.
   std::unique_ptr<HashCalculator> hasher_;
 
   // Write verity data of the current partition.
   std::unique_ptr<VerityWriterInterface> verity_writer_;
 
+  // Verifies the untouched dynamic partitions for partial updates.
+  DynamicPartitionControlInterface* dynamic_control_{nullptr};
+
   // Reads and hashes this many bytes from the head of the input stream. When
   // the partition starts to be hashed, this field is initialized from the
   // corresponding InstallPlan::Partition size which is the total size
@@ -119,6 +164,16 @@
   // The byte offset that we are reading in the current partition.
   uint64_t offset_{0};
 
+  // The end offset of the filesystem data, i.e. the offset of the first byte
+  // of the hash tree (or of the FEC data if there is no hash tree).
+  uint64_t filesystem_data_end_{0};
+
+  // An observer that observes progress updates of this action.
+  FilesystemVerifyDelegate* delegate_{};
+
+  // Task that should be cancelled on |TerminateProcessing|. Usually this
+  // points to the pending hashing continuation posted by this action.
+  ScopedTaskId pending_task_id_;
+
   DISALLOW_COPY_AND_ASSIGN(FilesystemVerifierAction);
 };
 
diff --git a/payload_consumer/filesystem_verifier_action_unittest.cc b/payload_consumer/filesystem_verifier_action_unittest.cc
index cb33404..f2f2954 100644
--- a/payload_consumer/filesystem_verifier_action_unittest.cc
+++ b/payload_consumer/filesystem_verifier_action_unittest.cc
@@ -16,6 +16,8 @@
 
 #include "update_engine/payload_consumer/filesystem_verifier_action.h"
 
+#include <algorithm>
+#include <cstring>
 #include <memory>
 #include <string>
 #include <utility>
@@ -25,34 +27,194 @@
 #include <brillo/message_loops/fake_message_loop.h>
 #include <brillo/message_loops/message_loop_utils.h>
 #include <brillo/secure_blob.h>
+#include <fec/ecc.h>
 #include <gtest/gtest.h>
+#include <libsnapshot/snapshot_writer.h>
+#include <sys/stat.h>
 
+#include "update_engine/common/dynamic_partition_control_stub.h"
 #include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/mock_dynamic_partition_control.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/fake_file_descriptor.h"
+#include "update_engine/payload_consumer/install_plan.h"
+#include "update_engine/payload_consumer/verity_writer_android.h"
 
 using brillo::MessageLoop;
 using std::string;
+using testing::_;
+using testing::AtLeast;
+using testing::DoAll;
+using testing::NiceMock;
+using testing::Return;
+using testing::SetArgPointee;
 
 namespace chromeos_update_engine {
 
 class FilesystemVerifierActionTest : public ::testing::Test {
+ public:
+  static constexpr size_t BLOCK_SIZE = 4096;
+  // We use SHA256 for testing, so the hash size is 256 bits / 8 = 32 bytes.
+  static constexpr size_t HASH_SIZE = 256 / 8;
+  static constexpr size_t PARTITION_SIZE = BLOCK_SIZE * 1024;
+  static constexpr size_t HASH_TREE_START_OFFSET = 800 * BLOCK_SIZE;
+  size_t hash_tree_size = 0;
+  size_t fec_start_offset = 0;
+  size_t fec_data_size = 0;
+  static constexpr size_t FEC_ROOTS = 2;
+  size_t fec_rounds = 0;
+  size_t fec_size = 0;
+
  protected:
-  void SetUp() override { loop_.SetAsCurrent(); }
+  void SetUp() override {
+    hash_tree_size = HashTreeBuilder::CalculateSize(
+        HASH_TREE_START_OFFSET, BLOCK_SIZE, HASH_SIZE);
+    fec_start_offset = HASH_TREE_START_OFFSET + hash_tree_size;
+    fec_data_size = fec_start_offset;
+    static constexpr size_t FEC_ROOTS = 2;
+    fec_rounds =
+        utils::DivRoundUp(fec_data_size / BLOCK_SIZE, FEC_RSM - FEC_ROOTS);
+    fec_size = fec_rounds * FEC_ROOTS * BLOCK_SIZE;
+
+    fec_data_.resize(fec_size);
+    hash_tree_data_.resize(hash_tree_size);
+    // Make the files globally readable/writable, as we want to write data.
+    ASSERT_EQ(0, fchmod(source_part_.fd(), 0666))
+        << " Failed to set " << source_part_.path() << " as writable "
+        << strerror(errno);
+    ASSERT_EQ(0, fchmod(target_part_.fd(), 0666))
+        << " Failed to set " << target_part_.path() << " as writable "
+        << strerror(errno);
+    brillo::Blob part_data(PARTITION_SIZE);
+    test_utils::FillWithData(&part_data);
+    ASSERT_TRUE(utils::WriteFile(
+        source_part_.path().c_str(), part_data.data(), part_data.size()));
+    // FillWithData() fills with different data on each call. We want the
+    // source/target partitions to contain different data for testing.
+    test_utils::FillWithData(&part_data);
+    ASSERT_TRUE(utils::WriteFile(
+        target_part_.path().c_str(), part_data.data(), part_data.size()));
+    loop_.SetAsCurrent();
+  }
 
   void TearDown() override {
     EXPECT_EQ(0, brillo::MessageLoopRunMaxIterations(&loop_, 1));
   }
 
+  void DoTestVABC(bool clear_target_hash, bool enable_verity);
+
   // Returns true iff test has completed successfully.
   bool DoTest(bool terminate_early, bool hash_fail);
 
   void BuildActions(const InstallPlan& install_plan);
+  void BuildActions(const InstallPlan& install_plan,
+                    DynamicPartitionControlInterface* dynamic_control);
+
+  InstallPlan::Partition* AddFakePartition(InstallPlan* install_plan,
+                                           std::string name = "fake_part") {
+    InstallPlan::Partition& part = install_plan->partitions.emplace_back();
+    part.name = name;
+    part.target_path = target_part_.path();
+    part.readonly_target_path = part.target_path;
+    part.target_size = PARTITION_SIZE;
+    part.block_size = BLOCK_SIZE;
+    part.source_path = source_part_.path();
+    part.source_size = PARTITION_SIZE;
+    EXPECT_TRUE(
+        HashCalculator::RawHashOfFile(source_part_.path(), &part.source_hash));
+    EXPECT_TRUE(
+        HashCalculator::RawHashOfFile(target_part_.path(), &part.target_hash));
+    return &part;
+  }
+  static void ZeroRange(FileDescriptorPtr fd,
+                        size_t start_block,
+                        size_t num_blocks) {
+    std::vector<unsigned char> buffer(BLOCK_SIZE);
+    ASSERT_EQ((ssize_t)(start_block * BLOCK_SIZE),
+              fd->Seek(start_block * BLOCK_SIZE, SEEK_SET));
+    for (size_t i = 0; i < num_blocks; i++) {
+      ASSERT_TRUE(utils::WriteAll(fd, buffer.data(), buffer.size()));
+    }
+  }
+
+  void SetHashWithVerity(InstallPlan::Partition* partition) {
+    partition->hash_tree_algorithm = "sha256";
+    partition->hash_tree_size = hash_tree_size;
+    partition->hash_tree_offset = HASH_TREE_START_OFFSET;
+    partition->hash_tree_data_offset = 0;
+    partition->hash_tree_data_size = HASH_TREE_START_OFFSET;
+    partition->fec_size = fec_size;
+    partition->fec_offset = fec_start_offset;
+    partition->fec_data_offset = 0;
+    partition->fec_data_size = fec_data_size;
+    partition->fec_roots = FEC_ROOTS;
+    VerityWriterAndroid verity_writer;
+    ASSERT_TRUE(verity_writer.Init(*partition));
+    LOG(INFO) << "Opening " << partition->readonly_target_path;
+    auto fd = std::make_shared<EintrSafeFileDescriptor>();
+    ASSERT_TRUE(fd->Open(partition->readonly_target_path.c_str(), O_RDWR))
+        << "Failed to open " << partition->target_path.c_str() << " "
+        << strerror(errno);
+    std::vector<unsigned char> buffer(BLOCK_SIZE);
+    // Only need to read up to hash tree
+    auto bytes_to_read = HASH_TREE_START_OFFSET;
+    auto offset = 0;
+    while (bytes_to_read > 0) {
+      const auto bytes_read = fd->Read(
+          buffer.data(), std::min<size_t>(buffer.size(), bytes_to_read));
+      ASSERT_GT(bytes_read, 0)
+          << "offset: " << offset << " bytes to read: " << bytes_to_read
+          << " error: " << strerror(errno);
+      ASSERT_TRUE(verity_writer.Update(offset, buffer.data(), bytes_read));
+      bytes_to_read -= bytes_read;
+      offset += bytes_read;
+    }
+    ASSERT_TRUE(verity_writer.Finalize(fd, fd));
+    ASSERT_TRUE(fd->IsOpen());
+    ASSERT_TRUE(HashCalculator::RawHashOfFile(target_part_.path(),
+                                              &partition->target_hash));
+
+    ASSERT_TRUE(fd->Seek(HASH_TREE_START_OFFSET, SEEK_SET));
+    ASSERT_EQ(fd->Read(hash_tree_data_.data(), hash_tree_data_.size()),
+              static_cast<ssize_t>(hash_tree_data_.size()))
+        << "Failed to read hashtree " << strerror(errno);
+    ASSERT_TRUE(fd->Seek(fec_start_offset, SEEK_SET));
+    ASSERT_EQ(fd->Read(fec_data_.data(), fec_data_.size()),
+              static_cast<ssize_t>(fec_data_.size()))
+        << "Failed to read FEC " << strerror(errno);
+    // The FS verification action is expected to write them, so clear the
+    // verity data to ensure that they are re-created correctly.
+    ZeroRange(
+        fd, HASH_TREE_START_OFFSET / BLOCK_SIZE, hash_tree_size / BLOCK_SIZE);
+    ZeroRange(fd, fec_start_offset / BLOCK_SIZE, fec_size / BLOCK_SIZE);
+  }
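+
+  // Rough sketch of the layout SetHashWithVerity() relies on (offsets come
+  // from the constants defined earlier in this file):
+  //   [0, HASH_TREE_START_OFFSET)                      filesystem data
+  //   [HASH_TREE_START_OFFSET, ... + hash_tree_size)   hash tree
+  //   [fec_start_offset, fec_start_offset + fec_size)  FEC data
+  // |hash_tree_data_| and |fec_data_| keep copies of the last two regions so
+  // tests can verify that FilesystemVerifierAction regenerates them after
+  // ZeroRange() wipes them from the partition.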
 
   brillo::FakeMessageLoop loop_{nullptr};
   ActionProcessor processor_;
+  DynamicPartitionControlStub dynamic_control_stub_;
+  std::vector<unsigned char> fec_data_;
+  std::vector<unsigned char> hash_tree_data_;
+  static ScopedTempFile source_part_;
+  static ScopedTempFile target_part_;
+  InstallPlan install_plan_;
 };
 
+ScopedTempFile FilesystemVerifierActionTest::source_part_{
+    "source_part.XXXXXX", true, PARTITION_SIZE};
+ScopedTempFile FilesystemVerifierActionTest::target_part_{
+    "target_part.XXXXXX", true, PARTITION_SIZE};
+
+static void EnableVABC(MockDynamicPartitionControl* dynamic_control,
+                       const std::string& part_name) {
+  ON_CALL(*dynamic_control, GetDynamicPartitionsFeatureFlag())
+      .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::LAUNCH)));
+  ON_CALL(*dynamic_control, UpdateUsesSnapshotCompression())
+      .WillByDefault(Return(true));
+  ON_CALL(*dynamic_control, IsDynamicPartition(part_name, _))
+      .WillByDefault(Return(true));
+}
+
 class FilesystemVerifierActionTestDelegate : public ActionProcessorDelegate {
  public:
   FilesystemVerifierActionTestDelegate()
@@ -70,7 +232,8 @@
     if (action->Type() == FilesystemVerifierAction::StaticType()) {
       ran_ = true;
       code_ = code;
-      EXPECT_FALSE(static_cast<FilesystemVerifierAction*>(action)->src_stream_);
+      EXPECT_FALSE(
+          static_cast<FilesystemVerifierAction*>(action)->partition_fd_);
     } else if (action->Type() ==
                ObjectCollectorAction<InstallPlan>::StaticType()) {
       auto collector_action =
@@ -90,7 +253,7 @@
 
 bool FilesystemVerifierActionTest::DoTest(bool terminate_early,
                                           bool hash_fail) {
-  test_utils::ScopedTempFile a_loop_file("a_loop_file.XXXXXX");
+  ScopedTempFile a_loop_file("a_loop_file.XXXXXX");
 
   // Make random data for a.
   const size_t kLoopFileSize = 10 * 1024 * 1024 + 512;
@@ -117,9 +280,8 @@
   bool success = true;
 
   // Set up the action objects
-  InstallPlan install_plan;
-  install_plan.source_slot = 0;
-  install_plan.target_slot = 1;
+  install_plan_.source_slot = 0;
+  install_plan_.target_slot = 1;
   InstallPlan::Partition part;
   part.name = "part";
   part.target_size = kLoopFileSize - (hash_fail ? 1 : 0);
@@ -134,23 +296,19 @@
     ADD_FAILURE();
     success = false;
   }
-  install_plan.partitions = {part};
+  install_plan_.partitions = {part};
 
-  BuildActions(install_plan);
+  BuildActions(install_plan_);
 
   FilesystemVerifierActionTestDelegate delegate;
   processor_.set_delegate(&delegate);
 
-  loop_.PostTask(FROM_HERE,
-                 base::Bind(
-                     [](ActionProcessor* processor, bool terminate_early) {
-                       processor->StartProcessing();
-                       if (terminate_early) {
-                         processor->StopProcessing();
-                       }
-                     },
-                     base::Unretained(&processor_),
-                     terminate_early));
+  loop_.PostTask(base::Bind(&ActionProcessor::StartProcessing,
+                            base::Unretained(&processor_)));
+  if (terminate_early) {
+    loop_.PostTask(base::Bind(&ActionProcessor::StopProcessing,
+                              base::Unretained(&processor_)));
+  }
   loop_.Run();
 
   if (!terminate_early) {
@@ -179,16 +337,18 @@
   EXPECT_TRUE(is_a_file_reading_eq);
   success = success && is_a_file_reading_eq;
 
-  bool is_install_plan_eq = (*delegate.install_plan_ == install_plan);
+  bool is_install_plan_eq = (*delegate.install_plan_ == install_plan_);
   EXPECT_TRUE(is_install_plan_eq);
   success = success && is_install_plan_eq;
   return success;
 }
 
 void FilesystemVerifierActionTest::BuildActions(
-    const InstallPlan& install_plan) {
+    const InstallPlan& install_plan,
+    DynamicPartitionControlInterface* dynamic_control) {
   auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
-  auto verifier_action = std::make_unique<FilesystemVerifierAction>();
+  auto verifier_action =
+      std::make_unique<FilesystemVerifierAction>(dynamic_control);
   auto collector_action =
       std::make_unique<ObjectCollectorAction<InstallPlan>>();
 
@@ -202,6 +362,11 @@
   processor_.EnqueueAction(std::move(collector_action));
 }
 
+void FilesystemVerifierActionTest::BuildActions(
+    const InstallPlan& install_plan) {
+  BuildActions(install_plan, &dynamic_control_stub_);
+}
+
 class FilesystemVerifierActionTest2Delegate : public ActionProcessorDelegate {
  public:
   void ActionCompleted(ActionProcessor* processor,
@@ -217,7 +382,8 @@
 };
 
 TEST_F(FilesystemVerifierActionTest, MissingInputObjectTest) {
-  auto copier_action = std::make_unique<FilesystemVerifierAction>();
+  auto copier_action =
+      std::make_unique<FilesystemVerifierAction>(&dynamic_control_stub_);
   auto collector_action =
       std::make_unique<ObjectCollectorAction<InstallPlan>>();
 
@@ -230,20 +396,19 @@
   processor_.set_delegate(&delegate);
 
   processor_.StartProcessing();
-  EXPECT_FALSE(processor_.IsRunning());
-  EXPECT_TRUE(delegate.ran_);
+  ASSERT_FALSE(processor_.IsRunning());
+  ASSERT_TRUE(delegate.ran_);
   EXPECT_EQ(ErrorCode::kError, delegate.code_);
 }
 
 TEST_F(FilesystemVerifierActionTest, NonExistentDriveTest) {
-  InstallPlan install_plan;
   InstallPlan::Partition part;
   part.name = "nope";
   part.source_path = "/no/such/file";
   part.target_path = "/no/such/file";
-  install_plan.partitions = {part};
+  install_plan_.partitions = {part};
 
-  BuildActions(install_plan);
+  BuildActions(install_plan_);
 
   FilesystemVerifierActionTest2Delegate delegate;
   processor_.set_delegate(&delegate);
@@ -274,7 +439,7 @@
 
 #ifdef __ANDROID__
 TEST_F(FilesystemVerifierActionTest, RunAsRootWriteVerityTest) {
-  test_utils::ScopedTempFile part_file("part_file.XXXXXX");
+  ScopedTempFile part_file("part_file.XXXXXX");
   constexpr size_t filesystem_size = 200 * 4096;
   constexpr size_t part_size = 256 * 4096;
   brillo::Blob part_data(filesystem_size, 0x1);
@@ -284,7 +449,6 @@
   test_utils::ScopedLoopbackDeviceBinder target_device(
       part_file.path(), true, &target_path);
 
-  InstallPlan install_plan;
   InstallPlan::Partition part;
   part.name = "part";
   part.target_path = target_path;
@@ -315,9 +479,9 @@
   part.hash_tree_salt = {0x9e, 0xcb, 0xf8, 0xd5, 0x0b, 0xb4, 0x43,
                          0x0a, 0x7a, 0x10, 0xad, 0x96, 0xd7, 0x15,
                          0x70, 0xba, 0xed, 0x27, 0xe2, 0xae};
-  install_plan.partitions = {part};
+  install_plan_.partitions = {part};
 
-  BuildActions(install_plan);
+  BuildActions(install_plan_);
 
   FilesystemVerifierActionTestDelegate delegate;
   processor_.set_delegate(&delegate);
@@ -336,7 +500,7 @@
 #endif  // __ANDROID__
 
 TEST_F(FilesystemVerifierActionTest, RunAsRootSkipWriteVerityTest) {
-  test_utils::ScopedTempFile part_file("part_file.XXXXXX");
+  ScopedTempFile part_file("part_file.XXXXXX");
   constexpr size_t filesystem_size = 200 * 4096;
   constexpr size_t part_size = 256 * 4096;
   brillo::Blob part_data(part_size);
@@ -346,8 +510,7 @@
   test_utils::ScopedLoopbackDeviceBinder target_device(
       part_file.path(), true, &target_path);
 
-  InstallPlan install_plan;
-  install_plan.write_verity = false;
+  install_plan_.write_verity = false;
   InstallPlan::Partition part;
   part.name = "part";
   part.target_path = target_path;
@@ -362,9 +525,9 @@
   part.fec_offset = part.fec_data_size;
   part.fec_size = 2 * 4096;
   EXPECT_TRUE(HashCalculator::RawHashOfData(part_data, &part.target_hash));
-  install_plan.partitions = {part};
+  install_plan_.partitions = {part};
 
-  BuildActions(install_plan);
+  BuildActions(install_plan_);
 
   FilesystemVerifierActionTestDelegate delegate;
   processor_.set_delegate(&delegate);
@@ -376,8 +539,151 @@
           base::Unretained(&processor_)));
   loop_.Run();
 
-  EXPECT_FALSE(processor_.IsRunning());
-  EXPECT_TRUE(delegate.ran());
-  EXPECT_EQ(ErrorCode::kSuccess, delegate.code());
+  ASSERT_FALSE(processor_.IsRunning());
+  ASSERT_TRUE(delegate.ran());
+  ASSERT_EQ(ErrorCode::kSuccess, delegate.code());
 }
+
+void FilesystemVerifierActionTest::DoTestVABC(bool clear_target_hash,
+                                              bool enable_verity) {
+  auto part_ptr = AddFakePartition(&install_plan_);
+  if (::testing::Test::HasFailure()) {
+    return;
+  }
+  ASSERT_NE(part_ptr, nullptr);
+  InstallPlan::Partition& part = *part_ptr;
+  part.target_path = "Shouldn't attempt to open this path";
+  if (enable_verity) {
+    install_plan_.write_verity = true;
+    ASSERT_NO_FATAL_FAILURE(SetHashWithVerity(&part));
+  }
+  if (clear_target_hash) {
+    part.target_hash.clear();
+  }
+
+  NiceMock<MockDynamicPartitionControl> dynamic_control;
+
+  EnableVABC(&dynamic_control, part.name);
+  auto open_cow = [part]() {
+    auto cow_fd = std::make_shared<EintrSafeFileDescriptor>();
+    EXPECT_TRUE(cow_fd->Open(part.readonly_target_path.c_str(), O_RDWR))
+        << "Failed to open part " << part.readonly_target_path
+        << strerror(errno);
+    return cow_fd;
+  };
+
+  EXPECT_CALL(dynamic_control, UpdateUsesSnapshotCompression())
+      .Times(AtLeast(1));
+  auto cow_fd = open_cow();
+  if (HasFailure()) {
+    return;
+  }
+
+  if (enable_verity) {
+    ON_CALL(dynamic_control, OpenCowFd(part.name, {part.source_path}, _))
+        .WillByDefault(open_cow);
+    EXPECT_CALL(dynamic_control, OpenCowFd(part.name, {part.source_path}, _))
+        .Times(AtLeast(1));
+
+    // FS verification isn't supposed to write to |readonly_target_path|; all
+    // writes should go through the fd returned by |OpenCowFd|. Therefore we
+    // set the target partition as read-only to enforce this.
+    ASSERT_EQ(0, chmod(part.readonly_target_path.c_str(), 0444))
+        << " Failed to set " << part.readonly_target_path << " as read-only "
+        << strerror(errno);
+  } else {
+    // Since we are not writing verity, we should not attempt to OpenCowFd();
+    // reads should go through regular file descriptors on mapped partitions.
+    EXPECT_CALL(dynamic_control, OpenCowFd(part.name, {part.source_path}, _))
+        .Times(0);
+    EXPECT_CALL(dynamic_control, MapAllPartitions()).Times(AtLeast(1));
+  }
+  EXPECT_CALL(dynamic_control, ListDynamicPartitionsForSlot(_, _, _))
+      .WillRepeatedly(
+          DoAll(SetArgPointee<2, std::vector<std::string>>({part.name}),
+                Return(true)));
+
+  BuildActions(install_plan_, &dynamic_control);
+
+  FilesystemVerifierActionTestDelegate delegate;
+  processor_.set_delegate(&delegate);
+
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&ActionProcessor::StartProcessing,
+                            base::Unretained(&processor_)));
+  loop_.Run();
+
+  ASSERT_FALSE(processor_.IsRunning());
+  ASSERT_TRUE(delegate.ran());
+  if (enable_verity) {
+    std::vector<unsigned char> actual_fec(fec_size);
+    ssize_t bytes_read = 0;
+    ASSERT_TRUE(utils::PReadAll(cow_fd,
+                                actual_fec.data(),
+                                actual_fec.size(),
+                                fec_start_offset,
+                                &bytes_read));
+    ASSERT_EQ(actual_fec, fec_data_);
+    std::vector<unsigned char> actual_hash_tree(hash_tree_size);
+    ASSERT_TRUE(utils::PReadAll(cow_fd,
+                                actual_hash_tree.data(),
+                                actual_hash_tree.size(),
+                                HASH_TREE_START_OFFSET,
+                                &bytes_read));
+    ASSERT_EQ(actual_hash_tree, hash_tree_data_);
+  }
+  if (clear_target_hash) {
+    ASSERT_EQ(ErrorCode::kNewRootfsVerificationError, delegate.code());
+  } else {
+    ASSERT_EQ(ErrorCode::kSuccess, delegate.code());
+  }
+}
+
+TEST_F(FilesystemVerifierActionTest, VABC_NoVerity_Success) {
+  DoTestVABC(false, false);
+}
+
+TEST_F(FilesystemVerifierActionTest, VABC_NoVerity_Target_Mismatch) {
+  DoTestVABC(true, false);
+}
+
+TEST_F(FilesystemVerifierActionTest, VABC_Verity_Success) {
+  DoTestVABC(false, true);
+}
+
+TEST_F(FilesystemVerifierActionTest, VABC_Verity_ReadAfterWrite) {
+  ASSERT_NO_FATAL_FAILURE(DoTestVABC(false, true));
+  // Run FS verification again, w/o writing verity. We have seen a bug where
+  // running FS verification again caused previously written verity data to be
+  // dropped, so cover this scenario.
+  ASSERT_GE(install_plan_.partitions.size(), 1UL);
+  auto& part = install_plan_.partitions[0];
+  install_plan_.write_verity = false;
+  part.readonly_target_path = target_part_.path();
+  NiceMock<MockDynamicPartitionControl> dynamic_control;
+  EnableVABC(&dynamic_control, part.name);
+
+  // b/186196758 is only visible if we repeatedly run FS verification w/o
+  // writing verity.
+  for (int i = 0; i < 3; i++) {
+    BuildActions(install_plan_, &dynamic_control);
+
+    FilesystemVerifierActionTestDelegate delegate;
+    processor_.set_delegate(&delegate);
+    loop_.PostTask(
+        FROM_HERE,
+        base::Bind(
+            [](ActionProcessor* processor) { processor->StartProcessing(); },
+            base::Unretained(&processor_)));
+    loop_.Run();
+    ASSERT_FALSE(processor_.IsRunning());
+    ASSERT_TRUE(delegate.ran());
+    ASSERT_EQ(ErrorCode::kSuccess, delegate.code());
+  }
+}
+
+TEST_F(FilesystemVerifierActionTest, VABC_Verity_Target_Mismatch) {
+  DoTestVABC(true, true);
+}
+
 }  // namespace chromeos_update_engine
diff --git a/payload_consumer/install_plan.cc b/payload_consumer/install_plan.cc
index 766b27c..06b7dd8 100644
--- a/payload_consumer/install_plan.cc
+++ b/payload_consumer/install_plan.cc
@@ -16,6 +16,9 @@
 
 #include "update_engine/payload_consumer/install_plan.h"
 
+#include <algorithm>
+#include <utility>
+
 #include <base/format_macros.h>
 #include <base/logging.h>
 #include <base/strings/string_number_conversions.h>
@@ -26,9 +29,29 @@
 #include "update_engine/payload_consumer/payload_constants.h"
 
 using std::string;
+using std::vector;
 
 namespace chromeos_update_engine {
 
+namespace {
+string PayloadUrlsToString(
+    const decltype(InstallPlan::Payload::payload_urls)& payload_urls) {
+  return "(" + base::JoinString(payload_urls, ",") + ")";
+}
+
+string VectorToString(const vector<std::pair<string, string>>& input,
+                      const string& separator) {
+  vector<string> vec;
+  std::transform(input.begin(),
+                 input.end(),
+                 std::back_inserter(vec),
+                 [](const auto& pair) {
+                   return base::JoinString({pair.first, pair.second}, ": ");
+                 });
+  return base::JoinString(vec, separator);
+}
+}  // namespace
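+
+// Illustrative behavior of the helpers above (hypothetical values):
+//   PayloadUrlsToString({"url1", "url2"})           -> "(url1,url2)"
+//   VectorToString({{"a", "1"}, {"b", "2"}}, "\n")  -> "a: 1\nb: 2"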
+
 string InstallPayloadTypeToString(InstallPayloadType type) {
   switch (type) {
     case InstallPayloadType::kUnknown:
@@ -53,34 +76,10 @@
 }
 
 void InstallPlan::Dump() const {
-  string partitions_str;
-  for (const auto& partition : partitions) {
-    partitions_str +=
-        base::StringPrintf(", part: %s (source_size: %" PRIu64
-                           ", target_size %" PRIu64 ", postinst:%s)",
-                           partition.name.c_str(),
-                           partition.source_size,
-                           partition.target_size,
-                           utils::ToString(partition.run_postinstall).c_str());
-  }
-  string payloads_str;
-  for (const auto& payload : payloads) {
-    payloads_str += base::StringPrintf(
-        ", payload: (size: %" PRIu64 ", metadata_size: %" PRIu64
-        ", metadata signature: %s, hash: %s, payload type: %s)",
-        payload.size,
-        payload.metadata_size,
-        payload.metadata_signature.c_str(),
-        base::HexEncode(payload.hash.data(), payload.hash.size()).c_str(),
-        InstallPayloadTypeToString(payload.type).c_str());
-  }
+  LOG(INFO) << "InstallPlan: \n" << ToString();
+}
 
-  string version_str = base::StringPrintf(", version: %s", version.c_str());
-  if (!system_version.empty()) {
-    version_str +=
-        base::StringPrintf(", system_version: %s", system_version.c_str());
-  }
-
+string InstallPlan::ToString() const {
   string url_str = download_url;
   if (base::StartsWith(
           url_str, "fd://", base::CompareCase::INSENSITIVE_ASCII)) {
@@ -88,19 +87,66 @@
     url_str = utils::GetFilePath(fd);
   }
 
-  LOG(INFO) << "InstallPlan: " << (is_resume ? "resume" : "new_update")
-            << version_str
-            << ", source_slot: " << BootControlInterface::SlotName(source_slot)
-            << ", target_slot: " << BootControlInterface::SlotName(target_slot)
-            << ", url: " << url_str << payloads_str << partitions_str
-            << ", hash_checks_mandatory: "
-            << utils::ToString(hash_checks_mandatory)
-            << ", powerwash_required: " << utils::ToString(powerwash_required)
-            << ", switch_slot_on_reboot: "
-            << utils::ToString(switch_slot_on_reboot)
-            << ", run_post_install: " << utils::ToString(run_post_install)
-            << ", is_rollback: " << utils::ToString(is_rollback)
-            << ", write_verity: " << utils::ToString(write_verity);
+  vector<string> result_str;
+  result_str.emplace_back(VectorToString(
+      {
+          {"type", (is_resume ? "resume" : "new_update")},
+          {"version", version},
+          {"source_slot", BootControlInterface::SlotName(source_slot)},
+          {"target_slot", BootControlInterface::SlotName(target_slot)},
+          {"initial url", url_str},
+          {"hash_checks_mandatory", utils::ToString(hash_checks_mandatory)},
+          {"powerwash_required", utils::ToString(powerwash_required)},
+          {"switch_slot_on_reboot", utils::ToString(switch_slot_on_reboot)},
+          {"run_post_install", utils::ToString(run_post_install)},
+          {"is_rollback", utils::ToString(is_rollback)},
+          {"rollback_data_save_requested",
+           utils::ToString(rollback_data_save_requested)},
+          {"write_verity", utils::ToString(write_verity)},
+      },
+      "\n"));
+
+  for (const auto& partition : partitions) {
+    result_str.emplace_back(VectorToString(
+        {
+            {"Partition", partition.name},
+            {"source_size", base::NumberToString(partition.source_size)},
+            {"source_path", partition.source_path},
+            {"source_hash",
+             base::HexEncode(partition.source_hash.data(),
+                             partition.source_hash.size())},
+            {"target_size", base::NumberToString(partition.target_size)},
+            {"target_path", partition.target_path},
+            {"target_hash",
+             base::HexEncode(partition.target_hash.data(),
+                             partition.target_hash.size())},
+            {"run_postinstall", utils::ToString(partition.run_postinstall)},
+            {"postinstall_path", partition.postinstall_path},
+            {"readonly_target_path", partition.readonly_target_path},
+            {"filesystem_type", partition.filesystem_type},
+        },
+        "\n  "));
+  }
+
+  for (unsigned int i = 0; i < payloads.size(); ++i) {
+    const auto& payload = payloads[i];
+    result_str.emplace_back(VectorToString(
+        {
+            {"Payload", base::NumberToString(i)},
+            {"urls", PayloadUrlsToString(payload.payload_urls)},
+            {"size", base::NumberToString(payload.size)},
+            {"metadata_size", base::NumberToString(payload.metadata_size)},
+            {"metadata_signature", payload.metadata_signature},
+            {"hash", base::HexEncode(payload.hash.data(), payload.hash.size())},
+            {"type", InstallPayloadTypeToString(payload.type)},
+            {"fingerprint", payload.fp},
+            {"app_id", payload.app_id},
+            {"already_applied", utils::ToString(payload.already_applied)},
+        },
+        "\n  "));
+  }
+
+  return base::JoinString(result_str, "\n");
 }
 
 bool InstallPlan::LoadPartitionsFromSlots(BootControlInterface* boot_control) {
@@ -108,18 +154,19 @@
   for (Partition& partition : partitions) {
     if (source_slot != BootControlInterface::kInvalidSlot &&
         partition.source_size > 0) {
-      result = boot_control->GetPartitionDevice(
-                   partition.name, source_slot, &partition.source_path) &&
-               result;
+      TEST_AND_RETURN_FALSE(boot_control->GetPartitionDevice(
+          partition.name, source_slot, &partition.source_path));
     } else {
       partition.source_path.clear();
     }
 
     if (target_slot != BootControlInterface::kInvalidSlot &&
         partition.target_size > 0) {
-      result = boot_control->GetPartitionDevice(
-                   partition.name, target_slot, &partition.target_path) &&
-               result;
+      auto device = boot_control->GetPartitionDevice(
+          partition.name, target_slot, source_slot);
+      TEST_AND_RETURN_FALSE(device.has_value());
+      partition.target_path = device->rw_device_path;
+      partition.readonly_target_path = device->readonly_device_path;
     } else {
       partition.target_path.clear();
     }
diff --git a/payload_consumer/install_plan.h b/payload_consumer/install_plan.h
index ede36b3..7c77789 100644
--- a/payload_consumer/install_plan.h
+++ b/payload_consumer/install_plan.h
@@ -45,6 +45,7 @@
   bool operator!=(const InstallPlan& that) const;
 
   void Dump() const;
+  std::string ToString() const;
 
   // Loads the |source_path| and |target_path| of all |partitions| based on the
   // |source_slot| and |target_slot| if available. Returns whether it succeeded
@@ -54,25 +55,28 @@
   bool is_resume{false};
   std::string download_url;  // url to download from
   std::string version;       // version we are installing.
-  // system version, if present and separate from version
-  std::string system_version;
 
   struct Payload {
-    uint64_t size = 0;               // size of the payload
-    uint64_t metadata_size = 0;      // size of the metadata
+    std::vector<std::string> payload_urls;  // URLs to download the payload
+    uint64_t size = 0;                      // size of the payload
+    uint64_t metadata_size = 0;             // size of the metadata
     std::string metadata_signature;  // signature of the metadata in base64
     brillo::Blob hash;               // SHA256 hash of the payload
     InstallPayloadType type{InstallPayloadType::kUnknown};
+    std::string fp;      // fingerprint value unique to the payload
+    std::string app_id;  // App ID of the payload
     // Only download manifest and fill in partitions in install plan without
     // apply the payload if true. Will be set by DownloadAction when resuming
     // multi-payload.
     bool already_applied = false;
 
     bool operator==(const Payload& that) const {
-      return size == that.size && metadata_size == that.metadata_size &&
+      return payload_urls == that.payload_urls && size == that.size &&
+             metadata_size == that.metadata_size &&
              metadata_signature == that.metadata_signature &&
              hash == that.hash && type == that.type &&
-             already_applied == that.already_applied;
+             already_applied == that.already_applied && fp == that.fp &&
+             app_id == that.app_id;
     }
   };
   std::vector<Payload> payloads;
@@ -98,9 +102,17 @@
     uint64_t source_size{0};
     brillo::Blob source_hash;
 
+    // |target_path| is intended to be a path to a block device, which you can
+    // open with the |open| syscall and perform regular Unix-style read/write
+    // on. For VABC, this will be empty, as you can't read/write VABC devices
+    // with regular syscalls.
     std::string target_path;
+    // |readonly_target_path| is intended to be a path to a block device which
+    // can be used for mounting this block device's underlying filesystem.
+    std::string readonly_target_path;
     uint64_t target_size{0};
     brillo::Blob target_hash;
+
     uint32_t block_size{0};
 
     // Whether we should run the postinstall script from this partition and the
@@ -146,6 +158,9 @@
   // True if this update is a rollback.
   bool is_rollback{false};
 
+  // True if this rollback should preserve some system data.
+  bool rollback_data_save_requested{false};
+
   // True if the update should write verity.
   // False otherwise.
   bool write_verity{true};
@@ -153,6 +168,10 @@
   // If not blank, a base-64 encoded representation of the PEM-encoded
   // public key in the response.
   std::string public_key_rsa;
+
+  // The names of dynamic partitions not included in the payload. Only used
+  // for partial updates.
+  std::vector<std::string> untouched_dynamic_partitions;
 };
 
 class InstallPlanAction;
@@ -190,9 +209,10 @@
   typedef ActionTraits<InstallPlanAction>::InputObjectType InputObjectType;
   typedef ActionTraits<InstallPlanAction>::OutputObjectType OutputObjectType;
 
- private:
+ protected:
   InstallPlan install_plan_;
 
+ private:
   DISALLOW_COPY_AND_ASSIGN(InstallPlanAction);
 };
 
diff --git a/payload_consumer/install_plan_unittest.cc b/payload_consumer/install_plan_unittest.cc
new file mode 100644
index 0000000..7779494
--- /dev/null
+++ b/payload_consumer/install_plan_unittest.cc
@@ -0,0 +1,84 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <gtest/gtest.h>
+
+#include "update_engine/payload_consumer/install_plan.h"
+
+namespace chromeos_update_engine {
+
+TEST(InstallPlanTest, Dump) {
+  InstallPlan install_plan{
+      .download_url = "foo-download-url",
+      .version = "foo-version",
+      .payloads = {{
+          .payload_urls = {"url1", "url2"},
+          .metadata_signature = "foo-signature",
+          .hash = {0xb2, 0xb3},
+          .fp = "foo-fp",
+          .app_id = "foo-app-id",
+      }},
+      .source_slot = BootControlInterface::kInvalidSlot,
+      .target_slot = BootControlInterface::kInvalidSlot,
+      .partitions = {{
+          .name = "foo-partition_name",
+          .source_path = "foo-source-path",
+          .source_hash = {0xb1, 0xb2},
+          .target_path = "foo-target-path",
+          .readonly_target_path = "mountable-device",
+          .target_hash = {0xb3, 0xb4},
+          .postinstall_path = "foo-path",
+          .filesystem_type = "foo-type",
+      }},
+  };
+
+  EXPECT_EQ(install_plan.ToString(),
+            R"(type: new_update
+version: foo-version
+source_slot: INVALID
+target_slot: INVALID
+initial url: foo-download-url
+hash_checks_mandatory: false
+powerwash_required: false
+switch_slot_on_reboot: true
+run_post_install: true
+is_rollback: false
+rollback_data_save_requested: false
+write_verity: true
+Partition: foo-partition_name
+  source_size: 0
+  source_path: foo-source-path
+  source_hash: B1B2
+  target_size: 0
+  target_path: foo-target-path
+  target_hash: B3B4
+  run_postinstall: false
+  postinstall_path: foo-path
+  readonly_target_path: mountable-device
+  filesystem_type: foo-type
+Payload: 0
+  urls: (url1,url2)
+  size: 0
+  metadata_size: 0
+  metadata_signature: foo-signature
+  hash: B2B3
+  type: unknown
+  fingerprint: foo-fp
+  app_id: foo-app-id
+  already_applied: false)");
+}
+
+}  // namespace chromeos_update_engine
diff --git a/mock_file_writer.h b/payload_consumer/mock_file_writer.h
similarity index 100%
rename from mock_file_writer.h
rename to payload_consumer/mock_file_writer.h
diff --git a/payload_consumer/mock_partition_writer.h b/payload_consumer/mock_partition_writer.h
new file mode 100644
index 0000000..b056010
--- /dev/null
+++ b/payload_consumer/mock_partition_writer.h
@@ -0,0 +1,68 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_MOCK_PARTITION_WRITER_H_
+#define UPDATE_ENGINE_MOCK_PARTITION_WRITER_H_
+
+#include <gmock/gmock.h>
+
+#include "common/error_code.h"
+#include "payload_generator/delta_diff_generator.h"
+#include "update_engine/payload_consumer/partition_writer.h"
+
+namespace chromeos_update_engine {
+class MockPartitionWriter : public PartitionWriter {
+ public:
+  MockPartitionWriter() : PartitionWriter({}, {}, nullptr, kBlockSize, false) {}
+  virtual ~MockPartitionWriter() = default;
+
+  // Perform necessary initialization work before InstallOperation can be
+  // applied to this partition
+  MOCK_METHOD(bool, Init, (const InstallPlan*, bool, size_t), (override));
+
+  // |CheckpointUpdateProgress| will be called after SetNextOpIndex(), but it's
+  // optional. DeltaPerformer may or may not call this every time an operation
+  // is applied.
+  MOCK_METHOD(void, CheckpointUpdateProgress, (size_t), (override));
+
+  // These perform a specific type of operation and return true on success.
+  // |error| will be set on a source hash mismatch; otherwise |error| might not
+  // be set even if the operation fails.
+  MOCK_METHOD(bool,
+              PerformReplaceOperation,
+              (const InstallOperation&, const void*, size_t),
+              (override));
+  MOCK_METHOD(bool,
+              PerformZeroOrDiscardOperation,
+              (const InstallOperation&),
+              (override));
+  MOCK_METHOD(bool,
+              PerformSourceCopyOperation,
+              (const InstallOperation&, ErrorCode*),
+              (override));
+  MOCK_METHOD(bool,
+              PerformSourceBsdiffOperation,
+              (const InstallOperation&, ErrorCode*, const void*, size_t),
+              (override));
+  MOCK_METHOD(bool,
+              PerformPuffDiffOperation,
+              (const InstallOperation&, ErrorCode*, const void*, size_t),
+              (override));
+};
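+
+// Sketch of how this mock might be used in a test (hypothetical snippet,
+// assuming the usual gmock matchers and actions):
+//   MockPartitionWriter writer;
+//   EXPECT_CALL(writer, PerformZeroOrDiscardOperation(testing::_))
+//       .WillOnce(testing::Return(true));
+//   InstallOperation op;
+//   op.set_type(InstallOperation::ZERO);
+//   EXPECT_TRUE(writer.PerformZeroOrDiscardOperation(op));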
+
+}  // namespace chromeos_update_engine
+
+#endif
diff --git a/payload_consumer/mount_history.cc b/payload_consumer/mount_history.cc
index 43a75b3..d699ad9 100644
--- a/payload_consumer/mount_history.cc
+++ b/payload_consumer/mount_history.cc
@@ -37,7 +37,7 @@
   brillo::Blob block0_buffer(kBlockSize);
   ssize_t bytes_read;
 
-  if (!utils::PReadAll(
+  if (!utils::ReadAll(
           blockdevice_fd, block0_buffer.data(), kBlockSize, 0, &bytes_read)) {
     LOG(WARNING) << "PReadAll failed";
     return;
@@ -54,14 +54,33 @@
   //   0x30: len32 Write time
   //   0x34: len16 Number of mounts since the last fsck
   //   0x38: len16 Magic signature 0xEF53
+  //   0x40: len32 Time of last check
+  //   0x108: len32 When the filesystem was created
 
   time_t mount_time =
       *reinterpret_cast<uint32_t*>(&block0_buffer[0x400 + 0x2C]);
+  time_t write_time =
+      *reinterpret_cast<uint32_t*>(&block0_buffer[0x400 + 0x30]);
   uint16_t mount_count =
       *reinterpret_cast<uint16_t*>(&block0_buffer[0x400 + 0x34]);
   uint16_t magic = *reinterpret_cast<uint16_t*>(&block0_buffer[0x400 + 0x38]);
+  time_t check_time =
+      *reinterpret_cast<uint32_t*>(&block0_buffer[0x400 + 0x40]);
+  time_t created_time =
+      *reinterpret_cast<uint32_t*>(&block0_buffer[0x400 + 0x108]);
 
   if (magic == 0xEF53) {
+    // Timestamps can be updated by fsck without updating the mount count, so
+    // log if any of the timestamps differ.
+    if (!(write_time == created_time && check_time == created_time)) {
+      LOG(WARNING) << "Device has been modified after being created. "
+                   << "Filesystem created on "
+                   << base::Time::FromTimeT(created_time) << ", "
+                   << "last written on "
+                   << base::Time::FromTimeT(write_time) << ", "
+                   << "last checked on "
+                   << base::Time::FromTimeT(check_time) << ".";
+    }
     if (mount_count > 0) {
       LOG(WARNING) << "Device was remounted R/W " << mount_count << " times. "
                    << "Last remount happened on "
diff --git a/payload_consumer/mtd_file_descriptor.cc b/payload_consumer/mtd_file_descriptor.cc
deleted file mode 100644
index 5d940cb..0000000
--- a/payload_consumer/mtd_file_descriptor.cc
+++ /dev/null
@@ -1,263 +0,0 @@
-//
-// Copyright (C) 2014 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/payload_consumer/mtd_file_descriptor.h"
-
-#include <fcntl.h>
-#include <mtd/ubi-user.h>
-#include <sys/ioctl.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-
-#include <memory>
-#include <string>
-
-#include <base/files/file_path.h>
-#include <base/strings/string_number_conversions.h>
-#include <base/strings/string_util.h>
-#include <base/strings/stringprintf.h>
-
-#include "update_engine/common/subprocess.h"
-#include "update_engine/common/utils.h"
-
-using std::string;
-
-namespace {
-
-static const char kSysfsClassUbi[] = "/sys/class/ubi/";
-static const char kUsableEbSize[] = "/usable_eb_size";
-static const char kReservedEbs[] = "/reserved_ebs";
-
-using chromeos_update_engine::UbiVolumeInfo;
-using chromeos_update_engine::utils::ReadFile;
-
-// Return a UbiVolumeInfo pointer if |path| is a UBI volume. Otherwise, return
-// a null unique pointer.
-std::unique_ptr<UbiVolumeInfo> GetUbiVolumeInfo(const string& path) {
-  base::FilePath device_node(path);
-  base::FilePath ubi_name(device_node.BaseName());
-
-  string sysfs_node(kSysfsClassUbi);
-  sysfs_node.append(ubi_name.MaybeAsASCII());
-
-  std::unique_ptr<UbiVolumeInfo> ret;
-
-  // Obtain volume info from sysfs.
-  string s_reserved_ebs;
-  if (!ReadFile(sysfs_node + kReservedEbs, &s_reserved_ebs)) {
-    LOG(ERROR) << "Cannot read " << sysfs_node + kReservedEbs;
-    return ret;
-  }
-  string s_eb_size;
-  if (!ReadFile(sysfs_node + kUsableEbSize, &s_eb_size)) {
-    LOG(ERROR) << "Cannot read " << sysfs_node + kUsableEbSize;
-    return ret;
-  }
-
-  base::TrimWhitespaceASCII(
-      s_reserved_ebs, base::TRIM_TRAILING, &s_reserved_ebs);
-  base::TrimWhitespaceASCII(s_eb_size, base::TRIM_TRAILING, &s_eb_size);
-
-  uint64_t reserved_ebs, eb_size;
-  if (!base::StringToUint64(s_reserved_ebs, &reserved_ebs)) {
-    LOG(ERROR) << "Cannot parse reserved_ebs: " << s_reserved_ebs;
-    return ret;
-  }
-  if (!base::StringToUint64(s_eb_size, &eb_size)) {
-    LOG(ERROR) << "Cannot parse usable_eb_size: " << s_eb_size;
-    return ret;
-  }
-
-  ret.reset(new UbiVolumeInfo);
-  ret->reserved_ebs = reserved_ebs;
-  ret->eraseblock_size = eb_size;
-  return ret;
-}
-
-}  // namespace
-
-namespace chromeos_update_engine {
-
-MtdFileDescriptor::MtdFileDescriptor()
-    : read_ctx_(nullptr, &mtd_read_close),
-      write_ctx_(nullptr, &mtd_write_close) {}
-
-bool MtdFileDescriptor::IsMtd(const char* path) {
-  uint64_t size;
-  return mtd_node_info(path, &size, nullptr, nullptr) == 0;
-}
-
-bool MtdFileDescriptor::Open(const char* path, int flags, mode_t mode) {
-  // This File Descriptor does not support read and write.
-  TEST_AND_RETURN_FALSE((flags & O_ACCMODE) != O_RDWR);
-  // But we need to open the underlying file descriptor in O_RDWR mode because
-  // during write, we need to read back to verify the write actually sticks or
-  // we have to skip the block. That job is done by mtdutils library.
-  if ((flags & O_ACCMODE) == O_WRONLY) {
-    flags &= ~O_ACCMODE;
-    flags |= O_RDWR;
-  }
-  TEST_AND_RETURN_FALSE(
-      EintrSafeFileDescriptor::Open(path, flags | O_CLOEXEC, mode));
-
-  if ((flags & O_ACCMODE) == O_RDWR) {
-    write_ctx_.reset(mtd_write_descriptor(fd_, path));
-    nr_written_ = 0;
-  } else {
-    read_ctx_.reset(mtd_read_descriptor(fd_, path));
-  }
-
-  if (!read_ctx_ && !write_ctx_) {
-    Close();
-    return false;
-  }
-
-  return true;
-}
-
-bool MtdFileDescriptor::Open(const char* path, int flags) {
-  mode_t cur = umask(022);
-  umask(cur);
-  return Open(path, flags, 0777 & ~cur);
-}
-
-ssize_t MtdFileDescriptor::Read(void* buf, size_t count) {
-  CHECK(read_ctx_);
-  return mtd_read_data(read_ctx_.get(), static_cast<char*>(buf), count);
-}
-
-ssize_t MtdFileDescriptor::Write(const void* buf, size_t count) {
-  CHECK(write_ctx_);
-  ssize_t result =
-      mtd_write_data(write_ctx_.get(), static_cast<const char*>(buf), count);
-  if (result > 0) {
-    nr_written_ += result;
-  }
-  return result;
-}
-
-off64_t MtdFileDescriptor::Seek(off64_t offset, int whence) {
-  if (write_ctx_) {
-    // Ignore seek in write mode.
-    return nr_written_;
-  }
-  return EintrSafeFileDescriptor::Seek(offset, whence);
-}
-
-bool MtdFileDescriptor::Close() {
-  read_ctx_.reset();
-  write_ctx_.reset();
-  return EintrSafeFileDescriptor::Close();
-}
-
-bool UbiFileDescriptor::IsUbi(const char* path) {
-  base::FilePath device_node(path);
-  base::FilePath ubi_name(device_node.BaseName());
-  TEST_AND_RETURN_FALSE(base::StartsWith(
-      ubi_name.MaybeAsASCII(), "ubi", base::CompareCase::SENSITIVE));
-
-  return static_cast<bool>(GetUbiVolumeInfo(path));
-}
-
-bool UbiFileDescriptor::Open(const char* path, int flags, mode_t mode) {
-  std::unique_ptr<UbiVolumeInfo> info = GetUbiVolumeInfo(path);
-  if (!info) {
-    return false;
-  }
-
-  // This File Descriptor does not support read and write.
-  TEST_AND_RETURN_FALSE((flags & O_ACCMODE) != O_RDWR);
-  TEST_AND_RETURN_FALSE(
-      EintrSafeFileDescriptor::Open(path, flags | O_CLOEXEC, mode));
-
-  usable_eb_blocks_ = info->reserved_ebs;
-  eraseblock_size_ = info->eraseblock_size;
-  volume_size_ = usable_eb_blocks_ * eraseblock_size_;
-
-  if ((flags & O_ACCMODE) == O_WRONLY) {
-    // It's best to use volume update ioctl so that UBI layer will mark the
-    // volume as being updated, and only clear that mark if the update is
-    // successful. We will need to pad to the whole volume size at close.
-    uint64_t vsize = volume_size_;
-    if (ioctl(fd_, UBI_IOCVOLUP, &vsize) != 0) {
-      PLOG(ERROR) << "Cannot issue volume update ioctl";
-      EintrSafeFileDescriptor::Close();
-      return false;
-    }
-    mode_ = kWriteOnly;
-    nr_written_ = 0;
-  } else {
-    mode_ = kReadOnly;
-  }
-
-  return true;
-}
-
-bool UbiFileDescriptor::Open(const char* path, int flags) {
-  mode_t cur = umask(022);
-  umask(cur);
-  return Open(path, flags, 0777 & ~cur);
-}
-
-ssize_t UbiFileDescriptor::Read(void* buf, size_t count) {
-  CHECK(mode_ == kReadOnly);
-  return EintrSafeFileDescriptor::Read(buf, count);
-}
-
-ssize_t UbiFileDescriptor::Write(const void* buf, size_t count) {
-  CHECK(mode_ == kWriteOnly);
-  ssize_t nr_chunk = EintrSafeFileDescriptor::Write(buf, count);
-  if (nr_chunk >= 0) {
-    nr_written_ += nr_chunk;
-  }
-  return nr_chunk;
-}
-
-off64_t UbiFileDescriptor::Seek(off64_t offset, int whence) {
-  if (mode_ == kWriteOnly) {
-    // Ignore seek in write mode.
-    return nr_written_;
-  }
-  return EintrSafeFileDescriptor::Seek(offset, whence);
-}
-
-bool UbiFileDescriptor::Close() {
-  bool pad_ok = true;
-  if (IsOpen() && mode_ == kWriteOnly) {
-    char buf[1024];
-    memset(buf, 0xFF, sizeof(buf));
-    while (nr_written_ < volume_size_) {
-      // We have written less than the whole volume. In order for us to clear
-      // the update marker, we need to fill the rest. It is recommended to fill
-      // UBI writes with 0xFF.
-      uint64_t to_write = volume_size_ - nr_written_;
-      if (to_write > sizeof(buf)) {
-        to_write = sizeof(buf);
-      }
-      ssize_t nr_chunk = EintrSafeFileDescriptor::Write(buf, to_write);
-      if (nr_chunk < 0) {
-        LOG(ERROR) << "Cannot 0xFF-pad before closing.";
-        // There is an error, but we can't really do any meaningful thing here.
-        pad_ok = false;
-        break;
-      }
-      nr_written_ += nr_chunk;
-    }
-  }
-  return EintrSafeFileDescriptor::Close() && pad_ok;
-}
-
-}  // namespace chromeos_update_engine
diff --git a/payload_consumer/mtd_file_descriptor.h b/payload_consumer/mtd_file_descriptor.h
deleted file mode 100644
index c0170b7..0000000
--- a/payload_consumer/mtd_file_descriptor.h
+++ /dev/null
@@ -1,103 +0,0 @@
-//
-// Copyright (C) 2014 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_MTD_FILE_DESCRIPTOR_H_
-#define UPDATE_ENGINE_PAYLOAD_CONSUMER_MTD_FILE_DESCRIPTOR_H_
-
-// This module defines file descriptors that deal with NAND media. We are
-// concerned with raw NAND access (as MTD device), and through UBI layer.
-
-#include <memory>
-
-#include <mtdutils.h>
-
-#include "update_engine/payload_consumer/file_descriptor.h"
-
-namespace chromeos_update_engine {
-
-// A class defining the file descriptor API for raw MTD device. This file
-// descriptor supports either random read, or sequential write but not both at
-// once.
-class MtdFileDescriptor : public EintrSafeFileDescriptor {
- public:
-  MtdFileDescriptor();
-
-  static bool IsMtd(const char* path);
-
-  bool Open(const char* path, int flags, mode_t mode) override;
-  bool Open(const char* path, int flags) override;
-  ssize_t Read(void* buf, size_t count) override;
-  ssize_t Write(const void* buf, size_t count) override;
-  off64_t Seek(off64_t offset, int whence) override;
-  uint64_t BlockDevSize() override { return 0; }
-  bool BlkIoctl(int request,
-                uint64_t start,
-                uint64_t length,
-                int* result) override {
-    return false;
-  }
-  bool Close() override;
-
- private:
-  std::unique_ptr<MtdReadContext, decltype(&mtd_read_close)> read_ctx_;
-  std::unique_ptr<MtdWriteContext, decltype(&mtd_write_close)> write_ctx_;
-  uint64_t nr_written_;
-};
-
-struct UbiVolumeInfo {
-  // Number of eraseblocks.
-  uint64_t reserved_ebs;
-  // Size of each eraseblock.
-  uint64_t eraseblock_size;
-};
-
-// A file descriptor to update a UBI volume, similar to MtdFileDescriptor.
-// Once the file descriptor is opened for write, the volume is marked as being
-// updated. The volume will not be usable until an update is completed. See
-// UBI_IOCVOLUP ioctl operation.
-class UbiFileDescriptor : public EintrSafeFileDescriptor {
- public:
-  // Perform some queries about |path| to see if it is a UBI volume.
-  static bool IsUbi(const char* path);
-
-  bool Open(const char* path, int flags, mode_t mode) override;
-  bool Open(const char* path, int flags) override;
-  ssize_t Read(void* buf, size_t count) override;
-  ssize_t Write(const void* buf, size_t count) override;
-  off64_t Seek(off64_t offset, int whence) override;
-  uint64_t BlockDevSize() override { return 0; }
-  bool BlkIoctl(int request,
-                uint64_t start,
-                uint64_t length,
-                int* result) override {
-    return false;
-  }
-  bool Close() override;
-
- private:
-  enum Mode { kReadOnly, kWriteOnly };
-
-  uint64_t usable_eb_blocks_;
-  uint64_t eraseblock_size_;
-  uint64_t volume_size_;
-  uint64_t nr_written_;
-
-  Mode mode_;
-};
-
-}  // namespace chromeos_update_engine
-
-#endif  // UPDATE_ENGINE_PAYLOAD_CONSUMER_MTD_FILE_DESCRIPTOR_H_
diff --git a/payload_consumer/partition_update_generator_android.cc b/payload_consumer/partition_update_generator_android.cc
new file mode 100644
index 0000000..4467182
--- /dev/null
+++ b/payload_consumer/partition_update_generator_android.cc
@@ -0,0 +1,183 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/partition_update_generator_android.h"
+
+#include <filesystem>
+#include <memory>
+#include <utility>
+
+#include <android-base/properties.h>
+#include <android-base/strings.h>
+#include <base/logging.h>
+#include <base/strings/string_split.h>
+
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/utils.h"
+
+namespace chromeos_update_engine {
+
+PartitionUpdateGeneratorAndroid::PartitionUpdateGeneratorAndroid(
+    BootControlInterface* boot_control, size_t block_size)
+    : boot_control_(boot_control), block_size_(block_size) {}
+
+bool PartitionUpdateGeneratorAndroid::
+    GenerateOperationsForPartitionsNotInPayload(
+        BootControlInterface::Slot source_slot,
+        BootControlInterface::Slot target_slot,
+        const std::set<std::string>& partitions_in_payload,
+        std::vector<PartitionUpdate>* update_list) {
+#ifndef __ANDROID__
+  // Skip copying partitions for host verification.
+  return true;
+#endif
+
+  auto ab_partitions = GetAbPartitionsOnDevice();
+  if (ab_partitions.empty()) {
+    LOG(ERROR) << "Failed to load static a/b partitions";
+    return false;
+  }
+
+  std::vector<PartitionUpdate> partition_updates;
+  for (const auto& partition_name : ab_partitions) {
+    if (partitions_in_payload.find(partition_name) !=
+        partitions_in_payload.end()) {
+      LOG(INFO) << partition_name << " is already included in the payload";
+      continue;
+    }
+    bool is_source_dynamic = false;
+    std::string source_device;
+
+    TEST_AND_RETURN_FALSE(
+        boot_control_->GetPartitionDevice(partition_name,
+                                          source_slot,
+                                          true, /* not_in_payload */
+                                          &source_device,
+                                          &is_source_dynamic));
+    bool is_target_dynamic = false;
+    std::string target_device;
+    TEST_AND_RETURN_FALSE(boot_control_->GetPartitionDevice(
+        partition_name, target_slot, true, &target_device, &is_target_dynamic));
+
+    if (is_source_dynamic || is_target_dynamic) {
+      if (is_source_dynamic != is_target_dynamic) {
+        LOG(ERROR) << "Partition " << partition_name << " is expected to be a"
+                   << " static partition. source slot is "
+                   << (is_source_dynamic ? "" : "not")
+                   << " dynamic, and target slot " << target_slot << " is "
+                   << (is_target_dynamic ? "" : "not") << " dynamic.";
+        return false;
+      } else {
+        continue;
+      }
+    }
+
+    auto source_size = utils::FileSize(source_device);
+    auto target_size = utils::FileSize(target_device);
+    if (source_size == -1 || target_size == -1 || source_size != target_size ||
+        source_size % block_size_ != 0) {
+      LOG(ERROR) << "Invalid partition size. source size " << source_size
+                 << ", target size " << target_size;
+      return false;
+    }
+
+    auto partition_update = CreatePartitionUpdate(
+        partition_name, source_device, target_device, source_size);
+    if (!partition_update.has_value()) {
+      LOG(ERROR) << "Failed to create partition update for " << partition_name;
+      return false;
+    }
+    partition_updates.push_back(partition_update.value());
+  }
+  *update_list = std::move(partition_updates);
+  return true;
+}
+
+std::vector<std::string>
+PartitionUpdateGeneratorAndroid::GetAbPartitionsOnDevice() const {
+  auto partition_list_str =
+      android::base::GetProperty("ro.product.ab_ota_partitions", "");
+  return base::SplitString(partition_list_str,
+                           ",",
+                           base::TRIM_WHITESPACE,
+                           base::SPLIT_WANT_NONEMPTY);
+}
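+
+// Example (hypothetical property value): if ro.product.ab_ota_partitions is
+// "boot,system,vendor", the function above returns {"boot", "system",
+// "vendor"}; surrounding whitespace is trimmed and empty entries are dropped.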
+
+std::optional<PartitionUpdate>
+PartitionUpdateGeneratorAndroid::CreatePartitionUpdate(
+    const std::string& partition_name,
+    const std::string& source_device,
+    const std::string& target_device,
+    int64_t partition_size) {
+  PartitionUpdate partition_update;
+  partition_update.set_partition_name(partition_name);
+  auto old_partition_info = partition_update.mutable_old_partition_info();
+  old_partition_info->set_size(partition_size);
+
+  auto raw_hash = CalculateHashForPartition(source_device, partition_size);
+  if (!raw_hash.has_value()) {
+    LOG(ERROR) << "Failed to calculate hash for partition " << source_device
+               << " size: " << partition_size;
+    return {};
+  }
+  old_partition_info->set_hash(raw_hash->data(), raw_hash->size());
+  auto new_partition_info = partition_update.mutable_new_partition_info();
+  new_partition_info->set_size(partition_size);
+  new_partition_info->set_hash(raw_hash->data(), raw_hash->size());
+
+  auto copy_operation = partition_update.add_operations();
+  copy_operation->set_type(InstallOperation::SOURCE_COPY);
+  Extent copy_extent;
+  copy_extent.set_start_block(0);
+  copy_extent.set_num_blocks(partition_size / block_size_);
+
+  *copy_operation->add_src_extents() = copy_extent;
+  *copy_operation->add_dst_extents() = copy_extent;
+
+  return partition_update;
+}
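+
+// Worked example (hypothetical sizes): for an 8 MiB static partition with
+// block_size_ == 4096, the single SOURCE_COPY operation above copies one
+// extent of 8 * 1024 * 1024 / 4096 == 2048 blocks starting at block 0; the
+// old and new partition info carry the same size and hash because the
+// partition is copied verbatim.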
+
+std::optional<brillo::Blob>
+PartitionUpdateGeneratorAndroid::CalculateHashForPartition(
+    const std::string& block_device, int64_t partition_size) {
+  // TODO(xunchang) Compute the hash with ECC partitions first; the hashing
+  // behavior should match the one in SOURCE_COPY. Also, we don't have the
+  // correct hash for the source partition.
+  // An alternative is to verify that the written bytes match the read bytes
+  // during filesystem verification. This could probably save us a read of the
+  // partitions here.
+  brillo::Blob raw_hash;
+  if (HashCalculator::RawHashOfFile(block_device, partition_size, &raw_hash) !=
+      partition_size) {
+    LOG(ERROR) << "Failed to calculate hash for " << block_device;
+    return std::nullopt;
+  }
+
+  return raw_hash;
+}
+
+namespace partition_update_generator {
+std::unique_ptr<PartitionUpdateGeneratorInterface> Create(
+    BootControlInterface* boot_control, size_t block_size) {
+  CHECK(boot_control);
+
+  return std::unique_ptr<PartitionUpdateGeneratorInterface>(
+      new PartitionUpdateGeneratorAndroid(boot_control, block_size));
+}
+}  // namespace partition_update_generator
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/partition_update_generator_android.h b/payload_consumer/partition_update_generator_android.h
new file mode 100644
index 0000000..0330c99
--- /dev/null
+++ b/payload_consumer/partition_update_generator_android.h
@@ -0,0 +1,68 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_ANDROID_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_ANDROID_H_
+
+#include <optional>
+#include <set>
+#include <string>
+#include <vector>
+
+#include <brillo/secure_blob.h>
+#include <gtest/gtest_prod.h>  // for FRIEND_TEST
+
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/payload_consumer/partition_update_generator_interface.h"
+
+namespace chromeos_update_engine {
+
+class PartitionUpdateGeneratorAndroid
+    : public PartitionUpdateGeneratorInterface {
+ public:
+  PartitionUpdateGeneratorAndroid(BootControlInterface* boot_control,
+                                  size_t block_size);
+
+  bool GenerateOperationsForPartitionsNotInPayload(
+      BootControlInterface::Slot source_slot,
+      BootControlInterface::Slot target_slot,
+      const std::set<std::string>& partitions_in_payload,
+      std::vector<PartitionUpdate>* update_list) override;
+  virtual std::vector<std::string> GetAbPartitionsOnDevice() const;
+
+ private:
+  friend class PartitionUpdateGeneratorAndroidTest;
+  FRIEND_TEST(PartitionUpdateGeneratorAndroidTest, GetStaticPartitions);
+  FRIEND_TEST(PartitionUpdateGeneratorAndroidTest, CreatePartitionUpdate);
+
+  // Creates a PartitionUpdate object for a given partition to update from
+  // source to target. Returns std::nullopt on failure.
+  std::optional<PartitionUpdate> CreatePartitionUpdate(
+      const std::string& partition_name,
+      const std::string& source_device,
+      const std::string& target_device,
+      int64_t partition_size);
+
+  std::optional<brillo::Blob> CalculateHashForPartition(
+      const std::string& block_device, int64_t partition_size);
+
+  BootControlInterface* boot_control_;
+  size_t block_size_;
+};
+
+}  // namespace chromeos_update_engine
+
+#endif
diff --git a/payload_consumer/partition_update_generator_android_unittest.cc b/payload_consumer/partition_update_generator_android_unittest.cc
new file mode 100644
index 0000000..86d025e
--- /dev/null
+++ b/payload_consumer/partition_update_generator_android_unittest.cc
@@ -0,0 +1,159 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/partition_update_generator_android.h"
+
+#include <map>
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include <android-base/strings.h>
+#include <brillo/secure_blob.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/fake_boot_control.h"
+#include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+
+namespace chromeos_update_engine {
+
+class FakePartitionUpdateGenerator : public PartitionUpdateGeneratorAndroid {
+ public:
+  std::vector<std::string> GetAbPartitionsOnDevice() const override {
+    return ab_partitions_;
+  }
+  using PartitionUpdateGeneratorAndroid::PartitionUpdateGeneratorAndroid;
+  std::vector<std::string> ab_partitions_;
+};
+
+class PartitionUpdateGeneratorAndroidTest : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    ASSERT_TRUE(device_dir_.CreateUniqueTempDir());
+    boot_control_ = std::make_unique<FakeBootControl>();
+    ASSERT_TRUE(boot_control_);
+    boot_control_->SetNumSlots(2);
+    generator_ = std::make_unique<FakePartitionUpdateGenerator>(
+        boot_control_.get(), 4096);
+    ASSERT_TRUE(generator_);
+  }
+
+  std::unique_ptr<FakePartitionUpdateGenerator> generator_;
+  std::unique_ptr<FakeBootControl> boot_control_;
+
+  base::ScopedTempDir device_dir_;
+  std::map<std::string, std::string> device_map_;
+
+  void SetUpBlockDevice(const std::map<std::string, std::string>& contents) {
+    std::set<std::string> partition_base_names;
+    for (const auto& [name, content] : contents) {
+      auto path = device_dir_.GetPath().value() + "/" + name;
+      ASSERT_TRUE(
+          utils::WriteFile(path.c_str(), content.data(), content.size()));
+
+      if (android::base::EndsWith(name, "_a")) {
+        auto prefix = name.substr(0, name.size() - 2);
+        boot_control_->SetPartitionDevice(prefix, 0, path);
+        partition_base_names.emplace(prefix);
+      } else if (android::base::EndsWith(name, "_b")) {
+        auto prefix = name.substr(0, name.size() - 2);
+        boot_control_->SetPartitionDevice(prefix, 1, path);
+        partition_base_names.emplace(prefix);
+      }
+      device_map_[name] = std::move(path);
+    }
+    generator_->ab_partitions_ = {partition_base_names.begin(),
+                                  partition_base_names.end()};
+  }
+
+  void CheckPartitionUpdate(const std::string& name,
+                            const std::string& content,
+                            const PartitionUpdate& partition_update) {
+    ASSERT_EQ(name, partition_update.partition_name());
+
+    brillo::Blob out_hash;
+    ASSERT_TRUE(HashCalculator::RawHashOfBytes(
+        content.data(), content.size(), &out_hash));
+    ASSERT_EQ(std::string(out_hash.begin(), out_hash.end()),
+              partition_update.old_partition_info().hash());
+    ASSERT_EQ(std::string(out_hash.begin(), out_hash.end()),
+              partition_update.new_partition_info().hash());
+
+    ASSERT_EQ(1, partition_update.operations_size());
+    const auto& operation = partition_update.operations(0);
+    ASSERT_EQ(InstallOperation::SOURCE_COPY, operation.type());
+
+    ASSERT_EQ(1, operation.src_extents_size());
+    ASSERT_EQ(0u, operation.src_extents(0).start_block());
+    ASSERT_EQ(content.size() / 4096, operation.src_extents(0).num_blocks());
+
+    ASSERT_EQ(1, operation.dst_extents_size());
+    ASSERT_EQ(0u, operation.dst_extents(0).start_block());
+    ASSERT_EQ(content.size() / 4096, operation.dst_extents(0).num_blocks());
+  }
+};
+
+TEST_F(PartitionUpdateGeneratorAndroidTest, CreatePartitionUpdate) {
+  auto system_contents = std::string(4096 * 2, '1');
+  auto boot_contents = std::string(4096 * 5, 'b');
+  std::map<std::string, std::string> contents = {
+      {"system_a", system_contents},
+      {"system_b", std::string(4096 * 2, 0)},
+      {"boot_a", boot_contents},
+      {"boot_b", std::string(4096 * 5, 0)},
+  };
+  SetUpBlockDevice(contents);
+
+  auto system_partition_update = generator_->CreatePartitionUpdate(
+      "system", device_map_["system_a"], device_map_["system_b"], 4096 * 2);
+  ASSERT_TRUE(system_partition_update.has_value());
+  CheckPartitionUpdate(
+      "system", system_contents, system_partition_update.value());
+
+  auto boot_partition_update = generator_->CreatePartitionUpdate(
+      "boot", device_map_["boot_a"], device_map_["boot_b"], 4096 * 5);
+  ASSERT_TRUE(boot_partition_update.has_value());
+  CheckPartitionUpdate("boot", boot_contents, boot_partition_update.value());
+}
+
+TEST_F(PartitionUpdateGeneratorAndroidTest, GenerateOperations) {
+  auto system_contents = std::string(4096 * 10, '2');
+  auto boot_contents = std::string(4096 * 5, 'b');
+  std::map<std::string, std::string> contents = {
+      {"system_a", system_contents},
+      {"system_b", std::string(4096 * 10, 0)},
+      {"boot_a", boot_contents},
+      {"boot_b", std::string(4096 * 5, 0)},
+      {"vendor_a", ""},
+      {"vendor_b", ""},
+      {"persist", ""},
+  };
+  SetUpBlockDevice(contents);
+
+  std::vector<PartitionUpdate> update_list;
+  ASSERT_TRUE(generator_->GenerateOperationsForPartitionsNotInPayload(
+      0, 1, std::set<std::string>{"vendor"}, &update_list));
+
+  ASSERT_EQ(2u, update_list.size());
+  CheckPartitionUpdate("boot", boot_contents, update_list[0]);
+  CheckPartitionUpdate("system", system_contents, update_list[1]);
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/partition_update_generator_interface.h b/payload_consumer/partition_update_generator_interface.h
new file mode 100644
index 0000000..3fa3dfb
--- /dev/null
+++ b/payload_consumer/partition_update_generator_interface.h
@@ -0,0 +1,55 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_INTERFACE_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_INTERFACE_H_
+
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "update_engine/common/boot_control_interface.h"
+
+namespace chromeos_update_engine {
+class PartitionUpdate;
+
+// This class identifies the partitions that are not included in the payload
+// of a partial A/B update, and generates additional operations for these
+// partitions to make the update complete.
+class PartitionUpdateGeneratorInterface {
+ public:
+  virtual ~PartitionUpdateGeneratorInterface() = default;
+
+  // Adds a PartitionUpdate for each partition not included in the payload. For
+  // static partitions, it generates SOURCE_COPY operations to copy the bytes
+  // from the source slot to the target slot. For dynamic partitions, it only
+  // calculates the partition hash for the later filesystem verification.
+  virtual bool GenerateOperationsForPartitionsNotInPayload(
+      BootControlInterface::Slot source_slot,
+      BootControlInterface::Slot target_slot,
+      const std::set<std::string>& partitions_in_payload,
+      std::vector<PartitionUpdate>* update_list) = 0;
+};
+
+namespace partition_update_generator {
+std::unique_ptr<PartitionUpdateGeneratorInterface> Create(
+    BootControlInterface* boot_control, size_t block_size);
+}
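+
+// Illustrative usage sketch (an assumption-laden example, not part of this
+// change): a consumer would typically obtain a generator from the factory
+// above, assuming a valid |boot_control| and a 4096-byte block size:
+//
+//   auto generator = partition_update_generator::Create(boot_control, 4096);
+//   std::vector<PartitionUpdate> extra_updates;
+//   if (!generator->GenerateOperationsForPartitionsNotInPayload(
+//           source_slot, target_slot, partitions_in_payload, &extra_updates)) {
+//     // Handle the failure, e.g. abort the update.
+//   }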
+
+}  // namespace chromeos_update_engine
+
+#endif
diff --git a/payload_consumer/partition_update_generator_stub.cc b/payload_consumer/partition_update_generator_stub.cc
new file mode 100644
index 0000000..cfbd5e1
--- /dev/null
+++ b/payload_consumer/partition_update_generator_stub.cc
@@ -0,0 +1,38 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/partition_update_generator_stub.h"
+
+#include <memory>
+
+namespace chromeos_update_engine {
+
+bool PartitionUpdateGeneratorStub::GenerateOperationsForPartitionsNotInPayload(
+    chromeos_update_engine::BootControlInterface::Slot source_slot,
+    chromeos_update_engine::BootControlInterface::Slot target_slot,
+    const std::set<std::string>& partitions_in_payload,
+    std::vector<PartitionUpdate>* update_list) {
+  return true;
+}
+
+namespace partition_update_generator {
+std::unique_ptr<PartitionUpdateGeneratorInterface> Create(
+    BootControlInterface* boot_control, size_t block_size) {
+  return std::make_unique<PartitionUpdateGeneratorStub>();
+}
+}  // namespace partition_update_generator
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/partition_update_generator_stub.h b/payload_consumer/partition_update_generator_stub.h
new file mode 100644
index 0000000..282875e
--- /dev/null
+++ b/payload_consumer/partition_update_generator_stub.h
@@ -0,0 +1,40 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_STUB_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_STUB_H_
+
+#include <set>
+#include <string>
+#include <vector>
+
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/payload_consumer/partition_update_generator_interface.h"
+
+namespace chromeos_update_engine {
+class PartitionUpdateGeneratorStub : public PartitionUpdateGeneratorInterface {
+ public:
+  PartitionUpdateGeneratorStub() = default;
+  bool GenerateOperationsForPartitionsNotInPayload(
+      BootControlInterface::Slot source_slot,
+      BootControlInterface::Slot target_slot,
+      const std::set<std::string>& partitions_in_payload,
+      std::vector<PartitionUpdate>* update_list) override;
+};
+
+}  // namespace chromeos_update_engine
+
+#endif
diff --git a/payload_consumer/partition_writer.cc b/payload_consumer/partition_writer.cc
new file mode 100644
index 0000000..6f98ba3
--- /dev/null
+++ b/payload_consumer/partition_writer.cc
@@ -0,0 +1,661 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include <update_engine/payload_consumer/partition_writer.h>
+
+#include <fcntl.h>
+#include <linux/fs.h>
+
+#include <algorithm>
+#include <initializer_list>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include <base/strings/string_number_conversions.h>
+#include <bsdiff/bspatch.h>
+#include <puffin/puffpatch.h>
+#include <bsdiff/file_interface.h>
+#include <puffin/stream.h>
+
+#include "update_engine/common/terminator.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/bzip_extent_writer.h"
+#include "update_engine/payload_consumer/cached_file_descriptor.h"
+#include "update_engine/payload_consumer/extent_reader.h"
+#include "update_engine/payload_consumer/extent_writer.h"
+#include "update_engine/payload_consumer/fec_file_descriptor.h"
+#include "update_engine/payload_consumer/file_descriptor_utils.h"
+#include "update_engine/payload_consumer/install_plan.h"
+#include "update_engine/payload_consumer/mount_history.h"
+#include "update_engine/payload_consumer/payload_constants.h"
+#include "update_engine/payload_consumer/xz_extent_writer.h"
+
+namespace chromeos_update_engine {
+
+namespace {
+constexpr uint64_t kCacheSize = 1024 * 1024;  // 1MB
+
+// Discard the tail of the block device referenced by |fd|, from the offset
+// |data_size| until the end of the block device. Returns whether the data was
+// discarded.
+bool DiscardPartitionTail(const FileDescriptorPtr& fd, uint64_t data_size) {
+  uint64_t part_size = fd->BlockDevSize();
+  if (!part_size || part_size <= data_size)
+    return false;
+
+  struct blkioctl_request {
+    int number;
+    const char* name;
+  };
+  const std::initializer_list<blkioctl_request> blkioctl_requests = {
+      {BLKDISCARD, "BLKDISCARD"},
+      {BLKSECDISCARD, "BLKSECDISCARD"},
+#ifdef BLKZEROOUT
+      {BLKZEROOUT, "BLKZEROOUT"},
+#endif
+  };
+  for (const auto& req : blkioctl_requests) {
+    int error = 0;
+    if (fd->BlkIoctl(req.number, data_size, part_size - data_size, &error) &&
+        error == 0) {
+      return true;
+    }
+    LOG(WARNING) << "Error discarding the last "
+                 << (part_size - data_size) / 1024 << " KiB using ioctl("
+                 << req.name << ")";
+  }
+  return false;
+}
+
+}  // namespace
+
+// Opens path for read/write. On success returns an open FileDescriptor
+// and sets *err to 0. On failure, sets *err to errno and returns nullptr.
+FileDescriptorPtr OpenFile(const char* path,
+                           int mode,
+                           bool cache_writes,
+                           int* err) {
+  // Try to mark the block device read-only based on the mode. Ignore any
+  // failure since this won't work when passing regular files.
+  bool read_only = (mode & O_ACCMODE) == O_RDONLY;
+  utils::SetBlockDeviceReadOnly(path, read_only);
+
+  FileDescriptorPtr fd(new EintrSafeFileDescriptor());
+  if (cache_writes && !read_only) {
+    fd = FileDescriptorPtr(new CachedFileDescriptor(fd, kCacheSize));
+    LOG(INFO) << "Caching writes.";
+  }
+  if (!fd->Open(path, mode, 000)) {
+    *err = errno;
+    PLOG(ERROR) << "Unable to open file " << path;
+    return nullptr;
+  }
+  *err = 0;
+  return fd;
+}
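+
+// Illustrative example (a sketch, not part of this change): opening a writable
+// partition with write caching enabled, where |path| and the error handling
+// are placeholders for illustration:
+//
+//   int err = 0;
+//   FileDescriptorPtr fd =
+//       OpenFile(path, O_RDWR, true /* cache_writes */, &err);
+//   if (!fd) {
+//     LOG(ERROR) << "Failed to open " << path << ", errno: " << err;
+//   }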
+
+class BsdiffExtentFile : public bsdiff::FileInterface {
+ public:
+  BsdiffExtentFile(std::unique_ptr<ExtentReader> reader, size_t size)
+      : BsdiffExtentFile(std::move(reader), nullptr, size) {}
+  BsdiffExtentFile(std::unique_ptr<ExtentWriter> writer, size_t size)
+      : BsdiffExtentFile(nullptr, std::move(writer), size) {}
+
+  ~BsdiffExtentFile() override = default;
+
+  bool Read(void* buf, size_t count, size_t* bytes_read) override {
+    TEST_AND_RETURN_FALSE(reader_->Read(buf, count));
+    *bytes_read = count;
+    offset_ += count;
+    return true;
+  }
+
+  bool Write(const void* buf, size_t count, size_t* bytes_written) override {
+    TEST_AND_RETURN_FALSE(writer_->Write(buf, count));
+    *bytes_written = count;
+    offset_ += count;
+    return true;
+  }
+
+  bool Seek(off_t pos) override {
+    if (reader_ != nullptr) {
+      TEST_AND_RETURN_FALSE(reader_->Seek(pos));
+      offset_ = pos;
+    } else {
+      // For writes, technically there should be no change of position; the
+      // requested position should be equal to the current offset.
+      TEST_AND_RETURN_FALSE(offset_ == static_cast<uint64_t>(pos));
+    }
+    return true;
+  }
+
+  bool Close() override { return true; }
+
+  bool GetSize(uint64_t* size) override {
+    *size = size_;
+    return true;
+  }
+
+ private:
+  BsdiffExtentFile(std::unique_ptr<ExtentReader> reader,
+                   std::unique_ptr<ExtentWriter> writer,
+                   size_t size)
+      : reader_(std::move(reader)),
+        writer_(std::move(writer)),
+        size_(size),
+        offset_(0) {}
+
+  std::unique_ptr<ExtentReader> reader_;
+  std::unique_ptr<ExtentWriter> writer_;
+  uint64_t size_;
+  uint64_t offset_;
+
+  DISALLOW_COPY_AND_ASSIGN(BsdiffExtentFile);
+};
+
+// A class to be passed to |puffpatch| for reading from |source_fd_| and
+// writing into |target_fd_|.
+class PuffinExtentStream : public puffin::StreamInterface {
+ public:
+  // Constructor for creating a stream for reading from an |ExtentReader|.
+  PuffinExtentStream(std::unique_ptr<ExtentReader> reader, uint64_t size)
+      : PuffinExtentStream(std::move(reader), nullptr, size) {}
+
+  // Constructor for creating a stream for writing to an |ExtentWriter|.
+  PuffinExtentStream(std::unique_ptr<ExtentWriter> writer, uint64_t size)
+      : PuffinExtentStream(nullptr, std::move(writer), size) {}
+
+  ~PuffinExtentStream() override = default;
+
+  bool GetSize(uint64_t* size) const override {
+    *size = size_;
+    return true;
+  }
+
+  bool GetOffset(uint64_t* offset) const override {
+    *offset = offset_;
+    return true;
+  }
+
+  bool Seek(uint64_t offset) override {
+    if (is_read_) {
+      TEST_AND_RETURN_FALSE(reader_->Seek(offset));
+      offset_ = offset;
+    } else {
+      // For writes, technically there should be no change of position; the
+      // requested position should be equal to the current offset.
+      TEST_AND_RETURN_FALSE(offset_ == offset);
+    }
+    return true;
+  }
+
+  bool Read(void* buffer, size_t count) override {
+    TEST_AND_RETURN_FALSE(is_read_);
+    TEST_AND_RETURN_FALSE(reader_->Read(buffer, count));
+    offset_ += count;
+    return true;
+  }
+
+  bool Write(const void* buffer, size_t count) override {
+    TEST_AND_RETURN_FALSE(!is_read_);
+    TEST_AND_RETURN_FALSE(writer_->Write(buffer, count));
+    offset_ += count;
+    return true;
+  }
+
+  bool Close() override { return true; }
+
+ private:
+  PuffinExtentStream(std::unique_ptr<ExtentReader> reader,
+                     std::unique_ptr<ExtentWriter> writer,
+                     uint64_t size)
+      : reader_(std::move(reader)),
+        writer_(std::move(writer)),
+        size_(size),
+        offset_(0),
+        is_read_(reader_ ? true : false) {}
+
+  std::unique_ptr<ExtentReader> reader_;
+  std::unique_ptr<ExtentWriter> writer_;
+  uint64_t size_;
+  uint64_t offset_;
+  bool is_read_;
+
+  DISALLOW_COPY_AND_ASSIGN(PuffinExtentStream);
+};
+
+PartitionWriter::PartitionWriter(
+    const PartitionUpdate& partition_update,
+    const InstallPlan::Partition& install_part,
+    DynamicPartitionControlInterface* dynamic_control,
+    size_t block_size,
+    bool is_interactive)
+    : partition_update_(partition_update),
+      install_part_(install_part),
+      dynamic_control_(dynamic_control),
+      interactive_(is_interactive),
+      block_size_(block_size) {}
+
+PartitionWriter::~PartitionWriter() {
+  Close();
+}
+
+bool PartitionWriter::OpenSourcePartition(uint32_t source_slot,
+                                          bool source_may_exist) {
+  source_path_.clear();
+  if (!source_may_exist) {
+    return true;
+  }
+  if (install_part_.source_size > 0 && !install_part_.source_path.empty()) {
+    source_path_ = install_part_.source_path;
+    int err;
+    source_fd_ = OpenFile(source_path_.c_str(), O_RDONLY, false, &err);
+    if (source_fd_ == nullptr) {
+      LOG(ERROR) << "Unable to open source partition " << install_part_.name
+                 << " on slot " << BootControlInterface::SlotName(source_slot)
+                 << ", file " << source_path_;
+      return false;
+    }
+  }
+  return true;
+}
+
+bool PartitionWriter::Init(const InstallPlan* install_plan,
+                           bool source_may_exist,
+                           size_t next_op_index) {
+  const PartitionUpdate& partition = partition_update_;
+  uint32_t source_slot = install_plan->source_slot;
+  uint32_t target_slot = install_plan->target_slot;
+  TEST_AND_RETURN_FALSE(OpenSourcePartition(source_slot, source_may_exist));
+
+  // We shouldn't open the source partition in certain cases, e.g. some dynamic
+  // partitions in a delta payload, or partitions included in the full payload
+  // for partial updates. The source size is used as the indicator.
+
+  target_path_ = install_part_.target_path;
+  int err;
+
+  int flags = O_RDWR;
+  if (!interactive_)
+    flags |= O_DSYNC;
+
+  LOG(INFO) << "Opening " << target_path_ << " partition with"
+            << (interactive_ ? "out" : "") << " O_DSYNC";
+
+  target_fd_ = OpenFile(target_path_.c_str(), flags, true, &err);
+  if (!target_fd_) {
+    LOG(ERROR) << "Unable to open target partition "
+               << partition.partition_name() << " on slot "
+               << BootControlInterface::SlotName(target_slot) << ", file "
+               << target_path_;
+    return false;
+  }
+
+  LOG(INFO) << "Applying " << partition.operations().size()
+            << " operations to partition \"" << partition.partition_name()
+            << "\"";
+
+  // Discard the end of the partition, but ignore failures.
+  DiscardPartitionTail(target_fd_, install_part_.target_size);
+
+  return true;
+}
+
+bool PartitionWriter::PerformReplaceOperation(const InstallOperation& operation,
+                                              const void* data,
+                                              size_t count) {
+  // Setup the ExtentWriter stack based on the operation type.
+  std::unique_ptr<ExtentWriter> writer = CreateBaseExtentWriter();
+
+  if (operation.type() == InstallOperation::REPLACE_BZ) {
+    writer.reset(new BzipExtentWriter(std::move(writer)));
+  } else if (operation.type() == InstallOperation::REPLACE_XZ) {
+    writer.reset(new XzExtentWriter(std::move(writer)));
+  }
+
+  TEST_AND_RETURN_FALSE(writer->Init(operation.dst_extents(), block_size_));
+  TEST_AND_RETURN_FALSE(writer->Write(data, operation.data_length()));
+
+  return true;
+}
+
+bool PartitionWriter::PerformZeroOrDiscardOperation(
+    const InstallOperation& operation) {
+#ifdef BLKZEROOUT
+  bool attempt_ioctl = true;
+  int request =
+      (operation.type() == InstallOperation::ZERO ? BLKZEROOUT : BLKDISCARD);
+#else   // !defined(BLKZEROOUT)
+  bool attempt_ioctl = false;
+  int request = 0;
+#endif  // !defined(BLKZEROOUT)
+
+  brillo::Blob zeros;
+  for (const Extent& extent : operation.dst_extents()) {
+    const uint64_t start = extent.start_block() * block_size_;
+    const uint64_t length = extent.num_blocks() * block_size_;
+    if (attempt_ioctl) {
+      int result = 0;
+      if (target_fd_->BlkIoctl(request, start, length, &result) && result == 0)
+        continue;
+      attempt_ioctl = false;
+    }
+    // In case of failure, we fall back to writing 0 to the selected region.
+    zeros.resize(16 * block_size_);
+    for (uint64_t offset = 0; offset < length; offset += zeros.size()) {
+      uint64_t chunk_length =
+          std::min(length - offset, static_cast<uint64_t>(zeros.size()));
+      TEST_AND_RETURN_FALSE(utils::WriteAll(
+          target_fd_, zeros.data(), chunk_length, start + offset));
+    }
+  }
+  return true;
+}
+
+bool PartitionWriter::PerformSourceCopyOperation(
+    const InstallOperation& operation, ErrorCode* error) {
+  TEST_AND_RETURN_FALSE(source_fd_ != nullptr);
+
+  // The device may optimize the SOURCE_COPY operation.
+  // Since this is a device-specific optimization, let the
+  // DynamicPartitionControlInterface decide whether the operation should be
+  // skipped.
+  const PartitionUpdate& partition = partition_update_;
+  const auto& partition_control = dynamic_control_;
+
+  InstallOperation buf;
+  const bool should_optimize = partition_control->OptimizeOperation(
+      partition.partition_name(), operation, &buf);
+  const InstallOperation& optimized = should_optimize ? buf : operation;
+
+  if (operation.has_src_sha256_hash()) {
+    bool read_ok;
+    brillo::Blob source_hash;
+    brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
+                                      operation.src_sha256_hash().end());
+
+    // We fall back to use the error corrected device if the hash of the raw
+    // device doesn't match or there was an error reading the source partition.
+    // Note that this code will also fall back if writing the target partition
+    // fails.
+    if (should_optimize) {
+      // Hash operation.src_extents(), then copy optimized.src_extents to
+      // optimized.dst_extents.
+      read_ok =
+          fd_utils::ReadAndHashExtents(
+              source_fd_, operation.src_extents(), block_size_, &source_hash) &&
+          fd_utils::CopyAndHashExtents(source_fd_,
+                                       optimized.src_extents(),
+                                       target_fd_,
+                                       optimized.dst_extents(),
+                                       block_size_,
+                                       nullptr /* skip hashing */);
+    } else {
+      read_ok = fd_utils::CopyAndHashExtents(source_fd_,
+                                             operation.src_extents(),
+                                             target_fd_,
+                                             operation.dst_extents(),
+                                             block_size_,
+                                             &source_hash);
+    }
+    if (read_ok && expected_source_hash == source_hash)
+      return true;
+    LOG(WARNING) << "Source hash from RAW device mismatched, attempting to "
+                    "correct using ECC";
+    if (!OpenCurrentECCPartition()) {
+      // The following function call will return false since the source hash
+      // mismatches, but we still want to call it so it prints the appropriate
+      // log message.
+      return ValidateSourceHash(source_hash, operation, source_fd_, error);
+    }
+
+    LOG(WARNING) << "Source hash from RAW device mismatched: found "
+                 << base::HexEncode(source_hash.data(), source_hash.size())
+                 << ", expected "
+                 << base::HexEncode(expected_source_hash.data(),
+                                    expected_source_hash.size());
+    if (should_optimize) {
+      TEST_AND_RETURN_FALSE(fd_utils::ReadAndHashExtents(
+          source_ecc_fd_, operation.src_extents(), block_size_, &source_hash));
+      TEST_AND_RETURN_FALSE(
+          fd_utils::CopyAndHashExtents(source_ecc_fd_,
+                                       optimized.src_extents(),
+                                       target_fd_,
+                                       optimized.dst_extents(),
+                                       block_size_,
+                                       nullptr /* skip hashing */));
+    } else {
+      TEST_AND_RETURN_FALSE(
+          fd_utils::CopyAndHashExtents(source_ecc_fd_,
+                                       operation.src_extents(),
+                                       target_fd_,
+                                       operation.dst_extents(),
+                                       block_size_,
+                                       &source_hash));
+    }
+    TEST_AND_RETURN_FALSE(
+        ValidateSourceHash(source_hash, operation, source_ecc_fd_, error));
+    // At this point reading from the error corrected device worked, but
+    // reading from the raw device failed, so this is considered a recovered
+    // failure.
+    source_ecc_recovered_failures_++;
+  } else {
+    // When the operation doesn't include a source hash, we attempt the error
+    // corrected device first since we can't verify the block in the raw device
+    // at this point, but we fall back to the raw device since the error
+    // corrected device can be shorter or not available.
+
+    if (OpenCurrentECCPartition() &&
+        fd_utils::CopyAndHashExtents(source_ecc_fd_,
+                                     optimized.src_extents(),
+                                     target_fd_,
+                                     optimized.dst_extents(),
+                                     block_size_,
+                                     nullptr)) {
+      return true;
+    }
+    TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_fd_,
+                                                       optimized.src_extents(),
+                                                       target_fd_,
+                                                       optimized.dst_extents(),
+                                                       block_size_,
+                                                       nullptr));
+  }
+  return true;
+}
+
+bool PartitionWriter::PerformSourceBsdiffOperation(
+    const InstallOperation& operation,
+    ErrorCode* error,
+    const void* data,
+    size_t count) {
+  FileDescriptorPtr source_fd = ChooseSourceFD(operation, error);
+  TEST_AND_RETURN_FALSE(source_fd != nullptr);
+
+  auto reader = std::make_unique<DirectExtentReader>();
+  TEST_AND_RETURN_FALSE(
+      reader->Init(source_fd, operation.src_extents(), block_size_));
+  auto src_file = std::make_unique<BsdiffExtentFile>(
+      std::move(reader),
+      utils::BlocksInExtents(operation.src_extents()) * block_size_);
+
+  auto writer = CreateBaseExtentWriter();
+  TEST_AND_RETURN_FALSE(writer->Init(operation.dst_extents(), block_size_));
+  auto dst_file = std::make_unique<BsdiffExtentFile>(
+      std::move(writer),
+      utils::BlocksInExtents(operation.dst_extents()) * block_size_);
+
+  TEST_AND_RETURN_FALSE(bsdiff::bspatch(std::move(src_file),
+                                        std::move(dst_file),
+                                        reinterpret_cast<const uint8_t*>(data),
+                                        count) == 0);
+  return true;
+}
+
+bool PartitionWriter::PerformPuffDiffOperation(
+    const InstallOperation& operation,
+    ErrorCode* error,
+    const void* data,
+    size_t count) {
+  FileDescriptorPtr source_fd = ChooseSourceFD(operation, error);
+  TEST_AND_RETURN_FALSE(source_fd != nullptr);
+
+  auto reader = std::make_unique<DirectExtentReader>();
+  TEST_AND_RETURN_FALSE(
+      reader->Init(source_fd, operation.src_extents(), block_size_));
+  puffin::UniqueStreamPtr src_stream(new PuffinExtentStream(
+      std::move(reader),
+      utils::BlocksInExtents(operation.src_extents()) * block_size_));
+
+  auto writer = CreateBaseExtentWriter();
+  TEST_AND_RETURN_FALSE(writer->Init(operation.dst_extents(), block_size_));
+  puffin::UniqueStreamPtr dst_stream(new PuffinExtentStream(
+      std::move(writer),
+      utils::BlocksInExtents(operation.dst_extents()) * block_size_));
+
+  constexpr size_t kMaxCacheSize = 5 * 1024 * 1024;  // Total 5MB cache.
+  TEST_AND_RETURN_FALSE(
+      puffin::PuffPatch(std::move(src_stream),
+                        std::move(dst_stream),
+                        reinterpret_cast<const uint8_t*>(data),
+                        count,
+                        kMaxCacheSize));
+  return true;
+}
+
+FileDescriptorPtr PartitionWriter::ChooseSourceFD(
+    const InstallOperation& operation, ErrorCode* error) {
+  if (source_fd_ == nullptr) {
+    LOG(ERROR) << "ChooseSourceFD fail: source_fd_ == nullptr";
+    return nullptr;
+  }
+
+  if (!operation.has_src_sha256_hash()) {
+    // When the operation doesn't include a source hash, we attempt the error
+    // corrected device first since we can't verify the block in the raw device
+    // at this point, but we first need to make sure all extents are readable
+    // since the error corrected device can be shorter or not available.
+    if (OpenCurrentECCPartition() &&
+        fd_utils::ReadAndHashExtents(
+            source_ecc_fd_, operation.src_extents(), block_size_, nullptr)) {
+      return source_ecc_fd_;
+    }
+    return source_fd_;
+  }
+
+  brillo::Blob source_hash;
+  brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
+                                    operation.src_sha256_hash().end());
+  if (fd_utils::ReadAndHashExtents(
+          source_fd_, operation.src_extents(), block_size_, &source_hash) &&
+      source_hash == expected_source_hash) {
+    return source_fd_;
+  }
+  // We fall back to use the error corrected device if the hash of the raw
+  // device doesn't match or there was an error reading the source partition.
+  if (!OpenCurrentECCPartition()) {
+    // The following function call will return false since the source hash
+    // mismatches, but we still want to call it so it prints the appropriate
+    // log message.
+    ValidateSourceHash(source_hash, operation, source_fd_, error);
+    return nullptr;
+  }
+  LOG(WARNING) << "Source hash from RAW device mismatched: found "
+               << base::HexEncode(source_hash.data(), source_hash.size())
+               << ", expected "
+               << base::HexEncode(expected_source_hash.data(),
+                                  expected_source_hash.size());
+
+  if (fd_utils::ReadAndHashExtents(
+          source_ecc_fd_, operation.src_extents(), block_size_, &source_hash) &&
+      ValidateSourceHash(source_hash, operation, source_ecc_fd_, error)) {
+    // At this point reading from the error corrected device worked, but
+    // reading from the raw device failed, so this is considered a recovered
+    // failure.
+    source_ecc_recovered_failures_++;
+    return source_ecc_fd_;
+  }
+  return nullptr;
+}
+
+bool PartitionWriter::OpenCurrentECCPartition() {
+  // No support for ECC for full payloads.
+  // A full payload should not have any operation that requires ECC partitions.
+  if (source_ecc_fd_)
+    return true;
+
+  if (source_ecc_open_failure_)
+    return false;
+
+#if USE_FEC
+  const PartitionUpdate& partition = partition_update_;
+  const InstallPlan::Partition& install_part = install_part_;
+  std::string path = install_part.source_path;
+  FileDescriptorPtr fd(new FecFileDescriptor());
+  if (!fd->Open(path.c_str(), O_RDONLY, 0)) {
+    PLOG(ERROR) << "Unable to open ECC source partition "
+                << partition.partition_name() << ", file " << path;
+    source_ecc_open_failure_ = true;
+    return false;
+  }
+  source_ecc_fd_ = fd;
+#else
+  // No support for ECC compiled.
+  source_ecc_open_failure_ = true;
+#endif  // USE_FEC
+
+  return !source_ecc_open_failure_;
+}
+
+int PartitionWriter::Close() {
+  int err = 0;
+  if (source_fd_ && !source_fd_->Close()) {
+    err = errno;
+    PLOG(ERROR) << "Error closing source partition";
+    if (!err)
+      err = 1;
+  }
+  source_fd_.reset();
+  source_path_.clear();
+
+  if (target_fd_ && !target_fd_->Close()) {
+    err = errno;
+    PLOG(ERROR) << "Error closing target partition";
+    if (!err)
+      err = 1;
+  }
+  target_fd_.reset();
+  target_path_.clear();
+
+  if (source_ecc_fd_ && !source_ecc_fd_->Close()) {
+    err = errno;
+    PLOG(ERROR) << "Error closing ECC source partition";
+    if (!err)
+      err = 1;
+  }
+  source_ecc_fd_.reset();
+  source_ecc_open_failure_ = false;
+  return -err;
+}
+
+void PartitionWriter::CheckpointUpdateProgress(size_t next_op_index) {
+  target_fd_->Flush();
+}
+
+std::unique_ptr<ExtentWriter> PartitionWriter::CreateBaseExtentWriter() {
+  return std::make_unique<DirectExtentWriter>(target_fd_);
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/partition_writer.h b/payload_consumer/partition_writer.h
new file mode 100644
index 0000000..82e557a
--- /dev/null
+++ b/payload_consumer/partition_writer.h
@@ -0,0 +1,147 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PARTITION_WRITER_H_
+#define UPDATE_ENGINE_PARTITION_WRITER_H_
+
+#include <cstdint>
+#include <memory>
+#include <string>
+
+#include <brillo/secure_blob.h>
+#include <gtest/gtest_prod.h>
+
+#include "update_engine/common/dynamic_partition_control_interface.h"
+#include "update_engine/payload_consumer/extent_writer.h"
+#include "update_engine/payload_consumer/file_descriptor.h"
+#include "update_engine/payload_consumer/install_plan.h"
+#include "update_engine/update_metadata.pb.h"
+
+namespace chromeos_update_engine {
+class PartitionWriter {
+ public:
+  PartitionWriter(const PartitionUpdate& partition_update,
+                  const InstallPlan::Partition& install_part,
+                  DynamicPartitionControlInterface* dynamic_control,
+                  size_t block_size,
+                  bool is_interactive);
+  virtual ~PartitionWriter();
+  static bool ValidateSourceHash(const brillo::Blob& calculated_hash,
+                                 const InstallOperation& operation,
+                                 const FileDescriptorPtr source_fd,
+                                 ErrorCode* error);
+
+  // Performs the necessary initialization work before an InstallOperation can
+  // be applied to this partition.
+  [[nodiscard]] virtual bool Init(const InstallPlan* install_plan,
+                                  bool source_may_exist,
+                                  size_t next_op_index);
+
+  // |CheckpointUpdateProgress| will be called after SetNextOpIndex(), but it's
+  // optional. DeltaPerformer may or may not call this every time an operation
+  // is applied.
+  // |next_op_index| is the index of the next operation that should be applied;
+  // |next_op_index - 1| is the last operation that has already been applied.
+  virtual void CheckpointUpdateProgress(size_t next_op_index);
+
+  // Closes the partition writer. When this function is called, there's no
+  // guarantee that all |InstallOperations| have been sent to |PartitionWriter|.
+  // This function will be called even if we are pausing/aborting the update.
+  int Close();
+
+  // These perform a specific type of operation and return true on success.
+  // |error| will be set on a source hash mismatch; otherwise |error| might not
+  // be set even if the operation fails.
+  [[nodiscard]] virtual bool PerformReplaceOperation(
+      const InstallOperation& operation, const void* data, size_t count);
+  [[nodiscard]] virtual bool PerformZeroOrDiscardOperation(
+      const InstallOperation& operation);
+
+  [[nodiscard]] virtual bool PerformSourceCopyOperation(
+      const InstallOperation& operation, ErrorCode* error);
+  [[nodiscard]] virtual bool PerformSourceBsdiffOperation(
+      const InstallOperation& operation,
+      ErrorCode* error,
+      const void* data,
+      size_t count);
+  [[nodiscard]] virtual bool PerformPuffDiffOperation(
+      const InstallOperation& operation,
+      ErrorCode* error,
+      const void* data,
+      size_t count);
+
+  // |DeltaPerformer| calls this when all install operations have been sent to
+  // the partition writer. No |Perform*Operation| methods will be called
+  // afterwards, and the partition writer is expected to be closed soon.
+  [[nodiscard]] virtual bool FinishedInstallOps() { return true; }
+
+ protected:
+  friend class PartitionWriterTest;
+  FRIEND_TEST(PartitionWriterTest, ChooseSourceFDTest);
+
+  bool OpenSourcePartition(uint32_t source_slot, bool source_may_exist);
+
+  bool OpenCurrentECCPartition();
+  // For a given operation, chooses the source fd to be used (raw device or
+  // error correction device) based on the source operation hash.
+  // Returns nullptr if the source hash mismatch cannot be corrected, and sets
+  // |error| accordingly.
+  FileDescriptorPtr ChooseSourceFD(const InstallOperation& operation,
+                                   ErrorCode* error);
+  [[nodiscard]] virtual std::unique_ptr<ExtentWriter> CreateBaseExtentWriter();
+
+  const PartitionUpdate& partition_update_;
+  const InstallPlan::Partition& install_part_;
+  DynamicPartitionControlInterface* dynamic_control_;
+  // Path to source partition
+  std::string source_path_;
+  // Path to target partition
+  std::string target_path_;
+  FileDescriptorPtr source_fd_;
+  FileDescriptorPtr target_fd_;
+  const bool interactive_;
+  const size_t block_size_;
+  // File descriptor of the error corrected source partition. Only set while
+  // updating a partition with a delta payload when error correction is
+  // available for that partition. The size of the error corrected device is
+  // smaller than the underlying raw device, since it doesn't include the error
+  // correction blocks.
+  FileDescriptorPtr source_ecc_fd_{nullptr};
+
+  // The total number of operations that failed source hash verification but
+  // passed after falling back to the error-corrected |source_ecc_fd_| device.
+  uint64_t source_ecc_recovered_failures_{0};
+
+  // Whether opening the current partition as an error-corrected device failed.
+  // Used to avoid re-opening the same source partition if it is not actually
+  // error corrected.
+  bool source_ecc_open_failure_{false};
+};
+
+namespace partition_writer {
+// Returns a PartitionWriter instance for performing InstallOperations on this
+// partition. Uses VABCPartitionWriter when Virtual AB Compression is enabled.
+std::unique_ptr<PartitionWriter> CreatePartitionWriter(
+    const PartitionUpdate& partition_update,
+    const InstallPlan::Partition& install_part,
+    DynamicPartitionControlInterface* dynamic_control,
+    size_t block_size,
+    bool is_interactive,
+    bool is_dynamic_partition);
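+
+// Illustrative lifecycle sketch (an assumption-laden example, not part of this
+// change): a caller such as DeltaPerformer would typically drive a writer
+// roughly as follows, where |install_plan|, |operation|, |data| and |count|
+// are placeholders:
+//
+//   auto writer = CreatePartitionWriter(
+//       partition_update, install_part, dynamic_control, block_size,
+//       false /* is_interactive */, false /* is_dynamic_partition */);
+//   TEST_AND_RETURN_FALSE(
+//       writer->Init(&install_plan, true /* source_may_exist */, 0));
+//   TEST_AND_RETURN_FALSE(
+//       writer->PerformReplaceOperation(operation, data, count));
+//   writer->CheckpointUpdateProgress(1);
+//   writer->Close();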
+}  // namespace partition_writer
+}  // namespace chromeos_update_engine
+
+#endif
diff --git a/payload_consumer/partition_writer_factory_android.cc b/payload_consumer/partition_writer_factory_android.cc
new file mode 100644
index 0000000..184e2d5
--- /dev/null
+++ b/payload_consumer/partition_writer_factory_android.cc
@@ -0,0 +1,53 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <cstddef>
+#include <memory>
+
+#include <base/logging.h>
+
+#include "update_engine/payload_consumer/vabc_partition_writer.h"
+
+namespace chromeos_update_engine::partition_writer {
+
+std::unique_ptr<PartitionWriter> CreatePartitionWriter(
+    const PartitionUpdate& partition_update,
+    const InstallPlan::Partition& install_part,
+    DynamicPartitionControlInterface* dynamic_control,
+    size_t block_size,
+    bool is_interactive,
+    bool is_dynamic_partition) {
+  if (dynamic_control && dynamic_control->UpdateUsesSnapshotCompression() &&
+      is_dynamic_partition) {
+    LOG(INFO)
+        << "Virtual AB Compression Enabled, using VABC Partition Writer for `"
+        << install_part.name << '`';
+    return std::make_unique<VABCPartitionWriter>(partition_update,
+                                                 install_part,
+                                                 dynamic_control,
+                                                 block_size,
+                                                 is_interactive);
+  } else {
+    LOG(INFO) << "Virtual AB Compression disabled, using Partition Writer for `"
+              << install_part.name << '`';
+    return std::make_unique<PartitionWriter>(partition_update,
+                                             install_part,
+                                             dynamic_control,
+                                             block_size,
+                                             is_interactive);
+  }
+}
+}  // namespace chromeos_update_engine::partition_writer
diff --git a/payload_consumer/partition_writer_factory_chromeos.cc b/payload_consumer/partition_writer_factory_chromeos.cc
new file mode 100644
index 0000000..609f043
--- /dev/null
+++ b/payload_consumer/partition_writer_factory_chromeos.cc
@@ -0,0 +1,38 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <cstddef>
+#include <memory>
+
+#include <base/logging.h>
+
+#include "update_engine/payload_consumer/partition_writer.h"
+
+namespace chromeos_update_engine::partition_writer {
+std::unique_ptr<PartitionWriter> CreatePartitionWriter(
+    const PartitionUpdate& partition_update,
+    const InstallPlan::Partition& install_part,
+    DynamicPartitionControlInterface* dynamic_control,
+    size_t block_size,
+    bool is_interactive,
+    bool is_dynamic_partition) {
+  return std::make_unique<PartitionWriter>(partition_update,
+                                           install_part,
+                                           dynamic_control,
+                                           block_size,
+                                           is_interactive);
+}
+}  // namespace chromeos_update_engine::partition_writer
diff --git a/payload_consumer/partition_writer_unittest.cc b/payload_consumer/partition_writer_unittest.cc
new file mode 100644
index 0000000..263f338
--- /dev/null
+++ b/payload_consumer/partition_writer_unittest.cc
@@ -0,0 +1,204 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <memory>
+#include <vector>
+
+#include <brillo/secure_blob.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/dynamic_partition_control_stub.h"
+#include "update_engine/common/error_code.h"
+#include "update_engine/common/fake_prefs.h"
+#include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/delta_performer.h"
+#include "update_engine/payload_consumer/extent_reader.h"
+#include "update_engine/payload_consumer/extent_writer.h"
+#include "update_engine/payload_consumer/fake_file_descriptor.h"
+#include "update_engine/payload_consumer/file_descriptor.h"
+#include "update_engine/payload_consumer/install_plan.h"
+#include "update_engine/payload_generator/annotated_operation.h"
+#include "update_engine/payload_generator/delta_diff_generator.h"
+#include "update_engine/payload_generator/extent_ranges.h"
+#include "update_engine/payload_generator/payload_file.h"
+#include "update_engine/payload_generator/payload_generation_config.h"
+#include "update_engine/update_metadata.pb.h"
+
+namespace chromeos_update_engine {
+
+class PartitionWriterTest : public testing::Test {
+ public:
+  // Helper function to pretend that the ECC file descriptor was already opened.
+  // Returns a pointer to the created file descriptor.
+  FakeFileDescriptor* SetFakeECCFile(size_t size) {
+    EXPECT_FALSE(writer_.source_ecc_fd_) << "source_ecc_fd_ already open.";
+    FakeFileDescriptor* ret = new FakeFileDescriptor();
+    fake_ecc_fd_.reset(ret);
+    // Call open to simulate it was already opened.
+    ret->Open("", 0);
+    ret->SetFileSize(size);
+    writer_.source_ecc_fd_ = fake_ecc_fd_;
+    return ret;
+  }
+
+  uint64_t GetSourceEccRecoveredFailures() const {
+    return writer_.source_ecc_recovered_failures_;
+  }
+
+  AnnotatedOperation GenerateSourceCopyOp(const brillo::Blob& copied_data,
+                                          bool add_hash,
+                                          PartitionConfig* old_part = nullptr) {
+    PayloadGenerationConfig config;
+    const uint64_t kDefaultBlockSize = config.block_size;
+    EXPECT_EQ(0U, copied_data.size() % kDefaultBlockSize);
+    uint64_t num_blocks = copied_data.size() / kDefaultBlockSize;
+    AnnotatedOperation aop;
+    *(aop.op.add_src_extents()) = ExtentForRange(0, num_blocks);
+    *(aop.op.add_dst_extents()) = ExtentForRange(0, num_blocks);
+    aop.op.set_type(InstallOperation::SOURCE_COPY);
+    brillo::Blob src_hash;
+    EXPECT_TRUE(HashCalculator::RawHashOfData(copied_data, &src_hash));
+    if (add_hash)
+      aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size());
+
+    return aop;
+  }
+
+  brillo::Blob PerformSourceCopyOp(const InstallOperation& op,
+                                   const brillo::Blob blob_data) {
+    ScopedTempFile source_partition("Blob-XXXXXX");
+    FileDescriptorPtr fd(new EintrSafeFileDescriptor());
+    DirectExtentWriter extent_writer{fd};
+    EXPECT_TRUE(fd->Open(source_partition.path().c_str(), O_RDWR));
+    EXPECT_TRUE(extent_writer.Init(op.src_extents(), kBlockSize));
+    EXPECT_TRUE(extent_writer.Write(blob_data.data(), blob_data.size()));
+
+    ScopedTempFile target_partition("Blob-XXXXXX");
+
+    install_part_.source_path = source_partition.path();
+    install_part_.target_path = target_partition.path();
+    install_part_.source_size = blob_data.size();
+    install_part_.target_size = blob_data.size();
+
+    ErrorCode error;
+    EXPECT_TRUE(writer_.Init(&install_plan_, true, 0));
+    EXPECT_TRUE(writer_.PerformSourceCopyOperation(op, &error));
+    writer_.CheckpointUpdateProgress(1);
+
+    brillo::Blob output_data;
+    EXPECT_TRUE(utils::ReadFile(target_partition.path(), &output_data));
+    return output_data;
+  }
+
+  FakePrefs prefs_{};
+  InstallPlan install_plan_{};
+  InstallPlan::Payload payload_{};
+  DynamicPartitionControlStub dynamic_control_{};
+  FileDescriptorPtr fake_ecc_fd_{};
+  DeltaArchiveManifest manifest_{};
+  PartitionUpdate partition_update_{};
+  InstallPlan::Partition install_part_{};
+  PartitionWriter writer_{
+      partition_update_, install_part_, &dynamic_control_, kBlockSize, false};
+};
+
+// Test that the error-corrected file descriptor is used to read a partition
+// when no hash is available for SOURCE_COPY, but that it falls back to the
+// normal file descriptor when the error corrected one is too small.
+TEST_F(PartitionWriterTest, ErrorCorrectionSourceCopyWhenNoHashFallbackTest) {
+  constexpr size_t kCopyOperationSize = 4 * 4096;
+  ScopedTempFile source("Source-XXXXXX");
+  // Setup the source path with the right expected data.
+  brillo::Blob expected_data = FakeFileDescriptorData(kCopyOperationSize);
+  EXPECT_TRUE(test_utils::WriteFileVector(source.path(), expected_data));
+
+  // Setup the fec file descriptor as the fake stream, with smaller data than
+  // the expected.
+  FakeFileDescriptor* fake_fec = SetFakeECCFile(kCopyOperationSize / 2);
+
+  PartitionConfig old_part(kPartitionNameRoot);
+  old_part.path = source.path();
+  old_part.size = expected_data.size();
+
+  // The payload operation doesn't include an operation hash.
+  auto source_copy_op = GenerateSourceCopyOp(expected_data, false, &old_part);
+
+  auto output_data = PerformSourceCopyOp(source_copy_op.op, expected_data);
+  ASSERT_EQ(output_data, expected_data);
+
+  // Verify that an attempt was made to use fake_fec. Since the file descriptor
+  // is shorter, it can take more than one read to realize it reached EOF.
+  EXPECT_LE(1U, fake_fec->GetReadOps().size());
+  // This fallback doesn't count as an error-corrected operation since the
+  // operation hash was not available.
+  EXPECT_EQ(0U, GetSourceEccRecoveredFailures());
+}
+
+// Test that the error-corrected file descriptor is used to read the partition
+// since the source partition doesn't match the operation hash.
+TEST_F(PartitionWriterTest, ErrorCorrectionSourceCopyFallbackTest) {
+  constexpr size_t kCopyOperationSize = 4 * 4096;
+  // Write invalid data to the source image, which doesn't match the expected
+  // hash.
+  brillo::Blob invalid_data(kCopyOperationSize, 0x55);
+
+  // Setup the fec file descriptor as the fake stream, which matches
+  // |expected_data|.
+  FakeFileDescriptor* fake_fec = SetFakeECCFile(kCopyOperationSize);
+  brillo::Blob expected_data = FakeFileDescriptorData(kCopyOperationSize);
+
+  auto source_copy_op = GenerateSourceCopyOp(expected_data, true);
+  auto output_data = PerformSourceCopyOp(source_copy_op.op, invalid_data);
+  ASSERT_EQ(output_data, expected_data);
+
+  // Verify that the fake_fec was actually used.
+  EXPECT_EQ(1U, fake_fec->GetReadOps().size());
+  EXPECT_EQ(1U, GetSourceEccRecoveredFailures());
+}
+
+TEST_F(PartitionWriterTest, ChooseSourceFDTest) {
+  constexpr size_t kSourceSize = 4 * 4096;
+  ScopedTempFile source("Source-XXXXXX");
+  // Write invalid data to the source image, which doesn't match the expected
+  // hash.
+  brillo::Blob invalid_data(kSourceSize, 0x55);
+  EXPECT_TRUE(test_utils::WriteFileVector(source.path(), invalid_data));
+
+  writer_.source_fd_ = std::make_shared<EintrSafeFileDescriptor>();
+  writer_.source_fd_->Open(source.path().c_str(), O_RDONLY);
+
+  // Setup the fec file descriptor as the fake stream, which matches
+  // |expected_data|.
+  FakeFileDescriptor* fake_fec = SetFakeECCFile(kSourceSize);
+  brillo::Blob expected_data = FakeFileDescriptorData(kSourceSize);
+
+  InstallOperation op;
+  *(op.add_src_extents()) = ExtentForRange(0, kSourceSize / 4096);
+  brillo::Blob src_hash;
+  EXPECT_TRUE(HashCalculator::RawHashOfData(expected_data, &src_hash));
+  op.set_src_sha256_hash(src_hash.data(), src_hash.size());
+
+  ErrorCode error = ErrorCode::kSuccess;
+  EXPECT_EQ(writer_.source_ecc_fd_, writer_.ChooseSourceFD(op, &error));
+  EXPECT_EQ(ErrorCode::kSuccess, error);
+  // Verify that the fake_fec was actually used.
+  EXPECT_EQ(1U, fake_fec->GetReadOps().size());
+  EXPECT_EQ(1U, GetSourceEccRecoveredFailures());
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/payload_constants.cc b/payload_consumer/payload_constants.cc
index a2368a4..d62a0ec 100644
--- a/payload_consumer/payload_constants.cc
+++ b/payload_consumer/payload_constants.cc
@@ -16,24 +16,28 @@
 
 #include "update_engine/payload_consumer/payload_constants.h"
 
+#include <base/logging.h>
+
 namespace chromeos_update_engine {
 
-const uint64_t kChromeOSMajorPayloadVersion = 1;
+// const uint64_t kChromeOSMajorPayloadVersion = 1;  DEPRECATED
 const uint64_t kBrilloMajorPayloadVersion = 2;
 
-const uint32_t kMinSupportedMinorPayloadVersion = 1;
-const uint32_t kMaxSupportedMinorPayloadVersion = 6;
+const uint64_t kMinSupportedMajorPayloadVersion = kBrilloMajorPayloadVersion;
+const uint64_t kMaxSupportedMajorPayloadVersion = kBrilloMajorPayloadVersion;
 
 const uint32_t kFullPayloadMinorVersion = 0;
-const uint32_t kInPlaceMinorPayloadVersion = 1;
+// const uint32_t kInPlaceMinorPayloadVersion = 1;  DEPRECATED
 const uint32_t kSourceMinorPayloadVersion = 2;
 const uint32_t kOpSrcHashMinorPayloadVersion = 3;
 const uint32_t kBrotliBsdiffMinorPayloadVersion = 4;
 const uint32_t kPuffdiffMinorPayloadVersion = 5;
 const uint32_t kVerityMinorPayloadVersion = 6;
+const uint32_t kPartialUpdateMinorPayloadVersion = 7;
 
-const uint64_t kMinSupportedMajorPayloadVersion = 1;
-const uint64_t kMaxSupportedMajorPayloadVersion = 2;
+const uint32_t kMinSupportedMinorPayloadVersion = kSourceMinorPayloadVersion;
+const uint32_t kMaxSupportedMinorPayloadVersion =
+    kPartialUpdateMinorPayloadVersion;
 
 const uint64_t kMaxPayloadHeaderSize = 24;
 
@@ -44,10 +48,6 @@
 
 const char* InstallOperationTypeName(InstallOperation::Type op_type) {
   switch (op_type) {
-    case InstallOperation::BSDIFF:
-      return "BSDIFF";
-    case InstallOperation::MOVE:
-      return "MOVE";
     case InstallOperation::REPLACE:
       return "REPLACE";
     case InstallOperation::REPLACE_BZ:
@@ -66,6 +66,10 @@
       return "PUFFDIFF";
     case InstallOperation::BROTLI_BSDIFF:
       return "BROTLI_BSDIFF";
+
+    case InstallOperation::BSDIFF:
+    case InstallOperation::MOVE:
+      NOTREACHED();
   }
   return "<unknown_op>";
 }
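
Taken together, the constants above narrow the supported window to major version 2 and minor versions 2 through 7, with minor version 0 reserved for full payloads. A rough sketch of how such a window could be checked follows; the constants are illustrative mirrors of the values defined above, not the real validation code.

#include <cstdint>

constexpr uint64_t kMinMajor = 2;   // kBrilloMajorPayloadVersion
constexpr uint64_t kMaxMajor = 2;
constexpr uint32_t kFullMinor = 0;  // kFullPayloadMinorVersion
constexpr uint32_t kMinMinor = 2;   // kSourceMinorPayloadVersion
constexpr uint32_t kMaxMinor = 7;   // kPartialUpdateMinorPayloadVersion

inline bool IsPayloadVersionSupported(uint64_t major, uint32_t minor,
                                      bool is_full_payload) {
  if (major < kMinMajor || major > kMaxMajor)
    return false;
  // Full payloads carry minor version 0; delta payloads must fall in range.
  return is_full_payload ? minor == kFullMinor
                         : (minor >= kMinMinor && minor <= kMaxMinor);
}
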
diff --git a/payload_consumer/payload_constants.h b/payload_consumer/payload_constants.h
index 1642488..03647ee 100644
--- a/payload_consumer/payload_constants.h
+++ b/payload_consumer/payload_constants.h
@@ -26,7 +26,7 @@
 namespace chromeos_update_engine {
 
 // The major version used by Chrome OS.
-extern const uint64_t kChromeOSMajorPayloadVersion;
+// extern const uint64_t kChromeOSMajorPayloadVersion;  DEPRECATED
 
 // The major version used by Brillo.
 extern const uint64_t kBrilloMajorPayloadVersion;
@@ -39,7 +39,7 @@
 extern const uint32_t kFullPayloadMinorVersion;
 
 // The minor version used by the in-place delta generator algorithm.
-extern const uint32_t kInPlaceMinorPayloadVersion;
+// extern const uint32_t kInPlaceMinorPayloadVersion;  DEPRECATED
 
 // The minor version used by the A to B delta generator algorithm.
 extern const uint32_t kSourceMinorPayloadVersion;
@@ -56,6 +56,9 @@
 // The minor version that allows Verity hash tree and FEC generation.
 extern const uint32_t kVerityMinorPayloadVersion;
 
+// The minor version that allows partial update, e.g. kernel only update.
+extern const uint32_t kPartialUpdateMinorPayloadVersion;
+
 // The minimum and maximum supported minor version.
 extern const uint32_t kMinSupportedMinorPayloadVersion;
 extern const uint32_t kMaxSupportedMinorPayloadVersion;
diff --git a/payload_consumer/payload_metadata.cc b/payload_consumer/payload_metadata.cc
index 0952646..f797723 100644
--- a/payload_consumer/payload_metadata.cc
+++ b/payload_consumer/payload_metadata.cc
@@ -18,6 +18,7 @@
 
 #include <endian.h>
 
+#include <base/strings/stringprintf.h>
 #include <brillo/data_encoding.h>
 
 #include "update_engine/common/constants.h"
@@ -37,45 +38,50 @@
 const uint64_t PayloadMetadata::kDeltaManifestSizeSize = 8;
 const uint64_t PayloadMetadata::kDeltaMetadataSignatureSizeSize = 4;
 
-bool PayloadMetadata::GetMetadataSignatureSizeOffset(
-    uint64_t* out_offset) const {
-  if (GetMajorVersion() == kBrilloMajorPayloadVersion) {
-    *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize;
-    return true;
-  }
-  return false;
+uint64_t PayloadMetadata::GetMetadataSignatureSizeOffset() const {
+  return kDeltaManifestSizeOffset + kDeltaManifestSizeSize;
 }
 
-bool PayloadMetadata::GetManifestOffset(uint64_t* out_offset) const {
-  // Actual manifest begins right after the manifest size field or
-  // metadata signature size field if major version >= 2.
-  if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
-    *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize;
-    return true;
-  }
-  if (major_payload_version_ == kBrilloMajorPayloadVersion) {
-    *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize +
-                  kDeltaMetadataSignatureSizeSize;
-    return true;
-  }
-  LOG(ERROR) << "Unknown major payload version: " << major_payload_version_;
-  return false;
+uint64_t PayloadMetadata::GetManifestOffset() const {
+  // Actual manifest begins right after the metadata signature size field.
+  return kDeltaManifestSizeOffset + kDeltaManifestSizeSize +
+         kDeltaMetadataSignatureSizeSize;
 }
 
 MetadataParseResult PayloadMetadata::ParsePayloadHeader(
     const brillo::Blob& payload, ErrorCode* error) {
-  uint64_t manifest_offset;
+  return ParsePayloadHeader(payload.data(), payload.size(), error);
+}
+
+MetadataParseResult PayloadMetadata::ParsePayloadHeader(
+    const unsigned char* payload, size_t size, ErrorCode* error) {
   // Ensure we have data to cover the major payload version.
-  if (payload.size() < kDeltaManifestSizeOffset)
+  if (size < kDeltaManifestSizeOffset)
     return MetadataParseResult::kInsufficientData;
 
   // Validate the magic string.
-  if (memcmp(payload.data(), kDeltaMagic, sizeof(kDeltaMagic)) != 0) {
-    LOG(ERROR) << "Bad payload format -- invalid delta magic.";
+  if (memcmp(payload, kDeltaMagic, sizeof(kDeltaMagic)) != 0) {
+    LOG(ERROR) << "Bad payload format -- invalid delta magic: "
+               << base::StringPrintf("%02x%02x%02x%02x",
+                                     payload[0],
+                                     payload[1],
+                                     payload[2],
+                                     payload[3])
+               << " Expected: "
+               << base::StringPrintf("%02x%02x%02x%02x",
+                                     kDeltaMagic[0],
+                                     kDeltaMagic[1],
+                                     kDeltaMagic[2],
+                                     kDeltaMagic[3]);
     *error = ErrorCode::kDownloadInvalidMetadataMagicString;
     return MetadataParseResult::kError;
   }
 
+  uint64_t manifest_offset = GetManifestOffset();
+  // Check again with the manifest offset.
+  if (size < manifest_offset)
+    return MetadataParseResult::kInsufficientData;
+
   // Extract the payload version from the metadata.
   static_assert(sizeof(major_payload_version_) == kDeltaVersionSize,
                 "Major payload version size mismatch");
@@ -93,15 +99,6 @@
     return MetadataParseResult::kError;
   }
 
-  // Get the manifest offset now that we have payload version.
-  if (!GetManifestOffset(&manifest_offset)) {
-    *error = ErrorCode::kUnsupportedMajorPayloadVersion;
-    return MetadataParseResult::kError;
-  }
-  // Check again with the manifest offset.
-  if (payload.size() < manifest_offset)
-    return MetadataParseResult::kInsufficientData;
-
   // Next, parse the manifest size.
   static_assert(sizeof(manifest_size_) == kDeltaManifestSizeSize,
                 "manifest_size size mismatch");
@@ -113,30 +110,26 @@
   metadata_size_ = manifest_offset + manifest_size_;
   if (metadata_size_ < manifest_size_) {
     // Overflow detected.
+    LOG(ERROR) << "Overflow detected on manifest size.";
     *error = ErrorCode::kDownloadInvalidMetadataSize;
     return MetadataParseResult::kError;
   }
 
-  if (GetMajorVersion() == kBrilloMajorPayloadVersion) {
-    // Parse the metadata signature size.
-    static_assert(
-        sizeof(metadata_signature_size_) == kDeltaMetadataSignatureSizeSize,
-        "metadata_signature_size size mismatch");
-    uint64_t metadata_signature_size_offset;
-    if (!GetMetadataSignatureSizeOffset(&metadata_signature_size_offset)) {
-      *error = ErrorCode::kError;
-      return MetadataParseResult::kError;
-    }
-    memcpy(&metadata_signature_size_,
-           &payload[metadata_signature_size_offset],
-           kDeltaMetadataSignatureSizeSize);
-    metadata_signature_size_ = be32toh(metadata_signature_size_);
+  // Parse the metadata signature size.
+  static_assert(
+      sizeof(metadata_signature_size_) == kDeltaMetadataSignatureSizeSize,
+      "metadata_signature_size size mismatch");
+  uint64_t metadata_signature_size_offset = GetMetadataSignatureSizeOffset();
+  memcpy(&metadata_signature_size_,
+         &payload[metadata_signature_size_offset],
+         kDeltaMetadataSignatureSizeSize);
+  metadata_signature_size_ = be32toh(metadata_signature_size_);
 
-    if (metadata_size_ + metadata_signature_size_ < metadata_size_) {
-      // Overflow detected.
-      *error = ErrorCode::kDownloadInvalidMetadataSize;
-      return MetadataParseResult::kError;
-    }
+  if (metadata_size_ + metadata_signature_size_ < metadata_size_) {
+    // Overflow detected.
+    LOG(ERROR) << "Overflow detected on metadata and signature size.";
+    *error = ErrorCode::kDownloadInvalidMetadataSize;
+    return MetadataParseResult::kError;
   }
   return MetadataParseResult::kSuccess;
 }
@@ -148,10 +141,14 @@
 
 bool PayloadMetadata::GetManifest(const brillo::Blob& payload,
                                   DeltaArchiveManifest* out_manifest) const {
-  uint64_t manifest_offset;
-  if (!GetManifestOffset(&manifest_offset))
-    return false;
-  CHECK_GE(payload.size(), manifest_offset + manifest_size_);
+  return GetManifest(payload.data(), payload.size(), out_manifest);
+}
+
+bool PayloadMetadata::GetManifest(const unsigned char* payload,
+                                  size_t size,
+                                  DeltaArchiveManifest* out_manifest) const {
+  uint64_t manifest_offset = GetManifestOffset();
+  CHECK_GE(size, manifest_offset + manifest_size_);
   return out_manifest->ParseFromArray(&payload[manifest_offset],
                                       manifest_size_);
 }
@@ -176,7 +173,7 @@
                  << metadata_signature;
       return ErrorCode::kDownloadMetadataSignatureError;
     }
-  } else if (major_payload_version_ == kBrilloMajorPayloadVersion) {
+  } else {
     metadata_signature_protobuf.assign(
         payload.begin() + metadata_size_,
         payload.begin() + metadata_size_ + metadata_signature_size_);
@@ -225,4 +222,32 @@
   return ErrorCode::kSuccess;
 }
 
+bool PayloadMetadata::ParsePayloadFile(const string& payload_path,
+                                       DeltaArchiveManifest* manifest,
+                                       Signatures* metadata_signatures) {
+  brillo::Blob payload;
+  TEST_AND_RETURN_FALSE(
+      utils::ReadFileChunk(payload_path, 0, kMaxPayloadHeaderSize, &payload));
+  TEST_AND_RETURN_FALSE(ParsePayloadHeader(payload));
+
+  if (manifest != nullptr) {
+    TEST_AND_RETURN_FALSE(
+        utils::ReadFileChunk(payload_path,
+                             kMaxPayloadHeaderSize,
+                             GetMetadataSize() - kMaxPayloadHeaderSize,
+                             &payload));
+    TEST_AND_RETURN_FALSE(GetManifest(payload, manifest));
+  }
+
+  if (metadata_signatures != nullptr) {
+    payload.clear();
+    TEST_AND_RETURN_FALSE(utils::ReadFileChunk(
+        payload_path, GetMetadataSize(), GetMetadataSignatureSize(), &payload));
+    TEST_AND_RETURN_FALSE(
+        metadata_signatures->ParseFromArray(payload.data(), payload.size()));
+  }
+
+  return true;
+}
+
 }  // namespace chromeos_update_engine
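
The new ParsePayloadFile() helper wraps the three-step read shown above (header chunk, manifest, metadata signatures). A hedged usage sketch, assuming the update_engine headers are on the include path and eliding real error handling:

#include <string>

#include <base/logging.h>

#include "update_engine/payload_consumer/payload_metadata.h"

bool DumpPayloadInfo(const std::string& payload_path) {
  chromeos_update_engine::PayloadMetadata metadata;
  chromeos_update_engine::DeltaArchiveManifest manifest;
  chromeos_update_engine::Signatures metadata_signatures;
  // One call reads the header, manifest, and metadata signatures.
  if (!metadata.ParsePayloadFile(payload_path, &manifest, &metadata_signatures))
    return false;
  LOG(INFO) << "metadata size: " << metadata.GetMetadataSize()
            << ", metadata signature size: "
            << metadata.GetMetadataSignatureSize();
  return true;
}
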
diff --git a/payload_consumer/payload_metadata.h b/payload_consumer/payload_metadata.h
index 75ef8f9..f23b668 100644
--- a/payload_consumer/payload_metadata.h
+++ b/payload_consumer/payload_metadata.h
@@ -56,6 +56,9 @@
   // the payload.
   MetadataParseResult ParsePayloadHeader(const brillo::Blob& payload,
                                          ErrorCode* error);
+  MetadataParseResult ParsePayloadHeader(const unsigned char* payload,
+                                         size_t size,
+                                         ErrorCode* error);
   // Simpler version of the above, returns true on success.
   bool ParsePayloadHeader(const brillo::Blob& payload);
 
@@ -63,7 +66,7 @@
   // |metadata_signature| (if present) or the metadata signature in payload
   // itself (if present). Returns ErrorCode::kSuccess on match or a suitable
   // error code otherwise. This method must be called before any part of the
-  // metadata is parsed so that a man-in-the-middle attack on the SSL connection
+  // metadata is parsed so that an on-path attack on the SSL connection
   // to the payload server doesn't exploit any vulnerability in the code that
   // parses the protocol buffer.
   ErrorCode ValidateMetadataSignature(
@@ -88,15 +91,24 @@
   bool GetManifest(const brillo::Blob& payload,
                    DeltaArchiveManifest* out_manifest) const;
 
- private:
-  // Set |*out_offset| to the byte offset at which the manifest protobuf begins
-  // in a payload. Return true on success, false if the offset is unknown.
-  bool GetManifestOffset(uint64_t* out_offset) const;
+  bool GetManifest(const unsigned char* payload,
+                   size_t size,
+                   DeltaArchiveManifest* out_manifest) const;
 
-  // Set |*out_offset| to the byte offset where the size of the metadata
-  // signature is stored in a payload. Return true on success, if this field is
-  // not present in the payload, return false.
-  bool GetMetadataSignatureSizeOffset(uint64_t* out_offset) const;
+  // Parses a payload file |payload_path| and prepares the metadata properties,
+  // manifest and metadata signatures. Can be used as a convenience utility to
+  // get the payload information without manually performing each step.
+  bool ParsePayloadFile(const std::string& payload_path,
+                        DeltaArchiveManifest* manifest,
+                        Signatures* metadata_signatures);
+
+ private:
+  // Returns the byte offset at which the manifest protobuf begins in a payload.
+  uint64_t GetManifestOffset() const;
+
+  // Returns the byte offset where the size of the metadata signature is stored
+  // in a payload.
+  uint64_t GetMetadataSignatureSizeOffset() const;
 
   uint64_t metadata_size_{0};
   uint64_t manifest_size_{0};
diff --git a/payload_consumer/payload_verifier.cc b/payload_consumer/payload_verifier.cc
index 24e337e..8a3ea65 100644
--- a/payload_consumer/payload_verifier.cc
+++ b/payload_consumer/payload_verifier.cc
@@ -172,9 +172,7 @@
       if (padded_hash_data == sig_hash_data) {
         return true;
       }
-    }
-
-    if (key_type == EVP_PKEY_EC) {
+    } else if (key_type == EVP_PKEY_EC) {
       EC_KEY* ec_key = EVP_PKEY_get0_EC_KEY(public_key.get());
       TEST_AND_RETURN_FALSE(ec_key != nullptr);
       if (ECDSA_verify(0,
@@ -185,10 +183,10 @@
                        ec_key) == 1) {
         return true;
       }
+    } else {
+      LOG(ERROR) << "Unsupported key type " << key_type;
+      return false;
     }
-
-    LOG(ERROR) << "Unsupported key type " << key_type;
-    return false;
   }
   LOG(INFO) << "Failed to verify the signature with " << public_keys_.size()
             << " keys.";
@@ -203,7 +201,7 @@
   //
   // openssl rsautl -verify -pubin -inkey <(echo pem_public_key)
   //   -in |sig_data| -out |out_hash_data|
-  RSA* rsa = EVP_PKEY_get0_RSA(public_key);
+  RSA* rsa = EVP_PKEY_get0_RSA(const_cast<EVP_PKEY*>(public_key));
 
   TEST_AND_RETURN_FALSE(rsa != nullptr);
   unsigned int keysize = RSA_size(rsa);
diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc
index c08cfc2..051ccbf 100644
--- a/payload_consumer/postinstall_runner_action.cc
+++ b/payload_consumer/postinstall_runner_action.cc
@@ -28,6 +28,7 @@
 #include <base/files/file_path.h>
 #include <base/files/file_util.h>
 #include <base/logging.h>
+#include <base/stl_util.h>
 #include <base/strings/string_split.h>
 #include <base/strings/string_util.h>
 
@@ -49,17 +50,48 @@
 
 namespace chromeos_update_engine {
 
-using brillo::MessageLoop;
 using std::string;
 using std::vector;
 
+PostinstallRunnerAction::PostinstallRunnerAction(
+    BootControlInterface* boot_control, HardwareInterface* hardware)
+    : boot_control_(boot_control), hardware_(hardware) {
+#ifdef __ANDROID__
+  fs_mount_dir_ = "/postinstall";
+#else   // __ANDROID__
+  base::FilePath temp_dir;
+  TEST_AND_RETURN(base::CreateNewTempDirectory("au_postint_mount", &temp_dir));
+  fs_mount_dir_ = temp_dir.value();
+#endif  // __ANDROID__
+  CHECK(!fs_mount_dir_.empty());
+  LOG(INFO) << "postinstall mount point: " << fs_mount_dir_;
+}
+
 void PostinstallRunnerAction::PerformAction() {
   CHECK(HasInputObject());
+  CHECK(boot_control_);
   install_plan_ = GetInputObject();
 
-  // Currently we're always powerwashing when rolling back.
+  auto dynamic_control = boot_control_->GetDynamicPartitionControl();
+  CHECK(dynamic_control);
+
+  // Mount snapshot partitions for Virtual AB Compression.
+  if (dynamic_control->UpdateUsesSnapshotCompression()) {
+    // All CowWriters must be closed before calling MapAllPartitions() to map
+    // the snapshot devices.
+    dynamic_control->UnmapAllPartitions();
+    if (!dynamic_control->MapAllPartitions()) {
+      return CompletePostinstall(ErrorCode::kPostInstallMountError);
+    }
+  }
+
+  // We always powerwash when rolling back; however, policy can determine
+  // whether this is a full/normal powerwash or a special rollback powerwash
+  // that retains a small amount of system state such as enrollment and
+  // network configuration. In both cases all user accounts are deleted.
   if (install_plan_.powerwash_required || install_plan_.is_rollback) {
-    if (hardware_->SchedulePowerwash(install_plan_.is_rollback)) {
+    if (hardware_->SchedulePowerwash(
+            install_plan_.rollback_data_save_requested)) {
       powerwash_scheduled_ = true;
     } else {
       return CompletePostinstall(ErrorCode::kPostinstallPowerwashError);
@@ -108,8 +140,7 @@
   const InstallPlan::Partition& partition =
       install_plan_.partitions[current_partition_];
 
-  const string mountable_device =
-      utils::MakePartitionNameForMount(partition.target_path);
+  const string mountable_device = partition.readonly_target_path;
   if (mountable_device.empty()) {
     LOG(ERROR) << "Cannot make mountable device from " << partition.target_path;
     return CompletePostinstall(ErrorCode::kPostinstallRunnerError);
@@ -118,14 +149,12 @@
   // Perform post-install for the current_partition_ partition. At this point we
   // need to call CompletePartitionPostinstall to complete the operation and
   // cleanup.
-#ifdef __ANDROID__
-  fs_mount_dir_ = "/postinstall";
-#else   // __ANDROID__
-  base::FilePath temp_dir;
-  TEST_AND_RETURN(base::CreateNewTempDirectory("au_postint_mount", &temp_dir));
-  fs_mount_dir_ = temp_dir.value();
-#endif  // __ANDROID__
 
+  if (!utils::FileExists(fs_mount_dir_.c_str())) {
+    LOG(ERROR) << "Mount point " << fs_mount_dir_
+               << " does not exist, mount call will fail";
+    return CompletePostinstall(ErrorCode::kPostinstallRunnerError);
+  }
   // Double check that the fs_mount_dir is not busy with a previous mounted
   // filesystem from a previous crashed postinstall step.
   if (utils::IsMountpoint(fs_mount_dir_)) {
@@ -162,11 +191,12 @@
   }
 #endif  // __ANDROID__
 
-  if (!utils::MountFilesystem(mountable_device,
-                              fs_mount_dir_,
-                              MS_RDONLY,
-                              partition.filesystem_type,
-                              constants::kPostinstallMountOptions)) {
+  if (!utils::MountFilesystem(
+          mountable_device,
+          fs_mount_dir_,
+          MS_RDONLY,
+          partition.filesystem_type,
+          hardware_->GetPartitionMountOptions(partition.name))) {
     return CompletePartitionPostinstall(
         1, "Error mounting the device " + mountable_device);
   }
@@ -215,13 +245,10 @@
     PLOG(ERROR) << "Unable to set non-blocking I/O mode on fd " << progress_fd_;
   }
 
-  progress_task_ = MessageLoop::current()->WatchFileDescriptor(
-      FROM_HERE,
+  progress_controller_ = base::FileDescriptorWatcher::WatchReadable(
       progress_fd_,
-      MessageLoop::WatchMode::kWatchRead,
-      true,
-      base::Bind(&PostinstallRunnerAction::OnProgressFdReady,
-                 base::Unretained(this)));
+      base::BindRepeating(&PostinstallRunnerAction::OnProgressFdReady,
+                          base::Unretained(this)));
 }
 
 void PostinstallRunnerAction::OnProgressFdReady() {
@@ -231,7 +258,7 @@
     bytes_read = 0;
     bool eof;
     bool ok =
-        utils::ReadAll(progress_fd_, buf, arraysize(buf), &bytes_read, &eof);
+        utils::ReadAll(progress_fd_, buf, base::size(buf), &bytes_read, &eof);
     progress_buffer_.append(buf, bytes_read);
     // Process every line.
     vector<string> lines = base::SplitString(
@@ -246,8 +273,7 @@
     if (!ok || eof) {
       // There was either an error or an EOF condition, so we are done watching
       // the file descriptor.
-      MessageLoop::current()->CancelTask(progress_task_);
-      progress_task_ = MessageLoop::kTaskIdNull;
+      progress_controller_.reset();
       return;
     }
   } while (bytes_read);
@@ -284,17 +310,18 @@
 void PostinstallRunnerAction::Cleanup() {
   utils::UnmountFilesystem(fs_mount_dir_);
 #ifndef __ANDROID__
-  if (!base::DeleteFile(base::FilePath(fs_mount_dir_), false)) {
+#if BASE_VER < 800000
+  if (!base::DeleteFile(base::FilePath(fs_mount_dir_), true)) {
+#else
+  if (!base::DeleteFile(base::FilePath(fs_mount_dir_))) {
+#endif
     PLOG(WARNING) << "Not removing temporary mountpoint " << fs_mount_dir_;
   }
-#endif  // !__ANDROID__
-  fs_mount_dir_.clear();
+#endif
 
   progress_fd_ = -1;
-  if (progress_task_ != MessageLoop::kTaskIdNull) {
-    MessageLoop::current()->CancelTask(progress_task_);
-    progress_task_ = MessageLoop::kTaskIdNull;
-  }
+  progress_controller_.reset();
+
   progress_buffer_.clear();
 }
 
@@ -348,12 +375,19 @@
       } else {
         // Schedules warm reset on next reboot, ignores the error.
         hardware_->SetWarmReset(true);
+        // Sets the vbmeta digest for the other slot to boot into.
+        hardware_->SetVbmetaDigestForInactiveSlot(false);
       }
     } else {
       error_code = ErrorCode::kUpdatedButNotActive;
     }
   }
 
+  auto dynamic_control = boot_control_->GetDynamicPartitionControl();
+  CHECK(dynamic_control);
+  dynamic_control->UnmapAllPartitions();
+  LOG(INFO) << "Unmapped all partitions.";
+
   ScopedActionCompleter completer(processor_, this);
   completer.set_code(error_code);
 
diff --git a/payload_consumer/postinstall_runner_action.h b/payload_consumer/postinstall_runner_action.h
index b9b7069..178d72a 100644
--- a/payload_consumer/postinstall_runner_action.h
+++ b/payload_consumer/postinstall_runner_action.h
@@ -17,9 +17,12 @@
 #ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_POSTINSTALL_RUNNER_ACTION_H_
 #define UPDATE_ENGINE_PAYLOAD_CONSUMER_POSTINSTALL_RUNNER_ACTION_H_
 
+#include <memory>
 #include <string>
+#include <utility>
 #include <vector>
 
+#include <base/files/file_descriptor_watcher_posix.h>
 #include <brillo/message_loops/message_loop.h>
 #include <gtest/gtest_prod.h>
 
@@ -38,8 +41,7 @@
 class PostinstallRunnerAction : public InstallPlanAction {
  public:
   PostinstallRunnerAction(BootControlInterface* boot_control,
-                          HardwareInterface* hardware)
-      : boot_control_(boot_control), hardware_(hardware) {}
+                          HardwareInterface* hardware);
 
   // InstallPlanAction overrides.
   void PerformAction() override;
@@ -66,6 +68,9 @@
   friend class PostinstallRunnerActionTest;
   FRIEND_TEST(PostinstallRunnerActionTest, ProcessProgressLineTest);
 
+  // Exposed for testing purposes only.
+  void SetMountDir(std::string dir) { fs_mount_dir_ = std::move(dir); }
+
   void PerformPartitionPostinstall();
 
   // Called whenever the |progress_fd_| has data available to read.
@@ -95,8 +100,6 @@
   // ready. Called when the post-install script was run for all the partitions.
   void CompletePostinstall(ErrorCode error_code);
 
-  InstallPlan install_plan_;
-
   // The path where the filesystem will be mounted during post-install.
   std::string fs_mount_dir_;
 
@@ -139,7 +142,8 @@
   // The parent progress file descriptor used to watch for progress reports from
   // the postinstall program and the task watching for them.
   int progress_fd_{-1};
-  brillo::MessageLoop::TaskId progress_task_{brillo::MessageLoop::kTaskIdNull};
+
+  std::unique_ptr<base::FileDescriptorWatcher::Controller> progress_controller_;
 
   // A buffer of a partial read line from the progress file descriptor.
   std::string progress_buffer_;
diff --git a/payload_consumer/postinstall_runner_action_unittest.cc b/payload_consumer/postinstall_runner_action_unittest.cc
index e9313f1..792ee28 100644
--- a/payload_consumer/postinstall_runner_action_unittest.cc
+++ b/payload_consumer/postinstall_runner_action_unittest.cc
@@ -26,9 +26,14 @@
 
 #include <base/bind.h>
 #include <base/files/file_util.h>
+#if BASE_VER < 780000  // Android
 #include <base/message_loop/message_loop.h>
+#endif  // BASE_VER < 780000
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
+#if BASE_VER >= 780000  // CrOS
+#include <base/task/single_thread_task_executor.h>
+#endif  // BASE_VER >= 780000
 #include <brillo/message_loops/base_message_loop.h>
 #include <brillo/message_loops/message_loop_utils.h>
 #include <gmock/gmock.h>
@@ -40,7 +45,7 @@
 #include "update_engine/common/subprocess.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/mock_payload_state.h"
+#include "update_engine/cros/mock_payload_state.h"
 
 using brillo::MessageLoop;
 using chromeos_update_engine::test_utils::ScopedLoopbackDeviceBinder;
@@ -100,7 +105,8 @@
   void RunPostinstallAction(const string& device_path,
                             const string& postinstall_program,
                             bool powerwash_required,
-                            bool is_rollback);
+                            bool is_rollback,
+                            bool save_rollback_data);
 
   void RunPostinstallActionWithInstallPlan(const InstallPlan& install_plan);
 
@@ -143,13 +149,25 @@
           base::TimeDelta::FromMilliseconds(10));
     } else {
       CHECK(processor_);
-      processor_->StopProcessing();
+      // Must use |PostDelayedTask()| here so that |FileDescriptorWatcher|
+      // doesn't leak memory; do not call |StopProcessing()| directly.
+      loop_.PostDelayedTask(
+          FROM_HERE,
+          base::Bind(
+              [](ActionProcessor* processor) { processor->StopProcessing(); },
+              base::Unretained(processor_)),
+          base::TimeDelta::FromMilliseconds(100));
     }
   }
 
  protected:
+#if BASE_VER < 780000  // Android
   base::MessageLoopForIO base_loop_;
   brillo::BaseMessageLoop loop_{&base_loop_};
+#else   // CrOS
+  base::SingleThreadTaskExecutor base_loop_{base::MessagePumpType::IO};
+  brillo::BaseMessageLoop loop_{base_loop_.task_runner()};
+#endif  // BASE_VER < 780000
   brillo::AsynchronousSignalHandler async_signal_handler_;
   Subprocess subprocess_;
 
@@ -172,10 +190,12 @@
     const string& device_path,
     const string& postinstall_program,
     bool powerwash_required,
-    bool is_rollback) {
+    bool is_rollback,
+    bool save_rollback_data) {
   InstallPlan::Partition part;
   part.name = "part";
   part.target_path = device_path;
+  part.readonly_target_path = device_path;
   part.run_postinstall = true;
   part.postinstall_path = postinstall_program;
   InstallPlan install_plan;
@@ -183,6 +203,7 @@
   install_plan.download_url = "http://127.0.0.1:8080/update";
   install_plan.powerwash_required = powerwash_required;
   install_plan.is_rollback = is_rollback;
+  install_plan.rollback_data_save_requested = save_rollback_data;
   RunPostinstallActionWithInstallPlan(install_plan);
 }
 
@@ -195,6 +216,9 @@
   auto runner_action = std::make_unique<PostinstallRunnerAction>(
       &fake_boot_control_, &fake_hardware_);
   postinstall_action_ = runner_action.get();
+  base::FilePath temp_dir;
+  TEST_AND_RETURN(base::CreateNewTempDirectory("postinstall", &temp_dir));
+  postinstall_action_->SetMountDir(temp_dir.value());
   runner_action->set_delegate(setup_action_delegate_);
   BondActions(feeder_action.get(), runner_action.get());
   auto collector_action =
@@ -217,7 +241,7 @@
   EXPECT_TRUE(processor_delegate_.processing_stopped_called_ ||
               processor_delegate_.processing_done_called_);
   if (processor_delegate_.processing_done_called_) {
-    // Sanity check that the code was set when the processor finishes.
+    // Validation check that the code was set when the processor finishes.
     EXPECT_TRUE(processor_delegate_.code_set_);
   }
 }
@@ -256,7 +280,8 @@
 TEST_F(PostinstallRunnerActionTest, RunAsRootSimpleTest) {
   ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
 
-  RunPostinstallAction(loop.dev(), kPostinstallDefaultScript, false, false);
+  RunPostinstallAction(
+      loop.dev(), kPostinstallDefaultScript, false, false, false);
   EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
   EXPECT_TRUE(processor_delegate_.processing_done_called_);
 
@@ -267,7 +292,7 @@
 
 TEST_F(PostinstallRunnerActionTest, RunAsRootRunSymlinkFileTest) {
   ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
-  RunPostinstallAction(loop.dev(), "bin/postinst_link", false, false);
+  RunPostinstallAction(loop.dev(), "bin/postinst_link", false, false, false);
   EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
 }
 
@@ -277,6 +302,7 @@
   RunPostinstallAction(loop.dev(),
                        "bin/postinst_example",
                        /*powerwash_required=*/true,
+                       false,
                        false);
   EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
 
@@ -285,14 +311,31 @@
   EXPECT_FALSE(fake_hardware_.GetIsRollbackPowerwashScheduled());
 }
 
-TEST_F(PostinstallRunnerActionTest, RunAsRootRollbackTest) {
+TEST_F(PostinstallRunnerActionTest, RunAsRootRollbackTestNoDataSave) {
   ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
 
   // Run a simple postinstall program, rollback happened.
   RunPostinstallAction(loop.dev(),
                        "bin/postinst_example",
                        false,
-                       /*is_rollback=*/true);
+                       /*is_rollback=*/true,
+                       /*save_rollback_data=*/false);
+  EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
+
+  // Check that powerwash was scheduled and that it's NOT a rollback powerwash.
+  EXPECT_TRUE(fake_hardware_.IsPowerwashScheduled());
+  EXPECT_FALSE(fake_hardware_.GetIsRollbackPowerwashScheduled());
+}
+
+TEST_F(PostinstallRunnerActionTest, RunAsRootRollbackTestWithDataSave) {
+  ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
+
+  // Run a simple postinstall program, rollback happened.
+  RunPostinstallAction(loop.dev(),
+                       "bin/postinst_example",
+                       false,
+                       /*is_rollback=*/true,
+                       /*save_rollback_data=*/true);
   EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
 
   // Check that powerwash was scheduled and that it's a rollback powerwash.
@@ -303,7 +346,8 @@
 // Runs postinstall from a partition file that doesn't mount, so it should
 // fail.
 TEST_F(PostinstallRunnerActionTest, RunAsRootCantMountTest) {
-  RunPostinstallAction("/dev/null", kPostinstallDefaultScript, false, false);
+  RunPostinstallAction(
+      "/dev/null", kPostinstallDefaultScript, false, false, false);
   EXPECT_EQ(ErrorCode::kPostinstallRunnerError, processor_delegate_.code_);
 
   // In case of failure, Postinstall should not signal a powerwash even if it
@@ -316,6 +360,7 @@
   InstallPlan::Partition part;
   part.name = "part";
   part.target_path = "/dev/null";
+  part.readonly_target_path = "/dev/null";
   part.run_postinstall = true;
   part.postinstall_path = kPostinstallDefaultScript;
   part.postinstall_optional = true;
@@ -337,7 +382,7 @@
 // fail.
 TEST_F(PostinstallRunnerActionTest, RunAsRootErrScriptTest) {
   ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
-  RunPostinstallAction(loop.dev(), "bin/postinst_fail1", false, false);
+  RunPostinstallAction(loop.dev(), "bin/postinst_fail1", false, false, false);
   EXPECT_EQ(ErrorCode::kPostinstallRunnerError, processor_delegate_.code_);
 }
 
@@ -345,7 +390,7 @@
 // UMA with a different error code. Test those cases are properly detected.
 TEST_F(PostinstallRunnerActionTest, RunAsRootFirmwareBErrScriptTest) {
   ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
-  RunPostinstallAction(loop.dev(), "bin/postinst_fail3", false, false);
+  RunPostinstallAction(loop.dev(), "bin/postinst_fail3", false, false, false);
   EXPECT_EQ(ErrorCode::kPostinstallBootedFromFirmwareB,
             processor_delegate_.code_);
 }
@@ -353,16 +398,26 @@
 // Check that you can't specify an absolute path.
 TEST_F(PostinstallRunnerActionTest, RunAsRootAbsolutePathNotAllowedTest) {
   ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
-  RunPostinstallAction(loop.dev(), "/etc/../bin/sh", false, false);
+  RunPostinstallAction(loop.dev(), "/etc/../bin/sh", false, false, false);
   EXPECT_EQ(ErrorCode::kPostinstallRunnerError, processor_delegate_.code_);
 }
 
 #ifdef __ANDROID__
-// Check that the postinstall file is relabeled to the postinstall label.
+// Check that the postinstall file is labeled with the postinstall_exec label.
 // SElinux labels are only set on Android.
 TEST_F(PostinstallRunnerActionTest, RunAsRootCheckFileContextsTest) {
   ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
-  RunPostinstallAction(loop.dev(), "bin/self_check_context", false, false);
+  RunPostinstallAction(
+      loop.dev(), "bin/self_check_context", false, false, false);
+  EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
+}
+
+// Check that the postinstall file is relabeled with the default postinstall
+// label. SELinux labels are only set on Android.
+TEST_F(PostinstallRunnerActionTest, RunAsRootCheckDefaultFileContextsTest) {
+  ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
+  RunPostinstallAction(
+      loop.dev(), "bin/self_check_default_context", false, false, false);
   EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
 }
 #endif  // __ANDROID__
@@ -375,7 +430,7 @@
   loop_.PostTask(FROM_HERE,
                  base::Bind(&PostinstallRunnerActionTest::SuspendRunningAction,
                             base::Unretained(this)));
-  RunPostinstallAction(loop.dev(), "bin/postinst_suspend", false, false);
+  RunPostinstallAction(loop.dev(), "bin/postinst_suspend", false, false, false);
   // postinst_suspend returns 0 only if it was suspended at some point.
   EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
   EXPECT_TRUE(processor_delegate_.processing_done_called_);
@@ -387,7 +442,7 @@
 
   // Wait for the action to start and then cancel it.
   CancelWhenStarted();
-  RunPostinstallAction(loop.dev(), "bin/postinst_suspend", false, false);
+  RunPostinstallAction(loop.dev(), "bin/postinst_suspend", false, false, false);
   // When canceling the action, the action never finished and therefore we had
   // a ProcessingStopped call instead.
   EXPECT_FALSE(processor_delegate_.code_set_);
@@ -410,7 +465,8 @@
 
   ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
   setup_action_delegate_ = &mock_delegate_;
-  RunPostinstallAction(loop.dev(), "bin/postinst_progress", false, false);
+  RunPostinstallAction(
+      loop.dev(), "bin/postinst_progress", false, false, false);
   EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
 }
 
diff --git a/payload_consumer/snapshot_extent_writer.cc b/payload_consumer/snapshot_extent_writer.cc
new file mode 100644
index 0000000..242e726
--- /dev/null
+++ b/payload_consumer/snapshot_extent_writer.cc
@@ -0,0 +1,123 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/snapshot_extent_writer.h"
+
+#include <algorithm>
+#include <cstdint>
+
+#include <libsnapshot/cow_writer.h>
+
+#include "update_engine/update_metadata.pb.h"
+
+namespace chromeos_update_engine {
+
+SnapshotExtentWriter::SnapshotExtentWriter(
+    android::snapshot::ICowWriter* cow_writer)
+    : cow_writer_(cow_writer) {
+  CHECK_NE(cow_writer, nullptr);
+}
+
+SnapshotExtentWriter::~SnapshotExtentWriter() {
+  CHECK(buffer_.empty()) << buffer_.size();
+}
+
+bool SnapshotExtentWriter::Init(
+    const google::protobuf::RepeatedPtrField<Extent>& extents,
+    uint32_t block_size) {
+  extents_ = extents;
+  cur_extent_idx_ = 0;
+  buffer_.clear();
+  buffer_.reserve(block_size);
+  block_size_ = block_size;
+  return true;
+}
+
+size_t SnapshotExtentWriter::ConsumeWithBuffer(const uint8_t* data,
+                                               size_t count) {
+  CHECK_LT(cur_extent_idx_, static_cast<size_t>(extents_.size()));
+  const auto& cur_extent = extents_[cur_extent_idx_];
+  const auto cur_extent_size = cur_extent.num_blocks() * block_size_;
+
+  if (buffer_.empty() && count >= cur_extent_size) {
+    if (!cow_writer_->AddRawBlocks(
+            cur_extent.start_block(), data, cur_extent_size)) {
+      LOG(ERROR) << "AddRawBlocks(" << cur_extent.start_block() << ", " << data
+                 << ", " << cur_extent_size << ") failed.";
+      // The return value is expected to be greater than 0; return 0 to signal
+      // an error condition.
+      return 0;
+    }
+    if (!next_extent()) {
+      CHECK_EQ(count, cur_extent_size)
+          << "Exhausted all blocks, but still have " << count - cur_extent_size
+          << " bytes left";
+    }
+    return cur_extent_size;
+  }
+  CHECK_LT(buffer_.size(), cur_extent_size)
+      << "Data left in buffer should never be >= cur_extent_size, otherwise "
+         "we should have sent that data to CowWriter. Buffer size: "
+      << buffer_.size() << " current extent size: " << cur_extent_size;
+  size_t bytes_to_copy =
+      std::min<size_t>(count, cur_extent_size - buffer_.size());
+  CHECK_GT(bytes_to_copy, 0U);
+
+  buffer_.insert(buffer_.end(), data, data + bytes_to_copy);
+  CHECK_LE(buffer_.size(), cur_extent_size);
+
+  if (buffer_.size() == cur_extent_size) {
+    if (!cow_writer_->AddRawBlocks(
+            cur_extent.start_block(), buffer_.data(), buffer_.size())) {
+      LOG(ERROR) << "AddRawBlocks(" << cur_extent.start_block() << ", "
+                 << buffer_.data() << ", " << buffer_.size() << ") failed.";
+      return 0;
+    }
+    buffer_.clear();
+    if (!next_extent()) {
+      CHECK_EQ(count, bytes_to_copy) << "Exhausted all blocks, but still have "
+                                     << count - bytes_to_copy << " bytes left";
+    }
+  }
+  return bytes_to_copy;
+}
+
+// Returns true on success.
+// This will construct a COW_REPLACE operation and forward it to CowWriter. It
+// is important that the caller does not perform SOURCE_COPY operations through
+// this class; otherwise raw data will be stored. The caller should use
+// COW_COPY whenever possible.
+bool SnapshotExtentWriter::Write(const void* bytes, size_t count) {
+  if (count == 0) {
+    return true;
+  }
+  CHECK_NE(extents_.size(), 0);
+
+  auto data = static_cast<const uint8_t*>(bytes);
+  while (count > 0) {
+    auto bytes_written = ConsumeWithBuffer(data, count);
+    TEST_AND_RETURN_FALSE(bytes_written > 0);
+    data += bytes_written;
+    count -= bytes_written;
+  }
+  return true;
+}
+
+bool SnapshotExtentWriter::next_extent() {
+  cur_extent_idx_++;
+  return cur_extent_idx_ < static_cast<size_t>(extents_.size());
+}
+}  // namespace chromeos_update_engine
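
The core of ConsumeWithBuffer() above is a simple staging rule: a write covering a whole extent bypasses the buffer, while shorter writes accumulate until exactly one extent's worth of data is available and is then flushed in a single AddRawBlocks() call. Here is a standalone sketch of that rule; ExtentBufferSketch is invented for illustration, and the |flushed| vector stands in for ICowWriter::AddRawBlocks().

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

class ExtentBufferSketch {
 public:
  explicit ExtentBufferSketch(size_t extent_bytes)
      : extent_bytes_(extent_bytes) {}

  // Returns how many bytes of |data| were consumed, mirroring the contract of
  // ConsumeWithBuffer() (0 would signal an error in the real code).
  size_t Feed(const uint8_t* data,
              size_t count,
              std::vector<std::vector<uint8_t>>* flushed) {
    if (buffer_.empty() && count >= extent_bytes_) {
      // Fast path: a whole extent is available, skip the buffer entirely.
      flushed->emplace_back(data, data + extent_bytes_);
      return extent_bytes_;
    }
    // Slow path: top up the buffer and flush only when it holds a full extent.
    size_t n = std::min(count, extent_bytes_ - buffer_.size());
    buffer_.insert(buffer_.end(), data, data + n);
    if (buffer_.size() == extent_bytes_) {
      flushed->push_back(buffer_);
      buffer_.clear();
    }
    return n;
  }

 private:
  size_t extent_bytes_;
  std::vector<uint8_t> buffer_;
};
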
diff --git a/payload_consumer/snapshot_extent_writer.h b/payload_consumer/snapshot_extent_writer.h
new file mode 100644
index 0000000..c3a948e
--- /dev/null
+++ b/payload_consumer/snapshot_extent_writer.h
@@ -0,0 +1,59 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_SNAPSHOT_EXTENT_WRITER_H_
+#define UPDATE_ENGINE_SNAPSHOT_EXTENT_WRITER_H_
+
+#include <cstdint>
+#include <vector>
+
+#include <libsnapshot/cow_writer.h>
+
+#include "update_engine/payload_consumer/extent_writer.h"
+#include "update_engine/update_metadata.pb.h"
+
+namespace chromeos_update_engine {
+
+class SnapshotExtentWriter : public chromeos_update_engine::ExtentWriter {
+ public:
+  explicit SnapshotExtentWriter(android::snapshot::ICowWriter* cow_writer);
+  ~SnapshotExtentWriter();
+  // Returns true on success.
+  bool Init(const google::protobuf::RepeatedPtrField<Extent>& extents,
+            uint32_t block_size) override;
+  // Returns true on success.
+  // This will construct a COW_REPLACE operation and forward it to CowWriter.
+  // It is important that the caller does not perform SOURCE_COPY operations
+  // through this class; otherwise raw data will be stored. The caller should
+  // use COW_COPY whenever possible.
+  bool Write(const void* bytes, size_t count) override;
+
+ private:
+  bool next_extent();
+  [[nodiscard]] size_t ConsumeWithBuffer(const uint8_t* bytes, size_t count);
+  // It's a non-owning pointer, because PartitionWriter owns the CowWriter. This
+  // allows us to use a single instance of CowWriter for all operations applied
+  // to the same partition.
+  android::snapshot::ICowWriter* cow_writer_;
+  google::protobuf::RepeatedPtrField<Extent> extents_;
+  size_t cur_extent_idx_;
+  std::vector<uint8_t> buffer_;
+  size_t block_size_;
+};
+
+}  // namespace chromeos_update_engine
+
+#endif
diff --git a/payload_consumer/snapshot_extent_writer_unittest.cc b/payload_consumer/snapshot_extent_writer_unittest.cc
new file mode 100644
index 0000000..2201043
--- /dev/null
+++ b/payload_consumer/snapshot_extent_writer_unittest.cc
@@ -0,0 +1,180 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <array>
+#include <cstring>
+#include <map>
+#include <numeric>
+#include <vector>
+
+#include <gtest/gtest.h>
+#include <google/protobuf/message_lite.h>
+#include <libsnapshot/cow_writer.h>
+
+#include "update_engine/payload_consumer/snapshot_extent_writer.h"
+#include "update_engine/payload_generator/delta_diff_generator.h"
+#include "update_engine/update_metadata.pb.h"
+
+namespace chromeos_update_engine {
+
+class FakeCowWriter : public android::snapshot::ICowWriter {
+ public:
+  struct CowOp {
+    enum { COW_COPY, COW_REPLACE, COW_ZERO } type;
+    std::vector<unsigned char> data;
+    union {
+      size_t source_block;
+      size_t num_blocks;
+    };
+  };
+  using ICowWriter::ICowWriter;
+  ~FakeCowWriter() = default;
+
+  bool EmitCopy(uint64_t new_block, uint64_t old_block) override {
+    operations_[new_block] = {.type = CowOp::COW_COPY,
+                              .source_block = static_cast<size_t>(old_block)};
+    return true;
+  }
+  bool EmitRawBlocks(uint64_t new_block_start,
+                     const void* data,
+                     size_t size) override {
+    auto&& op = operations_[new_block_start];
+    const auto uint8_ptr = static_cast<const unsigned char*>(data);
+    op.data.insert(op.data.end(), uint8_ptr, uint8_ptr + size);
+    return true;
+  }
+  bool EmitZeroBlocks(uint64_t new_block_start, uint64_t num_blocks) override {
+    operations_[new_block_start] = {.type = CowOp::COW_ZERO};
+    return true;
+  }
+  bool Finalize() override {
+    finalize_called_ = true;
+    return true;
+  }
+
+  bool EmitLabel(uint64_t label) {
+    label_count_++;
+    return true;
+  }
+
+  // Return number of bytes the cow image occupies on disk.
+  uint64_t GetCowSize() override {
+    return std::accumulate(
+        operations_.begin(), operations_.end(), 0, [](auto&& acc, auto&& op) {
+          return acc + op.second.data.size();
+        });
+  }
+  bool Contains(size_t block) {
+    return operations_.find(block) != operations_.end();
+  }
+  bool finalize_called_ = true;
+  size_t label_count_ = 0;
+  std::map<size_t, CowOp> operations_;
+};
+
+class SnapshotExtentWriterTest : public ::testing::Test {
+ public:
+  void SetUp() override {}
+
+ protected:
+  android::snapshot::CowOptions options_ = {
+      .block_size = static_cast<uint32_t>(kBlockSize)};
+  FakeCowWriter cow_writer_{options_};
+  SnapshotExtentWriter writer_{&cow_writer_};
+};
+
+void AddExtent(google::protobuf::RepeatedPtrField<Extent>* extents,
+               size_t start_block,
+               size_t num_blocks) {
+  auto&& extent = extents->Add();
+  extent->set_start_block(start_block);
+  extent->set_num_blocks(num_blocks);
+}
+
+TEST_F(SnapshotExtentWriterTest, BufferWrites) {
+  google::protobuf::RepeatedPtrField<Extent> extents;
+  AddExtent(&extents, 123, 1);
+  writer_.Init(extents, kBlockSize);
+
+  std::vector<uint8_t> buf(kBlockSize, 0);
+  buf[123] = 231;
+  buf[231] = 123;
+  buf[buf.size() - 1] = 255;
+
+  writer_.Write(buf.data(), kBlockSize - 1);
+  ASSERT_TRUE(cow_writer_.operations_.empty())
+      << "Haven't sent a complete block of data yet, CowWriter should not be "
+         "invoked.";
+  writer_.Write(buf.data() + kBlockSize - 1, 1);
+  ASSERT_TRUE(cow_writer_.Contains(123))
+      << "Once a block of data is sent to SnapshotExtentWriter, it should "
+         "forward data to cow_writer.";
+  ASSERT_EQ(cow_writer_.operations_.size(), 1U);
+  ASSERT_EQ(buf, cow_writer_.operations_[123].data);
+}
+
+TEST_F(SnapshotExtentWriterTest, NonBufferedWrites) {
+  google::protobuf::RepeatedPtrField<Extent> extents;
+  AddExtent(&extents, 123, 1);
+  AddExtent(&extents, 125, 1);
+  writer_.Init(extents, kBlockSize);
+
+  std::vector<uint8_t> buf(kBlockSize * 2, 0);
+  buf[123] = 231;
+  buf[231] = 123;
+  buf[buf.size() - 1] = 255;
+
+  writer_.Write(buf.data(), buf.size());
+  ASSERT_TRUE(cow_writer_.Contains(123));
+  ASSERT_TRUE(cow_writer_.Contains(125));
+
+  ASSERT_EQ(cow_writer_.operations_.size(), 2U);
+  auto actual_data = cow_writer_.operations_[123].data;
+  actual_data.insert(actual_data.end(),
+                     cow_writer_.operations_[125].data.begin(),
+                     cow_writer_.operations_[125].data.end());
+  ASSERT_EQ(buf, actual_data);
+}
+
+TEST_F(SnapshotExtentWriterTest, WriteAcrossBlockBoundary) {
+  google::protobuf::RepeatedPtrField<Extent> extents;
+  AddExtent(&extents, 123, 1);
+  AddExtent(&extents, 125, 2);
+  writer_.Init(extents, kBlockSize);
+
+  std::vector<uint8_t> buf(kBlockSize * 3);
+  std::memset(buf.data(), 0, buf.size());
+  buf[123] = 231;
+  buf[231] = 123;
+  buf[buf.size() - 1] = 255;
+  buf[kBlockSize - 1] = 254;
+
+  writer_.Write(buf.data(), kBlockSize - 1);
+  ASSERT_TRUE(cow_writer_.operations_.empty())
+      << "Haven't sent a complete block of data yet, CowWriter should not be "
+         "invoked.";
+  writer_.Write(buf.data() + kBlockSize - 1, 1 + kBlockSize * 2);
+  ASSERT_TRUE(cow_writer_.Contains(123));
+  ASSERT_TRUE(cow_writer_.Contains(125));
+
+  ASSERT_EQ(cow_writer_.operations_.size(), 2U);
+  auto actual_data = cow_writer_.operations_[123].data;
+  actual_data.insert(actual_data.end(),
+                     cow_writer_.operations_[125].data.begin(),
+                     cow_writer_.operations_[125].data.end());
+  ASSERT_EQ(buf, actual_data);
+}
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/vabc_partition_writer.cc b/payload_consumer/vabc_partition_writer.cc
new file mode 100644
index 0000000..0843fff
--- /dev/null
+++ b/payload_consumer/vabc_partition_writer.cc
@@ -0,0 +1,175 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/vabc_partition_writer.h"
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <libsnapshot/cow_writer.h>
+
+#include "update_engine/common/cow_operation_convert.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/extent_writer.h"
+#include "update_engine/payload_consumer/file_descriptor.h"
+#include "update_engine/payload_consumer/install_plan.h"
+#include "update_engine/payload_consumer/partition_writer.h"
+#include "update_engine/payload_consumer/snapshot_extent_writer.h"
+
+namespace chromeos_update_engine {
+// Expected layout of COW file:
+// === Beginning of Cow Image ===
+// All Source Copy Operations
+// ========== Label 0 ==========
+// Operation 0 in PartitionUpdate
+// ========== Label 1 ==========
+// Operation 1 in PartitionUpdate
+// ========== label 2 ==========
+// Operation 2 in PartitionUpdate
+// ========== label 3 ==========
+// .
+// .
+// .
+
+// When resuming, pass |next_op_index_| as the label to
+// |InitializeAppend|.
+// For example, suppose we finished writing all SOURCE_COPY operations and
+// finished writing operation 2 completely, and the update is suspended when we
+// are halfway through operation 3.
+// |next_op_index_| would be 3, so we pass 3 as the
+// label to |InitializeAppend|. The CowWriter will retain all data before
+// label 3, which contains all of operation 2's data, but none of operation 3's
+// data.
+
+bool VABCPartitionWriter::Init(const InstallPlan* install_plan,
+                               bool source_may_exist,
+                               size_t next_op_index) {
+  TEST_AND_RETURN_FALSE(install_plan != nullptr);
+  TEST_AND_RETURN_FALSE(
+      OpenSourcePartition(install_plan->source_slot, source_may_exist));
+  std::optional<std::string> source_path;
+  if (!install_part_.source_path.empty()) {
+    // TODO(zhangkelvin) Make |source_path| a std::optional<std::string>
+    source_path = install_part_.source_path;
+  }
+  cow_writer_ = dynamic_control_->OpenCowWriter(
+      install_part_.name, source_path, install_plan->is_resume);
+  TEST_AND_RETURN_FALSE(cow_writer_ != nullptr);
+
+  // ===== Resume case handling code goes here ====
+  // It is possible that the SOURCE_COPY operations are already written but
+  // |next_op_index_| is still 0. In this case we discard the previously
+  // written SOURCE_COPY operations and start over.
+  if (install_plan->is_resume && next_op_index > 0) {
+    LOG(INFO) << "Resuming update on partition `"
+              << partition_update_.partition_name() << "` op index "
+              << next_op_index;
+    TEST_AND_RETURN_FALSE(cow_writer_->InitializeAppend(next_op_index));
+    return true;
+  } else {
+    TEST_AND_RETURN_FALSE(cow_writer_->Initialize());
+  }
+
+  // ==============================================
+
+  // TODO(zhangkelvin) Rewrite this in C++20 coroutine once that's available.
+  auto converted = ConvertToCowOperations(partition_update_.operations(),
+                                          partition_update_.merge_operations());
+
+  WriteAllCowOps(block_size_, converted, cow_writer_.get(), source_fd_);
+  return true;
+}
+
+bool VABCPartitionWriter::WriteAllCowOps(
+    size_t block_size,
+    const std::vector<CowOperation>& converted,
+    android::snapshot::ICowWriter* cow_writer,
+    FileDescriptorPtr source_fd) {
+  std::vector<uint8_t> buffer(block_size);
+
+  for (const auto& cow_op : converted) {
+    switch (cow_op.op) {
+      case CowOperation::CowCopy:
+        if (cow_op.src_block == cow_op.dst_block) {
+          continue;
+        }
+        TEST_AND_RETURN_FALSE(
+            cow_writer->AddCopy(cow_op.dst_block, cow_op.src_block));
+        break;
+      case CowOperation::CowReplace:
+        ssize_t bytes_read = 0;
+        TEST_AND_RETURN_FALSE(utils::ReadAll(source_fd,
+                                             buffer.data(),
+                                             block_size,
+                                             cow_op.src_block * block_size,
+                                             &bytes_read));
+        if (bytes_read <= 0 || static_cast<size_t>(bytes_read) != block_size) {
+          LOG(ERROR) << "source_fd->Read failed: " << bytes_read;
+          return false;
+        }
+        TEST_AND_RETURN_FALSE(cow_writer->AddRawBlocks(
+            cow_op.dst_block, buffer.data(), block_size));
+        break;
+    }
+  }
+
+  return true;
+}
+
+std::unique_ptr<ExtentWriter> VABCPartitionWriter::CreateBaseExtentWriter() {
+  return std::make_unique<SnapshotExtentWriter>(cow_writer_.get());
+}
+
+[[nodiscard]] bool VABCPartitionWriter::PerformZeroOrDiscardOperation(
+    const InstallOperation& operation) {
+  for (const auto& extent : operation.dst_extents()) {
+    TEST_AND_RETURN_FALSE(
+        cow_writer_->AddZeroBlocks(extent.start_block(), extent.num_blocks()));
+  }
+  return true;
+}
+
+[[nodiscard]] bool VABCPartitionWriter::PerformSourceCopyOperation(
+    const InstallOperation& operation, ErrorCode* error) {
+  // TODO(zhangkelvin) Probably just ignore SOURCE_COPY? They should be taken
+  // care of during Init();
+  return true;
+}
+
+void VABCPartitionWriter::CheckpointUpdateProgress(size_t next_op_index) {
+  // No need to call fsync/sync, as CowWriter flushes after a label is
+  // added.
+  // If |cow_writer_| is null, Init() failed; this function shouldn't be
+  // called if Init() fails.
+  TEST_AND_RETURN(cow_writer_ != nullptr);
+  cow_writer_->AddLabel(next_op_index);
+}
+
+[[nodiscard]] bool VABCPartitionWriter::FinishedInstallOps() {
+  // Add a hardcoded magic label to indicate the end of all install ops. This
+  // label is needed by filesystem verification; don't remove it.
+  TEST_AND_RETURN_FALSE(cow_writer_ != nullptr);
+  return cow_writer_->AddLabel(kEndOfInstallLabel);
+}
+
+VABCPartitionWriter::~VABCPartitionWriter() {
+  if (cow_writer_) {
+    cow_writer_->Finalize();
+  }
+}
+
+}  // namespace chromeos_update_engine
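
The labeling scheme documented at the top of this file drives both Init() and CheckpointUpdateProgress(). The sketch below shows that interplay under stated assumptions; CowWriterLike is a stand-in interface invented for illustration, not the real android::snapshot::ICowWriter.

#include <cstddef>
#include <cstdint>

// Stand-in for the ICowWriter calls used by the labeling scheme above.
struct CowWriterLike {
  virtual bool Initialize() = 0;
  virtual bool InitializeAppend(uint64_t label) = 0;
  virtual bool AddLabel(uint64_t label) = 0;
  virtual ~CowWriterLike() = default;
};

// Resume decision from Init(): with at least one completed op, reopen the COW
// in append mode at label |next_op_index| so data written after that label
// (the partially applied op) is discarded; otherwise start from scratch.
inline bool OpenCowForUpdate(CowWriterLike* cow,
                             bool is_resume,
                             size_t next_op_index) {
  if (is_resume && next_op_index > 0)
    return cow->InitializeAppend(next_op_index);
  return cow->Initialize();
}

// CheckpointUpdateProgress(): emit the next op index as a label so a future
// resume can append right after the last fully written operation.
inline bool CheckpointOp(CowWriterLike* cow, size_t next_op_index) {
  return cow->AddLabel(next_op_index);
}
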
diff --git a/payload_consumer/vabc_partition_writer.h b/payload_consumer/vabc_partition_writer.h
new file mode 100644
index 0000000..7fb2a2c
--- /dev/null
+++ b/payload_consumer/vabc_partition_writer.h
@@ -0,0 +1,63 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_VABC_PARTITION_WRITER_H_
+#define UPDATE_ENGINE_VABC_PARTITION_WRITER_H_
+
+#include <memory>
+#include <vector>
+
+#include <libsnapshot/snapshot_writer.h>
+
+#include "update_engine/common/cow_operation_convert.h"
+#include "update_engine/payload_consumer/install_plan.h"
+#include "update_engine/payload_consumer/partition_writer.h"
+
+namespace chromeos_update_engine {
+class VABCPartitionWriter final : public PartitionWriter {
+ public:
+  using PartitionWriter::PartitionWriter;
+  [[nodiscard]] bool Init(const InstallPlan* install_plan,
+                          bool source_may_exist,
+                          size_t next_op_index) override;
+  ~VABCPartitionWriter() override;
+
+  [[nodiscard]] std::unique_ptr<ExtentWriter> CreateBaseExtentWriter() override;
+
+  // Only ZERO and SOURCE_COPY InstallOperations are treated specially by the
+  // VABC partition writer. These operations correspond to COW_ZERO and
+  // COW_COPY; all other operations are converted to COW_REPLACE.
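+  //
+  // Illustrative mapping (a sketch of the cow_writer_ calls this class ends
+  // up making; not a separate API):
+  //   ZERO / DISCARD  -> cow_writer_->AddZeroBlocks(start_block, num_blocks)
+  //   SOURCE_COPY     -> cow_writer_->AddCopy(dst_block, src_block)
+  //   everything else -> cow_writer_->AddRawBlocks(dst_block, data, size)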
+  [[nodiscard]] bool PerformZeroOrDiscardOperation(
+      const InstallOperation& operation) override;
+  [[nodiscard]] bool PerformSourceCopyOperation(
+      const InstallOperation& operation, ErrorCode* error) override;
+
+  void CheckpointUpdateProgress(size_t next_op_index) override;
+
+  static bool WriteAllCowOps(size_t block_size,
+                             const std::vector<CowOperation>& converted,
+                             android::snapshot::ICowWriter* cow_writer,
+                             FileDescriptorPtr source_fd);
+
+  [[nodiscard]] bool FinishedInstallOps() override;
+
+ private:
+  std::unique_ptr<android::snapshot::ISnapshotWriter> cow_writer_;
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_VABC_PARTITION_WRITER_H_
diff --git a/payload_consumer/verity_writer_android.cc b/payload_consumer/verity_writer_android.cc
index d5437b6..e2fab7d 100644
--- a/payload_consumer/verity_writer_android.cc
+++ b/payload_consumer/verity_writer_android.cc
@@ -29,6 +29,8 @@
 }
 
 #include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/cached_file_descriptor.h"
+#include "update_engine/payload_consumer/file_descriptor.h"
 
 namespace chromeos_update_engine {
 
@@ -41,9 +43,6 @@
 bool VerityWriterAndroid::Init(const InstallPlan::Partition& partition) {
   partition_ = &partition;
 
-  if (partition_->hash_tree_size != 0 || partition_->fec_size != 0) {
-    utils::SetBlockDeviceReadOnly(partition_->target_path, false);
-  }
   if (partition_->hash_tree_size != 0) {
     auto hash_function =
         HashTreeBuilder::HashFunction(partition_->hash_tree_algorithm);
@@ -65,58 +64,86 @@
       return false;
     }
   }
+  total_offset_ = 0;
   return true;
 }
 
-bool VerityWriterAndroid::Update(uint64_t offset,
+bool VerityWriterAndroid::Update(const uint64_t offset,
                                  const uint8_t* buffer,
                                  size_t size) {
+  if (offset != total_offset_) {
+    LOG(ERROR) << "Sequential read expected, expected to read at: "
+               << total_offset_ << " actual read occurs at: " << offset;
+    return false;
+  }
   if (partition_->hash_tree_size != 0) {
-    uint64_t hash_tree_data_end =
+    const uint64_t hash_tree_data_end =
         partition_->hash_tree_data_offset + partition_->hash_tree_data_size;
-    uint64_t start_offset = std::max(offset, partition_->hash_tree_data_offset);
-    uint64_t end_offset = std::min(offset + size, hash_tree_data_end);
+    const uint64_t start_offset =
+        std::max(offset, partition_->hash_tree_data_offset);
+    if (offset + size > hash_tree_data_end) {
+      LOG(WARNING)
+          << "Reading past hash_tree_data_end; something is probably "
+             "wrong and might cause an incorrect partition hash. offset: "
+          << offset << " size: " << size
+          << " hash_tree_data_end: " << hash_tree_data_end;
+    }
+    const uint64_t end_offset = std::min(offset + size, hash_tree_data_end);
     if (start_offset < end_offset) {
       TEST_AND_RETURN_FALSE(hash_tree_builder_->Update(
           buffer + start_offset - offset, end_offset - start_offset));
 
       if (end_offset == hash_tree_data_end) {
-        // All hash tree data blocks has been hashed, write hash tree to disk.
-        int fd = HANDLE_EINTR(open(partition_->target_path.c_str(), O_WRONLY));
-        if (fd < 0) {
-          PLOG(ERROR) << "Failed to open " << partition_->target_path
-                      << " to write hash tree.";
-          return false;
-        }
-        ScopedFdCloser fd_closer(&fd);
-
-        LOG(INFO) << "Writing verity hash tree to " << partition_->target_path;
-        TEST_AND_RETURN_FALSE(hash_tree_builder_->BuildHashTree());
-        TEST_AND_RETURN_FALSE(hash_tree_builder_->WriteHashTreeToFd(
-            fd, partition_->hash_tree_offset));
-        hash_tree_builder_.reset();
+        LOG(INFO)
+            << "Read everything before hash tree. Ready to write hash tree.";
       }
     }
   }
+  total_offset_ += size;
+
+  return true;
+}
+
+bool VerityWriterAndroid::Finalize(FileDescriptorPtr read_fd,
+                                   FileDescriptorPtr write_fd) {
+  const auto hash_tree_data_end =
+      partition_->hash_tree_data_offset + partition_->hash_tree_data_size;
+  if (total_offset_ < hash_tree_data_end) {
+    LOG(ERROR) << "Read up to " << total_offset_
+               << " when we are expecting to read everything "
+                  "before "
+               << hash_tree_data_end;
+    return false;
+  }
+  // All hash tree data blocks have been hashed; write the hash tree to disk.
+  LOG(INFO) << "Writing verity hash tree to " << partition_->target_path;
+  TEST_AND_RETURN_FALSE(hash_tree_builder_->BuildHashTree());
+  TEST_AND_RETURN_FALSE_ERRNO(
+      write_fd->Seek(partition_->hash_tree_offset, SEEK_SET));
+  auto success =
+      hash_tree_builder_->WriteHashTree([write_fd](auto data, auto size) {
+        return utils::WriteAll(write_fd, data, size);
+      });
+  // HashTreeBuilder already prints error messages.
+  TEST_AND_RETURN_FALSE(success);
+  hash_tree_builder_.reset();
   if (partition_->fec_size != 0) {
-    uint64_t fec_data_end =
-        partition_->fec_data_offset + partition_->fec_data_size;
-    if (offset < fec_data_end && offset + size >= fec_data_end) {
-      LOG(INFO) << "Writing verity FEC to " << partition_->target_path;
-      TEST_AND_RETURN_FALSE(EncodeFEC(partition_->target_path,
-                                      partition_->fec_data_offset,
-                                      partition_->fec_data_size,
-                                      partition_->fec_offset,
-                                      partition_->fec_size,
-                                      partition_->fec_roots,
-                                      partition_->block_size,
-                                      false /* verify_mode */));
-    }
+    LOG(INFO) << "Writing verity FEC to " << partition_->target_path;
+    TEST_AND_RETURN_FALSE(EncodeFEC(read_fd,
+                                    write_fd,
+                                    partition_->fec_data_offset,
+                                    partition_->fec_data_size,
+                                    partition_->fec_offset,
+                                    partition_->fec_size,
+                                    partition_->fec_roots,
+                                    partition_->block_size,
+                                    false /* verify_mode */));
   }
   return true;
 }
 
-bool VerityWriterAndroid::EncodeFEC(const std::string& path,
+bool VerityWriterAndroid::EncodeFEC(FileDescriptorPtr read_fd,
+                                    FileDescriptorPtr write_fd,
                                     uint64_t data_offset,
                                     uint64_t data_size,
                                     uint64_t fec_offset,
@@ -135,13 +162,10 @@
       init_rs_char(FEC_PARAMS(fec_roots)), &free_rs_char);
   TEST_AND_RETURN_FALSE(rs_char != nullptr);
 
-  int fd = HANDLE_EINTR(open(path.c_str(), verify_mode ? O_RDONLY : O_RDWR));
-  if (fd < 0) {
-    PLOG(ERROR) << "Failed to open " << path << " to write FEC.";
-    return false;
-  }
-  ScopedFdCloser fd_closer(&fd);
-
+  // Cache at most 1MB of FEC data. In VABC, we need to re-open the fd if we
+  // perform a read() operation after a write(), so reducing the number of
+  // writes saves unnecessary re-opens.
+  write_fd = std::make_shared<CachedFileDescriptor>(write_fd, 1 * (1 << 20));
   for (size_t i = 0; i < rounds; i++) {
     // Encodes |block_size| number of rs blocks each round so that we can read
     // one block each time instead of 1 byte to increase random read
@@ -154,13 +178,13 @@
       // Don't read past |data_size|, treat them as 0.
       if (offset < data_size) {
         ssize_t bytes_read = 0;
-        TEST_AND_RETURN_FALSE(utils::PReadAll(fd,
+        TEST_AND_RETURN_FALSE(utils::PReadAll(read_fd,
                                               buffer.data(),
                                               buffer.size(),
                                               data_offset + offset,
                                               &bytes_read));
-        TEST_AND_RETURN_FALSE(bytes_read ==
-                              static_cast<ssize_t>(buffer.size()));
+        TEST_AND_RETURN_FALSE(bytes_read >= 0);
+        TEST_AND_RETURN_FALSE(static_cast<size_t>(bytes_read) == buffer.size());
       }
       for (size_t k = 0; k < buffer.size(); k++) {
         rs_blocks[k * rs_n + j] = buffer[k];
@@ -179,17 +203,43 @@
       brillo::Blob fec_read(fec.size());
       ssize_t bytes_read = 0;
       TEST_AND_RETURN_FALSE(utils::PReadAll(
-          fd, fec_read.data(), fec_read.size(), fec_offset, &bytes_read));
-      TEST_AND_RETURN_FALSE(bytes_read ==
-                            static_cast<ssize_t>(fec_read.size()));
+          read_fd, fec_read.data(), fec_read.size(), fec_offset, &bytes_read));
+      TEST_AND_RETURN_FALSE(bytes_read >= 0);
+      TEST_AND_RETURN_FALSE(static_cast<size_t>(bytes_read) == fec_read.size());
       TEST_AND_RETURN_FALSE(fec == fec_read);
     } else {
-      TEST_AND_RETURN_FALSE(
-          utils::PWriteAll(fd, fec.data(), fec.size(), fec_offset));
+      CHECK(write_fd);
+      write_fd->Seek(fec_offset, SEEK_SET);
+      if (!utils::WriteAll(write_fd, fec.data(), fec.size())) {
+        PLOG(ERROR) << "EncodeFEC write() failed";
+        return false;
+      }
     }
     fec_offset += fec.size();
   }
-
+  write_fd->Flush();
   return true;
 }
+
+bool VerityWriterAndroid::EncodeFEC(const std::string& path,
+                                    uint64_t data_offset,
+                                    uint64_t data_size,
+                                    uint64_t fec_offset,
+                                    uint64_t fec_size,
+                                    uint32_t fec_roots,
+                                    uint32_t block_size,
+                                    bool verify_mode) {
+  FileDescriptorPtr fd(new EintrSafeFileDescriptor());
+  TEST_AND_RETURN_FALSE(
+      fd->Open(path.c_str(), verify_mode ? O_RDONLY : O_RDWR));
+  return EncodeFEC(fd,
+                   fd,
+                   data_offset,
+                   data_size,
+                   fec_offset,
+                   fec_size,
+                   fec_roots,
+                   block_size,
+                   verify_mode);
+}
 }  // namespace chromeos_update_engine
diff --git a/payload_consumer/verity_writer_android.h b/payload_consumer/verity_writer_android.h
index 05a5856..8339528 100644
--- a/payload_consumer/verity_writer_android.h
+++ b/payload_consumer/verity_writer_android.h
@@ -22,6 +22,7 @@
 
 #include <verity/hash_tree_builder.h>
 
+#include "payload_consumer/file_descriptor.h"
 #include "update_engine/payload_consumer/verity_writer_interface.h"
 
 namespace chromeos_update_engine {
@@ -31,8 +32,9 @@
   VerityWriterAndroid() = default;
   ~VerityWriterAndroid() override = default;
 
-  bool Init(const InstallPlan::Partition& partition) override;
+  bool Init(const InstallPlan::Partition& partition);
   bool Update(uint64_t offset, const uint8_t* buffer, size_t size) override;
+  bool Finalize(FileDescriptorPtr read_fd, FileDescriptorPtr write_fd) override;
 
   // Read [data_offset : data_offset + data_size) from |path| and encode FEC
   // data, if |verify_mode|, then compare the encoded FEC with the one in
@@ -40,6 +42,15 @@
   // in each Update() like hash tree, because for every rs block, its data are
   // spread across the entire |data_size|; unless we can cache all data in
   // memory, we have to re-read them from disk.
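+  //
+  // Illustrative call of the fd-based overload declared below (a sketch;
+  // |fd| is assumed to be an open FileDescriptorPtr for the target partition
+  // and |p| an InstallPlan::Partition), mirroring what Finalize() does:
+  //   EncodeFEC(fd, fd, p.fec_data_offset, p.fec_data_size, p.fec_offset,
+  //             p.fec_size, p.fec_roots, p.block_size,
+  //             false /* verify_mode */);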
+  static bool EncodeFEC(FileDescriptorPtr read_fd,
+                        FileDescriptorPtr write_fd,
+                        uint64_t data_offset,
+                        uint64_t data_size,
+                        uint64_t fec_offset,
+                        uint64_t fec_size,
+                        uint32_t fec_roots,
+                        uint32_t block_size,
+                        bool verify_mode);
   static bool EncodeFEC(const std::string& path,
                         uint64_t data_offset,
                         uint64_t data_size,
@@ -53,7 +64,7 @@
   const InstallPlan::Partition* partition_ = nullptr;
 
   std::unique_ptr<HashTreeBuilder> hash_tree_builder_;
-
+  uint64_t total_offset_ = 0;
   DISALLOW_COPY_AND_ASSIGN(VerityWriterAndroid);
 };
 
diff --git a/payload_consumer/verity_writer_android_unittest.cc b/payload_consumer/verity_writer_android_unittest.cc
index f943ce8..75da0ae 100644
--- a/payload_consumer/verity_writer_android_unittest.cc
+++ b/payload_consumer/verity_writer_android_unittest.cc
@@ -16,11 +16,14 @@
 
 #include "update_engine/payload_consumer/verity_writer_android.h"
 
+#include <fcntl.h>
+
 #include <brillo/secure_blob.h>
 #include <gtest/gtest.h>
 
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/file_descriptor.h"
 
 namespace chromeos_update_engine {
 
@@ -35,19 +38,23 @@
     partition_.hash_tree_size = 4096;
     partition_.hash_tree_algorithm = "sha1";
     partition_.fec_roots = 2;
+    partition_fd_ = std::make_shared<EintrSafeFileDescriptor>();
+    partition_fd_->Open(partition_.target_path.c_str(), O_RDWR);
   }
 
   VerityWriterAndroid verity_writer_;
   InstallPlan::Partition partition_;
-  test_utils::ScopedTempFile temp_file_;
+  FileDescriptorPtr partition_fd_;
+  ScopedTempFile temp_file_;
 };
 
 TEST_F(VerityWriterAndroidTest, SimpleTest) {
   brillo::Blob part_data(8192);
   test_utils::WriteFileVector(partition_.target_path, part_data);
   ASSERT_TRUE(verity_writer_.Init(partition_));
-  EXPECT_TRUE(verity_writer_.Update(0, part_data.data(), 4096));
-  EXPECT_TRUE(verity_writer_.Update(4096, part_data.data() + 4096, 4096));
+  ASSERT_TRUE(verity_writer_.Update(0, part_data.data(), 4096));
+  ASSERT_TRUE(verity_writer_.Update(4096, part_data.data() + 4096, 4096));
+  ASSERT_TRUE(verity_writer_.Finalize(partition_fd_, partition_fd_));
   brillo::Blob actual_part;
   utils::ReadFile(partition_.target_path, &actual_part);
   // dd if=/dev/zero bs=4096 count=1 2>/dev/null | sha1sum | xxd -r -p |
@@ -56,7 +63,7 @@
                        0x1d, 0xf3, 0xbf, 0xb2, 0x6b, 0x4f, 0xb7,
                        0xcd, 0x95, 0xfb, 0x7b, 0xff, 0x1d};
   memcpy(part_data.data() + 4096, hash.data(), hash.size());
-  EXPECT_EQ(part_data, actual_part);
+  ASSERT_EQ(part_data, actual_part);
 }
 
 TEST_F(VerityWriterAndroidTest, NoOpTest) {
@@ -64,19 +71,28 @@
   partition_.hash_tree_size = 0;
   brillo::Blob part_data(4096);
   ASSERT_TRUE(verity_writer_.Init(partition_));
-  EXPECT_TRUE(verity_writer_.Update(0, part_data.data(), part_data.size()));
-  EXPECT_TRUE(verity_writer_.Update(4096, part_data.data(), part_data.size()));
-  EXPECT_TRUE(verity_writer_.Update(8192, part_data.data(), part_data.size()));
+  ASSERT_TRUE(verity_writer_.Update(0, part_data.data(), part_data.size()));
+  ASSERT_TRUE(verity_writer_.Update(4096, part_data.data(), part_data.size()));
+  ASSERT_TRUE(verity_writer_.Update(8192, part_data.data(), part_data.size()));
+}
+
+TEST_F(VerityWriterAndroidTest, DiscontinuedRead) {
+  partition_.hash_tree_data_size = 8192;
+  partition_.hash_tree_size = 4096;
+  brillo::Blob part_data(4096);
+  ASSERT_TRUE(verity_writer_.Init(partition_));
+  ASSERT_TRUE(verity_writer_.Update(0, part_data.data(), part_data.size()));
+  ASSERT_FALSE(verity_writer_.Update(8192, part_data.data(), part_data.size()));
 }
 
 TEST_F(VerityWriterAndroidTest, InvalidHashAlgorithmTest) {
   partition_.hash_tree_algorithm = "sha123";
-  EXPECT_FALSE(verity_writer_.Init(partition_));
+  ASSERT_FALSE(verity_writer_.Init(partition_));
 }
 
 TEST_F(VerityWriterAndroidTest, WrongHashTreeSizeTest) {
   partition_.hash_tree_size = 8192;
-  EXPECT_FALSE(verity_writer_.Init(partition_));
+  ASSERT_FALSE(verity_writer_.Init(partition_));
 }
 
 TEST_F(VerityWriterAndroidTest, SHA256Test) {
@@ -84,8 +100,9 @@
   brillo::Blob part_data(8192);
   test_utils::WriteFileVector(partition_.target_path, part_data);
   ASSERT_TRUE(verity_writer_.Init(partition_));
-  EXPECT_TRUE(verity_writer_.Update(0, part_data.data(), 4096));
-  EXPECT_TRUE(verity_writer_.Update(4096, part_data.data() + 4096, 4096));
+  ASSERT_TRUE(verity_writer_.Update(0, part_data.data(), 4096));
+  ASSERT_TRUE(verity_writer_.Update(4096, part_data.data() + 4096, 4096));
+  ASSERT_TRUE(verity_writer_.Finalize(partition_fd_, partition_fd_));
   brillo::Blob actual_part;
   utils::ReadFile(partition_.target_path, &actual_part);
   // dd if=/dev/zero bs=4096 count=1 2>/dev/null | sha256sum | xxd -r -p |
@@ -95,7 +112,33 @@
                        0x4f, 0x58, 0x05, 0xff, 0x7c, 0xb4, 0x7c, 0x7a,
                        0x85, 0xda, 0xbd, 0x8b, 0x48, 0x89, 0x2c, 0xa7};
   memcpy(part_data.data() + 4096, hash.data(), hash.size());
-  EXPECT_EQ(part_data, actual_part);
+  ASSERT_EQ(part_data, actual_part);
+}
+
+TEST_F(VerityWriterAndroidTest, NonZeroOffsetSHA256Test) {
+  partition_.hash_tree_algorithm = "sha256";
+  partition_.hash_tree_data_offset = 100;
+  partition_.hash_tree_offset =
+      partition_.hash_tree_data_offset + partition_.hash_tree_data_size;
+  brillo::Blob part_data(8192 + partition_.hash_tree_data_offset);
+  test_utils::WriteFileVector(partition_.target_path, part_data);
+  ASSERT_TRUE(verity_writer_.Init(partition_));
+  ASSERT_TRUE(verity_writer_.Update(0, part_data.data(), 4096));
+  ASSERT_TRUE(verity_writer_.Update(4096, part_data.data() + 4096, 4096));
+  ASSERT_TRUE(verity_writer_.Update(
+      8192, part_data.data() + 8192, partition_.hash_tree_data_offset));
+  ASSERT_TRUE(verity_writer_.Finalize(partition_fd_, partition_fd_));
+  brillo::Blob actual_part;
+  utils::ReadFile(partition_.target_path, &actual_part);
+  // dd if=/dev/zero bs=4096 count=1 2>/dev/null | sha256sum | xxd -r -p |
+  //     hexdump -v -e '/1 "0x%02x, "'
+  brillo::Blob hash = {0xad, 0x7f, 0xac, 0xb2, 0x58, 0x6f, 0xc6, 0xe9,
+                       0x66, 0xc0, 0x04, 0xd7, 0xd1, 0xd1, 0x6b, 0x02,
+                       0x4f, 0x58, 0x05, 0xff, 0x7c, 0xb4, 0x7c, 0x7a,
+                       0x85, 0xda, 0xbd, 0x8b, 0x48, 0x89, 0x2c, 0xa7};
+  memcpy(
+      part_data.data() + partition_.hash_tree_offset, hash.data(), hash.size());
+  ASSERT_EQ(part_data, actual_part);
 }
 
 TEST_F(VerityWriterAndroidTest, FECTest) {
@@ -106,7 +149,8 @@
   brillo::Blob part_data(3 * 4096, 0x1);
   test_utils::WriteFileVector(partition_.target_path, part_data);
   ASSERT_TRUE(verity_writer_.Init(partition_));
-  EXPECT_TRUE(verity_writer_.Update(0, part_data.data(), part_data.size()));
+  ASSERT_TRUE(verity_writer_.Update(0, part_data.data(), part_data.size()));
+  ASSERT_TRUE(verity_writer_.Finalize(partition_fd_, partition_fd_));
   brillo::Blob actual_part;
   utils::ReadFile(partition_.target_path, &actual_part);
   // Write FEC data.
@@ -114,7 +158,7 @@
     part_data[i] = 0x8e;
     part_data[i + 1] = 0x8f;
   }
-  EXPECT_EQ(part_data, actual_part);
+  ASSERT_EQ(part_data, actual_part);
 }
 
 }  // namespace chromeos_update_engine
diff --git a/payload_consumer/verity_writer_interface.h b/payload_consumer/verity_writer_interface.h
index a3ecef3..37ed605 100644
--- a/payload_consumer/verity_writer_interface.h
+++ b/payload_consumer/verity_writer_interface.h
@@ -22,6 +22,7 @@
 
 #include <base/macros.h>
 
+#include "payload_consumer/file_descriptor.h"
 #include "update_engine/payload_consumer/install_plan.h"
 
 namespace chromeos_update_engine {
@@ -37,6 +38,10 @@
   // blocks has passed.
   virtual bool Update(uint64_t offset, const uint8_t* buffer, size_t size) = 0;
 
+  // Write hash tree and FEC data to the underlying fd, if they are present.
+  virtual bool Finalize(FileDescriptorPtr read_fd,
+                        FileDescriptorPtr write_fd) = 0;
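+  //
+  // Illustrative usage (a sketch; |part_fd| stands for an fd opened on the
+  // target partition), matching how the unit tests drive a writer:
+  //   writer->Init(partition);
+  //   writer->Update(0, data, size);       // feed all data sequentially
+  //   writer->Finalize(part_fd, part_fd);  // then write hash tree and FEC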
+
  protected:
   VerityWriterInterface() = default;
 
diff --git a/payload_consumer/verity_writer_stub.cc b/payload_consumer/verity_writer_stub.cc
index a0e2467..8bff076 100644
--- a/payload_consumer/verity_writer_stub.cc
+++ b/payload_consumer/verity_writer_stub.cc
@@ -36,4 +36,9 @@
   return true;
 }
 
+bool VerityWriterStub::Finalize(FileDescriptorPtr read_fd,
+                                FileDescriptorPtr write_fd) {
+  return true;
+}
+
 }  // namespace chromeos_update_engine
diff --git a/payload_consumer/verity_writer_stub.h b/payload_consumer/verity_writer_stub.h
index ea5e574..a20db03 100644
--- a/payload_consumer/verity_writer_stub.h
+++ b/payload_consumer/verity_writer_stub.h
@@ -28,6 +28,7 @@
 
   bool Init(const InstallPlan::Partition& partition) override;
   bool Update(uint64_t offset, const uint8_t* buffer, size_t size) override;
+  bool Finalize(FileDescriptorPtr read_fd, FileDescriptorPtr write_fd) override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(VerityWriterStub);
diff --git a/payload_consumer/xz_extent_writer.cc b/payload_consumer/xz_extent_writer.cc
index a5b939d..a648351 100644
--- a/payload_consumer/xz_extent_writer.cc
+++ b/payload_consumer/xz_extent_writer.cc
@@ -57,12 +57,11 @@
   TEST_AND_RETURN(input_buffer_.empty());
 }
 
-bool XzExtentWriter::Init(FileDescriptorPtr fd,
-                          const RepeatedPtrField<Extent>& extents,
+bool XzExtentWriter::Init(const RepeatedPtrField<Extent>& extents,
                           uint32_t block_size) {
   stream_ = xz_dec_init(XZ_DYNALLOC, kXzMaxDictSize);
   TEST_AND_RETURN_FALSE(stream_ != nullptr);
-  return underlying_writer_->Init(fd, extents, block_size);
+  return underlying_writer_->Init(extents, block_size);
 }
 
 bool XzExtentWriter::Write(const void* bytes, size_t count) {
diff --git a/payload_consumer/xz_extent_writer.h b/payload_consumer/xz_extent_writer.h
index e022274..70338f2 100644
--- a/payload_consumer/xz_extent_writer.h
+++ b/payload_consumer/xz_extent_writer.h
@@ -39,8 +39,7 @@
       : underlying_writer_(std::move(underlying_writer)) {}
   ~XzExtentWriter() override;
 
-  bool Init(FileDescriptorPtr fd,
-            const google::protobuf::RepeatedPtrField<Extent>& extents,
+  bool Init(const google::protobuf::RepeatedPtrField<Extent>& extents,
             uint32_t block_size) override;
   bool Write(const void* bytes, size_t count) override;
 
diff --git a/payload_consumer/xz_extent_writer_unittest.cc b/payload_consumer/xz_extent_writer_unittest.cc
index 34980a9..5269dbc 100644
--- a/payload_consumer/xz_extent_writer_unittest.cc
+++ b/payload_consumer/xz_extent_writer_unittest.cc
@@ -87,7 +87,7 @@
   }
 
   void WriteAll(const brillo::Blob& compressed) {
-    EXPECT_TRUE(xz_writer_->Init(fd_, {}, 1024));
+    EXPECT_TRUE(xz_writer_->Init({}, 1024));
     EXPECT_TRUE(xz_writer_->Write(compressed.data(), compressed.size()));
 
     EXPECT_TRUE(fake_extent_writer_->InitCalled());
@@ -130,7 +130,7 @@
 }
 
 TEST_F(XzExtentWriterTest, GarbageDataRejected) {
-  EXPECT_TRUE(xz_writer_->Init(fd_, {}, 1024));
+  EXPECT_TRUE(xz_writer_->Init({}, 1024));
   // The sample_data_ is an uncompressed string.
   EXPECT_FALSE(xz_writer_->Write(sample_data_.data(), sample_data_.size()));
 }
@@ -138,7 +138,7 @@
 TEST_F(XzExtentWriterTest, PartialDataIsKept) {
   brillo::Blob compressed(std::begin(kCompressed30KiBofA),
                           std::end(kCompressed30KiBofA));
-  EXPECT_TRUE(xz_writer_->Init(fd_, {}, 1024));
+  EXPECT_TRUE(xz_writer_->Init({}, 1024));
   for (uint8_t byte : compressed) {
     EXPECT_TRUE(xz_writer_->Write(&byte, 1));
   }
diff --git a/payload_generator/ab_generator_unittest.cc b/payload_generator/ab_generator_unittest.cc
index 270657a..84eeb77 100644
--- a/payload_generator/ab_generator_unittest.cc
+++ b/payload_generator/ab_generator_unittest.cc
@@ -30,10 +30,10 @@
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_generator/annotated_operation.h"
-#include "update_engine/payload_generator/bzip.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
 #include "update_engine/payload_generator/extent_ranges.h"
 #include "update_engine/payload_generator/extent_utils.h"
+#include "update_engine/payload_generator/xz.h"
 
 using std::string;
 using std::vector;
@@ -48,8 +48,8 @@
   return ext.start_block() == start_block && ext.num_blocks() == num_blocks;
 }
 
-// Tests splitting of a REPLACE/REPLACE_BZ operation.
-void TestSplitReplaceOrReplaceBzOperation(InstallOperation::Type orig_type,
+// Tests splitting of a REPLACE/REPLACE_XZ operation.
+void TestSplitReplaceOrReplaceXzOperation(InstallOperation::Type orig_type,
                                           bool compressible) {
   const size_t op_ex1_start_block = 2;
   const size_t op_ex1_num_blocks = 2;
@@ -70,8 +70,7 @@
       part_data.push_back(dis(gen));
   }
   ASSERT_EQ(part_size, part_data.size());
-  test_utils::ScopedTempFile part_file(
-      "SplitReplaceOrReplaceBzTest_part.XXXXXX");
+  ScopedTempFile part_file("SplitReplaceOrReplaceXzTest_part.XXXXXX");
   ASSERT_TRUE(test_utils::WriteFileVector(part_file.path(), part_data));
 
   // Create original operation and blob data.
@@ -97,7 +96,7 @@
   if (orig_type == InstallOperation::REPLACE) {
     op_blob = op_data;
   } else {
-    ASSERT_TRUE(BzipCompress(op_data, &op_blob));
+    ASSERT_TRUE(XzCompress(op_data, &op_blob));
   }
   op.set_data_offset(0);
   op.set_data_length(op_blob.size());
@@ -107,8 +106,7 @@
   aop.name = "SplitTestOp";
 
   // Create the data file.
-  test_utils::ScopedTempFile data_file(
-      "SplitReplaceOrReplaceBzTest_data.XXXXXX");
+  ScopedTempFile data_file("SplitReplaceOrReplaceXzTest_data.XXXXXX");
   EXPECT_TRUE(test_utils::WriteFileVector(data_file.path(), op_blob));
   int data_fd = open(data_file.path().c_str(), O_RDWR, 000);
   EXPECT_GE(data_fd, 0);
@@ -118,14 +116,14 @@
 
   // Split the operation.
   vector<AnnotatedOperation> result_ops;
-  PayloadVersion version(kChromeOSMajorPayloadVersion,
+  PayloadVersion version(kBrilloMajorPayloadVersion,
                          kSourceMinorPayloadVersion);
   ASSERT_TRUE(ABGenerator::SplitAReplaceOp(
       version, aop, part_file.path(), &result_ops, &blob_file));
 
   // Check the result.
   InstallOperation::Type expected_type =
-      compressible ? InstallOperation::REPLACE_BZ : InstallOperation::REPLACE;
+      compressible ? InstallOperation::REPLACE_XZ : InstallOperation::REPLACE;
 
   ASSERT_EQ(2U, result_ops.size());
 
@@ -143,7 +141,7 @@
       part_data.begin() + op_ex1_offset + op_ex1_size);
   brillo::Blob first_expected_blob;
   if (compressible) {
-    ASSERT_TRUE(BzipCompress(first_expected_data, &first_expected_blob));
+    ASSERT_TRUE(XzCompress(first_expected_data, &first_expected_blob));
   } else {
     first_expected_blob = first_expected_data;
   }
@@ -173,7 +171,7 @@
       part_data.begin() + op_ex2_offset + op_ex2_size);
   brillo::Blob second_expected_blob;
   if (compressible) {
-    ASSERT_TRUE(BzipCompress(second_expected_data, &second_expected_blob));
+    ASSERT_TRUE(XzCompress(second_expected_data, &second_expected_blob));
   } else {
     second_expected_blob = second_expected_data;
   }
@@ -199,8 +197,8 @@
   }
 }
 
-// Tests merging of REPLACE/REPLACE_BZ operations.
-void TestMergeReplaceOrReplaceBzOperations(InstallOperation::Type orig_type,
+// Tests merging of REPLACE/REPLACE_XZ operations.
+void TestMergeReplaceOrReplaceXzOperations(InstallOperation::Type orig_type,
                                            bool compressible) {
   const size_t first_op_num_blocks = 1;
   const size_t second_op_num_blocks = 2;
@@ -220,8 +218,7 @@
       part_data.push_back(dis(gen));
   }
   ASSERT_EQ(part_size, part_data.size());
-  test_utils::ScopedTempFile part_file(
-      "MergeReplaceOrReplaceBzTest_part.XXXXXX");
+  ScopedTempFile part_file("MergeReplaceOrReplaceXzTest_part.XXXXXX");
   ASSERT_TRUE(test_utils::WriteFileVector(part_file.path(), part_data));
 
   // Create original operations and blob data.
@@ -239,7 +236,7 @@
   if (orig_type == InstallOperation::REPLACE) {
     first_op_blob = first_op_data;
   } else {
-    ASSERT_TRUE(BzipCompress(first_op_data, &first_op_blob));
+    ASSERT_TRUE(XzCompress(first_op_data, &first_op_blob));
   }
   first_op.set_data_offset(0);
   first_op.set_data_length(first_op_blob.size());
@@ -259,7 +256,7 @@
   if (orig_type == InstallOperation::REPLACE) {
     second_op_blob = second_op_data;
   } else {
-    ASSERT_TRUE(BzipCompress(second_op_data, &second_op_blob));
+    ASSERT_TRUE(XzCompress(second_op_data, &second_op_blob));
   }
   second_op.set_data_offset(first_op_blob.size());
   second_op.set_data_length(second_op_blob.size());
@@ -271,8 +268,7 @@
   aops.push_back(second_aop);
 
   // Create the data file.
-  test_utils::ScopedTempFile data_file(
-      "MergeReplaceOrReplaceBzTest_data.XXXXXX");
+  ScopedTempFile data_file("MergeReplaceOrReplaceXzTest_data.XXXXXX");
   EXPECT_TRUE(test_utils::WriteFileVector(data_file.path(), blob_data));
   int data_fd = open(data_file.path().c_str(), O_RDWR, 000);
   EXPECT_GE(data_fd, 0);
@@ -281,14 +277,14 @@
   BlobFileWriter blob_file(data_fd, &data_file_size);
 
   // Merge the operations.
-  PayloadVersion version(kChromeOSMajorPayloadVersion,
+  PayloadVersion version(kBrilloMajorPayloadVersion,
                          kSourceMinorPayloadVersion);
   EXPECT_TRUE(ABGenerator::MergeOperations(
       &aops, version, 5, part_file.path(), &blob_file));
 
   // Check the result.
   InstallOperation::Type expected_op_type =
-      compressible ? InstallOperation::REPLACE_BZ : InstallOperation::REPLACE;
+      compressible ? InstallOperation::REPLACE_XZ : InstallOperation::REPLACE;
   EXPECT_EQ(1U, aops.size());
   InstallOperation new_op = aops[0].op;
   EXPECT_EQ(expected_op_type, new_op.type());
@@ -303,7 +299,7 @@
                              part_data.begin() + total_op_size);
   brillo::Blob expected_blob;
   if (compressible) {
-    ASSERT_TRUE(BzipCompress(expected_data, &expected_blob));
+    ASSERT_TRUE(XzCompress(expected_data, &expected_blob));
   } else {
     expected_blob = expected_data;
   }
@@ -384,19 +380,19 @@
 }
 
 TEST_F(ABGeneratorTest, SplitReplaceTest) {
-  TestSplitReplaceOrReplaceBzOperation(InstallOperation::REPLACE, false);
+  TestSplitReplaceOrReplaceXzOperation(InstallOperation::REPLACE, false);
 }
 
-TEST_F(ABGeneratorTest, SplitReplaceIntoReplaceBzTest) {
-  TestSplitReplaceOrReplaceBzOperation(InstallOperation::REPLACE, true);
+TEST_F(ABGeneratorTest, SplitReplaceIntoReplaceXzTest) {
+  TestSplitReplaceOrReplaceXzOperation(InstallOperation::REPLACE, true);
 }
 
-TEST_F(ABGeneratorTest, SplitReplaceBzTest) {
-  TestSplitReplaceOrReplaceBzOperation(InstallOperation::REPLACE_BZ, true);
+TEST_F(ABGeneratorTest, SplitReplaceXzTest) {
+  TestSplitReplaceOrReplaceXzOperation(InstallOperation::REPLACE_XZ, true);
 }
 
-TEST_F(ABGeneratorTest, SplitReplaceBzIntoReplaceTest) {
-  TestSplitReplaceOrReplaceBzOperation(InstallOperation::REPLACE_BZ, false);
+TEST_F(ABGeneratorTest, SplitReplaceXzIntoReplaceTest) {
+  TestSplitReplaceOrReplaceXzOperation(InstallOperation::REPLACE_XZ, false);
 }
 
 TEST_F(ABGeneratorTest, SortOperationsByDestinationTest) {
@@ -464,7 +460,7 @@
   aops.push_back(third_aop);
 
   BlobFileWriter blob_file(0, nullptr);
-  PayloadVersion version(kChromeOSMajorPayloadVersion,
+  PayloadVersion version(kBrilloMajorPayloadVersion,
                          kSourceMinorPayloadVersion);
   EXPECT_TRUE(ABGenerator::MergeOperations(&aops, version, 5, "", &blob_file));
 
@@ -484,19 +480,19 @@
 }
 
 TEST_F(ABGeneratorTest, MergeReplaceOperationsTest) {
-  TestMergeReplaceOrReplaceBzOperations(InstallOperation::REPLACE, false);
+  TestMergeReplaceOrReplaceXzOperations(InstallOperation::REPLACE, false);
 }
 
-TEST_F(ABGeneratorTest, MergeReplaceOperationsToReplaceBzTest) {
-  TestMergeReplaceOrReplaceBzOperations(InstallOperation::REPLACE, true);
+TEST_F(ABGeneratorTest, MergeReplaceOperationsToReplaceXzTest) {
+  TestMergeReplaceOrReplaceXzOperations(InstallOperation::REPLACE, true);
 }
 
-TEST_F(ABGeneratorTest, MergeReplaceBzOperationsTest) {
-  TestMergeReplaceOrReplaceBzOperations(InstallOperation::REPLACE_BZ, true);
+TEST_F(ABGeneratorTest, MergeReplaceXzOperationsTest) {
+  TestMergeReplaceOrReplaceXzOperations(InstallOperation::REPLACE_XZ, true);
 }
 
-TEST_F(ABGeneratorTest, MergeReplaceBzOperationsToReplaceTest) {
-  TestMergeReplaceOrReplaceBzOperations(InstallOperation::REPLACE_BZ, false);
+TEST_F(ABGeneratorTest, MergeReplaceXzOperationsToReplaceTest) {
+  TestMergeReplaceOrReplaceXzOperations(InstallOperation::REPLACE_XZ, false);
 }
 
 TEST_F(ABGeneratorTest, NoMergeOperationsTest) {
@@ -537,7 +533,7 @@
   aops.push_back(fourth_aop);
 
   BlobFileWriter blob_file(0, nullptr);
-  PayloadVersion version(kChromeOSMajorPayloadVersion,
+  PayloadVersion version(kBrilloMajorPayloadVersion,
                          kSourceMinorPayloadVersion);
   EXPECT_TRUE(ABGenerator::MergeOperations(&aops, version, 4, "", &blob_file));
 
@@ -561,7 +557,7 @@
   second_aop.op = second_op;
   aops.push_back(second_aop);
 
-  test_utils::ScopedTempFile src_part_file("AddSourceHashTest_src_part.XXXXXX");
+  ScopedTempFile src_part_file("AddSourceHashTest_src_part.XXXXXX");
   brillo::Blob src_data(kBlockSize);
   test_utils::FillWithData(&src_data);
   ASSERT_TRUE(test_utils::WriteFileVector(src_part_file.path(), src_data));
diff --git a/payload_generator/blob_file_writer.cc b/payload_generator/blob_file_writer.cc
index 7cdeb35..a1afe87 100644
--- a/payload_generator/blob_file_writer.cc
+++ b/payload_generator/blob_file_writer.cc
@@ -38,9 +38,9 @@
   return result;
 }
 
-void BlobFileWriter::SetTotalBlobs(size_t total_blobs) {
-  total_blobs_ = total_blobs;
-  stored_blobs_ = 0;
+void BlobFileWriter::IncTotalBlobs(size_t increment) {
+  base::AutoLock auto_lock(blob_mutex_);
+  total_blobs_ += increment;
 }
 
 }  // namespace chromeos_update_engine
diff --git a/payload_generator/blob_file_writer.h b/payload_generator/blob_file_writer.h
index 48553be..bdd4c08 100644
--- a/payload_generator/blob_file_writer.h
+++ b/payload_generator/blob_file_writer.h
@@ -35,10 +35,8 @@
   // was stored, or -1 in case of failure.
   off_t StoreBlob(const brillo::Blob& blob);
 
-  // The number of |total_blobs| is the number of blobs that will be stored but
-  // is only used for logging purposes. If not set or set to 0, logging will be
-  // skipped. This function will also reset the number of stored blobs to 0.
-  void SetTotalBlobs(size_t total_blobs);
+  // Increase |total_blobs| by |increment|. Thread safe.
+  void IncTotalBlobs(size_t increment);
 
  private:
   size_t total_blobs_{0};
diff --git a/payload_generator/blob_file_writer_unittest.cc b/payload_generator/blob_file_writer_unittest.cc
index 487bc73..f4dcafb 100644
--- a/payload_generator/blob_file_writer_unittest.cc
+++ b/payload_generator/blob_file_writer_unittest.cc
@@ -31,24 +31,21 @@
 class BlobFileWriterTest : public ::testing::Test {};
 
 TEST(BlobFileWriterTest, SimpleTest) {
-  string blob_path;
-  int blob_fd;
-  EXPECT_TRUE(
-      utils::MakeTempFile("BlobFileWriterTest.XXXXXX", &blob_path, &blob_fd));
+  ScopedTempFile blob_file("BlobFileWriterTest.XXXXXX", true);
   off_t blob_file_size = 0;
-  BlobFileWriter blob_file(blob_fd, &blob_file_size);
+  BlobFileWriter blob_file_writer(blob_file.fd(), &blob_file_size);
 
-  off_t blob_size = 1024;
-  brillo::Blob blob(blob_size);
+  const off_t kBlobSize = 1024;
+  brillo::Blob blob(kBlobSize);
   FillWithData(&blob);
-  EXPECT_EQ(0, blob_file.StoreBlob(blob));
-  EXPECT_EQ(blob_size, blob_file.StoreBlob(blob));
+  EXPECT_EQ(0, blob_file_writer.StoreBlob(blob));
+  EXPECT_EQ(kBlobSize, blob_file_writer.StoreBlob(blob));
 
-  brillo::Blob stored_blob(blob_size);
+  brillo::Blob stored_blob(kBlobSize);
   ssize_t bytes_read;
-  ASSERT_TRUE(
-      utils::PReadAll(blob_fd, stored_blob.data(), blob_size, 0, &bytes_read));
-  EXPECT_EQ(bytes_read, blob_size);
+  ASSERT_TRUE(utils::PReadAll(
+      blob_file.fd(), stored_blob.data(), kBlobSize, 0, &bytes_read));
+  EXPECT_EQ(bytes_read, kBlobSize);
   EXPECT_EQ(blob, stored_blob);
 }
 
diff --git a/payload_generator/block_mapping_unittest.cc b/payload_generator/block_mapping_unittest.cc
index 9b9b4f1..017548a 100644
--- a/payload_generator/block_mapping_unittest.cc
+++ b/payload_generator/block_mapping_unittest.cc
@@ -36,8 +36,8 @@
 class BlockMappingTest : public ::testing::Test {
  protected:
   // Old new partition files used in testing.
-  test_utils::ScopedTempFile old_part_{"BlockMappingTest_old.XXXXXX"};
-  test_utils::ScopedTempFile new_part_{"BlockMappingTest_new.XXXXXX"};
+  ScopedTempFile old_part_{"BlockMappingTest_old.XXXXXX"};
+  ScopedTempFile new_part_{"BlockMappingTest_new.XXXXXX"};
 
   size_t block_size_{1024};
   BlockMapping bm_{block_size_};  // BlockMapping under test.
diff --git a/payload_generator/boot_img_filesystem_unittest.cc b/payload_generator/boot_img_filesystem_unittest.cc
index 0b115e0..7805156 100644
--- a/payload_generator/boot_img_filesystem_unittest.cc
+++ b/payload_generator/boot_img_filesystem_unittest.cc
@@ -63,7 +63,7 @@
     return boot_img;
   }
 
-  test_utils::ScopedTempFile boot_file_;
+  ScopedTempFile boot_file_;
 };
 
 TEST_F(BootImgFilesystemTest, SimpleTest) {
diff --git a/payload_generator/cow_size_estimator.cc b/payload_generator/cow_size_estimator.cc
new file mode 100644
index 0000000..01e9965
--- /dev/null
+++ b/payload_generator/cow_size_estimator.cc
@@ -0,0 +1,167 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/cow_size_estimator.h"
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <android-base/unique_fd.h>
+#include <libsnapshot/cow_writer.h>
+
+#include "update_engine/common/cow_operation_convert.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/update_metadata.pb.h"
+
+namespace chromeos_update_engine {
+using android::snapshot::CowWriter;
+
+namespace {
+bool PerformReplaceOp(const InstallOperation& op,
+                      CowWriter* writer,
+                      FileDescriptorPtr target_fd,
+                      size_t block_size) {
+  std::vector<unsigned char> buffer;
+  for (const auto& extent : op.dst_extents()) {
+    buffer.resize(extent.num_blocks() * block_size);
+    // No need to read from payload.bin and then decompress; just read from
+    // the target directly.
+    ssize_t bytes_read = 0;
+    auto success = utils::ReadAll(target_fd,
+                                  buffer.data(),
+                                  buffer.size(),
+                                  extent.start_block() * block_size,
+                                  &bytes_read);
+    TEST_AND_RETURN_FALSE(success);
+    CHECK_EQ(static_cast<size_t>(bytes_read), buffer.size());
+    TEST_AND_RETURN_FALSE(writer->AddRawBlocks(
+        extent.start_block(), buffer.data(), buffer.size()));
+  }
+  return true;
+}
+
+bool PerformZeroOp(const InstallOperation& op,
+                   CowWriter* writer,
+                   size_t block_size) {
+  for (const auto& extent : op.dst_extents()) {
+    TEST_AND_RETURN_FALSE(
+        writer->AddZeroBlocks(extent.start_block(), extent.num_blocks()));
+  }
+  return true;
+}
+
+bool WriteAllCowOps(size_t block_size,
+                    const std::vector<CowOperation>& converted,
+                    android::snapshot::ICowWriter* cow_writer,
+                    FileDescriptorPtr target_fd) {
+  std::vector<uint8_t> buffer(block_size);
+
+  for (const auto& cow_op : converted) {
+    switch (cow_op.op) {
+      case CowOperation::CowCopy:
+        if (cow_op.src_block == cow_op.dst_block) {
+          continue;
+        }
+        TEST_AND_RETURN_FALSE(
+            cow_writer->AddCopy(cow_op.dst_block, cow_op.src_block));
+        break;
+      case CowOperation::CowReplace:
+        ssize_t bytes_read = 0;
+        TEST_AND_RETURN_FALSE(chromeos_update_engine::utils::ReadAll(
+            target_fd,
+            buffer.data(),
+            block_size,
+            cow_op.dst_block * block_size,
+            &bytes_read));
+        if (bytes_read <= 0 || static_cast<size_t>(bytes_read) != block_size) {
+          LOG(ERROR) << "target_fd->Read failed: " << bytes_read;
+          return false;
+        }
+        TEST_AND_RETURN_FALSE(cow_writer->AddRawBlocks(
+            cow_op.dst_block, buffer.data(), block_size));
+        break;
+    }
+  }
+
+  return true;
+}
+}  // namespace
+
+size_t EstimateCowSize(
+    FileDescriptorPtr target_fd,
+    const google::protobuf::RepeatedPtrField<InstallOperation>& operations,
+    const google::protobuf::RepeatedPtrField<CowMergeOperation>&
+        merge_operations,
+    size_t block_size,
+    std::string compression) {
+  android::snapshot::CowWriter cow_writer{
+      {.block_size = static_cast<uint32_t>(block_size),
+       .compression = std::move(compression)}};
+  // CowWriter treats -1 as a special value: it will discard all the data but
+  // still report the COW size. Good for estimation purposes.
+  cow_writer.Initialize(android::base::borrowed_fd{-1});
+  CHECK(CowDryRun(
+      target_fd, operations, merge_operations, block_size, &cow_writer));
+  CHECK(cow_writer.Finalize());
+  return cow_writer.GetCowSize();
+}
+
+bool CowDryRun(
+    FileDescriptorPtr target_fd,
+    const google::protobuf::RepeatedPtrField<InstallOperation>& operations,
+    const google::protobuf::RepeatedPtrField<CowMergeOperation>&
+        merge_operations,
+    size_t block_size,
+    android::snapshot::CowWriter* cow_writer) {
+  const auto converted = ConvertToCowOperations(operations, merge_operations);
+  WriteAllCowOps(block_size, converted, cow_writer, target_fd);
+  cow_writer->AddLabel(0);
+  for (const auto& op : operations) {
+    switch (op.type()) {
+      case InstallOperation::REPLACE:
+      case InstallOperation::REPLACE_BZ:
+      case InstallOperation::REPLACE_XZ:
+        TEST_AND_RETURN_FALSE(
+            PerformReplaceOp(op, cow_writer, target_fd, block_size));
+        break;
+      case InstallOperation::ZERO:
+      case InstallOperation::DISCARD:
+        TEST_AND_RETURN_FALSE(PerformZeroOp(op, cow_writer, block_size));
+        break;
+      case InstallOperation::SOURCE_COPY:
+      case InstallOperation::MOVE:
+        // Already handled by WriteAllCowOps.
+        break;
+      case InstallOperation::SOURCE_BSDIFF:
+      case InstallOperation::BROTLI_BSDIFF:
+      case InstallOperation::PUFFDIFF:
+      case InstallOperation::BSDIFF:
+        // We might do something special by adding CowBsdiff to CowWriter.
+        // For now, proceed the same way as a normal REPLACE operation.
+        TEST_AND_RETURN_FALSE(
+            PerformReplaceOp(op, cow_writer, target_fd, block_size));
+        break;
+    }
+    // Arbitrary label number; we won't be using these labels to resume here.
+    // They are emitted just to keep size estimates accurate, as update_engine
+    // emits one label for every op.
+    cow_writer->AddLabel(2);
+  }
+  // TODO(zhangkelvin) Take FEC extents into account once VABC stabilizes
+  return true;
+}
+}  // namespace chromeos_update_engine
diff --git a/payload_generator/cow_size_estimator.h b/payload_generator/cow_size_estimator.h
new file mode 100644
index 0000000..850c890
--- /dev/null
+++ b/payload_generator/cow_size_estimator.h
@@ -0,0 +1,47 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include <cstddef>
+#include <string>
+
+#include <libsnapshot/cow_writer.h>
+#include <update_engine/update_metadata.pb.h>
+
+#include "update_engine/payload_consumer/file_descriptor.h"
+
+namespace chromeos_update_engine {
+// Given a file descriptor to the target image and a list of operations,
+// estimate the size of the COW image if the operations are applied on a
+// Virtual AB Compression enabled device. This is intended to be used by update
+// generators to put an estimated COW size in the OTA payload. When installing
+// an OTA update, libsnapshot takes this estimate as a hint to allocate space.
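+//
+// Illustrative usage (a sketch; |target_fd|, |partition_update|, and
+// |block_size| are placeholders for the caller's open target image fd, parsed
+// PartitionUpdate proto, and block size):
+//   size_t estimate = EstimateCowSize(target_fd,
+//                                     partition_update.operations(),
+//                                     partition_update.merge_operations(),
+//                                     block_size,
+//                                     "gz" /* compression, for example */);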
+size_t EstimateCowSize(
+    FileDescriptorPtr target_fd,
+    const google::protobuf::RepeatedPtrField<InstallOperation>& operations,
+    const google::protobuf::RepeatedPtrField<CowMergeOperation>&
+        merge_operations,
+    size_t block_size,
+    std::string compression);
+
+// Convert InstallOps to CowOps and apply the converted cow ops to |cow_writer|.
+bool CowDryRun(
+    FileDescriptorPtr target_fd,
+    const google::protobuf::RepeatedPtrField<InstallOperation>& operations,
+    const google::protobuf::RepeatedPtrField<CowMergeOperation>&
+        merge_operations,
+    size_t block_size,
+    android::snapshot::CowWriter* cow_writer);
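+// (For example, EstimateCowSize() above runs CowDryRun() against a CowWriter
+//  initialized with fd -1, so all data is discarded but the size is tracked.)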
+
+}  // namespace chromeos_update_engine
diff --git a/metrics_reporter_stub.cc b/payload_generator/cow_size_estimator_stub.cc
similarity index 60%
copy from metrics_reporter_stub.cc
copy to payload_generator/cow_size_estimator_stub.cc
index 81664a5..9d94d63 100644
--- a/metrics_reporter_stub.cc
+++ b/payload_generator/cow_size_estimator_stub.cc
@@ -1,5 +1,5 @@
 //
-// Copyright (C) 2017 The Android Open Source Project
+// Copyright (C) 2020 The Android Open Source Project
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -14,18 +14,18 @@
 // limitations under the License.
 //
 
-#include "update_engine/metrics_reporter_stub.h"
-
-#include <memory>
+#include "update_engine/payload_generator/cow_size_estimator.h"
 
 namespace chromeos_update_engine {
 
-namespace metrics {
-
-std::unique_ptr<MetricsReporterInterface> CreateMetricsReporter() {
-  return std::make_unique<MetricsReporterStub>();
+size_t EstimateCowSize(
+    FileDescriptorPtr source_fd,
+    FileDescriptorPtr target_fd,
+    const google::protobuf::RepeatedPtrField<InstallOperation>& operations,
+    const google::protobuf::RepeatedPtrField<CowMergeOperation>&
+        merge_operations,
+    size_t block_size) {
+  return 0;
 }
 
-}  // namespace metrics
-
 }  // namespace chromeos_update_engine
diff --git a/payload_generator/cycle_breaker.cc b/payload_generator/cycle_breaker.cc
deleted file mode 100644
index d6eeed2..0000000
--- a/payload_generator/cycle_breaker.cc
+++ /dev/null
@@ -1,218 +0,0 @@
-//
-// Copyright (C) 2012 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/payload_generator/cycle_breaker.h"
-
-#include <inttypes.h>
-
-#include <limits>
-#include <set>
-#include <string>
-#include <utility>
-
-#include <base/stl_util.h>
-#include <base/strings/string_util.h>
-#include <base/strings/stringprintf.h>
-
-#include "update_engine/payload_generator/graph_utils.h"
-#include "update_engine/payload_generator/tarjan.h"
-
-using std::make_pair;
-using std::set;
-using std::vector;
-
-namespace chromeos_update_engine {
-
-// This is the outer function from the original paper.
-void CycleBreaker::BreakCycles(const Graph& graph, set<Edge>* out_cut_edges) {
-  cut_edges_.clear();
-
-  // Make a copy, which we will modify by removing edges. Thus, in each
-  // iteration subgraph_ is the current subgraph or the original with
-  // vertices we desire. This variable was "A_K" in the original paper.
-  subgraph_ = graph;
-
-  // The paper calls for the "adjacency structure (i.e., graph) of
-  // strong (-ly connected) component K with least vertex in subgraph
-  // induced by {s, s + 1, ..., n}".
-  // We arbitrarily order each vertex by its index in the graph. Thus,
-  // each iteration, we are looking at the subgraph {s, s + 1, ..., n}
-  // and looking for the strongly connected component with vertex s.
-
-  TarjanAlgorithm tarjan;
-  skipped_ops_ = 0;
-
-  for (Graph::size_type i = 0; i < subgraph_.size(); i++) {
-    InstallOperation::Type op_type = graph[i].aop.op.type();
-    if (op_type == InstallOperation::REPLACE ||
-        op_type == InstallOperation::REPLACE_BZ) {
-      skipped_ops_++;
-      continue;
-    }
-
-    if (i > 0) {
-      // Erase node (i - 1) from subgraph_. First, erase what it points to
-      subgraph_[i - 1].out_edges.clear();
-      // Now, erase any pointers to node (i - 1)
-      for (Graph::size_type j = i; j < subgraph_.size(); j++) {
-        subgraph_[j].out_edges.erase(i - 1);
-      }
-    }
-
-    // Calculate SCC (strongly connected component) with vertex i.
-    vector<Vertex::Index> component_indexes;
-    tarjan.Execute(i, &subgraph_, &component_indexes);
-
-    // Set subgraph edges for the components in the SCC.
-    for (vector<Vertex::Index>::iterator it = component_indexes.begin();
-         it != component_indexes.end();
-         ++it) {
-      subgraph_[*it].subgraph_edges.clear();
-      for (vector<Vertex::Index>::iterator jt = component_indexes.begin();
-           jt != component_indexes.end();
-           ++jt) {
-        // If there's a link from *it -> *jt in the graph,
-        // add a subgraph_ edge
-        if (base::ContainsKey(subgraph_[*it].out_edges, *jt))
-          subgraph_[*it].subgraph_edges.insert(*jt);
-      }
-    }
-
-    current_vertex_ = i;
-    blocked_.clear();
-    blocked_.resize(subgraph_.size());
-    blocked_graph_.clear();
-    blocked_graph_.resize(subgraph_.size());
-    Circuit(current_vertex_, 0);
-  }
-
-  out_cut_edges->swap(cut_edges_);
-  LOG(INFO) << "Cycle breaker skipped " << skipped_ops_ << " ops.";
-  DCHECK(stack_.empty());
-}
-
-static const size_t kMaxEdgesToConsider = 2;
-
-void CycleBreaker::HandleCircuit() {
-  stack_.push_back(current_vertex_);
-  CHECK_GE(stack_.size(), static_cast<vector<Vertex::Index>::size_type>(2));
-  Edge min_edge = make_pair(stack_[0], stack_[1]);
-  uint64_t min_edge_weight = std::numeric_limits<uint64_t>::max();
-  size_t edges_considered = 0;
-  for (vector<Vertex::Index>::const_iterator it = stack_.begin();
-       it != (stack_.end() - 1);
-       ++it) {
-    Edge edge = make_pair(*it, *(it + 1));
-    if (cut_edges_.find(edge) != cut_edges_.end()) {
-      stack_.pop_back();
-      return;
-    }
-    uint64_t edge_weight = graph_utils::EdgeWeight(subgraph_, edge);
-    if (edge_weight < min_edge_weight) {
-      min_edge_weight = edge_weight;
-      min_edge = edge;
-    }
-    edges_considered++;
-    if (edges_considered == kMaxEdgesToConsider)
-      break;
-  }
-  cut_edges_.insert(min_edge);
-  stack_.pop_back();
-}
-
-void CycleBreaker::Unblock(Vertex::Index u) {
-  blocked_[u] = false;
-
-  for (Vertex::EdgeMap::iterator it = blocked_graph_[u].out_edges.begin();
-       it != blocked_graph_[u].out_edges.end();) {
-    Vertex::Index w = it->first;
-    blocked_graph_[u].out_edges.erase(it++);
-    if (blocked_[w])
-      Unblock(w);
-  }
-}
-
-bool CycleBreaker::StackContainsCutEdge() const {
-  for (vector<Vertex::Index>::const_iterator it = ++stack_.begin(),
-                                             e = stack_.end();
-       it != e;
-       ++it) {
-    Edge edge = make_pair(*(it - 1), *it);
-    if (base::ContainsKey(cut_edges_, edge)) {
-      return true;
-    }
-  }
-  return false;
-}
-
-bool CycleBreaker::Circuit(Vertex::Index vertex, Vertex::Index depth) {
-  // "vertex" was "v" in the original paper.
-  bool found = false;  // Was "f" in the original paper.
-  stack_.push_back(vertex);
-  blocked_[vertex] = true;
-  {
-    static int counter = 0;
-    counter++;
-    if (counter == 10000) {
-      counter = 0;
-      std::string stack_str;
-      for (Vertex::Index index : stack_) {
-        stack_str += std::to_string(index);
-        stack_str += " -> ";
-      }
-      LOG(INFO) << "stack: " << stack_str;
-    }
-  }
-
-  for (Vertex::SubgraphEdgeMap::iterator w =
-           subgraph_[vertex].subgraph_edges.begin();
-       w != subgraph_[vertex].subgraph_edges.end();
-       ++w) {
-    if (*w == current_vertex_) {
-      // The original paper called for printing stack_ followed by
-      // current_vertex_ here, which is a cycle. Instead, we call
-      // HandleCircuit() to break it.
-      HandleCircuit();
-      found = true;
-    } else if (!blocked_[*w]) {
-      if (Circuit(*w, depth + 1)) {
-        found = true;
-        if ((depth > kMaxEdgesToConsider) || StackContainsCutEdge())
-          break;
-      }
-    }
-  }
-
-  if (found) {
-    Unblock(vertex);
-  } else {
-    for (Vertex::SubgraphEdgeMap::iterator w =
-             subgraph_[vertex].subgraph_edges.begin();
-         w != subgraph_[vertex].subgraph_edges.end();
-         ++w) {
-      if (blocked_graph_[*w].out_edges.find(vertex) ==
-          blocked_graph_[*w].out_edges.end()) {
-        blocked_graph_[*w].out_edges.insert(
-            make_pair(vertex, EdgeProperties()));
-      }
-    }
-  }
-  CHECK_EQ(vertex, stack_.back());
-  stack_.pop_back();
-  return found;
-}
-
-}  // namespace chromeos_update_engine
diff --git a/payload_generator/cycle_breaker.h b/payload_generator/cycle_breaker.h
deleted file mode 100644
index 01518fe..0000000
--- a/payload_generator/cycle_breaker.h
+++ /dev/null
@@ -1,71 +0,0 @@
-//
-// Copyright (C) 2010 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_CYCLE_BREAKER_H_
-#define UPDATE_ENGINE_PAYLOAD_GENERATOR_CYCLE_BREAKER_H_
-
-// This is a modified implementation of Donald B. Johnson's algorithm for
-// finding all elementary cycles (a.k.a. circuits) in a directed graph.
-// See the paper "Finding All the Elementary Circuits of a Directed Graph"
-// at http://dutta.csc.ncsu.edu/csc791_spring07/wrap/circuits_johnson.pdf
-// for reference.
-
-// Note: this version of the algorithm not only finds cycles, but breaks them.
-// It uses a simple greedy algorithm for cutting: when a cycle is discovered,
-// the edge with the least weight is cut. Longer term we may wish to do
-// something more intelligent, since the goal is (ideally) to minimize the
-// sum of the weights of all cut cycles. In practice, it's intractable
-// to consider all cycles before cutting any; there are simply too many.
-// In a sample graph representative of a typical workload, I found over
-// 5 * 10^15 cycles.
-
-#include <set>
-#include <vector>
-
-#include "update_engine/payload_generator/graph_types.h"
-
-namespace chromeos_update_engine {
-
-class CycleBreaker {
- public:
-  CycleBreaker() : skipped_ops_(0) {}
-  // out_cut_edges is replaced with the cut edges.
-  void BreakCycles(const Graph& graph, std::set<Edge>* out_cut_edges);
-
-  size_t skipped_ops() const { return skipped_ops_; }
-
- private:
-  void HandleCircuit();
-  void Unblock(Vertex::Index u);
-  bool Circuit(Vertex::Index vertex, Vertex::Index depth);
-  bool StackContainsCutEdge() const;
-
-  std::vector<bool> blocked_;         // "blocked" in the paper
-  Vertex::Index current_vertex_;      // "s" in the paper
-  std::vector<Vertex::Index> stack_;  // the stack variable in the paper
-  Graph subgraph_;                    // "A_K" in the paper
-  Graph blocked_graph_;               // "B" in the paper
-
-  std::set<Edge> cut_edges_;
-
-  // Number of operations skipped b/c we know they don't have any
-  // incoming edges.
-  size_t skipped_ops_;
-};
-
-}  // namespace chromeos_update_engine
-
-#endif  // UPDATE_ENGINE_PAYLOAD_GENERATOR_CYCLE_BREAKER_H_
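The deleted header above documents the greedy strategy the removed CycleBreaker used: when a circuit is discovered, cut its minimum-weight edge. A minimal, self-contained sketch of just that selection step, assuming stand-in Index/Edge aliases and a caller-supplied weight callback in place of the types and graph_utils helpers that graph_types.h provided to the real HandleCircuit():

// Sketch only, not the removed implementation.
#include <cstdint>
#include <functional>
#include <utility>
#include <vector>

using Index = size_t;                    // stood in by Vertex::Index in the real code
using Edge = std::pair<Index, Index>;    // as in graph_types.h

Edge PickEdgeToCut(const std::vector<Index>& cycle,
                   const std::function<uint64_t(const Edge&)>& weight) {
  // The closing edge goes from the last vertex back to the first one.
  Edge lightest = {cycle.back(), cycle.front()};
  uint64_t lightest_weight = weight(lightest);
  for (size_t i = 0; i + 1 < cycle.size(); ++i) {
    Edge candidate = {cycle[i], cycle[i + 1]};
    uint64_t w = weight(candidate);
    if (w < lightest_weight) {
      lightest = candidate;
      lightest_weight = w;
    }
  }
  return lightest;  // The caller would record this edge in cut_edges_.
}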
diff --git a/payload_generator/cycle_breaker_unittest.cc b/payload_generator/cycle_breaker_unittest.cc
deleted file mode 100644
index fdcf49b..0000000
--- a/payload_generator/cycle_breaker_unittest.cc
+++ /dev/null
@@ -1,279 +0,0 @@
-//
-// Copyright (C) 2010 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/payload_generator/cycle_breaker.h"
-
-#include <set>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include <base/logging.h>
-#include <base/stl_util.h>
-#include <gtest/gtest.h>
-
-#include "update_engine/payload_generator/graph_types.h"
-
-using std::make_pair;
-using std::pair;
-using std::set;
-using std::string;
-using std::vector;
-
-namespace chromeos_update_engine {
-
-namespace {
-void SetOpForNodes(Graph* graph) {
-  for (Vertex& vertex : *graph) {
-    vertex.aop.op.set_type(InstallOperation::MOVE);
-  }
-}
-}  // namespace
-
-class CycleBreakerTest : public ::testing::Test {};
-
-TEST(CycleBreakerTest, SimpleTest) {
-  int counter = 0;
-  const Vertex::Index n_a = counter++;
-  const Vertex::Index n_b = counter++;
-  const Vertex::Index n_c = counter++;
-  const Vertex::Index n_d = counter++;
-  const Vertex::Index n_e = counter++;
-  const Vertex::Index n_f = counter++;
-  const Vertex::Index n_g = counter++;
-  const Vertex::Index n_h = counter++;
-  const Graph::size_type kNodeCount = counter++;
-
-  Graph graph(kNodeCount);
-  SetOpForNodes(&graph);
-
-  graph[n_a].out_edges.insert(make_pair(n_e, EdgeProperties()));
-  graph[n_a].out_edges.insert(make_pair(n_f, EdgeProperties()));
-  graph[n_b].out_edges.insert(make_pair(n_a, EdgeProperties()));
-  graph[n_c].out_edges.insert(make_pair(n_d, EdgeProperties()));
-  graph[n_d].out_edges.insert(make_pair(n_e, EdgeProperties()));
-  graph[n_d].out_edges.insert(make_pair(n_f, EdgeProperties()));
-  graph[n_e].out_edges.insert(make_pair(n_b, EdgeProperties()));
-  graph[n_e].out_edges.insert(make_pair(n_c, EdgeProperties()));
-  graph[n_e].out_edges.insert(make_pair(n_f, EdgeProperties()));
-  graph[n_f].out_edges.insert(make_pair(n_g, EdgeProperties()));
-  graph[n_g].out_edges.insert(make_pair(n_h, EdgeProperties()));
-  graph[n_h].out_edges.insert(make_pair(n_g, EdgeProperties()));
-
-  CycleBreaker breaker;
-
-  set<Edge> broken_edges;
-  breaker.BreakCycles(graph, &broken_edges);
-
-  // The following cycles must be cut:
-  // A->E->B
-  // C->D->E
-  // G->H
-
-  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_a, n_e)) ||
-              base::ContainsKey(broken_edges, make_pair(n_e, n_b)) ||
-              base::ContainsKey(broken_edges, make_pair(n_b, n_a)));
-  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_c, n_d)) ||
-              base::ContainsKey(broken_edges, make_pair(n_d, n_e)) ||
-              base::ContainsKey(broken_edges, make_pair(n_e, n_c)));
-  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_g, n_h)) ||
-              base::ContainsKey(broken_edges, make_pair(n_h, n_g)));
-  EXPECT_EQ(3U, broken_edges.size());
-}
-
-namespace {
-pair<Vertex::Index, EdgeProperties> EdgeWithWeight(Vertex::Index dest,
-                                                   uint64_t weight) {
-  EdgeProperties props;
-  props.extents.resize(1);
-  props.extents[0].set_num_blocks(weight);
-  return make_pair(dest, props);
-}
-}  // namespace
-
-// This creates a bunch of cycles like this:
-//
-//               root <------.
-//    (t)->     / | \        |
-//             V  V  V       |
-//             N  N  N       |
-//              \ | /        |
-//               VVV         |
-//                N          |
-//              / | \        |
-//             V  V  V       |
-//             N  N  N       |
-//               ...         |
-//     (s)->    \ | /        |
-//               VVV         |
-//                N          |
-//                 \_________/
-//
-// such that the original cutting algo would cut edges (s). We changed
-// the algorithm to cut cycles (t) instead, since they are closer to the
-// root, and that can massively speed up cycle cutting.
-TEST(CycleBreakerTest, AggressiveCutTest) {
-  size_t counter = 0;
-
-  const int kNodesPerGroup = 4;
-  const int kGroups = 33;
-
-  Graph graph(kGroups * kNodesPerGroup + 1);  // + 1 for the root node
-  SetOpForNodes(&graph);
-
-  const Vertex::Index n_root = counter++;
-
-  Vertex::Index last_hub = n_root;
-  for (int i = 0; i < kGroups; i++) {
-    uint64_t weight = 5;
-    if (i == 0)
-      weight = 2;
-    else if (i == (kGroups - 1))
-      weight = 1;
-
-    const Vertex::Index next_hub = counter++;
-
-    for (int j = 0; j < (kNodesPerGroup - 1); j++) {
-      const Vertex::Index node = counter++;
-      graph[last_hub].out_edges.insert(EdgeWithWeight(node, weight));
-      graph[node].out_edges.insert(EdgeWithWeight(next_hub, weight));
-    }
-    last_hub = next_hub;
-  }
-
-  graph[last_hub].out_edges.insert(EdgeWithWeight(n_root, 5));
-
-  EXPECT_EQ(counter, graph.size());
-
-  CycleBreaker breaker;
-
-  set<Edge> broken_edges;
-  LOG(INFO) << "If this hangs for more than 1 second, the test has failed.";
-  breaker.BreakCycles(graph, &broken_edges);
-
-  set<Edge> expected_cuts;
-
-  for (Vertex::EdgeMap::const_iterator it = graph[n_root].out_edges.begin(),
-                                       e = graph[n_root].out_edges.end();
-       it != e;
-       ++it) {
-    expected_cuts.insert(make_pair(n_root, it->first));
-  }
-
-  EXPECT_TRUE(broken_edges == expected_cuts);
-}
-
-TEST(CycleBreakerTest, WeightTest) {
-  size_t counter = 0;
-  const Vertex::Index n_a = counter++;
-  const Vertex::Index n_b = counter++;
-  const Vertex::Index n_c = counter++;
-  const Vertex::Index n_d = counter++;
-  const Vertex::Index n_e = counter++;
-  const Vertex::Index n_f = counter++;
-  const Vertex::Index n_g = counter++;
-  const Vertex::Index n_h = counter++;
-  const Vertex::Index n_i = counter++;
-  const Vertex::Index n_j = counter++;
-  const Graph::size_type kNodeCount = counter++;
-
-  Graph graph(kNodeCount);
-  SetOpForNodes(&graph);
-
-  graph[n_a].out_edges.insert(EdgeWithWeight(n_b, 4));
-  graph[n_a].out_edges.insert(EdgeWithWeight(n_f, 3));
-  graph[n_a].out_edges.insert(EdgeWithWeight(n_h, 2));
-  graph[n_b].out_edges.insert(EdgeWithWeight(n_a, 3));
-  graph[n_b].out_edges.insert(EdgeWithWeight(n_c, 4));
-  graph[n_c].out_edges.insert(EdgeWithWeight(n_b, 5));
-  graph[n_c].out_edges.insert(EdgeWithWeight(n_d, 3));
-  graph[n_d].out_edges.insert(EdgeWithWeight(n_a, 6));
-  graph[n_d].out_edges.insert(EdgeWithWeight(n_e, 3));
-  graph[n_e].out_edges.insert(EdgeWithWeight(n_d, 4));
-  graph[n_e].out_edges.insert(EdgeWithWeight(n_g, 5));
-  graph[n_f].out_edges.insert(EdgeWithWeight(n_g, 2));
-  graph[n_g].out_edges.insert(EdgeWithWeight(n_f, 3));
-  graph[n_g].out_edges.insert(EdgeWithWeight(n_d, 5));
-  graph[n_h].out_edges.insert(EdgeWithWeight(n_i, 8));
-  graph[n_i].out_edges.insert(EdgeWithWeight(n_e, 4));
-  graph[n_i].out_edges.insert(EdgeWithWeight(n_h, 9));
-  graph[n_i].out_edges.insert(EdgeWithWeight(n_j, 6));
-
-  CycleBreaker breaker;
-
-  set<Edge> broken_edges;
-  breaker.BreakCycles(graph, &broken_edges);
-
-  // These are required to be broken:
-  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_b, n_a)));
-  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_b, n_c)));
-  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_d, n_e)));
-  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_f, n_g)));
-  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_h, n_i)));
-}
-
-TEST(CycleBreakerTest, UnblockGraphTest) {
-  size_t counter = 0;
-  const Vertex::Index n_a = counter++;
-  const Vertex::Index n_b = counter++;
-  const Vertex::Index n_c = counter++;
-  const Vertex::Index n_d = counter++;
-  const Graph::size_type kNodeCount = counter++;
-
-  Graph graph(kNodeCount);
-  SetOpForNodes(&graph);
-
-  graph[n_a].out_edges.insert(EdgeWithWeight(n_b, 1));
-  graph[n_a].out_edges.insert(EdgeWithWeight(n_c, 1));
-  graph[n_b].out_edges.insert(EdgeWithWeight(n_c, 2));
-  graph[n_c].out_edges.insert(EdgeWithWeight(n_b, 2));
-  graph[n_b].out_edges.insert(EdgeWithWeight(n_d, 2));
-  graph[n_d].out_edges.insert(EdgeWithWeight(n_a, 2));
-
-  CycleBreaker breaker;
-
-  set<Edge> broken_edges;
-  breaker.BreakCycles(graph, &broken_edges);
-
-  // These are required to be broken:
-  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_a, n_b)));
-  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_a, n_c)));
-}
-
-TEST(CycleBreakerTest, SkipOpsTest) {
-  size_t counter = 0;
-  const Vertex::Index n_a = counter++;
-  const Vertex::Index n_b = counter++;
-  const Vertex::Index n_c = counter++;
-  const Graph::size_type kNodeCount = counter++;
-
-  Graph graph(kNodeCount);
-  SetOpForNodes(&graph);
-  graph[n_a].aop.op.set_type(InstallOperation::REPLACE_BZ);
-  graph[n_c].aop.op.set_type(InstallOperation::REPLACE);
-
-  graph[n_a].out_edges.insert(EdgeWithWeight(n_b, 1));
-  graph[n_c].out_edges.insert(EdgeWithWeight(n_b, 1));
-
-  CycleBreaker breaker;
-
-  set<Edge> broken_edges;
-  breaker.BreakCycles(graph, &broken_edges);
-
-  EXPECT_EQ(2U, breaker.skipped_ops());
-}
-
-}  // namespace chromeos_update_engine

diff --git a/payload_generator/deflate_utils.cc b/payload_generator/deflate_utils.cc
index 01402dd..c874bfd 100644
--- a/payload_generator/deflate_utils.cc
+++ b/payload_generator/deflate_utils.cc
@@ -46,7 +46,7 @@
 // TODO(*): Optimize this so we don't have to read all extents into memory in
 // case it is large.
 bool CopyExtentsToFile(const string& in_path,
-                       const vector<Extent> extents,
+                       const vector<Extent>& extents,
                        const string& out_path,
                        size_t block_size) {
   brillo::Blob data(utils::BlocksInExtents(extents) * block_size);
@@ -284,8 +284,9 @@
       TEST_AND_RETURN_FALSE(
           CopyExtentsToFile(part.path, file.extents, path.value(), kBlockSize));
       // Test if it is actually a Squashfs file.
-      auto sqfs =
-          SquashfsFilesystem::CreateFromFile(path.value(), extract_deflates);
+      auto sqfs = SquashfsFilesystem::CreateFromFile(path.value(),
+                                                     extract_deflates,
+                                                     /*load_settings=*/false);
       if (sqfs) {
         // It is a squashfs file. Get its files to replace with itself.
         vector<FilesystemInterface::File> files;
@@ -306,7 +307,7 @@
       }
     }
 
-    if (is_regular_file && extract_deflates) {
+    if (is_regular_file && extract_deflates && !file.is_compressed) {
       // Search for deflates if the file is in zip or gzip format.
       // .zvoice files may eventually move out of rootfs. If that happens,
       // remove ".zvoice" (crbug.com/782918).
diff --git a/payload_generator/delta_diff_generator.cc b/payload_generator/delta_diff_generator.cc
index d484d32..ab0f036 100644
--- a/payload_generator/delta_diff_generator.cc
+++ b/payload_generator/delta_diff_generator.cc
@@ -29,16 +29,21 @@
 #include <vector>
 
 #include <base/logging.h>
+#include <base/threading/simple_thread.h>
 
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/delta_performer.h"
+#include "update_engine/payload_consumer/file_descriptor.h"
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_generator/ab_generator.h"
+#include "update_engine/payload_generator/annotated_operation.h"
 #include "update_engine/payload_generator/blob_file_writer.h"
+#include "update_engine/payload_generator/cow_size_estimator.h"
 #include "update_engine/payload_generator/delta_diff_utils.h"
 #include "update_engine/payload_generator/full_update_generator.h"
-#include "update_engine/payload_generator/inplace_generator.h"
+#include "update_engine/payload_generator/merge_sequence_generator.h"
 #include "update_engine/payload_generator/payload_file.h"
+#include "update_engine/update_metadata.pb.h"
 
 using std::string;
 using std::unique_ptr;
@@ -50,6 +55,110 @@
 const size_t kRootFSPartitionSize = static_cast<size_t>(2) * 1024 * 1024 * 1024;
 const size_t kBlockSize = 4096;  // bytes
 
+class PartitionProcessor : public base::DelegateSimpleThread::Delegate {
+  bool IsDynamicPartition(const std::string& partition_name) {
+    for (const auto& group :
+         config_.target.dynamic_partition_metadata->groups()) {
+      const auto& names = group.partition_names();
+      if (std::find(names.begin(), names.end(), partition_name) !=
+          names.end()) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+ public:
+  explicit PartitionProcessor(
+      const PayloadGenerationConfig& config,
+      const PartitionConfig& old_part,
+      const PartitionConfig& new_part,
+      BlobFileWriter* file_writer,
+      std::vector<AnnotatedOperation>* aops,
+      std::vector<CowMergeOperation>* cow_merge_sequence,
+      size_t* cow_size,
+      std::unique_ptr<chromeos_update_engine::OperationsGenerator> strategy)
+      : config_(config),
+        old_part_(old_part),
+        new_part_(new_part),
+        file_writer_(file_writer),
+        aops_(aops),
+        cow_merge_sequence_(cow_merge_sequence),
+        cow_size_(cow_size),
+        strategy_(std::move(strategy)) {}
+  PartitionProcessor(PartitionProcessor&&) noexcept = default;
+
+  void Run() override {
+    LOG(INFO) << "Started an async task to process partition "
+              << new_part_.name;
+    bool success = strategy_->GenerateOperations(
+        config_, old_part_, new_part_, file_writer_, aops_);
+    if (!success) {
+      // ABORT the entire process so that the developer can look at
+      // recent logs and diagnose what happened.
+      LOG(FATAL) << "GenerateOperations(" << old_part_.name << ", "
+                 << new_part_.name << ") failed";
+    }
+
+    bool snapshot_enabled =
+        config_.target.dynamic_partition_metadata &&
+        config_.target.dynamic_partition_metadata->snapshot_enabled();
+    if (!snapshot_enabled || !IsDynamicPartition(new_part_.name)) {
+      return;
+    }
+    // Skip COW size estimation if VABC isn't enabled.
+    if (!config_.target.dynamic_partition_metadata->vabc_enabled()) {
+      return;
+    }
+    if (!old_part_.path.empty()) {
+      auto generator = MergeSequenceGenerator::Create(*aops_);
+      if (!generator || !generator->Generate(cow_merge_sequence_)) {
+        LOG(FATAL) << "Failed to generate merge sequence";
+      }
+    }
+
+    LOG(INFO) << "Estimating COW size for partition: " << new_part_.name;
+    // Need the contents of the source/target images when doing a
+    // dry run.
+    FileDescriptorPtr source_fd{new EintrSafeFileDescriptor()};
+    source_fd->Open(old_part_.path.c_str(), O_RDONLY);
+
+    auto target_fd = std::make_unique<EintrSafeFileDescriptor>();
+    target_fd->Open(new_part_.path.c_str(), O_RDONLY);
+
+    google::protobuf::RepeatedPtrField<InstallOperation> operations;
+
+    for (const AnnotatedOperation& aop : *aops_) {
+      *operations.Add() = aop.op;
+    }
+    *cow_size_ = EstimateCowSize(
+        std::move(target_fd),
+        std::move(operations),
+        {cow_merge_sequence_->begin(), cow_merge_sequence_->end()},
+        config_.block_size,
+        config_.target.dynamic_partition_metadata->vabc_compression_param());
+    if (!new_part_.disable_fec_computation) {
+      *cow_size_ +=
+          new_part_.verity.fec_extent.num_blocks() * config_.block_size;
+    }
+    *cow_size_ +=
+        new_part_.verity.hash_tree_extent.num_blocks() * config_.block_size;
+    LOG(INFO) << "Estimated COW size for partition: " << new_part_.name << " "
+              << *cow_size_;
+  }
+
+ private:
+  const PayloadGenerationConfig& config_;
+  const PartitionConfig& old_part_;
+  const PartitionConfig& new_part_;
+  BlobFileWriter* file_writer_;
+  std::vector<AnnotatedOperation>* aops_;
+  std::vector<CowMergeOperation>* cow_merge_sequence_;
+  size_t* cow_size_;
+  std::unique_ptr<chromeos_update_engine::OperationsGenerator> strategy_;
+  DISALLOW_COPY_AND_ASSIGN(PartitionProcessor);
+};
+
 bool GenerateUpdatePayloadFile(const PayloadGenerationConfig& config,
                                const string& output_path,
                                const string& private_key_path,
@@ -64,23 +173,28 @@
   PayloadFile payload;
   TEST_AND_RETURN_FALSE(payload.Init(config));
 
-  const string kTempFileTemplate("CrAU_temp_data.XXXXXX");
-  string temp_file_path;
-  int data_file_fd;
-  TEST_AND_RETURN_FALSE(
-      utils::MakeTempFile(kTempFileTemplate, &temp_file_path, &data_file_fd));
-  ScopedPathUnlinker temp_file_unlinker(temp_file_path);
-  TEST_AND_RETURN_FALSE(data_file_fd >= 0);
-
+  ScopedTempFile data_file("CrAU_temp_data.XXXXXX", true);
   {
     off_t data_file_size = 0;
-    ScopedFdCloser data_file_fd_closer(&data_file_fd);
-    BlobFileWriter blob_file(data_file_fd, &data_file_size);
+    BlobFileWriter blob_file(data_file.fd(), &data_file_size);
     if (config.is_delta) {
       TEST_AND_RETURN_FALSE(config.source.partitions.size() ==
                             config.target.partitions.size());
     }
     PartitionConfig empty_part("");
+    std::vector<std::vector<AnnotatedOperation>> all_aops;
+    all_aops.resize(config.target.partitions.size());
+
+    std::vector<std::vector<CowMergeOperation>> all_merge_sequences;
+    all_merge_sequences.resize(config.target.partitions.size());
+
+    std::vector<size_t> all_cow_sizes(config.target.partitions.size(), 0);
+
+    std::vector<PartitionProcessor> partition_tasks{};
+    auto thread_count = std::min<int>(diff_utils::GetMaxThreads(),
+                                      config.target.partitions.size());
+    base::DelegateSimpleThreadPool thread_pool{"partition-thread-pool",
+                                               thread_count};
     for (size_t i = 0; i < config.target.partitions.size(); i++) {
       const PartitionConfig& old_part =
           config.is_delta ? config.source.partitions[i] : empty_part;
@@ -93,36 +207,49 @@
       unique_ptr<OperationsGenerator> strategy;
       if (!old_part.path.empty()) {
         // Delta update.
-        if (config.version.minor == kInPlaceMinorPayloadVersion) {
-          LOG(INFO) << "Using generator InplaceGenerator().";
-          strategy.reset(new InplaceGenerator());
-        } else {
-          LOG(INFO) << "Using generator ABGenerator().";
-          strategy.reset(new ABGenerator());
-        }
+        LOG(INFO) << "Using generator ABGenerator() for partition "
+                  << new_part.name;
+        strategy.reset(new ABGenerator());
       } else {
-        LOG(INFO) << "Using generator FullUpdateGenerator().";
+        LOG(INFO) << "Using generator FullUpdateGenerator() for partition "
+                  << new_part.name;
         strategy.reset(new FullUpdateGenerator());
       }
 
-      vector<AnnotatedOperation> aops;
       // Generate the operations using the strategy we selected above.
-      TEST_AND_RETURN_FALSE(strategy->GenerateOperations(
-          config, old_part, new_part, &blob_file, &aops));
+      partition_tasks.push_back(PartitionProcessor(config,
+                                                   old_part,
+                                                   new_part,
+                                                   &blob_file,
+                                                   &all_aops[i],
+                                                   &all_merge_sequences[i],
+                                                   &all_cow_sizes[i],
+                                                   std::move(strategy)));
+    }
+    thread_pool.Start();
+    for (auto& processor : partition_tasks) {
+      thread_pool.AddWork(&processor);
+    }
+    thread_pool.JoinAll();
 
-      // Filter the no-operations. OperationsGenerators should not output this
-      // kind of operations normally, but this is an extra step to fix that if
-      // happened.
-      diff_utils::FilterNoopOperations(&aops);
-
-      TEST_AND_RETURN_FALSE(payload.AddPartition(old_part, new_part, aops));
+    for (size_t i = 0; i < config.target.partitions.size(); i++) {
+      const PartitionConfig& old_part =
+          config.is_delta ? config.source.partitions[i] : empty_part;
+      const PartitionConfig& new_part = config.target.partitions[i];
+      TEST_AND_RETURN_FALSE(
+          payload.AddPartition(old_part,
+                               new_part,
+                               std::move(all_aops[i]),
+                               std::move(all_merge_sequences[i]),
+                               all_cow_sizes[i]));
     }
   }
+  data_file.CloseFd();
 
   LOG(INFO) << "Writing payload file...";
   // Write payload file to disk.
   TEST_AND_RETURN_FALSE(payload.WritePayload(
-      output_path, temp_file_path, private_key_path, metadata_size));
+      output_path, data_file.path(), private_key_path, metadata_size));
 
   LOG(INFO) << "All done. Successfully created delta file with "
             << "metadata size = " << *metadata_size;
diff --git a/payload_generator/delta_diff_utils.cc b/payload_generator/delta_diff_utils.cc
index 4ba6e24..3c025e1 100644
--- a/payload_generator/delta_diff_utils.cc
+++ b/payload_generator/delta_diff_utils.cc
@@ -83,103 +83,6 @@
 
 const int kBrotliCompressionQuality = 11;
 
-// Process a range of blocks from |range_start| to |range_end| in the extent at
-// position |*idx_p| of |extents|. If |do_remove| is true, this range will be
-// removed, which may cause the extent to be trimmed, split or removed entirely.
-// The value of |*idx_p| is updated to point to the next extent to be processed.
-// Returns true iff the next extent to process is a new or updated one.
-bool ProcessExtentBlockRange(vector<Extent>* extents,
-                             size_t* idx_p,
-                             const bool do_remove,
-                             uint64_t range_start,
-                             uint64_t range_end) {
-  size_t idx = *idx_p;
-  uint64_t start_block = (*extents)[idx].start_block();
-  uint64_t num_blocks = (*extents)[idx].num_blocks();
-  uint64_t range_size = range_end - range_start;
-
-  if (do_remove) {
-    if (range_size == num_blocks) {
-      // Remove the entire extent.
-      extents->erase(extents->begin() + idx);
-    } else if (range_end == num_blocks) {
-      // Trim the end of the extent.
-      (*extents)[idx].set_num_blocks(num_blocks - range_size);
-      idx++;
-    } else if (range_start == 0) {
-      // Trim the head of the extent.
-      (*extents)[idx].set_start_block(start_block + range_size);
-      (*extents)[idx].set_num_blocks(num_blocks - range_size);
-    } else {
-      // Trim the middle, splitting the remainder into two parts.
-      (*extents)[idx].set_num_blocks(range_start);
-      Extent e;
-      e.set_start_block(start_block + range_end);
-      e.set_num_blocks(num_blocks - range_end);
-      idx++;
-      extents->insert(extents->begin() + idx, e);
-    }
-  } else if (range_end == num_blocks) {
-    // Done with this extent.
-    idx++;
-  } else {
-    return false;
-  }
-
-  *idx_p = idx;
-  return true;
-}
-
-// Remove identical corresponding block ranges in |src_extents| and
-// |dst_extents|. Used for preventing moving of blocks onto themselves during
-// MOVE operations. The value of |total_bytes| indicates the actual length of
-// content; this may be slightly less than the total size of blocks, in which
-// case the last block is only partly occupied with data. Returns the total
-// number of bytes removed.
-size_t RemoveIdenticalBlockRanges(vector<Extent>* src_extents,
-                                  vector<Extent>* dst_extents,
-                                  const size_t total_bytes) {
-  size_t src_idx = 0;
-  size_t dst_idx = 0;
-  uint64_t src_offset = 0, dst_offset = 0;
-  size_t removed_bytes = 0, nonfull_block_bytes;
-  bool do_remove = false;
-  while (src_idx < src_extents->size() && dst_idx < dst_extents->size()) {
-    do_remove = ((*src_extents)[src_idx].start_block() + src_offset ==
-                 (*dst_extents)[dst_idx].start_block() + dst_offset);
-
-    uint64_t src_num_blocks = (*src_extents)[src_idx].num_blocks();
-    uint64_t dst_num_blocks = (*dst_extents)[dst_idx].num_blocks();
-    uint64_t min_num_blocks =
-        std::min(src_num_blocks - src_offset, dst_num_blocks - dst_offset);
-    uint64_t prev_src_offset = src_offset;
-    uint64_t prev_dst_offset = dst_offset;
-    src_offset += min_num_blocks;
-    dst_offset += min_num_blocks;
-
-    bool new_src = ProcessExtentBlockRange(
-        src_extents, &src_idx, do_remove, prev_src_offset, src_offset);
-    bool new_dst = ProcessExtentBlockRange(
-        dst_extents, &dst_idx, do_remove, prev_dst_offset, dst_offset);
-    if (new_src) {
-      src_offset = 0;
-    }
-    if (new_dst) {
-      dst_offset = 0;
-    }
-
-    if (do_remove)
-      removed_bytes += min_num_blocks * kBlockSize;
-  }
-
-  // If we removed the last block and this block is only partly used by file
-  // content, deduct the unused portion from the total removed byte count.
-  if (do_remove && (nonfull_block_bytes = total_bytes % kBlockSize))
-    removed_bytes -= kBlockSize - nonfull_block_bytes;
-
-  return removed_bytes;
-}
-
 // Storing a diff operation has more overhead than a replace operation in the
 // manifest; we need to store an additional src_sha256_hash, which is 32 bytes
 // and not compressible, and also src_extents, which could use anywhere from a
@@ -318,13 +221,11 @@
     return;
   }
 
-  if (!version_.InplaceUpdate()) {
-    if (!ABGenerator::FragmentOperations(
-            version_, &file_aops_, new_part_, blob_file_)) {
-      LOG(ERROR) << "Failed to fragment operations for " << name_;
-      failed_ = true;
-      return;
-    }
+  if (!ABGenerator::FragmentOperations(
+          version_, &file_aops_, new_part_, blob_file_)) {
+    LOG(ERROR) << "Failed to fragment operations for " << name_;
+    failed_ = true;
+    return;
   }
 
   LOG(INFO) << "Encoded file " << name_ << " (" << new_extents_blocks_
@@ -349,12 +250,13 @@
   if (old_file_iter != old_files_map.end())
     return old_file_iter->second;
 
-  // No old file match for the new file name, use a similar file with the
-  // shortest levenshtein distance.
+  // No old file matches the new file name. Use a similar file with the
+  // shortest Levenshtein distance instead.
   // This works great if the file has a version number in it, but even for
   // a completely new file, using a similar file can still help.
-  int min_distance = new_file_name.size();
-  const FilesystemInterface::File* old_file;
+  int min_distance =
+      LevenshteinDistance(new_file_name, old_files_map.begin()->first);
+  const FilesystemInterface::File* old_file = &old_files_map.begin()->second;
   for (const auto& pair : old_files_map) {
     int distance = LevenshteinDistance(new_file_name, pair.first);
     if (distance < min_distance) {
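This hunk seeds min_distance and old_file from the first map entry, so a candidate is always chosen; the removed lines seeded min_distance with the new file name's length and left old_file uninitialized when no candidate beat it. A self-contained sketch of that nearest-name fallback, using a hypothetical EditDistance() in place of the LevenshteinDistance() helper referenced here:

#include <algorithm>
#include <iterator>
#include <map>
#include <string>
#include <vector>

// Hypothetical stand-in for LevenshteinDistance(): classic O(n*m)
// dynamic-programming edit distance.
static int EditDistance(const std::string& a, const std::string& b) {
  std::vector<int> prev(b.size() + 1), cur(b.size() + 1);
  for (size_t j = 0; j <= b.size(); ++j) prev[j] = j;
  for (size_t i = 1; i <= a.size(); ++i) {
    cur[0] = i;
    for (size_t j = 1; j <= b.size(); ++j) {
      int subst = prev[j - 1] + (a[i - 1] != b[j - 1]);
      cur[j] = std::min({prev[j] + 1, cur[j - 1] + 1, subst});
    }
    prev.swap(cur);
  }
  return prev[b.size()];
}

// Returns the key in |names| closest to |target|, seeding the minimum with
// the first entry so some candidate is always returned.
static std::string ClosestName(const std::map<std::string, int>& names,
                               const std::string& target) {
  auto best = names.begin();
  int best_distance = EditDistance(target, best->first);
  for (auto it = std::next(names.begin()); it != names.end(); ++it) {
    int distance = EditDistance(target, it->first);
    if (distance < best_distance) {
      best_distance = distance;
      best = it;
    }
  }
  return best->first;
}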
@@ -447,12 +349,8 @@
     // from the same source blocks. At that time, this code can die. -adlr
     FilesystemInterface::File old_file =
         GetOldFile(old_files_map, new_file.name);
-    vector<Extent> old_file_extents;
-    if (version.InplaceUpdate())
-      old_file_extents =
-          FilterExtentRanges(old_file.extents, old_visited_blocks);
-    else
-      old_file_extents = FilterExtentRanges(old_file.extents, old_zero_blocks);
+    auto old_file_extents =
+        FilterExtentRanges(old_file.extents, old_zero_blocks);
     old_visited_blocks.AddExtents(old_file_extents);
 
     file_delta_processors.emplace_back(old_part.path,
@@ -541,21 +439,6 @@
                                            &old_block_ids,
                                            &new_block_ids));
 
-  // If the update is inplace, we map all the blocks that didn't move,
-  // regardless of the contents since they are already copied and no operation
-  // is required.
-  if (version.InplaceUpdate()) {
-    uint64_t num_blocks = std::min(old_num_blocks, new_num_blocks);
-    for (uint64_t block = 0; block < num_blocks; block++) {
-      if (old_block_ids[block] == new_block_ids[block] &&
-          !old_visited_blocks->ContainsBlock(block) &&
-          !new_visited_blocks->ContainsBlock(block)) {
-        old_visited_blocks->AddBlock(block);
-        new_visited_blocks->AddBlock(block);
-      }
-    }
-  }
-
   // A mapping from the block_id to the list of block numbers with that block id
   // in the old partition. This is used to lookup where in the old partition
   // is a block from the new partition.
@@ -602,10 +485,6 @@
     AppendBlockToExtents(&old_identical_blocks,
                          old_blocks_map_it->second.back());
     AppendBlockToExtents(&new_identical_blocks, block);
-    // We can't reuse source blocks in minor version 1 because the cycle
-    // breaking algorithm used in the in-place update doesn't support that.
-    if (version.InplaceUpdate())
-      old_blocks_map_it->second.pop_back();
   }
 
   if (chunk_blocks == -1)
@@ -657,9 +536,7 @@
       aops->emplace_back();
       AnnotatedOperation* aop = &aops->back();
       aop->name = "<identical-blocks>";
-      aop->op.set_type(version.OperationAllowed(InstallOperation::SOURCE_COPY)
-                           ? InstallOperation::SOURCE_COPY
-                           : InstallOperation::MOVE);
+      aop->op.set_type(InstallOperation::SOURCE_COPY);
 
       uint64_t chunk_num_blocks =
           std::min(static_cast<uint64_t>(extent.num_blocks()) - op_block_offset,
@@ -704,6 +581,11 @@
   InstallOperation operation;
 
   uint64_t total_blocks = utils::BlocksInExtents(new_extents);
+  if (chunk_blocks == 0) {
+    LOG(ERROR) << "Invalid number of chunk_blocks. Cannot be 0.";
+    return false;
+  }
+
   if (chunk_blocks == -1)
     chunk_blocks = total_blocks;
 
@@ -732,13 +614,8 @@
 
     // Check if the operation writes nothing.
     if (operation.dst_extents_size() == 0) {
-      if (operation.type() == InstallOperation::MOVE) {
-        LOG(INFO) << "Empty MOVE operation (" << name << "), skipping";
-        continue;
-      } else {
-        LOG(ERROR) << "Empty non-MOVE operation";
-        return false;
-      }
+      LOG(ERROR) << "Empty non-MOVE operation";
+      return false;
     }
 
     // Now, insert into the list of operations.
@@ -828,19 +705,18 @@
 
   // Disable bsdiff, and puffdiff when the data is too big.
   bool bsdiff_allowed =
-      version.OperationAllowed(InstallOperation::SOURCE_BSDIFF) ||
-      version.OperationAllowed(InstallOperation::BSDIFF);
+      version.OperationAllowed(InstallOperation::SOURCE_BSDIFF);
   if (bsdiff_allowed &&
       blocks_to_read * kBlockSize > kMaxBsdiffDestinationSize) {
-    LOG(INFO) << "bsdiff blacklisted, data too big: "
-              << blocks_to_read * kBlockSize << " bytes";
+    LOG(INFO) << "bsdiff ignored, data too big: " << blocks_to_read * kBlockSize
+              << " bytes";
     bsdiff_allowed = false;
   }
 
   bool puffdiff_allowed = version.OperationAllowed(InstallOperation::PUFFDIFF);
   if (puffdiff_allowed &&
       blocks_to_read * kBlockSize > kMaxPuffdiffDestinationSize) {
-    LOG(INFO) << "puffdiff blacklisted, data too big: "
+    LOG(INFO) << "puffdiff ignored, data too big: "
               << blocks_to_read * kBlockSize << " bytes";
     puffdiff_allowed = false;
   }
@@ -878,9 +754,7 @@
                                              kBlockSize));
     if (old_data == new_data) {
       // No change in data.
-      operation.set_type(version.OperationAllowed(InstallOperation::SOURCE_COPY)
-                             ? InstallOperation::SOURCE_COPY
-                             : InstallOperation::MOVE);
+      operation.set_type(InstallOperation::SOURCE_COPY);
       data_blob = brillo::Blob();
     } else if (IsDiffOperationBetter(
                    operation, data_blob.size(), 0, src_extents.size())) {
@@ -892,7 +766,7 @@
         ScopedPathUnlinker unlinker(patch.value());
 
         std::unique_ptr<bsdiff::PatchWriterInterface> bsdiff_patch_writer;
-        InstallOperation::Type operation_type = InstallOperation::BSDIFF;
+        InstallOperation::Type operation_type = InstallOperation::SOURCE_BSDIFF;
         if (version.OperationAllowed(InstallOperation::BROTLI_BSDIFF)) {
           bsdiff_patch_writer =
               bsdiff::CreateBSDF2PatchWriter(patch.value(),
@@ -901,9 +775,6 @@
           operation_type = InstallOperation::BROTLI_BSDIFF;
         } else {
           bsdiff_patch_writer = bsdiff::CreateBsdiffPatchWriter(patch.value());
-          if (version.OperationAllowed(InstallOperation::SOURCE_BSDIFF)) {
-            operation_type = InstallOperation::SOURCE_BSDIFF;
-          }
         }
 
         brillo::Blob bsdiff_delta;
@@ -951,17 +822,13 @@
         // Only Puffdiff if both files have at least one deflate left.
         if (!src_deflates.empty() && !dst_deflates.empty()) {
           brillo::Blob puffdiff_delta;
-          string temp_file_path;
-          TEST_AND_RETURN_FALSE(utils::MakeTempFile(
-              "puffdiff-delta.XXXXXX", &temp_file_path, nullptr));
-          ScopedPathUnlinker temp_file_unlinker(temp_file_path);
-
+          ScopedTempFile temp_file("puffdiff-delta.XXXXXX");
           // Perform PuffDiff operation.
           TEST_AND_RETURN_FALSE(puffin::PuffDiff(old_data,
                                                  new_data,
                                                  src_deflates,
                                                  dst_deflates,
-                                                 temp_file_path,
+                                                 temp_file.path(),
                                                  &puffdiff_delta));
           TEST_AND_RETURN_FALSE(puffdiff_delta.size() > 0);
           if (IsDiffOperationBetter(operation,
@@ -976,23 +843,14 @@
     }
   }
 
-  // Remove identical src/dst block ranges in MOVE operations.
-  if (operation.type() == InstallOperation::MOVE) {
-    auto removed_bytes =
-        RemoveIdenticalBlockRanges(&src_extents, &dst_extents, new_data.size());
-    operation.set_src_length(old_data.size() - removed_bytes);
-    operation.set_dst_length(new_data.size() - removed_bytes);
-  }
-
   // WARNING: We always set legacy |src_length| and |dst_length| fields for
   // BSDIFF. For SOURCE_BSDIFF we only set them for minor version 3 and
   // lower. This is needed because we used to use these two parameters in the
   // SOURCE_BSDIFF for minor version 3 and lower, but we do not need them
   // anymore in higher minor versions. This means if we stop adding these
   // parameters for those minor versions, the delta payloads will be invalid.
-  if (operation.type() == InstallOperation::BSDIFF ||
-      (operation.type() == InstallOperation::SOURCE_BSDIFF &&
-       version.minor <= kOpSrcHashMinorPayloadVersion)) {
+  if (operation.type() == InstallOperation::SOURCE_BSDIFF &&
+      version.minor <= kOpSrcHashMinorPayloadVersion) {
     operation.set_src_length(old_data.size());
     operation.set_dst_length(new_data.size());
   }
@@ -1021,22 +879,6 @@
           op_type == InstallOperation::DISCARD);
 }
 
-// Returns true if |op| is a no-op operation that doesn't do any useful work
-// (e.g., a move operation that copies blocks onto themselves).
-bool IsNoopOperation(const InstallOperation& op) {
-  return (op.type() == InstallOperation::MOVE &&
-          ExpandExtents(op.src_extents()) == ExpandExtents(op.dst_extents()));
-}
-
-void FilterNoopOperations(vector<AnnotatedOperation>* ops) {
-  ops->erase(std::remove_if(ops->begin(),
-                            ops->end(),
-                            [](const AnnotatedOperation& aop) {
-                              return IsNoopOperation(aop.op);
-                            }),
-             ops->end());
-}
-
 bool InitializePartitionInfo(const PartitionConfig& part, PartitionInfo* info) {
   info->set_size(part.size);
   HashCalculator hasher;
@@ -1092,7 +934,7 @@
   if (magic != EXT2_SUPER_MAGIC)
     return false;
 
-  // Sanity check the parameters.
+  // Validate the parameters.
   TEST_AND_RETURN_FALSE(log_block_size >= EXT2_MIN_BLOCK_LOG_SIZE &&
                         log_block_size <= EXT2_MAX_BLOCK_LOG_SIZE);
   TEST_AND_RETURN_FALSE(block_count > 0);
diff --git a/payload_generator/delta_diff_utils.h b/payload_generator/delta_diff_utils.h
index 2211b30..c75d16d 100644
--- a/payload_generator/delta_diff_utils.h
+++ b/payload_generator/delta_diff_utils.h
@@ -127,14 +127,6 @@
 // Returns true if an operation with type |op_type| has no |src_extents|.
 bool IsNoSourceOperation(InstallOperation::Type op_type);
 
-// Returns true if |op| is a no-op operation that doesn't do any useful work
-// (e.g., a move operation that copies blocks onto themselves).
-bool IsNoopOperation(const InstallOperation& op);
-
-// Filters all the operations that are no-op, maintaining the relative order
-// of the rest of the operations.
-void FilterNoopOperations(std::vector<AnnotatedOperation>* ops);
-
 bool InitializePartitionInfo(const PartitionConfig& partition,
                              PartitionInfo* info);
 
diff --git a/payload_generator/delta_diff_utils_unittest.cc b/payload_generator/delta_diff_utils_unittest.cc
index b2950e8..f2db1bd 100644
--- a/payload_generator/delta_diff_utils_unittest.cc
+++ b/payload_generator/delta_diff_utils_unittest.cc
@@ -69,13 +69,12 @@
 // Create a fake filesystem of the given |size| and initialize the partition
 // holding it in the PartitionConfig |part|.
 void CreatePartition(PartitionConfig* part,
-                     const string& pattern,
+                     ScopedTempFile* part_file,
                      uint64_t block_size,
                      off_t size) {
-  int fd = -1;
-  ASSERT_TRUE(utils::MakeTempFile(pattern.c_str(), &part->path, &fd));
-  ASSERT_EQ(0, ftruncate(fd, size));
-  ASSERT_EQ(0, close(fd));
+  part->path = part_file->path();
+  ASSERT_EQ(0, ftruncate(part_file->fd(), size));
+  part_file->CloseFd();
   part->fs_interface.reset(new FakeFilesystem(block_size, size / block_size));
   part->size = size;
 }
@@ -112,31 +111,21 @@
 
   void SetUp() override {
     CreatePartition(&old_part_,
-                    "DeltaDiffUtilsTest-old_part-XXXXXX",
+                    &old_part_file_,
                     block_size_,
                     block_size_ * kDefaultBlockCount);
     CreatePartition(&new_part_,
-                    "DeltaDiffUtilsTest-old_part-XXXXXX",
+                    &new_part_file_,
                     block_size_,
                     block_size_ * kDefaultBlockCount);
-    ASSERT_TRUE(utils::MakeTempFile(
-        "DeltaDiffUtilsTest-blob-XXXXXX", &blob_path_, &blob_fd_));
-  }
-
-  void TearDown() override {
-    unlink(old_part_.path.c_str());
-    unlink(new_part_.path.c_str());
-    if (blob_fd_ != -1)
-      close(blob_fd_);
-    unlink(blob_path_.c_str());
   }
 
   // Helper function to call DeltaMovedAndZeroBlocks() using this class' data
   // members. This simply avoids repeating all the arguments that never change.
   bool RunDeltaMovedAndZeroBlocks(ssize_t chunk_blocks,
                                   uint32_t minor_version) {
-    BlobFileWriter blob_file(blob_fd_, &blob_size_);
-    PayloadVersion version(kChromeOSMajorPayloadVersion, minor_version);
+    BlobFileWriter blob_file(tmp_blob_file_.fd(), &blob_size_);
+    PayloadVersion version(kBrilloMajorPayloadVersion, minor_version);
     ExtentRanges old_zero_blocks;
     return diff_utils::DeltaMovedAndZeroBlocks(&aops_,
                                                old_part_.path,
@@ -155,10 +144,11 @@
   // with
   PartitionConfig old_part_{"part"};
   PartitionConfig new_part_{"part"};
+  ScopedTempFile old_part_file_{"DeltaDiffUtilsTest-old_part-XXXXXX", true};
+  ScopedTempFile new_part_file_{"DeltaDiffUtilsTest-new_part-XXXXXX", true};
 
   // The file holding the output blob from the various diff utils functions.
-  string blob_path_;
-  int blob_fd_{-1};
+  ScopedTempFile tmp_blob_file_{"DeltaDiffUtilsTest-blob-XXXXXX", true};
   off_t blob_size_{0};
 
   size_t block_size_{kBlockSize};
@@ -173,7 +163,7 @@
   new_part_.verity.hash_tree_extent = ExtentForRange(20, 30);
   new_part_.verity.fec_extent = ExtentForRange(40, 50);
 
-  BlobFileWriter blob_file(blob_fd_, &blob_size_);
+  BlobFileWriter blob_file(tmp_blob_file_.fd(), &blob_size_);
   EXPECT_TRUE(diff_utils::DeltaReadPartition(
       &aops_,
       old_part_,
@@ -194,164 +184,6 @@
   }
 }
 
-TEST_F(DeltaDiffUtilsTest, MoveSmallTest) {
-  brillo::Blob data_blob(block_size_);
-  test_utils::FillWithData(&data_blob);
-
-  // The old file is on a different block than the new one.
-  vector<Extent> old_extents = {ExtentForRange(11, 1)};
-  vector<Extent> new_extents = {ExtentForRange(1, 1)};
-
-  EXPECT_TRUE(WriteExtents(old_part_.path, old_extents, kBlockSize, data_blob));
-  EXPECT_TRUE(WriteExtents(new_part_.path, new_extents, kBlockSize, data_blob));
-
-  brillo::Blob data;
-  InstallOperation op;
-  EXPECT_TRUE(diff_utils::ReadExtentsToDiff(
-      old_part_.path,
-      new_part_.path,
-      old_extents,
-      new_extents,
-      {},  // old_deflates
-      {},  // new_deflates
-      PayloadVersion(kChromeOSMajorPayloadVersion, kInPlaceMinorPayloadVersion),
-      &data,
-      &op));
-  EXPECT_TRUE(data.empty());
-
-  EXPECT_TRUE(op.has_type());
-  EXPECT_EQ(InstallOperation::MOVE, op.type());
-  EXPECT_FALSE(op.has_data_offset());
-  EXPECT_FALSE(op.has_data_length());
-  EXPECT_EQ(1, op.src_extents_size());
-  EXPECT_EQ(kBlockSize, op.src_length());
-  EXPECT_EQ(1, op.dst_extents_size());
-  EXPECT_EQ(kBlockSize, op.dst_length());
-  EXPECT_EQ(utils::BlocksInExtents(op.src_extents()),
-            utils::BlocksInExtents(op.dst_extents()));
-  EXPECT_EQ(1U, utils::BlocksInExtents(op.dst_extents()));
-}
-
-TEST_F(DeltaDiffUtilsTest, MoveWithSameBlock) {
-  // Setup the old/new files so that it has immobile chunks; we make sure to
-  // utilize all sub-cases of such chunks: blocks 21--22 induce a split (src)
-  // and complete removal (dst), whereas blocks 24--25 induce trimming of the
-  // tail (src) and head (dst) of extents. The final block (29) is used for
-  // ensuring we properly account for the number of bytes removed in cases where
-  // the last block is partly filled. The detailed configuration:
-  //
-  // Old:  [ 20     21 22     23     24 25 ] [ 28     29 ]
-  // New:  [ 18 ] [ 21 22 ] [ 20 ] [ 24 25     26 ] [ 29 ]
-  // Same:          ^^ ^^            ^^ ^^            ^^
-  vector<Extent> old_extents = {ExtentForRange(20, 6), ExtentForRange(28, 2)};
-  vector<Extent> new_extents = {ExtentForRange(18, 1),
-                                ExtentForRange(21, 2),
-                                ExtentForRange(20, 1),
-                                ExtentForRange(24, 3),
-                                ExtentForRange(29, 1)};
-
-  uint64_t num_blocks = utils::BlocksInExtents(old_extents);
-  EXPECT_EQ(num_blocks, utils::BlocksInExtents(new_extents));
-
-  // The size of the data should match the total number of blocks. Each block
-  // has a different content.
-  brillo::Blob file_data;
-  for (uint64_t i = 0; i < num_blocks; ++i) {
-    file_data.resize(file_data.size() + kBlockSize, 'a' + i);
-  }
-
-  EXPECT_TRUE(WriteExtents(old_part_.path, old_extents, kBlockSize, file_data));
-  EXPECT_TRUE(WriteExtents(new_part_.path, new_extents, kBlockSize, file_data));
-
-  brillo::Blob data;
-  InstallOperation op;
-  EXPECT_TRUE(diff_utils::ReadExtentsToDiff(
-      old_part_.path,
-      new_part_.path,
-      old_extents,
-      new_extents,
-      {},  // old_deflates
-      {},  // new_deflates
-      PayloadVersion(kChromeOSMajorPayloadVersion, kInPlaceMinorPayloadVersion),
-      &data,
-      &op));
-
-  EXPECT_TRUE(data.empty());
-
-  EXPECT_TRUE(op.has_type());
-  EXPECT_EQ(InstallOperation::MOVE, op.type());
-  EXPECT_FALSE(op.has_data_offset());
-  EXPECT_FALSE(op.has_data_length());
-
-  // The expected old and new extents that actually moved. See comment above.
-  old_extents = {
-      ExtentForRange(20, 1), ExtentForRange(23, 1), ExtentForRange(28, 1)};
-  new_extents = {
-      ExtentForRange(18, 1), ExtentForRange(20, 1), ExtentForRange(26, 1)};
-  num_blocks = utils::BlocksInExtents(old_extents);
-
-  EXPECT_EQ(num_blocks * kBlockSize, op.src_length());
-  EXPECT_EQ(num_blocks * kBlockSize, op.dst_length());
-
-  EXPECT_EQ(old_extents.size(), static_cast<size_t>(op.src_extents_size()));
-  for (int i = 0; i < op.src_extents_size(); i++) {
-    EXPECT_EQ(old_extents[i].start_block(), op.src_extents(i).start_block())
-        << "i == " << i;
-    EXPECT_EQ(old_extents[i].num_blocks(), op.src_extents(i).num_blocks())
-        << "i == " << i;
-  }
-
-  EXPECT_EQ(new_extents.size(), static_cast<size_t>(op.dst_extents_size()));
-  for (int i = 0; i < op.dst_extents_size(); i++) {
-    EXPECT_EQ(new_extents[i].start_block(), op.dst_extents(i).start_block())
-        << "i == " << i;
-    EXPECT_EQ(new_extents[i].num_blocks(), op.dst_extents(i).num_blocks())
-        << "i == " << i;
-  }
-}
-
-TEST_F(DeltaDiffUtilsTest, BsdiffSmallTest) {
-  // Test a BSDIFF operation from block 1 to block 2.
-  brillo::Blob data_blob(kBlockSize);
-  test_utils::FillWithData(&data_blob);
-
-  // The old file is on a different block than the new one.
-  vector<Extent> old_extents = {ExtentForRange(1, 1)};
-  vector<Extent> new_extents = {ExtentForRange(2, 1)};
-
-  EXPECT_TRUE(WriteExtents(old_part_.path, old_extents, kBlockSize, data_blob));
-  // Modify one byte in the new file.
-  data_blob[0]++;
-  EXPECT_TRUE(WriteExtents(new_part_.path, new_extents, kBlockSize, data_blob));
-
-  brillo::Blob data;
-  InstallOperation op;
-  EXPECT_TRUE(diff_utils::ReadExtentsToDiff(
-      old_part_.path,
-      new_part_.path,
-      old_extents,
-      new_extents,
-      {},  // old_deflates
-      {},  // new_deflates
-      PayloadVersion(kChromeOSMajorPayloadVersion, kInPlaceMinorPayloadVersion),
-      &data,
-      &op));
-
-  EXPECT_FALSE(data.empty());
-
-  EXPECT_TRUE(op.has_type());
-  EXPECT_EQ(InstallOperation::BSDIFF, op.type());
-  EXPECT_FALSE(op.has_data_offset());
-  EXPECT_FALSE(op.has_data_length());
-  EXPECT_EQ(1, op.src_extents_size());
-  EXPECT_EQ(kBlockSize, op.src_length());
-  EXPECT_EQ(1, op.dst_extents_size());
-  EXPECT_EQ(kBlockSize, op.dst_length());
-  EXPECT_EQ(utils::BlocksInExtents(op.src_extents()),
-            utils::BlocksInExtents(op.dst_extents()));
-  EXPECT_EQ(1U, utils::BlocksInExtents(op.dst_extents()));
-}
-
 TEST_F(DeltaDiffUtilsTest, ReplaceSmallTest) {
   // The old file is on a different block than the new one.
   vector<Extent> old_extents = {ExtentForRange(1, 1)};
@@ -383,8 +215,7 @@
         new_extents,
         {},  // old_deflates
         {},  // new_deflates
-        PayloadVersion(kChromeOSMajorPayloadVersion,
-                       kInPlaceMinorPayloadVersion),
+        PayloadVersion(kBrilloMajorPayloadVersion, kSourceMinorPayloadVersion),
         &data,
         &op));
     EXPECT_FALSE(data.empty());
@@ -426,7 +257,7 @@
       new_extents,
       {},  // old_deflates
       {},  // new_deflates
-      PayloadVersion(kChromeOSMajorPayloadVersion, kSourceMinorPayloadVersion),
+      PayloadVersion(kBrilloMajorPayloadVersion, kSourceMinorPayloadVersion),
       &data,
       &op));
   EXPECT_TRUE(data.empty());
@@ -460,7 +291,7 @@
       new_extents,
       {},  // old_deflates
       {},  // new_deflates
-      PayloadVersion(kChromeOSMajorPayloadVersion, kSourceMinorPayloadVersion),
+      PayloadVersion(kBrilloMajorPayloadVersion, kSourceMinorPayloadVersion),
       &data,
       &op));
 
@@ -500,49 +331,6 @@
   EXPECT_EQ(InstallOperation::REPLACE_BZ, op.type());
 }
 
-TEST_F(DeltaDiffUtilsTest, IsNoopOperationTest) {
-  InstallOperation op;
-  op.set_type(InstallOperation::REPLACE_BZ);
-  EXPECT_FALSE(diff_utils::IsNoopOperation(op));
-  op.set_type(InstallOperation::MOVE);
-  EXPECT_TRUE(diff_utils::IsNoopOperation(op));
-  *(op.add_src_extents()) = ExtentForRange(3, 2);
-  *(op.add_dst_extents()) = ExtentForRange(3, 2);
-  EXPECT_TRUE(diff_utils::IsNoopOperation(op));
-  *(op.add_src_extents()) = ExtentForRange(7, 5);
-  *(op.add_dst_extents()) = ExtentForRange(7, 5);
-  EXPECT_TRUE(diff_utils::IsNoopOperation(op));
-  *(op.add_src_extents()) = ExtentForRange(20, 2);
-  *(op.add_dst_extents()) = ExtentForRange(20, 1);
-  *(op.add_dst_extents()) = ExtentForRange(21, 1);
-  EXPECT_TRUE(diff_utils::IsNoopOperation(op));
-  *(op.add_src_extents()) = ExtentForRange(24, 1);
-  *(op.add_dst_extents()) = ExtentForRange(25, 1);
-  EXPECT_FALSE(diff_utils::IsNoopOperation(op));
-}
-
-TEST_F(DeltaDiffUtilsTest, FilterNoopOperations) {
-  AnnotatedOperation aop1;
-  aop1.op.set_type(InstallOperation::REPLACE_BZ);
-  *(aop1.op.add_dst_extents()) = ExtentForRange(3, 2);
-  aop1.name = "aop1";
-
-  AnnotatedOperation aop2 = aop1;
-  aop2.name = "aop2";
-
-  AnnotatedOperation noop;
-  noop.op.set_type(InstallOperation::MOVE);
-  *(noop.op.add_src_extents()) = ExtentForRange(3, 2);
-  *(noop.op.add_dst_extents()) = ExtentForRange(3, 2);
-  noop.name = "noop";
-
-  vector<AnnotatedOperation> ops = {noop, aop1, noop, noop, aop2, noop};
-  diff_utils::FilterNoopOperations(&ops);
-  EXPECT_EQ(2u, ops.size());
-  EXPECT_EQ("aop1", ops[0].name);
-  EXPECT_EQ("aop2", ops[1].name);
-}
-
 // Test the simple case where all the blocks are different and no new blocks are
 // zeroed.
 TEST_F(DeltaDiffUtilsTest, NoZeroedOrUniqueBlocksDetected) {
@@ -550,7 +338,7 @@
   InitializePartitionWithUniqueBlocks(new_part_, block_size_, 42);
 
   EXPECT_TRUE(RunDeltaMovedAndZeroBlocks(-1,  // chunk_blocks
-                                         kInPlaceMinorPayloadVersion));
+                                         kSourceMinorPayloadVersion));
 
   EXPECT_EQ(0U, old_visited_blocks_.blocks());
   EXPECT_EQ(0U, new_visited_blocks_.blocks());
@@ -558,29 +346,6 @@
   EXPECT_TRUE(aops_.empty());
 }
 
-// Test that when the partitions have identical blocks in the same positions no
-// MOVE operation is performed and all the blocks are handled.
-TEST_F(DeltaDiffUtilsTest, IdenticalPartitionsDontMove) {
-  InitializePartitionWithUniqueBlocks(old_part_, block_size_, 42);
-  InitializePartitionWithUniqueBlocks(new_part_, block_size_, 42);
-
-  // Mark some of the blocks as already visited.
-  vector<Extent> already_visited = {ExtentForRange(5, 10),
-                                    ExtentForRange(25, 10)};
-  old_visited_blocks_.AddExtents(already_visited);
-  new_visited_blocks_.AddExtents(already_visited);
-
-  // Most of the blocks rest in the same place, but there's no need for MOVE
-  // operations on those blocks.
-  EXPECT_TRUE(RunDeltaMovedAndZeroBlocks(-1,  // chunk_blocks
-                                         kInPlaceMinorPayloadVersion));
-
-  EXPECT_EQ(kDefaultBlockCount, old_visited_blocks_.blocks());
-  EXPECT_EQ(kDefaultBlockCount, new_visited_blocks_.blocks());
-  EXPECT_EQ(0, blob_size_);
-  EXPECT_TRUE(aops_.empty());
-}
-
 // Test that when the partitions have identical blocks in the same positions
 // MOVE operation is performed and all the blocks are handled.
 TEST_F(DeltaDiffUtilsTest, IdenticalBlocksAreCopiedFromSource) {
@@ -701,16 +466,14 @@
   EXPECT_TRUE(WriteExtents(old_part_.path, old_zeros, block_size_, zeros_data));
 
   EXPECT_TRUE(RunDeltaMovedAndZeroBlocks(5,  // chunk_blocks
-                                         kInPlaceMinorPayloadVersion));
+                                         kSourceMinorPayloadVersion));
 
-  // Zeroed blocks from old_visited_blocks_ were copied over, so me actually
-  // use them regardless of the trivial MOVE operation not being emitted.
+  // Zeroed blocks from |old_visited_blocks_| were copied over.
   EXPECT_EQ(old_zeros,
             old_visited_blocks_.GetExtentsForBlockCount(
                 old_visited_blocks_.blocks()));
 
-  // All the new zeroed blocks should be used, part with REPLACE_BZ and part
-  // trivial MOVE operations (not included).
+  // All the new zeroed blocks should be used with REPLACE_BZ.
   EXPECT_EQ(new_zeros,
             new_visited_blocks_.GetExtentsForBlockCount(
                 new_visited_blocks_.blocks()));
@@ -721,7 +484,8 @@
       // This range should be split.
       ExtentForRange(30, 5),
       ExtentForRange(35, 5),
-      ExtentForRange(40, 3),
+      ExtentForRange(40, 5),
+      ExtentForRange(45, 5),
   };
 
   EXPECT_EQ(expected_op_extents.size(), aops_.size());
@@ -821,6 +585,8 @@
       "update_engine");
   EXPECT_EQ(diff_utils::GetOldFile(old_files_map, "bin/delta_generator").name,
             "delta_generator");
+  // Check file name with minimum size.
+  EXPECT_EQ(diff_utils::GetOldFile(old_files_map, "a").name, "filename");
 }
 
 }  // namespace chromeos_update_engine
diff --git a/payload_generator/ext2_filesystem_unittest.cc b/payload_generator/ext2_filesystem_unittest.cc
index 54600e9..88e1538 100644
--- a/payload_generator/ext2_filesystem_unittest.cc
+++ b/payload_generator/ext2_filesystem_unittest.cc
@@ -62,7 +62,7 @@
 class Ext2FilesystemTest : public ::testing::Test {};
 
 TEST_F(Ext2FilesystemTest, InvalidFilesystem) {
-  test_utils::ScopedTempFile fs_filename_{"Ext2FilesystemTest-XXXXXX"};
+  ScopedTempFile fs_filename_{"Ext2FilesystemTest-XXXXXX"};
   ASSERT_EQ(0, truncate(fs_filename_.path().c_str(), kDefaultFilesystemSize));
   unique_ptr<Ext2Filesystem> fs =
       Ext2Filesystem::CreateFromFile(fs_filename_.path());
diff --git a/payload_generator/extent_ranges.cc b/payload_generator/extent_ranges.cc
index 0e3f087..2098639 100644
--- a/payload_generator/extent_ranges.cc
+++ b/payload_generator/extent_ranges.cc
@@ -27,7 +27,6 @@
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_generator/extent_utils.h"
 
-using std::set;
 using std::vector;
 
 namespace chromeos_update_engine {
@@ -203,6 +202,15 @@
   }
 }
 
+bool ExtentRanges::OverlapsWithExtent(const Extent& extent) const {
+  for (const auto& entry : extent_set_) {
+    if (ExtentsOverlap(entry, extent)) {
+      return true;
+    }
+  }
+  return false;
+}
+
 bool ExtentRanges::ContainsBlock(uint64_t block) const {
   auto lower = extent_set_.lower_bound(ExtentForRange(block, 1));
   // The block could be on the extent before the one in |lower|.
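A small usage sketch for the new ExtentRanges::OverlapsWithExtent(), assuming the AddExtents() and ExtentForRange() helpers used elsewhere in this patch; note the implementation above scans extent_set_ linearly:

#include <vector>

#include "update_engine/payload_generator/extent_ranges.h"

namespace chromeos_update_engine {

// Sketch only: mark [10,15) and [30,35) as used, then probe two extents.
void OverlapExample() {
  ExtentRanges visited;
  visited.AddExtents({ExtentForRange(10, 5), ExtentForRange(30, 5)});
  bool hit = visited.OverlapsWithExtent(ExtentForRange(12, 2));   // [12,14) hits [10,15)
  bool miss = visited.OverlapsWithExtent(ExtentForRange(20, 5));  // [20,25) hits nothing
  (void)hit;
  (void)miss;
}

}  // namespace chromeos_update_engine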
diff --git a/payload_generator/extent_ranges.h b/payload_generator/extent_ranges.h
index 62ffff4..68aa27f 100644
--- a/payload_generator/extent_ranges.h
+++ b/payload_generator/extent_ranges.h
@@ -63,6 +63,9 @@
   void AddRanges(const ExtentRanges& ranges);
   void SubtractRanges(const ExtentRanges& ranges);
 
+  // Returns true if the input extent overlaps with the current ExtentRanges.
+  bool OverlapsWithExtent(const Extent& extent) const;
+
   // Returns whether the block |block| is in this ExtentRange.
   bool ContainsBlock(uint64_t block) const;
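
A minimal usage sketch for the new OverlapsWithExtent() helper, assuming the
ExtentForRange() and AddExtent() helpers declared alongside this class; the
snippet is illustrative only and not part of the change:

    #include <base/logging.h>

    #include "update_engine/payload_generator/extent_ranges.h"

    using chromeos_update_engine::ExtentForRange;
    using chromeos_update_engine::ExtentRanges;

    void OverlapsWithExtentSketch() {
      ExtentRanges ranges;
      ranges.AddExtent(ExtentForRange(10, 5));  // Covers blocks [10, 14].
      CHECK(ranges.OverlapsWithExtent(ExtentForRange(12, 1)));   // Overlaps.
      CHECK(!ranges.OverlapsWithExtent(ExtentForRange(20, 3)));  // Disjoint.
    }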
 
diff --git a/payload_generator/extent_ranges_unittest.cc b/payload_generator/extent_ranges_unittest.cc
index 2bcffed..f55bb73 100644
--- a/payload_generator/extent_ranges_unittest.cc
+++ b/payload_generator/extent_ranges_unittest.cc
@@ -18,6 +18,7 @@
 
 #include <vector>
 
+#include <base/stl_util.h>
 #include <gtest/gtest.h>
 
 #include "update_engine/common/test_utils.h"
@@ -51,9 +52,9 @@
   }
 }
 
-#define EXPECT_RANGE_EQ(ranges, var)                      \
-  do {                                                    \
-    ExpectRangeEq(ranges, var, arraysize(var), __LINE__); \
+#define EXPECT_RANGE_EQ(ranges, var)                       \
+  do {                                                     \
+    ExpectRangeEq(ranges, var, base::size(var), __LINE__); \
   } while (0)
 
 void ExpectRangesOverlapOrTouch(uint64_t a_start,
diff --git a/payload_generator/extent_utils.cc b/payload_generator/extent_utils.cc
index c0c7643..2efef12 100644
--- a/payload_generator/extent_utils.cc
+++ b/payload_generator/extent_utils.cc
@@ -155,4 +155,10 @@
   return a.start_block() == b.start_block() && a.num_blocks() == b.num_blocks();
 }
 
+std::ostream& operator<<(std::ostream& out, const Extent& extent) {
+  out << "[" << extent.start_block() << " - "
+      << extent.start_block() + extent.num_blocks() - 1 << "]";
+  return out;
+}
+
 }  // namespace chromeos_update_engine
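
The new stream operator prints an extent as an inclusive block range, e.g.
"[10 - 14]" for a 5-block extent starting at block 10. A minimal sketch of its
use (illustrative only):

    #include <iostream>

    #include "update_engine/payload_generator/extent_utils.h"
    #include "update_engine/update_metadata.pb.h"

    int main() {
      chromeos_update_engine::Extent extent;
      extent.set_start_block(10);
      extent.set_num_blocks(5);
      std::cout << extent << std::endl;  // Prints "[10 - 14]".
      return 0;
    }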
diff --git a/payload_generator/extent_utils.h b/payload_generator/extent_utils.h
index 9763b1f..7aa614a 100644
--- a/payload_generator/extent_utils.h
+++ b/payload_generator/extent_utils.h
@@ -20,6 +20,8 @@
 #include <string>
 #include <vector>
 
+#include <base/logging.h>
+
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/update_metadata.pb.h"
 
@@ -83,6 +85,45 @@
 
 bool operator==(const Extent& a, const Extent& b);
 
+// TODO(zhangkelvin) This is ugly. Rewrite using C++20's coroutine once
+// that's available. Unfortunately with C++17 this is the best I could do.
+
+// An iterator that takes a sequence of extents and iterates over the blocks
+// inside this sequence of extents.
+// Example usage:
+
+// BlockIterator it1{src_extents};
+// while (!it1.is_end()) {
+//   auto block = *it1;
+//   // Do stuff with |block|.
+//   ++it1;
+// }
+struct BlockIterator {
+  explicit BlockIterator(
+      const google::protobuf::RepeatedPtrField<Extent>& src_extents)
+      : src_extents_(src_extents) {}
+
+  BlockIterator& operator++() {
+    CHECK_LT(cur_extent_, src_extents_.size());
+    block_offset_++;
+    if (block_offset_ >= src_extents_[cur_extent_].num_blocks()) {
+      cur_extent_++;
+      block_offset_ = 0;
+    }
+    return *this;
+  }
+
+  [[nodiscard]] bool is_end() { return cur_extent_ >= src_extents_.size(); }
+  [[nodiscard]] uint64_t operator*() {
+    return src_extents_[cur_extent_].start_block() + block_offset_;
+  }
+
+  const google::protobuf::RepeatedPtrField<Extent>& src_extents_;
+  int cur_extent_ = 0;
+  size_t block_offset_ = 0;
+};
+
+std::ostream& operator<<(std::ostream& out, const Extent& extent);
+
 }  // namespace chromeos_update_engine
 
 #endif  // UPDATE_ENGINE_PAYLOAD_GENERATOR_EXTENT_UTILS_H_
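
For reference, a small sketch of BlockIterator walking the blocks of two
extents; the block values in the comment follow directly from the definition
above (illustrative only):

    #include <base/logging.h>

    #include "update_engine/payload_generator/extent_utils.h"
    #include "update_engine/update_metadata.pb.h"

    void BlockIteratorSketch() {
      google::protobuf::RepeatedPtrField<chromeos_update_engine::Extent> extents;
      chromeos_update_engine::Extent* e1 = extents.Add();
      e1->set_start_block(10);
      e1->set_num_blocks(2);
      chromeos_update_engine::Extent* e2 = extents.Add();
      e2->set_start_block(20);
      e2->set_num_blocks(1);
      // Visits blocks 10, 11, then 20, in that order.
      for (chromeos_update_engine::BlockIterator it{extents}; !it.is_end();
           ++it) {
        LOG(INFO) << "block " << *it;
      }
    }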
diff --git a/payload_generator/filesystem_interface.h b/payload_generator/filesystem_interface.h
index d04295c..05d387f 100644
--- a/payload_generator/filesystem_interface.h
+++ b/payload_generator/filesystem_interface.h
@@ -62,6 +62,13 @@
     // indicating the starting block, and the number of consecutive blocks.
     std::vector<Extent> extents;
 
+    // If true, the file is already compressed on the disk, so we don't need to
+    // parse it again for deflates. For example, .gz files inside a compressed
+    // SquashFS image may have been recompressed by mksquashfs, so we can't
+    // reliably parse them and look for deflate-compressed parts anymore.
+    bool is_compressed = false;
+
     // All the deflate locations in the file. These locations are not relative
     // to the extents. They are relative to the file system itself.
     std::vector<puffin::BitExtent> deflates;
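
A hypothetical caller showing how a client of this interface might honor the
new flag; FindDeflatesInFile() is an assumed helper, not part of this
interface:

    #include <vector>

    #include "update_engine/payload_generator/filesystem_interface.h"

    void CollectDeflates(
        std::vector<chromeos_update_engine::FilesystemInterface::File>* files) {
      for (auto& file : *files) {
        if (file.is_compressed) {
          // Already compressed on disk (e.g. inside a compressed SquashFS), so
          // searching it for deflate streams is pointless.
          continue;
        }
        // FindDeflatesInFile(&file);  // Hypothetical: fills |file.deflates|.
      }
    }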
diff --git a/payload_generator/full_update_generator.cc b/payload_generator/full_update_generator.cc
index 94a43ab..4a5f63a 100644
--- a/payload_generator/full_update_generator.cc
+++ b/payload_generator/full_update_generator.cc
@@ -153,7 +153,7 @@
   aops->resize(num_chunks);
   vector<ChunkProcessor> chunk_processors;
   chunk_processors.reserve(num_chunks);
-  blob_file->SetTotalBlobs(num_chunks);
+  blob_file->IncTotalBlobs(num_chunks);
 
   for (size_t i = 0; i < num_chunks; ++i) {
     size_t start_block = i * chunk_blocks;
@@ -187,9 +187,6 @@
     thread_pool.AddWork(&processor);
   thread_pool.JoinAll();
 
-  // All the work done, disable logging.
-  blob_file->SetTotalBlobs(0);
-
   // All the operations must have a type set at this point. Otherwise, a
   // ChunkProcessor failed to complete.
   for (const AnnotatedOperation& aop : *aops) {
diff --git a/payload_generator/full_update_generator_unittest.cc b/payload_generator/full_update_generator_unittest.cc
index e398125..d3b3491 100644
--- a/payload_generator/full_update_generator_unittest.cc
+++ b/payload_generator/full_update_generator_unittest.cc
@@ -41,11 +41,9 @@
     config_.block_size = 4096;
 
     new_part_conf.path = part_file_.path();
-    EXPECT_TRUE(utils::MakeTempFile(
-        "FullUpdateTest_blobs.XXXXXX", &out_blobs_path_, &out_blobs_fd_));
 
-    blob_file_.reset(new BlobFileWriter(out_blobs_fd_, &out_blobs_length_));
-    out_blobs_unlinker_.reset(new ScopedPathUnlinker(out_blobs_path_));
+    blob_file_writer_.reset(
+        new BlobFileWriter(blob_file_.fd(), &out_blobs_length_));
   }
 
   PayloadGenerationConfig config_;
@@ -54,14 +52,11 @@
   vector<AnnotatedOperation> aops;
 
   // Output file holding the payload blobs.
-  string out_blobs_path_;
-  int out_blobs_fd_{-1};
   off_t out_blobs_length_{0};
-  ScopedFdCloser out_blobs_fd_closer_{&out_blobs_fd_};
-  test_utils::ScopedTempFile part_file_{"FullUpdateTest_partition.XXXXXX"};
+  ScopedTempFile part_file_{"FullUpdateTest_partition.XXXXXX"};
 
-  std::unique_ptr<BlobFileWriter> blob_file_;
-  std::unique_ptr<ScopedPathUnlinker> out_blobs_unlinker_;
+  ScopedTempFile blob_file_{"FullUpdateTest_blobs.XXXXXX", true};
+  std::unique_ptr<BlobFileWriter> blob_file_writer_;
 
   // FullUpdateGenerator under test.
   FullUpdateGenerator generator_;
@@ -77,7 +72,7 @@
   EXPECT_TRUE(generator_.GenerateOperations(config_,
                                             new_part_conf,  // this is ignored
                                             new_part_conf,
-                                            blob_file_.get(),
+                                            blob_file_writer_.get(),
                                             &aops));
   int64_t new_part_chunks = new_part_conf.size / config_.hard_chunk_size;
   EXPECT_EQ(new_part_chunks, static_cast<int64_t>(aops.size()));
@@ -90,7 +85,7 @@
     EXPECT_EQ(config_.hard_chunk_size / config_.block_size,
               aops[i].op.dst_extents(0).num_blocks());
     if (aops[i].op.type() != InstallOperation::REPLACE) {
-      EXPECT_EQ(InstallOperation::REPLACE_BZ, aops[i].op.type());
+      EXPECT_EQ(InstallOperation::REPLACE_XZ, aops[i].op.type());
     }
   }
 }
@@ -108,7 +103,7 @@
   EXPECT_TRUE(generator_.GenerateOperations(config_,
                                             new_part_conf,  // this is ignored
                                             new_part_conf,
-                                            blob_file_.get(),
+                                            blob_file_writer_.get(),
                                             &aops));
   // new_part has one chunk and a half.
   EXPECT_EQ(2U, aops.size());
@@ -129,7 +124,7 @@
   EXPECT_TRUE(generator_.GenerateOperations(config_,
                                             new_part_conf,  // this is ignored
                                             new_part_conf,
-                                            blob_file_.get(),
+                                            blob_file_writer_.get(),
                                             &aops));
 
   // new_part has less than one chunk.
diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc
index f035ff1..b04fec0 100644
--- a/payload_generator/generate_delta_main.cc
+++ b/payload_generator/generate_delta_main.cc
@@ -14,30 +14,34 @@
 // limitations under the License.
 //
 
+#include <map>
 #include <string>
 #include <vector>
 
+#include <base/bind.h>
 #include <base/files/file_path.h>
 #include <base/files/file_util.h>
 #include <base/logging.h>
 #include <base/strings/string_number_conversions.h>
 #include <base/strings/string_split.h>
+#include <base/strings/string_util.h>
 #include <brillo/flag_helper.h>
 #include <brillo/key_value_store.h>
 #include <brillo/message_loops/base_message_loop.h>
 #include <xz.h>
 
+#include "update_engine/common/download_action.h"
 #include "update_engine/common/fake_boot_control.h"
 #include "update_engine/common/fake_hardware.h"
 #include "update_engine/common/file_fetcher.h"
 #include "update_engine/common/prefs.h"
 #include "update_engine/common/terminator.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/download_action.h"
 #include "update_engine/payload_consumer/filesystem_verifier_action.h"
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
 #include "update_engine/payload_generator/payload_generation_config.h"
+#include "update_engine/payload_generator/payload_properties.h"
 #include "update_engine/payload_generator/payload_signer.h"
 #include "update_engine/payload_generator/xz.h"
 #include "update_engine/update_metadata.pb.h"
@@ -46,6 +50,7 @@
 // and an output file as arguments and the path to an output file and
 // generates a delta that can be sent to Chrome OS clients.
 
+using std::map;
 using std::string;
 using std::vector;
 
@@ -53,6 +58,9 @@
 
 namespace {
 
+constexpr char kPayloadPropertiesFormatKeyValue[] = "key-value";
+constexpr char kPayloadPropertiesFormatJson[] = "json";
+
 void ParseSignatureSizes(const string& signature_sizes_flag,
                          vector<size_t>* signature_sizes) {
   signature_sizes->clear();
@@ -67,38 +75,6 @@
   }
 }
 
-bool ParseImageInfo(const string& channel,
-                    const string& board,
-                    const string& version,
-                    const string& key,
-                    const string& build_channel,
-                    const string& build_version,
-                    ImageInfo* image_info) {
-  // All of these arguments should be present or missing.
-  bool empty = channel.empty();
-
-  CHECK_EQ(channel.empty(), empty);
-  CHECK_EQ(board.empty(), empty);
-  CHECK_EQ(version.empty(), empty);
-  CHECK_EQ(key.empty(), empty);
-
-  if (empty)
-    return false;
-
-  image_info->set_channel(channel);
-  image_info->set_board(board);
-  image_info->set_version(version);
-  image_info->set_key(key);
-
-  image_info->set_build_channel(build_channel.empty() ? channel
-                                                      : build_channel);
-
-  image_info->set_build_version(build_version.empty() ? version
-                                                      : build_version);
-
-  return true;
-}
-
 void CalculateHashForSigning(const vector<size_t>& sizes,
                              const string& out_hash_file,
                              const string& out_metadata_hash_file,
@@ -207,8 +183,11 @@
   install_plan.source_slot =
       config.is_delta ? 0 : BootControlInterface::kInvalidSlot;
   install_plan.target_slot = 1;
-  payload.type =
-      config.is_delta ? InstallPayloadType::kDelta : InstallPayloadType::kFull;
+  // For partial updates, we always write kDelta to the payload. Make it
+  // consistent for host simulation.
+  payload.type = config.is_delta || config.is_partial_update
+                     ? InstallPayloadType::kDelta
+                     : InstallPayloadType::kFull;
   payload.size = utils::FileSize(payload_file);
   // TODO(senj): This hash is only correct for unsigned payload, need to support
   // signed payload using PayloadSigner.
@@ -245,11 +224,10 @@
       std::make_unique<DownloadAction>(&prefs,
                                        &fake_boot_control,
                                        &fake_hardware,
-                                       nullptr,
                                        new FileFetcher(),
                                        true /* interactive */);
-  auto filesystem_verifier_action =
-      std::make_unique<FilesystemVerifierAction>();
+  auto filesystem_verifier_action = std::make_unique<FilesystemVerifierAction>(
+      fake_boot_control.GetDynamicPartitionControl());
 
   BondActions(install_plan_action.get(), download_action.get());
   BondActions(download_action.get(), filesystem_verifier_action.get());
@@ -259,7 +237,9 @@
   processor.EnqueueAction(std::move(install_plan_action));
   processor.EnqueueAction(std::move(download_action));
   processor.EnqueueAction(std::move(filesystem_verifier_action));
-  processor.StartProcessing();
+  loop.PostTask(FROM_HERE,
+                base::Bind(&ActionProcessor::StartProcessing,
+                           base::Unretained(&processor)));
   loop.Run();
   CHECK_EQ(delegate.code_, ErrorCode::kSuccess);
   LOG(INFO) << "Completed applying " << (config.is_delta ? "delta" : "full")
@@ -267,19 +247,62 @@
   return true;
 }
 
-int ExtractProperties(const string& payload_path, const string& props_file) {
-  brillo::KeyValueStore properties;
-  TEST_AND_RETURN_FALSE(
-      PayloadSigner::ExtractPayloadProperties(payload_path, &properties));
-  if (props_file == "-") {
-    printf("%s", properties.SaveToString().c_str());
+bool ExtractProperties(const string& payload_path,
+                       const string& props_file,
+                       const string& props_format) {
+  string properties;
+  PayloadProperties payload_props(payload_path);
+  if (props_format == kPayloadPropertiesFormatKeyValue) {
+    TEST_AND_RETURN_FALSE(payload_props.GetPropertiesAsKeyValue(&properties));
+  } else if (props_format == kPayloadPropertiesFormatJson) {
+    TEST_AND_RETURN_FALSE(payload_props.GetPropertiesAsJson(&properties));
   } else {
-    properties.Save(base::FilePath(props_file));
+    LOG(FATAL) << "Invalid option " << props_format
+               << " for --properties_format flag.";
+  }
+  if (props_file == "-") {
+    printf("%s", properties.c_str());
+  } else {
+    utils::WriteFile(
+        props_file.c_str(), properties.c_str(), properties.length());
     LOG(INFO) << "Generated properties file at " << props_file;
   }
   return true;
 }
 
+template <typename Key, typename Val>
+string ToString(const map<Key, Val>& map) {
+  vector<string> result;
+  result.reserve(map.size());
+  for (const auto& it : map) {
+    result.emplace_back(it.first + ": " + it.second);
+  }
+  return "{" + base::JoinString(result, ",") + "}";
+}
+
+bool ParsePerPartitionTimestamps(const string& partition_timestamps,
+                                 PayloadGenerationConfig* config) {
+  base::StringPairs pairs;
+  CHECK(base::SplitStringIntoKeyValuePairs(
+      partition_timestamps, ':', ',', &pairs))
+      << "--partition_timestamps accepts commad "
+         "separated pairs. e.x. system:1234,vendor:5678";
+  map<string, string> partition_timestamps_map{
+      std::move_iterator(pairs.begin()), std::move_iterator(pairs.end())};
+  for (auto&& partition : config->target.partitions) {
+    auto&& it = partition_timestamps_map.find(partition.name);
+    if (it != partition_timestamps_map.end()) {
+      partition.version = std::move(it->second);
+      partition_timestamps_map.erase(it);
+    }
+  }
+  if (!partition_timestamps_map.empty()) {
+    LOG(ERROR) << "Unused timestamps: " << ToString(partition_timestamps_map);
+    return false;
+  }
+  return true;
+}
+
 int Main(int argc, char** argv) {
   DEFINE_string(old_image, "", "Path to the old rootfs");
   DEFINE_string(new_image, "", "Path to the new rootfs");
@@ -361,57 +384,21 @@
   DEFINE_string(properties_file,
                 "",
                 "If passed, dumps the payload properties of the payload passed "
-                "in --in_file and exits.");
+                "in --in_file and exits. Look at --properties_format.");
+  DEFINE_string(properties_format,
+                kPayloadPropertiesFormatKeyValue,
+                "Defines the format of the --properties_file. The acceptable "
+                "values are: key-value (default) and json");
   DEFINE_int64(max_timestamp,
                0,
                "The maximum timestamp of the OS allowed to apply this "
                "payload.");
-
-  DEFINE_string(old_channel,
-                "",
-                "The channel for the old image. 'dev-channel', 'npo-channel', "
-                "etc. Ignored, except during delta generation.");
-  DEFINE_string(old_board,
-                "",
-                "The board for the old image. 'x86-mario', 'lumpy', "
-                "etc. Ignored, except during delta generation.");
   DEFINE_string(
-      old_version, "", "The build version of the old image. 1.2.3, etc.");
-  DEFINE_string(old_key,
-                "",
-                "The key used to sign the old image. 'premp', 'mp', 'mp-v3',"
-                " etc");
-  DEFINE_string(old_build_channel,
-                "",
-                "The channel for the build of the old image. 'dev-channel', "
-                "etc, but will never contain special channels such as "
-                "'npo-channel'. Ignored, except during delta generation.");
-  DEFINE_string(old_build_version,
-                "",
-                "The version of the build containing the old image.");
+      partition_timestamps,
+      "",
+      "The per-partition maximum timestamps which the OS allowed to apply this "
+      "payload. Passed in comma separated pairs, e.x. system:1234,vendor:5678");
 
-  DEFINE_string(new_channel,
-                "",
-                "The channel for the new image. 'dev-channel', 'npo-channel', "
-                "etc. Ignored, except during delta generation.");
-  DEFINE_string(new_board,
-                "",
-                "The board for the new image. 'x86-mario', 'lumpy', "
-                "etc. Ignored, except during delta generation.");
-  DEFINE_string(
-      new_version, "", "The build version of the new image. 1.2.3, etc.");
-  DEFINE_string(new_key,
-                "",
-                "The key used to sign the new image. 'premp', 'mp', 'mp-v3',"
-                " etc");
-  DEFINE_string(new_build_channel,
-                "",
-                "The channel for the build of the new image. 'dev-channel', "
-                "etc, but will never contain special channels such as "
-                "'npo-channel'. Ignored, except during delta generation.");
-  DEFINE_string(new_build_version,
-                "",
-                "The version of the build containing the new image.");
   DEFINE_string(new_postinstall_config_file,
                 "",
                 "A config file specifying postinstall related metadata. "
@@ -423,10 +410,23 @@
   DEFINE_bool(disable_fec_computation,
               false,
               "Disables the fec data computation on device.");
+  DEFINE_bool(disable_verity_computation,
+              false,
+              "Disables the verity data computation on device.");
   DEFINE_string(
       out_maximum_signature_size_file,
       "",
       "Path to the output maximum signature size given a private key.");
+  DEFINE_bool(is_partial_update,
+              false,
+              "The payload only targets a subset of partitions on the device,"
+              "e.g. generic kernel image update.");
+  DEFINE_bool(
+      disable_vabc,
+      false,
+      "Whether to disable Virtual AB Compression when installing the OTA");
+  DEFINE_string(
+      apex_info_file, "", "Path to META/apex_info.pb found in target build");
 
   brillo::FlagHelper::Init(
       argc,
@@ -438,7 +438,11 @@
   Terminator::Init();
 
   logging::LoggingSettings log_settings;
+#if BASE_VER < 780000
   log_settings.log_file = "delta_generator.log";
+#else
+  log_settings.log_file_path = "delta_generator.log";
+#endif
   log_settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG;
   log_settings.lock_log = logging::LOCK_LOG_FILE;
   log_settings.delete_old = logging::APPEND_TO_OLD_LOG_FILE;
@@ -500,7 +504,10 @@
     return VerifySignedPayload(FLAGS_in_file, FLAGS_public_key);
   }
   if (!FLAGS_properties_file.empty()) {
-    return ExtractProperties(FLAGS_in_file, FLAGS_properties_file) ? 0 : 1;
+    return ExtractProperties(
+               FLAGS_in_file, FLAGS_properties_file, FLAGS_properties_format)
+               ? 0
+               : 1;
   }
 
   // A payload generation was requested. Convert the flags to a
@@ -521,16 +528,19 @@
   partition_names = base::SplitString(
       FLAGS_partition_names, ":", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
   CHECK(!partition_names.empty());
-  if (FLAGS_major_version == kChromeOSMajorPayloadVersion ||
-      FLAGS_new_partitions.empty()) {
-    LOG_IF(FATAL, partition_names.size() != 2)
-        << "To support more than 2 partitions, please use the "
-        << "--new_partitions flag and major version 2.";
-    LOG_IF(FATAL,
-           partition_names[0] != kPartitionNameRoot ||
-               partition_names[1] != kPartitionNameKernel)
-        << "To support non-default partition name, please use the "
-        << "--new_partitions flag and major version 2.";
+  if (FLAGS_major_version < kMinSupportedMajorPayloadVersion ||
+      FLAGS_major_version > kMaxSupportedMajorPayloadVersion) {
+    LOG(FATAL) << "Unsupported major version " << FLAGS_major_version;
+    return 1;
+  }
+
+  if (!FLAGS_apex_info_file.empty()) {
+    // apex_info_file should point to a regular file (or a symlink to a
+    // regular file).
+    CHECK(utils::FileExists(FLAGS_apex_info_file.c_str()));
+    CHECK(utils::IsRegFile(FLAGS_apex_info_file.c_str()) ||
+          utils::IsSymlink(FLAGS_apex_info_file.c_str()));
+    payload_config.apex_info_file = FLAGS_apex_info_file;
   }
 
   if (!FLAGS_new_partitions.empty()) {
@@ -586,13 +596,15 @@
     }
   }
 
+  if (FLAGS_is_partial_update) {
+    payload_config.is_partial_update = true;
+  }
+
   if (!FLAGS_in_file.empty()) {
     return ApplyPayload(FLAGS_in_file, payload_config) ? 0 : 1;
   }
 
   if (!FLAGS_new_postinstall_config_file.empty()) {
-    LOG_IF(FATAL, FLAGS_major_version == kChromeOSMajorPayloadVersion)
-        << "Postinstall config is only allowed in major version 2 or newer.";
     brillo::KeyValueStore store;
     CHECK(store.Load(base::FilePath(FLAGS_new_postinstall_config_file)));
     CHECK(payload_config.target.LoadPostInstallConfig(store));
@@ -610,35 +622,20 @@
   CHECK(payload_config.target.LoadImageSize());
 
   if (!FLAGS_dynamic_partition_info_file.empty()) {
-    LOG_IF(FATAL, FLAGS_major_version == kChromeOSMajorPayloadVersion)
-        << "Dynamic partition info is only allowed in major version 2 or "
-           "newer.";
     brillo::KeyValueStore store;
     CHECK(store.Load(base::FilePath(FLAGS_dynamic_partition_info_file)));
     CHECK(payload_config.target.LoadDynamicPartitionMetadata(store));
     CHECK(payload_config.target.ValidateDynamicPartitionMetadata());
+    if (FLAGS_disable_vabc) {
+      LOG(INFO) << "Disabling VABC";
+      payload_config.target.dynamic_partition_metadata->set_vabc_enabled(false);
+      payload_config.target.dynamic_partition_metadata
+          ->set_vabc_compression_param("");
+    }
   }
 
   CHECK(!FLAGS_out_file.empty());
 
-  // Ignore failures. These are optional arguments.
-  ParseImageInfo(FLAGS_new_channel,
-                 FLAGS_new_board,
-                 FLAGS_new_version,
-                 FLAGS_new_key,
-                 FLAGS_new_build_channel,
-                 FLAGS_new_build_version,
-                 &payload_config.target.image_info);
-
-  // Ignore failures. These are optional arguments.
-  ParseImageInfo(FLAGS_old_channel,
-                 FLAGS_old_board,
-                 FLAGS_old_version,
-                 FLAGS_old_key,
-                 FLAGS_old_build_channel,
-                 FLAGS_old_build_version,
-                 &payload_config.source.image_info);
-
   payload_config.rootfs_partition_size = FLAGS_rootfs_partition_size;
 
   if (payload_config.is_delta) {
@@ -656,28 +653,49 @@
     // Autodetect minor_version by looking at the update_engine.conf in the old
     // image.
     if (payload_config.is_delta) {
-      payload_config.version.minor = kInPlaceMinorPayloadVersion;
       brillo::KeyValueStore store;
       uint32_t minor_version;
+      bool minor_version_found = false;
       for (const PartitionConfig& part : payload_config.source.partitions) {
         if (part.fs_interface && part.fs_interface->LoadSettings(&store) &&
             utils::GetMinorVersion(store, &minor_version)) {
           payload_config.version.minor = minor_version;
+          minor_version_found = true;
+          LOG(INFO) << "Auto-detected minor_version="
+                    << payload_config.version.minor;
           break;
         }
       }
+      if (!minor_version_found) {
+        LOG(FATAL) << "Failed to detect the minor version.";
+        return 1;
+      }
     } else {
       payload_config.version.minor = kFullPayloadMinorVersion;
+      LOG(INFO) << "Using non-delta minor_version="
+                << payload_config.version.minor;
     }
-    LOG(INFO) << "Auto-detected minor_version=" << payload_config.version.minor;
   } else {
     payload_config.version.minor = FLAGS_minor_version;
     LOG(INFO) << "Using provided minor_version=" << FLAGS_minor_version;
   }
 
-  payload_config.max_timestamp = FLAGS_max_timestamp;
+  if (payload_config.version.minor != kFullPayloadMinorVersion &&
+      (payload_config.version.minor < kMinSupportedMinorPayloadVersion ||
+       payload_config.version.minor > kMaxSupportedMinorPayloadVersion)) {
+    LOG(FATAL) << "Unsupported minor version " << payload_config.version.minor;
+    return 1;
+  }
 
-  if (payload_config.version.minor >= kVerityMinorPayloadVersion)
+  payload_config.max_timestamp = FLAGS_max_timestamp;
+  if (!FLAGS_partition_timestamps.empty()) {
+    CHECK(ParsePerPartitionTimestamps(FLAGS_partition_timestamps,
+                                      &payload_config));
+  }
+
+  if (payload_config.is_delta &&
+      payload_config.version.minor >= kVerityMinorPayloadVersion &&
+      !FLAGS_disable_verity_computation)
     CHECK(payload_config.target.LoadVerityConfig());
 
   LOG(INFO) << "Generating " << (payload_config.is_delta ? "delta" : "full")
diff --git a/payload_generator/graph_types.h b/payload_generator/graph_types.h
deleted file mode 100644
index f96b0f3..0000000
--- a/payload_generator/graph_types.h
+++ /dev/null
@@ -1,87 +0,0 @@
-//
-// Copyright (C) 2009 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_GRAPH_TYPES_H_
-#define UPDATE_ENGINE_PAYLOAD_GENERATOR_GRAPH_TYPES_H_
-
-#include <map>
-#include <set>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include <base/macros.h>
-
-#include "update_engine/payload_generator/annotated_operation.h"
-#include "update_engine/payload_generator/extent_utils.h"
-#include "update_engine/update_metadata.pb.h"
-
-// A few classes that help in generating delta images use these types
-// for the graph work.
-
-namespace chromeos_update_engine {
-
-struct EdgeProperties {
-  // Read-before extents. I.e., blocks in |extents| must be read by the
-  // node pointed to before the pointing node runs (presumably b/c it
-  // overwrites these blocks).
-  std::vector<Extent> extents;
-
-  // Write before extents. I.e., blocks in |write_extents| must be written
-  // by the node pointed to before the pointing node runs (presumably
-  // b/c it reads the data written by the other node).
-  std::vector<Extent> write_extents;
-
-  bool operator==(const EdgeProperties& that) const {
-    return extents == that.extents && write_extents == that.write_extents;
-  }
-};
-
-struct Vertex {
-  Vertex() : valid(true), index(-1), lowlink(-1) {}
-  bool valid;
-
-  typedef std::map<std::vector<Vertex>::size_type, EdgeProperties> EdgeMap;
-  EdgeMap out_edges;
-
-  // We sometimes wish to consider a subgraph of a graph. A subgraph would have
-  // a subset of the vertices from the graph and a subset of the edges.
-  // When considering this vertex within a subgraph, subgraph_edges stores
-  // the out-edges.
-  typedef std::set<std::vector<Vertex>::size_type> SubgraphEdgeMap;
-  SubgraphEdgeMap subgraph_edges;
-
-  // For Tarjan's algorithm:
-  std::vector<Vertex>::size_type index;
-  std::vector<Vertex>::size_type lowlink;
-
-  // Other Vertex properties:
-  AnnotatedOperation aop;
-
-  typedef std::vector<Vertex>::size_type Index;
-  static const Vertex::Index kInvalidIndex;
-};
-
-typedef std::vector<Vertex> Graph;
-
-typedef std::pair<Vertex::Index, Vertex::Index> Edge;
-
-const uint64_t kTempBlockStart = 1ULL << 60;
-static_assert(kTempBlockStart != 0, "kTempBlockStart invalid");
-
-}  // namespace chromeos_update_engine
-
-#endif  // UPDATE_ENGINE_PAYLOAD_GENERATOR_GRAPH_TYPES_H_
diff --git a/payload_generator/graph_utils.cc b/payload_generator/graph_utils.cc
deleted file mode 100644
index 7f5cf8f..0000000
--- a/payload_generator/graph_utils.cc
+++ /dev/null
@@ -1,142 +0,0 @@
-//
-// Copyright (C) 2009 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/payload_generator/graph_utils.h"
-
-#include <string>
-#include <utility>
-#include <vector>
-
-#include <base/logging.h>
-#include <base/macros.h>
-
-#include "update_engine/payload_consumer/payload_constants.h"
-#include "update_engine/payload_generator/annotated_operation.h"
-#include "update_engine/payload_generator/extent_utils.h"
-
-using std::make_pair;
-using std::pair;
-using std::string;
-using std::vector;
-
-namespace chromeos_update_engine {
-namespace graph_utils {
-
-uint64_t EdgeWeight(const Graph& graph, const Edge& edge) {
-  uint64_t weight = 0;
-  const vector<Extent>& extents =
-      graph[edge.first].out_edges.find(edge.second)->second.extents;
-  for (vector<Extent>::const_iterator it = extents.begin(); it != extents.end();
-       ++it) {
-    if (it->start_block() != kSparseHole)
-      weight += it->num_blocks();
-  }
-  return weight;
-}
-
-void AddReadBeforeDep(Vertex* src, Vertex::Index dst, uint64_t block) {
-  Vertex::EdgeMap::iterator edge_it = src->out_edges.find(dst);
-  if (edge_it == src->out_edges.end()) {
-    // Must create new edge
-    pair<Vertex::EdgeMap::iterator, bool> result =
-        src->out_edges.insert(make_pair(dst, EdgeProperties()));
-    CHECK(result.second);
-    edge_it = result.first;
-  }
-  AppendBlockToExtents(&edge_it->second.extents, block);
-}
-
-void AddReadBeforeDepExtents(Vertex* src,
-                             Vertex::Index dst,
-                             const vector<Extent>& extents) {
-  // TODO(adlr): Be more efficient than adding each block individually.
-  for (vector<Extent>::const_iterator it = extents.begin(), e = extents.end();
-       it != e;
-       ++it) {
-    const Extent& extent = *it;
-    for (uint64_t block = extent.start_block(),
-                  block_end = extent.start_block() + extent.num_blocks();
-         block != block_end;
-         ++block) {
-      AddReadBeforeDep(src, dst, block);
-    }
-  }
-}
-
-void DropWriteBeforeDeps(Vertex::EdgeMap* edge_map) {
-  // Specially crafted for-loop for the map-iterate-delete dance.
-  for (Vertex::EdgeMap::iterator it = edge_map->begin();
-       it != edge_map->end();) {
-    if (!it->second.write_extents.empty())
-      it->second.write_extents.clear();
-    if (it->second.extents.empty()) {
-      // Erase *it, as it contains no blocks
-      edge_map->erase(it++);
-    } else {
-      ++it;
-    }
-  }
-}
-
-// For each node N in graph, drop all edges N->|index|.
-void DropIncomingEdgesTo(Graph* graph, Vertex::Index index) {
-  // This would be much more efficient if we had doubly-linked
-  // edges in the graph.
-  for (Graph::iterator it = graph->begin(), e = graph->end(); it != e; ++it) {
-    it->out_edges.erase(index);
-  }
-}
-
-namespace {
-template <typename T>
-void DumpExtents(const T& field, int prepend_space_count) {
-  string header(prepend_space_count, ' ');
-  for (const auto& extent : field) {
-    LOG(INFO) << header << "(" << extent.start_block() << ", "
-              << extent.num_blocks() << ")";
-  }
-}
-
-void DumpOutEdges(const Vertex::EdgeMap& out_edges) {
-  for (Vertex::EdgeMap::const_iterator it = out_edges.begin(),
-                                       e = out_edges.end();
-       it != e;
-       ++it) {
-    LOG(INFO) << "    " << it->first << " read-before:";
-    DumpExtents(it->second.extents, 6);
-    LOG(INFO) << "      write-before:";
-    DumpExtents(it->second.write_extents, 6);
-  }
-}
-}  // namespace
-
-void DumpGraph(const Graph& graph) {
-  LOG(INFO) << "Graph length: " << graph.size();
-  for (Graph::size_type i = 0, e = graph.size(); i != e; ++i) {
-    LOG(INFO) << i << (graph[i].valid ? "" : "-INV") << ": "
-              << graph[i].aop.name << ": "
-              << InstallOperationTypeName(graph[i].aop.op.type());
-    LOG(INFO) << "  src_extents:";
-    DumpExtents(graph[i].aop.op.src_extents(), 4);
-    LOG(INFO) << "  dst_extents:";
-    DumpExtents(graph[i].aop.op.dst_extents(), 4);
-    LOG(INFO) << "  out edges:";
-    DumpOutEdges(graph[i].out_edges);
-  }
-}
-
-}  // namespace graph_utils
-}  // namespace chromeos_update_engine
diff --git a/payload_generator/graph_utils.h b/payload_generator/graph_utils.h
deleted file mode 100644
index 7024215..0000000
--- a/payload_generator/graph_utils.h
+++ /dev/null
@@ -1,54 +0,0 @@
-//
-// Copyright (C) 2009 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_GRAPH_UTILS_H_
-#define UPDATE_ENGINE_PAYLOAD_GENERATOR_GRAPH_UTILS_H_
-
-#include <vector>
-
-#include <base/macros.h>
-
-#include "update_engine/payload_generator/graph_types.h"
-#include "update_engine/update_metadata.pb.h"
-
-// A few utility functions for graphs
-
-namespace chromeos_update_engine {
-
-namespace graph_utils {
-
-// Returns the number of blocks represented by all extents in the edge.
-uint64_t EdgeWeight(const Graph& graph, const Edge& edge);
-
-// These add a read-before dependency from graph[src] -> graph[dst]. If the dep
-// already exists, the block/s is/are added to the existing edge.
-void AddReadBeforeDep(Vertex* src, Vertex::Index dst, uint64_t block);
-void AddReadBeforeDepExtents(Vertex* src,
-                             Vertex::Index dst,
-                             const std::vector<Extent>& extents);
-
-void DropWriteBeforeDeps(Vertex::EdgeMap* edge_map);
-
-// For each node N in graph, drop all edges N->|index|.
-void DropIncomingEdgesTo(Graph* graph, Vertex::Index index);
-
-void DumpGraph(const Graph& graph);
-
-}  // namespace graph_utils
-
-}  // namespace chromeos_update_engine
-
-#endif  // UPDATE_ENGINE_PAYLOAD_GENERATOR_GRAPH_UTILS_H_
diff --git a/payload_generator/graph_utils_unittest.cc b/payload_generator/graph_utils_unittest.cc
deleted file mode 100644
index 07e7664..0000000
--- a/payload_generator/graph_utils_unittest.cc
+++ /dev/null
@@ -1,94 +0,0 @@
-//
-// Copyright (C) 2009 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/payload_generator/graph_utils.h"
-
-#include <utility>
-#include <vector>
-
-#include <gtest/gtest.h>
-
-#include "update_engine/payload_consumer/payload_constants.h"
-#include "update_engine/payload_generator/extent_ranges.h"
-#include "update_engine/payload_generator/extent_utils.h"
-
-using std::make_pair;
-using std::vector;
-
-namespace chromeos_update_engine {
-
-class GraphUtilsTest : public ::testing::Test {};
-
-TEST(GraphUtilsTest, SimpleTest) {
-  Graph graph(2);
-
-  graph[0].out_edges.insert(make_pair(1, EdgeProperties()));
-
-  vector<Extent>& extents = graph[0].out_edges[1].extents;
-
-  EXPECT_EQ(0U, extents.size());
-  AppendBlockToExtents(&extents, 0);
-  EXPECT_EQ(1U, extents.size());
-  AppendBlockToExtents(&extents, 1);
-  AppendBlockToExtents(&extents, 2);
-  EXPECT_EQ(1U, extents.size());
-  AppendBlockToExtents(&extents, 4);
-
-  EXPECT_EQ(2U, extents.size());
-  EXPECT_EQ(0U, extents[0].start_block());
-  EXPECT_EQ(3U, extents[0].num_blocks());
-  EXPECT_EQ(4U, extents[1].start_block());
-  EXPECT_EQ(1U, extents[1].num_blocks());
-
-  EXPECT_EQ(4U, graph_utils::EdgeWeight(graph, make_pair(0, 1)));
-}
-
-TEST(GraphUtilsTest, DepsTest) {
-  Graph graph(3);
-
-  graph_utils::AddReadBeforeDep(&graph[0], 1, 3);
-  EXPECT_EQ(1U, graph[0].out_edges.size());
-  {
-    Extent& extent = graph[0].out_edges[1].extents[0];
-    EXPECT_EQ(3U, extent.start_block());
-    EXPECT_EQ(1U, extent.num_blocks());
-  }
-  graph_utils::AddReadBeforeDep(&graph[0], 1, 4);
-  EXPECT_EQ(1U, graph[0].out_edges.size());
-  {
-    Extent& extent = graph[0].out_edges[1].extents[0];
-    EXPECT_EQ(3U, extent.start_block());
-    EXPECT_EQ(2U, extent.num_blocks());
-  }
-  graph_utils::AddReadBeforeDepExtents(
-      &graph[2], 1, vector<Extent>(1, ExtentForRange(5, 2)));
-  EXPECT_EQ(1U, graph[2].out_edges.size());
-  {
-    Extent& extent = graph[2].out_edges[1].extents[0];
-    EXPECT_EQ(5U, extent.start_block());
-    EXPECT_EQ(2U, extent.num_blocks());
-  }
-  // Change most recent edge from read-before to write-before
-  graph[2].out_edges[1].write_extents.swap(graph[2].out_edges[1].extents);
-  graph_utils::DropWriteBeforeDeps(&graph[2].out_edges);
-  EXPECT_EQ(0U, graph[2].out_edges.size());
-
-  EXPECT_EQ(1U, graph[0].out_edges.size());
-  graph_utils::DropIncomingEdgesTo(&graph, 1);
-  EXPECT_EQ(0U, graph[0].out_edges.size());
-}
-
-}  // namespace chromeos_update_engine
diff --git a/payload_generator/inplace_generator.cc b/payload_generator/inplace_generator.cc
deleted file mode 100644
index d553cc4..0000000
--- a/payload_generator/inplace_generator.cc
+++ /dev/null
@@ -1,798 +0,0 @@
-//
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/payload_generator/inplace_generator.h"
-
-#include <algorithm>
-#include <map>
-#include <set>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include <base/stl_util.h>
-
-#include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/payload_constants.h"
-#include "update_engine/payload_generator/cycle_breaker.h"
-#include "update_engine/payload_generator/delta_diff_generator.h"
-#include "update_engine/payload_generator/delta_diff_utils.h"
-#include "update_engine/payload_generator/extent_ranges.h"
-#include "update_engine/payload_generator/graph_types.h"
-#include "update_engine/payload_generator/graph_utils.h"
-#include "update_engine/payload_generator/topological_sort.h"
-#include "update_engine/update_metadata.pb.h"
-
-using std::make_pair;
-using std::map;
-using std::pair;
-using std::set;
-using std::string;
-using std::vector;
-
-namespace chromeos_update_engine {
-
-using Block = InplaceGenerator::Block;
-
-namespace {
-
-// The only PayloadVersion supported by this implementation.
-const PayloadVersion kInPlacePayloadVersion{kChromeOSMajorPayloadVersion,
-                                            kInPlaceMinorPayloadVersion};
-
-// This class allocates non-existent temp blocks, starting from
-// kTempBlockStart. Other code is responsible for converting these
-// temp blocks into real blocks, as the client can't read or write to
-// these blocks.
-class DummyExtentAllocator {
- public:
-  vector<Extent> Allocate(const uint64_t block_count) {
-    vector<Extent> ret(1);
-    ret[0].set_start_block(next_block_);
-    ret[0].set_num_blocks(block_count);
-    next_block_ += block_count;
-    return ret;
-  }
-
- private:
-  uint64_t next_block_{kTempBlockStart};
-};
-
-// Takes a vector of blocks and returns an equivalent vector of Extent
-// objects.
-vector<Extent> CompressExtents(const vector<uint64_t>& blocks) {
-  vector<Extent> new_extents;
-  for (uint64_t block : blocks) {
-    AppendBlockToExtents(&new_extents, block);
-  }
-  return new_extents;
-}
-
-// Helper class to compare two operations by start block of the first Extent in
-// their destination extents given the index of the operations in the graph.
-class IndexedInstallOperationsDstComparator {
- public:
-  explicit IndexedInstallOperationsDstComparator(Graph* graph)
-      : graph_(graph) {}
-
-  // Compares the operations in the vertex a and b of graph_.
-  bool operator()(size_t a, size_t b) const {
-    return diff_utils::CompareAopsByDestination((*graph_)[a].aop,
-                                                (*graph_)[b].aop);
-  }
-
- private:
-  const Graph* const graph_;
-};
-
-}  // namespace
-
-void InplaceGenerator::CheckGraph(const Graph& graph) {
-  for (const Vertex& v : graph) {
-    CHECK(v.aop.op.has_type());
-  }
-}
-
-void InplaceGenerator::SubstituteBlocks(Vertex* vertex,
-                                        const vector<Extent>& remove_extents,
-                                        const vector<Extent>& replace_extents) {
-  // First, expand out the blocks that op reads from
-  vector<uint64_t> read_blocks = ExpandExtents(vertex->aop.op.src_extents());
-  {
-    // Expand remove_extents and replace_extents
-    vector<uint64_t> remove_extents_expanded = ExpandExtents(remove_extents);
-    vector<uint64_t> replace_extents_expanded = ExpandExtents(replace_extents);
-    CHECK_EQ(remove_extents_expanded.size(), replace_extents_expanded.size());
-    map<uint64_t, uint64_t> conversion;
-    for (vector<uint64_t>::size_type i = 0; i < replace_extents_expanded.size();
-         i++) {
-      conversion[remove_extents_expanded[i]] = replace_extents_expanded[i];
-    }
-    ApplyMap(&read_blocks, conversion);
-    for (auto& edge_prop_pair : vertex->out_edges) {
-      vector<uint64_t> write_before_deps_expanded =
-          ExpandExtents(edge_prop_pair.second.write_extents);
-      ApplyMap(&write_before_deps_expanded, conversion);
-      edge_prop_pair.second.write_extents =
-          CompressExtents(write_before_deps_expanded);
-    }
-  }
-  // Convert read_blocks back to extents
-  vertex->aop.op.clear_src_extents();
-  vector<Extent> new_extents = CompressExtents(read_blocks);
-  StoreExtents(new_extents, vertex->aop.op.mutable_src_extents());
-}
-
-bool InplaceGenerator::CutEdges(Graph* graph,
-                                const set<Edge>& edges,
-                                vector<CutEdgeVertexes>* out_cuts) {
-  DummyExtentAllocator scratch_allocator;
-  vector<CutEdgeVertexes> cuts;
-  cuts.reserve(edges.size());
-
-  uint64_t scratch_blocks_used = 0;
-  for (const Edge& edge : edges) {
-    cuts.resize(cuts.size() + 1);
-    vector<Extent> old_extents =
-        (*graph)[edge.first].out_edges[edge.second].extents;
-    // Choose some scratch space
-    scratch_blocks_used += graph_utils::EdgeWeight(*graph, edge);
-    cuts.back().tmp_extents =
-        scratch_allocator.Allocate(graph_utils::EdgeWeight(*graph, edge));
-    // create vertex to copy original->scratch
-    cuts.back().new_vertex = graph->size();
-    graph->emplace_back();
-    cuts.back().old_src = edge.first;
-    cuts.back().old_dst = edge.second;
-
-    EdgeProperties& cut_edge_properties =
-        (*graph)[edge.first].out_edges.find(edge.second)->second;
-
-    // This should never happen, as we should only be cutting edges between
-    // real file nodes, and write-before relationships are created from
-    // a real file node to a temp copy node:
-    CHECK(cut_edge_properties.write_extents.empty())
-        << "Can't cut edge that has write-before relationship.";
-
-    // make node depend on the copy operation
-    (*graph)[edge.first].out_edges.insert(
-        make_pair(graph->size() - 1, cut_edge_properties));
-
-    // Set src/dst extents and other proto variables for copy operation
-    graph->back().aop.op.set_type(InstallOperation::MOVE);
-    StoreExtents(cut_edge_properties.extents,
-                 graph->back().aop.op.mutable_src_extents());
-    StoreExtents(cuts.back().tmp_extents,
-                 graph->back().aop.op.mutable_dst_extents());
-    graph->back().aop.op.set_src_length(graph_utils::EdgeWeight(*graph, edge) *
-                                        kBlockSize);
-    graph->back().aop.op.set_dst_length(graph->back().aop.op.src_length());
-
-    // make the dest node read from the scratch space
-    SubstituteBlocks(&((*graph)[edge.second]),
-                     (*graph)[edge.first].out_edges[edge.second].extents,
-                     cuts.back().tmp_extents);
-
-    // delete the old edge
-    CHECK_EQ(static_cast<Graph::size_type>(1),
-             (*graph)[edge.first].out_edges.erase(edge.second));
-
-    // Add an edge from dst to copy operation
-    EdgeProperties write_before_edge_properties;
-    write_before_edge_properties.write_extents = cuts.back().tmp_extents;
-    (*graph)[edge.second].out_edges.insert(
-        make_pair(graph->size() - 1, write_before_edge_properties));
-  }
-  out_cuts->swap(cuts);
-  return true;
-}
-
-// Creates all the edges for the graph. Writers of a block point to
-// readers of the same block. This is because for an edge A->B, B
-// must complete before A executes.
-void InplaceGenerator::CreateEdges(Graph* graph, const vector<Block>& blocks) {
-  for (vector<Block>::size_type i = 0; i < blocks.size(); i++) {
-    // Blocks with both a reader and writer get an edge
-    if (blocks[i].reader == Vertex::kInvalidIndex ||
-        blocks[i].writer == Vertex::kInvalidIndex)
-      continue;
-    // Don't have a node depend on itself
-    if (blocks[i].reader == blocks[i].writer)
-      continue;
-    // See if there's already an edge we can add onto
-    Vertex::EdgeMap::iterator edge_it =
-        (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader);
-    if (edge_it == (*graph)[blocks[i].writer].out_edges.end()) {
-      // No existing edge. Create one
-      (*graph)[blocks[i].writer].out_edges.insert(
-          make_pair(blocks[i].reader, EdgeProperties()));
-      edge_it = (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader);
-      CHECK(edge_it != (*graph)[blocks[i].writer].out_edges.end());
-    }
-    AppendBlockToExtents(&edge_it->second.extents, i);
-  }
-}
-
-namespace {
-
-class SortCutsByTopoOrderLess {
- public:
-  explicit SortCutsByTopoOrderLess(
-      const vector<vector<Vertex::Index>::size_type>& table)
-      : table_(table) {}
-  bool operator()(const CutEdgeVertexes& a, const CutEdgeVertexes& b) {
-    return table_[a.old_dst] < table_[b.old_dst];
-  }
-
- private:
-  const vector<vector<Vertex::Index>::size_type>& table_;
-};
-
-}  // namespace
-
-void InplaceGenerator::GenerateReverseTopoOrderMap(
-    const vector<Vertex::Index>& op_indexes,
-    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes) {
-  vector<vector<Vertex::Index>::size_type> table(op_indexes.size());
-  for (vector<Vertex::Index>::size_type i = 0, e = op_indexes.size(); i != e;
-       ++i) {
-    Vertex::Index node = op_indexes[i];
-    if (table.size() < (node + 1)) {
-      table.resize(node + 1);
-    }
-    table[node] = i;
-  }
-  reverse_op_indexes->swap(table);
-}
-
-void InplaceGenerator::SortCutsByTopoOrder(
-    const vector<Vertex::Index>& op_indexes, vector<CutEdgeVertexes>* cuts) {
-  // first, make a reverse lookup table.
-  vector<vector<Vertex::Index>::size_type> table;
-  GenerateReverseTopoOrderMap(op_indexes, &table);
-  SortCutsByTopoOrderLess less(table);
-  sort(cuts->begin(), cuts->end(), less);
-}
-
-void InplaceGenerator::MoveAndSortFullOpsToBack(
-    Graph* graph, vector<Vertex::Index>* op_indexes) {
-  vector<Vertex::Index> ret;
-  vector<Vertex::Index> full_ops;
-  ret.reserve(op_indexes->size());
-  for (auto op_index : *op_indexes) {
-    InstallOperation::Type type = (*graph)[op_index].aop.op.type();
-    if (type == InstallOperation::REPLACE ||
-        type == InstallOperation::REPLACE_BZ) {
-      full_ops.push_back(op_index);
-    } else {
-      ret.push_back(op_index);
-    }
-  }
-  LOG(INFO) << "Stats: " << full_ops.size() << " full ops out of "
-            << (full_ops.size() + ret.size()) << " total ops.";
-  // Sort full ops according to their dst_extents.
-  sort(full_ops.begin(),
-       full_ops.end(),
-       IndexedInstallOperationsDstComparator(graph));
-  ret.insert(ret.end(), full_ops.begin(), full_ops.end());
-  op_indexes->swap(ret);
-}
-
-namespace {
-
-template <typename T>
-bool TempBlocksExistInExtents(const T& extents) {
-  for (const auto& extent : extents) {
-    uint64_t start = extent.start_block();
-    uint64_t num = extent.num_blocks();
-    if (start >= kTempBlockStart || (start + num) >= kTempBlockStart) {
-      LOG(ERROR) << "temp block!";
-      LOG(ERROR) << "start: " << start << ", num: " << num;
-      LOG(ERROR) << "kTempBlockStart: " << kTempBlockStart;
-      LOG(ERROR) << "returning true";
-      return true;
-    }
-    // check for wrap-around, which would be a bug:
-    CHECK(start <= (start + num));
-  }
-  return false;
-}
-
-// Converts the cuts, which must all have the same |old_dst| member,
-// to full. It does this by converting the |old_dst| to REPLACE or
-// REPLACE_BZ, dropping all incoming edges to |old_dst|, and marking
-// all temp nodes invalid.
-bool ConvertCutsToFull(
-    Graph* graph,
-    const string& new_part,
-    BlobFileWriter* blob_file,
-    vector<Vertex::Index>* op_indexes,
-    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
-    const vector<CutEdgeVertexes>& cuts) {
-  CHECK(!cuts.empty());
-  set<Vertex::Index> deleted_nodes;
-  for (const CutEdgeVertexes& cut : cuts) {
-    TEST_AND_RETURN_FALSE(
-        InplaceGenerator::ConvertCutToFullOp(graph, cut, new_part, blob_file));
-    deleted_nodes.insert(cut.new_vertex);
-  }
-  deleted_nodes.insert(cuts[0].old_dst);
-
-  vector<Vertex::Index> new_op_indexes;
-  new_op_indexes.reserve(op_indexes->size());
-  for (Vertex::Index vertex_index : *op_indexes) {
-    if (base::ContainsKey(deleted_nodes, vertex_index))
-      continue;
-    new_op_indexes.push_back(vertex_index);
-  }
-  new_op_indexes.push_back(cuts[0].old_dst);
-  op_indexes->swap(new_op_indexes);
-  InplaceGenerator::GenerateReverseTopoOrderMap(*op_indexes,
-                                                reverse_op_indexes);
-  return true;
-}
-
-// Tries to assign temp blocks for a collection of cuts, all of which share
-// the same old_dst member. If temp blocks can't be found, old_dst will be
-// converted to a REPLACE or REPLACE_BZ operation. Returns true on success,
-// which can happen even if blocks are converted to full. Returns false
-// on exceptional error cases.
-bool AssignBlockForAdjoiningCuts(
-    Graph* graph,
-    const string& new_part,
-    BlobFileWriter* blob_file,
-    vector<Vertex::Index>* op_indexes,
-    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
-    const vector<CutEdgeVertexes>& cuts) {
-  CHECK(!cuts.empty());
-  const Vertex::Index old_dst = cuts[0].old_dst;
-  // Calculate # of blocks needed
-  uint64_t blocks_needed = 0;
-  vector<uint64_t> cuts_blocks_needed(cuts.size());
-  for (vector<CutEdgeVertexes>::size_type i = 0; i < cuts.size(); ++i) {
-    uint64_t cut_blocks_needed = 0;
-    for (const Extent& extent : cuts[i].tmp_extents) {
-      cut_blocks_needed += extent.num_blocks();
-    }
-    blocks_needed += cut_blocks_needed;
-    cuts_blocks_needed[i] = cut_blocks_needed;
-  }
-
-  // Find enough blocks
-  ExtentRanges scratch_ranges;
-  // Each block that's supplying temp blocks and the corresponding blocks:
-  typedef vector<pair<Vertex::Index, ExtentRanges>> SupplierVector;
-  SupplierVector block_suppliers;
-  uint64_t scratch_blocks_found = 0;
-  for (vector<Vertex::Index>::size_type i = (*reverse_op_indexes)[old_dst] + 1,
-                                        e = op_indexes->size();
-       i < e;
-       ++i) {
-    Vertex::Index test_node = (*op_indexes)[i];
-    if (!(*graph)[test_node].valid)
-      continue;
-    // See if this node has sufficient blocks
-    ExtentRanges ranges;
-    ranges.AddRepeatedExtents((*graph)[test_node].aop.op.dst_extents());
-    ranges.SubtractExtent(
-        ExtentForRange(kTempBlockStart, kSparseHole - kTempBlockStart));
-    ranges.SubtractRepeatedExtents((*graph)[test_node].aop.op.src_extents());
-    // For now, for simplicity, subtract out all blocks in read-before
-    // dependencies.
-    for (Vertex::EdgeMap::const_iterator
-             edge_i = (*graph)[test_node].out_edges.begin(),
-             edge_e = (*graph)[test_node].out_edges.end();
-         edge_i != edge_e;
-         ++edge_i) {
-      ranges.SubtractExtents(edge_i->second.extents);
-    }
-
-    // Prevent using the block 0 as scratch space due to crbug.com/480751.
-    if (ranges.ContainsBlock(0)) {
-      LOG(INFO) << "Removing block 0 from the selected scratch range in vertex "
-                << i;
-      ranges.SubtractBlock(0);
-    }
-
-    if (ranges.blocks() == 0)
-      continue;
-
-    if (ranges.blocks() + scratch_blocks_found > blocks_needed) {
-      // trim down ranges
-      vector<Extent> new_ranges =
-          ranges.GetExtentsForBlockCount(blocks_needed - scratch_blocks_found);
-      ranges = ExtentRanges();
-      ranges.AddExtents(new_ranges);
-    }
-    scratch_ranges.AddRanges(ranges);
-    block_suppliers.push_back(make_pair(test_node, ranges));
-    scratch_blocks_found += ranges.blocks();
-    if (scratch_ranges.blocks() >= blocks_needed)
-      break;
-  }
-  if (scratch_ranges.blocks() < blocks_needed) {
-    LOG(INFO) << "Unable to find sufficient scratch";
-    TEST_AND_RETURN_FALSE(ConvertCutsToFull(
-        graph, new_part, blob_file, op_indexes, reverse_op_indexes, cuts));
-    return true;
-  }
-  // Use the scratch we found
-  TEST_AND_RETURN_FALSE(scratch_ranges.blocks() == scratch_blocks_found);
-
-  // Make all the suppliers depend on this node
-  for (const auto& index_range_pair : block_suppliers) {
-    graph_utils::AddReadBeforeDepExtents(
-        &(*graph)[index_range_pair.first],
-        old_dst,
-        index_range_pair.second.GetExtentsForBlockCount(
-            index_range_pair.second.blocks()));
-  }
-
-  // Replace temp blocks in each cut
-  for (vector<CutEdgeVertexes>::size_type i = 0; i < cuts.size(); ++i) {
-    const CutEdgeVertexes& cut = cuts[i];
-    vector<Extent> real_extents =
-        scratch_ranges.GetExtentsForBlockCount(cuts_blocks_needed[i]);
-    scratch_ranges.SubtractExtents(real_extents);
-
-    // Fix the old dest node w/ the real blocks
-    InplaceGenerator::SubstituteBlocks(
-        &(*graph)[old_dst], cut.tmp_extents, real_extents);
-
-    // Fix the new node w/ the real blocks. Since the new node is just a
-    // copy operation, we can replace all the dest extents w/ the real
-    // blocks.
-    InstallOperation* op = &(*graph)[cut.new_vertex].aop.op;
-    op->clear_dst_extents();
-    StoreExtents(real_extents, op->mutable_dst_extents());
-  }
-  return true;
-}
-
-}  // namespace
-
-bool InplaceGenerator::AssignTempBlocks(
-    Graph* graph,
-    const string& new_part,
-    BlobFileWriter* blob_file,
-    vector<Vertex::Index>* op_indexes,
-    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
-    const vector<CutEdgeVertexes>& cuts) {
-  CHECK(!cuts.empty());
-
-  // group of cuts w/ the same old_dst:
-  vector<CutEdgeVertexes> cuts_group;
-
-  for (vector<CutEdgeVertexes>::size_type i = cuts.size() - 1, e = 0; true;
-       --i) {
-    LOG(INFO) << "Fixing temp blocks in cut " << i
-              << ": old dst: " << cuts[i].old_dst
-              << " new vertex: " << cuts[i].new_vertex
-              << " path: " << (*graph)[cuts[i].old_dst].aop.name;
-
-    if (cuts_group.empty() || (cuts_group[0].old_dst == cuts[i].old_dst)) {
-      cuts_group.push_back(cuts[i]);
-    } else {
-      CHECK(!cuts_group.empty());
-      TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts(graph,
-                                                        new_part,
-                                                        blob_file,
-                                                        op_indexes,
-                                                        reverse_op_indexes,
-                                                        cuts_group));
-      cuts_group.clear();
-      cuts_group.push_back(cuts[i]);
-    }
-
-    if (i == e) {
-      // break out of for() loop
-      break;
-    }
-  }
-  CHECK(!cuts_group.empty());
-  TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts(
-      graph, new_part, blob_file, op_indexes, reverse_op_indexes, cuts_group));
-  return true;
-}
-
-bool InplaceGenerator::NoTempBlocksRemain(const Graph& graph) {
-  size_t idx = 0;
-  for (Graph::const_iterator it = graph.begin(), e = graph.end(); it != e;
-       ++it, ++idx) {
-    if (!it->valid)
-      continue;
-    const InstallOperation& op = it->aop.op;
-    if (TempBlocksExistInExtents(op.dst_extents()) ||
-        TempBlocksExistInExtents(op.src_extents())) {
-      LOG(INFO) << "bad extents in node " << idx;
-      LOG(INFO) << "so yeah";
-      return false;
-    }
-
-    // Check out-edges:
-    for (const auto& edge_prop_pair : it->out_edges) {
-      if (TempBlocksExistInExtents(edge_prop_pair.second.extents) ||
-          TempBlocksExistInExtents(edge_prop_pair.second.write_extents)) {
-        LOG(INFO) << "bad out edge in node " << idx;
-        LOG(INFO) << "so yeah";
-        return false;
-      }
-    }
-  }
-  return true;
-}
-
-bool InplaceGenerator::ConvertCutToFullOp(Graph* graph,
-                                          const CutEdgeVertexes& cut,
-                                          const string& new_part,
-                                          BlobFileWriter* blob_file) {
-  // Drop all incoming edges, keep all outgoing edges
-
-  // Keep all outgoing edges
-  if ((*graph)[cut.old_dst].aop.op.type() != InstallOperation::REPLACE_BZ &&
-      (*graph)[cut.old_dst].aop.op.type() != InstallOperation::REPLACE) {
-    Vertex::EdgeMap out_edges = (*graph)[cut.old_dst].out_edges;
-    graph_utils::DropWriteBeforeDeps(&out_edges);
-
-    // Replace the operation with a REPLACE or REPLACE_BZ to generate the same
-    // |new_extents| list of blocks and update the graph.
-    vector<AnnotatedOperation> new_aop;
-    vector<Extent> new_extents;
-    ExtentsToVector((*graph)[cut.old_dst].aop.op.dst_extents(), &new_extents);
-    TEST_AND_RETURN_FALSE(diff_utils::DeltaReadFile(
-        &new_aop,
-        "",  // old_part
-        new_part,
-        vector<Extent>(),  // old_extents
-        new_extents,
-        {},  // old_deflates
-        {},  // new_deflates
-        (*graph)[cut.old_dst].aop.name,
-        -1,  // chunk_blocks, forces to have a single operation.
-        kInPlacePayloadVersion,
-        blob_file));
-    TEST_AND_RETURN_FALSE(new_aop.size() == 1);
-    TEST_AND_RETURN_FALSE(AddInstallOpToGraph(
-        graph, cut.old_dst, nullptr, new_aop.front().op, new_aop.front().name));
-
-    (*graph)[cut.old_dst].out_edges = out_edges;
-
-    // Right now we don't have doubly-linked edges, so we have to scan
-    // the whole graph.
-    graph_utils::DropIncomingEdgesTo(graph, cut.old_dst);
-  }
-
-  // Delete temp node
-  (*graph)[cut.old_src].out_edges.erase(cut.new_vertex);
-  CHECK((*graph)[cut.old_dst].out_edges.find(cut.new_vertex) ==
-        (*graph)[cut.old_dst].out_edges.end());
-  (*graph)[cut.new_vertex].valid = false;
-  LOG(INFO) << "marked node invalid: " << cut.new_vertex;
-  return true;
-}
-
-bool InplaceGenerator::ConvertGraphToDag(Graph* graph,
-                                         const string& new_part,
-                                         BlobFileWriter* blob_file,
-                                         vector<Vertex::Index>* final_order,
-                                         Vertex::Index scratch_vertex) {
-  CycleBreaker cycle_breaker;
-  LOG(INFO) << "Finding cycles...";
-  set<Edge> cut_edges;
-  cycle_breaker.BreakCycles(*graph, &cut_edges);
-  LOG(INFO) << "done finding cycles";
-  CheckGraph(*graph);
-
-  // Calculate number of scratch blocks needed
-
-  LOG(INFO) << "Cutting cycles...";
-  vector<CutEdgeVertexes> cuts;
-  TEST_AND_RETURN_FALSE(CutEdges(graph, cut_edges, &cuts));
-  LOG(INFO) << "done cutting cycles";
-  LOG(INFO) << "There are " << cuts.size() << " cuts.";
-  CheckGraph(*graph);
-
-  LOG(INFO) << "Creating initial topological order...";
-  TopologicalSort(*graph, final_order);
-  LOG(INFO) << "done with initial topo order";
-  CheckGraph(*graph);
-
-  LOG(INFO) << "Moving full ops to the back";
-  MoveAndSortFullOpsToBack(graph, final_order);
-  LOG(INFO) << "done moving full ops to back";
-
-  vector<vector<Vertex::Index>::size_type> inverse_final_order;
-  GenerateReverseTopoOrderMap(*final_order, &inverse_final_order);
-
-  SortCutsByTopoOrder(*final_order, &cuts);
-
-  if (!cuts.empty())
-    TEST_AND_RETURN_FALSE(AssignTempBlocks(
-        graph, new_part, blob_file, final_order, &inverse_final_order, cuts));
-  LOG(INFO) << "Making sure all temp blocks have been allocated";
-
-  // Remove the scratch node, if any
-  if (scratch_vertex != Vertex::kInvalidIndex) {
-    final_order->erase(final_order->begin() +
-                       inverse_final_order[scratch_vertex]);
-    (*graph)[scratch_vertex].valid = false;
-    GenerateReverseTopoOrderMap(*final_order, &inverse_final_order);
-  }
-
-  graph_utils::DumpGraph(*graph);
-  CHECK(NoTempBlocksRemain(*graph));
-  LOG(INFO) << "done making sure all temp blocks are allocated";
-  return true;
-}
-
-void InplaceGenerator::CreateScratchNode(uint64_t start_block,
-                                         uint64_t num_blocks,
-                                         Vertex* vertex) {
-  vertex->aop.name = "<scratch>";
-  vertex->aop.op.set_type(InstallOperation::REPLACE_BZ);
-  vertex->aop.op.set_data_offset(0);
-  vertex->aop.op.set_data_length(0);
-  Extent* extent = vertex->aop.op.add_dst_extents();
-  extent->set_start_block(start_block);
-  extent->set_num_blocks(num_blocks);
-}
-
-bool InplaceGenerator::AddInstallOpToBlocksVector(
-    const InstallOperation& operation,
-    const Graph& graph,
-    Vertex::Index vertex,
-    vector<Block>* blocks) {
-  // See if this is already present.
-  TEST_AND_RETURN_FALSE(operation.dst_extents_size() > 0);
-
-  enum BlockField { READER = 0, WRITER, BLOCK_FIELD_COUNT };
-  for (int field = READER; field < BLOCK_FIELD_COUNT; field++) {
-    const char* past_participle = (field == READER) ? "read" : "written";
-    const google::protobuf::RepeatedPtrField<Extent>& extents =
-        (field == READER) ? operation.src_extents() : operation.dst_extents();
-    Vertex::Index Block::*access_type =
-        (field == READER) ? &Block::reader : &Block::writer;
-
-    for (const Extent& extent : extents) {
-      for (uint64_t block = extent.start_block();
-           block < (extent.start_block() + extent.num_blocks());
-           block++) {
-        if ((*blocks)[block].*access_type != Vertex::kInvalidIndex) {
-          LOG(FATAL) << "Block " << block << " is already " << past_participle
-                     << " by " << (*blocks)[block].*access_type << "("
-                     << graph[(*blocks)[block].*access_type].aop.name
-                     << ") and also " << vertex << "(" << graph[vertex].aop.name
-                     << ")";
-        }
-        (*blocks)[block].*access_type = vertex;
-      }
-    }
-  }
-  return true;
-}
-
-bool InplaceGenerator::AddInstallOpToGraph(Graph* graph,
-                                           Vertex::Index existing_vertex,
-                                           vector<Block>* blocks,
-                                           const InstallOperation& operation,
-                                           const string& op_name) {
-  Vertex::Index vertex = existing_vertex;
-  if (vertex == Vertex::kInvalidIndex) {
-    graph->emplace_back();
-    vertex = graph->size() - 1;
-  }
-  (*graph)[vertex].aop.op = operation;
-  CHECK((*graph)[vertex].aop.op.has_type());
-  (*graph)[vertex].aop.name = op_name;
-
-  if (blocks)
-    TEST_AND_RETURN_FALSE(InplaceGenerator::AddInstallOpToBlocksVector(
-        (*graph)[vertex].aop.op, *graph, vertex, blocks));
-  return true;
-}
-
-void InplaceGenerator::ApplyMap(vector<uint64_t>* collection,
-                                const map<uint64_t, uint64_t>& the_map) {
-  for (uint64_t& elem : *collection) {
-    const auto& map_it = the_map.find(elem);
-    if (map_it != the_map.end())
-      elem = map_it->second;
-  }
-}
-
-bool InplaceGenerator::ResolveReadAfterWriteDependencies(
-    const PartitionConfig& old_part,
-    const PartitionConfig& new_part,
-    uint64_t partition_size,
-    size_t block_size,
-    BlobFileWriter* blob_file,
-    vector<AnnotatedOperation>* aops) {
-  // Convert the operations to the graph.
-  Graph graph;
-  CheckGraph(graph);
-  vector<Block> blocks(std::max(old_part.size, new_part.size) / block_size);
-  for (const auto& aop : *aops) {
-    AddInstallOpToGraph(
-        &graph, Vertex::kInvalidIndex, &blocks, aop.op, aop.name);
-  }
-  CheckGraph(graph);
-
-  // Final scratch block (if there's space)
-  Vertex::Index scratch_vertex = Vertex::kInvalidIndex;
-  if (blocks.size() < (partition_size / block_size)) {
-    scratch_vertex = graph.size();
-    graph.emplace_back();
-    size_t scratch_blocks = (partition_size / block_size) - blocks.size();
-    LOG(INFO) << "Added " << scratch_blocks << " scratch space blocks.";
-    CreateScratchNode(blocks.size(), scratch_blocks, &graph.back());
-  }
-  CheckGraph(graph);
-
-  LOG(INFO) << "Creating edges...";
-  CreateEdges(&graph, blocks);
-  LOG(INFO) << "Done creating edges";
-  CheckGraph(graph);
-
-  vector<Vertex::Index> final_order;
-  TEST_AND_RETURN_FALSE(ConvertGraphToDag(
-      &graph, new_part.path, blob_file, &final_order, scratch_vertex));
-
-  // Copy operations over to the |aops| vector in the final_order generated by
-  // the topological sort.
-  aops->clear();
-  aops->reserve(final_order.size());
-  for (const Vertex::Index vertex_index : final_order) {
-    const Vertex& vertex = graph[vertex_index];
-    aops->push_back(vertex.aop);
-  }
-  return true;
-}
-
-bool InplaceGenerator::GenerateOperations(const PayloadGenerationConfig& config,
-                                          const PartitionConfig& old_part,
-                                          const PartitionConfig& new_part,
-                                          BlobFileWriter* blob_file,
-                                          vector<AnnotatedOperation>* aops) {
-  TEST_AND_RETURN_FALSE(old_part.name == new_part.name);
-  TEST_AND_RETURN_FALSE(config.version.major == kInPlacePayloadVersion.major);
-  TEST_AND_RETURN_FALSE(config.version.minor == kInPlacePayloadVersion.minor);
-
-  ssize_t hard_chunk_blocks =
-      (config.hard_chunk_size == -1
-           ? -1
-           : config.hard_chunk_size / config.block_size);
-  size_t soft_chunk_blocks = config.soft_chunk_size / config.block_size;
-  uint64_t partition_size = new_part.size;
-  if (new_part.name == kPartitionNameRoot)
-    partition_size = config.rootfs_partition_size;
-
-  LOG(INFO) << "Delta compressing " << new_part.name << " partition...";
-  TEST_AND_RETURN_FALSE(diff_utils::DeltaReadPartition(aops,
-                                                       old_part,
-                                                       new_part,
-                                                       hard_chunk_blocks,
-                                                       soft_chunk_blocks,
-                                                       config.version,
-                                                       blob_file));
-  LOG(INFO) << "Done reading " << new_part.name;
-
-  TEST_AND_RETURN_FALSE(ResolveReadAfterWriteDependencies(
-      old_part, new_part, partition_size, config.block_size, blob_file, aops));
-  LOG(INFO) << "Done reordering " << new_part.name;
-  return true;
-}
-
-};  // namespace chromeos_update_engine
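
The deleted AssignBlockForAdjoiningCuts() above walks the operations scheduled
after |old_dst|, collects spare destination blocks (never block 0, per
crbug.com/480751) until the cut's temp extents are covered, makes the supplying
operations depend on |old_dst| with read-before edges, and falls back to
converting the cuts to full REPLACE/REPLACE_BZ operations when not enough
scratch is found. A minimal stand-alone sketch of the scratch search, using
hypothetical simplified types (Op, FindScratch) instead of the real graph
structures:

    #include <cstdint>
    #include <vector>

    struct Op {
      // Destination blocks this operation writes but never reads again;
      // candidates for scratch space.
      std::vector<uint64_t> spare_dst_blocks;
    };

    // Returns |blocks_needed| scratch blocks taken from operations that run
    // later in the install order, or an empty vector if there is not enough
    // scratch (the caller then converts the cut to a full operation).
    std::vector<uint64_t> FindScratch(const std::vector<Op>& later_ops,
                                      size_t blocks_needed) {
      std::vector<uint64_t> scratch;
      for (const Op& op : later_ops) {
        for (uint64_t block : op.spare_dst_blocks) {
          if (block == 0)
            continue;  // Block 0 is never used as scratch space.
          scratch.push_back(block);
          if (scratch.size() == blocks_needed)
            return scratch;
        }
      }
      return {};
    }
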
diff --git a/payload_generator/inplace_generator.h b/payload_generator/inplace_generator.h
deleted file mode 100644
index e7298d2..0000000
--- a/payload_generator/inplace_generator.h
+++ /dev/null
@@ -1,240 +0,0 @@
-//
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_INPLACE_GENERATOR_H_
-#define UPDATE_ENGINE_PAYLOAD_GENERATOR_INPLACE_GENERATOR_H_
-
-#include <map>
-#include <set>
-#include <string>
-#include <vector>
-
-#include "update_engine/payload_generator/blob_file_writer.h"
-#include "update_engine/payload_generator/delta_diff_generator.h"
-#include "update_engine/payload_generator/graph_types.h"
-#include "update_engine/payload_generator/operations_generator.h"
-
-// InplaceGenerator contains all functionality related to the inplace algorithm
-// for generating update payloads. These are the functions used when delta minor
-// version is 1.
-
-namespace chromeos_update_engine {
-
-// This struct stores all relevant info for an edge that is cut between
-// nodes old_src -> old_dst by creating new vertex new_vertex. The new
-// relationship is:
-// old_src -(read before)-> new_vertex <-(write before)- old_dst
-// new_vertex is a MOVE operation that moves some existing blocks into
-// temp space. The temp extents are, by necessity, stored in new_vertex
-// (as dst extents) and old_dst (as src extents), but they are also broken
-// out into tmp_extents, as the nodes themselves may contain many more
-// extents.
-struct CutEdgeVertexes {
-  Vertex::Index new_vertex;
-  Vertex::Index old_src;
-  Vertex::Index old_dst;
-  std::vector<Extent> tmp_extents;
-};
-
-class InplaceGenerator : public OperationsGenerator {
- public:
-  // Represents a disk block on the install partition.
-  struct Block {
-    // During install, each block on the install partition will be written
-    // and some may be read (in all likelihood, many will be read).
-    // The reading and writing will be performed by InstallOperations,
-    // each of which has a corresponding vertex in a graph.
-    // A Block object tells which vertex will read or write this block
-    // at install time.
-    // Generally, there will be a vector of Block objects whose length
-    // is the number of blocks on the install partition.
-    Block() : reader(Vertex::kInvalidIndex), writer(Vertex::kInvalidIndex) {}
-    Vertex::Index reader;
-    Vertex::Index writer;
-  };
-
-  InplaceGenerator() = default;
-
-  // Checks all the operations in the graph have a type assigned.
-  static void CheckGraph(const Graph& graph);
-
-  // Modifies blocks read by 'op' so that any blocks referred to by
-  // 'remove_extents' are replaced with blocks from 'replace_extents'.
-  // 'remove_extents' and 'replace_extents' must be the same number of blocks.
-  // Blocks will be substituted in the order listed in the vectors.
-  // E.g. if 'op' reads blocks 1, 2, 3, 4, 5, 6, 7, 8, remove_extents
-  // contains blocks 6, 2, 3, 5, and replace blocks contains
-  // 12, 13, 14, 15, then op will be changed to read from:
-  // 1, 13, 14, 4, 15, 12, 7, 8
-  static void SubstituteBlocks(Vertex* vertex,
-                               const std::vector<Extent>& remove_extents,
-                               const std::vector<Extent>& replace_extents);
-
-  // Cuts 'edges' from 'graph' according to the AU algorithm. This means
-  // for each edge A->B, remove the dependency that B occur before A.
-  // Do this by creating a new operation X that copies from the blocks
-  // specified by the edge's properties to temp space T. Modify B to read
-  // from T rather than the blocks in the edge. Modify A to depend on X,
-  // but not on B. Free space is found by looking in 'blocks'.
-  // Returns true on success.
-  static bool CutEdges(Graph* graph,
-                       const std::set<Edge>& edges,
-                       std::vector<CutEdgeVertexes>* out_cuts);
-
-  // Creates all the edges for the graph. Writers of a block point to
-  // readers of the same block. This is because for an edge A->B, B
-  // must complete before A executes.
-  static void CreateEdges(Graph* graph, const std::vector<Block>& blocks);
-
-  // Takes |op_indexes|, which is effectively a mapping from order in
-  // which the op is performed -> graph vertex index, and produces the
-  // reverse: a mapping from graph vertex index -> op_indexes index.
-  static void GenerateReverseTopoOrderMap(
-      const std::vector<Vertex::Index>& op_indexes,
-      std::vector<std::vector<Vertex::Index>::size_type>* reverse_op_indexes);
-
-  // Sorts the vector |cuts| by its |cuts[].old_dst| member. Order is
-  // determined by the order of elements in op_indexes.
-  static void SortCutsByTopoOrder(const std::vector<Vertex::Index>& op_indexes,
-                                  std::vector<CutEdgeVertexes>* cuts);
-
-  // Given a topologically sorted graph |op_indexes| and |graph|, alters
-  // |op_indexes| to move all the full operations to the end of the vector.
-  // Full operations should not be depended on, so this is safe.
-  static void MoveAndSortFullOpsToBack(Graph* graph,
-                                       std::vector<Vertex::Index>* op_indexes);
-
-  // Returns true iff there are no extents in the graph that refer to temp
-  // blocks. Temp blocks are in the range [kTempBlockStart, kSparseHole).
-  static bool NoTempBlocksRemain(const Graph& graph);
-
-  // Takes a |graph|, which has edges that must be cut, as listed in
-  // |cuts|.  Cuts the edges. Maintains a list in which the operations
-  // will be performed (in |op_indexes|) and the reverse (in
-  // |reverse_op_indexes|).  Cutting edges requires scratch space, and
-  // if insufficient scratch is found, the file is reread and will be
-  // send down (either as REPLACE or REPLACE_BZ).  Returns true on
-  // success.
-  static bool AssignTempBlocks(
-      Graph* graph,
-      const std::string& new_part,
-      BlobFileWriter* blob_file,
-      std::vector<Vertex::Index>* op_indexes,
-      std::vector<std::vector<Vertex::Index>::size_type>* reverse_op_indexes,
-      const std::vector<CutEdgeVertexes>& cuts);
-
-  // Handles allocation of temp blocks to a cut edge by converting the
-  // dest node to a full op. This removes the need for temp blocks, but
-  // comes at the cost of a worse compression ratio.
-  // For example, say we have A->B->A. It would first be cut to form:
-  // A->B->N<-A, where N copies blocks to temp space. If there are no
-  // temp blocks, this function can be called to convert it to the form:
-  // A->B. Now, A is a full operation.
-  static bool ConvertCutToFullOp(Graph* graph,
-                                 const CutEdgeVertexes& cut,
-                                 const std::string& new_part,
-                                 BlobFileWriter* blob_file);
-
-  // Takes a graph, which is not a DAG, which represents the files just
-  // read from disk, and converts it into a DAG by breaking all cycles
-  // and finding temp space to resolve broken edges.
-  // The final order of the nodes is given in |final_order|
-  // Some files may need to be reread from disk, thus |fd| and
-  // |data_file_size| are passed.
-  // If |scratch_vertex| is not kInvalidIndex, removes it from
-  // |final_order| before returning.
-  // Returns true on success.
-  static bool ConvertGraphToDag(Graph* graph,
-                                const std::string& new_part,
-                                BlobFileWriter* blob_file,
-                                std::vector<Vertex::Index>* final_order,
-                                Vertex::Index scratch_vertex);
-
-  // Creates a dummy REPLACE_BZ node in the given |vertex|. This can be used
-  // to provide scratch space. The node writes |num_blocks| blocks starting at
-  // |start_block|. The node should be marked invalid before writing all nodes to
-  // the output file.
-  static void CreateScratchNode(uint64_t start_block,
-                                uint64_t num_blocks,
-                                Vertex* vertex);
-
-  // The |blocks| vector contains a reader and writer for each block on the
-  // filesystem that's being in-place updated. We populate the reader/writer
-  // fields of |blocks| by calling this function.
-  // For each block in |operation| that is read or written, find that block
-  // in |blocks| and set the reader/writer field to the vertex passed.
-  // |graph| is not strictly necessary, but useful for printing out
-  // error messages.
-  static bool AddInstallOpToBlocksVector(const InstallOperation& operation,
-                                         const Graph& graph,
-                                         Vertex::Index vertex,
-                                         std::vector<Block>* blocks);
-
-  // Add a vertex (if |existing_vertex| is kInvalidVertex) or update an
-  // |existing_vertex| with the passed |operation|.
-  // This method will also register the vertex as the reader or writer of the
-  // blocks involved in the operation updating the |blocks| vector. The
-  // |op_name| associated with the Vertex is used for logging purposes.
-  static bool AddInstallOpToGraph(Graph* graph,
-                                  Vertex::Index existing_vertex,
-                                  std::vector<Block>* blocks,
-                                  const InstallOperation& operation,
-                                  const std::string& op_name);
-
-  // Apply the transformation stored in |the_map| to the |collection| vector
-  // replacing the map keys found in |collection| with its associated value in
-  // |the_map|.
-  static void ApplyMap(std::vector<uint64_t>* collection,
-                       const std::map<uint64_t, uint64_t>& the_map);
-
-  // Resolve all read-after-write dependencies in the operation list |aops|. The
-  // operations in |aops| are such that they generate the desired |new_part| if
-  // applied reading always from the original image. This function reorders the
-  // operations and generates new operations when needed to make these
-  // operations produce the same |new_part| result when applied in-place.
-  // The new operations will create blobs in |data_file_fd| and update
-  // the file size pointed by |data_file_size| if needed.
-  // On success, stores the new operations in |aops| in the right order and
-  // returns true.
-  static bool ResolveReadAfterWriteDependencies(
-      const PartitionConfig& old_part,
-      const PartitionConfig& new_part,
-      uint64_t partition_size,
-      size_t block_size,
-      BlobFileWriter* blob_file,
-      std::vector<AnnotatedOperation>* aops);
-
-  // Generate the update payload operations for the given partition using
-  // only operations that read from the target and/or write to the target,
-  // hence, applying the payload "in-place" in the target partition. This method
-  // assumes that the contents of the source image are pre-copied to the target
-  // partition, up to the size of the source image. Use this method to generate
-  // a delta update with the minor version kInPlaceMinorPayloadVersion.
-  // The operations are stored in |aops|. All the offsets in the operations
-  // reference the data written to |blob_file|.
-  bool GenerateOperations(const PayloadGenerationConfig& config,
-                          const PartitionConfig& old_part,
-                          const PartitionConfig& new_part,
-                          BlobFileWriter* blob_file,
-                          std::vector<AnnotatedOperation>* aops) override;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(InplaceGenerator);
-};
-
-};  // namespace chromeos_update_engine
-
-#endif  // UPDATE_ENGINE_PAYLOAD_GENERATOR_INPLACE_GENERATOR_H_
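
The header above documents SubstituteBlocks() with a worked example. The
stand-alone program below is illustrative only; it uses plain block numbers
instead of Extent protos to reproduce that substitution:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
      // An op reads blocks 1..8; blocks {6, 2, 3, 5} are replaced, in order,
      // by {12, 13, 14, 15}, as in the SubstituteBlocks() comment.
      std::vector<uint64_t> reads = {1, 2, 3, 4, 5, 6, 7, 8};
      const std::vector<uint64_t> remove = {6, 2, 3, 5};
      const std::vector<uint64_t> replace = {12, 13, 14, 15};
      for (size_t i = 0; i < remove.size(); ++i)
        for (uint64_t& block : reads)
          if (block == remove[i])
            block = replace[i];
      for (uint64_t block : reads)
        std::cout << block << ' ';  // Prints: 1 13 14 4 15 12 7 8
      std::cout << '\n';
      return 0;
    }
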
diff --git a/payload_generator/inplace_generator_unittest.cc b/payload_generator/inplace_generator_unittest.cc
deleted file mode 100644
index 8028f36..0000000
--- a/payload_generator/inplace_generator_unittest.cc
+++ /dev/null
@@ -1,752 +0,0 @@
-//
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/payload_generator/inplace_generator.h"
-
-#include <map>
-#include <memory>
-#include <set>
-#include <sstream>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include <base/format_macros.h>
-#include <base/logging.h>
-#include <base/strings/string_util.h>
-#include <base/strings/stringprintf.h>
-#include <gtest/gtest.h>
-
-#include "update_engine/common/test_utils.h"
-#include "update_engine/common/utils.h"
-#include "update_engine/payload_generator/cycle_breaker.h"
-#include "update_engine/payload_generator/delta_diff_generator.h"
-#include "update_engine/payload_generator/delta_diff_utils.h"
-#include "update_engine/payload_generator/extent_ranges.h"
-#include "update_engine/payload_generator/graph_types.h"
-#include "update_engine/payload_generator/graph_utils.h"
-
-using std::map;
-using std::set;
-using std::string;
-using std::stringstream;
-using std::vector;
-
-namespace chromeos_update_engine {
-
-using Block = InplaceGenerator::Block;
-
-namespace {
-
-void GenVertex(Vertex* out,
-               const vector<Extent>& src_extents,
-               const vector<Extent>& dst_extents,
-               const string& path,
-               InstallOperation::Type type) {
-  out->aop.op.set_type(type);
-  out->aop.name = path;
-  StoreExtents(src_extents, out->aop.op.mutable_src_extents());
-  StoreExtents(dst_extents, out->aop.op.mutable_dst_extents());
-}
-
-vector<Extent> VectOfExt(uint64_t start_block, uint64_t num_blocks) {
-  return vector<Extent>(1, ExtentForRange(start_block, num_blocks));
-}
-
-EdgeProperties EdgeWithReadDep(const vector<Extent>& extents) {
-  EdgeProperties ret;
-  ret.extents = extents;
-  return ret;
-}
-
-EdgeProperties EdgeWithWriteDep(const vector<Extent>& extents) {
-  EdgeProperties ret;
-  ret.write_extents = extents;
-  return ret;
-}
-
-template <typename T>
-void DumpVect(const vector<T>& vect) {
-  stringstream ss(stringstream::out);
-  for (typename vector<T>::const_iterator it = vect.begin(), e = vect.end();
-       it != e;
-       ++it) {
-    ss << *it << ", ";
-  }
-  LOG(INFO) << "{" << ss.str() << "}";
-}
-
-void AppendExtent(vector<Extent>* vect, uint64_t start, uint64_t length) {
-  vect->resize(vect->size() + 1);
-  vect->back().set_start_block(start);
-  vect->back().set_num_blocks(length);
-}
-
-void OpAppendExtent(InstallOperation* op, uint64_t start, uint64_t length) {
-  Extent* extent = op->add_src_extents();
-  extent->set_start_block(start);
-  extent->set_num_blocks(length);
-}
-
-}  // namespace
-
-class InplaceGeneratorTest : public ::testing::Test {
- protected:
-  // Initialize |blob_path_|, |blob_file_size_| and |blob_file_fd_| variables
-  // with a new blob file. The file is closed and removed automatically when
-  // the test finishes.
-  void CreateBlobFile() {
-    // blob_fd_closer_ takes a pointer to blob_fd_. Make sure we destroy a
-    // previous instance before overriding blob_fd_.
-    blob_fd_closer_.reset();
-    EXPECT_TRUE(utils::MakeTempFile(
-        "InplaceGenerator_blob_file.XXXXXX", &blob_path_, &blob_fd_));
-    blob_path_unlinker_.reset(new ScopedPathUnlinker(blob_path_));
-    blob_fd_closer_.reset(new ScopedFdCloser(&blob_fd_));
-    blob_file_size_ = 0;
-    EXPECT_GE(blob_fd_, 0);
-    blob_file_.reset(new BlobFileWriter(blob_fd_, &blob_file_size_));
-  }
-
-  // Dump the list of operations |aops| in case of test failure.
-  void DumpAopsOnFailure(const vector<AnnotatedOperation>& aops) {
-    if (HasNonfatalFailure()) {
-      LOG(INFO) << "Result operation list:";
-      for (const auto& aop : aops) {
-        LOG(INFO) << aop;
-      }
-    }
-  }
-
-  // Blob file name, file descriptor and file size used to store operation
-  // blobs.
-  string blob_path_;
-  int blob_fd_{-1};
-  off_t blob_file_size_{0};
-  std::unique_ptr<BlobFileWriter> blob_file_;
-  std::unique_ptr<ScopedPathUnlinker> blob_path_unlinker_;
-  std::unique_ptr<ScopedFdCloser> blob_fd_closer_;
-};
-
-TEST_F(InplaceGeneratorTest, BlockDefaultValues) {
-  // Tests that a Block is initialized with the default values as a
-  // Vertex::kInvalidIndex. This is required by the delta generators.
-  Block block;
-  EXPECT_EQ(Vertex::kInvalidIndex, block.reader);
-  EXPECT_EQ(Vertex::kInvalidIndex, block.writer);
-}
-
-TEST_F(InplaceGeneratorTest, SubstituteBlocksTest) {
-  vector<Extent> remove_blocks;
-  AppendExtent(&remove_blocks, 3, 3);
-  AppendExtent(&remove_blocks, 7, 1);
-  vector<Extent> replace_blocks;
-  AppendExtent(&replace_blocks, 10, 2);
-  AppendExtent(&replace_blocks, 13, 2);
-  Vertex vertex;
-  InstallOperation& op = vertex.aop.op;
-  OpAppendExtent(&op, 4, 3);
-  OpAppendExtent(&op, kSparseHole, 4);  // Sparse hole in file
-  OpAppendExtent(&op, 3, 1);
-  OpAppendExtent(&op, 7, 3);
-
-  InplaceGenerator::SubstituteBlocks(&vertex, remove_blocks, replace_blocks);
-
-  EXPECT_EQ(7, op.src_extents_size());
-  EXPECT_EQ(11U, op.src_extents(0).start_block());
-  EXPECT_EQ(1U, op.src_extents(0).num_blocks());
-  EXPECT_EQ(13U, op.src_extents(1).start_block());
-  EXPECT_EQ(1U, op.src_extents(1).num_blocks());
-  EXPECT_EQ(6U, op.src_extents(2).start_block());
-  EXPECT_EQ(1U, op.src_extents(2).num_blocks());
-  EXPECT_EQ(kSparseHole, op.src_extents(3).start_block());
-  EXPECT_EQ(4U, op.src_extents(3).num_blocks());
-  EXPECT_EQ(10U, op.src_extents(4).start_block());
-  EXPECT_EQ(1U, op.src_extents(4).num_blocks());
-  EXPECT_EQ(14U, op.src_extents(5).start_block());
-  EXPECT_EQ(1U, op.src_extents(5).num_blocks());
-  EXPECT_EQ(8U, op.src_extents(6).start_block());
-  EXPECT_EQ(2U, op.src_extents(6).num_blocks());
-}
-
-TEST_F(InplaceGeneratorTest, CutEdgesTest) {
-  Graph graph;
-  vector<Block> blocks(9);
-
-  // Create nodes in graph
-  {
-    graph.resize(graph.size() + 1);
-    graph.back().aop.op.set_type(InstallOperation::MOVE);
-    // Reads from blocks 3, 5, 7
-    vector<Extent> extents;
-    AppendBlockToExtents(&extents, 3);
-    AppendBlockToExtents(&extents, 5);
-    AppendBlockToExtents(&extents, 7);
-    StoreExtents(extents, graph.back().aop.op.mutable_src_extents());
-    blocks[3].reader = graph.size() - 1;
-    blocks[5].reader = graph.size() - 1;
-    blocks[7].reader = graph.size() - 1;
-
-    // Writes to blocks 1, 2, 4
-    extents.clear();
-    AppendBlockToExtents(&extents, 1);
-    AppendBlockToExtents(&extents, 2);
-    AppendBlockToExtents(&extents, 4);
-    StoreExtents(extents, graph.back().aop.op.mutable_dst_extents());
-    blocks[1].writer = graph.size() - 1;
-    blocks[2].writer = graph.size() - 1;
-    blocks[4].writer = graph.size() - 1;
-  }
-  {
-    graph.resize(graph.size() + 1);
-    graph.back().aop.op.set_type(InstallOperation::MOVE);
-    // Reads from blocks 1, 2, 4
-    vector<Extent> extents;
-    AppendBlockToExtents(&extents, 1);
-    AppendBlockToExtents(&extents, 2);
-    AppendBlockToExtents(&extents, 4);
-    StoreExtents(extents, graph.back().aop.op.mutable_src_extents());
-    blocks[1].reader = graph.size() - 1;
-    blocks[2].reader = graph.size() - 1;
-    blocks[4].reader = graph.size() - 1;
-
-    // Writes to blocks 3, 5, 6
-    extents.clear();
-    AppendBlockToExtents(&extents, 3);
-    AppendBlockToExtents(&extents, 5);
-    AppendBlockToExtents(&extents, 6);
-    StoreExtents(extents, graph.back().aop.op.mutable_dst_extents());
-    blocks[3].writer = graph.size() - 1;
-    blocks[5].writer = graph.size() - 1;
-    blocks[6].writer = graph.size() - 1;
-  }
-
-  // Create edges
-  InplaceGenerator::CreateEdges(&graph, blocks);
-
-  // Find cycles
-  CycleBreaker cycle_breaker;
-  set<Edge> cut_edges;
-  cycle_breaker.BreakCycles(graph, &cut_edges);
-
-  EXPECT_EQ(1U, cut_edges.size());
-  EXPECT_TRUE(cut_edges.end() !=
-              cut_edges.find(std::pair<Vertex::Index, Vertex::Index>(1, 0)));
-
-  vector<CutEdgeVertexes> cuts;
-  EXPECT_TRUE(InplaceGenerator::CutEdges(&graph, cut_edges, &cuts));
-
-  EXPECT_EQ(3U, graph.size());
-
-  // Check new node in graph:
-  EXPECT_EQ(InstallOperation::MOVE, graph.back().aop.op.type());
-  EXPECT_EQ(2, graph.back().aop.op.src_extents_size());
-  EXPECT_EQ(1, graph.back().aop.op.dst_extents_size());
-  EXPECT_EQ(kTempBlockStart, graph.back().aop.op.dst_extents(0).start_block());
-  EXPECT_EQ(2U, graph.back().aop.op.dst_extents(0).num_blocks());
-  EXPECT_TRUE(graph.back().out_edges.empty());
-
-  // Check that old node reads from new blocks
-  EXPECT_EQ(2, graph[0].aop.op.src_extents_size());
-  EXPECT_EQ(kTempBlockStart, graph[0].aop.op.src_extents(0).start_block());
-  EXPECT_EQ(2U, graph[0].aop.op.src_extents(0).num_blocks());
-  EXPECT_EQ(7U, graph[0].aop.op.src_extents(1).start_block());
-  EXPECT_EQ(1U, graph[0].aop.op.src_extents(1).num_blocks());
-
-  // And that the old dst extents haven't changed
-  EXPECT_EQ(2, graph[0].aop.op.dst_extents_size());
-  EXPECT_EQ(1U, graph[0].aop.op.dst_extents(0).start_block());
-  EXPECT_EQ(2U, graph[0].aop.op.dst_extents(0).num_blocks());
-  EXPECT_EQ(4U, graph[0].aop.op.dst_extents(1).start_block());
-  EXPECT_EQ(1U, graph[0].aop.op.dst_extents(1).num_blocks());
-
-  // Ensure it only depends on the next node and the new temp node
-  EXPECT_EQ(2U, graph[0].out_edges.size());
-  EXPECT_TRUE(graph[0].out_edges.end() != graph[0].out_edges.find(1));
-  EXPECT_TRUE(graph[0].out_edges.end() !=
-              graph[0].out_edges.find(graph.size() - 1));
-
-  // Check second node has unchanged extents
-  EXPECT_EQ(2, graph[1].aop.op.src_extents_size());
-  EXPECT_EQ(1U, graph[1].aop.op.src_extents(0).start_block());
-  EXPECT_EQ(2U, graph[1].aop.op.src_extents(0).num_blocks());
-  EXPECT_EQ(4U, graph[1].aop.op.src_extents(1).start_block());
-  EXPECT_EQ(1U, graph[1].aop.op.src_extents(1).num_blocks());
-
-  EXPECT_EQ(2, graph[1].aop.op.dst_extents_size());
-  EXPECT_EQ(3U, graph[1].aop.op.dst_extents(0).start_block());
-  EXPECT_EQ(1U, graph[1].aop.op.dst_extents(0).num_blocks());
-  EXPECT_EQ(5U, graph[1].aop.op.dst_extents(1).start_block());
-  EXPECT_EQ(2U, graph[1].aop.op.dst_extents(1).num_blocks());
-
-  // Ensure it only depends on the next node
-  EXPECT_EQ(1U, graph[1].out_edges.size());
-  EXPECT_TRUE(graph[1].out_edges.end() != graph[1].out_edges.find(2));
-}
-
-TEST_F(InplaceGeneratorTest, AssignTempBlocksReuseTest) {
-  Graph graph(9);
-
-  const vector<Extent> empt;
-  uint64_t tmp = kTempBlockStart;
-  const string kFilename = "/foo";
-
-  vector<CutEdgeVertexes> cuts;
-  cuts.resize(3);
-
-  // Simple broken loop:
-  GenVertex(
-      &graph[0], VectOfExt(0, 1), VectOfExt(1, 1), "", InstallOperation::MOVE);
-  GenVertex(&graph[1],
-            VectOfExt(tmp, 1),
-            VectOfExt(0, 1),
-            "",
-            InstallOperation::MOVE);
-  GenVertex(&graph[2],
-            VectOfExt(1, 1),
-            VectOfExt(tmp, 1),
-            "",
-            InstallOperation::MOVE);
-  // Corresponding edges:
-  graph[0].out_edges[2] = EdgeWithReadDep(VectOfExt(1, 1));
-  graph[1].out_edges[2] = EdgeWithWriteDep(VectOfExt(tmp, 1));
-  graph[1].out_edges[0] = EdgeWithReadDep(VectOfExt(0, 1));
-  // Store the cut:
-  cuts[0].old_dst = 1;
-  cuts[0].old_src = 0;
-  cuts[0].new_vertex = 2;
-  cuts[0].tmp_extents = VectOfExt(tmp, 1);
-  tmp++;
-
-  // Slightly more complex pair of loops:
-  GenVertex(
-      &graph[3], VectOfExt(4, 2), VectOfExt(2, 2), "", InstallOperation::MOVE);
-  GenVertex(
-      &graph[4], VectOfExt(6, 1), VectOfExt(7, 1), "", InstallOperation::MOVE);
-  GenVertex(&graph[5],
-            VectOfExt(tmp, 3),
-            VectOfExt(4, 3),
-            kFilename,
-            InstallOperation::MOVE);
-  GenVertex(&graph[6],
-            VectOfExt(2, 2),
-            VectOfExt(tmp, 2),
-            "",
-            InstallOperation::MOVE);
-  GenVertex(&graph[7],
-            VectOfExt(7, 1),
-            VectOfExt(tmp + 2, 1),
-            "",
-            InstallOperation::MOVE);
-  // Corresponding edges:
-  graph[3].out_edges[6] = EdgeWithReadDep(VectOfExt(2, 2));
-  graph[4].out_edges[7] = EdgeWithReadDep(VectOfExt(7, 1));
-  graph[5].out_edges[6] = EdgeWithWriteDep(VectOfExt(tmp, 2));
-  graph[5].out_edges[7] = EdgeWithWriteDep(VectOfExt(tmp + 2, 1));
-  graph[5].out_edges[3] = EdgeWithReadDep(VectOfExt(4, 2));
-  graph[5].out_edges[4] = EdgeWithReadDep(VectOfExt(6, 1));
-  // Store the cuts:
-  cuts[1].old_dst = 5;
-  cuts[1].old_src = 3;
-  cuts[1].new_vertex = 6;
-  cuts[1].tmp_extents = VectOfExt(tmp, 2);
-  cuts[2].old_dst = 5;
-  cuts[2].old_src = 4;
-  cuts[2].new_vertex = 7;
-  cuts[2].tmp_extents = VectOfExt(tmp + 2, 1);
-
-  // Supplier of temp block:
-  GenVertex(&graph[8], empt, VectOfExt(8, 1), "", InstallOperation::REPLACE);
-
-  // Specify the final order:
-  vector<Vertex::Index> op_indexes;
-  op_indexes.push_back(2);
-  op_indexes.push_back(0);
-  op_indexes.push_back(1);
-  op_indexes.push_back(6);
-  op_indexes.push_back(3);
-  op_indexes.push_back(7);
-  op_indexes.push_back(4);
-  op_indexes.push_back(5);
-  op_indexes.push_back(8);
-
-  vector<vector<Vertex::Index>::size_type> reverse_op_indexes;
-  InplaceGenerator::GenerateReverseTopoOrderMap(op_indexes,
-                                                &reverse_op_indexes);
-
-  CreateBlobFile();
-  EXPECT_TRUE(InplaceGenerator::AssignTempBlocks(&graph,
-                                                 "/dev/zero",
-                                                 blob_file_.get(),
-                                                 &op_indexes,
-                                                 &reverse_op_indexes,
-                                                 cuts));
-  EXPECT_FALSE(graph[6].valid);
-  EXPECT_FALSE(graph[7].valid);
-  EXPECT_EQ(1, graph[1].aop.op.src_extents_size());
-  EXPECT_EQ(2U, graph[1].aop.op.src_extents(0).start_block());
-  EXPECT_EQ(1U, graph[1].aop.op.src_extents(0).num_blocks());
-  EXPECT_EQ(InstallOperation::REPLACE_BZ, graph[5].aop.op.type());
-}
-
-TEST_F(InplaceGeneratorTest, MoveAndSortFullOpsToBackTest) {
-  Graph graph(4);
-  graph[0].aop.name = "A";
-  graph[0].aop.op.set_type(InstallOperation::REPLACE);
-  graph[1].aop.name = "B";
-  graph[1].aop.op.set_type(InstallOperation::BSDIFF);
-  graph[2].aop.name = "C";
-  graph[2].aop.op.set_type(InstallOperation::REPLACE_BZ);
-  graph[3].aop.name = "D";
-  graph[3].aop.op.set_type(InstallOperation::MOVE);
-
-  vector<Vertex::Index> vect(graph.size());
-
-  for (vector<Vertex::Index>::size_type i = 0; i < vect.size(); ++i) {
-    vect[i] = i;
-  }
-  InplaceGenerator::MoveAndSortFullOpsToBack(&graph, &vect);
-  EXPECT_EQ(vect.size(), graph.size());
-  EXPECT_EQ(graph[vect[0]].aop.name, "B");
-  EXPECT_EQ(graph[vect[1]].aop.name, "D");
-  EXPECT_EQ(graph[vect[2]].aop.name, "A");
-  EXPECT_EQ(graph[vect[3]].aop.name, "C");
-}
-
-TEST_F(InplaceGeneratorTest, AssignTempBlocksTest) {
-  Graph graph(9);
-  const vector<Extent> empt;  // empty
-  const string kFilename = "/foo";
-
-  // Some scratch space:
-  GenVertex(&graph[0], empt, VectOfExt(200, 1), "", InstallOperation::REPLACE);
-  GenVertex(&graph[1], empt, VectOfExt(210, 10), "", InstallOperation::REPLACE);
-  GenVertex(&graph[2], empt, VectOfExt(220, 1), "", InstallOperation::REPLACE);
-
-  // A cycle that requires 10 blocks to break:
-  GenVertex(&graph[3],
-            VectOfExt(10, 11),
-            VectOfExt(0, 9),
-            "",
-            InstallOperation::BSDIFF);
-  graph[3].out_edges[4] = EdgeWithReadDep(VectOfExt(0, 9));
-  GenVertex(&graph[4],
-            VectOfExt(0, 9),
-            VectOfExt(10, 11),
-            "",
-            InstallOperation::BSDIFF);
-  graph[4].out_edges[3] = EdgeWithReadDep(VectOfExt(10, 11));
-
-  // A cycle that requires 9 blocks to break:
-  GenVertex(&graph[5],
-            VectOfExt(40, 11),
-            VectOfExt(30, 10),
-            "",
-            InstallOperation::BSDIFF);
-  graph[5].out_edges[6] = EdgeWithReadDep(VectOfExt(30, 10));
-  GenVertex(&graph[6],
-            VectOfExt(30, 10),
-            VectOfExt(40, 11),
-            "",
-            InstallOperation::BSDIFF);
-  graph[6].out_edges[5] = EdgeWithReadDep(VectOfExt(40, 11));
-
-  // A cycle that requires 40 blocks to break (which is too many):
-  GenVertex(&graph[7],
-            VectOfExt(120, 50),
-            VectOfExt(60, 40),
-            "",
-            InstallOperation::BSDIFF);
-  graph[7].out_edges[8] = EdgeWithReadDep(VectOfExt(60, 40));
-  GenVertex(&graph[8],
-            VectOfExt(60, 40),
-            VectOfExt(120, 50),
-            kFilename,
-            InstallOperation::BSDIFF);
-  graph[8].out_edges[7] = EdgeWithReadDep(VectOfExt(120, 50));
-
-  graph_utils::DumpGraph(graph);
-
-  vector<Vertex::Index> final_order;
-
-  CreateBlobFile();
-  EXPECT_TRUE(InplaceGenerator::ConvertGraphToDag(&graph,
-                                                  "/dev/zero",
-                                                  blob_file_.get(),
-                                                  &final_order,
-                                                  Vertex::kInvalidIndex));
-
-  Graph expected_graph(12);
-  GenVertex(&expected_graph[0],
-            empt,
-            VectOfExt(200, 1),
-            "",
-            InstallOperation::REPLACE);
-  GenVertex(&expected_graph[1],
-            empt,
-            VectOfExt(210, 10),
-            "",
-            InstallOperation::REPLACE);
-  GenVertex(&expected_graph[2],
-            empt,
-            VectOfExt(220, 1),
-            "",
-            InstallOperation::REPLACE);
-  GenVertex(&expected_graph[3],
-            VectOfExt(10, 11),
-            VectOfExt(0, 9),
-            "",
-            InstallOperation::BSDIFF);
-  expected_graph[3].out_edges[9] = EdgeWithReadDep(VectOfExt(0, 9));
-  GenVertex(&expected_graph[4],
-            VectOfExt(60, 9),
-            VectOfExt(10, 11),
-            "",
-            InstallOperation::BSDIFF);
-  expected_graph[4].out_edges[3] = EdgeWithReadDep(VectOfExt(10, 11));
-  expected_graph[4].out_edges[9] = EdgeWithWriteDep(VectOfExt(60, 9));
-  GenVertex(&expected_graph[5],
-            VectOfExt(40, 11),
-            VectOfExt(30, 10),
-            "",
-            InstallOperation::BSDIFF);
-  expected_graph[5].out_edges[10] = EdgeWithReadDep(VectOfExt(30, 10));
-
-  GenVertex(&expected_graph[6],
-            VectOfExt(60, 10),
-            VectOfExt(40, 11),
-            "",
-            InstallOperation::BSDIFF);
-  expected_graph[6].out_edges[5] = EdgeWithReadDep(VectOfExt(40, 11));
-  expected_graph[6].out_edges[10] = EdgeWithWriteDep(VectOfExt(60, 10));
-
-  GenVertex(&expected_graph[7],
-            VectOfExt(120, 50),
-            VectOfExt(60, 40),
-            "",
-            InstallOperation::BSDIFF);
-  expected_graph[7].out_edges[6] = EdgeWithReadDep(VectOfExt(60, 10));
-
-  GenVertex(&expected_graph[8],
-            empt,
-            VectOfExt(0, 50),
-            "/foo",
-            InstallOperation::REPLACE_BZ);
-  expected_graph[8].out_edges[7] = EdgeWithReadDep(VectOfExt(120, 50));
-
-  GenVertex(&expected_graph[9],
-            VectOfExt(0, 9),
-            VectOfExt(60, 9),
-            "",
-            InstallOperation::MOVE);
-
-  GenVertex(&expected_graph[10],
-            VectOfExt(30, 10),
-            VectOfExt(60, 10),
-            "",
-            InstallOperation::MOVE);
-  expected_graph[10].out_edges[4] = EdgeWithReadDep(VectOfExt(60, 9));
-
-  EXPECT_EQ(12U, graph.size());
-  EXPECT_FALSE(graph.back().valid);
-  for (Graph::size_type i = 0; i < graph.size() - 1; i++) {
-    EXPECT_TRUE(graph[i].out_edges == expected_graph[i].out_edges);
-    if (i == 8) {
-      // special case
-    } else {
-      // EXPECT_TRUE(graph[i] == expected_graph[i]) << "i = " << i;
-    }
-  }
-}
-
-TEST_F(InplaceGeneratorTest, CreateScratchNodeTest) {
-  Vertex vertex;
-  InplaceGenerator::CreateScratchNode(12, 34, &vertex);
-  EXPECT_EQ(InstallOperation::REPLACE_BZ, vertex.aop.op.type());
-  EXPECT_EQ(0U, vertex.aop.op.data_offset());
-  EXPECT_EQ(0U, vertex.aop.op.data_length());
-  EXPECT_EQ(1, vertex.aop.op.dst_extents_size());
-  EXPECT_EQ(12U, vertex.aop.op.dst_extents(0).start_block());
-  EXPECT_EQ(34U, vertex.aop.op.dst_extents(0).num_blocks());
-}
-
-TEST_F(InplaceGeneratorTest, ApplyMapTest) {
-  vector<uint64_t> collection = {1, 2, 3, 4, 6};
-  vector<uint64_t> expected_values = {1, 2, 5, 4, 8};
-  map<uint64_t, uint64_t> value_map;
-  value_map[3] = 5;
-  value_map[6] = 8;
-  value_map[5] = 10;
-
-  InplaceGenerator::ApplyMap(&collection, value_map);
-  EXPECT_EQ(expected_values, collection);
-}
-
-// We can't produce MOVE operations with a source or destination in the block 0.
-// This test checks that the cycle breaker procedure doesn't produce such
-// operations.
-TEST_F(InplaceGeneratorTest, ResolveReadAfterWriteDependenciesAvoidMoveToZero) {
-  size_t block_size = 4096;
-  size_t num_blocks = 4;
-  vector<AnnotatedOperation> aops;
-
-  // Create a REPLACE_BZ for block 0, and a circular dependency among all other
-  // blocks. This situation would prefer to issue a MOVE to scratch space and
-  // the only available block is 0.
-  aops.emplace_back();
-  aops.back().name = base::StringPrintf("<bz-block-0>");
-  aops.back().op.set_type(InstallOperation::REPLACE_BZ);
-  StoreExtents({ExtentForRange(0, 1)}, aops.back().op.mutable_dst_extents());
-
-  for (size_t i = 1; i < num_blocks; i++) {
-    AnnotatedOperation aop;
-    aop.name = base::StringPrintf("<op-%" PRIuS ">", i);
-    aop.op.set_type(InstallOperation::BSDIFF);
-    StoreExtents({ExtentForRange(1 + i % (num_blocks - 1), 1)},
-                 aop.op.mutable_src_extents());
-    StoreExtents({ExtentForRange(i, 1)}, aop.op.mutable_dst_extents());
-    aops.push_back(aop);
-  }
-
-  PartitionConfig part("part");
-  part.path = "/dev/zero";
-  part.size = num_blocks * block_size;
-
-  CreateBlobFile();
-
-  // We ran two tests here. The first one without enough blocks for the scratch
-  // space, forcing it to create a new full operation and the second case with
-  // one extra block in the partition that can be used for the move operation.
-  for (const auto part_blocks : vector<uint64_t>{num_blocks, num_blocks + 1}) {
-    SCOPED_TRACE(
-        base::StringPrintf("Using partition_blocks=%" PRIu64, part_blocks));
-    vector<AnnotatedOperation> result_aops = aops;
-    EXPECT_TRUE(InplaceGenerator::ResolveReadAfterWriteDependencies(
-        part,
-        part,
-        part_blocks * block_size,
-        block_size,
-        blob_file_.get(),
-        &result_aops));
-
-    size_t full_ops = 0;
-    for (const auto& aop : result_aops) {
-      if (diff_utils::IsAReplaceOperation(aop.op.type()))
-        full_ops++;
-
-      if (aop.op.type() != InstallOperation::MOVE)
-        continue;
-      for (const Extent& extent : aop.op.src_extents()) {
-        EXPECT_NE(0U, extent.start_block())
-            << "On src extents for aop: " << aop;
-      }
-      for (const Extent& extent : aop.op.dst_extents()) {
-        EXPECT_NE(0U, extent.start_block())
-            << "On dst extents for aop: " << aop;
-      }
-    }
-
-    // If there's extra space in the partition, it should not use a new full
-    // operation for it.
-    EXPECT_EQ(part_blocks == num_blocks ? 2U : 1U, full_ops);
-
-    DumpAopsOnFailure(result_aops);
-  }
-}
-
-// Test that we can shrink a filesystem and break cycles.
-TEST_F(InplaceGeneratorTest, ResolveReadAfterWriteDependenciesShrinkData) {
-  size_t block_size = 4096;
-  size_t old_blocks = 10;
-  size_t new_blocks = 8;
-  vector<AnnotatedOperation> aops;
-
-  // Create a loop using the blocks 1-6 and one other operation writing to the
-  // block 7 from outside the new partition. The loop in the blocks 1-6 uses
-  // two-block operations, so it needs two blocks of scratch space. It can't use
-  // the block 0 as scratch space (see previous test) and it can't use the
-  // blocks 7 or 8 due the last move operation.
-
-  aops.emplace_back();
-  aops.back().name = base::StringPrintf("<bz-block-0>");
-  aops.back().op.set_type(InstallOperation::REPLACE_BZ);
-  StoreExtents({ExtentForRange(0, 1)}, aops.back().op.mutable_dst_extents());
-
-  const size_t num_ops = 3;
-  for (size_t i = 0; i < num_ops; i++) {
-    AnnotatedOperation aop;
-    aop.name = base::StringPrintf("<op-%" PRIuS ">", i);
-    aop.op.set_type(InstallOperation::BSDIFF);
-    StoreExtents({ExtentForRange(1 + 2 * i, 2)}, aop.op.mutable_src_extents());
-    StoreExtents({ExtentForRange(1 + 2 * ((i + 1) % num_ops), 2)},
-                 aop.op.mutable_dst_extents());
-    aops.push_back(aop);
-  }
-
-  {
-    AnnotatedOperation aop;
-    aop.name = "<op-shrink>";
-    aop.op.set_type(InstallOperation::BSDIFF);
-    StoreExtents({ExtentForRange(8, 1)}, aop.op.mutable_src_extents());
-    StoreExtents({ExtentForRange(7, 1)}, aop.op.mutable_dst_extents());
-    aops.push_back(aop);
-  }
-
-  PartitionConfig old_part("part");
-  old_part.path = "/dev/zero";
-  old_part.size = old_blocks * block_size;
-
-  PartitionConfig new_part("part");
-  new_part.path = "/dev/zero";
-  new_part.size = new_blocks * block_size;
-
-  CreateBlobFile();
-
-  EXPECT_TRUE(InplaceGenerator::ResolveReadAfterWriteDependencies(
-      old_part,
-      new_part,
-      (old_blocks + 2) * block_size,  // enough scratch space.
-      block_size,
-      blob_file_.get(),
-      &aops));
-
-  size_t full_ops = 0;
-  for (const auto& aop : aops) {
-    if (diff_utils::IsAReplaceOperation(aop.op.type()))
-      full_ops++;
-  }
-  // There should be only one REPLACE* operation, the one we added for block 0.
-  EXPECT_EQ(1U, full_ops);
-
-  // There should be only one MOVE operation, the one used to break the loop
-  // which should write to scratch space past the block 7 (the last block of the
-  // new partition) which is being written later.
-  size_t move_ops = 0;
-  for (const auto& aop : aops) {
-    if (aop.op.type() == InstallOperation::MOVE) {
-      move_ops++;
-      for (const Extent& extent : aop.op.dst_extents()) {
-        EXPECT_LE(7U, extent.start_block())
-            << "On dst extents for aop: " << aop;
-      }
-    }
-  }
-  EXPECT_EQ(1U, move_ops);
-
-  DumpAopsOnFailure(aops);
-}
-
-}  // namespace chromeos_update_engine
diff --git a/payload_generator/mapfile_filesystem_unittest.cc b/payload_generator/mapfile_filesystem_unittest.cc
index 36ae3bf..57b672b 100644
--- a/payload_generator/mapfile_filesystem_unittest.cc
+++ b/payload_generator/mapfile_filesystem_unittest.cc
@@ -55,8 +55,8 @@
 
 class MapfileFilesystemTest : public ::testing::Test {
  protected:
-  test_utils::ScopedTempFile temp_file_{"mapfile_file.XXXXXX"};
-  test_utils::ScopedTempFile temp_mapfile_{"mapfile_mapfile.XXXXXX"};
+  ScopedTempFile temp_file_{"mapfile_file.XXXXXX"};
+  ScopedTempFile temp_mapfile_{"mapfile_mapfile.XXXXXX"};
 };
 
 TEST_F(MapfileFilesystemTest, EmptyFilesystem) {
diff --git a/payload_generator/merge_sequence_generator.cc b/payload_generator/merge_sequence_generator.cc
new file mode 100644
index 0000000..289e2f8
--- /dev/null
+++ b/payload_generator/merge_sequence_generator.cc
@@ -0,0 +1,301 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/merge_sequence_generator.h"
+
+#include <algorithm>
+
+#include "update_engine/payload_generator/extent_utils.h"
+
+namespace chromeos_update_engine {
+
+CowMergeOperation CreateCowMergeOperation(const Extent& src_extent,
+                                          const Extent& dst_extent) {
+  CowMergeOperation ret;
+  ret.set_type(CowMergeOperation::COW_COPY);
+  *ret.mutable_src_extent() = src_extent;
+  *ret.mutable_dst_extent() = dst_extent;
+  return ret;
+}
+
+std::ostream& operator<<(std::ostream& os,
+                         const CowMergeOperation& merge_operation) {
+  os << "CowMergeOperation src extent: "
+     << ExtentsToString({merge_operation.src_extent()})
+     << ", dst extent: " << ExtentsToString({merge_operation.dst_extent()});
+  return os;
+}
+
+// The OTA generation guarantees that all blocks in the dst extent are written
+// only once, so the dst extent can be used to order CowMergeOperations.
+bool operator<(const CowMergeOperation& op1, const CowMergeOperation& op2) {
+  return op1.dst_extent().start_block() < op2.dst_extent().start_block();
+}
+
+bool operator==(const CowMergeOperation& op1, const CowMergeOperation& op2) {
+  return op1.type() == op2.type() && op1.src_extent() == op2.src_extent() &&
+         op1.dst_extent() == op2.dst_extent();
+}
+
+template <typename T>
+constexpr T GetDifference(T first, T second) {
+  T abs_diff = (first > second) ? (first - second) : (second - first);
+  return abs_diff;
+}
+
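+// Splits a self-overlapping copy (src and dst extents overlap) into chunks no
+// larger than the distance between the src and dst start blocks, so that no
+// chunk overlaps itself. For example, src [25,40] and dst [30,45] (distance 5)
+// become [25,29]->[30,34], [30,34]->[35,39], [35,39]->[40,44], [40,40]->[45,45].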
+void SplitSelfOverlapping(const Extent& src_extent,
+                          const Extent& dst_extent,
+                          std::vector<CowMergeOperation>* sequence) {
+  CHECK_EQ(src_extent.num_blocks(), dst_extent.num_blocks());
+  if (src_extent.start_block() == dst_extent.start_block()) {
+    sequence->emplace_back(CreateCowMergeOperation(src_extent, dst_extent));
+    return;
+  }
+
+  const size_t diff =
+      GetDifference(src_extent.start_block(), dst_extent.start_block());
+  for (size_t i = 0; i < src_extent.num_blocks(); i += diff) {
+    auto num_blocks = std::min<size_t>(diff, src_extent.num_blocks() - i);
+    sequence->emplace_back(CreateCowMergeOperation(
+        ExtentForRange(i + src_extent.start_block(), num_blocks),
+        ExtentForRange(i + dst_extent.start_block(), num_blocks)));
+  }
+}
+
+std::unique_ptr<MergeSequenceGenerator> MergeSequenceGenerator::Create(
+    const std::vector<AnnotatedOperation>& aops) {
+  std::vector<CowMergeOperation> sequence;
+  for (const auto& aop : aops) {
+    // Only handle SOURCE_COPY now for the cow size optimization.
+    if (aop.op.type() != InstallOperation::SOURCE_COPY) {
+      continue;
+    }
+    if (aop.op.dst_extents().size() != 1) {
+      std::vector<Extent> out_extents;
+      ExtentsToVector(aop.op.dst_extents(), &out_extents);
+      LOG(ERROR) << "The dst extents for source_copy expects to be contiguous,"
+                 << " dst extents: " << ExtentsToString(out_extents);
+      return nullptr;
+    }
+
+    // Split the source extents.
+    size_t used_blocks = 0;
+    for (const auto& src_extent : aop.op.src_extents()) {
+      // The dst_extent in the merge sequence will be a subset of
+      // InstallOperation's dst_extent. This will simplify the OTA -> COW
+      // conversion when we install the payload.
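+      // For example, src_extents {[2,4], [6,6], [8,11]} with dst_extents
+      // {[10,17]} yield the merge operations [2,4]->[10,12], [6,6]->[13,13]
+      // and [8,11]->[14,17].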
+      Extent dst_extent =
+          ExtentForRange(aop.op.dst_extents(0).start_block() + used_blocks,
+                         src_extent.num_blocks());
+
+      // A self-overlapping SOURCE_COPY must be split into multiple
+      // non-self-overlapping ops.
+      if (ExtentRanges::ExtentsOverlap(src_extent, dst_extent)) {
+        SplitSelfOverlapping(src_extent, dst_extent, &sequence);
+      } else {
+        sequence.emplace_back(CreateCowMergeOperation(src_extent, dst_extent));
+      }
+      used_blocks += src_extent.num_blocks();
+    }
+
+    if (used_blocks != aop.op.dst_extents(0).num_blocks()) {
+      LOG(ERROR) << "Number of blocks in src extents doesn't equal to the"
+                 << " ones in the dst extents, src blocks " << used_blocks
+                 << ", dst blocks " << aop.op.dst_extents(0).num_blocks();
+      return nullptr;
+    }
+  }
+
+  std::sort(sequence.begin(), sequence.end());
+  return std::unique_ptr<MergeSequenceGenerator>(
+      new MergeSequenceGenerator(sequence));
+}
+
+bool MergeSequenceGenerator::FindDependency(
+    std::map<CowMergeOperation, std::set<CowMergeOperation>>* result) const {
+  CHECK(result);
+  LOG(INFO) << "Finding dependencies";
+
+  // Since the OTA operations may reuse some source blocks, use binary search
+  // on the sorted dst extents to find overlaps.
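+  // If another operation's dst extent overlaps this operation's src extent,
+  // that operation must be merged after this one; otherwise its merge would
+  // overwrite blocks this operation still needs to read.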
+  std::map<CowMergeOperation, std::set<CowMergeOperation>> merge_after;
+  for (const auto& op : operations_) {
+    // lower bound (inclusive): dst extent's end block >= src extent's start
+    // block.
+    const auto lower_it = std::lower_bound(
+        operations_.begin(),
+        operations_.end(),
+        op,
+        [](const CowMergeOperation& it, const CowMergeOperation& op) {
+          auto dst_end_block =
+              it.dst_extent().start_block() + it.dst_extent().num_blocks() - 1;
+          return dst_end_block < op.src_extent().start_block();
+        });
+    // upper bound: dst extent's start block > src extent's end block
+    const auto upper_it = std::upper_bound(
+        lower_it,
+        operations_.end(),
+        op,
+        [](const CowMergeOperation& op, const CowMergeOperation& it) {
+          auto src_end_block =
+              op.src_extent().start_block() + op.src_extent().num_blocks() - 1;
+          return src_end_block < it.dst_extent().start_block();
+        });
+
+    // TODO(xunchang) skip inserting the empty set into merge_after.
+    if (lower_it == upper_it) {
+      merge_after.insert({op, {}});
+    } else {
+      std::set<CowMergeOperation> operations(lower_it, upper_it);
+      auto it = operations.find(op);
+      if (it != operations.end()) {
+        LOG(INFO) << "Self overlapping " << op;
+        operations.erase(it);
+      }
+      auto ret = merge_after.emplace(op, std::move(operations));
+      // Check that the insertion indeed happened.
+      CHECK(ret.second);
+    }
+  }
+
+  *result = std::move(merge_after);
+  return true;
+}
+
+bool MergeSequenceGenerator::Generate(
+    std::vector<CowMergeOperation>* sequence) const {
+  sequence->clear();
+  std::map<CowMergeOperation, std::set<CowMergeOperation>> merge_after;
+  if (!FindDependency(&merge_after)) {
+    LOG(ERROR) << "Failed to find dependencies";
+    return false;
+  }
+
+  LOG(INFO) << "Generating sequence";
+
+  // Use the non-DFS version of topological sort so that we can control which
+  // operations to discard when breaking cycles, yielding a deterministic
+  // sequence.
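+  // This is essentially Kahn's algorithm: operations with no remaining
+  // predecessors are emitted first. When only cycles remain, the remaining
+  // blocked operation with the smallest dst start block is converted to a
+  // raw write; it is dropped from the merge sequence but still processed so
+  // that the operations it blocks become free.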
+  std::map<CowMergeOperation, int> incoming_edges;
+  for (const auto& it : merge_after) {
+    for (const auto& blocked : it.second) {
+      // Value is default initialized to 0.
+      incoming_edges[blocked] += 1;
+    }
+  }
+
+  std::set<CowMergeOperation> free_operations;
+  for (const auto& op : operations_) {
+    if (incoming_edges.find(op) == incoming_edges.end()) {
+      free_operations.insert(op);
+    }
+  }
+
+  std::vector<CowMergeOperation> merge_sequence;
+  std::set<CowMergeOperation> convert_to_raw;
+  while (!incoming_edges.empty()) {
+    if (!free_operations.empty()) {
+      merge_sequence.insert(
+          merge_sequence.end(), free_operations.begin(), free_operations.end());
+    } else {
+      auto to_convert = incoming_edges.begin()->first;
+      free_operations.insert(to_convert);
+      convert_to_raw.insert(to_convert);
+      LOG(INFO) << "Converting operation to raw " << to_convert;
+    }
+
+    std::set<CowMergeOperation> next_free_operations;
+    for (const auto& op : free_operations) {
+      incoming_edges.erase(op);
+
+      // Now that this particular operation is merged, other operations blocked
+      // by this one may be free. Decrement the count of blocking operations,
+      // and set up the free operations for the next iteration.
+      for (const auto& blocked : merge_after[op]) {
+        auto it = incoming_edges.find(blocked);
+        if (it == incoming_edges.end()) {
+          continue;
+        }
+
+        auto blocking_transfer_count = &it->second;
+        if (*blocking_transfer_count <= 0) {
+          LOG(ERROR) << "Unexpected count in merge after map "
+                     << blocking_transfer_count;
+          return false;
+        }
+        // This operation is no longer blocked by anyone. Add it to the merge
+        // sequence in the next iteration.
+        *blocking_transfer_count -= 1;
+        if (*blocking_transfer_count == 0) {
+          next_free_operations.insert(blocked);
+        }
+      }
+    }
+
+    LOG(INFO) << "Remaining transfers " << incoming_edges.size()
+              << ", free transfers " << free_operations.size()
+              << ", merge_sequence size " << merge_sequence.size();
+    free_operations = std::move(next_free_operations);
+  }
+
+  if (!free_operations.empty()) {
+    merge_sequence.insert(
+        merge_sequence.end(), free_operations.begin(), free_operations.end());
+  }
+
+  CHECK_EQ(operations_.size(), merge_sequence.size() + convert_to_raw.size());
+
+  size_t blocks_in_sequence = 0;
+  for (const CowMergeOperation& transfer : merge_sequence) {
+    blocks_in_sequence += transfer.dst_extent().num_blocks();
+  }
+
+  size_t blocks_in_raw = 0;
+  for (const CowMergeOperation& transfer : convert_to_raw) {
+    blocks_in_raw += transfer.dst_extent().num_blocks();
+  }
+
+  LOG(INFO) << "Blocks in merge sequence " << blocks_in_sequence
+            << ", blocks in raw " << blocks_in_raw;
+  if (!ValidateSequence(merge_sequence)) {
+    return false;
+  }
+
+  *sequence = std::move(merge_sequence);
+  return true;
+}
+
+bool MergeSequenceGenerator::ValidateSequence(
+    const std::vector<CowMergeOperation>& sequence) {
+  LOG(INFO) << "Validating merge sequence";
+  ExtentRanges visited;
+  for (const auto& op : sequence) {
+    if (visited.OverlapsWithExtent(op.src_extent())) {
+      LOG(ERROR) << "Transfer violates the merge sequence " << op
+                 << "Visited extent ranges: ";
+      visited.Dump();
+      return false;
+    }
+
+    CHECK(!visited.OverlapsWithExtent(op.dst_extent()))
+        << "dst extent should write only once.";
+    visited.AddExtent(op.dst_extent());
+  }
+
+  return true;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_generator/merge_sequence_generator.h b/payload_generator/merge_sequence_generator.h
new file mode 100644
index 0000000..385fcc3
--- /dev/null
+++ b/payload_generator/merge_sequence_generator.h
@@ -0,0 +1,78 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_MERGE_SEQUENCE_GENERATOR_H_
+#define UPDATE_ENGINE_PAYLOAD_GENERATOR_MERGE_SEQUENCE_GENERATOR_H_
+
+#include <map>
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "update_engine/payload_generator/annotated_operation.h"
+#include "update_engine/payload_generator/extent_ranges.h"
+#include "update_engine/payload_generator/extent_utils.h"
+#include "update_engine/update_metadata.pb.h"
+
+namespace chromeos_update_engine {
+// Constructs CowMergeOperation from src & dst extents
+CowMergeOperation CreateCowMergeOperation(const Extent& src_extent,
+                                          const Extent& dst_extent);
+
+// Comparator for CowMergeOperation.
+bool operator<(const CowMergeOperation& op1, const CowMergeOperation& op2);
+bool operator==(const CowMergeOperation& op1, const CowMergeOperation& op2);
+
+std::ostream& operator<<(std::ostream& os,
+                         const CowMergeOperation& merge_operation);
+
+// This class takes a list of CowMergeOperations and sorts them so that no
+// read-after-write happens when following the sequence. When there is a
+// cycle, some operations are omitted from the list. Therefore, the resulting
+// sequence may not contain all blocks in the input list.
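+//
+// Typical usage (see the unit tests):
+//   auto generator = MergeSequenceGenerator::Create(aops);
+//   std::vector<CowMergeOperation> sequence;
+//   if (generator && generator->Generate(&sequence)) { /* use |sequence| */ }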
+class MergeSequenceGenerator {
+ public:
+  // Creates an object from a list of OTA InstallOperations. Returns nullptr on
+  // failure.
+  static std::unique_ptr<MergeSequenceGenerator> Create(
+      const std::vector<AnnotatedOperation>& aops);
+  // Checks that no read after write happens in the given sequence.
+  static bool ValidateSequence(const std::vector<CowMergeOperation>& sequence);
+
+  // Generates a merge sequence from |operations_| and puts the result in
+  // |sequence|. Returns false on failure.
+  bool Generate(std::vector<CowMergeOperation>* sequence) const;
+
+ private:
+  friend class MergeSequenceGeneratorTest;
+  explicit MergeSequenceGenerator(std::vector<CowMergeOperation> transfers)
+      : operations_(std::move(transfers)) {}
+
+  // For a given merge operation, finds all the operations that should merge
+  // after it. Puts the result in |merge_after|.
+  bool FindDependency(std::map<CowMergeOperation, std::set<CowMergeOperation>>*
+                          merge_after) const;
+  // The list of CowMergeOperations to sort.
+  std::vector<CowMergeOperation> operations_;
+};
+
+void SplitSelfOverlapping(const Extent& src_extent,
+                          const Extent& dst_extent,
+                          std::vector<CowMergeOperation>* sequence);
+
+}  // namespace chromeos_update_engine
+#endif  // UPDATE_ENGINE_PAYLOAD_GENERATOR_MERGE_SEQUENCE_GENERATOR_H_
diff --git a/payload_generator/merge_sequence_generator_unittest.cc b/payload_generator/merge_sequence_generator_unittest.cc
new file mode 100644
index 0000000..b8507ed
--- /dev/null
+++ b/payload_generator/merge_sequence_generator_unittest.cc
@@ -0,0 +1,250 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <algorithm>
+#include <vector>
+
+#include <gtest/gtest.h>
+
+#include "update_engine/payload_consumer/payload_constants.h"
+#include "update_engine/payload_generator/extent_ranges.h"
+#include "update_engine/payload_generator/extent_utils.h"
+#include "update_engine/payload_generator/merge_sequence_generator.h"
+
+namespace chromeos_update_engine {
+class MergeSequenceGeneratorTest : public ::testing::Test {
+ protected:
+  void VerifyTransfers(MergeSequenceGenerator* generator,
+                       const std::vector<CowMergeOperation>& expected) {
+    ASSERT_EQ(expected, generator->operations_);
+  }
+
+  void FindDependency(
+      std::vector<CowMergeOperation> transfers,
+      std::map<CowMergeOperation, std::set<CowMergeOperation>>* result) {
+    std::sort(transfers.begin(), transfers.end());
+    MergeSequenceGenerator generator(std::move(transfers));
+    ASSERT_TRUE(generator.FindDependency(result));
+  }
+
+  void GenerateSequence(std::vector<CowMergeOperation> transfers,
+                        const std::vector<CowMergeOperation>& expected) {
+    std::sort(transfers.begin(), transfers.end());
+    MergeSequenceGenerator generator(std::move(transfers));
+    std::vector<CowMergeOperation> sequence;
+    ASSERT_TRUE(generator.Generate(&sequence));
+    ASSERT_EQ(expected, sequence);
+  }
+};
+
+TEST_F(MergeSequenceGeneratorTest, Create) {
+  std::vector<AnnotatedOperation> aops{{"file1", {}}, {"file2", {}}};
+  aops[0].op.set_type(InstallOperation::SOURCE_COPY);
+  *aops[0].op.add_src_extents() = ExtentForRange(10, 10);
+  *aops[0].op.add_dst_extents() = ExtentForRange(30, 10);
+
+  aops[1].op.set_type(InstallOperation::SOURCE_COPY);
+  *aops[1].op.add_src_extents() = ExtentForRange(20, 10);
+  *aops[1].op.add_dst_extents() = ExtentForRange(40, 10);
+
+  auto generator = MergeSequenceGenerator::Create(aops);
+  ASSERT_TRUE(generator);
+  std::vector<CowMergeOperation> expected = {
+      CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(30, 10)),
+      CreateCowMergeOperation(ExtentForRange(20, 10), ExtentForRange(40, 10))};
+  VerifyTransfers(generator.get(), expected);
+
+  *aops[1].op.add_src_extents() = ExtentForRange(30, 5);
+  *aops[1].op.add_dst_extents() = ExtentForRange(50, 5);
+  generator = MergeSequenceGenerator::Create(aops);
+  ASSERT_FALSE(generator);
+}
+
+TEST_F(MergeSequenceGeneratorTest, Create_SplitSource) {
+  InstallOperation op;
+  op.set_type(InstallOperation::SOURCE_COPY);
+  *(op.add_src_extents()) = ExtentForRange(2, 3);
+  *(op.add_src_extents()) = ExtentForRange(6, 1);
+  *(op.add_src_extents()) = ExtentForRange(8, 4);
+  *(op.add_dst_extents()) = ExtentForRange(10, 8);
+
+  AnnotatedOperation aop{"file1", op};
+  auto generator = MergeSequenceGenerator::Create({aop});
+  ASSERT_TRUE(generator);
+  std::vector<CowMergeOperation> expected = {
+      CreateCowMergeOperation(ExtentForRange(2, 3), ExtentForRange(10, 3)),
+      CreateCowMergeOperation(ExtentForRange(6, 1), ExtentForRange(13, 1)),
+      CreateCowMergeOperation(ExtentForRange(8, 4), ExtentForRange(14, 4))};
+  VerifyTransfers(generator.get(), expected);
+}
+
+TEST_F(MergeSequenceGeneratorTest, FindDependency) {
+  std::vector<CowMergeOperation> transfers = {
+      CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(15, 10)),
+      CreateCowMergeOperation(ExtentForRange(40, 10), ExtentForRange(50, 10)),
+  };
+
+  std::map<CowMergeOperation, std::set<CowMergeOperation>> merge_after;
+  FindDependency(transfers, &merge_after);
+  ASSERT_EQ(std::set<CowMergeOperation>(), merge_after.at(transfers[0]));
+  ASSERT_EQ(std::set<CowMergeOperation>(), merge_after.at(transfers[1]));
+
+  transfers = {
+      CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(25, 10)),
+      CreateCowMergeOperation(ExtentForRange(24, 5), ExtentForRange(35, 5)),
+      CreateCowMergeOperation(ExtentForRange(30, 10), ExtentForRange(15, 10)),
+  };
+
+  FindDependency(transfers, &merge_after);
+  ASSERT_EQ(std::set<CowMergeOperation>({transfers[2]}),
+            merge_after.at(transfers[0]));
+  ASSERT_EQ(std::set<CowMergeOperation>({transfers[0], transfers[2]}),
+            merge_after.at(transfers[1]));
+  ASSERT_EQ(std::set<CowMergeOperation>({transfers[0], transfers[1]}),
+            merge_after.at(transfers[2]));
+}
+
+TEST_F(MergeSequenceGeneratorTest, FindDependencyEdgeCase) {
+  std::vector<CowMergeOperation> transfers = {
+      CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(15, 10)),
+      CreateCowMergeOperation(ExtentForRange(40, 10), ExtentForRange(50, 10)),
+      CreateCowMergeOperation(ExtentForRange(59, 10), ExtentForRange(60, 10)),
+  };
+
+  std::map<CowMergeOperation, std::set<CowMergeOperation>> merge_after;
+  FindDependency(transfers, &merge_after);
+  ASSERT_EQ(std::set<CowMergeOperation>(), merge_after.at(transfers[0]));
+  ASSERT_EQ(std::set<CowMergeOperation>(), merge_after.at(transfers[1]));
+  ASSERT_EQ(merge_after[transfers[2]].size(), 1U);
+}
+
+TEST_F(MergeSequenceGeneratorTest, FindDependency_ReusedSourceBlocks) {
+  std::vector<CowMergeOperation> transfers = {
+      CreateCowMergeOperation(ExtentForRange(5, 10), ExtentForRange(15, 10)),
+      CreateCowMergeOperation(ExtentForRange(6, 5), ExtentForRange(30, 5)),
+      CreateCowMergeOperation(ExtentForRange(50, 5), ExtentForRange(5, 5)),
+  };
+
+  std::map<CowMergeOperation, std::set<CowMergeOperation>> merge_after;
+  FindDependency(transfers, &merge_after);
+  ASSERT_EQ(std::set<CowMergeOperation>({transfers[2]}),
+            merge_after.at(transfers[0]));
+  ASSERT_EQ(std::set<CowMergeOperation>({transfers[2]}),
+            merge_after.at(transfers[1]));
+}
+
+TEST_F(MergeSequenceGeneratorTest, ValidateSequence) {
+  std::vector<CowMergeOperation> transfers = {
+      CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(15, 10)),
+      CreateCowMergeOperation(ExtentForRange(30, 10), ExtentForRange(40, 10)),
+  };
+
+  // A self-overlapping transfer is still a valid sequence.
+  ASSERT_TRUE(MergeSequenceGenerator::ValidateSequence(transfers));
+
+  transfers = {
+      CreateCowMergeOperation(ExtentForRange(30, 10), ExtentForRange(20, 10)),
+      CreateCowMergeOperation(ExtentForRange(15, 10), ExtentForRange(10, 10)),
+  };
+  ASSERT_FALSE(MergeSequenceGenerator::ValidateSequence(transfers));
+}
+
+TEST_F(MergeSequenceGeneratorTest, GenerateSequenceNoCycles) {
+  std::vector<CowMergeOperation> transfers = {
+      CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(15, 10)),
+      // file3 should merge before file2
+      CreateCowMergeOperation(ExtentForRange(40, 5), ExtentForRange(25, 5)),
+      CreateCowMergeOperation(ExtentForRange(25, 10), ExtentForRange(30, 10)),
+  };
+
+  std::vector<CowMergeOperation> expected{
+      transfers[0], transfers[2], transfers[1]};
+  GenerateSequence(transfers, expected);
+}
+
+TEST_F(MergeSequenceGeneratorTest, GenerateSequenceWithCycles) {
+  std::vector<CowMergeOperation> transfers = {
+      CreateCowMergeOperation(ExtentForRange(25, 10), ExtentForRange(30, 10)),
+      CreateCowMergeOperation(ExtentForRange(30, 10), ExtentForRange(40, 10)),
+      CreateCowMergeOperation(ExtentForRange(40, 10), ExtentForRange(25, 10)),
+      CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(15, 10)),
+  };
+
+  // Transfers 1, 2 and 3 form a cycle. Transfer 3, whose dst extent has the
+  // smallest offset, will be converted to raw blocks.
+  std::vector<CowMergeOperation> expected{
+      transfers[3], transfers[1], transfers[0]};
+  GenerateSequence(transfers, expected);
+}
+
+TEST_F(MergeSequenceGeneratorTest, GenerateSequenceMultipleCycles) {
+  std::vector<CowMergeOperation> transfers = {
+      // cycle 1
+      CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(25, 10)),
+      CreateCowMergeOperation(ExtentForRange(24, 5), ExtentForRange(35, 5)),
+      CreateCowMergeOperation(ExtentForRange(30, 10), ExtentForRange(15, 10)),
+      // cycle 2
+      CreateCowMergeOperation(ExtentForRange(55, 10), ExtentForRange(60, 10)),
+      CreateCowMergeOperation(ExtentForRange(60, 10), ExtentForRange(70, 10)),
+      CreateCowMergeOperation(ExtentForRange(70, 10), ExtentForRange(55, 10)),
+  };
+
+  // Transfers 3 and 6 will be converted to raw.
+  std::vector<CowMergeOperation> expected{
+      transfers[1], transfers[0], transfers[4], transfers[3]};
+  GenerateSequence(transfers, expected);
+}
+
+void ValidateSplitSequence(const Extent& src_extent, const Extent& dst_extent) {
+  std::vector<CowMergeOperation> sequence;
+  SplitSelfOverlapping(src_extent, dst_extent, &sequence);
+  ExtentRanges src_extent_set;
+  src_extent_set.AddExtent(src_extent);
+  ExtentRanges dst_extent_set;
+  dst_extent_set.AddExtent(dst_extent);
+
+  size_t src_block_count = 0;
+  size_t dst_block_count = 0;
+  std::cout << "src_extent: " << src_extent << " dst_extent: " << dst_extent
+            << '\n';
+  for (const auto& merge_op : sequence) {
+    src_extent_set.SubtractExtent(merge_op.src_extent());
+    dst_extent_set.SubtractExtent(merge_op.dst_extent());
+    src_block_count += merge_op.src_extent().num_blocks();
+    dst_block_count += merge_op.dst_extent().num_blocks();
+    std::cout << merge_op.src_extent() << " -> " << merge_op.dst_extent()
+              << '\n';
+    ASSERT_FALSE(ExtentRanges::ExtentsOverlap(merge_op.src_extent(),
+                                              merge_op.dst_extent()));
+  }
+  std::cout << '\n';
+  // Check that all blocks are covered
+  ASSERT_EQ(src_extent_set.extent_set().size(), 0UL);
+  ASSERT_EQ(dst_extent_set.extent_set().size(), 0UL);
+
+  // Check that the split didn't cover extra blocks
+  ASSERT_EQ(src_block_count, src_extent.num_blocks());
+  ASSERT_EQ(dst_block_count, dst_extent.num_blocks());
+}
+
+TEST_F(MergeSequenceGeneratorTest, SplitSelfOverlappingTest) {
+  auto a = ExtentForRange(25, 16);
+  auto b = ExtentForRange(30, 16);
+  ValidateSplitSequence(a, b);
+  ValidateSplitSequence(b, a);
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_generator/payload_file.cc b/payload_generator/payload_file.cc
index a111fd6..6ec219f 100644
--- a/payload_generator/payload_file.cc
+++ b/payload_generator/payload_file.cc
@@ -25,6 +25,7 @@
 #include <base/strings/stringprintf.h>
 
 #include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/delta_performer.h"
 #include "update_engine/payload_consumer/file_writer.h"
 #include "update_engine/payload_consumer/payload_constants.h"
@@ -64,40 +65,46 @@
   TEST_AND_RETURN_FALSE(config.version.Validate());
   major_version_ = config.version.major;
   manifest_.set_minor_version(config.version.minor);
-
-  if (!config.source.ImageInfoIsEmpty())
-    *(manifest_.mutable_old_image_info()) = config.source.image_info;
-
-  if (!config.target.ImageInfoIsEmpty())
-    *(manifest_.mutable_new_image_info()) = config.target.image_info;
-
   manifest_.set_block_size(config.block_size);
   manifest_.set_max_timestamp(config.max_timestamp);
 
-  if (major_version_ == kBrilloMajorPayloadVersion) {
-    if (config.target.dynamic_partition_metadata != nullptr)
-      *(manifest_.mutable_dynamic_partition_metadata()) =
-          *(config.target.dynamic_partition_metadata);
+  if (config.target.dynamic_partition_metadata != nullptr)
+    *(manifest_.mutable_dynamic_partition_metadata()) =
+        *(config.target.dynamic_partition_metadata);
+
+  if (config.is_partial_update) {
+    manifest_.set_partial_update(true);
   }
 
+  if (!config.apex_info_file.empty()) {
+    ApexMetadata apex_metadata;
+    int fd = open(config.apex_info_file.c_str(), O_RDONLY);
+    if (fd < 0) {
+      PLOG(FATAL) << "Failed to open " << config.apex_info_file << " for read.";
+    }
+    ScopedFdCloser closer{&fd};
+    CHECK(apex_metadata.ParseFromFileDescriptor(fd));
+    if (apex_metadata.apex_info_size() > 0) {
+      *manifest_.mutable_apex_info() =
+          std::move(*apex_metadata.mutable_apex_info());
+    }
+  }
   return true;
 }
 
 bool PayloadFile::AddPartition(const PartitionConfig& old_conf,
                                const PartitionConfig& new_conf,
-                               const vector<AnnotatedOperation>& aops) {
-  // Check partitions order for Chrome OS
-  if (major_version_ == kChromeOSMajorPayloadVersion) {
-    const vector<const char*> part_order = {kPartitionNameRoot,
-                                            kPartitionNameKernel};
-    TEST_AND_RETURN_FALSE(part_vec_.size() < part_order.size());
-    TEST_AND_RETURN_FALSE(new_conf.name == part_order[part_vec_.size()]);
-  }
+                               vector<AnnotatedOperation> aops,
+                               vector<CowMergeOperation> merge_sequence,
+                               size_t cow_size) {
   Partition part;
+  part.cow_size = cow_size;
   part.name = new_conf.name;
-  part.aops = aops;
+  part.aops = std::move(aops);
+  part.cow_merge_sequence = std::move(merge_sequence);
   part.postinstall = new_conf.postinstall;
   part.verity = new_conf.verity;
+  part.version = new_conf.version;
   // Initialize the PartitionInfo objects if present.
   if (!old_conf.path.empty())
     TEST_AND_RETURN_FALSE(
@@ -113,11 +120,9 @@
                                const string& private_key_path,
                                uint64_t* metadata_size_out) {
   // Reorder the data blobs with the manifest_.
-  string ordered_blobs_path;
-  TEST_AND_RETURN_FALSE(utils::MakeTempFile(
-      "CrAU_temp_data.ordered.XXXXXX", &ordered_blobs_path, nullptr));
-  ScopedPathUnlinker ordered_blobs_unlinker(ordered_blobs_path);
-  TEST_AND_RETURN_FALSE(ReorderDataBlobs(data_blobs_path, ordered_blobs_path));
+  ScopedTempFile ordered_blobs_file("CrAU_temp_data.ordered.XXXXXX");
+  TEST_AND_RETURN_FALSE(
+      ReorderDataBlobs(data_blobs_path, ordered_blobs_file.path()));
 
   // Check that install op blobs are in order.
   uint64_t next_blob_offset = 0;
@@ -134,75 +139,61 @@
   }
 
   // Copy the operations and partition info from the part_vec_ to the manifest.
-  manifest_.clear_install_operations();
-  manifest_.clear_kernel_install_operations();
   manifest_.clear_partitions();
   for (const auto& part : part_vec_) {
-    if (major_version_ == kBrilloMajorPayloadVersion) {
-      PartitionUpdate* partition = manifest_.add_partitions();
-      partition->set_partition_name(part.name);
-      if (part.postinstall.run) {
-        partition->set_run_postinstall(true);
-        if (!part.postinstall.path.empty())
-          partition->set_postinstall_path(part.postinstall.path);
-        if (!part.postinstall.filesystem_type.empty())
-          partition->set_filesystem_type(part.postinstall.filesystem_type);
-        partition->set_postinstall_optional(part.postinstall.optional);
+    PartitionUpdate* partition = manifest_.add_partitions();
+    partition->set_partition_name(part.name);
+    if (!part.version.empty()) {
+      partition->set_version(part.version);
+    }
+    if (part.cow_size > 0) {
+      partition->set_estimate_cow_size(part.cow_size);
+    }
+    if (part.postinstall.run) {
+      partition->set_run_postinstall(true);
+      if (!part.postinstall.path.empty())
+        partition->set_postinstall_path(part.postinstall.path);
+      if (!part.postinstall.filesystem_type.empty())
+        partition->set_filesystem_type(part.postinstall.filesystem_type);
+      partition->set_postinstall_optional(part.postinstall.optional);
+    }
+    if (!part.verity.IsEmpty()) {
+      if (part.verity.hash_tree_extent.num_blocks() != 0) {
+        *partition->mutable_hash_tree_data_extent() =
+            part.verity.hash_tree_data_extent;
+        *partition->mutable_hash_tree_extent() = part.verity.hash_tree_extent;
+        partition->set_hash_tree_algorithm(part.verity.hash_tree_algorithm);
+        if (!part.verity.hash_tree_salt.empty())
+          partition->set_hash_tree_salt(part.verity.hash_tree_salt.data(),
+                                        part.verity.hash_tree_salt.size());
       }
-      if (!part.verity.IsEmpty()) {
-        if (part.verity.hash_tree_extent.num_blocks() != 0) {
-          *partition->mutable_hash_tree_data_extent() =
-              part.verity.hash_tree_data_extent;
-          *partition->mutable_hash_tree_extent() = part.verity.hash_tree_extent;
-          partition->set_hash_tree_algorithm(part.verity.hash_tree_algorithm);
-          if (!part.verity.hash_tree_salt.empty())
-            partition->set_hash_tree_salt(part.verity.hash_tree_salt.data(),
-                                          part.verity.hash_tree_salt.size());
-        }
-        if (part.verity.fec_extent.num_blocks() != 0) {
-          *partition->mutable_fec_data_extent() = part.verity.fec_data_extent;
-          *partition->mutable_fec_extent() = part.verity.fec_extent;
-          partition->set_fec_roots(part.verity.fec_roots);
-        }
-      }
-      for (const AnnotatedOperation& aop : part.aops) {
-        *partition->add_operations() = aop.op;
-      }
-      if (part.old_info.has_size() || part.old_info.has_hash())
-        *(partition->mutable_old_partition_info()) = part.old_info;
-      if (part.new_info.has_size() || part.new_info.has_hash())
-        *(partition->mutable_new_partition_info()) = part.new_info;
-    } else {
-      // major_version_ == kChromeOSMajorPayloadVersion
-      if (part.name == kPartitionNameKernel) {
-        for (const AnnotatedOperation& aop : part.aops)
-          *manifest_.add_kernel_install_operations() = aop.op;
-        if (part.old_info.has_size() || part.old_info.has_hash())
-          *manifest_.mutable_old_kernel_info() = part.old_info;
-        if (part.new_info.has_size() || part.new_info.has_hash())
-          *manifest_.mutable_new_kernel_info() = part.new_info;
-      } else {
-        for (const AnnotatedOperation& aop : part.aops)
-          *manifest_.add_install_operations() = aop.op;
-        if (part.old_info.has_size() || part.old_info.has_hash())
-          *manifest_.mutable_old_rootfs_info() = part.old_info;
-        if (part.new_info.has_size() || part.new_info.has_hash())
-          *manifest_.mutable_new_rootfs_info() = part.new_info;
+      if (part.verity.fec_extent.num_blocks() != 0) {
+        *partition->mutable_fec_data_extent() = part.verity.fec_data_extent;
+        *partition->mutable_fec_extent() = part.verity.fec_extent;
+        partition->set_fec_roots(part.verity.fec_roots);
       }
     }
+    for (const AnnotatedOperation& aop : part.aops) {
+      *partition->add_operations() = aop.op;
+    }
+    for (const auto& merge_op : part.cow_merge_sequence) {
+      *partition->add_merge_operations() = merge_op;
+    }
+
+    if (part.old_info.has_size() || part.old_info.has_hash())
+      *(partition->mutable_old_partition_info()) = part.old_info;
+    if (part.new_info.has_size() || part.new_info.has_hash())
+      *(partition->mutable_new_partition_info()) = part.new_info;
   }
 
   // Signatures appear at the end of the blobs. Note the offset in the
-  // manifest_.
+  // |manifest_|.
   uint64_t signature_blob_length = 0;
   if (!private_key_path.empty()) {
     TEST_AND_RETURN_FALSE(PayloadSigner::SignatureBlobLength(
         {private_key_path}, &signature_blob_length));
     PayloadSigner::AddSignatureToManifest(
-        next_blob_offset,
-        signature_blob_length,
-        major_version_ == kChromeOSMajorPayloadVersion,
-        &manifest_);
+        next_blob_offset, signature_blob_length, &manifest_);
   }
 
   // Serialize protobuf
@@ -229,18 +220,14 @@
   TEST_AND_RETURN_FALSE(
       WriteUint64AsBigEndian(&writer, serialized_manifest.size()));
 
-  // Write metadata signature size.
-  uint32_t metadata_signature_size = 0;
-  if (major_version_ == kBrilloMajorPayloadVersion) {
-    // Metadata signature has the same size as payload signature, because they
-    // are both the same kind of signature for the same kind of hash.
-    uint32_t metadata_signature_size = htobe32(signature_blob_length);
-    TEST_AND_RETURN_FALSE_ERRNO(writer.Write(&metadata_signature_size,
-                                             sizeof(metadata_signature_size)));
-    metadata_size += sizeof(metadata_signature_size);
-    // Set correct size instead of big endian size.
-    metadata_signature_size = signature_blob_length;
-  }
+  // Metadata signature has the same size as payload signature, because they
+  // are both the same kind of signature for the same kind of hash.
+  uint32_t metadata_signature_size = htobe32(signature_blob_length);
+  TEST_AND_RETURN_FALSE_ERRNO(
+      writer.Write(&metadata_signature_size, sizeof(metadata_signature_size)));
+  metadata_size += sizeof(metadata_signature_size);
+  // Set correct size instead of big endian size.
+  metadata_signature_size = signature_blob_length;
 
   // Write protobuf
   LOG(INFO) << "Writing final delta file protobuf... "
@@ -249,8 +236,7 @@
       writer.Write(serialized_manifest.data(), serialized_manifest.size()));
 
   // Write metadata signature blob.
-  if (major_version_ == kBrilloMajorPayloadVersion &&
-      !private_key_path.empty()) {
+  if (!private_key_path.empty()) {
     brillo::Blob metadata_hash;
     TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfFile(
         payload_file, metadata_size, &metadata_hash));
@@ -261,9 +247,9 @@
         writer.Write(metadata_signature.data(), metadata_signature.size()));
   }
 
-  // Append the data blobs
+  // Append the data blobs.
   LOG(INFO) << "Writing final delta file data blobs...";
-  int blobs_fd = open(ordered_blobs_path.c_str(), O_RDONLY, 0);
+  int blobs_fd = open(ordered_blobs_file.path().c_str(), O_RDONLY, 0);
   ScopedFdCloser blobs_fd_closer(&blobs_fd);
   TEST_AND_RETURN_FALSE(blobs_fd >= 0);
   for (;;) {
diff --git a/payload_generator/payload_file.h b/payload_generator/payload_file.h
index 9dc80a7..3a45793 100644
--- a/payload_generator/payload_file.h
+++ b/payload_generator/payload_file.h
@@ -43,7 +43,9 @@
   // reference a blob stored in the file provided to WritePayload().
   bool AddPartition(const PartitionConfig& old_conf,
                     const PartitionConfig& new_conf,
-                    const std::vector<AnnotatedOperation>& aops);
+                    std::vector<AnnotatedOperation> aops,
+                    std::vector<CowMergeOperation> merge_sequence,
+                    size_t cow_size);
 
   // Write the payload to the |payload_file| file. The operations reference
   // blobs in the |data_blobs_path| file and the blobs will be reordered in the
@@ -60,9 +62,9 @@
   // Computes a SHA256 hash of the given buf and sets the hash value in the
   // operation so that update_engine could verify. This hash should be set
   // for all operations that have a non-zero data blob. One exception is the
-  // dummy operation for signature blob because the contents of the signature
+  // fake operation for signature blob because the contents of the signature
   // blob will not be available at payload creation time. So, update_engine will
-  // gracefully ignore the dummy signature operation.
+  // gracefully ignore the fake signature operation.
   static bool AddOperationHash(InstallOperation* op, const brillo::Blob& buf);
 
   // Install operations in the manifest may reference data blobs, which
@@ -90,12 +92,16 @@
 
     // The operations to be performed to this partition.
     std::vector<AnnotatedOperation> aops;
+    std::vector<CowMergeOperation> cow_merge_sequence;
 
     PartitionInfo old_info;
     PartitionInfo new_info;
 
     PostInstallConfig postinstall;
     VerityConfig verity;
+    // Per-partition version (usually a timestamp).
+    std::string version;
+    size_t cow_size;
   };
 
   std::vector<Partition> part_vec_;
diff --git a/payload_generator/payload_file_unittest.cc b/payload_generator/payload_file_unittest.cc
index 45faebb..1fd36f5 100644
--- a/payload_generator/payload_file_unittest.cc
+++ b/payload_generator/payload_file_unittest.cc
@@ -36,7 +36,7 @@
 };
 
 TEST_F(PayloadFileTest, ReorderBlobsTest) {
-  test_utils::ScopedTempFile orig_blobs("ReorderBlobsTest.orig.XXXXXX");
+  ScopedTempFile orig_blobs("ReorderBlobsTest.orig.XXXXXX");
 
   // The operations have three blob and one gap (the whitespace):
   // Rootfs operation 1: [8, 3] bcd
@@ -45,7 +45,7 @@
   string orig_data = "kernel abcd";
   EXPECT_TRUE(test_utils::WriteFileString(orig_blobs.path(), orig_data));
 
-  test_utils::ScopedTempFile new_blobs("ReorderBlobsTest.new.XXXXXX");
+  ScopedTempFile new_blobs("ReorderBlobsTest.new.XXXXXX");
 
   payload_.part_vec_.resize(2);
 
diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc
index 88cca30..2cd2ebc 100644
--- a/payload_generator/payload_generation_config.cc
+++ b/payload_generator/payload_generation_config.cc
@@ -23,6 +23,7 @@
 #include <base/logging.h>
 #include <base/strings/string_number_conversions.h>
 #include <brillo/strings/string_utils.h>
+#include <libsnapshot/cow_format.h>
 
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/delta_performer.h"
@@ -32,6 +33,7 @@
 #include "update_engine/payload_generator/ext2_filesystem.h"
 #include "update_engine/payload_generator/mapfile_filesystem.h"
 #include "update_engine/payload_generator/raw_filesystem.h"
+#include "update_engine/payload_generator/squashfs_filesystem.h"
 
 using std::string;
 
@@ -86,6 +88,14 @@
     return true;
   }
 
+  fs_interface = SquashfsFilesystem::CreateFromFile(path,
+                                                    /*extract_deflates=*/true,
+                                                    /*load_settings=*/true);
+  if (fs_interface) {
+    TEST_AND_RETURN_FALSE(fs_interface->GetBlockSize() == kBlockSize);
+    return true;
+  }
+
   // Fall back to a RAW filesystem.
   TEST_AND_RETURN_FALSE(size % kBlockSize == 0);
   fs_interface = RawFilesystem::Create(
@@ -94,7 +104,6 @@
 }
 
 bool ImageConfig::ValidateIsEmpty() const {
-  TEST_AND_RETURN_FALSE(ImageInfoIsEmpty());
   return partitions.empty();
 }
 
@@ -168,7 +177,17 @@
   bool snapshot_enabled = false;
   store.GetBoolean("virtual_ab", &snapshot_enabled);
   metadata->set_snapshot_enabled(snapshot_enabled);
-
+  bool vabc_enabled = false;
+  if (store.GetBoolean("virtual_ab_compression", &vabc_enabled) &&
+      vabc_enabled) {
+    LOG(INFO) << "Target build supports VABC";
+    metadata->set_vabc_enabled(vabc_enabled);
+  }
+  // We use "gz" compression by default for VABC.
+  if (metadata->vabc_enabled()) {
+    metadata->set_vabc_compression_param("gz");
+    metadata->set_cow_version(android::snapshot::kCowVersionManifest);
+  }
   dynamic_partition_metadata = std::move(metadata);
   return true;
 }
@@ -206,28 +225,20 @@
   return true;
 }
 
-bool ImageConfig::ImageInfoIsEmpty() const {
-  return image_info.board().empty() && image_info.key().empty() &&
-         image_info.channel().empty() && image_info.version().empty() &&
-         image_info.build_channel().empty() &&
-         image_info.build_version().empty();
-}
-
 PayloadVersion::PayloadVersion(uint64_t major_version, uint32_t minor_version) {
   major = major_version;
   minor = minor_version;
 }
 
 bool PayloadVersion::Validate() const {
-  TEST_AND_RETURN_FALSE(major == kChromeOSMajorPayloadVersion ||
-                        major == kBrilloMajorPayloadVersion);
+  TEST_AND_RETURN_FALSE(major == kBrilloMajorPayloadVersion);
   TEST_AND_RETURN_FALSE(minor == kFullPayloadMinorVersion ||
-                        minor == kInPlaceMinorPayloadVersion ||
                         minor == kSourceMinorPayloadVersion ||
                         minor == kOpSrcHashMinorPayloadVersion ||
                         minor == kBrotliBsdiffMinorPayloadVersion ||
                         minor == kPuffdiffMinorPayloadVersion ||
-                        minor == kVerityMinorPayloadVersion);
+                        minor == kVerityMinorPayloadVersion ||
+                        minor == kPartialUpdateMinorPayloadVersion);
   return true;
 }
 
@@ -237,13 +248,10 @@
     case InstallOperation::REPLACE:
     case InstallOperation::REPLACE_BZ:
       // These operations were included in the original payload format.
-      return true;
-
     case InstallOperation::REPLACE_XZ:
-      // These operations are included in the major version used in Brillo, but
-      // can also be used with minor version 3 or newer.
-      return major == kBrilloMajorPayloadVersion ||
-             minor >= kOpSrcHashMinorPayloadVersion;
+      // These operations are included in minor version 3 or newer, and in
+      // full payloads.
+      return true;
 
     case InstallOperation::ZERO:
     case InstallOperation::DISCARD:
@@ -252,14 +260,6 @@
       // them for delta payloads for now.
       return minor >= kBrotliBsdiffMinorPayloadVersion;
 
-    // Delta operations:
-    case InstallOperation::MOVE:
-    case InstallOperation::BSDIFF:
-      // MOVE and BSDIFF were replaced by SOURCE_COPY and SOURCE_BSDIFF and
-      // should not be used in newer delta versions, since the idempotent checks
-      // were removed.
-      return minor == kInPlaceMinorPayloadVersion;
-
     case InstallOperation::SOURCE_COPY:
     case InstallOperation::SOURCE_BSDIFF:
       return minor >= kSourceMinorPayloadVersion;
@@ -269,21 +269,22 @@
 
     case InstallOperation::PUFFDIFF:
       return minor >= kPuffdiffMinorPayloadVersion;
+
+    case InstallOperation::MOVE:
+    case InstallOperation::BSDIFF:
+      NOTREACHED();
   }
   return false;
 }
 
-bool PayloadVersion::IsDelta() const {
+bool PayloadVersion::IsDeltaOrPartial() const {
   return minor != kFullPayloadMinorVersion;
 }
 
-bool PayloadVersion::InplaceUpdate() const {
-  return minor == kInPlaceMinorPayloadVersion;
-}
-
 bool PayloadGenerationConfig::Validate() const {
   TEST_AND_RETURN_FALSE(version.Validate());
-  TEST_AND_RETURN_FALSE(version.IsDelta() == is_delta);
+  TEST_AND_RETURN_FALSE(version.IsDeltaOrPartial() ==
+                        (is_delta || is_partial_update));
   if (is_delta) {
     for (const PartitionConfig& part : source.partitions) {
       if (!part.path.empty()) {
@@ -295,9 +296,6 @@
       TEST_AND_RETURN_FALSE(part.verity.IsEmpty());
     }
 
-    // If new_image_info is present, old_image_info must be present.
-    TEST_AND_RETURN_FALSE(source.ImageInfoIsEmpty() ==
-                          target.ImageInfoIsEmpty());
   } else {
     // All the "source" image fields must be empty for full payloads.
     TEST_AND_RETURN_FALSE(source.ValidateIsEmpty());
@@ -307,15 +305,14 @@
   for (const PartitionConfig& part : target.partitions) {
     TEST_AND_RETURN_FALSE(part.ValidateExists());
     TEST_AND_RETURN_FALSE(part.size % block_size == 0);
-    if (version.minor == kInPlaceMinorPayloadVersion &&
-        part.name == kPartitionNameRoot)
-      TEST_AND_RETURN_FALSE(rootfs_partition_size >= part.size);
-    if (version.major == kChromeOSMajorPayloadVersion)
-      TEST_AND_RETURN_FALSE(part.postinstall.IsEmpty());
     if (version.minor < kVerityMinorPayloadVersion)
       TEST_AND_RETURN_FALSE(part.verity.IsEmpty());
   }
 
+  if (version.minor < kPartialUpdateMinorPayloadVersion) {
+    TEST_AND_RETURN_FALSE(!is_partial_update);
+  }
+
   TEST_AND_RETURN_FALSE(hard_chunk_size == -1 ||
                         hard_chunk_size % block_size == 0);
   TEST_AND_RETURN_FALSE(soft_chunk_size % block_size == 0);
diff --git a/payload_generator/payload_generation_config.h b/payload_generator/payload_generation_config.h
index e90edde..9c8c59f 100644
--- a/payload_generator/payload_generation_config.h
+++ b/payload_generator/payload_generation_config.h
@@ -119,6 +119,9 @@
 
   // Enables the on device fec data computation by default.
   bool disable_fec_computation = false;
+
+  // Per-partition version, usually a number representing a timestamp.
+  std::string version;
 };
 
 // The ImageConfig struct describes a pair of binaries kernel and rootfs and the
@@ -146,13 +149,6 @@
   // Validate |dynamic_partition_metadata| against |partitions|.
   bool ValidateDynamicPartitionMetadata() const;
 
-  // Returns whether the |image_info| field is empty.
-  bool ImageInfoIsEmpty() const;
-
-  // The ImageInfo message defined in the update_metadata.proto file describes
-  // the metadata of the image.
-  ImageInfo image_info;
-
   // The updated partitions.
   std::vector<PartitionConfig> partitions;
 
@@ -170,12 +166,8 @@
   // Return whether the passed |operation| is allowed by this payload.
   bool OperationAllowed(InstallOperation::Type operation) const;
 
-  // Whether this payload version is a delta payload.
-  bool IsDelta() const;
-
-  // Tells whether the update is done in-place, that is, whether the operations
-  // read and write from the same partition.
-  bool InplaceUpdate() const;
+  // Whether this payload version is a delta or partial payload.
+  bool IsDeltaOrPartial() const;
 
   // The major version of the payload.
   uint64_t major;
@@ -202,6 +194,10 @@
   // Whether the requested payload is a delta payload.
   bool is_delta = false;
 
+  // Whether the requested payload is a partial payload, i.e. it only updates
+  // a subset of the partitions on the device.
+  bool is_partial_update = false;
+
   // The major/minor version of the payload.
   PayloadVersion version;
 
@@ -234,6 +230,9 @@
 
   // The maximum timestamp of the OS allowed to apply this payload.
   int64_t max_timestamp = 0;
+
+  // Path to apex_info.pb, extracted from target_file.zip
+  std::string apex_info_file;
 };
 
 }  // namespace chromeos_update_engine
diff --git a/payload_generator/payload_generation_config_android_unittest.cc b/payload_generator/payload_generation_config_android_unittest.cc
index 44eaf55..e87b034 100644
--- a/payload_generator/payload_generation_config_android_unittest.cc
+++ b/payload_generator/payload_generation_config_android_unittest.cc
@@ -138,8 +138,7 @@
   }
 
   ImageConfig image_config_;
-  test_utils::ScopedTempFile temp_file_{
-      "PayloadGenerationConfigAndroidTest.XXXXXX"};
+  ScopedTempFile temp_file_{"PayloadGenerationConfigAndroidTest.XXXXXX"};
 };
 
 TEST_F(PayloadGenerationConfigAndroidTest, LoadVerityConfigSimpleTest) {
diff --git a/payload_generator/payload_properties.cc b/payload_generator/payload_properties.cc
new file mode 100644
index 0000000..bcf4fbd
--- /dev/null
+++ b/payload_generator/payload_properties.cc
@@ -0,0 +1,124 @@
+//
+// Copyright 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/payload_properties.h"
+
+#include <algorithm>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <base/json/json_writer.h>
+#include <base/strings/string_util.h>
+#include <base/values.h>
+#include <brillo/data_encoding.h>
+
+#include "update_engine/common/constants.h"
+#include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/payload_metadata.h"
+#include "update_engine/update_metadata.pb.h"
+
+using std::string;
+using std::vector;
+
+namespace chromeos_update_engine {
+
+namespace {
+// These are needed by GoldenEye.
+const char kPayloadPropertyJsonVersion[] = "version";
+const char kPayloadPropertyJsonPayloadHash[] = "sha256_hex";
+const char kPayloadPropertyJsonMetadataSize[] = "metadata_size";
+const char kPayloadPropertyJsonMetadataSignature[] = "metadata_signature";
+
+// These are needed by the Nebraska and devserver.
+const char kPayloadPropertyJsonPayloadSize[] = "size";
+const char kPayloadPropertyJsonIsDelta[] = "is_delta";
+}  // namespace
+
+PayloadProperties::PayloadProperties(const string& payload_path)
+    : payload_path_(payload_path) {}
+
+bool PayloadProperties::GetPropertiesAsJson(string* json_str) {
+  TEST_AND_RETURN_FALSE(LoadFromPayload());
+
+  base::DictionaryValue properties;
+  properties.SetInteger(kPayloadPropertyJsonVersion, version_);
+  properties.SetInteger(kPayloadPropertyJsonMetadataSize, metadata_size_);
+  properties.SetString(kPayloadPropertyJsonMetadataSignature,
+                       metadata_signatures_);
+  properties.SetInteger(kPayloadPropertyJsonPayloadSize, payload_size_);
+  properties.SetString(kPayloadPropertyJsonPayloadHash, payload_hash_);
+  properties.SetBoolean(kPayloadPropertyJsonIsDelta, is_delta_);
+
+  return base::JSONWriter::Write(properties, json_str);
+}
+
+bool PayloadProperties::GetPropertiesAsKeyValue(string* key_value_str) {
+  TEST_AND_RETURN_FALSE(LoadFromPayload());
+
+  brillo::KeyValueStore properties;
+  properties.SetString(kPayloadPropertyFileSize, std::to_string(payload_size_));
+  properties.SetString(kPayloadPropertyMetadataSize,
+                       std::to_string(metadata_size_));
+  properties.SetString(kPayloadPropertyFileHash, payload_hash_);
+  properties.SetString(kPayloadPropertyMetadataHash, metadata_hash_);
+
+  *key_value_str = properties.SaveToString();
+  return true;
+}
+
+bool PayloadProperties::LoadFromPayload() {
+  PayloadMetadata payload_metadata;
+  DeltaArchiveManifest manifest;
+  Signatures metadata_signatures;
+  TEST_AND_RETURN_FALSE(payload_metadata.ParsePayloadFile(
+      payload_path_, &manifest, &metadata_signatures));
+
+  metadata_size_ = payload_metadata.GetMetadataSize();
+  payload_size_ = utils::FileSize(payload_path_);
+
+  brillo::Blob metadata_hash;
+  TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfFile(
+                            payload_path_, metadata_size_, &metadata_hash) ==
+                        static_cast<off_t>(metadata_size_));
+  metadata_hash_ = brillo::data_encoding::Base64Encode(metadata_hash);
+
+  brillo::Blob payload_hash;
+  TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfFile(
+                            payload_path_, payload_size_, &payload_hash) ==
+                        static_cast<off_t>(payload_size_));
+  payload_hash_ = brillo::data_encoding::Base64Encode(payload_hash);
+
+  if (payload_metadata.GetMetadataSignatureSize() > 0) {
+    TEST_AND_RETURN_FALSE(metadata_signatures.signatures_size() > 0);
+    vector<string> base64_signatures;
+    for (const auto& sig : metadata_signatures.signatures()) {
+      base64_signatures.push_back(
+          brillo::data_encoding::Base64Encode(sig.data()));
+    }
+    metadata_signatures_ = base::JoinString(base64_signatures, ":");
+  }
+
+  is_delta_ = std::any_of(manifest.partitions().begin(),
+                          manifest.partitions().end(),
+                          [](const PartitionUpdate& part) {
+                            return part.has_old_partition_info();
+                          });
+  return true;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_generator/payload_properties.h b/payload_generator/payload_properties.h
new file mode 100644
index 0000000..846b181
--- /dev/null
+++ b/payload_generator/payload_properties.h
@@ -0,0 +1,70 @@
+//
+// Copyright 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_PAYLOAD_PROPERTIES_H_
+#define UPDATE_ENGINE_PAYLOAD_GENERATOR_PAYLOAD_PROPERTIES_H_
+
+#include <string>
+
+#include <brillo/key_value_store.h>
+#include <brillo/secure_blob.h>
+
+namespace chromeos_update_engine {
+
+// A class for extracting information about a payload from the payload file
+// itself. Currently the metadata can be exported as a JSON file or a
+// key/value properties file, but more formats can be added if required.
+class PayloadProperties {
+ public:
+  explicit PayloadProperties(const std::string& payload_path);
+  ~PayloadProperties() = default;
+
+  // Get the properties in JSON format. The JSON file will be used in
+  // autotests, cros flash, etc., mainly in Chrome OS.
+  bool GetPropertiesAsJson(std::string* json_str);
+
+  // Get the properties of the payload as a key/value store. This is mainly used
+  // in Android.
+  bool GetPropertiesAsKeyValue(std::string* key_value_str);
+
+ private:
+  // Does the main job of reading the payload and extracting information from
+  // it.
+  bool LoadFromPayload();
+
+  // The path to the payload file.
+  std::string payload_path_;
+
+  // The version of the metadata json format. If the output json file changes
+  // format, this needs to be increased.
+  int version_{2};
+
+  size_t metadata_size_;
+  std::string metadata_hash_;
+  std::string metadata_signatures_;
+
+  size_t payload_size_;
+  std::string payload_hash_;
+
+  // Whether the payload is a delta (true) or full (false).
+  bool is_delta_;
+
+  DISALLOW_COPY_AND_ASSIGN(PayloadProperties);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_PAYLOAD_GENERATOR_PAYLOAD_PROPERTIES_H_
diff --git a/payload_generator/payload_properties_unittest.cc b/payload_generator/payload_properties_unittest.cc
new file mode 100644
index 0000000..0ff364f
--- /dev/null
+++ b/payload_generator/payload_properties_unittest.cc
@@ -0,0 +1,132 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/payload_properties.h"
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <base/files/file_util.h>
+#include <base/files/scoped_file.h>
+#include <base/files/scoped_temp_dir.h>
+#include <base/rand_util.h>
+#include <base/strings/stringprintf.h>
+#include <brillo/data_encoding.h>
+
+#include <gtest/gtest.h>
+
+#include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/install_plan.h"
+#include "update_engine/payload_generator/delta_diff_generator.h"
+#include "update_engine/payload_generator/delta_diff_utils.h"
+#include "update_engine/payload_generator/full_update_generator.h"
+#include "update_engine/payload_generator/operations_generator.h"
+#include "update_engine/payload_generator/payload_file.h"
+#include "update_engine/payload_generator/payload_generation_config.h"
+
+using std::string;
+using std::unique_ptr;
+using std::vector;
+
+namespace chromeos_update_engine {
+
+// TODO(kimjae): the current implementation is very specific to a static way
+// of producing a deterministic test. It would be beneficial to extend
+// |PayloadPropertiesTest::SetUp()| into a generic helper or separate class
+// that can handle creation of different |PayloadFile|s.
+class PayloadPropertiesTest : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    PayloadGenerationConfig config;
+    config.version.major = kBrilloMajorPayloadVersion;
+    config.version.minor = kSourceMinorPayloadVersion;
+    PayloadFile payload;
+    EXPECT_TRUE(payload.Init(config));
+
+    const auto SetupPartitionConfig =
+        [](PartitionConfig* config, const string& path, size_t size) {
+          config->path = path;
+          config->size = size;
+        };
+    const auto WriteZerosToFile = [](const char path[], size_t size) {
+      string zeros(size, '\0');
+      EXPECT_TRUE(utils::WriteFile(path, zeros.c_str(), zeros.size()));
+    };
+    ScopedTempFile old_part_file("old_part.XXXXXX");
+    ScopedTempFile new_part_file("new_part.XXXXXX");
+    PartitionConfig old_part(kPartitionNameRoot);
+    PartitionConfig new_part(kPartitionNameRoot);
+    SetupPartitionConfig(&old_part, old_part_file.path(), 0);
+    SetupPartitionConfig(&new_part, new_part_file.path(), 10000);
+    WriteZerosToFile(old_part_file.path().c_str(), old_part.size);
+    WriteZerosToFile(new_part_file.path().c_str(), new_part.size);
+
+    // Select payload generation strategy based on the config.
+    unique_ptr<OperationsGenerator> strategy(new FullUpdateGenerator());
+
+    vector<AnnotatedOperation> aops;
+    off_t data_file_size = 0;
+    ScopedTempFile data_file("temp_data.XXXXXX", true);
+    BlobFileWriter blob_file_writer(data_file.fd(), &data_file_size);
+    // Generate the operations using the strategy we selected above.
+    EXPECT_TRUE(strategy->GenerateOperations(
+        config, old_part, new_part, &blob_file_writer, &aops));
+
+    payload.AddPartition(old_part, new_part, aops, {}, 0);
+
+    uint64_t metadata_size;
+    EXPECT_TRUE(payload.WritePayload(
+        payload_file_.path(), data_file.path(), "", &metadata_size));
+  }
+
+  ScopedTempFile payload_file_{"payload_file.XXXXXX"};
+};
+
+// Validate that the hash of the file exists within the output.
+TEST_F(PayloadPropertiesTest, GetPropertiesAsJsonTestHash) {
+  constexpr char kJsonProperties[] =
+      "{"
+      R"("is_delta":true,)"
+      R"("metadata_signature":"",)"
+      R"("metadata_size":165,)"
+      R"("sha256_hex":"cV7kfZBH3K0B6QJHxxykDh6b6x0WgVOmc63whPLOy7U=",)"
+      R"("size":211,)"
+      R"("version":2)"
+      "}";
+  string json;
+  EXPECT_TRUE(
+      PayloadProperties(payload_file_.path()).GetPropertiesAsJson(&json));
+  EXPECT_EQ(kJsonProperties, json) << "JSON contents:\n" << json;
+}
+
+// Validate that the hashes of the file and metadata are within the output.
+TEST_F(PayloadPropertiesTest, GetPropertiesAsKeyValueTestHash) {
+  constexpr char kKeyValueProperties[] =
+      "FILE_HASH=cV7kfZBH3K0B6QJHxxykDh6b6x0WgVOmc63whPLOy7U=\n"
+      "FILE_SIZE=211\n"
+      "METADATA_HASH=aEKYyzJt2E8Gz8fzB+gmekN5mriotZCSq6R+kDfdeV4=\n"
+      "METADATA_SIZE=165\n";
+  string key_value;
+  EXPECT_TRUE(PayloadProperties{payload_file_.path()}.GetPropertiesAsKeyValue(
+      &key_value));
+  EXPECT_EQ(kKeyValueProperties, key_value) << "Key Value contents:\n"
+                                            << key_value;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_generator/payload_signer.cc b/payload_generator/payload_signer.cc
index 72780b1..d9f0dd7 100644
--- a/payload_generator/payload_signer.cc
+++ b/payload_generator/payload_signer.cc
@@ -82,7 +82,7 @@
 // Given an unsigned payload under |payload_path| and the |payload_signature|
 // and |metadata_signature| generates an updated payload that includes the
 // signatures. It populates |out_metadata_size| with the size of the final
-// manifest after adding the dummy signature operation, and
+// manifest after adding the fake signature operation, and
 // |out_signatures_offset| with the expected offset for the new blob, and
 // |out_metadata_signature_size| which will be size of |metadata_signature|
 // if the payload major version supports metadata signature, 0 otherwise.
@@ -104,22 +104,20 @@
   uint64_t metadata_size = payload_metadata.GetMetadataSize();
   uint32_t metadata_signature_size =
       payload_metadata.GetMetadataSignatureSize();
-  if (payload_metadata.GetMajorVersion() == kBrilloMajorPayloadVersion) {
-    // Write metadata signature size in header.
-    uint32_t metadata_signature_size_be = htobe32(metadata_signature.size());
-    memcpy(payload.data() + manifest_offset,
-           &metadata_signature_size_be,
-           sizeof(metadata_signature_size_be));
-    manifest_offset += sizeof(metadata_signature_size_be);
-    // Replace metadata signature.
-    payload.erase(payload.begin() + metadata_size,
-                  payload.begin() + metadata_size + metadata_signature_size);
-    payload.insert(payload.begin() + metadata_size,
-                   metadata_signature.begin(),
-                   metadata_signature.end());
-    metadata_signature_size = metadata_signature.size();
-    LOG(INFO) << "Metadata signature size: " << metadata_signature_size;
-  }
+  // Write metadata signature size in header.
+  uint32_t metadata_signature_size_be = htobe32(metadata_signature.size());
+  memcpy(payload.data() + manifest_offset,
+         &metadata_signature_size_be,
+         sizeof(metadata_signature_size_be));
+  manifest_offset += sizeof(metadata_signature_size_be);
+  // Replace metadata signature.
+  payload.erase(payload.begin() + metadata_size,
+                payload.begin() + metadata_size + metadata_signature_size);
+  payload.insert(payload.begin() + metadata_size,
+                 metadata_signature.begin(),
+                 metadata_signature.end());
+  metadata_signature_size = metadata_signature.size();
+  LOG(INFO) << "Metadata signature size: " << metadata_signature_size;
 
   DeltaArchiveManifest manifest;
   TEST_AND_RETURN_FALSE(payload_metadata.GetManifest(payload, &manifest));
@@ -143,7 +141,6 @@
     PayloadSigner::AddSignatureToManifest(
         payload.size() - metadata_size - metadata_signature_size,
         payload_signature.size(),
-        payload_metadata.GetMajorVersion() == kChromeOSMajorPayloadVersion,
         &manifest);
 
     // Updates the payload to include the new manifest.
@@ -241,25 +238,10 @@
 
 void PayloadSigner::AddSignatureToManifest(uint64_t signature_blob_offset,
                                            uint64_t signature_blob_length,
-                                           bool add_dummy_op,
                                            DeltaArchiveManifest* manifest) {
   LOG(INFO) << "Making room for signature in file";
   manifest->set_signatures_offset(signature_blob_offset);
-  LOG(INFO) << "set? " << manifest->has_signatures_offset();
-  manifest->set_signatures_offset(signature_blob_offset);
   manifest->set_signatures_size(signature_blob_length);
-  // Add a dummy op at the end to appease older clients
-  if (add_dummy_op) {
-    InstallOperation* dummy_op = manifest->add_kernel_install_operations();
-    dummy_op->set_type(InstallOperation::REPLACE);
-    dummy_op->set_data_offset(signature_blob_offset);
-    dummy_op->set_data_length(signature_blob_length);
-    Extent* dummy_extent = dummy_op->add_dst_extents();
-    // Tell the dummy op to write this data to a big sparse hole
-    dummy_extent->set_start_block(kSparseHole);
-    dummy_extent->set_num_blocks(
-        utils::DivRoundUp(signature_blob_length, kBlockSize));
-  }
 }
 
 bool PayloadSigner::VerifySignedPayload(const string& payload_path,
@@ -337,7 +319,6 @@
                                                  signature.data(),
                                                  rsa,
                                                  RSA_NO_PADDING);
-
     if (signature_size < 0) {
       LOG(ERROR) << "Signing hash failed: "
                  << ERR_error_string(ERR_get_error(), nullptr);
@@ -512,35 +493,4 @@
   return true;
 }
 
-bool PayloadSigner::ExtractPayloadProperties(
-    const string& payload_path, brillo::KeyValueStore* properties) {
-  brillo::Blob payload;
-  TEST_AND_RETURN_FALSE(
-      utils::ReadFileChunk(payload_path, 0, kMaxPayloadHeaderSize, &payload));
-
-  PayloadMetadata payload_metadata;
-  TEST_AND_RETURN_FALSE(payload_metadata.ParsePayloadHeader(payload));
-  uint64_t metadata_size = payload_metadata.GetMetadataSize();
-
-  uint64_t file_size = utils::FileSize(payload_path);
-  properties->SetString(kPayloadPropertyFileSize, std::to_string(file_size));
-  properties->SetString(kPayloadPropertyMetadataSize,
-                        std::to_string(metadata_size));
-
-  brillo::Blob file_hash, metadata_hash;
-  TEST_AND_RETURN_FALSE(
-      HashCalculator::RawHashOfFile(payload_path, file_size, &file_hash) ==
-      static_cast<off_t>(file_size));
-
-  TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfFile(
-                            payload_path, metadata_size, &metadata_hash) ==
-                        static_cast<off_t>(metadata_size));
-
-  properties->SetString(kPayloadPropertyFileHash,
-                        brillo::data_encoding::Base64Encode(file_hash));
-  properties->SetString(kPayloadPropertyMetadataHash,
-                        brillo::data_encoding::Base64Encode(metadata_hash));
-  return true;
-}
-
 }  // namespace chromeos_update_engine
diff --git a/payload_generator/payload_signer.h b/payload_generator/payload_signer.h
index bd1e32f..9676b71 100644
--- a/payload_generator/payload_signer.h
+++ b/payload_generator/payload_signer.h
@@ -39,12 +39,9 @@
   static bool VerifySignedPayload(const std::string& payload_path,
                                   const std::string& public_key_path);
 
-  // Adds specified signature offset/length to given |manifest|, also adds a
-  // dummy operation that points to a signature blob located at the specified
-  // offset/length if |add_dummy_op| is true.
+  // Adds specified signature offset/length to given |manifest|.
   static void AddSignatureToManifest(uint64_t signature_blob_offset,
                                      uint64_t signature_blob_length,
-                                     bool add_dummy_op,
                                      DeltaArchiveManifest* manifest);
 
   // Given a raw |hash| and a private key in |private_key_path| calculates the
@@ -65,7 +62,7 @@
   // size in |metadata_signature_size| and signatures offset in
   // |signatures_offset|, calculates the payload signature blob into
   // |out_serialized_signature|. Note that the payload must already have an
-  // updated manifest that includes the dummy signature op and correct metadata
+  // updated manifest that includes the fake signature op and correct metadata
   // signature size in header. Returns true on success, false otherwise.
   static bool SignPayload(const std::string& unsigned_payload_path,
                           const std::vector<std::string>& private_key_paths,
@@ -95,7 +92,7 @@
                                     brillo::Blob* out_payload_hash_data,
                                     brillo::Blob* out_metadata_hash);
 
-  // Given an unsigned payload in |payload_path| (with no dummy signature op)
+  // Given an unsigned payload in |payload_path| (with no fake signature op)
   // and the raw |payload_signatures| and |metadata_signatures| updates the
   // payload to include the signature thus turning it into a signed payload. The
   // new payload is stored in |signed_payload_path|. |payload_path| and
diff --git a/payload_generator/payload_signer_unittest.cc b/payload_generator/payload_signer_unittest.cc
index bf7100b..2a0b394 100644
--- a/payload_generator/payload_signer_unittest.cc
+++ b/payload_generator/payload_signer_unittest.cc
@@ -20,6 +20,7 @@
 #include <vector>
 
 #include <base/logging.h>
+#include <base/stl_util.h>
 #include <gtest/gtest.h>
 
 #include "update_engine/common/hash_calculator.h"
@@ -118,8 +119,8 @@
   EXPECT_EQ(1, signatures.signatures_size());
   const Signatures::Signature& sig = signatures.signatures(0);
   const string& sig_data = sig.data();
-  ASSERT_EQ(arraysize(kDataSignature), sig_data.size());
-  for (size_t i = 0; i < arraysize(kDataSignature); i++) {
+  ASSERT_EQ(base::size(kDataSignature), sig_data.size());
+  for (size_t i = 0; i < base::size(kDataSignature); i++) {
     EXPECT_EQ(kDataSignature[i], static_cast<uint8_t>(sig_data[i]));
   }
 }
@@ -166,7 +167,7 @@
 }
 
 TEST_F(PayloadSignerTest, SkipMetadataSignatureTest) {
-  test_utils::ScopedTempFile payload_file("payload.XXXXXX");
+  ScopedTempFile payload_file("payload.XXXXXX");
   PayloadGenerationConfig config;
   config.version.major = kBrilloMajorPayloadVersion;
   PayloadFile payload;
@@ -193,7 +194,7 @@
 }
 
 TEST_F(PayloadSignerTest, VerifySignedPayloadTest) {
-  test_utils::ScopedTempFile payload_file("payload.XXXXXX");
+  ScopedTempFile payload_file("payload.XXXXXX");
   PayloadGenerationConfig config;
   config.version.major = kBrilloMajorPayloadVersion;
   PayloadFile payload;
diff --git a/payload_generator/squashfs_filesystem.cc b/payload_generator/squashfs_filesystem.cc
index 6c892f5..a41e283 100644
--- a/payload_generator/squashfs_filesystem.cc
+++ b/payload_generator/squashfs_filesystem.cc
@@ -23,6 +23,7 @@
 #include <utility>
 
 #include <base/files/file_util.h>
+#include <base/files/scoped_temp_dir.h>
 #include <base/logging.h>
 #include <base/strings/string_number_conversions.h>
 #include <base/strings/string_split.h>
@@ -36,6 +37,8 @@
 #include "update_engine/payload_generator/extent_utils.h"
 #include "update_engine/update_metadata.pb.h"
 
+using base::FilePath;
+using base::ScopedTempDir;
 using std::string;
 using std::unique_ptr;
 using std::vector;
@@ -49,6 +52,8 @@
 constexpr uint64_t kSquashfsCompressedBit = 1 << 24;
 constexpr uint32_t kSquashfsZlibCompression = 1;
 
+constexpr char kUpdateEngineConf[] = "etc/update_engine.conf";
+
 bool ReadSquashfsHeader(const brillo::Blob blob,
                         SquashfsFilesystem::SquashfsHeader* header) {
   if (blob.size() < kSquashfsSuperBlockSize) {
@@ -67,24 +72,60 @@
 }
 
 bool GetFileMapContent(const string& sqfs_path, string* map) {
-  // Create a tmp file
-  string map_file;
-  TEST_AND_RETURN_FALSE(
-      utils::MakeTempFile("squashfs_file_map.XXXXXX", &map_file, nullptr));
-  ScopedPathUnlinker map_unlinker(map_file);
-
+  ScopedTempFile map_file("squashfs_file_map.XXXXXX");
   // Run unsquashfs to get the system file map.
   // unsquashfs -m <map-file> <squashfs-file>
-  vector<string> cmd = {"unsquashfs", "-m", map_file, sqfs_path};
-  string stdout;
+  vector<string> cmd = {"unsquashfs", "-m", map_file.path(), sqfs_path};
+  string stdout, stderr;
   int exit_code;
-  if (!Subprocess::SynchronousExec(cmd, &exit_code, &stdout) ||
+  if (!Subprocess::SynchronousExec(cmd, &exit_code, &stdout, &stderr) ||
       exit_code != 0) {
-    LOG(ERROR) << "Failed to run unsquashfs -m. The stdout content was: "
-               << stdout;
+    LOG(ERROR) << "Failed to run `unsquashfs -m` with stdout content: "
+               << stdout << " and stderr content: " << stderr;
     return false;
   }
-  TEST_AND_RETURN_FALSE(utils::ReadFile(map_file, map));
+  TEST_AND_RETURN_FALSE(utils::ReadFile(map_file.path(), map));
+  return true;
+}
+
+bool GetUpdateEngineConfig(const std::string& sqfs_path, string* config) {
+  ScopedTempDir unsquash_dir;
+  if (!unsquash_dir.CreateUniqueTempDir()) {
+    PLOG(ERROR) << "Failed to create a temporary directory.";
+    return false;
+  }
+
+  // Run unsquashfs to extract update_engine.conf
+  // -f: To force overriding if the target directory exists.
+  // -d: The directory to unsquash the files.
+  vector<string> cmd = {"unsquashfs",
+                        "-f",
+                        "-d",
+                        unsquash_dir.GetPath().value(),
+                        sqfs_path,
+                        kUpdateEngineConf};
+  string stdout, stderr;
+  int exit_code;
+  if (!Subprocess::SynchronousExec(cmd, &exit_code, &stdout, &stderr) ||
+      exit_code != 0) {
+    PLOG(ERROR) << "Failed to unsquashfs etc/update_engine.conf with stdout: "
+                << stdout << " and stderr: " << stderr;
+    return false;
+  }
+
+  auto config_path = unsquash_dir.GetPath().Append(kUpdateEngineConf);
+  string config_content;
+  if (!utils::ReadFile(config_path.value(), &config_content)) {
+    PLOG(ERROR) << "Failed to read " << config_path.value();
+    return false;
+  }
+
+  if (config_content.empty()) {
+    LOG(ERROR) << "update_engine config file was empty!!";
+    return false;
+  }
+
+  *config = std::move(config_content);
   return true;
 }
 
@@ -120,6 +161,7 @@
     uint64_t start;
     TEST_AND_RETURN_FALSE(base::StringToUint64(splits[1], &start));
     uint64_t cur_offset = start;
+    bool is_compressed = false;
     for (size_t i = 2; i < splits.size(); ++i) {
       uint64_t blk_size;
       TEST_AND_RETURN_FALSE(base::StringToUint64(splits[i], &blk_size));
@@ -127,10 +169,11 @@
       auto new_blk_size = blk_size & ~kSquashfsCompressedBit;
       TEST_AND_RETURN_FALSE(new_blk_size <= header.block_size);
       if (new_blk_size > 0 && !(blk_size & kSquashfsCompressedBit)) {
-        // Compressed block
+        // It is a compressed block.
         if (is_zlib && extract_deflates) {
           zlib_blks.emplace_back(cur_offset, new_blk_size);
         }
+        is_compressed = true;
       }
       cur_offset += new_blk_size;
     }
@@ -140,6 +183,7 @@
       File file;
       file.name = splits[0].as_string();
       file.extents = {ExtentForBytes(kBlockSize, start, cur_offset - start)};
+      file.is_compressed = is_compressed;
       files_.emplace_back(file);
     }
   }
@@ -151,7 +195,8 @@
   // If there is any overlap between two consecutive extents, remove them. Here
   // we are assuming all files have exactly one extent. If this assumption
   // changes then this implementation needs to change too.
-  for (auto first = files_.begin(), second = first + 1;
+  for (auto first = files_.begin(),
+            second = first + (first == files_.end() ? 0 : 1);
        first != files_.end() && second != files_.end();
        second = first + 1) {
     auto first_begin = first->extents[0].start_block();
@@ -217,7 +262,15 @@
                 return a.offset < b.offset;
               });
 
-    // Sanity check. Make sure zlib blocks are not overlapping.
+    // Sometimes a squashfs can have two files that are hard linked. In this
+    // case both files have the same starting offset in the image and hence
+    // the same zlib blocks, so we need to remove these duplicates to avoid
+    // further problems. In fact, the next statement will fail if there are
+    // duplicates (there will be overlap between two blocks).
+    auto last = std::unique(zlib_blks.begin(), zlib_blks.end());
+    zlib_blks.erase(last, zlib_blks.end());
+
+    // Make sure zlib blocks are not overlapping.
     auto result = std::adjacent_find(
         zlib_blks.begin(),
         zlib_blks.end(),
@@ -239,12 +292,12 @@
 }
 
 unique_ptr<SquashfsFilesystem> SquashfsFilesystem::CreateFromFile(
-    const string& sqfs_path, bool extract_deflates) {
+    const string& sqfs_path, bool extract_deflates, bool load_settings) {
   if (sqfs_path.empty())
     return nullptr;
 
   brillo::StreamPtr sqfs_file =
-      brillo::FileStream::Open(base::FilePath(sqfs_path),
+      brillo::FileStream::Open(FilePath(sqfs_path),
                                brillo::Stream::AccessMode::READ,
                                brillo::FileStream::Disposition::OPEN_EXISTING,
                                nullptr);
@@ -278,6 +331,12 @@
     return nullptr;
   }
 
+  if (load_settings) {
+    if (!GetUpdateEngineConfig(sqfs_path, &sqfs->update_engine_config_)) {
+      return nullptr;
+    }
+  }
+
   return sqfs;
 }
 
@@ -311,9 +370,12 @@
 }
 
 bool SquashfsFilesystem::LoadSettings(brillo::KeyValueStore* store) const {
-  // Settings not supported in squashfs.
-  LOG(ERROR) << "squashfs doesn't support LoadSettings().";
-  return false;
+  if (!store->LoadFromString(update_engine_config_)) {
+    LOG(ERROR) << "Failed to load the settings with config: "
+               << update_engine_config_;
+    return false;
+  }
+  return true;
 }
 
 bool SquashfsFilesystem::IsSquashfsImage(const brillo::Blob& blob) {
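For reference, the hard-link handling added in this file boils down to: sort the zlib block descriptors by offset, drop exact duplicates (hard-linked files produce identical blocks), then reject any remaining overlap. Below is a self-contained sketch of that idea, assuming a simple (offset, length) pair as a stand-in for the real block type used here.

// Illustrative sketch only; Block is a stand-in (offset, length) pair, not
// the actual type stored in zlib_blks.
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

using Block = std::pair<uint64_t, uint64_t>;  // (offset, length)

// Sorts, deduplicates, and returns false if two distinct blocks overlap.
bool DeduplicateAndCheckBlocks(std::vector<Block>* blks) {
  std::sort(blks->begin(), blks->end());
  // Hard-linked files yield identical blocks; drop the exact duplicates.
  blks->erase(std::unique(blks->begin(), blks->end()), blks->end());
  // Any remaining overlap between consecutive blocks is an error.
  auto it = std::adjacent_find(
      blks->begin(), blks->end(), [](const Block& a, const Block& b) {
        return a.first + a.second > b.first;
      });
  return it == blks->end();
}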
diff --git a/payload_generator/squashfs_filesystem.h b/payload_generator/squashfs_filesystem.h
index b79f8c7..5045dfc 100644
--- a/payload_generator/squashfs_filesystem.h
+++ b/payload_generator/squashfs_filesystem.h
@@ -59,7 +59,7 @@
   // |extract_deflates| is true, it will process files to find location of all
   // deflate streams.
   static std::unique_ptr<SquashfsFilesystem> CreateFromFile(
-      const std::string& sqfs_path, bool extract_deflates);
+      const std::string& sqfs_path, bool extract_deflates, bool load_settings);
 
   // Creates the file system from a file map |filemap| which is a multi-line
   // string with each line with the following format:
@@ -113,6 +113,9 @@
   // All the files in the filesystem.
   std::vector<File> files_;
 
+  // The content of /etc/update_engine.conf.
+  std::string update_engine_config_;
+
   DISALLOW_COPY_AND_ASSIGN(SquashfsFilesystem);
 };
 
diff --git a/payload_generator/squashfs_filesystem_unittest.cc b/payload_generator/squashfs_filesystem_unittest.cc
index 29fcf1c..68ca9df 100644
--- a/payload_generator/squashfs_filesystem_unittest.cc
+++ b/payload_generator/squashfs_filesystem_unittest.cc
@@ -112,7 +112,7 @@
 #ifdef __CHROMEOS__
 TEST_F(SquashfsFilesystemTest, EmptyFilesystemTest) {
   unique_ptr<SquashfsFilesystem> fs = SquashfsFilesystem::CreateFromFile(
-      GetBuildArtifactsPath("gen/disk_sqfs_empty.img"), true);
+      GetBuildArtifactsPath("gen/disk_sqfs_empty.img"), true, false);
   CheckSquashfs(fs);
 
   // Even an empty squashfs filesystem is rounded up to 4K.
@@ -133,7 +133,7 @@
 
 TEST_F(SquashfsFilesystemTest, DefaultFilesystemTest) {
   unique_ptr<SquashfsFilesystem> fs = SquashfsFilesystem::CreateFromFile(
-      GetBuildArtifactsPath("gen/disk_sqfs_default.img"), true);
+      GetBuildArtifactsPath("gen/disk_sqfs_default.img"), true, false);
   CheckSquashfs(fs);
 
   vector<FilesystemInterface::File> files;
@@ -148,6 +148,18 @@
   EXPECT_EQ(files[0].name, file.name);
   EXPECT_EQ(files[0].extents, file.extents);
 }
+
+TEST_F(SquashfsFilesystemTest, UpdateEngineConfigTest) {
+  unique_ptr<SquashfsFilesystem> fs = SquashfsFilesystem::CreateFromFile(
+      GetBuildArtifactsPath("gen/disk_sqfs_unittest.img"), true, true);
+  CheckSquashfs(fs);
+
+  brillo::KeyValueStore kvs;
+  EXPECT_TRUE(fs->LoadSettings(&kvs));
+  string minor_version;
+  EXPECT_TRUE(kvs.GetString("PAYLOAD_MINOR_VERSION", &minor_version));
+  EXPECT_EQ(minor_version, "1234");
+}
 #endif  // __CHROMEOS__
 
 TEST_F(SquashfsFilesystemTest, SimpleFileMapTest) {
diff --git a/payload_generator/tarjan.cc b/payload_generator/tarjan.cc
deleted file mode 100644
index 2d4ca31..0000000
--- a/payload_generator/tarjan.cc
+++ /dev/null
@@ -1,83 +0,0 @@
-//
-// Copyright (C) 2010 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-#include "update_engine/payload_generator/tarjan.h"
-
-#include <algorithm>
-#include <vector>
-
-#include <base/logging.h>
-#include <base/stl_util.h>
-
-using std::min;
-using std::vector;
-
-namespace chromeos_update_engine {
-
-namespace {
-const vector<Vertex>::size_type kInvalidIndex = -1;
-}
-
-void TarjanAlgorithm::Execute(Vertex::Index vertex,
-                              Graph* graph,
-                              vector<Vertex::Index>* out) {
-  stack_.clear();
-  components_.clear();
-  index_ = 0;
-  for (Graph::iterator it = graph->begin(); it != graph->end(); ++it)
-    it->index = it->lowlink = kInvalidIndex;
-  required_vertex_ = vertex;
-
-  Tarjan(vertex, graph);
-  if (!components_.empty())
-    out->swap(components_[0]);
-}
-
-void TarjanAlgorithm::Tarjan(Vertex::Index vertex, Graph* graph) {
-  CHECK_EQ((*graph)[vertex].index, kInvalidIndex);
-  (*graph)[vertex].index = index_;
-  (*graph)[vertex].lowlink = index_;
-  index_++;
-  stack_.push_back(vertex);
-  for (Vertex::EdgeMap::iterator it = (*graph)[vertex].out_edges.begin();
-       it != (*graph)[vertex].out_edges.end();
-       ++it) {
-    Vertex::Index vertex_next = it->first;
-    if ((*graph)[vertex_next].index == kInvalidIndex) {
-      Tarjan(vertex_next, graph);
-      (*graph)[vertex].lowlink =
-          min((*graph)[vertex].lowlink, (*graph)[vertex_next].lowlink);
-    } else if (base::ContainsValue(stack_, vertex_next)) {
-      (*graph)[vertex].lowlink =
-          min((*graph)[vertex].lowlink, (*graph)[vertex_next].index);
-    }
-  }
-  if ((*graph)[vertex].lowlink == (*graph)[vertex].index) {
-    vector<Vertex::Index> component;
-    Vertex::Index other_vertex;
-    do {
-      other_vertex = stack_.back();
-      stack_.pop_back();
-      component.push_back(other_vertex);
-    } while (other_vertex != vertex && !stack_.empty());
-
-    if (base::ContainsValue(component, required_vertex_)) {
-      components_.resize(components_.size() + 1);
-      component.swap(components_.back());
-    }
-  }
-}
-
-}  // namespace chromeos_update_engine
diff --git a/payload_generator/tarjan.h b/payload_generator/tarjan.h
deleted file mode 100644
index 39ac4e4..0000000
--- a/payload_generator/tarjan.h
+++ /dev/null
@@ -1,53 +0,0 @@
-//
-// Copyright (C) 2010 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_TARJAN_H_
-#define UPDATE_ENGINE_PAYLOAD_GENERATOR_TARJAN_H_
-
-// This is an implementation of Tarjan's algorithm which finds all
-// Strongly Connected Components in a graph.
-
-// Note: a true Tarjan algorithm would find all strongly connected components
-// in the graph. This implementation will only find the strongly connected
-// component containing the vertex passed in.
-
-#include <vector>
-
-#include "update_engine/payload_generator/graph_types.h"
-
-namespace chromeos_update_engine {
-
-class TarjanAlgorithm {
- public:
-  TarjanAlgorithm() : index_(0), required_vertex_(0) {}
-
-  // 'out' is set to the result if there is one, otherwise it's untouched.
-  void Execute(Vertex::Index vertex,
-               Graph* graph,
-               std::vector<Vertex::Index>* out);
-
- private:
-  void Tarjan(Vertex::Index vertex, Graph* graph);
-
-  Vertex::Index index_;
-  Vertex::Index required_vertex_;
-  std::vector<Vertex::Index> stack_;
-  std::vector<std::vector<Vertex::Index>> components_;
-};
-
-}  // namespace chromeos_update_engine
-
-#endif  // UPDATE_ENGINE_PAYLOAD_GENERATOR_TARJAN_H_
diff --git a/payload_generator/tarjan_unittest.cc b/payload_generator/tarjan_unittest.cc
deleted file mode 100644
index b271227..0000000
--- a/payload_generator/tarjan_unittest.cc
+++ /dev/null
@@ -1,94 +0,0 @@
-//
-// Copyright (C) 2010 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/payload_generator/tarjan.h"
-
-#include <string>
-#include <utility>
-
-#include <base/logging.h>
-#include <base/stl_util.h>
-#include <gtest/gtest.h>
-
-#include "update_engine/payload_generator/graph_types.h"
-
-using std::make_pair;
-using std::string;
-using std::vector;
-
-namespace chromeos_update_engine {
-
-class TarjanAlgorithmTest : public ::testing::Test {};
-
-TEST(TarjanAlgorithmTest, SimpleTest) {
-  const Vertex::Index n_a = 0;
-  const Vertex::Index n_b = 1;
-  const Vertex::Index n_c = 2;
-  const Vertex::Index n_d = 3;
-  const Vertex::Index n_e = 4;
-  const Vertex::Index n_f = 5;
-  const Vertex::Index n_g = 6;
-  const Vertex::Index n_h = 7;
-  const Graph::size_type kNodeCount = 8;
-
-  Graph graph(kNodeCount);
-
-  graph[n_a].out_edges.insert(make_pair(n_e, EdgeProperties()));
-  graph[n_a].out_edges.insert(make_pair(n_f, EdgeProperties()));
-  graph[n_b].out_edges.insert(make_pair(n_a, EdgeProperties()));
-  graph[n_c].out_edges.insert(make_pair(n_d, EdgeProperties()));
-  graph[n_d].out_edges.insert(make_pair(n_e, EdgeProperties()));
-  graph[n_d].out_edges.insert(make_pair(n_f, EdgeProperties()));
-  graph[n_e].out_edges.insert(make_pair(n_b, EdgeProperties()));
-  graph[n_e].out_edges.insert(make_pair(n_c, EdgeProperties()));
-  graph[n_e].out_edges.insert(make_pair(n_f, EdgeProperties()));
-  graph[n_f].out_edges.insert(make_pair(n_g, EdgeProperties()));
-  graph[n_g].out_edges.insert(make_pair(n_h, EdgeProperties()));
-  graph[n_h].out_edges.insert(make_pair(n_g, EdgeProperties()));
-
-  TarjanAlgorithm tarjan;
-
-  for (Vertex::Index i = n_a; i <= n_e; i++) {
-    vector<Vertex::Index> vertex_indexes;
-    tarjan.Execute(i, &graph, &vertex_indexes);
-
-    EXPECT_EQ(5U, vertex_indexes.size());
-    EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_a));
-    EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_b));
-    EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_c));
-    EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_d));
-    EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_e));
-  }
-
-  {
-    vector<Vertex::Index> vertex_indexes;
-    tarjan.Execute(n_f, &graph, &vertex_indexes);
-
-    EXPECT_EQ(1U, vertex_indexes.size());
-    EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_f));
-  }
-
-  for (Vertex::Index i = n_g; i <= n_h; i++) {
-    vector<Vertex::Index> vertex_indexes;
-    tarjan.Execute(i, &graph, &vertex_indexes);
-
-    EXPECT_EQ(2U, vertex_indexes.size());
-    EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_g));
-    EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_h));
-  }
-}
-
-}  // namespace chromeos_update_engine
diff --git a/payload_generator/topological_sort.cc b/payload_generator/topological_sort.cc
deleted file mode 100644
index 0abd708..0000000
--- a/payload_generator/topological_sort.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-//
-// Copyright (C) 2010 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/payload_generator/topological_sort.h"
-
-#include <set>
-#include <vector>
-
-#include <base/logging.h>
-
-using std::set;
-using std::vector;
-
-namespace chromeos_update_engine {
-
-namespace {
-void TopologicalSortVisit(const Graph& graph,
-                          set<Vertex::Index>* visited_nodes,
-                          vector<Vertex::Index>* nodes,
-                          Vertex::Index node) {
-  if (visited_nodes->find(node) != visited_nodes->end())
-    return;
-
-  visited_nodes->insert(node);
-  // Visit all children.
-  for (Vertex::EdgeMap::const_iterator it = graph[node].out_edges.begin();
-       it != graph[node].out_edges.end();
-       ++it) {
-    TopologicalSortVisit(graph, visited_nodes, nodes, it->first);
-  }
-  // Visit this node.
-  nodes->push_back(node);
-}
-}  // namespace
-
-void TopologicalSort(const Graph& graph, vector<Vertex::Index>* out) {
-  set<Vertex::Index> visited_nodes;
-
-  for (Vertex::Index i = 0; i < graph.size(); i++) {
-    TopologicalSortVisit(graph, &visited_nodes, out, i);
-  }
-}
-
-}  // namespace chromeos_update_engine
diff --git a/payload_generator/topological_sort.h b/payload_generator/topological_sort.h
deleted file mode 100644
index 461cbe1..0000000
--- a/payload_generator/topological_sort.h
+++ /dev/null
@@ -1,42 +0,0 @@
-//
-// Copyright (C) 2010 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_TOPOLOGICAL_SORT_H_
-#define UPDATE_ENGINE_PAYLOAD_GENERATOR_TOPOLOGICAL_SORT_H_
-
-#include <vector>
-
-#include "update_engine/payload_generator/graph_types.h"
-
-namespace chromeos_update_engine {
-
-// Performs a topological sort on the directed graph 'graph' and stores
-// the nodes, in order visited, in 'out'.
-// For example, this graph:
-// A ---> C ----.
-//  \           v
-//   `--> B --> D
-// Might result in this in 'out':
-// out[0] = D
-// out[1] = B
-// out[2] = C
-// out[3] = A
-// Note: results are undefined if there is a cycle in the graph.
-void TopologicalSort(const Graph& graph, std::vector<Vertex::Index>* out);
-
-}  // namespace chromeos_update_engine
-
-#endif  // UPDATE_ENGINE_PAYLOAD_GENERATOR_TOPOLOGICAL_SORT_H_
diff --git a/payload_generator/topological_sort_unittest.cc b/payload_generator/topological_sort_unittest.cc
deleted file mode 100644
index aa296d8..0000000
--- a/payload_generator/topological_sort_unittest.cc
+++ /dev/null
@@ -1,96 +0,0 @@
-//
-// Copyright (C) 2010 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/payload_generator/topological_sort.h"
-
-#include <utility>
-#include <vector>
-
-#include <gtest/gtest.h>
-
-#include "update_engine/payload_generator/graph_types.h"
-
-using std::make_pair;
-using std::vector;
-
-namespace chromeos_update_engine {
-
-class TopologicalSortTest : public ::testing::Test {};
-
-namespace {
-// Returns true if the value is found in vect. If found, the index is stored
-// in out_index if out_index is not null.
-template <typename T>
-bool IndexOf(const vector<T>& vect,
-             const T& value,
-             typename vector<T>::size_type* out_index) {
-  for (typename vector<T>::size_type i = 0; i < vect.size(); i++) {
-    if (vect[i] == value) {
-      if (out_index) {
-        *out_index = i;
-      }
-      return true;
-    }
-  }
-  return false;
-}
-}  // namespace
-
-TEST(TopologicalSortTest, SimpleTest) {
-  int counter = 0;
-  const Vertex::Index n_a = counter++;
-  const Vertex::Index n_b = counter++;
-  const Vertex::Index n_c = counter++;
-  const Vertex::Index n_d = counter++;
-  const Vertex::Index n_e = counter++;
-  const Vertex::Index n_f = counter++;
-  const Vertex::Index n_g = counter++;
-  const Vertex::Index n_h = counter++;
-  const Vertex::Index n_i = counter++;
-  const Vertex::Index n_j = counter++;
-  const Graph::size_type kNodeCount = counter++;
-
-  Graph graph(kNodeCount);
-
-  graph[n_i].out_edges.insert(make_pair(n_j, EdgeProperties()));
-  graph[n_i].out_edges.insert(make_pair(n_c, EdgeProperties()));
-  graph[n_i].out_edges.insert(make_pair(n_e, EdgeProperties()));
-  graph[n_i].out_edges.insert(make_pair(n_h, EdgeProperties()));
-  graph[n_c].out_edges.insert(make_pair(n_b, EdgeProperties()));
-  graph[n_b].out_edges.insert(make_pair(n_a, EdgeProperties()));
-  graph[n_e].out_edges.insert(make_pair(n_d, EdgeProperties()));
-  graph[n_e].out_edges.insert(make_pair(n_g, EdgeProperties()));
-  graph[n_g].out_edges.insert(make_pair(n_d, EdgeProperties()));
-  graph[n_g].out_edges.insert(make_pair(n_f, EdgeProperties()));
-  graph[n_d].out_edges.insert(make_pair(n_a, EdgeProperties()));
-
-  vector<Vertex::Index> sorted;
-  TopologicalSort(graph, &sorted);
-
-  for (Vertex::Index i = 0; i < graph.size(); i++) {
-    vector<Vertex::Index>::size_type src_index = 0;
-    EXPECT_TRUE(IndexOf(sorted, i, &src_index));
-    for (Vertex::EdgeMap::const_iterator it = graph[i].out_edges.begin();
-         it != graph[i].out_edges.end();
-         ++it) {
-      vector<Vertex::Index>::size_type dst_index = 0;
-      EXPECT_TRUE(IndexOf(sorted, it->first, &dst_index));
-      EXPECT_LT(dst_index, src_index);
-    }
-  }
-}
-
-}  // namespace chromeos_update_engine
diff --git a/payload_generator/zip_unittest.cc b/payload_generator/zip_unittest.cc
index e357b15..10e899b 100644
--- a/payload_generator/zip_unittest.cc
+++ b/payload_generator/zip_unittest.cc
@@ -33,7 +33,6 @@
 using chromeos_update_engine::test_utils::kRandomString;
 using google::protobuf::RepeatedPtrField;
 using std::string;
-using std::vector;
 
 namespace chromeos_update_engine {
 
@@ -50,8 +49,7 @@
   }
   ~MemoryExtentWriter() override = default;
 
-  bool Init(FileDescriptorPtr fd,
-            const RepeatedPtrField<Extent>& extents,
+  bool Init(const RepeatedPtrField<Extent>& extents,
             uint32_t block_size) override {
     return true;
   }
@@ -72,7 +70,7 @@
   std::unique_ptr<ExtentWriter> writer(
       new W(std::make_unique<MemoryExtentWriter>(out)));
   // Init() parameters are ignored by the testing MemoryExtentWriter.
-  bool ok = writer->Init(nullptr, {}, 1);
+  bool ok = writer->Init({}, 1);
   ok = writer->Write(in.data(), in.size()) && ok;
   return ok;
 }
diff --git a/power_manager_android.h b/power_manager_android.h
deleted file mode 100644
index 86399ab..0000000
--- a/power_manager_android.h
+++ /dev/null
@@ -1,40 +0,0 @@
-//
-// Copyright (C) 2016 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_POWER_MANAGER_ANDROID_H_
-#define UPDATE_ENGINE_POWER_MANAGER_ANDROID_H_
-
-#include <base/macros.h>
-
-#include "update_engine/power_manager_interface.h"
-
-namespace chromeos_update_engine {
-
-class PowerManagerAndroid : public PowerManagerInterface {
- public:
-  PowerManagerAndroid() = default;
-  ~PowerManagerAndroid() override = default;
-
-  // PowerManagerInterface overrides.
-  bool RequestReboot() override;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(PowerManagerAndroid);
-};
-
-}  // namespace chromeos_update_engine
-
-#endif  // UPDATE_ENGINE_POWER_MANAGER_ANDROID_H_
diff --git a/protobuflint.py b/protobuflint.py
new file mode 100755
index 0000000..02ccdbe
--- /dev/null
+++ b/protobuflint.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""CLI script for linting .proto files inside update_engine."""
+
+import sys
+import re
+import subprocess
+
+def check_proto_file(commit_hash, filename):
+  """Check if |filename| is consistnet with our protobuf guidelines
+
+    Args:
+      commit_hash: Hash of the git commit to look
+      filename: A filesystem path to a .proto file
+    Returns:
+      True if this file passes linting check, False otherwise
+  """
+  output = subprocess.check_output(
+      ["git", "diff", commit_hash+"~", commit_hash, "--", filename])
+  output = output.decode()
+  p = re.compile(r"^[+]?\s*required .*$", re.M)
+  m = p.search(output)
+  if m:
+    print("File", filename,
+          "contains 'required' keyword. Usage of required",
+          "is strongly discouraged in protobuf", m.group())
+    return False
+  return True
+
+def main():
+  if len(sys.argv) < 2:
+    print("Usage:", sys.argv[0], "commit_hash", "<file1>", "<file2>", "...")
+    sys.exit(1)
+  commit_hash = sys.argv[1]
+  for filename in sys.argv[2:]:
+    if filename.endswith(".proto"):
+      if not check_proto_file(commit_hash, filename):
+        sys.exit(1)
+
+if __name__ == "__main__":
+  main()
diff --git a/pylintrc b/pylintrc
index 33adec2..a433868 100644
--- a/pylintrc
+++ b/pylintrc
@@ -24,7 +24,7 @@
 # Profiled execution.
 profile=no
 
-# Add files or directories to the blacklist. They should be base names, not
+# Add files or directories to the ignorelist. They should be base names, not
 # paths.
 ignore=CVS,.svn,.git,update_metadata_pb2.py
 
diff --git a/real_system_state.h b/real_system_state.h
deleted file mode 100644
index 4712008..0000000
--- a/real_system_state.h
+++ /dev/null
@@ -1,202 +0,0 @@
-//
-// Copyright (C) 2013 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_REAL_SYSTEM_STATE_H_
-#define UPDATE_ENGINE_REAL_SYSTEM_STATE_H_
-
-#include "update_engine/system_state.h"
-
-#include <memory>
-#include <set>
-
-#include <policy/device_policy.h>
-
-#if USE_CHROME_KIOSK_APP
-#include <kiosk-app/dbus-proxies.h>
-#endif  // USE_CHROME_KIOSK_APP
-
-#include "update_engine/certificate_checker.h"
-#include "update_engine/common/boot_control_interface.h"
-#include "update_engine/common/clock.h"
-#include "update_engine/common/dlcservice_interface.h"
-#include "update_engine/common/hardware_interface.h"
-#include "update_engine/common/prefs.h"
-#include "update_engine/connection_manager_interface.h"
-#include "update_engine/daemon_state_interface.h"
-#include "update_engine/metrics_reporter_interface.h"
-#include "update_engine/metrics_reporter_omaha.h"
-#include "update_engine/p2p_manager.h"
-#include "update_engine/payload_state.h"
-#include "update_engine/power_manager_interface.h"
-#include "update_engine/update_attempter.h"
-#include "update_engine/update_manager/update_manager.h"
-
-namespace chromeos_update_engine {
-
-// A real implementation of the SystemStateInterface which is
-// used by the actual product code.
-class RealSystemState : public SystemState, public DaemonStateInterface {
- public:
-  // Constructs all system objects that do not require separate initialization;
-  // see Initialize() below for the remaining ones.
-  RealSystemState() = default;
-  ~RealSystemState() override;
-
-  // Initializes and sets systems objects that require an initialization
-  // separately from construction. Returns |true| on success.
-  bool Initialize();
-
-  // DaemonStateInterface overrides.
-  // Start the periodic update attempts. Must be called at the beginning of the
-  // program to start the periodic update check process.
-  bool StartUpdater() override;
-
-  void AddObserver(ServiceObserverInterface* observer) override;
-  void RemoveObserver(ServiceObserverInterface* observer) override;
-  const std::set<ServiceObserverInterface*>& service_observers() override {
-    CHECK(update_attempter_.get());
-    return update_attempter_->service_observers();
-  }
-
-  // SystemState overrides.
-  inline void set_device_policy(
-      const policy::DevicePolicy* device_policy) override {
-    device_policy_ = device_policy;
-  }
-
-  inline const policy::DevicePolicy* device_policy() override {
-    return device_policy_;
-  }
-
-  inline BootControlInterface* boot_control() override {
-    return boot_control_.get();
-  }
-
-  inline ClockInterface* clock() override { return &clock_; }
-
-  inline ConnectionManagerInterface* connection_manager() override {
-    return connection_manager_.get();
-  }
-
-  inline HardwareInterface* hardware() override { return hardware_.get(); }
-
-  inline MetricsReporterInterface* metrics_reporter() override {
-    return &metrics_reporter_;
-  }
-
-  inline PrefsInterface* prefs() override { return prefs_.get(); }
-
-  inline PrefsInterface* powerwash_safe_prefs() override {
-    return powerwash_safe_prefs_.get();
-  }
-
-  inline PayloadStateInterface* payload_state() override {
-    return &payload_state_;
-  }
-
-  inline UpdateAttempter* update_attempter() override {
-    return update_attempter_.get();
-  }
-
-  inline OmahaRequestParams* request_params() override {
-    return &request_params_;
-  }
-
-  inline P2PManager* p2p_manager() override { return p2p_manager_.get(); }
-
-  inline chromeos_update_manager::UpdateManager* update_manager() override {
-    return update_manager_.get();
-  }
-
-  inline PowerManagerInterface* power_manager() override {
-    return power_manager_.get();
-  }
-
-  inline bool system_rebooted() override { return system_rebooted_; }
-
-  inline DlcServiceInterface* dlcservice() override {
-    return dlcservice_.get();
-  }
-
- private:
-  // Real DBus proxies using the DBus connection.
-#if USE_CHROME_KIOSK_APP
-  std::unique_ptr<org::chromium::KioskAppServiceInterfaceProxy>
-      kiosk_app_proxy_;
-#endif  // USE_CHROME_KIOSK_APP
-
-  // Interface for the power manager.
-  std::unique_ptr<PowerManagerInterface> power_manager_;
-
-  // Interface for dlcservice.
-  std::unique_ptr<DlcServiceInterface> dlcservice_;
-
-  // Interface for the clock.
-  std::unique_ptr<BootControlInterface> boot_control_;
-
-  // Interface for the clock.
-  Clock clock_;
-
-  // The latest device policy object from the policy provider.
-  const policy::DevicePolicy* device_policy_{nullptr};
-
-  // The connection manager object that makes download decisions depending on
-  // the current type of connection.
-  std::unique_ptr<ConnectionManagerInterface> connection_manager_;
-
-  // Interface for the hardware functions.
-  std::unique_ptr<HardwareInterface> hardware_;
-
-  // The Metrics reporter for reporting UMA stats.
-  MetricsReporterOmaha metrics_reporter_;
-
-  // Interface for persisted store.
-  std::unique_ptr<PrefsInterface> prefs_;
-
-  // Interface for persisted store that persists across powerwashes.
-  std::unique_ptr<PrefsInterface> powerwash_safe_prefs_;
-
-  // All state pertaining to payload state such as response, URL, backoff
-  // states.
-  PayloadState payload_state_;
-
-  // OpenSSLWrapper and CertificateChecker used for checking SSL certificates.
-  OpenSSLWrapper openssl_wrapper_;
-  std::unique_ptr<CertificateChecker> certificate_checker_;
-
-  // Pointer to the update attempter object.
-  std::unique_ptr<UpdateAttempter> update_attempter_;
-
-  // Common parameters for all Omaha requests.
-  OmahaRequestParams request_params_{this};
-
-  std::unique_ptr<P2PManager> p2p_manager_;
-
-  std::unique_ptr<chromeos_update_manager::UpdateManager> update_manager_;
-
-  policy::PolicyProvider policy_provider_;
-
-  // If true, this is the first instance of the update engine since the system
-  // rebooted. Important for tracking whether you are running instance of the
-  // update engine on first boot or due to a crash/restart.
-  bool system_rebooted_{false};
-
-  ActionProcessor processor_;
-};
-
-}  // namespace chromeos_update_engine
-
-#endif  // UPDATE_ENGINE_REAL_SYSTEM_STATE_H_
diff --git a/sample_images/generate_images.sh b/sample_images/generate_images.sh
index 8478682..81a3296 100755
--- a/sample_images/generate_images.sh
+++ b/sample_images/generate_images.sh
@@ -186,14 +186,27 @@
 exit 0
 EOF
 
+  # An unlabeled postinstall bash program.
+  sudo tee "${mntdir}"/bin/self_check_default_context >/dev/null <<EOF
+#!/etc/../bin/sh
+echo "This is my context:"
+ls -lZ "\$0"
+ls -lZ "\$0" | grep -F ' u:object_r:postinstall_file:s0 ' || exit 5
+exit 0
+EOF
+
   # A postinstall bash program.
   sudo tee "${mntdir}"/bin/self_check_context >/dev/null <<EOF
 #!/etc/../bin/sh
 echo "This is my context:"
-ls -lZ "\$0" | grep -F ' u:object_r:postinstall_file:s0 ' || exit 5
+ls -lZ "\$0"
+ls -lZ "\$0" | grep -F ' u:object_r:postinstall_exec:s0 ' || exit 5
 exit 0
 EOF
 
+  # Give the test function the context we expect the postinstall-executable to have.
+  sudo setfattr -n security.selinux -v u:object_r:postinstall_exec:s0 "${mntdir}"/bin/self_check_context
+
   sudo tee "${mntdir}"/postinst >/dev/null <<EOF
 #!/etc/../bin/sh
 echo "postinst"
@@ -270,6 +283,7 @@
   # Add squashfs sample images.
   generate_image disk_sqfs_empty sqfs empty $((1024 * 4096)) 4096
   generate_image disk_sqfs_default sqfs default $((1024 * 4096)) 4096
+  generate_image disk_sqfs_unittest sqfs unittest $((1024 * 4096)) 4096
 
   # Generate the tarball and delete temporary images.
   echo "Packing tar file sample_images.tar.bz2"
diff --git a/sample_images/generate_payloads.sh b/sample_images/generate_payloads.sh
new file mode 100755
index 0000000..ee64229
--- /dev/null
+++ b/sample_images/generate_payloads.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+#
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This script generates some sample payloads from the images in
+# sample_images.tar.bz2 and packages them in the sample_payloads.tar.xz file.
+# The payloads are then used in paycheck_unittests.py. The file names
+# must match the ones used in update_payload ebuild and paycheck_unittests.py.
+
+set -e
+
+TEMP_IMG_DIR=./sample_images
+OLD_KERNEL="${TEMP_IMG_DIR}/disk_ext2_4k_empty.img"
+OLD_ROOT="${TEMP_IMG_DIR}/disk_sqfs_empty.img"
+NEW_KERNEL="${TEMP_IMG_DIR}/disk_ext2_4k.img"
+NEW_ROOT="${TEMP_IMG_DIR}/disk_sqfs_default.img"
+
+
+mkdir -p "${TEMP_IMG_DIR}"
+tar -xvf sample_images.tar.bz2 -C "${TEMP_IMG_DIR}"
+
+echo "Generating full payload"
+delta_generator --out_file=full_payload.bin \
+                --partition_names=kernel:root \
+                --new_partitions="${NEW_KERNEL}":"${NEW_ROOT}"
+
+echo "Generating delta payload"
+delta_generator --out_file=delta_payload.bin \
+                --partition_names=kernel:root \
+                --new_partitions="${NEW_KERNEL}":"${NEW_ROOT}" \
+                --old_partitions="${OLD_KERNEL}":"${OLD_ROOT}" --minor_version=6
+
+echo "Creating sample_payloads.tar"
+tar -cJf sample_payloads.tar.xz {delta,full}_payload.bin
+
+rm -rf "${TEMP_IMG_DIR}" {delta,full}_payload.bin
+
+echo "Done"
diff --git a/sample_images/sample_images.tar.bz2 b/sample_images/sample_images.tar.bz2
index 6215482..7965d8b 100644
--- a/sample_images/sample_images.tar.bz2
+++ b/sample_images/sample_images.tar.bz2
Binary files differ
diff --git a/sample_images/sample_payloads.tar.xz b/sample_images/sample_payloads.tar.xz
new file mode 100644
index 0000000..eb589ba
--- /dev/null
+++ b/sample_images/sample_payloads.tar.xz
Binary files differ
diff --git a/scripts/blockdiff.py b/scripts/blockdiff.py
index 5793def..95893cf 100755
--- a/scripts/blockdiff.py
+++ b/scripts/blockdiff.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python
 #
 # Copyright (C) 2013 The Android Open Source Project
 #
@@ -17,6 +17,7 @@
 
 """Block diff utility."""
 
+from __future__ import absolute_import
 from __future__ import print_function
 
 # pylint: disable=import-error
@@ -46,7 +47,7 @@
 
   """
   if max_length < 0:
-    max_length = sys.maxint
+    max_length = sys.maxsize
   diff_list = []
   num_blocks = extent_start = extent_length = 0
   while max_length or extent_length:
diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload
index d9c18ff..746cefb 100755
--- a/scripts/brillo_update_payload
+++ b/scripts/brillo_update_payload
@@ -89,12 +89,14 @@
   exit 1
 }
 
-# Loads shflags. We first look at the default install location; then look for
-# crosutils (chroot); finally check our own directory.
+# Loads shflags. We first look at the default install location; then our own
+# directory; finally the parent directory.
 load_shflags() {
   local my_dir="$(dirname "$(readlink -f "$0")")"
   local path
-  for path in /usr/share/misc "${my_dir}"/lib/shflags; do
+  for path in /usr/share/misc \
+    "${my_dir}"/lib/shflags \
+    "${my_dir}"/../lib/shflags; do
     if [[ -r "${path}/shflags" ]]; then
       . "${path}/shflags" || die "Could not load ${path}/shflags."
       return
@@ -186,9 +188,22 @@
     "Optional: The maximum unix timestamp of the OS allowed to apply this \
 payload, should be set to a number higher than the build timestamp of the \
 system running on the device, 0 if not specified."
+  DEFINE_string partition_timestamps "" \
+    "Optional: Per-partition maximum unix timestamp of the OS allowed to \
+apply this payload. Should be comma-separated key-value pairs, e.g. \
+system:1234,vendor:456"
   DEFINE_string disable_fec_computation "" \
     "Optional: Disables the on device fec data computation for incremental \
 update. This feature is enabled by default."
+  DEFINE_string disable_verity_computation "" \
+    "Optional: Disables the on device verity computation for incremental \
+update. This feature is enabled by default."
+  DEFINE_string is_partial_update "" \
+    "Optional: True if the payload is for partial update. i.e. it only updates \
+a subset of partitions on device."
+  DEFINE_string full_boot "" \
+    "Optional: Include the full boot image, even for an incremental payload."
+  DEFINE_string disable_vabc "" \
+    "Optional: Disables Virtual AB Compression when installing the OTA"
 fi
 if [[ "${COMMAND}" == "hash" || "${COMMAND}" == "sign" ]]; then
   DEFINE_string unsigned_payload "" "Path to the input unsigned payload."
@@ -271,6 +286,9 @@
 # Path to the dynamic partition info file in target image if exists.
 DYNAMIC_PARTITION_INFO_FILE=""
 
+# Path to the META/apex_info.pb found in the target build.
+APEX_INFO_FILE=""
+
 # read_option_int <file.txt> <option_key> [default_value]
 #
 # Reads the unsigned integer value associated with |option_key| in a key=value
@@ -554,6 +572,13 @@
         "${dynamic_partitions_info}"
       DYNAMIC_PARTITION_INFO_FILE="${dynamic_partitions_info}"
     fi
+    local apex_info=$(create_tempfile "apex_info.XXXXXX")
+    CLEANUP_FILES+=("${apex_info}")
+    if unzip -l "${image}" "META/apex_info.pb" > /dev/null; then
+      extract_file "${image}" "META/apex_info.pb" \
+        "${apex_info}"
+      APEX_INFO_FILE="${apex_info}"
+    fi
   fi
 
   local part
@@ -642,7 +667,12 @@
     fi
     partition_names+="${part}"
     new_partitions+="${DST_PARTITIONS[${part}]}"
-    old_partitions+="${SRC_PARTITIONS[${part}]:-}"
+    if [ "${FLAGS_full_boot}" == "true" ] && [ "${part}" == "boot" ]; then
+      # Pass an empty old boot partition so boot gets a full update.
+      old_partitions+=""
+    else
+      old_partitions+="${SRC_PARTITIONS[${part}]:-}"
+    fi
     new_mapfiles+="${DST_PARTITIONS_MAP[${part}]:-}"
     old_mapfiles+="${SRC_PARTITIONS_MAP[${part}]:-}"
   done
@@ -654,19 +684,40 @@
     --new_mapfiles="${new_mapfiles}"
   )
 
+  if [[ "${FLAGS_is_partial_update}" == "true" ]]; then
+    GENERATOR_ARGS+=( --is_partial_update="true" )
+    # Partial updates need at least minor version 7, so generate with minor
+    # version 7 if we don't have a source image. Let delta_generator fail on
+    # any other incompatible minor version.
+    if [[ -z "${FORCE_MINOR_VERSION}" ]]; then
+      FORCE_MINOR_VERSION="7"
+    fi
+  fi
+
   if [[ "${payload_type}" == "delta" ]]; then
     # Source image args:
     GENERATOR_ARGS+=(
       --old_partitions="${old_partitions}"
       --old_mapfiles="${old_mapfiles}"
     )
-    if [[ -n "${FORCE_MINOR_VERSION}" ]]; then
-      GENERATOR_ARGS+=( --minor_version="${FORCE_MINOR_VERSION}" )
-    fi
     if [[ -n "${FLAGS_disable_fec_computation}" ]]; then
       GENERATOR_ARGS+=(
         --disable_fec_computation="${FLAGS_disable_fec_computation}" )
     fi
+    if [[ -n "${FLAGS_disable_verity_computation}" ]]; then
+      GENERATOR_ARGS+=(
+        --disable_verity_computation="${FLAGS_disable_verity_computation}" )
+    fi
+  fi
+
+  if [[ -n "${FLAGS_disable_vabc}" ]]; then
+    GENERATOR_ARGS+=(
+      --disable_vabc="${FLAGS_disable_vabc}" )
+  fi
+
+  # The minor version is set only for delta or partial payloads.
+  if [[ -n "${FORCE_MINOR_VERSION}" ]]; then
+    GENERATOR_ARGS+=( --minor_version="${FORCE_MINOR_VERSION}" )
   fi
 
   if [[ -n "${FORCE_MAJOR_VERSION}" ]]; then
@@ -681,6 +732,10 @@
     GENERATOR_ARGS+=( --max_timestamp="${FLAGS_max_timestamp}" )
   fi
 
+  if [[ -n "${FLAGS_partition_timestamps}" ]]; then
+    GENERATOR_ARGS+=( --partition_timestamps="${FLAGS_partition_timestamps}" )
+  fi
+
   if [[ -n "${POSTINSTALL_CONFIG_FILE}" ]]; then
     GENERATOR_ARGS+=(
       --new_postinstall_config_file="${POSTINSTALL_CONFIG_FILE}"
@@ -692,6 +747,11 @@
       --dynamic_partition_info_file="${DYNAMIC_PARTITION_INFO_FILE}"
     )
   fi
+  if [[ -n "{APEX_INFO_FILE}" ]]; then
+    GENERATOR_ARGS+=(
+      --apex_info_file="${APEX_INFO_FILE}"
+    )
+  fi
 
   echo "Running delta_generator with args: ${GENERATOR_ARGS[@]}"
   "${GENERATOR}" "${GENERATOR_ARGS[@]}"
@@ -881,8 +941,8 @@
   check_update_payload ${PAYCHECK_ARGS[@]} --check
 }
 
-# Sanity check that the real generator exists:
-GENERATOR="$(which delta_generator || true)"
+# Check that the real generator exists:
+[[ -x "${GENERATOR}" ]] || GENERATOR="$(which delta_generator || true)"
 [[ -x "${GENERATOR}" ]] || die "can't find delta_generator"
 
 case "$COMMAND" in
diff --git a/scripts/cow_converter.py b/scripts/cow_converter.py
new file mode 100644
index 0000000..14e016c
--- /dev/null
+++ b/scripts/cow_converter.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Command-line tool for converting OTA payloads to VABC style COW images."""
+
+import os
+import subprocess
+import sys
+import tempfile
+import zipfile
+
+
+def IsSparseImage(filepath):
+  """Determine if an image is a sparse image
+  Args:
+    filepath: str, a path to an .img file
+
+  Returns:
+    return true iff the filepath is a sparse image.
+
+  """
+  with open(filepath, 'rb') as fp:
+    # Magic for android sparse image format
+    # https://source.android.com/devices/bootloader/images
+    return fp.read(4) == b'\x3A\xFF\x26\xED'
+
+
+def ConvertCOW(ota_path, target_file_path, tmp_dir, output_dir):
+  """Convert ota payload to COW IMAGE
+  Args:
+    ota_path: str, path to ota.zip
+    target_file_path: str, path to target_file.zip,
+      must be the target build for OTA.
+    tmp_dir: A temp dir as scratch space
+    output_dir: A directory where all converted COW images will be written.
+  """
+  with zipfile.ZipFile(ota_path) as ota_zip:
+    payload_path = ota_zip.extract("payload.bin", output_dir)
+  with zipfile.ZipFile(target_file_path) as zfp:
+    for fileinfo in zfp.infolist():
+      img_name = os.path.basename(fileinfo.filename)
+      if not fileinfo.filename.endswith(".img"):
+        continue
+      if fileinfo.filename.startswith("IMAGES/") or \
+              fileinfo.filename.startswith("RADIO/"):
+        img_path = zfp.extract(fileinfo, tmp_dir)
+        target_img_path = os.path.join(output_dir, img_name)
+        if IsSparseImage(img_path):
+          subprocess.check_call(["simg2img", img_path, target_img_path])
+        else:
+          os.rename(img_path, target_img_path)
+        print("Extracted", fileinfo.filename, "size:", fileinfo.file_size)
+
+  subprocess.call(["cow_converter", payload_path,
+                   output_dir])
+
+
+def main():
+  if len(sys.argv) != 4:
+    print(
+        "Usage:", sys.argv[0], "<your_ota.zip> <target_file.zip> <output dir>")
+    return 1
+  ota_path = sys.argv[1]
+  target_file_path = sys.argv[2]
+  output_dir = sys.argv[3]
+  os.makedirs(output_dir, exist_ok=True)
+  with tempfile.TemporaryDirectory() as tmp_dir:
+    ConvertCOW(ota_path, target_file_path, tmp_dir, output_dir)
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main())
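
A possible invocation of the converter from another Python script (illustrative only; the zip names are placeholders, and both simg2img and the cow_converter host binary are assumed to be on PATH):

    # Illustrative only; ota.zip and target_files.zip are placeholders.
    import subprocess

    subprocess.check_call(
        ["python3", "cow_converter.py", "ota.zip", "target_files.zip",
         "cow_out"])
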
diff --git a/scripts/ota_stress_test.py b/scripts/ota_stress_test.py
new file mode 100644
index 0000000..55aa4b1
--- /dev/null
+++ b/scripts/ota_stress_test.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Repeatedly install an A/B update to an Android device over adb."""
+
+import argparse
+import signal
+import subprocess
+import sys
+from pathlib import Path
+
+
+def CleanupLoopDevices():
+  # b/184716804 clean up unused loop devices
+  subprocess.check_call(["adb", "shell", "su", "0", "losetup", '-D'])
+
+
+def CancelOTA():
+  subprocess.call(["adb", "shell", "su", "0",
+                  "update_engine_client", "--cancel"])
+
+
+def PerformOTAThenPause(otafile: Path, update_device_script: Path):
+  python = sys.executable
+  ota_cmd = [python, str(update_device_script), str(otafile),
+             "--no-postinstall", "--no-slot-switch"]
+  p = subprocess.Popen(ota_cmd)
+  pid = p.pid
+  try:
+    ret = p.wait(10)
+    if ret is not None and ret != 0:
+      raise RuntimeError("OTA failed to apply")
+    if ret == 0:
+      print("OTA finished early? Surprise.")
+      return
+  except subprocess.TimeoutExpired:
+    pass
+  print(f"Killing {pid}")
+  subprocess.check_call(["pkill", "-INT", "-P", str(pid)])
+  p.send_signal(signal.SIGINT)
+  p.wait()
+
+
+def PerformTest(otafile: Path, resumes: int, timeout: int):
+  """Install an OTA to device, raising exceptions on failure
+
+  Args:
+    otafile: Path to the ota.zip to install
+
+  Return:
+    None if no error, if there's an error exception will be thrown
+  """
+  assert otafile.exists()
+  print("Applying", otafile)
+  script_dir = Path(__file__).parent.absolute()
+  update_device_script = script_dir / "update_device.py"
+  assert update_device_script.exists()
+  print(update_device_script)
+  python = sys.executable
+
+  for i in range(resumes):
+    print("Pause/Resume for the", i+1, "th time")
+    PerformOTAThenPause(otafile, update_device_script)
+  CancelOTA()
+  CleanupLoopDevices()
+
+  ota_cmd = [python, str(update_device_script),
+             str(otafile), "--no-postinstall"]
+  print("Finishing OTA Update", ota_cmd)
+  output = subprocess.check_output(
+      ota_cmd, stderr=subprocess.STDOUT, timeout=timeout).decode()
+  print(output)
+  if "onPayloadApplicationComplete(ErrorCode::kSuccess" not in output:
+    raise RuntimeError("Failed to finish OTA")
+  subprocess.call(
+      ["adb", "shell", "su", "0", "update_engine_client", "--cancel"])
+  subprocess.check_call(
+      ["adb", "shell", "su", "0", "update_engine_client", "--reset_status"])
+  CleanupLoopDevices()
+
+
+def main():
+  parser = argparse.ArgumentParser(
+      description='Android A/B OTA stress test helper.')
+  parser.add_argument('otafile', metavar='PAYLOAD', type=Path,
+                      help='the OTA package file (a .zip file) or raw payload \
+                      if device uses Omaha.')
+  parser.add_argument('-n', "--iterations", type=int, default=10,
+                      metavar='ITERATIONS',
+                      help='The number of iterations to run the stress test, or\
+                       -1 to keep running until CTRL+C')
+  parser.add_argument('-r', "--resumes", type=int, default=5, metavar='RESUMES',
+                      help='The number of iterations to pause the update when \
+                        installing')
+  parser.add_argument('-t', "--timeout", type=int, default=60*60,
+                      metavar='TIMEOUTS',
+                      help='Timeout, in seconds, when waiting for OTA to \
+                        finish')
+  args = parser.parse_args()
+  print(args)
+  n = args.iterations
+  while n != 0:
+    PerformTest(args.otafile, args.resumes, args.timeout)
+    n -= 1
+
+
+if __name__ == "__main__":
+  main()
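
A possible way to drive the stress test from another script (illustrative only; ota.zip is a placeholder, and the flags correspond to the argparse definitions above):

    # Illustrative only; ota.zip is a placeholder for a real OTA package.
    import subprocess

    subprocess.check_call(
        ["python3", "ota_stress_test.py", "ota.zip",
         "--iterations", "3", "--resumes", "2", "--timeout", "3600"])
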
diff --git a/scripts/paycheck.py b/scripts/paycheck.py
index 9d61778..cb1713f 100755
--- a/scripts/paycheck.py
+++ b/scripts/paycheck.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python
 #
 # Copyright (C) 2013 The Android Open Source Project
 #
@@ -17,6 +17,7 @@
 
 """Command-line tool for checking and applying Chrome OS update payloads."""
 
+from __future__ import absolute_import
 from __future__ import print_function
 
 # pylint: disable=import-error
@@ -26,18 +27,43 @@
 import sys
 import tempfile
 
-from update_payload import common
+# pylint: disable=redefined-builtin
+from six.moves import zip
 from update_payload import error
 
+
 lib_dir = os.path.join(os.path.dirname(__file__), 'lib')
 if os.path.exists(lib_dir) and os.path.isdir(lib_dir):
   sys.path.insert(1, lib_dir)
-import update_payload
+import update_payload  # pylint: disable=wrong-import-position
 
 
 _TYPE_FULL = 'full'
 _TYPE_DELTA = 'delta'
 
+def CheckApplyPayload(args):
+  """Whether to check the result after applying the payload.
+
+  Args:
+    args: Parsed command arguments (the return value of
+          ArgumentParser.parse_args).
+
+  Returns:
+    True if the result should be checked after applying the payload.
+  """
+  return args.dst_part_paths is not None
+
+def ApplyPayload(args):
+  """Whether to apply the payload.
+
+  Args:
+    args: Parsed command arguments (the return value of
+          ArgumentParser.parse_args).
+
+  Returns:
+    True if the payload should be applied.
+  """
+  return CheckApplyPayload(args) or args.out_dst_part_paths is not None
 
 def ParseArguments(argv):
   """Parse and validate command-line arguments.
@@ -49,9 +75,9 @@
     Returns the arguments returned by the argument parser.
   """
   parser = argparse.ArgumentParser(
-      description=('Applies a Chrome OS update PAYLOAD to src_kern and '
-                   'src_root emitting dst_kern and dst_root, respectively. '
-                   'src_kern and src_root are only needed for delta payloads. '
+      description=('Applies a Chrome OS update PAYLOAD to src_part_paths, '
+                   'emitting dst_part_paths. '
+                   'src_part_paths are only needed for delta payloads. '
                    'When no partitions are provided, verifies the payload '
                    'integrity.'),
       epilog=('Note: a payload may verify correctly but fail to apply, and '
@@ -67,9 +93,6 @@
   check_args.add_argument('-c', '--check', action='store_true', default=False,
                           help=('force payload integrity check (e.g. before '
                                 'applying)'))
-  check_args.add_argument('-D', '--describe', action='store_true',
-                          default=False,
-                          help='Print a friendly description of the payload.')
   check_args.add_argument('-r', '--report', metavar='FILE',
                           help="dump payload report (`-' for stdout)")
   check_args.add_argument('-t', '--type', dest='assert_type',
@@ -93,13 +116,6 @@
   check_args.add_argument('-s', '--metadata-size', metavar='NUM', default=0,
                           help='the metadata size to verify with the one in'
                           ' payload')
-  # TODO(tbrindus): deprecated in favour of --part_sizes
-  check_args.add_argument('-p', '--root-part-size', metavar='NUM',
-                          default=0, type=int,
-                          help='override rootfs partition size auto-inference')
-  check_args.add_argument('-P', '--kern-part-size', metavar='NUM',
-                          default=0, type=int,
-                          help='override kernel partition size auto-inference')
   check_args.add_argument('--part_sizes', metavar='NUM', nargs='+', type=int,
                           help='override partition size auto-inference')
 
@@ -113,21 +129,6 @@
                           help='use the specified bspatch binary')
   apply_args.add_argument('--puffpatch-path', metavar='FILE',
                           help='use the specified puffpatch binary')
-  # TODO(tbrindus): deprecated in favour of --dst_part_paths
-  apply_args.add_argument('--dst_kern', metavar='FILE',
-                          help='destination kernel partition file')
-  apply_args.add_argument('--dst_root', metavar='FILE',
-                          help='destination root partition file')
-  # TODO(tbrindus): deprecated in favour of --src_part_paths
-  apply_args.add_argument('--src_kern', metavar='FILE',
-                          help='source kernel partition file')
-  apply_args.add_argument('--src_root', metavar='FILE',
-                          help='source root partition file')
-  # TODO(tbrindus): deprecated in favour of --out_dst_part_paths
-  apply_args.add_argument('--out_dst_kern', metavar='FILE',
-                          help='created destination kernel partition file')
-  apply_args.add_argument('--out_dst_root', metavar='FILE',
-                          help='created destination root partition file')
 
   apply_args.add_argument('--src_part_paths', metavar='FILE', nargs='+',
                           help='source partitition files')
@@ -143,36 +144,28 @@
   # Parse command-line arguments.
   args = parser.parse_args(argv)
 
-  # TODO(tbrindus): temporary workaround to keep old-style flags from breaking
-  # without having to handle both types in our code. Remove after flag usage is
-  # removed from calling scripts.
-  args.part_names = args.part_names or [common.KERNEL, common.ROOTFS]
-  args.part_sizes = args.part_sizes or [args.kern_part_size,
-                                        args.root_part_size]
-  args.src_part_paths = args.src_part_paths or [args.src_kern, args.src_root]
-  args.dst_part_paths = args.dst_part_paths or [args.dst_kern, args.dst_root]
-  args.out_dst_part_paths = args.out_dst_part_paths or [args.out_dst_kern,
-                                                        args.out_dst_root]
-
-  # Make sure we don't have new dependencies on old flags by deleting them from
-  # the namespace here.
-  for old in ['kern_part_size', 'root_part_size', 'src_kern', 'src_root',
-              'dst_kern', 'dst_root', 'out_dst_kern', 'out_dst_root']:
-    delattr(args, old)
-
   # There are several options that imply --check.
   args.check = (args.check or args.report or args.assert_type or
                 args.block_size or args.allow_unhashed or
                 args.disabled_tests or args.meta_sig or args.key or
-                any(args.part_sizes) or args.metadata_size)
+                args.part_sizes is not None or args.metadata_size)
 
+  # Makes sure the following arguments have the same length as |part_names| if
+  # set.
   for arg in ['part_sizes', 'src_part_paths', 'dst_part_paths',
               'out_dst_part_paths']:
+    if getattr(args, arg) is None:
+      # Parameter is not set.
+      continue
     if len(args.part_names) != len(getattr(args, arg, [])):
       parser.error('partitions in --%s do not match --part_names' % arg)
 
-  if all(args.dst_part_paths) or all(args.out_dst_part_paths):
-    if all(args.src_part_paths):
+  def _IsSrcPartPathsProvided(args):
+    return args.src_part_paths is not None
+
+  # Makes sure parameters are coherent with payload type.
+  if ApplyPayload(args):
+    if _IsSrcPartPathsProvided(args):
       if args.assert_type == _TYPE_FULL:
         parser.error('%s payload does not accept source partition arguments'
                      % _TYPE_FULL)
@@ -208,15 +201,12 @@
   # Parse and validate arguments.
   args = ParseArguments(argv[1:])
 
-  with open(args.payload) as payload_file:
+  with open(args.payload, 'rb') as payload_file:
     payload = update_payload.Payload(payload_file)
     try:
       # Initialize payload.
       payload.Init()
 
-      if args.describe:
-        payload.Describe()
-
       # Perform payload integrity checks.
       if args.check:
         report_file = None
@@ -230,8 +220,9 @@
               report_file = open(args.report, 'w')
               do_close_report_file = True
 
-          part_sizes = dict(zip(args.part_names, args.part_sizes))
-          metadata_sig_file = args.meta_sig and open(args.meta_sig)
+          part_sizes = (args.part_sizes and
+                        dict(zip(args.part_names, args.part_sizes)))
+          metadata_sig_file = args.meta_sig and open(args.meta_sig, 'rb')
           payload.Check(
               pubkey_file_name=args.key,
               metadata_sig_file=metadata_sig_file,
@@ -249,7 +240,7 @@
             report_file.close()
 
       # Apply payload.
-      if all(args.dst_part_paths) or all(args.out_dst_part_paths):
+      if ApplyPayload(args):
         dargs = {'bsdiff_in_place': not args.extract_bsdiff}
         if args.bspatch_path:
           dargs['bspatch_path'] = args.bspatch_path
@@ -260,9 +251,9 @@
 
         out_dst_parts = {}
         file_handles = []
-        if all(args.out_dst_part_paths):
+        if args.out_dst_part_paths is not None:
           for name, path in zip(args.part_names, args.out_dst_part_paths):
-            handle = open(path, 'w+')
+            handle = open(path, 'wb+')
             file_handles.append(handle)
             out_dst_parts[name] = handle.name
         else:
@@ -275,7 +266,7 @@
 
         # If destination kernel and rootfs partitions are not given, then this
         # just becomes an apply operation with no check.
-        if all(args.dst_part_paths):
+        if CheckApplyPayload(args):
           # Prior to comparing, add the unused space past the filesystem
           # boundary in the new target partitions to become the same size as
           # the given partitions. This will truncate to larger size.
@@ -293,7 +284,7 @@
         # files are created as temp files and will be deleted upon close().
         for handle in file_handles:
           handle.close()
-    except error.PayloadError, e:
+    except error.PayloadError as e:
       sys.stderr.write('Error: %s\n' % e)
       return 1
 
diff --git a/scripts/paycheck_unittest.py b/scripts/paycheck_unittest.py
new file mode 100755
index 0000000..a90d269
--- /dev/null
+++ b/scripts/paycheck_unittest.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Unit testing paycheck.py."""
+
+# This test requires new (Y) and old (X) images, as well as a full payload
+# from image Y and a delta payload from Y to X for each partition.
+# Payloads are from sample_images/generate_payloads.
+#
+# The test performs the following:
+#
+# - It statically checks (verifies) the full and delta payloads.
+#
+# - It applies full_payload to yield a new kernel (kern.part) and rootfs
+#   (root.part) and compares them to the new image partitions.
+#
+# - It applies delta_payload to the old image to yield a new kernel and rootfs
+#   and compares them to the new image partitions.
+#
+# Previously test_paycheck.sh. Run with update_payload ebuild.
+
+# Disable check for function names to avoid errors based on old code
+# pylint: disable=invalid-name
+
+import filecmp
+import os
+import subprocess
+import unittest
+
+
+class PaycheckTest(unittest.TestCase):
+  """Test paycheck functions."""
+
+  def setUp(self):
+    self.tmpdir = os.getenv('T')
+
+    self._full_payload = os.path.join(self.tmpdir, 'full_payload.bin')
+    self._delta_payload = os.path.join(self.tmpdir, 'delta_payload.bin')
+
+    self._new_kernel = os.path.join(self.tmpdir, 'disk_ext2_4k.img')
+    self._new_root = os.path.join(self.tmpdir, 'disk_sqfs_default.img')
+    self._old_kernel = os.path.join(self.tmpdir,
+                                    'disk_ext2_4k_empty.img')
+    self._old_root = os.path.join(self.tmpdir, 'disk_sqfs_empty.img')
+
+    # Temp output files.
+    self._kernel_part = os.path.join(self.tmpdir, 'kern.part')
+    self._root_part = os.path.join(self.tmpdir, 'root.part')
+
+  def checkPayload(self, type_arg, payload):
+    """Checks Payload."""
+    self.assertEqual(0, subprocess.check_call(['./paycheck.py', '-t',
+                                               type_arg, payload]))
+
+  def testFullPayload(self):
+    """Checks the full payload statically."""
+    self.checkPayload('full', self._full_payload)
+
+  def testDeltaPayload(self):
+    """Checks the delta payload statically."""
+    self.checkPayload('delta', self._delta_payload)
+
+  def testApplyFullPayload(self):
+    """Applies full payloads and compares results to new sample images."""
+    self.assertEqual(0, subprocess.check_call(['./paycheck.py',
+                                               self._full_payload,
+                                               '--part_names', 'kernel', 'root',
+                                               '--out_dst_part_paths',
+                                               self._kernel_part,
+                                               self._root_part]))
+
+    # Check if generated full image is equal to sample image.
+    self.assertTrue(filecmp.cmp(self._kernel_part, self._new_kernel))
+    self.assertTrue(filecmp.cmp(self._root_part, self._new_root))
+
+  def testApplyDeltaPayload(self):
+    """Applies delta to old image and checks against new sample images."""
+    self.assertEqual(0, subprocess.check_call(['./paycheck.py',
+                                               self._delta_payload,
+                                               '--part_names', 'kernel', 'root',
+                                               '--src_part_paths',
+                                               self._old_kernel, self._old_root,
+                                               '--out_dst_part_paths',
+                                               self._kernel_part,
+                                               self._root_part]))
+
+    self.assertTrue(filecmp.cmp(self._kernel_part, self._new_kernel))
+    self.assertTrue(filecmp.cmp(self._root_part, self._new_root))
+
+if __name__ == '__main__':
+  unittest.main()
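
The test locates the sample images and payloads through the T environment variable; a minimal sketch of a driver, with a placeholder directory:

    # Illustrative driver only; the directory below is a placeholder. It must
    # contain the sample images and the payloads from generate_payloads.
    import os
    import subprocess

    env = dict(os.environ, T="/path/to/sample_images_and_payloads")
    subprocess.check_call(["./paycheck_unittest.py"], env=env)
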
diff --git a/scripts/payload_info.py b/scripts/payload_info.py
index 09a7cf7..8343d21 100755
--- a/scripts/payload_info.py
+++ b/scripts/payload_info.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python
 # -*- coding: utf-8 -*-
 #
 # Copyright (C) 2015 The Android Open Source Project
@@ -18,16 +18,17 @@
 
 """payload_info: Show information about an update payload."""
 
+from __future__ import absolute_import
 from __future__ import print_function
 
 import argparse
-import itertools
 import sys
 import textwrap
 
+from six.moves import range
 import update_payload
 
-MAJOR_PAYLOAD_VERSION_CHROMEOS = 1
+
 MAJOR_PAYLOAD_VERSION_BRILLO = 2
 
 def DisplayValue(key, value):
@@ -41,12 +42,12 @@
 def DisplayHexData(data, indent=0):
   """Print out binary data as a hex values."""
   for off in range(0, len(data), 16):
-    chunk = data[off:off + 16]
+    chunk = bytearray(data[off:off + 16])
     print(' ' * indent +
-          ' '.join('%.2x' % ord(c) for c in chunk) +
+          ' '.join('%.2x' % c for c in chunk) +
           '   ' * (16 - len(chunk)) +
           ' | ' +
-          ''.join(c if 32 <= ord(c) < 127 else '.' for c in chunk))
+          ''.join(chr(c) if 32 <= c < 127 else '.' for c in chunk))
 
 
 class PayloadCommand(object):
@@ -69,15 +70,16 @@
   def _DisplayManifest(self):
     """Show information from the payload manifest."""
     manifest = self.payload.manifest
-    if self.payload.header.version == MAJOR_PAYLOAD_VERSION_BRILLO:
-      DisplayValue('Number of partitions', len(manifest.partitions))
-      for partition in manifest.partitions:
-        DisplayValue('  Number of "%s" ops' % partition.partition_name,
-                     len(partition.operations))
-    else:
-      DisplayValue('Number of operations', len(manifest.install_operations))
-      DisplayValue('Number of kernel ops',
-                   len(manifest.kernel_install_operations))
+    DisplayValue('Number of partitions', len(manifest.partitions))
+    for partition in manifest.partitions:
+      DisplayValue('  Number of "%s" ops' % partition.partition_name,
+                   len(partition.operations))
+    for partition in manifest.partitions:
+      DisplayValue("  Timestamp for " +
+                   partition.partition_name, partition.version)
+    for partition in manifest.partitions:
+      DisplayValue("  COW Size for " +
+                   partition.partition_name, partition.estimate_cow_size)
     DisplayValue('Block size', manifest.block_size)
     DisplayValue('Minor version', manifest.minor_version)
 
@@ -131,8 +133,8 @@
 
     Args:
       name: The name you want displayed above the operation table.
-      operations: The install_operations object that you want to display
-                  information about.
+      operations: The operations object that you want to display information
+                  about.
     """
     def _DisplayExtents(extents, name):
       """Show information about extents."""
@@ -149,7 +151,7 @@
 
     op_dict = update_payload.common.OpType.NAMES
     print('%s:' % name)
-    for op, op_count in itertools.izip(operations, itertools.count()):
+    for op_count, op in enumerate(operations):
       print('  %d: %s' % (op_count, op_dict[op.type]))
       if op.HasField('data_offset'):
         print('    Data offset: %s' % op.data_offset)
@@ -170,14 +172,9 @@
     read_blocks = 0
     written_blocks = 0
     num_write_seeks = 0
-    if self.payload.header.version == MAJOR_PAYLOAD_VERSION_BRILLO:
-      partitions_operations = [part.operations for part in manifest.partitions]
-    else:
-      partitions_operations = [manifest.install_operations,
-                               manifest.kernel_install_operations]
-    for operations in partitions_operations:
+    for partition in manifest.partitions:
       last_ext = None
-      for curr_op in operations:
+      for curr_op in partition.operations:
         read_blocks += sum([ext.num_blocks for ext in curr_op.src_extents])
         written_blocks += sum([ext.num_blocks for ext in curr_op.dst_extents])
         for curr_ext in curr_op.dst_extents:
@@ -187,15 +184,10 @@
             num_write_seeks += 1
           last_ext = curr_ext
 
-    if manifest.minor_version == 1:
-      # Rootfs and kernel are written during the filesystem copy in version 1.
-      written_blocks += manifest.old_rootfs_info.size / manifest.block_size
-      written_blocks += manifest.old_kernel_info.size / manifest.block_size
-    # Old and new rootfs and kernel are read once during verification
-    read_blocks += manifest.old_rootfs_info.size / manifest.block_size
-    read_blocks += manifest.old_kernel_info.size / manifest.block_size
-    read_blocks += manifest.new_rootfs_info.size / manifest.block_size
-    read_blocks += manifest.new_kernel_info.size / manifest.block_size
+      # Old and new partitions are read once during verification.
+      read_blocks += partition.old_partition_info.size // manifest.block_size
+      read_blocks += partition.new_partition_info.size // manifest.block_size
+
     stats = {'read_blocks': read_blocks,
              'written_blocks': written_blocks,
              'num_write_seeks': num_write_seeks}
@@ -219,21 +211,15 @@
       self._DisplayStats(self.payload.manifest)
     if self.options.list_ops:
       print()
-      if self.payload.header.version == MAJOR_PAYLOAD_VERSION_BRILLO:
-        for partition in self.payload.manifest.partitions:
-          self._DisplayOps('%s install operations' % partition.partition_name,
-                           partition.operations)
-      else:
-        self._DisplayOps('Install operations',
-                         self.payload.manifest.install_operations)
-        self._DisplayOps('Kernel install operations',
-                         self.payload.manifest.kernel_install_operations)
+      for partition in self.payload.manifest.partitions:
+        self._DisplayOps('%s install operations' % partition.partition_name,
+                         partition.operations)
 
 
 def main():
   parser = argparse.ArgumentParser(
       description='Show information about an update payload.')
-  parser.add_argument('payload_file', type=file,
+  parser.add_argument('payload_file', type=argparse.FileType('rb'),
                       help='The update payload file.')
   parser.add_argument('--list_ops', default=False, action='store_true',
                       help='List the install operations and their extents.')
@@ -245,5 +231,6 @@
 
   PayloadCommand(args).Run()
 
+
 if __name__ == '__main__':
   sys.exit(main())
diff --git a/scripts/payload_info_unittest.py b/scripts/payload_info_unittest.py
index a4ee9d5..07bb679 100755
--- a/scripts/payload_info_unittest.py
+++ b/scripts/payload_info_unittest.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python
 #
 # Copyright (C) 2015 The Android Open Source Project
 #
@@ -17,24 +17,31 @@
 
 """Unit testing payload_info.py."""
 
+# Disable check for function names to avoid errors based on old code
+# pylint: disable-msg=invalid-name
+
+from __future__ import absolute_import
 from __future__ import print_function
 
-import StringIO
-import collections
-import mock
 import sys
 import unittest
 
+from contextlib import contextmanager
+
+from six.moves import StringIO
+
+import mock  # pylint: disable=import-error
+
 import payload_info
 import update_payload
 
-from contextlib import contextmanager
-
 from update_payload import update_metadata_pb2
 
+
 class FakePayloadError(Exception):
   """A generic error when using the FakePayload."""
 
+
 class FakeOption(object):
   """Fake options object for testing."""
 
@@ -42,11 +49,12 @@
     self.list_ops = False
     self.stats = False
     self.signatures = False
-    for key, val in kwargs.iteritems():
+    for key, val in kwargs.items():
       setattr(self, key, val)
     if not hasattr(self, 'payload_file'):
       self.payload_file = None
 
+
 class FakeOp(object):
   """Fake manifest operation for testing."""
 
@@ -54,48 +62,57 @@
     self.src_extents = src_extents
     self.dst_extents = dst_extents
     self.type = op_type
-    for key, val in kwargs.iteritems():
+    for key, val in kwargs.items():
       setattr(self, key, val)
 
   def HasField(self, field):
     return hasattr(self, field)
 
+
+class FakeExtent(object):
+  """Fake Extent for testing."""
+  def __init__(self, start_block, num_blocks):
+    self.start_block = start_block
+    self.num_blocks = num_blocks
+
+
+class FakePartitionInfo(object):
+  """Fake PartitionInfo for testing."""
+  def __init__(self, size):
+    self.size = size
+
+
 class FakePartition(object):
   """Fake PartitionUpdate field for testing."""
 
-  def __init__(self, partition_name, operations):
+  def __init__(self, partition_name, operations, old_size, new_size):
     self.partition_name = partition_name
     self.operations = operations
+    self.old_partition_info = FakePartitionInfo(old_size)
+    self.new_partition_info = FakePartitionInfo(new_size)
+
 
 class FakeManifest(object):
   """Fake manifest for testing."""
 
-  def __init__(self, major_version):
-    FakeExtent = collections.namedtuple('FakeExtent',
-                                        ['start_block', 'num_blocks'])
-    self.install_operations = [FakeOp([],
-                                      [FakeExtent(1, 1), FakeExtent(2, 2)],
-                                      update_payload.common.OpType.REPLACE_BZ,
-                                      dst_length=3*4096,
-                                      data_offset=1,
-                                      data_length=1)]
-    self.kernel_install_operations = [FakeOp(
-        [FakeExtent(1, 1)],
-        [FakeExtent(x, x) for x in xrange(20)],
-        update_payload.common.OpType.SOURCE_COPY,
-        src_length=4096)]
-    if major_version == payload_info.MAJOR_PAYLOAD_VERSION_BRILLO:
-      self.partitions = [FakePartition('root', self.install_operations),
-                         FakePartition('kernel',
-                                       self.kernel_install_operations)]
-      self.install_operations = self.kernel_install_operations = []
+  def __init__(self):
+    self.partitions = [
+        FakePartition(update_payload.common.ROOTFS,
+                      [FakeOp([], [FakeExtent(1, 1), FakeExtent(2, 2)],
+                              update_payload.common.OpType.REPLACE_BZ,
+                              dst_length=3*4096,
+                              data_offset=1,
+                              data_length=1)
+                      ], 1 * 4096, 3 * 4096),
+        FakePartition(update_payload.common.KERNEL,
+                      [FakeOp([FakeExtent(1, 1)],
+                              [FakeExtent(x, x) for x in range(20)],
+                              update_payload.common.OpType.SOURCE_COPY,
+                              src_length=4096)
+                      ], 2 * 4096, 4 * 4096),
+    ]
     self.block_size = 4096
     self.minor_version = 4
-    FakePartInfo = collections.namedtuple('FakePartInfo', ['size'])
-    self.old_rootfs_info = FakePartInfo(1 * 4096)
-    self.old_kernel_info = FakePartInfo(2 * 4096)
-    self.new_rootfs_info = FakePartInfo(3 * 4096)
-    self.new_kernel_info = FakePartInfo(4 * 4096)
     self.signatures_offset = None
     self.signatures_size = None
 
@@ -103,26 +120,27 @@
     """Fake HasField method based on the python members."""
     return hasattr(self, field_name) and getattr(self, field_name) is not None
 
+
 class FakeHeader(object):
   """Fake payload header for testing."""
 
-  def __init__(self, version, manifest_len, metadata_signature_len):
-    self.version = version
+  def __init__(self, manifest_len, metadata_signature_len):
+    self.version = payload_info.MAJOR_PAYLOAD_VERSION_BRILLO
     self.manifest_len = manifest_len
     self.metadata_signature_len = metadata_signature_len
 
   @property
   def size(self):
-    return (20 if self.version == payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS
-            else 24)
+    return 24
+
 
 class FakePayload(object):
   """Fake payload for testing."""
 
-  def __init__(self, major_version):
-    self._header = FakeHeader(major_version, 222, 0)
+  def __init__(self):
+    self._header = FakeHeader(222, 0)
     self.header = None
-    self._manifest = FakeManifest(major_version)
+    self._manifest = FakeManifest()
     self.manifest = None
 
     self._blobs = {}
@@ -152,7 +170,7 @@
   def _AddSignatureToProto(proto, **kwargs):
     """Add a new Signature element to the passed proto."""
     new_signature = proto.signatures.add()
-    for key, val in kwargs.iteritems():
+    for key, val in kwargs.items():
       setattr(new_signature, key, val)
 
   def AddPayloadSignature(self, **kwargs):
@@ -170,6 +188,7 @@
     self._header.metadata_signature_len = len(blob)
     self._blobs[-len(blob)] = blob
 
+
 class PayloadCommandTest(unittest.TestCase):
   """Test class for our PayloadCommand class."""
 
@@ -178,7 +197,7 @@
     """A tool for capturing the sys.stdout"""
     stdout = sys.stdout
     try:
-      sys.stdout = StringIO.StringIO()
+      sys.stdout = StringIO()
       yield sys.stdout
     finally:
       sys.stdout = stdout
@@ -192,60 +211,33 @@
     with mock.patch.object(update_payload, 'Payload', return_value=payload), \
          self.OutputCapturer() as output:
       payload_cmd.Run()
-    self.assertEquals(output.getvalue(), expected_out)
+    self.assertEqual(output.getvalue(), expected_out)
 
   def testDisplayValue(self):
     """Verify that DisplayValue prints what we expect."""
     with self.OutputCapturer() as output:
       payload_info.DisplayValue('key', 'value')
-    self.assertEquals(output.getvalue(), 'key:                         value\n')
+    self.assertEqual(output.getvalue(), 'key:                         value\n')
 
   def testRun(self):
     """Verify that Run parses and displays the payload like we expect."""
     payload_cmd = payload_info.PayloadCommand(FakeOption(action='show'))
-    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS)
-    expected_out = """Payload version:             1
+    payload = FakePayload()
+    expected_out = """Payload version:             2
 Manifest length:             222
-Number of operations:        1
-Number of kernel ops:        1
+Number of partitions:        2
+  Number of "root" ops:      1
+  Number of "kernel" ops:    1
 Block size:                  4096
 Minor version:               4
 """
     self.TestCommand(payload_cmd, payload, expected_out)
 
-  def testListOpsOnVersion1(self):
-    """Verify that the --list_ops option gives the correct output."""
-    payload_cmd = payload_info.PayloadCommand(
-        FakeOption(list_ops=True, action='show'))
-    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS)
-    expected_out = """Payload version:             1
-Manifest length:             222
-Number of operations:        1
-Number of kernel ops:        1
-Block size:                  4096
-Minor version:               4
-
-Install operations:
-  0: REPLACE_BZ
-    Data offset: 1
-    Data length: 1
-    Destination: 2 extents (3 blocks)
-      (1,1) (2,2)
-Kernel install operations:
-  0: SOURCE_COPY
-    Source: 1 extent (1 block)
-      (1,1)
-    Destination: 20 extents (190 blocks)
-      (0,0) (1,1) (2,2) (3,3) (4,4) (5,5) (6,6) (7,7) (8,8) (9,9) (10,10)
-      (11,11) (12,12) (13,13) (14,14) (15,15) (16,16) (17,17) (18,18) (19,19)
-"""
-    self.TestCommand(payload_cmd, payload, expected_out)
-
   def testListOpsOnVersion2(self):
     """Verify that the --list_ops option gives the correct output."""
     payload_cmd = payload_info.PayloadCommand(
         FakeOption(list_ops=True, action='show'))
-    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_BRILLO)
+    payload = FakePayload()
     expected_out = """Payload version:             2
 Manifest length:             222
 Number of partitions:        2
@@ -270,28 +262,11 @@
 """
     self.TestCommand(payload_cmd, payload, expected_out)
 
-  def testStatsOnVersion1(self):
-    """Verify that the --stats option works correctly."""
-    payload_cmd = payload_info.PayloadCommand(
-        FakeOption(stats=True, action='show'))
-    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS)
-    expected_out = """Payload version:             1
-Manifest length:             222
-Number of operations:        1
-Number of kernel ops:        1
-Block size:                  4096
-Minor version:               4
-Blocks read:                 11
-Blocks written:              193
-Seeks when writing:          18
-"""
-    self.TestCommand(payload_cmd, payload, expected_out)
-
   def testStatsOnVersion2(self):
     """Verify that the --stats option works correctly on version 2."""
     payload_cmd = payload_info.PayloadCommand(
         FakeOption(stats=True, action='show'))
-    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_BRILLO)
+    payload = FakePayload()
     expected_out = """Payload version:             2
 Manifest length:             222
 Number of partitions:        2
@@ -309,11 +284,12 @@
     """Verify that the --signatures option works with unsigned payloads."""
     payload_cmd = payload_info.PayloadCommand(
         FakeOption(action='show', signatures=True))
-    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS)
-    expected_out = """Payload version:             1
+    payload = FakePayload()
+    expected_out = """Payload version:             2
 Manifest length:             222
-Number of operations:        1
-Number of kernel ops:        1
+Number of partitions:        2
+  Number of "root" ops:      1
+  Number of "kernel" ops:    1
 Block size:                  4096
 Minor version:               4
 No metadata signatures stored in the payload
@@ -325,11 +301,11 @@
     """Verify that the --signatures option shows the present signatures."""
     payload_cmd = payload_info.PayloadCommand(
         FakeOption(action='show', signatures=True))
-    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_BRILLO)
+    payload = FakePayload()
     payload.AddPayloadSignature(version=1,
-                                data='12345678abcdefgh\x00\x01\x02\x03')
-    payload.AddPayloadSignature(data='I am a signature so access is yes.')
-    payload.AddMetadataSignature(data='\x00\x0a\x0c')
+                                data=b'12345678abcdefgh\x00\x01\x02\x03')
+    payload.AddPayloadSignature(data=b'I am a signature so access is yes.')
+    payload.AddMetadataSignature(data=b'\x00\x0a\x0c')
     expected_out = """Payload version:             2
 Manifest length:             222
 Number of partitions:        2
@@ -353,5 +329,6 @@
 """
     self.TestCommand(payload_cmd, payload, expected_out)
 
+
 if __name__ == '__main__':
   unittest.main()
diff --git a/scripts/run_unittests b/scripts/run_unittests
index 0d301ba..db5ed73 100755
--- a/scripts/run_unittests
+++ b/scripts/run_unittests
@@ -26,5 +26,6 @@
 done
 
 ./payload_info_unittest.py
+./paycheck_unittest.py
 
 exit 0
diff --git a/scripts/simulate_ota.py b/scripts/simulate_ota.py
new file mode 100644
index 0000000..6349979
--- /dev/null
+++ b/scripts/simulate_ota.py
@@ -0,0 +1,147 @@
+#
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Tools for running host side simulation of an OTA update."""
+
+
+from __future__ import print_function
+
+import argparse
+import filecmp
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+import zipfile
+
+import update_payload
+
+
+def extract_file(zip_file_path, entry_name, target_file_path):
+  """Extract a file from zip archive into |target_file_path|"""
+  with open(target_file_path, 'wb') as out_fp:
+    if isinstance(zip_file_path, zipfile.ZipFile):
+      with zip_file_path.open(entry_name) as fp:
+        shutil.copyfileobj(fp, out_fp)
+    elif os.path.isdir(zip_file_path):
+      with open(os.path.join(zip_file_path, entry_name), "rb") as fp:
+        shutil.copyfileobj(fp, out_fp)
+
+def is_sparse_image(filepath):
+  """Return True iff |filepath| is an Android sparse image."""
+  with open(filepath, 'rb') as fp:
+    # Magic for android sparse image format
+    # https://source.android.com/devices/bootloader/images
+    return fp.read(4) == b'\x3A\xFF\x26\xED'
+
+def extract_img(zip_archive, img_name, output_path):
+  entry_name = "IMAGES/" + img_name + ".img"
+  extract_file(zip_archive, entry_name, output_path)
+  if is_sparse_image(output_path):
+    raw_img_path = output_path + ".raw"
+    subprocess.check_output(["simg2img", output_path, raw_img_path])
+    os.rename(raw_img_path, output_path)
+
+def run_ota(source, target, payload_path, tempdir):
+  """Run an OTA on host side"""
+  payload = update_payload.Payload(payload_path)
+  payload.Init()
+  if zipfile.is_zipfile(source):
+    source = zipfile.ZipFile(source)
+  if zipfile.is_zipfile(target):
+    target = zipfile.ZipFile(target)
+
+  old_partitions = []
+  new_partitions = []
+  expected_new_partitions = []
+  for part in payload.manifest.partitions:
+    name = part.partition_name
+    old_image = os.path.join(tempdir, "source_" + name + ".img")
+    new_image = os.path.join(tempdir, "target_" + name + ".img")
+    print("Extracting source image for", name)
+    extract_img(source, name, old_image)
+    print("Extracting target image for", name)
+    extract_img(target, name, new_image)
+
+    old_partitions.append(old_image)
+    scratch_image_name = new_image + ".actual"
+    new_partitions.append(scratch_image_name)
+    with open(scratch_image_name, "wb") as fp:
+      fp.truncate(part.new_partition_info.size)
+    expected_new_partitions.append(new_image)
+
+  delta_generator_args = ["delta_generator", "--in_file=" + payload_path]
+  partition_names = [
+      part.partition_name for part in payload.manifest.partitions
+  ]
+  delta_generator_args.append("--partition_names=" + ":".join(partition_names))
+  delta_generator_args.append("--old_partitions=" + ":".join(old_partitions))
+  delta_generator_args.append("--new_partitions=" + ":".join(new_partitions))
+
+  subprocess.check_output(delta_generator_args)
+
+  valid = True
+  for (expected_part, actual_part, part_name) in \
+      zip(expected_new_partitions, new_partitions, partition_names):
+    if filecmp.cmp(expected_part, actual_part):
+      print("Partition `{}` is valid".format(part_name))
+    else:
+      valid = False
+      print(
+          "Partition `{}` is INVALID expected image: {} actual image: {}"
+          .format(part_name, expected_part, actual_part))
+
+  if not valid and sys.stdout.isatty():
+    input("Paused to investigate invalid partitions, press any key to exit.")
+
+
+def main():
+  parser = argparse.ArgumentParser(
+      description="Run host side simulation of OTA package")
+  parser.add_argument(
+      "--source",
+      help="Target file zip for the source build",
+      required=True, nargs=1)
+  parser.add_argument(
+      "--target",
+      help="Target file zip for the target build",
+      required=True, nargs=1)
+  parser.add_argument(
+      "payload",
+      help="payload.bin for the OTA package, or a zip of OTA package itself",
+      nargs=1)
+  args = parser.parse_args()
+  print(args)
+
+  assert os.path.exists(args.source[0]), \
+    "source target file must point to a valid zipfile or directory"
+  assert os.path.exists(args.target[0]), \
+    "target path must point to a valid zipfile or directory"
+
+  # pylint: disable=no-member
+  with tempfile.TemporaryDirectory() as tempdir:
+    payload_path = args.payload[0]
+    if zipfile.is_zipfile(payload_path):
+      with zipfile.ZipFile(payload_path, "r") as zfp:
+        payload_entry_name = 'payload.bin'
+        zfp.extract(payload_entry_name, tempdir)
+        payload_path = os.path.join(tempdir, payload_entry_name)
+
+    run_ota(args.source[0], args.target[0], payload_path, tempdir)
+
+
+if __name__ == '__main__':
+  main()
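
run_ota can also be called directly; a minimal sketch, assuming simulate_ota.py is importable from the current directory and using placeholder file names:

    # Illustrative only; the zip and payload names are placeholders.
    import tempfile

    from simulate_ota import run_ota

    with tempfile.TemporaryDirectory() as tmp:
      # source/target are target_files zips; payload.bin must match the target.
      run_ota("source-target_files.zip", "target-target_files.zip",
              "payload.bin", tmp)
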
diff --git a/scripts/test_paycheck.sh b/scripts/test_paycheck.sh
deleted file mode 100755
index 239b984..0000000
--- a/scripts/test_paycheck.sh
+++ /dev/null
@@ -1,169 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2013 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# A test script for paycheck.py and the update_payload.py library.
-#
-# This script requires three payload files, along with a metadata signature for
-# each, and a public key for verifying signatures. Payload include:
-#
-# - A full payload for release X (old_full_payload)
-#
-# - A full payload for release Y (new_full_payload), where Y > X
-#
-# - A delta payload from X to Y (delta_payload)
-#
-# The test performs the following:
-#
-# - It verifies each payload against its metadata signature, also asserting the
-#   payload type. Another artifact is a human-readable payload report, which
-#   is output to stdout to be inspected by the user.
-#
-# - It applies old_full_payload to yield old kernel (old_kern.part) and rootfs
-#   (old_root.part) partitions.
-#
-# - It applies delta_payload to old_{kern,root}.part to yield new kernel
-#   (new_delta_kern.part) and rootfs (new_delta_root.part) partitions.
-#
-# - It applies new_full_payload to yield reference new kernel
-#   (new_full_kern.part) and rootfs (new_full_root.part) partitions.
-#
-# - It compares new_{delta,full}_kern.part and new_{delta,full}_root.part to
-#   ensure that they are binary identical.
-#
-# If all steps have completed successfully we know with high certainty that
-# paycheck.py (and hence update_payload.py) correctly parses both full and delta
-# payloads, and applies them to yield the expected result. Finally, each
-# paycheck.py execution is timed.
-
-
-# Stop on errors, unset variables.
-set -e
-set -u
-
-# Temporary image files.
-OLD_KERN_PART=old_kern.part
-OLD_ROOT_PART=old_root.part
-NEW_DELTA_KERN_PART=new_delta_kern.part
-NEW_DELTA_ROOT_PART=new_delta_root.part
-NEW_FULL_KERN_PART=new_full_kern.part
-NEW_FULL_ROOT_PART=new_full_root.part
-CROS_PARTS="kernel root"
-
-
-log() {
-  echo "$@" >&2
-}
-
-die() {
-  log "$@"
-  exit 1
-}
-
-usage_and_exit() {
-  cat >&2 <<EOF
-Usage: ${0##*/} old_full_payload delta_payload new_full_payload
-EOF
-  exit
-}
-
-check_payload() {
-  payload_file=$1
-  payload_type=$2
-
-  time ${paycheck} -t ${payload_type} ${payload_file}
-}
-
-apply_full_payload() {
-  payload_file=$1
-  out_dst_kern_part="$2/$3"
-  out_dst_root_part="$2/$4"
-
-  time ${paycheck} ${payload_file} \
-    --part_names ${CROS_PARTS} \
-    --out_dst_part_paths ${out_dst_kern_part} ${out_dst_root_part}
-}
-
-apply_delta_payload() {
-  payload_file=$1
-  out_dst_kern_part="$2/$3"
-  out_dst_root_part="$2/$4"
-  dst_kern_part="$2/$5"
-  dst_root_part="$2/$6"
-  src_kern_part="$2/$7"
-  src_root_part="$2/$8"
-
-  time ${paycheck} ${payload_file} \
-    --part_names ${CROS_PARTS} \
-    --out_dst_part_paths ${out_dst_kern_part} ${out_dst_root_part} \
-    --dst_part_paths ${dst_kern_part} ${dst_root_part} \
-    --src_part_paths ${src_kern_part} ${src_root_part}
-}
-
-main() {
-  # Read command-line arguments.
-  if [ $# == 1 ] && [ "$1" == "-h" ]; then
-    usage_and_exit
-  elif [ $# != 3 ]; then
-    die "Error: unexpected number of arguments"
-  fi
-  old_full_payload="$1"
-  delta_payload="$2"
-  new_full_payload="$3"
-
-  # Find paycheck.py
-  paycheck=${0%/*}/paycheck.py
-  if [ -z "${paycheck}" ] || [ ! -x ${paycheck} ]; then
-    die "cannot find ${paycheck} or file is not executable"
-  fi
-
-  # Check the payloads statically.
-  log "Checking payloads..."
-  check_payload "${old_full_payload}" full
-  check_payload "${new_full_payload}" full
-  check_payload "${delta_payload}" delta
-  log "Done"
-
-  # Apply full/delta payloads and verify results are identical.
-  tmpdir="$(mktemp -d --tmpdir test_paycheck.XXXXXXXX)"
-  log "Initiating application of payloads at $tmpdir"
-
-  log "Applying old full payload..."
-  apply_full_payload "${old_full_payload}" "${tmpdir}" "${OLD_KERN_PART}" \
-    "${OLD_ROOT_PART}"
-  log "Done"
-
-  log "Applying new full payload..."
-  apply_full_payload "${new_full_payload}" "${tmpdir}" "${NEW_FULL_KERN_PART}" \
-    "${NEW_FULL_ROOT_PART}"
-  log "Done"
-
-  log "Applying delta payload to old partitions..."
-  apply_delta_payload "${delta_payload}" "${tmpdir}" "${NEW_DELTA_KERN_PART}" \
-    "${NEW_DELTA_ROOT_PART}" "${NEW_FULL_KERN_PART}" \
-    "${NEW_FULL_ROOT_PART}" "${OLD_KERN_PART}" "${OLD_ROOT_PART}"
-  log "Done"
-
-  log "Comparing results of delta and new full updates..."
-  diff "${tmpdir}/${NEW_FULL_KERN_PART}" "${tmpdir}/${NEW_DELTA_KERN_PART}"
-  diff "${tmpdir}/${NEW_FULL_ROOT_PART}" "${tmpdir}/${NEW_DELTA_ROOT_PART}"
-  log "Done"
-
-  log "Cleaning up"
-  rm -fr "${tmpdir}"
-}
-
-main "$@"
diff --git a/scripts/update_device.py b/scripts/update_device.py
index 49f766d..f672cda 100755
--- a/scripts/update_device.py
+++ b/scripts/update_device.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python3
 #
 # Copyright (C) 2017 The Android Open Source Project
 #
@@ -17,18 +17,25 @@
 
 """Send an A/B update to an Android device over adb."""
 
+from __future__ import print_function
+from __future__ import absolute_import
+
 import argparse
-import BaseHTTPServer
+import binascii
 import hashlib
 import logging
 import os
 import socket
 import subprocess
 import sys
+import struct
+import tempfile
 import threading
 import xml.etree.ElementTree
 import zipfile
 
+from six.moves import BaseHTTPServer
+
 import update_payload.payload
 
 
@@ -41,6 +48,7 @@
 # The port on the device that update_engine should connect to.
 DEVICE_PORT = 1234
 
+
 def CopyFileObjLength(fsrc, fdst, buffer_size=128 * 1024, copy_length=None):
   """Copy from a file object to another.
 
@@ -85,6 +93,7 @@
   OTA_PAYLOAD_PROPERTIES_TXT = 'payload_properties.txt'
   SECONDARY_OTA_PAYLOAD_BIN = 'secondary/payload.bin'
   SECONDARY_OTA_PAYLOAD_PROPERTIES_TXT = 'secondary/payload_properties.txt'
+  PAYLOAD_MAGIC_HEADER = b'CrAU'
 
   def __init__(self, otafilename, secondary_payload=False):
     self.otafilename = otafilename
@@ -93,10 +102,34 @@
     payload_entry = (self.SECONDARY_OTA_PAYLOAD_BIN if secondary_payload else
                      self.OTA_PAYLOAD_BIN)
     payload_info = otazip.getinfo(payload_entry)
-    self.offset = payload_info.header_offset
-    self.offset += zipfile.sizeFileHeader
-    self.offset += len(payload_info.extra) + len(payload_info.filename)
-    self.size = payload_info.file_size
+
+    if payload_info.compress_type != 0:
+      logging.error(
+          "Expected payload to be uncompressed, got compression method %d",
+          payload_info.compress_type)
+    # Don't use len(payload_info.extra), because that returns the size of the
+    # extra field in the central directory. We need to look at the local file
+    # header, as the two may carry extra fields of different sizes.
+    with open(otafilename, "rb") as fp:
+      fp.seek(payload_info.header_offset)
+      data = fp.read(zipfile.sizeFileHeader)
+      fheader = struct.unpack(zipfile.structFileHeader, data)
+      # Last two fields of local file header are filename length and
+      # extra length
+      filename_len = fheader[-2]
+      extra_len = fheader[-1]
+      self.offset = payload_info.header_offset
+      self.offset += zipfile.sizeFileHeader
+      self.offset += filename_len + extra_len
+      self.size = payload_info.file_size
+      fp.seek(self.offset)
+      payload_header = fp.read(4)
+      if payload_header != self.PAYLOAD_MAGIC_HEADER:
+        logging.warning(
+            "Invalid header, expected %s, got %s."
+            "Either the offset is not correct, or payload is corrupted",
+            binascii.hexlify(self.PAYLOAD_MAGIC_HEADER),
+            binascii.hexlify(payload_header))
 
     property_entry = (self.SECONDARY_OTA_PAYLOAD_PROPERTIES_TXT if
                       secondary_payload else self.OTA_PAYLOAD_PROPERTIES_TXT)
@@ -137,7 +170,6 @@
           start_range = file_size - int(e)
     return start_range, end_range
 
-
   def do_GET(self):  # pylint: disable=invalid-name
     """Reply with the requested payload file."""
     if self.path != '/payload':
@@ -180,7 +212,6 @@
     f.seek(serving_start + start_range)
     CopyFileObjLength(f, self.wfile, copy_length=end_range - start_range)
 
-
   def do_POST(self):  # pylint: disable=invalid-name
     """Reply with the omaha response xml."""
     if self.path != '/update':
@@ -276,6 +307,7 @@
     logging.info('Server Terminated')
 
   def StopServer(self):
+    self._httpd.shutdown()
     self._httpd.socket.close()
 
 
@@ -289,13 +321,13 @@
   """Return the command to run to start the update in the Android device."""
   ota = AndroidOTAPackage(ota_filename, secondary)
   headers = ota.properties
-  headers += 'USER_AGENT=Dalvik (something, something)\n'
-  headers += 'NETWORK_ID=0\n'
-  headers += extra_headers
+  headers += b'USER_AGENT=Dalvik (something, something)\n'
+  headers += b'NETWORK_ID=0\n'
+  headers += extra_headers.encode()
 
   return ['update_engine_client', '--update', '--follow',
           '--payload=%s' % payload_url, '--offset=%d' % ota.offset,
-          '--size=%d' % ota.size, '--headers="%s"' % headers]
+          '--size=%d' % ota.size, '--headers="%s"' % headers.decode()]
 
 
 def OmahaUpdateCommand(omaha_url):
@@ -318,7 +350,7 @@
     if self._device_serial:
       self._command_prefix += ['-s', self._device_serial]
 
-  def adb(self, command):
+  def adb(self, command, timeout_seconds: float = None):
     """Run an ADB command like "adb push".
 
     Args:
@@ -333,7 +365,7 @@
     command = self._command_prefix + command
     logging.info('Running: %s', ' '.join(str(x) for x in command))
     p = subprocess.Popen(command, universal_newlines=True)
-    p.wait()
+    p.wait(timeout_seconds)
     return p.returncode
 
   def adb_output(self, command):
@@ -353,6 +385,28 @@
     return subprocess.check_output(command, universal_newlines=True)
 
 
+def PushMetadata(dut, otafile, metadata_path):
+  payload = update_payload.Payload(otafile)
+  payload.Init()
+  with tempfile.TemporaryDirectory() as tmpdir:
+    with zipfile.ZipFile(otafile, "r") as zfp:
+      extracted_path = os.path.join(tmpdir, "payload.bin")
+      with zfp.open("payload.bin") as payload_fp, \
+              open(extracted_path, "wb") as output_fp:
+        # Only extract the first |data_offset| bytes from the payload.
+        # This is because allocateSpaceForPayload only needs to see the
+        # manifest, not the entire payload.
+        # Extracting the entire payload works, but is slow for a full
+        # OTA.
+        output_fp.write(payload_fp.read(payload.data_offset))
+
+      return dut.adb([
+          "push",
+          extracted_path,
+          metadata_path
+      ]) == 0
+
+
 def main():
   parser = argparse.ArgumentParser(description='Android A/B OTA helper.')
   parser.add_argument('otafile', metavar='PAYLOAD', type=str,
@@ -372,6 +426,17 @@
                       help='Extra headers to pass to the device.')
   parser.add_argument('--secondary', action='store_true',
                       help='Update with the secondary payload in the package.')
+  parser.add_argument('--no-slot-switch', action='store_true',
+                      help='Do not perform slot switch after the update.')
+  parser.add_argument('--no-postinstall', action='store_true',
+                      help='Do not execute postinstall scripts after the update.')
+  parser.add_argument('--allocate-only', action='store_true',
+                      help='Allocate space for this OTA, instead of actually '
+                           'applying the OTA.')
+  parser.add_argument('--verify-only', action='store_true',
+                      help='Verify metadata then exit, instead of applying the OTA.')
+  parser.add_argument('--no-care-map', action='store_true',
+                      help='Do not push care_map.pb to device.')
   args = parser.parse_args()
   logging.basicConfig(
       level=logging.WARNING if args.no_verbose else logging.INFO)
@@ -389,6 +454,40 @@
   help_cmd = ['shell', 'su', '0', 'update_engine_client', '--help']
   use_omaha = 'omaha' in dut.adb_output(help_cmd)
 
+  metadata_path = "/data/ota_package/metadata"
+  if args.allocate_only:
+    if PushMetadata(dut, args.otafile, metadata_path):
+      dut.adb([
+          "shell", "update_engine_client", "--allocate",
+          "--metadata={}".format(metadata_path)])
+    # Return 0: we only needed to run the ADB commands above, so there is no
+    # further work after this point.
+    return 0
+  if args.verify_only:
+    if PushMetadata(dut, args.otafile, metadata_path):
+      dut.adb([
+          "shell", "update_engine_client", "--verify",
+          "--metadata={}".format(metadata_path)])
+    # Return 0: we only needed to run the ADB commands above, so there is no
+    # further work after this point.
+    return 0
+
+  if args.no_slot_switch:
+    args.extra_headers += "\nSWITCH_SLOT_ON_REBOOT=0"
+  if args.no_postinstall:
+    args.extra_headers += "\nRUN_POST_INSTALL=0"
+
+  with zipfile.ZipFile(args.otafile) as zfp:
+    CARE_MAP_ENTRY_NAME = "care_map.pb"
+    if CARE_MAP_ENTRY_NAME in zfp.namelist() and not args.no_care_map:
+      # Need root permission to push to /data
+      dut.adb(["root"])
+      with tempfile.NamedTemporaryFile() as care_map_fp:
+        care_map_fp.write(zfp.read(CARE_MAP_ENTRY_NAME))
+        care_map_fp.flush()
+        dut.adb(["push", care_map_fp.name,
+                "/data/ota_package/" + CARE_MAP_ENTRY_NAME])
+
   if args.file:
     # Update via pushing a file to /data.
     device_ota_file = os.path.join(OTA_PACKAGE_PATH, 'debug.zip')
@@ -447,9 +546,10 @@
     if server_thread:
       server_thread.StopServer()
     for cmd in finalize_cmds:
-      dut.adb(cmd)
+      dut.adb(cmd, 5)
 
   return 0
 
+
 if __name__ == '__main__':
   sys.exit(main())
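
For reference, the local-file-header arithmetic that AndroidOTAPackage now performs can be exercised in isolation. Below is a minimal, self-contained sketch, assuming an OTA zip whose payload.bin entry is stored uncompressed; the function name and error handling are illustrative, not code from this change:

    import struct
    import zipfile

    PAYLOAD_MAGIC = b'CrAU'

    def payload_offset_and_size(ota_path, entry_name='payload.bin'):
        """Return (offset, size) of a stored (uncompressed) zip entry."""
        with zipfile.ZipFile(ota_path) as zf:
            info = zf.getinfo(entry_name)
        if info.compress_type != zipfile.ZIP_STORED:
            raise ValueError('%s must be stored, not compressed' % entry_name)
        with open(ota_path, 'rb') as fp:
            # Read the local file header; its filename/extra-field lengths can
            # differ from the central directory's, so len(info.extra) is not a
            # reliable way to compute the data offset.
            fp.seek(info.header_offset)
            fheader = struct.unpack(zipfile.structFileHeader,
                                    fp.read(zipfile.sizeFileHeader))
            filename_len, extra_len = fheader[-2], fheader[-1]
            offset = (info.header_offset + zipfile.sizeFileHeader +
                      filename_len + extra_len)
            fp.seek(offset)
            if fp.read(4) != PAYLOAD_MAGIC:
                raise ValueError('offset does not point at a CrAU payload')
        return offset, info.file_size

The resulting offset/size pair is what gets passed to update_engine_client through --offset and --size.
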
diff --git a/scripts/update_payload/__init__.py b/scripts/update_payload/__init__.py
index 8ee95e2..6e77678 100644
--- a/scripts/update_payload/__init__.py
+++ b/scripts/update_payload/__init__.py
@@ -17,6 +17,8 @@
 """Library for processing, verifying and applying Chrome OS update payloads."""
 
 # Just raise the interface classes to the root namespace.
+from __future__ import absolute_import
+
 from update_payload.checker import CHECKS_TO_DISABLE
 from update_payload.error import PayloadError
 from update_payload.payload import Payload
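
These re-exported names are the entry points the updated scripts build on: the new PushMetadata helper in update_device.py calls Payload.Init() and reads payload.data_offset, and the unit tests below construct Payload objects from in-memory files. A hedged sketch of that pattern, assuming a raw payload.bin opened as a binary file object (the helper name is made up):

    from update_payload import Payload, PayloadError

    def manifest_region_size(payload_path):
        """Return how many leading bytes hold the payload header and manifest.

        Bytes before payload.data_offset are all that metadata-only consumers
        (such as space allocation or verification) need to read.
        """
        with open(payload_path, 'rb') as fp:
            payload = Payload(fp)
            try:
                payload.Init()  # Parses the header and manifest.
            except PayloadError as e:
                raise ValueError('not a valid payload: %s' % e)
            return payload.data_offset
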
diff --git a/scripts/update_payload/applier.py b/scripts/update_payload/applier.py
index 21d8e87..29ccb8e 100644
--- a/scripts/update_payload/applier.py
+++ b/scripts/update_payload/applier.py
@@ -24,12 +24,12 @@
 
 """
 
+from __future__ import absolute_import
 from __future__ import print_function
 
 import array
 import bz2
 import hashlib
-import itertools
 # Not everywhere we can have the lzma library so we ignore it if we didn't have
 # it because it is not going to be used. For example, 'cros flash' uses
 # devserver code which eventually loads this file, but the lzma library is not
@@ -45,7 +45,6 @@
   except ImportError:
     pass
 import os
-import shutil
 import subprocess
 import sys
 import tempfile
@@ -53,7 +52,6 @@
 from update_payload import common
 from update_payload.error import PayloadError
 
-
 #
 # Helper functions.
 #
@@ -72,7 +70,7 @@
   """
   hasher = hashlib.sha256()
   block_length = 1024 * 1024
-  max_length = length if length >= 0 else sys.maxint
+  max_length = length if length >= 0 else sys.maxsize
 
   while max_length > 0:
     read_length = min(max_length, block_length)
@@ -108,20 +106,16 @@
   Returns:
     A character array containing the concatenated read data.
   """
-  data = array.array('c')
+  data = array.array('B')
   if max_length < 0:
-    max_length = sys.maxint
+    max_length = sys.maxsize
   for ex in extents:
     if max_length == 0:
       break
     read_length = min(max_length, ex.num_blocks * block_size)
 
-    # Fill with zeros or read from file, depending on the type of extent.
-    if ex.start_block == common.PSEUDO_EXTENT_MARKER:
-      data.extend(itertools.repeat('\0', read_length))
-    else:
-      file_obj.seek(ex.start_block * block_size)
-      data.fromfile(file_obj, read_length)
+    file_obj.seek(ex.start_block * block_size)
+    data.fromfile(file_obj, read_length)
 
     max_length -= read_length
 
@@ -149,12 +143,8 @@
     if not data_length:
       raise PayloadError('%s: more write extents than data' % ex_name)
     write_length = min(data_length, ex.num_blocks * block_size)
-
-    # Only do actual writing if this is not a pseudo-extent.
-    if ex.start_block != common.PSEUDO_EXTENT_MARKER:
-      file_obj.seek(ex.start_block * block_size)
-      data_view = buffer(data, data_offset, write_length)
-      file_obj.write(data_view)
+    file_obj.seek(ex.start_block * block_size)
+    file_obj.write(data[data_offset:(data_offset + write_length)])
 
     data_offset += write_length
     data_length -= write_length
@@ -184,20 +174,17 @@
   arg = ''
   pad_off = pad_len = 0
   if data_length < 0:
-    data_length = sys.maxint
+    data_length = sys.maxsize
   for ex, ex_name in common.ExtentIter(extents, base_name):
     if not data_length:
       raise PayloadError('%s: more extents than total data length' % ex_name)
 
-    is_pseudo = ex.start_block == common.PSEUDO_EXTENT_MARKER
-    start_byte = -1 if is_pseudo else ex.start_block * block_size
+    start_byte = ex.start_block * block_size
     num_bytes = ex.num_blocks * block_size
     if data_length < num_bytes:
       # We're only padding a real extent.
-      if not is_pseudo:
-        pad_off = start_byte + data_length
-        pad_len = num_bytes - data_length
-
+      pad_off = start_byte + data_length
+      pad_len = num_bytes - data_length
       num_bytes = data_length
 
     arg += '%s%d:%d' % (arg and ',', start_byte, num_bytes)
@@ -274,30 +261,28 @@
       num_blocks = ex.num_blocks
       count = num_blocks * block_size
 
-      # Make sure it's not a fake (signature) operation.
-      if start_block != common.PSEUDO_EXTENT_MARKER:
-        data_end = data_start + count
+      data_end = data_start + count
 
-        # Make sure we're not running past partition boundary.
-        if (start_block + num_blocks) * block_size > part_size:
-          raise PayloadError(
-              '%s: extent (%s) exceeds partition size (%d)' %
-              (ex_name, common.FormatExtent(ex, block_size),
-               part_size))
+      # Make sure we're not running past partition boundary.
+      if (start_block + num_blocks) * block_size > part_size:
+        raise PayloadError(
+            '%s: extent (%s) exceeds partition size (%d)' %
+            (ex_name, common.FormatExtent(ex, block_size),
+             part_size))
 
-        # Make sure that we have enough data to write.
-        if data_end >= data_length + block_size:
-          raise PayloadError(
-              '%s: more dst blocks than data (even with padding)')
+      # Make sure that we have enough data to write.
+      if data_end >= data_length + block_size:
+        raise PayloadError(
+            '%s: more dst blocks than data (even with padding)')
 
-        # Pad with zeros if necessary.
-        if data_end > data_length:
-          padding = data_end - data_length
-          out_data += '\0' * padding
+      # Pad with zeros if necessary.
+      if data_end > data_length:
+        padding = data_end - data_length
+        out_data += b'\0' * padding
 
-        self.payload.payload_file.seek(start_block * block_size)
-        part_file.seek(start_block * block_size)
-        part_file.write(out_data[data_start:data_end])
+      self.payload.payload_file.seek(start_block * block_size)
+      part_file.seek(start_block * block_size)
+      part_file.write(out_data[data_start:data_end])
 
       data_start += count
 
@@ -306,30 +291,6 @@
       raise PayloadError('%s: wrote fewer bytes (%d) than expected (%d)' %
                          (op_name, data_start, data_length))
 
-  def _ApplyMoveOperation(self, op, op_name, part_file):
-    """Applies a MOVE operation.
-
-    Note that this operation must read the whole block data from the input and
-    only then dump it, due to our in-place update semantics; otherwise, it
-    might clobber data midway through.
-
-    Args:
-      op: the operation object
-      op_name: name string for error reporting
-      part_file: the partition file object
-
-    Raises:
-      PayloadError if something goes wrong.
-    """
-    block_size = self.block_size
-
-    # Gather input raw data from src extents.
-    in_data = _ReadExtents(part_file, op.src_extents, block_size)
-
-    # Dump extracted data to dst extents.
-    _WriteExtents(part_file, in_data, op.dst_extents, block_size,
-                  '%s.dst_extents' % op_name)
-
   def _ApplyZeroOperation(self, op, op_name, part_file):
     """Applies a ZERO operation.
 
@@ -347,10 +308,8 @@
     # Iterate over the extents and write zero.
     # pylint: disable=unused-variable
     for ex, ex_name in common.ExtentIter(op.dst_extents, base_name):
-      # Only do actual writing if this is not a pseudo-extent.
-      if ex.start_block != common.PSEUDO_EXTENT_MARKER:
-        part_file.seek(ex.start_block * block_size)
-        part_file.write('\0' * (ex.num_blocks * block_size))
+      part_file.seek(ex.start_block * block_size)
+      part_file.write(b'\0' * (ex.num_blocks * block_size))
 
   def _ApplySourceCopyOperation(self, op, op_name, old_part_file,
                                 new_part_file):
@@ -439,12 +398,19 @@
       # Diff from source partition.
       old_file_name = '/dev/fd/%d' % old_part_file.fileno()
 
-      if op.type in (common.OpType.BSDIFF, common.OpType.SOURCE_BSDIFF,
-                     common.OpType.BROTLI_BSDIFF):
+      # In Python 3, file descriptors (fds) are not inherited by child
+      # processes by default. To pass the fds to the child processes, we need
+      # to mark them as inheritable and make the subprocess calls with the
+      # argument close_fds set to False.
+      if sys.version_info.major >= 3:
+        os.set_inheritable(new_part_file.fileno(), True)
+        os.set_inheritable(old_part_file.fileno(), True)
+
+      if op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.BROTLI_BSDIFF):
         # Invoke bspatch on partition file with extents args.
         bspatch_cmd = [self.bspatch_path, old_file_name, new_file_name,
                        patch_file_name, in_extents_arg, out_extents_arg]
-        subprocess.check_call(bspatch_cmd)
+        subprocess.check_call(bspatch_cmd, close_fds=False)
       elif op.type == common.OpType.PUFFDIFF:
         # Invoke puffpatch on partition file with extents args.
         puffpatch_cmd = [self.puffpatch_path,
@@ -454,14 +420,14 @@
                          "--patch_file=%s" % patch_file_name,
                          "--src_extents=%s" % in_extents_arg,
                          "--dst_extents=%s" % out_extents_arg]
-        subprocess.check_call(puffpatch_cmd)
+        subprocess.check_call(puffpatch_cmd, close_fds=False)
       else:
-        raise PayloadError("Unknown operation %s", op.type)
+        raise PayloadError("Unknown operation %s" % op.type)
 
       # Pad with zeros past the total output length.
       if pad_len:
         new_part_file.seek(pad_off)
-        new_part_file.write('\0' * pad_len)
+        new_part_file.write(b'\0' * pad_len)
     else:
       # Gather input raw data and write to a temp file.
       input_part_file = old_part_file if old_part_file else new_part_file
@@ -477,8 +443,7 @@
       with tempfile.NamedTemporaryFile(delete=False) as out_file:
         out_file_name = out_file.name
 
-      if op.type in (common.OpType.BSDIFF, common.OpType.SOURCE_BSDIFF,
-                     common.OpType.BROTLI_BSDIFF):
+      if op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.BROTLI_BSDIFF):
         # Invoke bspatch.
         bspatch_cmd = [self.bspatch_path, in_file_name, out_file_name,
                        patch_file_name]
@@ -492,7 +457,7 @@
                          "--patch_file=%s" % patch_file_name]
         subprocess.check_call(puffpatch_cmd)
       else:
-        raise PayloadError("Unknown operation %s", op.type)
+        raise PayloadError("Unknown operation %s" % op.type)
 
       # Read output.
       with open(out_file_name, 'rb') as out_file:
@@ -505,7 +470,7 @@
       # Write output back to partition, with padding.
       unaligned_out_len = len(out_data) % block_size
       if unaligned_out_len:
-        out_data += '\0' * (block_size - unaligned_out_len)
+        out_data += b'\0' * (block_size - unaligned_out_len)
       _WriteExtents(new_part_file, out_data, op.dst_extents, block_size,
                     '%s.dst_extents' % op_name)
 
@@ -520,10 +485,6 @@
                        new_part_file, part_size):
     """Applies a sequence of update operations to a partition.
 
-    This assumes an in-place update semantics for MOVE and BSDIFF, namely all
-    reads are performed first, then the data is processed and written back to
-    the same file.
-
     Args:
       operations: the sequence of operations
       base_name: the name of the operation sequence
@@ -541,13 +502,8 @@
       if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ,
                      common.OpType.REPLACE_XZ):
         self._ApplyReplaceOperation(op, op_name, data, new_part_file, part_size)
-      elif op.type == common.OpType.MOVE:
-        self._ApplyMoveOperation(op, op_name, new_part_file)
       elif op.type == common.OpType.ZERO:
         self._ApplyZeroOperation(op, op_name, new_part_file)
-      elif op.type == common.OpType.BSDIFF:
-        self._ApplyDiffOperation(op, op_name, data, new_part_file,
-                                 new_part_file)
       elif op.type == common.OpType.SOURCE_COPY:
         self._ApplySourceCopyOperation(op, op_name, old_part_file,
                                        new_part_file)
@@ -583,15 +539,8 @@
         _VerifySha256(old_part_file, old_part_info.hash,
                       'old ' + part_name, length=old_part_info.size)
       new_part_file_mode = 'r+b'
-      if self.minor_version == common.INPLACE_MINOR_PAYLOAD_VERSION:
-        # Copy the src partition to the dst one; make sure we don't truncate it.
-        shutil.copyfile(old_part_file_name, new_part_file_name)
-      elif self.minor_version >= common.SOURCE_MINOR_PAYLOAD_VERSION:
-        # In minor version >= 2, we don't want to copy the partitions, so
-        # instead just make the new partition file.
-        open(new_part_file_name, 'w').close()
-      else:
-        raise PayloadError("Unknown minor version: %d" % self.minor_version)
+      open(new_part_file_name, 'w').close()
+
     else:
       # We need to create/truncate the dst partition file.
       new_part_file_mode = 'w+b'
@@ -639,20 +588,11 @@
     install_operations = []
 
     manifest = self.payload.manifest
-    if self.payload.header.version == 1:
-      for real_name, proto_name in common.CROS_PARTITIONS:
-        new_part_info[real_name] = getattr(manifest, 'new_%s_info' % proto_name)
-        old_part_info[real_name] = getattr(manifest, 'old_%s_info' % proto_name)
-
-      install_operations.append((common.ROOTFS, manifest.install_operations))
-      install_operations.append((common.KERNEL,
-                                 manifest.kernel_install_operations))
-    else:
-      for part in manifest.partitions:
-        name = part.partition_name
-        new_part_info[name] = part.new_partition_info
-        old_part_info[name] = part.old_partition_info
-        install_operations.append((name, part.operations))
+    for part in manifest.partitions:
+      name = part.partition_name
+      new_part_info[name] = part.new_partition_info
+      old_part_info[name] = part.old_partition_info
+      install_operations.append((name, part.operations))
 
     part_names = set(new_part_info.keys())  # Equivalently, old_part_info.keys()
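
The inheritable-fd comment in _ApplyDiffOperation above is the key to keeping the /dev/fd/N trick working under Python 3. A condensed sketch of that pattern, with an illustrative helper and file names that are not part of this change:

    import os
    import subprocess
    import sys

    def run_with_inherited_fds(cmd, *files):
        """Run cmd, letting the child process access /dev/fd/N for files.

        Python 3 marks file descriptors non-inheritable by default, so the
        child would otherwise see dangling /dev/fd/N paths; the descriptors
        must be made inheritable and close_fds must be disabled.
        """
        if sys.version_info.major >= 3:
            for f in files:
                os.set_inheritable(f.fileno(), True)
        subprocess.check_call(cmd, close_fds=False)

    # Usage sketch, mirroring the bspatch invocation (extent args omitted):
    #   with open('old.img', 'rb') as old, open('new.img', 'r+b') as new:
    #       run_with_inherited_fds(
    #           ['bspatch', '/dev/fd/%d' % old.fileno(),
    #            '/dev/fd/%d' % new.fileno(), 'patch.bin'],
    #           old, new)
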
 
diff --git a/scripts/update_payload/checker.py b/scripts/update_payload/checker.py
index 6d17fbe..56a9370 100644
--- a/scripts/update_payload/checker.py
+++ b/scripts/update_payload/checker.py
@@ -24,6 +24,7 @@
   checker.Run(...)
 """
 
+from __future__ import absolute_import
 from __future__ import print_function
 
 import array
@@ -34,22 +35,22 @@
 import os
 import subprocess
 
+# pylint: disable=redefined-builtin
+from six.moves import range
+
 from update_payload import common
 from update_payload import error
 from update_payload import format_utils
 from update_payload import histogram
 from update_payload import update_metadata_pb2
 
-
 #
 # Constants.
 #
 
-_CHECK_DST_PSEUDO_EXTENTS = 'dst-pseudo-extents'
 _CHECK_MOVE_SAME_SRC_DST_BLOCK = 'move-same-src-dst-block'
 _CHECK_PAYLOAD_SIG = 'payload-sig'
 CHECKS_TO_DISABLE = (
-    _CHECK_DST_PSEUDO_EXTENTS,
     _CHECK_MOVE_SAME_SRC_DST_BLOCK,
     _CHECK_PAYLOAD_SIG,
 )
@@ -66,15 +67,14 @@
 # Supported minor version map to payload types allowed to be using them.
 _SUPPORTED_MINOR_VERSIONS = {
     0: (_TYPE_FULL,),
-    1: (_TYPE_DELTA,),
     2: (_TYPE_DELTA,),
     3: (_TYPE_DELTA,),
     4: (_TYPE_DELTA,),
     5: (_TYPE_DELTA,),
     6: (_TYPE_DELTA,),
+    7: (_TYPE_DELTA,),
 }
 
-_OLD_DELTA_USABLE_PART_SIZE = 2 * 1024 * 1024 * 1024
 
 #
 # Helper functions.
@@ -323,8 +323,6 @@
     self.allow_unhashed = allow_unhashed
 
     # Disable specific tests.
-    self.check_dst_pseudo_extents = (
-        _CHECK_DST_PSEUDO_EXTENTS not in disabled_tests)
     self.check_move_same_src_dst_block = (
         _CHECK_MOVE_SAME_SRC_DST_BLOCK not in disabled_tests)
     self.check_payload_sig = _CHECK_PAYLOAD_SIG not in disabled_tests
@@ -609,7 +607,7 @@
     """
     self.major_version = self.payload.header.version
 
-    part_sizes = collections.defaultdict(int, part_sizes)
+    part_sizes = part_sizes or collections.defaultdict(int)
     manifest = self.payload.manifest
     report.AddSection('manifest')
 
@@ -628,35 +626,23 @@
     self._CheckPresentIff(self.sigs_offset, self.sigs_size,
                           'signatures_offset', 'signatures_size', 'manifest')
 
-    if self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION:
-      for real_name, proto_name in common.CROS_PARTITIONS:
-        self.old_part_info[real_name] = self._CheckOptionalSubMsg(
-            manifest, 'old_%s_info' % proto_name, report)
-        self.new_part_info[real_name] = self._CheckMandatorySubMsg(
-            manifest, 'new_%s_info' % proto_name, report, 'manifest')
+    for part in manifest.partitions:
+      name = part.partition_name
+      self.old_part_info[name] = self._CheckOptionalSubMsg(
+          part, 'old_partition_info', report)
+      self.new_part_info[name] = self._CheckMandatorySubMsg(
+          part, 'new_partition_info', report, 'manifest.partitions')
 
-      # Check: old_kernel_info <==> old_rootfs_info.
-      self._CheckPresentIff(self.old_part_info[common.KERNEL].msg,
-                            self.old_part_info[common.ROOTFS].msg,
-                            'old_kernel_info', 'old_rootfs_info', 'manifest')
-    else:
-      for part in manifest.partitions:
-        name = part.partition_name
-        self.old_part_info[name] = self._CheckOptionalSubMsg(
-            part, 'old_partition_info', report)
-        self.new_part_info[name] = self._CheckMandatorySubMsg(
-            part, 'new_partition_info', report, 'manifest.partitions')
+    # Check: Old-style partition infos should not be specified.
+    for _, part in common.CROS_PARTITIONS:
+      self._CheckElemNotPresent(manifest, 'old_%s_info' % part, 'manifest')
+      self._CheckElemNotPresent(manifest, 'new_%s_info' % part, 'manifest')
 
-      # Check: Old-style partition infos should not be specified.
-      for _, part in common.CROS_PARTITIONS:
-        self._CheckElemNotPresent(manifest, 'old_%s_info' % part, 'manifest')
-        self._CheckElemNotPresent(manifest, 'new_%s_info' % part, 'manifest')
-
-      # Check: If old_partition_info is specified anywhere, it must be
-      # specified everywhere.
-      old_part_msgs = [part.msg for part in self.old_part_info.values() if part]
-      self._CheckPresentIffMany(old_part_msgs, 'old_partition_info',
-                                'manifest.partitions')
+    # Check: If old_partition_info is specified anywhere, it must be
+    # specified everywhere.
+    old_part_msgs = [part.msg for part in self.old_part_info.values() if part]
+    self._CheckPresentIffMany(old_part_msgs, 'old_partition_info',
+                              'manifest.partitions')
 
     is_delta = any(part and part.msg for part in self.old_part_info.values())
     if is_delta:
@@ -666,7 +652,7 @@
             'Apparent full payload contains old_{kernel,rootfs}_info.')
       self.payload_type = _TYPE_DELTA
 
-      for part, (msg, part_report) in self.old_part_info.iteritems():
+      for part, (msg, part_report) in self.old_part_info.items():
         # Check: {size, hash} present in old_{kernel,rootfs}_info.
         field = 'old_%s_info' % part
         self.old_fs_sizes[part] = self._CheckMandatoryField(msg, 'size',
@@ -687,7 +673,7 @@
       self.payload_type = _TYPE_FULL
 
     # Check: new_{kernel,rootfs}_info present; contains {size, hash}.
-    for part, (msg, part_report) in self.new_part_info.iteritems():
+    for part, (msg, part_report) in self.new_part_info.items():
       field = 'new_%s_info' % part
       self.new_fs_sizes[part] = self._CheckMandatoryField(msg, 'size',
                                                           part_report, field)
@@ -724,8 +710,7 @@
     self._CheckBlocksFitLength(length, total_blocks, self.block_size,
                                '%s: %s' % (op_name, length_name))
 
-  def _CheckExtents(self, extents, usable_size, block_counters, name,
-                    allow_pseudo=False, allow_signature=False):
+  def _CheckExtents(self, extents, usable_size, block_counters, name):
     """Checks a sequence of extents.
 
     Args:
@@ -733,8 +718,6 @@
       usable_size: The usable size of the partition to which the extents apply.
       block_counters: Array of counters corresponding to the number of blocks.
       name: The name of the extent block.
-      allow_pseudo: Whether or not pseudo block numbers are allowed.
-      allow_signature: Whether or not the extents are used for a signature.
 
     Returns:
       The total number of blocks in the extents.
@@ -755,20 +738,15 @@
       if num_blocks == 0:
         raise error.PayloadError('%s: extent length is zero.' % ex_name)
 
-      if start_block != common.PSEUDO_EXTENT_MARKER:
-        # Check: Make sure we're within the partition limit.
-        if usable_size and end_block * self.block_size > usable_size:
-          raise error.PayloadError(
-              '%s: extent (%s) exceeds usable partition size (%d).' %
-              (ex_name, common.FormatExtent(ex, self.block_size), usable_size))
+      # Check: Make sure we're within the partition limit.
+      if usable_size and end_block * self.block_size > usable_size:
+        raise error.PayloadError(
+            '%s: extent (%s) exceeds usable partition size (%d).' %
+            (ex_name, common.FormatExtent(ex, self.block_size), usable_size))
 
-        # Record block usage.
-        for i in xrange(start_block, end_block):
-          block_counters[i] += 1
-      elif not (allow_pseudo or (allow_signature and len(extents) == 1)):
-        # Pseudo-extents must be allowed explicitly, or otherwise be part of a
-        # signature operation (in which case there has to be exactly one).
-        raise error.PayloadError('%s: unexpected pseudo-extent.' % ex_name)
+      # Record block usage.
+      for i in range(start_block, end_block):
+        block_counters[i] += 1
 
       total_num_blocks += num_blocks
 
@@ -786,6 +764,11 @@
     Raises:
       error.PayloadError if any check fails.
     """
+    # Check: total_dst_blocks is not a floating-point value.
+    if isinstance(total_dst_blocks, float):
+      raise error.PayloadError('%s: contains invalid data type of '
+                               'total_dst_blocks.' % op_name)
+
     # Check: Does not contain src extents.
     if op.src_extents:
       raise error.PayloadError('%s: contains src_extents.' % op_name)
@@ -806,89 +789,6 @@
             'space (%d * %d).' %
             (op_name, data_length, total_dst_blocks, self.block_size))
 
-  def _CheckMoveOperation(self, op, data_offset, total_src_blocks,
-                          total_dst_blocks, op_name):
-    """Specific checks for MOVE operations.
-
-    Args:
-      op: The operation object from the manifest.
-      data_offset: The offset of a data blob for the operation.
-      total_src_blocks: Total number of blocks in src_extents.
-      total_dst_blocks: Total number of blocks in dst_extents.
-      op_name: Operation name for error reporting.
-
-    Raises:
-      error.PayloadError if any check fails.
-    """
-    # Check: No data_{offset,length}.
-    if data_offset is not None:
-      raise error.PayloadError('%s: contains data_{offset,length}.' % op_name)
-
-    # Check: total_src_blocks == total_dst_blocks.
-    if total_src_blocks != total_dst_blocks:
-      raise error.PayloadError(
-          '%s: total src blocks (%d) != total dst blocks (%d).' %
-          (op_name, total_src_blocks, total_dst_blocks))
-
-    # Check: For all i, i-th src block index != i-th dst block index.
-    i = 0
-    src_extent_iter = iter(op.src_extents)
-    dst_extent_iter = iter(op.dst_extents)
-    src_extent = dst_extent = None
-    src_idx = src_num = dst_idx = dst_num = 0
-    while i < total_src_blocks:
-      # Get the next source extent, if needed.
-      if not src_extent:
-        try:
-          src_extent = src_extent_iter.next()
-        except StopIteration:
-          raise error.PayloadError('%s: ran out of src extents (%d/%d).' %
-                                   (op_name, i, total_src_blocks))
-        src_idx = src_extent.start_block
-        src_num = src_extent.num_blocks
-
-      # Get the next dest extent, if needed.
-      if not dst_extent:
-        try:
-          dst_extent = dst_extent_iter.next()
-        except StopIteration:
-          raise error.PayloadError('%s: ran out of dst extents (%d/%d).' %
-                                   (op_name, i, total_dst_blocks))
-        dst_idx = dst_extent.start_block
-        dst_num = dst_extent.num_blocks
-
-      # Check: start block is not 0. See crbug/480751; there are still versions
-      # of update_engine which fail when seeking to 0 in PReadAll and PWriteAll,
-      # so we need to fail payloads that try to MOVE to/from block 0.
-      if src_idx == 0 or dst_idx == 0:
-        raise error.PayloadError(
-            '%s: MOVE operation cannot have extent with start block 0' %
-            op_name)
-
-      if self.check_move_same_src_dst_block and src_idx == dst_idx:
-        raise error.PayloadError(
-            '%s: src/dst block number %d is the same (%d).' %
-            (op_name, i, src_idx))
-
-      advance = min(src_num, dst_num)
-      i += advance
-
-      src_idx += advance
-      src_num -= advance
-      if src_num == 0:
-        src_extent = None
-
-      dst_idx += advance
-      dst_num -= advance
-      if dst_num == 0:
-        dst_extent = None
-
-    # Make sure we've exhausted all src/dst extents.
-    if src_extent:
-      raise error.PayloadError('%s: excess src blocks.' % op_name)
-    if dst_extent:
-      raise error.PayloadError('%s: excess dst blocks.' % op_name)
-
   def _CheckZeroOperation(self, op, op_name):
     """Specific checks for ZERO operations.
 
@@ -908,7 +808,7 @@
       raise error.PayloadError('%s: contains data_offset.' % op_name)
 
   def _CheckAnyDiffOperation(self, op, data_length, total_dst_blocks, op_name):
-    """Specific checks for BSDIFF, SOURCE_BSDIFF, PUFFDIFF and BROTLI_BSDIFF
+    """Specific checks for SOURCE_BSDIFF, PUFFDIFF and BROTLI_BSDIFF
        operations.
 
     Args:
@@ -933,8 +833,7 @@
            total_dst_blocks * self.block_size))
 
     # Check the existence of src_length and dst_length for legacy bsdiffs.
-    if (op.type == common.OpType.BSDIFF or
-        (op.type == common.OpType.SOURCE_BSDIFF and self.minor_version <= 3)):
+    if op.type == common.OpType.SOURCE_BSDIFF and self.minor_version <= 3:
       if not op.HasField('src_length') or not op.HasField('dst_length'):
         raise error.PayloadError('%s: require {src,dst}_length.' % op_name)
     else:
@@ -983,21 +882,19 @@
     if self.minor_version >= 3 and op.src_sha256_hash is None:
       raise error.PayloadError('%s: source hash missing.' % op_name)
 
-  def _CheckOperation(self, op, op_name, is_last, old_block_counters,
-                      new_block_counters, old_usable_size, new_usable_size,
-                      prev_data_offset, allow_signature, blob_hash_counts):
+  def _CheckOperation(self, op, op_name, old_block_counters, new_block_counters,
+                      old_usable_size, new_usable_size, prev_data_offset,
+                      blob_hash_counts):
     """Checks a single update operation.
 
     Args:
       op: The operation object.
       op_name: Operation name string for error reporting.
-      is_last: Whether this is the last operation in the sequence.
       old_block_counters: Arrays of block read counters.
       new_block_counters: Arrays of block write counters.
       old_usable_size: The overall usable size for src data in bytes.
       new_usable_size: The overall usable size for dst data in bytes.
       prev_data_offset: Offset of last used data bytes.
-      allow_signature: Whether this may be a signature operation.
       blob_hash_counts: Counters for hashed/unhashed blobs.
 
     Returns:
@@ -1009,14 +906,10 @@
     # Check extents.
     total_src_blocks = self._CheckExtents(
         op.src_extents, old_usable_size, old_block_counters,
-        op_name + '.src_extents', allow_pseudo=True)
-    allow_signature_in_extents = (allow_signature and is_last and
-                                  op.type == common.OpType.REPLACE)
+        op_name + '.src_extents')
     total_dst_blocks = self._CheckExtents(
         op.dst_extents, new_usable_size, new_block_counters,
-        op_name + '.dst_extents',
-        allow_pseudo=(not self.check_dst_pseudo_extents),
-        allow_signature=allow_signature_in_extents)
+        op_name + '.dst_extents')
 
     # Check: data_offset present <==> data_length present.
     data_offset = self._CheckOptionalField(op, 'data_offset', None)
@@ -1052,9 +945,7 @@
             (op_name, common.FormatSha256(op.data_sha256_hash),
              common.FormatSha256(actual_hash.digest())))
     elif data_offset is not None:
-      if allow_signature_in_extents:
-        blob_hash_counts['signature'] += 1
-      elif self.allow_unhashed:
+      if self.allow_unhashed:
         blob_hash_counts['unhashed'] += 1
       else:
         raise error.PayloadError('%s: unhashed operation not allowed.' %
@@ -1068,19 +959,11 @@
             (op_name, data_offset, prev_data_offset))
 
     # Type-specific checks.
-    if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
+    if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ,
+                   common.OpType.REPLACE_XZ):
       self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
-    elif (op.type == common.OpType.REPLACE_XZ and
-          (self.minor_version >= 3 or
-           self.major_version >= common.BRILLO_MAJOR_PAYLOAD_VERSION)):
-      self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
-    elif op.type == common.OpType.MOVE and self.minor_version == 1:
-      self._CheckMoveOperation(op, data_offset, total_src_blocks,
-                               total_dst_blocks, op_name)
     elif op.type == common.OpType.ZERO and self.minor_version >= 4:
       self._CheckZeroOperation(op, op_name)
-    elif op.type == common.OpType.BSDIFF and self.minor_version == 1:
-      self._CheckAnyDiffOperation(op, data_length, total_dst_blocks, op_name)
     elif op.type == common.OpType.SOURCE_COPY and self.minor_version >= 2:
       self._CheckSourceCopyOperation(data_offset, total_src_blocks,
                                      total_dst_blocks, op_name)
@@ -1102,7 +985,7 @@
 
   def _SizeToNumBlocks(self, size):
     """Returns the number of blocks needed to contain a given byte size."""
-    return (size + self.block_size - 1) / self.block_size
+    return (size + self.block_size - 1) // self.block_size
 
   def _AllocBlockCounters(self, total_size):
     """Returns a freshly initialized array of block counters.
@@ -1122,7 +1005,7 @@
 
   def _CheckOperations(self, operations, report, base_name, old_fs_size,
                        new_fs_size, old_usable_size, new_usable_size,
-                       prev_data_offset, allow_signature):
+                       prev_data_offset):
     """Checks a sequence of update operations.
 
     Args:
@@ -1134,7 +1017,6 @@
       old_usable_size: The overall usable size of the old partition in bytes.
       new_usable_size: The overall usable size of the new partition in bytes.
       prev_data_offset: Offset of last used data bytes.
-      allow_signature: Whether this sequence may contain signature operations.
 
     Returns:
       The total data blob size used.
@@ -1149,9 +1031,7 @@
         common.OpType.REPLACE: 0,
         common.OpType.REPLACE_BZ: 0,
         common.OpType.REPLACE_XZ: 0,
-        common.OpType.MOVE: 0,
         common.OpType.ZERO: 0,
-        common.OpType.BSDIFF: 0,
         common.OpType.SOURCE_COPY: 0,
         common.OpType.SOURCE_BSDIFF: 0,
         common.OpType.PUFFDIFF: 0,
@@ -1162,8 +1042,6 @@
         common.OpType.REPLACE: 0,
         common.OpType.REPLACE_BZ: 0,
         common.OpType.REPLACE_XZ: 0,
-        # MOVE operations don't have blobs.
-        common.OpType.BSDIFF: 0,
         # SOURCE_COPY operations don't have blobs.
         common.OpType.SOURCE_BSDIFF: 0,
         common.OpType.PUFFDIFF: 0,
@@ -1174,8 +1052,6 @@
         'hashed': 0,
         'unhashed': 0,
     }
-    if allow_signature:
-      blob_hash_counts['signature'] = 0
 
     # Allocate old and new block counters.
     old_block_counters = (self._AllocBlockCounters(old_usable_size)
@@ -1188,16 +1064,14 @@
       op_num += 1
 
       # Check: Type is valid.
-      if op.type not in op_counts.keys():
+      if op.type not in op_counts:
         raise error.PayloadError('%s: invalid type (%d).' % (op_name, op.type))
       op_counts[op.type] += 1
 
-      is_last = op_num == len(operations)
       curr_data_used = self._CheckOperation(
-          op, op_name, is_last, old_block_counters, new_block_counters,
+          op, op_name, old_block_counters, new_block_counters,
           old_usable_size, new_usable_size,
-          prev_data_offset + total_data_used, allow_signature,
-          blob_hash_counts)
+          prev_data_offset + total_data_used, blob_hash_counts)
       if curr_data_used:
         op_blob_totals[op.type] += curr_data_used
         total_data_used += curr_data_used
@@ -1251,21 +1125,17 @@
     if not sigs.signatures:
       raise error.PayloadError('Signature block is empty.')
 
-    last_ops_section = (self.payload.manifest.kernel_install_operations or
-                        self.payload.manifest.install_operations)
-
-    # Only major version 1 has the fake signature OP at the end.
-    if self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION:
-      fake_sig_op = last_ops_section[-1]
+    # Check that the payload does not end with a signature operation blob
+    # (that layout was only used by major version 1).
+    last_partition = self.payload.manifest.partitions[-1]
+    if last_partition.operations:
+      last_op = last_partition.operations[-1]
       # Check: signatures_{offset,size} must match the last (fake) operation.
-      if not (fake_sig_op.type == common.OpType.REPLACE and
-              self.sigs_offset == fake_sig_op.data_offset and
-              self.sigs_size == fake_sig_op.data_length):
-        raise error.PayloadError('Signatures_{offset,size} (%d+%d) does not'
-                                 ' match last operation (%d+%d).' %
-                                 (self.sigs_offset, self.sigs_size,
-                                  fake_sig_op.data_offset,
-                                  fake_sig_op.data_length))
+      if (last_op.type == common.OpType.REPLACE and
+          last_op.data_offset == self.sigs_offset and
+          last_op.data_length == self.sigs_size):
+        raise error.PayloadError('It seems like the last operation is the '
+                                 'signature blob. This is an invalid payload.')
 
     # Compute the checksum of all data up to signature blob.
     # TODO(garnold) we're re-reading the whole data section into a string
@@ -1280,17 +1150,13 @@
       sig_report = report.AddSubReport(sig_name)
 
       # Check: Signature contains mandatory fields.
-      self._CheckMandatoryField(sig, 'version', sig_report, sig_name)
       self._CheckMandatoryField(sig, 'data', None, sig_name)
       sig_report.AddField('data len', len(sig.data))
 
       # Check: Signatures pertains to actual payload hash.
-      if sig.version == 1:
+      if sig.data:
         self._CheckSha256Signature(sig.data, pubkey_file_name,
                                    payload_hasher.digest(), sig_name)
-      else:
-        raise error.PayloadError('Unknown signature version (%d).' %
-                                 sig.version)
 
   def Run(self, pubkey_file_name=None, metadata_sig_file=None, metadata_size=0,
           part_sizes=None, report_out_file=None):
@@ -1344,62 +1210,38 @@
       self._CheckManifest(report, part_sizes)
       assert self.payload_type, 'payload type should be known by now'
 
-      manifest = self.payload.manifest
-
-      # Part 3: Examine partition operations.
-      install_operations = []
-      if self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION:
-        # partitions field should not ever exist in major version 1 payloads
-        self._CheckRepeatedElemNotPresent(manifest, 'partitions', 'manifest')
-
-        install_operations.append((common.ROOTFS, manifest.install_operations))
-        install_operations.append((common.KERNEL,
-                                   manifest.kernel_install_operations))
-
-      else:
-        self._CheckRepeatedElemNotPresent(manifest, 'install_operations',
+      # Make sure deprecated values are not present in the payload.
+      for field in ('install_operations', 'kernel_install_operations'):
+        self._CheckRepeatedElemNotPresent(self.payload.manifest, field,
                                           'manifest')
-        self._CheckRepeatedElemNotPresent(manifest, 'kernel_install_operations',
-                                          'manifest')
-
-        for update in manifest.partitions:
-          install_operations.append((update.partition_name, update.operations))
+      for field in ('old_kernel_info', 'old_rootfs_info',
+                    'new_kernel_info', 'new_rootfs_info'):
+        self._CheckElemNotPresent(self.payload.manifest, field, 'manifest')
 
       total_blob_size = 0
-      for part, operations in install_operations:
+      for part, operations in ((p.partition_name, p.operations)
+                               for p in self.payload.manifest.partitions):
         report.AddSection('%s operations' % part)
 
         new_fs_usable_size = self.new_fs_sizes[part]
         old_fs_usable_size = self.old_fs_sizes[part]
 
-        if part_sizes.get(part, None):
+        if part_sizes is not None and part_sizes.get(part, None):
           new_fs_usable_size = old_fs_usable_size = part_sizes[part]
-        # Infer the usable partition size when validating rootfs operations:
-        # - If rootfs partition size was provided, use that.
-        # - Otherwise, if this is an older delta (minor version < 2), stick with
-        #   a known constant size. This is necessary because older deltas may
-        #   exceed the filesystem size when moving data blocks around.
-        # - Otherwise, use the encoded filesystem size.
-        elif self.payload_type == _TYPE_DELTA and part == common.ROOTFS and \
-            self.minor_version in (None, 1):
-          new_fs_usable_size = old_fs_usable_size = _OLD_DELTA_USABLE_PART_SIZE
 
-        # TODO(garnold)(chromium:243559) only default to the filesystem size if
-        # no explicit size provided *and* the partition size is not embedded in
-        # the payload; see issue for more details.
+        # TODO(chromium:243559) only default to the filesystem size if no
+        # explicit size provided *and* the partition size is not embedded in the
+        # payload; see issue for more details.
         total_blob_size += self._CheckOperations(
             operations, report, '%s_install_operations' % part,
             self.old_fs_sizes[part], self.new_fs_sizes[part],
-            old_fs_usable_size, new_fs_usable_size, total_blob_size,
-            (self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION
-             and part == common.KERNEL))
+            old_fs_usable_size, new_fs_usable_size, total_blob_size)
 
       # Check: Operations data reach the end of the payload file.
       used_payload_size = self.payload.data_offset + total_blob_size
       # Major versions 2 and higher have a signature at the end, so it should be
       # considered in the total size of the image.
-      if (self.major_version >= common.BRILLO_MAJOR_PAYLOAD_VERSION and
-          self.sigs_size):
+      if self.sigs_size:
         used_payload_size += self.sigs_size
 
       if used_payload_size != payload_file_size:
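
The trimmed _SUPPORTED_MINOR_VERSIONS table earlier in this file now starts at minor version 2 (the in-place MOVE/BSDIFF format of version 1 is gone) and adds version 7. A self-contained sketch of the kind of check that table drives; the helper name and error messages are illustrative:

    _TYPE_FULL = 'full'
    _TYPE_DELTA = 'delta'

    SUPPORTED_MINOR_VERSIONS = {
        0: (_TYPE_FULL,),
        2: (_TYPE_DELTA,),
        3: (_TYPE_DELTA,),
        4: (_TYPE_DELTA,),
        5: (_TYPE_DELTA,),
        6: (_TYPE_DELTA,),
        7: (_TYPE_DELTA,),
    }

    def check_minor_version(payload_type, minor_version):
        """Reject unsupported minor version / payload type combinations."""
        allowed = SUPPORTED_MINOR_VERSIONS.get(minor_version)
        if allowed is None:
            raise ValueError('unsupported minor version %d' % minor_version)
        if payload_type not in allowed:
            raise ValueError('minor version %d not allowed for %s payloads' %
                             (minor_version, payload_type))
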
diff --git a/scripts/update_payload/checker_unittest.py b/scripts/update_payload/checker_unittest.py
index 7e52233..993b785 100755
--- a/scripts/update_payload/checker_unittest.py
+++ b/scripts/update_payload/checker_unittest.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python
 #
 # Copyright (C) 2013 The Android Open Source Project
 #
@@ -17,35 +17,36 @@
 
 """Unit testing checker.py."""
 
-from __future__ import print_function
+# Disable check for function names to avoid errors based on old code
+# pylint: disable-msg=invalid-name
+
+from __future__ import absolute_import
 
 import array
 import collections
-import cStringIO
 import hashlib
+import io
 import itertools
 import os
 import unittest
 
-# pylint cannot find mox.
-# pylint: disable=F0401
-import mox
+from six.moves import zip
+
+import mock  # pylint: disable=import-error
 
 from update_payload import checker
 from update_payload import common
 from update_payload import test_utils
 from update_payload import update_metadata_pb2
 from update_payload.error import PayloadError
-from update_payload.payload import Payload # Avoid name conflicts later.
+from update_payload.payload import Payload  # Avoid name conflicts later.
 
 
 def _OpTypeByName(op_name):
-  """Returns the type of an operation from itsname."""
+  """Returns the type of an operation from its name."""
   op_name_to_type = {
       'REPLACE': common.OpType.REPLACE,
       'REPLACE_BZ': common.OpType.REPLACE_BZ,
-      'MOVE': common.OpType.MOVE,
-      'BSDIFF': common.OpType.BSDIFF,
       'SOURCE_COPY': common.OpType.SOURCE_COPY,
       'SOURCE_BSDIFF': common.OpType.SOURCE_BSDIFF,
       'ZERO': common.OpType.ZERO,
@@ -65,7 +66,7 @@
   if checker_init_dargs is None:
     checker_init_dargs = {}
 
-  payload_file = cStringIO.StringIO()
+  payload_file = io.BytesIO()
   payload_gen_write_to_file_func(payload_file, **payload_gen_dargs)
   payload_file.seek(0)
   payload = Payload(payload_file)
@@ -75,7 +76,7 @@
 
 def _GetPayloadCheckerWithData(payload_gen):
   """Returns a payload checker from a given payload generator."""
-  payload_file = cStringIO.StringIO()
+  payload_file = io.BytesIO()
   payload_gen.WriteToFile(payload_file)
   payload_file.seek(0)
   payload = Payload(payload_file)
@@ -89,7 +90,7 @@
 # pylint: disable=W0212
 # Don't bark about missing members of classes you cannot import.
 # pylint: disable=E1101
-class PayloadCheckerTest(mox.MoxTestBase):
+class PayloadCheckerTest(unittest.TestCase):
   """Tests the PayloadChecker class.
 
   In addition to ordinary testFoo() methods, which are automatically invoked by
@@ -102,11 +103,42 @@
   all such tests is done in AddAllParametricTests().
   """
 
+  def setUp(self):
+    """setUp function for unittest testcase"""
+    self.mock_checks = []
+
+  def tearDown(self):
+    """tearDown function for unittest testcase"""
+    # Verify that all mock functions were called.
+    for check in self.mock_checks:
+      check.mock_fn.assert_called_once_with(*check.exp_args, **check.exp_kwargs)
+
+  class MockChecksAtTearDown(object):
+    """Mock data storage.
+
+    This class stores a mock function and its arguments so that they can be
+    checked at a later point.
+    """
+    def __init__(self, mock_fn, *args, **kwargs):
+      self.mock_fn = mock_fn
+      self.exp_args = args
+      self.exp_kwargs = kwargs
+
+  def addPostCheckForMockFunction(self, mock_fn, *args, **kwargs):
+    """Store a mock function and its arguments to self.mock_checks
+
+    Args:
+      mock_fn: mock function object
+      args: expected positional arguments for the mock_fn
+      kwargs: expected named arguments for the mock_fn
+    """
+    self.mock_checks.append(self.MockChecksAtTearDown(mock_fn, *args, **kwargs))
+
   def MockPayload(self):
     """Create a mock payload object, complete with a mock manifest."""
-    payload = self.mox.CreateMock(Payload)
+    payload = mock.create_autospec(Payload)
     payload.is_init = True
-    payload.manifest = self.mox.CreateMock(
+    payload.manifest = mock.create_autospec(
         update_metadata_pb2.DeltaArchiveManifest)
     return payload
 
@@ -175,19 +207,20 @@
     subreport = 'fake subreport'
 
     # Create a mock message.
-    msg = self.mox.CreateMock(update_metadata_pb2._message.Message)
-    msg.HasField(name).AndReturn(is_present)
+    msg = mock.create_autospec(update_metadata_pb2._message.Message)
+    self.addPostCheckForMockFunction(msg.HasField, name)
+    msg.HasField.return_value = is_present
     setattr(msg, name, val)
-
     # Create a mock report.
-    report = self.mox.CreateMock(checker._PayloadReport)
+    report = mock.create_autospec(checker._PayloadReport)
     if is_present:
       if is_submsg:
-        report.AddSubReport(name).AndReturn(subreport)
+        self.addPostCheckForMockFunction(report.AddSubReport, name)
+        report.AddSubReport.return_value = subreport
       else:
-        report.AddField(name, convert(val), linebreak=linebreak, indent=indent)
+        self.addPostCheckForMockFunction(report.AddField, name, convert(val),
+                                         linebreak=linebreak, indent=indent)
 
-    self.mox.ReplayAll()
     return (msg, report, subreport, name, val)
 
   def DoAddElemTest(self, is_present, is_mandatory, is_submsg, convert,
@@ -213,9 +246,9 @@
     else:
       ret_val, ret_subreport = checker.PayloadChecker._CheckElem(*args,
                                                                  **kwargs)
-      self.assertEquals(val if is_present else None, ret_val)
-      self.assertEquals(subreport if is_present and is_submsg else None,
-                        ret_subreport)
+      self.assertEqual(val if is_present else None, ret_val)
+      self.assertEqual(subreport if is_present and is_submsg else None,
+                       ret_subreport)
 
   def DoAddFieldTest(self, is_mandatory, is_present, convert, linebreak,
                      indent):
@@ -245,7 +278,7 @@
       self.assertRaises(PayloadError, tested_func, *args, **kwargs)
     else:
       ret_val = tested_func(*args, **kwargs)
-      self.assertEquals(val if is_present else None, ret_val)
+      self.assertEqual(val if is_present else None, ret_val)
 
   def DoAddSubMsgTest(self, is_mandatory, is_present):
     """Parametrized testing of _Check{Mandatory,Optional}SubMsg().
@@ -269,8 +302,8 @@
       self.assertRaises(PayloadError, tested_func, *args)
     else:
       ret_val, ret_subreport = tested_func(*args)
-      self.assertEquals(val if is_present else None, ret_val)
-      self.assertEquals(subreport if is_present else None, ret_subreport)
+      self.assertEqual(val if is_present else None, ret_val)
+      self.assertEqual(subreport if is_present else None, ret_subreport)
 
   def testCheckPresentIff(self):
     """Tests _CheckPresentIff()."""
@@ -296,15 +329,14 @@
       returned_signed_hash: The signed hash data retuned by openssl.
       expected_signed_hash: The signed hash data to compare against.
     """
-    try:
-      # Stub out the subprocess invocation.
-      self.mox.StubOutWithMock(checker.PayloadChecker, '_Run')
+    # Stub out the subprocess invocation.
+    with mock.patch.object(checker.PayloadChecker, '_Run') \
+         as mock_payload_checker:
       if expect_subprocess_call:
-        checker.PayloadChecker._Run(
-            mox.IsA(list), send_data=sig_data).AndReturn(
-                (sig_asn1_header + returned_signed_hash, None))
+        mock_payload_checker([], send_data=sig_data)
+        mock_payload_checker.return_value = (
+            sig_asn1_header + returned_signed_hash, None)
 
-      self.mox.ReplayAll()
       if expect_pass:
         self.assertIsNone(checker.PayloadChecker._CheckSha256Signature(
             sig_data, 'foo', expected_signed_hash, 'bar'))
@@ -312,13 +344,11 @@
         self.assertRaises(PayloadError,
                           checker.PayloadChecker._CheckSha256Signature,
                           sig_data, 'foo', expected_signed_hash, 'bar')
-    finally:
-      self.mox.UnsetStubs()
 
   def testCheckSha256Signature_Pass(self):
     """Tests _CheckSha256Signature(); pass case."""
     sig_data = 'fake-signature'.ljust(256)
-    signed_hash = hashlib.sha256('fake-data').digest()
+    signed_hash = hashlib.sha256(b'fake-data').digest()
     self.DoCheckSha256SignatureTest(True, True, sig_data,
                                     common.SIG_ASN1_HEADER, signed_hash,
                                     signed_hash)
@@ -326,7 +356,7 @@
   def testCheckSha256Signature_FailBadSignature(self):
     """Tests _CheckSha256Signature(); fails due to malformed signature."""
     sig_data = 'fake-signature'  # Malformed (not 256 bytes in length).
-    signed_hash = hashlib.sha256('fake-data').digest()
+    signed_hash = hashlib.sha256(b'fake-data').digest()
     self.DoCheckSha256SignatureTest(False, False, sig_data,
                                     common.SIG_ASN1_HEADER, signed_hash,
                                     signed_hash)
@@ -334,7 +364,7 @@
   def testCheckSha256Signature_FailBadOutputLength(self):
     """Tests _CheckSha256Signature(); fails due to unexpected output length."""
     sig_data = 'fake-signature'.ljust(256)
-    signed_hash = 'fake-hash'  # Malformed (not 32 bytes in length).
+    signed_hash = b'fake-hash'  # Malformed (not 32 bytes in length).
     self.DoCheckSha256SignatureTest(False, True, sig_data,
                                     common.SIG_ASN1_HEADER, signed_hash,
                                     signed_hash)
@@ -342,16 +372,16 @@
   def testCheckSha256Signature_FailBadAsnHeader(self):
     """Tests _CheckSha256Signature(); fails due to bad ASN1 header."""
     sig_data = 'fake-signature'.ljust(256)
-    signed_hash = hashlib.sha256('fake-data').digest()
-    bad_asn1_header = 'bad-asn-header'.ljust(len(common.SIG_ASN1_HEADER))
+    signed_hash = hashlib.sha256(b'fake-data').digest()
+    bad_asn1_header = b'bad-asn-header'.ljust(len(common.SIG_ASN1_HEADER))
     self.DoCheckSha256SignatureTest(False, True, sig_data, bad_asn1_header,
                                     signed_hash, signed_hash)
 
   def testCheckSha256Signature_FailBadHash(self):
     """Tests _CheckSha256Signature(); fails due to bad hash returned."""
     sig_data = 'fake-signature'.ljust(256)
-    expected_signed_hash = hashlib.sha256('fake-data').digest()
-    returned_signed_hash = hashlib.sha256('bad-fake-data').digest()
+    expected_signed_hash = hashlib.sha256(b'fake-data').digest()
+    returned_signed_hash = hashlib.sha256(b'bad-fake-data').digest()
     self.DoCheckSha256SignatureTest(False, True, sig_data,
                                     common.SIG_ASN1_HEADER,
                                     expected_signed_hash, returned_signed_hash)
@@ -429,10 +459,10 @@
       payload_gen.SetBlockSize(test_utils.KiB(4))
 
     # Add some operations.
-    payload_gen.AddOperation(False, common.OpType.MOVE,
+    payload_gen.AddOperation(common.ROOTFS, common.OpType.SOURCE_COPY,
                              src_extents=[(0, 16), (16, 497)],
                              dst_extents=[(16, 496), (0, 16)])
-    payload_gen.AddOperation(True, common.OpType.MOVE,
+    payload_gen.AddOperation(common.KERNEL, common.OpType.SOURCE_COPY,
                              src_extents=[(0, 8), (8, 8)],
                              dst_extents=[(8, 8), (0, 8)])
 
@@ -457,21 +487,23 @@
     # Add old kernel/rootfs partition info, as required.
     if fail_mismatched_oki_ori or fail_old_kernel_fs_size or fail_bad_oki:
       oki_hash = (None if fail_bad_oki
-                  else hashlib.sha256('fake-oki-content').digest())
-      payload_gen.SetPartInfo(True, False, old_kernel_fs_size, oki_hash)
+                  else hashlib.sha256(b'fake-oki-content').digest())
+      payload_gen.SetPartInfo(common.KERNEL, False, old_kernel_fs_size,
+                              oki_hash)
     if not fail_mismatched_oki_ori and (fail_old_rootfs_fs_size or
                                         fail_bad_ori):
       ori_hash = (None if fail_bad_ori
-                  else hashlib.sha256('fake-ori-content').digest())
-      payload_gen.SetPartInfo(False, False, old_rootfs_fs_size, ori_hash)
+                  else hashlib.sha256(b'fake-ori-content').digest())
+      payload_gen.SetPartInfo(common.ROOTFS, False, old_rootfs_fs_size,
+                              ori_hash)
 
     # Add new kernel/rootfs partition info.
     payload_gen.SetPartInfo(
-        True, True, new_kernel_fs_size,
-        None if fail_bad_nki else hashlib.sha256('fake-nki-content').digest())
+        common.KERNEL, True, new_kernel_fs_size,
+        None if fail_bad_nki else hashlib.sha256(b'fake-nki-content').digest())
     payload_gen.SetPartInfo(
-        False, True, new_rootfs_fs_size,
-        None if fail_bad_nri else hashlib.sha256('fake-nri-content').digest())
+        common.ROOTFS, True, new_rootfs_fs_size,
+        None if fail_bad_nri else hashlib.sha256(b'fake-nri-content').digest())
 
     # Set the minor version.
     payload_gen.SetMinorVersion(0)
@@ -518,28 +550,11 @@
 
     # Passes w/ all real extents.
     extents = self.NewExtentList((0, 4), (8, 3), (1024, 16))
-    self.assertEquals(
+    self.assertEqual(
         23,
         payload_checker._CheckExtents(extents, (1024 + 16) * block_size,
                                       collections.defaultdict(int), 'foo'))
 
-    # Passes w/ pseudo-extents (aka sparse holes).
-    extents = self.NewExtentList((0, 4), (common.PSEUDO_EXTENT_MARKER, 5),
-                                 (8, 3))
-    self.assertEquals(
-        12,
-        payload_checker._CheckExtents(extents, (1024 + 16) * block_size,
-                                      collections.defaultdict(int), 'foo',
-                                      allow_pseudo=True))
-
-    # Passes w/ pseudo-extent due to a signature.
-    extents = self.NewExtentList((common.PSEUDO_EXTENT_MARKER, 2))
-    self.assertEquals(
-        2,
-        payload_checker._CheckExtents(extents, (1024 + 16) * block_size,
-                                      collections.defaultdict(int), 'foo',
-                                      allow_signature=True))
-
     # Fails, extent missing a start block.
     extents = self.NewExtentList((-1, 4), (8, 3), (1024, 16))
     self.assertRaises(
@@ -570,34 +585,34 @@
     block_size = payload_checker.block_size
     data_length = 10000
 
-    op = self.mox.CreateMock(
-        update_metadata_pb2.InstallOperation)
+    op = mock.create_autospec(update_metadata_pb2.InstallOperation)
     op.type = common.OpType.REPLACE
 
     # Pass.
     op.src_extents = []
     self.assertIsNone(
         payload_checker._CheckReplaceOperation(
-            op, data_length, (data_length + block_size - 1) / block_size,
+            op, data_length, (data_length + block_size - 1) // block_size,
             'foo'))
 
     # Fail, src extents found.
     op.src_extents = ['bar']
     self.assertRaises(
         PayloadError, payload_checker._CheckReplaceOperation,
-        op, data_length, (data_length + block_size - 1) / block_size, 'foo')
+        op, data_length, (data_length + block_size - 1) // block_size, 'foo')
 
     # Fail, missing data.
     op.src_extents = []
     self.assertRaises(
         PayloadError, payload_checker._CheckReplaceOperation,
-        op, None, (data_length + block_size - 1) / block_size, 'foo')
+        op, None, (data_length + block_size - 1) // block_size, 'foo')
 
     # Fail, length / block number mismatch.
     op.src_extents = ['bar']
     self.assertRaises(
         PayloadError, payload_checker._CheckReplaceOperation,
-        op, data_length, (data_length + block_size - 1) / block_size + 1, 'foo')
+        op, data_length, (data_length + block_size - 1) // block_size + 1,
+        'foo')
 
   def testCheckReplaceBzOperation(self):
     """Tests _CheckReplaceOperation() where op.type == REPLACE_BZ."""
@@ -605,7 +620,7 @@
     block_size = payload_checker.block_size
     data_length = block_size * 3
 
-    op = self.mox.CreateMock(
+    op = mock.create_autospec(
         update_metadata_pb2.InstallOperation)
     op.type = common.OpType.REPLACE_BZ
 
@@ -613,25 +628,32 @@
     op.src_extents = []
     self.assertIsNone(
         payload_checker._CheckReplaceOperation(
-            op, data_length, (data_length + block_size - 1) / block_size + 5,
+            op, data_length, (data_length + block_size - 1) // block_size + 5,
             'foo'))
 
     # Fail, src extents found.
     op.src_extents = ['bar']
     self.assertRaises(
         PayloadError, payload_checker._CheckReplaceOperation,
-        op, data_length, (data_length + block_size - 1) / block_size + 5, 'foo')
+        op, data_length, (data_length + block_size - 1) // block_size + 5,
+        'foo')
 
     # Fail, missing data.
     op.src_extents = []
     self.assertRaises(
         PayloadError, payload_checker._CheckReplaceOperation,
-        op, None, (data_length + block_size - 1) / block_size, 'foo')
+        op, None, (data_length + block_size - 1) // block_size, 'foo')
 
     # Fail, too few blocks to justify BZ.
     op.src_extents = []
     self.assertRaises(
         PayloadError, payload_checker._CheckReplaceOperation,
+        op, data_length, (data_length + block_size - 1) // block_size, 'foo')
+
+    # Fail, total_dst_blocks is a floating point value.
+    op.src_extents = []
+    self.assertRaises(
+        PayloadError, payload_checker._CheckReplaceOperation,
         op, data_length, (data_length + block_size - 1) / block_size, 'foo')
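
The new failure case exercises Python 3's true-division semantics: `/`
always yields a float, so block counts must be computed with `//`. A quick
standalone illustration with arbitrary values:

# Sketch: `/` produces a float, so the ceiling-division idiom needs `//`.
block_size = 4096
data_length = 3 * block_size

blocks_ok = (data_length + block_size - 1) // block_size   # 3, an int
blocks_bad = (data_length + block_size - 1) / block_size   # ~3.9998, a float

assert isinstance(blocks_ok, int) and blocks_ok == 3
assert isinstance(blocks_bad, float)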
 
   def testCheckReplaceXzOperation(self):
@@ -640,7 +662,7 @@
     block_size = payload_checker.block_size
     data_length = block_size * 3
 
-    op = self.mox.CreateMock(
+    op = mock.create_autospec(
         update_metadata_pb2.InstallOperation)
     op.type = common.OpType.REPLACE_XZ
 
@@ -648,153 +670,34 @@
     op.src_extents = []
     self.assertIsNone(
         payload_checker._CheckReplaceOperation(
-            op, data_length, (data_length + block_size - 1) / block_size + 5,
+            op, data_length, (data_length + block_size - 1) // block_size + 5,
             'foo'))
 
     # Fail, src extents found.
     op.src_extents = ['bar']
     self.assertRaises(
         PayloadError, payload_checker._CheckReplaceOperation,
-        op, data_length, (data_length + block_size - 1) / block_size + 5, 'foo')
+        op, data_length, (data_length + block_size - 1) // block_size + 5,
+        'foo')
 
     # Fail, missing data.
     op.src_extents = []
     self.assertRaises(
         PayloadError, payload_checker._CheckReplaceOperation,
-        op, None, (data_length + block_size - 1) / block_size, 'foo')
+        op, None, (data_length + block_size - 1) // block_size, 'foo')
 
     # Fail, too few blocks to justify XZ.
     op.src_extents = []
     self.assertRaises(
         PayloadError, payload_checker._CheckReplaceOperation,
+        op, data_length, (data_length + block_size - 1) // block_size, 'foo')
+
+    # Fail, total_dst_blocks is a floating point value.
+    op.src_extents = []
+    self.assertRaises(
+        PayloadError, payload_checker._CheckReplaceOperation,
         op, data_length, (data_length + block_size - 1) / block_size, 'foo')
 
-  def testCheckMoveOperation_Pass(self):
-    """Tests _CheckMoveOperation(); pass case."""
-    payload_checker = checker.PayloadChecker(self.MockPayload())
-    op = update_metadata_pb2.InstallOperation()
-    op.type = common.OpType.MOVE
-
-    self.AddToMessage(op.src_extents,
-                      self.NewExtentList((1, 4), (12, 2), (1024, 128)))
-    self.AddToMessage(op.dst_extents,
-                      self.NewExtentList((16, 128), (512, 6)))
-    self.assertIsNone(
-        payload_checker._CheckMoveOperation(op, None, 134, 134, 'foo'))
-
-  def testCheckMoveOperation_FailContainsData(self):
-    """Tests _CheckMoveOperation(); fails, message contains data."""
-    payload_checker = checker.PayloadChecker(self.MockPayload())
-    op = update_metadata_pb2.InstallOperation()
-    op.type = common.OpType.MOVE
-
-    self.AddToMessage(op.src_extents,
-                      self.NewExtentList((1, 4), (12, 2), (1024, 128)))
-    self.AddToMessage(op.dst_extents,
-                      self.NewExtentList((16, 128), (512, 6)))
-    self.assertRaises(
-        PayloadError, payload_checker._CheckMoveOperation,
-        op, 1024, 134, 134, 'foo')
-
-  def testCheckMoveOperation_FailInsufficientSrcBlocks(self):
-    """Tests _CheckMoveOperation(); fails, not enough actual src blocks."""
-    payload_checker = checker.PayloadChecker(self.MockPayload())
-    op = update_metadata_pb2.InstallOperation()
-    op.type = common.OpType.MOVE
-
-    self.AddToMessage(op.src_extents,
-                      self.NewExtentList((1, 4), (12, 2), (1024, 127)))
-    self.AddToMessage(op.dst_extents,
-                      self.NewExtentList((16, 128), (512, 6)))
-    self.assertRaises(
-        PayloadError, payload_checker._CheckMoveOperation,
-        op, None, 134, 134, 'foo')
-
-  def testCheckMoveOperation_FailInsufficientDstBlocks(self):
-    """Tests _CheckMoveOperation(); fails, not enough actual dst blocks."""
-    payload_checker = checker.PayloadChecker(self.MockPayload())
-    op = update_metadata_pb2.InstallOperation()
-    op.type = common.OpType.MOVE
-
-    self.AddToMessage(op.src_extents,
-                      self.NewExtentList((1, 4), (12, 2), (1024, 128)))
-    self.AddToMessage(op.dst_extents,
-                      self.NewExtentList((16, 128), (512, 5)))
-    self.assertRaises(
-        PayloadError, payload_checker._CheckMoveOperation,
-        op, None, 134, 134, 'foo')
-
-  def testCheckMoveOperation_FailExcessSrcBlocks(self):
-    """Tests _CheckMoveOperation(); fails, too many actual src blocks."""
-    payload_checker = checker.PayloadChecker(self.MockPayload())
-    op = update_metadata_pb2.InstallOperation()
-    op.type = common.OpType.MOVE
-
-    self.AddToMessage(op.src_extents,
-                      self.NewExtentList((1, 4), (12, 2), (1024, 128)))
-    self.AddToMessage(op.dst_extents,
-                      self.NewExtentList((16, 128), (512, 5)))
-    self.assertRaises(
-        PayloadError, payload_checker._CheckMoveOperation,
-        op, None, 134, 134, 'foo')
-    self.AddToMessage(op.src_extents,
-                      self.NewExtentList((1, 4), (12, 2), (1024, 129)))
-    self.AddToMessage(op.dst_extents,
-                      self.NewExtentList((16, 128), (512, 6)))
-    self.assertRaises(
-        PayloadError, payload_checker._CheckMoveOperation,
-        op, None, 134, 134, 'foo')
-
-  def testCheckMoveOperation_FailExcessDstBlocks(self):
-    """Tests _CheckMoveOperation(); fails, too many actual dst blocks."""
-    payload_checker = checker.PayloadChecker(self.MockPayload())
-    op = update_metadata_pb2.InstallOperation()
-    op.type = common.OpType.MOVE
-
-    self.AddToMessage(op.src_extents,
-                      self.NewExtentList((1, 4), (12, 2), (1024, 128)))
-    self.AddToMessage(op.dst_extents,
-                      self.NewExtentList((16, 128), (512, 7)))
-    self.assertRaises(
-        PayloadError, payload_checker._CheckMoveOperation,
-        op, None, 134, 134, 'foo')
-
-  def testCheckMoveOperation_FailStagnantBlocks(self):
-    """Tests _CheckMoveOperation(); fails, there are blocks that do not move."""
-    payload_checker = checker.PayloadChecker(self.MockPayload())
-    op = update_metadata_pb2.InstallOperation()
-    op.type = common.OpType.MOVE
-
-    self.AddToMessage(op.src_extents,
-                      self.NewExtentList((1, 4), (12, 2), (1024, 128)))
-    self.AddToMessage(op.dst_extents,
-                      self.NewExtentList((8, 128), (512, 6)))
-    self.assertRaises(
-        PayloadError, payload_checker._CheckMoveOperation,
-        op, None, 134, 134, 'foo')
-
-  def testCheckMoveOperation_FailZeroStartBlock(self):
-    """Tests _CheckMoveOperation(); fails, has extent with start block 0."""
-    payload_checker = checker.PayloadChecker(self.MockPayload())
-    op = update_metadata_pb2.InstallOperation()
-    op.type = common.OpType.MOVE
-
-    self.AddToMessage(op.src_extents,
-                      self.NewExtentList((0, 4), (12, 2), (1024, 128)))
-    self.AddToMessage(op.dst_extents,
-                      self.NewExtentList((8, 128), (512, 6)))
-    self.assertRaises(
-        PayloadError, payload_checker._CheckMoveOperation,
-        op, None, 134, 134, 'foo')
-
-    self.AddToMessage(op.src_extents,
-                      self.NewExtentList((1, 4), (12, 2), (1024, 128)))
-    self.AddToMessage(op.dst_extents,
-                      self.NewExtentList((0, 128), (512, 6)))
-    self.assertRaises(
-        PayloadError, payload_checker._CheckMoveOperation,
-        op, None, 134, 134, 'foo')
-
   def testCheckAnyDiff(self):
     """Tests _CheckAnyDiffOperation()."""
     payload_checker = checker.PayloadChecker(self.MockPayload())
@@ -832,8 +735,8 @@
     self.assertRaises(PayloadError, payload_checker._CheckSourceCopyOperation,
                       None, 0, 1, 'foo')
 
-  def DoCheckOperationTest(self, op_type_name, is_last, allow_signature,
-                           allow_unhashed, fail_src_extents, fail_dst_extents,
+  def DoCheckOperationTest(self, op_type_name, allow_unhashed,
+                           fail_src_extents, fail_dst_extents,
                            fail_mismatched_data_offset_length,
                            fail_missing_dst_extents, fail_src_length,
                            fail_dst_length, fail_data_hash,
@@ -841,10 +744,8 @@
     """Parametric testing of _CheckOperation().
 
     Args:
-      op_type_name: 'REPLACE', 'REPLACE_BZ', 'REPLACE_XZ', 'MOVE', 'BSDIFF',
+      op_type_name: 'REPLACE', 'REPLACE_BZ', 'REPLACE_XZ',
         'SOURCE_COPY', 'SOURCE_BSDIFF', 'BROTLI_BSDIFF' or 'PUFFDIFF'.
-      is_last: Whether we're testing the last operation in a sequence.
-      allow_signature: Whether we're testing a signature-capable operation.
       allow_unhashed: Whether data blobs are allowed to go unhashed.
       fail_src_extents: Tamper with src extents.
       fail_dst_extents: Tamper with dst extents.
@@ -869,9 +770,9 @@
     old_part_size = test_utils.MiB(4)
     new_part_size = test_utils.MiB(8)
     old_block_counters = array.array(
-        'B', [0] * ((old_part_size + block_size - 1) / block_size))
+        'B', [0] * ((old_part_size + block_size - 1) // block_size))
     new_block_counters = array.array(
-        'B', [0] * ((new_part_size + block_size - 1) / block_size))
+        'B', [0] * ((new_part_size + block_size - 1) // block_size))
     prev_data_offset = 1876
     blob_hash_counts = collections.defaultdict(int)
 
@@ -880,8 +781,7 @@
     op.type = op_type
 
     total_src_blocks = 0
-    if op_type in (common.OpType.MOVE, common.OpType.BSDIFF,
-                   common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF,
+    if op_type in (common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF,
                    common.OpType.PUFFDIFF, common.OpType.BROTLI_BSDIFF):
       if fail_src_extents:
         self.AddToMessage(op.src_extents,
@@ -891,12 +791,9 @@
                           self.NewExtentList((1, 16)))
         total_src_blocks = 16
 
-    # TODO(tbrindus): add major version 2 tests.
-    payload_checker.major_version = common.CHROMEOS_MAJOR_PAYLOAD_VERSION
+    payload_checker.major_version = common.BRILLO_MAJOR_PAYLOAD_VERSION
     if op_type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
       payload_checker.minor_version = 0
-    elif op_type in (common.OpType.MOVE, common.OpType.BSDIFF):
-      payload_checker.minor_version = 2 if fail_bad_minor_version else 1
     elif op_type in (common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF):
       payload_checker.minor_version = 1 if fail_bad_minor_version else 2
     if op_type == common.OpType.REPLACE_XZ:
@@ -907,7 +804,7 @@
     elif op_type == common.OpType.PUFFDIFF:
       payload_checker.minor_version = 4 if fail_bad_minor_version else 5
 
-    if op_type not in (common.OpType.MOVE, common.OpType.SOURCE_COPY):
+    if op_type != common.OpType.SOURCE_COPY:
       if not fail_mismatched_data_offset_length:
         op.data_length = 16 * block_size - 8
       if fail_prev_data_offset:
@@ -916,20 +813,16 @@
         op.data_offset = prev_data_offset
 
       fake_data = 'fake-data'.ljust(op.data_length)
-      if not (allow_unhashed or (is_last and allow_signature and
-                                 op_type == common.OpType.REPLACE)):
-        if not fail_data_hash:
-          # Create a valid data blob hash.
-          op.data_sha256_hash = hashlib.sha256(fake_data).digest()
-          payload.ReadDataBlob(op.data_offset, op.data_length).AndReturn(
-              fake_data)
+      if not allow_unhashed and not fail_data_hash:
+        # Create a valid data blob hash.
+        op.data_sha256_hash = hashlib.sha256(fake_data.encode('utf-8')).digest()
+        payload.ReadDataBlob.return_value = fake_data.encode('utf-8')
 
       elif fail_data_hash:
         # Create an invalid data blob hash.
         op.data_sha256_hash = hashlib.sha256(
-            fake_data.replace(' ', '-')).digest()
-        payload.ReadDataBlob(op.data_offset, op.data_length).AndReturn(
-            fake_data)
+            fake_data.replace(' ', '-').encode('utf-8')).digest()
+        payload.ReadDataBlob.return_value = fake_data.encode('utf-8')
 
     total_dst_blocks = 0
     if not fail_missing_dst_extents:
@@ -944,8 +837,7 @@
     if total_src_blocks:
       if fail_src_length:
         op.src_length = total_src_blocks * block_size + 8
-      elif (op_type in (common.OpType.MOVE, common.OpType.BSDIFF,
-                        common.OpType.SOURCE_BSDIFF) and
+      elif (op_type == common.OpType.SOURCE_BSDIFF and
             payload_checker.minor_version <= 3):
         op.src_length = total_src_blocks * block_size
     elif fail_src_length:
@@ -955,19 +847,17 @@
     if total_dst_blocks:
       if fail_dst_length:
         op.dst_length = total_dst_blocks * block_size + 8
-      elif (op_type in (common.OpType.MOVE, common.OpType.BSDIFF,
-                        common.OpType.SOURCE_BSDIFF) and
+      elif (op_type == common.OpType.SOURCE_BSDIFF and
             payload_checker.minor_version <= 3):
         op.dst_length = total_dst_blocks * block_size
 
-    self.mox.ReplayAll()
     should_fail = (fail_src_extents or fail_dst_extents or
                    fail_mismatched_data_offset_length or
                    fail_missing_dst_extents or fail_src_length or
                    fail_dst_length or fail_data_hash or fail_prev_data_offset or
                    fail_bad_minor_version)
-    args = (op, 'foo', is_last, old_block_counters, new_block_counters,
-            old_part_size, new_part_size, prev_data_offset, allow_signature,
+    args = (op, 'foo', old_block_counters, new_block_counters,
+            old_part_size, new_part_size, prev_data_offset,
             blob_hash_counts)
     if should_fail:
       self.assertRaises(PayloadError, payload_checker._CheckOperation, *args)
@@ -1009,8 +899,9 @@
     if fail_nonexhaustive_full_update:
       rootfs_data_length -= block_size
 
-    payload_gen.AddOperation(False, rootfs_op_type,
-                             dst_extents=[(0, rootfs_data_length / block_size)],
+    payload_gen.AddOperation(common.ROOTFS, rootfs_op_type,
+                             dst_extents=
+                             [(0, rootfs_data_length // block_size)],
                              data_offset=0,
                              data_length=rootfs_data_length)
 
@@ -1020,17 +911,17 @@
                                              'allow_unhashed': True})
     payload_checker.payload_type = checker._TYPE_FULL
     report = checker._PayloadReport()
-
-    args = (payload_checker.payload.manifest.install_operations, report, 'foo',
-            0, rootfs_part_size, rootfs_part_size, rootfs_part_size, 0, False)
+    partition = next((p for p in payload_checker.payload.manifest.partitions
+                      if p.partition_name == common.ROOTFS), None)
+    args = (partition.operations, report, 'foo',
+            0, rootfs_part_size, rootfs_part_size, rootfs_part_size, 0)
     if fail_nonexhaustive_full_update:
       self.assertRaises(PayloadError, payload_checker._CheckOperations, *args)
     else:
       self.assertEqual(rootfs_data_length,
                        payload_checker._CheckOperations(*args))
 
-  def DoCheckSignaturesTest(self, fail_empty_sigs_blob, fail_missing_pseudo_op,
-                            fail_mismatched_pseudo_op, fail_sig_missing_fields,
+  def DoCheckSignaturesTest(self, fail_empty_sigs_blob, fail_sig_missing_fields,
                             fail_unknown_sig_version, fail_incorrect_sig):
     """Tests _CheckSignatures()."""
     # Generate a test payload. For this test, we only care about the signature
@@ -1041,20 +932,18 @@
     payload_gen.SetBlockSize(block_size)
     rootfs_part_size = test_utils.MiB(2)
     kernel_part_size = test_utils.KiB(16)
-    payload_gen.SetPartInfo(False, True, rootfs_part_size,
-                            hashlib.sha256('fake-new-rootfs-content').digest())
-    payload_gen.SetPartInfo(True, True, kernel_part_size,
-                            hashlib.sha256('fake-new-kernel-content').digest())
+    payload_gen.SetPartInfo(common.ROOTFS, True, rootfs_part_size,
+                            hashlib.sha256(b'fake-new-rootfs-content').digest())
+    payload_gen.SetPartInfo(common.KERNEL, True, kernel_part_size,
+                            hashlib.sha256(b'fake-new-kernel-content').digest())
     payload_gen.SetMinorVersion(0)
     payload_gen.AddOperationWithData(
-        False, common.OpType.REPLACE,
-        dst_extents=[(0, rootfs_part_size / block_size)],
+        common.ROOTFS, common.OpType.REPLACE,
+        dst_extents=[(0, rootfs_part_size // block_size)],
         data_blob=os.urandom(rootfs_part_size))
 
-    do_forge_pseudo_op = (fail_missing_pseudo_op or fail_mismatched_pseudo_op)
-    do_forge_sigs_data = (do_forge_pseudo_op or fail_empty_sigs_blob or
-                          fail_sig_missing_fields or fail_unknown_sig_version
-                          or fail_incorrect_sig)
+    do_forge_sigs_data = (fail_empty_sigs_blob or fail_sig_missing_fields or
+                          fail_unknown_sig_version or fail_incorrect_sig)
 
     sigs_data = None
     if do_forge_sigs_data:
@@ -1063,29 +952,19 @@
         if fail_sig_missing_fields:
           sig_data = None
         else:
-          sig_data = test_utils.SignSha256('fake-payload-content',
+          sig_data = test_utils.SignSha256(b'fake-payload-content',
                                            test_utils._PRIVKEY_FILE_NAME)
         sigs_gen.AddSig(5 if fail_unknown_sig_version else 1, sig_data)
 
       sigs_data = sigs_gen.ToBinary()
       payload_gen.SetSignatures(payload_gen.curr_offset, len(sigs_data))
 
-    if do_forge_pseudo_op:
-      assert sigs_data is not None, 'should have forged signatures blob by now'
-      sigs_len = len(sigs_data)
-      payload_gen.AddOperation(
-          False, common.OpType.REPLACE,
-          data_offset=payload_gen.curr_offset / 2,
-          data_length=sigs_len / 2,
-          dst_extents=[(0, (sigs_len / 2 + block_size - 1) / block_size)])
-
     # Generate payload (complete w/ signature) and create the test object.
     payload_checker = _GetPayloadChecker(
         payload_gen.WriteToFileWithData,
         payload_gen_dargs={
             'sigs_data': sigs_data,
-            'privkey_file_name': test_utils._PRIVKEY_FILE_NAME,
-            'do_add_pseudo_operation': not do_forge_pseudo_op})
+            'privkey_file_name': test_utils._PRIVKEY_FILE_NAME})
     payload_checker.payload_type = checker._TYPE_FULL
     report = checker._PayloadReport()
 
@@ -1095,8 +974,7 @@
         common.KERNEL: kernel_part_size
     })
 
-    should_fail = (fail_empty_sigs_blob or fail_missing_pseudo_op or
-                   fail_mismatched_pseudo_op or fail_sig_missing_fields or
+    should_fail = (fail_empty_sigs_blob or fail_sig_missing_fields or
                    fail_unknown_sig_version or fail_incorrect_sig)
     args = (report, test_utils._PUBKEY_FILE_NAME)
     if should_fail:
@@ -1120,7 +998,6 @@
 
     should_succeed = (
         (minor_version == 0 and payload_type == checker._TYPE_FULL) or
-        (minor_version == 1 and payload_type == checker._TYPE_DELTA) or
         (minor_version == 2 and payload_type == checker._TYPE_DELTA) or
         (minor_version == 3 and payload_type == checker._TYPE_DELTA) or
         (minor_version == 4 and payload_type == checker._TYPE_DELTA) or
@@ -1150,10 +1027,10 @@
     payload_gen.SetBlockSize(block_size)
     kernel_filesystem_size = test_utils.KiB(16)
     rootfs_filesystem_size = test_utils.MiB(2)
-    payload_gen.SetPartInfo(False, True, rootfs_filesystem_size,
-                            hashlib.sha256('fake-new-rootfs-content').digest())
-    payload_gen.SetPartInfo(True, True, kernel_filesystem_size,
-                            hashlib.sha256('fake-new-kernel-content').digest())
+    payload_gen.SetPartInfo(common.ROOTFS, True, rootfs_filesystem_size,
+                            hashlib.sha256(b'fake-new-rootfs-content').digest())
+    payload_gen.SetPartInfo(common.KERNEL, True, kernel_filesystem_size,
+                            hashlib.sha256(b'fake-new-kernel-content').digest())
     payload_gen.SetMinorVersion(0)
 
     rootfs_part_size = 0
@@ -1163,8 +1040,8 @@
     if fail_rootfs_part_size_exceeded:
       rootfs_op_size += block_size
     payload_gen.AddOperationWithData(
-        False, common.OpType.REPLACE,
-        dst_extents=[(0, rootfs_op_size / block_size)],
+        common.ROOTFS, common.OpType.REPLACE,
+        dst_extents=[(0, rootfs_op_size // block_size)],
         data_blob=os.urandom(rootfs_op_size))
 
     kernel_part_size = 0
@@ -1174,8 +1051,8 @@
     if fail_kernel_part_size_exceeded:
       kernel_op_size += block_size
     payload_gen.AddOperationWithData(
-        True, common.OpType.REPLACE,
-        dst_extents=[(0, kernel_op_size / block_size)],
+        common.KERNEL, common.OpType.REPLACE,
+        dst_extents=[(0, kernel_op_size // block_size)],
         data_blob=os.urandom(kernel_op_size))
 
     # Generate payload (complete w/ signature) and create the test object.
@@ -1186,16 +1063,14 @@
     else:
       use_block_size = block_size
 
-    # For the unittests 246 is the value that generated for the payload.
-    metadata_size = 246
+    # For the unittests, 237 is the value generated for the payload.
+    metadata_size = 237
     if fail_mismatched_metadata_size:
       metadata_size += 1
 
     kwargs = {
         'payload_gen_dargs': {
             'privkey_file_name': test_utils._PRIVKEY_FILE_NAME,
-            'do_add_pseudo_operation': True,
-            'is_pseudo_in_kernel': True,
             'padding': os.urandom(1024) if fail_excess_data else None},
         'checker_init_dargs': {
             'assert_type': 'delta' if fail_wrong_payload_type else 'full',
@@ -1207,7 +1082,7 @@
       payload_checker = _GetPayloadChecker(payload_gen.WriteToFileWithData,
                                            **kwargs)
 
-      kwargs = {
+      kwargs2 = {
           'pubkey_file_name': test_utils._PUBKEY_FILE_NAME,
           'metadata_size': metadata_size,
           'part_sizes': {
@@ -1219,15 +1094,15 @@
                      fail_rootfs_part_size_exceeded or
                      fail_kernel_part_size_exceeded)
       if should_fail:
-        self.assertRaises(PayloadError, payload_checker.Run, **kwargs)
+        self.assertRaises(PayloadError, payload_checker.Run, **kwargs2)
       else:
-        self.assertIsNone(payload_checker.Run(**kwargs))
+        self.assertIsNone(payload_checker.Run(**kwargs2))
+
 
 # This implements a generic API, hence the occasional unused args.
 # pylint: disable=W0613
-def ValidateCheckOperationTest(op_type_name, is_last, allow_signature,
-                               allow_unhashed, fail_src_extents,
-                               fail_dst_extents,
+def ValidateCheckOperationTest(op_type_name, allow_unhashed,
+                               fail_src_extents, fail_dst_extents,
                                fail_mismatched_data_offset_length,
                                fail_missing_dst_extents, fail_src_length,
                                fail_dst_length, fail_data_hash,
@@ -1244,8 +1119,8 @@
                                                  fail_bad_minor_version)):
     return False
 
-  # MOVE and SOURCE_COPY operations don't carry data.
-  if (op_type in (common.OpType.MOVE, common.OpType.SOURCE_COPY) and (
+  # SOURCE_COPY operation does not carry data.
+  if (op_type == common.OpType.SOURCE_COPY and (
       fail_mismatched_data_offset_length or fail_data_hash or
       fail_prev_data_offset)):
     return False
@@ -1274,14 +1149,14 @@
                (values) associated with them.
     validate_func: A function used for validating test argument combinations.
   """
-  for value_tuple in itertools.product(*arg_space.itervalues()):
-    run_dargs = dict(zip(arg_space.iterkeys(), value_tuple))
+  for value_tuple in itertools.product(*iter(arg_space.values())):
+    run_dargs = dict(zip(iter(arg_space.keys()), value_tuple))
     if validate_func and not validate_func(**run_dargs):
       continue
     run_method_name = 'Do%sTest' % tested_method_name
     test_method_name = 'test%s' % tested_method_name
-    for arg_key, arg_val in run_dargs.iteritems():
-      if arg_val or type(arg_val) is int:
+    for arg_key, arg_val in run_dargs.items():
+      if arg_val or isinstance(arg_val, int):
         test_method_name += '__%s=%s' % (arg_key, arg_val)
     setattr(PayloadCheckerTest, test_method_name,
             TestMethodBody(run_method_name, run_dargs))
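
AddParametricTests generates one test method per argument combination via
itertools.product. A compact sketch of the same pattern on a toy TestCase;
ToyTest and DoEchoTest are hypothetical names, not part of the patch.

# Sketch of the parametric test-generation loop shown above.
import itertools
import unittest


class ToyTest(unittest.TestCase):
  def DoEchoTest(self, flag, count):
    self.assertEqual(count if flag else 0, count * int(flag))


def AddToyParametricTests(arg_space):
  for value_tuple in itertools.product(*arg_space.values()):
    run_dargs = dict(zip(arg_space.keys(), value_tuple))
    name = 'testEcho__' + '__'.join(
        '%s=%s' % kv for kv in sorted(run_dargs.items()))
    # Bind run_dargs as a default argument so each method keeps its own args.
    setattr(ToyTest, name,
            lambda self, dargs=run_dargs: self.DoEchoTest(**dargs))


AddToyParametricTests({'flag': (True, False), 'count': (0, 3)})

if __name__ == '__main__':
  unittest.main()
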
@@ -1328,11 +1203,8 @@
   # Add all _CheckOperation() test cases.
   AddParametricTests('CheckOperation',
                      {'op_type_name': ('REPLACE', 'REPLACE_BZ', 'REPLACE_XZ',
-                                       'MOVE', 'BSDIFF', 'SOURCE_COPY',
-                                       'SOURCE_BSDIFF', 'PUFFDIFF',
-                                       'BROTLI_BSDIFF'),
-                      'is_last': (True, False),
-                      'allow_signature': (True, False),
+                                       'SOURCE_COPY', 'SOURCE_BSDIFF',
+                                       'PUFFDIFF', 'BROTLI_BSDIFF'),
                       'allow_unhashed': (True, False),
                       'fail_src_extents': (True, False),
                       'fail_dst_extents': (True, False),
@@ -1352,15 +1224,13 @@
   # Add all _CheckSignatures() test cases.
   AddParametricTests('CheckSignatures',
                      {'fail_empty_sigs_blob': (True, False),
-                      'fail_missing_pseudo_op': (True, False),
-                      'fail_mismatched_pseudo_op': (True, False),
                       'fail_sig_missing_fields': (True, False),
                       'fail_unknown_sig_version': (True, False),
                       'fail_incorrect_sig': (True, False)})
 
   # Add all _CheckManifestMinorVersion() test cases.
   AddParametricTests('CheckManifestMinorVersion',
-                     {'minor_version': (None, 0, 1, 2, 3, 4, 5, 555),
+                     {'minor_version': (None, 0, 2, 3, 4, 5, 555),
                       'payload_type': (checker._TYPE_FULL,
                                        checker._TYPE_DELTA)})
 
diff --git a/scripts/update_payload/common.py b/scripts/update_payload/common.py
index 9061a75..b934cf8 100644
--- a/scripts/update_payload/common.py
+++ b/scripts/update_payload/common.py
@@ -16,8 +16,11 @@
 
 """Utilities for update payload processing."""
 
+from __future__ import absolute_import
 from __future__ import print_function
 
+import base64
+
 from update_payload import update_metadata_pb2
 from update_payload.error import PayloadError
 
@@ -25,18 +28,14 @@
 #
 # Constants.
 #
-PSEUDO_EXTENT_MARKER = (1L << 64) - 1  # UINT64_MAX
-
 SIG_ASN1_HEADER = (
-    '\x30\x31\x30\x0d\x06\x09\x60\x86'
-    '\x48\x01\x65\x03\x04\x02\x01\x05'
-    '\x00\x04\x20'
+    b'\x30\x31\x30\x0d\x06\x09\x60\x86'
+    b'\x48\x01\x65\x03\x04\x02\x01\x05'
+    b'\x00\x04\x20'
 )
 
-CHROMEOS_MAJOR_PAYLOAD_VERSION = 1
 BRILLO_MAJOR_PAYLOAD_VERSION = 2
 
-INPLACE_MINOR_PAYLOAD_VERSION = 1
 SOURCE_MINOR_PAYLOAD_VERSION = 2
 OPSRCHASH_MINOR_PAYLOAD_VERSION = 3
 BROTLI_BSDIFF_MINOR_PAYLOAD_VERSION = 4
@@ -47,6 +46,7 @@
 # Tuple of (name in system, name in protobuf).
 CROS_PARTITIONS = ((KERNEL, KERNEL), (ROOTFS, 'rootfs'))
 
+
 #
 # Payload operation types.
 #
@@ -55,8 +55,6 @@
   _CLASS = update_metadata_pb2.InstallOperation
   REPLACE = _CLASS.REPLACE
   REPLACE_BZ = _CLASS.REPLACE_BZ
-  MOVE = _CLASS.MOVE
-  BSDIFF = _CLASS.BSDIFF
   SOURCE_COPY = _CLASS.SOURCE_COPY
   SOURCE_BSDIFF = _CLASS.SOURCE_BSDIFF
   ZERO = _CLASS.ZERO
@@ -64,13 +62,11 @@
   REPLACE_XZ = _CLASS.REPLACE_XZ
   PUFFDIFF = _CLASS.PUFFDIFF
   BROTLI_BSDIFF = _CLASS.BROTLI_BSDIFF
-  ALL = (REPLACE, REPLACE_BZ, MOVE, BSDIFF, SOURCE_COPY, SOURCE_BSDIFF, ZERO,
+  ALL = (REPLACE, REPLACE_BZ, SOURCE_COPY, SOURCE_BSDIFF, ZERO,
          DISCARD, REPLACE_XZ, PUFFDIFF, BROTLI_BSDIFF)
   NAMES = {
       REPLACE: 'REPLACE',
       REPLACE_BZ: 'REPLACE_BZ',
-      MOVE: 'MOVE',
-      BSDIFF: 'BSDIFF',
       SOURCE_COPY: 'SOURCE_COPY',
       SOURCE_BSDIFF: 'SOURCE_BSDIFF',
       ZERO: 'ZERO',
@@ -146,7 +142,7 @@
 
   try:
     data = file_obj.read(length)
-  except IOError, e:
+  except IOError as e:
     raise PayloadError('error reading from file (%s): %s' % (file_obj.name, e))
 
   if len(data) != length:
@@ -167,13 +163,12 @@
   end_block = ex.start_block + ex.num_blocks
   if block_size:
     return '%d->%d * %d' % (ex.start_block, end_block, block_size)
-  else:
-    return '%d->%d' % (ex.start_block, end_block)
+  return '%d->%d' % (ex.start_block, end_block)
 
 
 def FormatSha256(digest):
   """Returns a canonical string representation of a SHA256 digest."""
-  return digest.encode('base64').strip()
+  return base64.b64encode(digest).decode('utf-8')
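
str.encode('base64') is gone in Python 3, hence the switch to the base64
module. A minimal check of the new formatting path, assuming only the stdlib:

# Sketch: the Python 3 replacement for digest.encode('base64').strip().
import base64
import hashlib

digest = hashlib.sha256(b'fake-data').digest()
formatted = base64.b64encode(digest).decode('utf-8')
# A 32-byte SHA-256 digest always encodes to 44 base64 chars ending in '='.
assert len(formatted) == 44 and formatted.endswith('=')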
 
 
 #
diff --git a/scripts/update_payload/format_utils.py b/scripts/update_payload/format_utils.py
index 6248ba9..e73badf 100644
--- a/scripts/update_payload/format_utils.py
+++ b/scripts/update_payload/format_utils.py
@@ -16,6 +16,8 @@
 
 """Various formatting functions."""
 
+from __future__ import division
+
 
 def NumToPercent(num, total, min_precision=1, max_precision=5):
   """Returns the percentage (string) of |num| out of |total|.
@@ -50,7 +52,7 @@
   precision = min(min_precision, max_precision)
   factor = 10 ** precision
   while precision <= max_precision:
-    percent = num * 100 * factor / total
+    percent = num * 100 * factor // total
     if percent:
       break
     factor *= 10
@@ -102,8 +104,8 @@
     magnitude = next_magnitude
 
   if exp != 0:
-    whole = size / magnitude
-    frac = (size % magnitude) * (10 ** precision) / magnitude
+    whole = size // magnitude
+    frac = (size % magnitude) * (10 ** precision) // magnitude
     while frac and not frac % 10:
       frac /= 10
     return '%d%s %s' % (whole, '.%d' % frac if frac else '', suffixes[exp - 1])
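
The `//` changes keep the formatting math integral under true division. A
simplified sketch of the percentage arithmetic at the core of NumToPercent;
the values are arbitrary, and the real function also handles the precision
fallback when the percentage rounds to zero.

# Simplified core of the percentage math after the `//` change.
num, total = 3, 16
precision = 1

factor = 10 ** precision
percent = num * 100 * factor // total   # 187 (an int), never 187.5
whole, frac = divmod(percent, factor)   # (18, 7)
print('%d.%d%%' % (whole, frac))        # -> 18.7%
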
diff --git a/scripts/update_payload/format_utils_unittest.py b/scripts/update_payload/format_utils_unittest.py
index 42ea621..4dcd652 100755
--- a/scripts/update_payload/format_utils_unittest.py
+++ b/scripts/update_payload/format_utils_unittest.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python
 #
 # Copyright (C) 2013 The Android Open Source Project
 #
@@ -17,6 +17,11 @@
 
 """Unit tests for format_utils.py."""
 
+# Disable the function-name check to avoid errors from the legacy naming.
+# pylint: disable-msg=invalid-name
+
+from __future__ import absolute_import
+
 import unittest
 
 from update_payload import format_utils
diff --git a/scripts/update_payload/histogram.py b/scripts/update_payload/histogram.py
index 1ac2ab5..bad2dc3 100644
--- a/scripts/update_payload/histogram.py
+++ b/scripts/update_payload/histogram.py
@@ -16,6 +16,9 @@
 
 """Histogram generation tools."""
 
+from __future__ import absolute_import
+from __future__ import division
+
 from collections import defaultdict
 
 from update_payload import format_utils
@@ -110,7 +113,7 @@
     hist_bar = '|'
     for key, count in self.data:
       if self.total:
-        bar_len = count * self.scale / self.total
+        bar_len = count * self.scale // self.total
         hist_bar = '|%s|' % ('#' * bar_len).ljust(self.scale)
 
       line = '%s %s %s' % (
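
The histogram bar width is likewise floor-divided now. A toy rendering
sketch with made-up counts:

# Toy rendering of the bar scaling above.
scale = 20
data = [('REPLACE', 7), ('REPLACE_BZ', 2), ('SOURCE_COPY', 1)]
total = sum(count for _, count in data)

for key, count in data:
  bar_len = count * scale // total   # an int width, never a float
  print('%-12s |%s| %d' % (key, ('#' * bar_len).ljust(scale), count))
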
diff --git a/scripts/update_payload/histogram_unittest.py b/scripts/update_payload/histogram_unittest.py
index e757dd0..ccde2bb 100755
--- a/scripts/update_payload/histogram_unittest.py
+++ b/scripts/update_payload/histogram_unittest.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python
 #
 # Copyright (C) 2013 The Android Open Source Project
 #
@@ -17,6 +17,11 @@
 
 """Unit tests for histogram.py."""
 
+# Disable the function-name check to avoid errors from the legacy naming.
+# pylint: disable-msg=invalid-name
+
+from __future__ import absolute_import
+
 import unittest
 
 from update_payload import format_utils
diff --git a/scripts/update_payload/payload.py b/scripts/update_payload/payload.py
index 2a0cb58..998703a 100644
--- a/scripts/update_payload/payload.py
+++ b/scripts/update_payload/payload.py
@@ -16,10 +16,14 @@
 
 """Tools for reading, verifying and applying Chrome OS update payloads."""
 
+from __future__ import absolute_import
 from __future__ import print_function
 
 import hashlib
+import io
+import mmap
 import struct
+import zipfile
 
 from update_payload import applier
 from update_payload import checker
@@ -64,7 +68,7 @@
     """Update payload header struct."""
 
     # Header constants; sizes are in bytes.
-    _MAGIC = 'CrAU'
+    _MAGIC = b'CrAU'
     _VERSION_SIZE = 8
     _MANIFEST_LEN_SIZE = 8
     _METADATA_SIGNATURE_LEN_SIZE = 4
@@ -111,7 +115,6 @@
             payload_file, self._METADATA_SIGNATURE_LEN_SIZE, True,
             hasher=hasher)
 
-
   def __init__(self, payload_file, payload_file_offset=0):
     """Initialize the payload object.
 
@@ -119,7 +122,15 @@
       payload_file: update payload file object open for reading
       payload_file_offset: the offset of the actual payload
     """
-    self.payload_file = payload_file
+    if zipfile.is_zipfile(payload_file):
+      with zipfile.ZipFile(payload_file) as zfp:
+        self.payload_file = zfp.open("payload.bin", "r")
+    elif isinstance(payload_file, str):
+      payload_fp = open(payload_file, "rb")
+      payload_bytes = mmap.mmap(payload_fp.fileno(), 0, access=mmap.ACCESS_READ)
+      self.payload_file = io.BytesIO(payload_bytes)
+    else:
+      self.payload_file = payload_file
     self.payload_file_offset = payload_file_offset
     self.manifest_hasher = None
     self.is_init = False
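
The constructor now accepts a zip archive containing payload.bin, a
filesystem path, or an already-open file object. A standalone sketch of that
dispatch logic; the function and names here are illustrative, not part of
the module.

# Sketch of the input dispatch added to __init__ above.
import io
import mmap
import zipfile


def open_payload_stream(payload_file):
  if zipfile.is_zipfile(payload_file):
    # The member stream reads through the archive, so keep it referenced.
    zfp = zipfile.ZipFile(payload_file)
    return zfp.open('payload.bin', 'r')
  if isinstance(payload_file, str):
    with open(payload_file, 'rb') as fp:
      # mmap the file and copy it into a seekable in-memory buffer.
      data = mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ)
      return io.BytesIO(data)
  return payload_file  # assume an already-open, readable binary stream
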
@@ -226,31 +237,6 @@
 
     self.is_init = True
 
-  def Describe(self):
-    """Emits the payload embedded description data to standard output."""
-    def _DescribeImageInfo(description, image_info):
-      """Display info about the image."""
-      def _DisplayIndentedValue(name, value):
-        print('  {:<14} {}'.format(name+':', value))
-
-      print('%s:' % description)
-      _DisplayIndentedValue('Channel', image_info.channel)
-      _DisplayIndentedValue('Board', image_info.board)
-      _DisplayIndentedValue('Version', image_info.version)
-      _DisplayIndentedValue('Key', image_info.key)
-
-      if image_info.build_channel != image_info.channel:
-        _DisplayIndentedValue('Build channel', image_info.build_channel)
-
-      if image_info.build_version != image_info.version:
-        _DisplayIndentedValue('Build version', image_info.build_version)
-
-    if self.manifest.HasField('old_image_info'):
-      _DescribeImageInfo('Old Image', self.manifest.old_image_info)
-
-    if self.manifest.HasField('new_image_info'):
-      _DescribeImageInfo('New Image', self.manifest.new_image_info)
-
   def _AssertInit(self):
     """Raises an exception if the object was not initialized."""
     if not self.is_init:
@@ -263,9 +249,7 @@
   def IsDelta(self):
     """Returns True iff the payload appears to be a delta."""
     self._AssertInit()
-    return (self.manifest.HasField('old_kernel_info') or
-            self.manifest.HasField('old_rootfs_info') or
-            any(partition.HasField('old_partition_info')
+    return (any(partition.HasField('old_partition_info')
                 for partition in self.manifest.partitions))
 
   def IsFull(self):
diff --git a/scripts/update_payload/test_utils.py b/scripts/update_payload/test_utils.py
index 1e2259d..e153669 100644
--- a/scripts/update_payload/test_utils.py
+++ b/scripts/update_payload/test_utils.py
@@ -16,9 +16,10 @@
 
 """Utilities for unit testing."""
 
+from __future__ import absolute_import
 from __future__ import print_function
 
-import cStringIO
+import io
 import hashlib
 import os
 import struct
@@ -70,7 +71,7 @@
   """
   try:
     file_obj.write(struct.pack(common.IntPackingFmtStr(size, is_unsigned), val))
-  except IOError, e:
+  except IOError as e:
     raise payload.PayloadError('error writing to file (%s): %s' %
                                (file_obj.name, e))
 
@@ -173,31 +174,37 @@
     self.block_size = block_size
     _SetMsgField(self.manifest, 'block_size', block_size)
 
-  def SetPartInfo(self, is_kernel, is_new, part_size, part_hash):
+  def SetPartInfo(self, part_name, is_new, part_size, part_hash):
     """Set the partition info entry.
 
     Args:
-      is_kernel: whether this is kernel partition info
-      is_new: whether to set old (False) or new (True) info
-      part_size: the partition size (in fact, filesystem size)
-      part_hash: the partition hash
+      part_name: The name of the partition.
+      is_new: Whether to set old (False) or new (True) info.
+      part_size: The partition size (in fact, filesystem size).
+      part_hash: The partition hash.
     """
-    if is_kernel:
-      part_info = (self.manifest.new_kernel_info if is_new
-                   else self.manifest.old_kernel_info)
-    else:
-      part_info = (self.manifest.new_rootfs_info if is_new
-                   else self.manifest.old_rootfs_info)
+    partition = next((x for x in self.manifest.partitions
+                      if x.partition_name == part_name), None)
+    if partition is None:
+      partition = self.manifest.partitions.add()
+      partition.partition_name = part_name
+
+    part_info = (partition.new_partition_info if is_new
+                 else partition.old_partition_info)
     _SetMsgField(part_info, 'size', part_size)
     _SetMsgField(part_info, 'hash', part_hash)
 
-  def AddOperation(self, is_kernel, op_type, data_offset=None,
+  def AddOperation(self, part_name, op_type, data_offset=None,
                    data_length=None, src_extents=None, src_length=None,
                    dst_extents=None, dst_length=None, data_sha256_hash=None):
     """Adds an InstallOperation entry."""
-    operations = (self.manifest.kernel_install_operations if is_kernel
-                  else self.manifest.install_operations)
+    partition = next((x for x in self.manifest.partitions
+                      if x.partition_name == part_name), None)
+    if partition is None:
+      partition = self.manifest.partitions.add()
+      partition.partition_name = part_name
 
+    operations = partition.operations
     op = operations.add()
     op.type = op_type
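
SetPartInfo and AddOperation now look up a PartitionUpdate by name and
lazily add one if missing. A generic find-or-create sketch, using a
plain-Python stand-in for the protobuf message:

# Find-or-create sketch mirroring the lookup above.
class FakePartition(object):
  def __init__(self, partition_name):
    self.partition_name = partition_name
    self.operations = []


def get_or_add_partition(partitions, part_name):
  partition = next((p for p in partitions
                    if p.partition_name == part_name), None)
  if partition is None:
    partition = FakePartition(part_name)
    partitions.append(partition)
  return partition


partitions = []
first = get_or_add_partition(partitions, 'root')
second = get_or_add_partition(partitions, 'root')
assert first is second and len(partitions) == 1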
 
@@ -277,7 +284,7 @@
     self.data_blobs.append(data_blob)
     return data_length, data_offset
 
-  def AddOperationWithData(self, is_kernel, op_type, src_extents=None,
+  def AddOperationWithData(self, part_name, op_type, src_extents=None,
                            src_length=None, dst_extents=None, dst_length=None,
                            data_blob=None, do_hash_data_blob=True):
     """Adds an install operation and associated data blob.
@@ -287,12 +294,12 @@
     necessary offset/length accounting.
 
     Args:
-      is_kernel: whether this is a kernel (True) or rootfs (False) operation
-      op_type: one of REPLACE, REPLACE_BZ, REPLACE_XZ, MOVE or BSDIFF
+      part_name: The name of the partition (e.g. kernel or root).
+      op_type: one of REPLACE, REPLACE_BZ, REPLACE_XZ.
       src_extents: list of (start, length) pairs indicating src block ranges
-      src_length: size of the src data in bytes (needed for BSDIFF)
+      src_length: size of the src data in bytes (needed for diff operations)
       dst_extents: list of (start, length) pairs indicating dst block ranges
-      dst_length: size of the dst data in bytes (needed for BSDIFF)
+      dst_length: size of the dst data in bytes (needed for diff operations)
       data_blob: a data blob associated with this operation
       do_hash_data_blob: whether or not to compute and add a data blob hash
     """
@@ -302,15 +309,13 @@
         data_sha256_hash = hashlib.sha256(data_blob).digest()
       data_length, data_offset = self.AddData(data_blob)
 
-    self.AddOperation(is_kernel, op_type, data_offset=data_offset,
+    self.AddOperation(part_name, op_type, data_offset=data_offset,
                       data_length=data_length, src_extents=src_extents,
                       src_length=src_length, dst_extents=dst_extents,
                       dst_length=dst_length, data_sha256_hash=data_sha256_hash)
 
   def WriteToFileWithData(self, file_obj, sigs_data=None,
-                          privkey_file_name=None,
-                          do_add_pseudo_operation=False,
-                          is_pseudo_in_kernel=False, padding=None):
+                          privkey_file_name=None, padding=None):
     """Writes the payload content to a file, optionally signing the content.
 
     Args:
@@ -319,10 +324,6 @@
                  payload signature fields assumed to be preset by the caller)
       privkey_file_name: key used for signing the payload (optional; used only
                          if explicit signatures blob not provided)
-      do_add_pseudo_operation: whether a pseudo-operation should be added to
-                               account for the signature blob
-      is_pseudo_in_kernel: whether the pseudo-operation should be added to
-                           kernel (True) or rootfs (False) operations
       padding: stuff to dump past the normal data blobs provided (optional)
 
     Raises:
@@ -335,7 +336,7 @@
 
     if do_generate_sigs_data:
       # First, sign some arbitrary data to obtain the size of a signature blob.
-      fake_sig = SignSha256('fake-payload-data', privkey_file_name)
+      fake_sig = SignSha256(b'fake-payload-data', privkey_file_name)
       fake_sigs_gen = SignaturesGenerator()
       fake_sigs_gen.AddSig(1, fake_sig)
       sigs_len = len(fake_sigs_gen.ToBinary())
@@ -343,20 +344,9 @@
       # Update the payload with proper signature attributes.
       self.SetSignatures(self.curr_offset, sigs_len)
 
-    # Add a pseudo-operation to account for the signature blob, if requested.
-    if do_add_pseudo_operation:
-      if not self.block_size:
-        raise TestError('cannot add pseudo-operation without knowing the '
-                        'payload block size')
-      self.AddOperation(
-          is_pseudo_in_kernel, common.OpType.REPLACE,
-          data_offset=self.curr_offset, data_length=sigs_len,
-          dst_extents=[(common.PSEUDO_EXTENT_MARKER,
-                        (sigs_len + self.block_size - 1) / self.block_size)])
-
     if do_generate_sigs_data:
       # Once all payload fields are updated, dump and sign it.
-      temp_payload_file = cStringIO.StringIO()
+      temp_payload_file = io.BytesIO()
       self.WriteToFile(temp_payload_file, data_blobs=self.data_blobs)
       sig = SignSha256(temp_payload_file.getvalue(), privkey_file_name)
       sigs_gen = SignaturesGenerator()
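
The signing path dumps the payload into an io.BytesIO and signs the dumped
bytes. A minimal sketch of that dump-then-sign sequence; sign_sha256 is a
placeholder for test_utils.SignSha256, which shells out to openssl in the
real code.

# Sketch of the dump-then-sign flow above.
import hashlib
import io


def sign_sha256(data, privkey_file_name=None):
  # Placeholder "signature": just the SHA-256 digest of the data.
  return hashlib.sha256(data).digest()


def write_signed(write_payload):
  # Dump the payload (with signature fields already sized) into memory,
  # then sign the dumped bytes.
  temp_payload_file = io.BytesIO()
  write_payload(temp_payload_file)
  sig = sign_sha256(temp_payload_file.getvalue())
  return temp_payload_file.getvalue(), sig


payload_bytes, sig = write_signed(lambda f: f.write(b'fake-payload-content'))
assert sig == hashlib.sha256(b'fake-payload-content').digest()
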
diff --git a/scripts/update_payload/update_metadata_pb2.py b/scripts/update_payload/update_metadata_pb2.py
index cb8f4c2..9aef9f2 100644
--- a/scripts/update_payload/update_metadata_pb2.py
+++ b/scripts/update_payload/update_metadata_pb2.py
@@ -20,7 +20,7 @@
   package='chromeos_update_engine',
   syntax='proto2',
   serialized_options=_b('H\003'),
-  serialized_pb=_b('\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xee\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xad\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0c\n\x04MOVE\x10\x02\x1a\x02\x08\x01\x12\x0e\n\x06\x42SDIFF\x10\x03\x1a\x02\x08\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xd7\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"s\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\x12\x18\n\x10snapshot_enabled\x18\x02 \x01(\x08\"\xb1\x06\n\x14\x44\x65ltaArchiveManifest\x12\x44\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12K\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12>\n\x0fold_kernel_info\x18\x06 
\x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadataB\x02H\x03')
+  serialized_pb=_b('\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"\x9f\x01\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1aO\n\tSignature\x12\x13\n\x07version\x18\x01 \x01(\rB\x02\x18\x01\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\x1f\n\x17unpadded_signature_size\x18\x03 \x01(\x07\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"\x8f\x01\n\tImageInfo\x12\x11\n\x05\x62oard\x18\x01 \x01(\tB\x02\x18\x01\x12\x0f\n\x03key\x18\x02 \x01(\tB\x02\x18\x01\x12\x13\n\x07\x63hannel\x18\x03 \x01(\tB\x02\x18\x01\x12\x13\n\x07version\x18\x04 \x01(\tB\x02\x18\x01\x12\x19\n\rbuild_channel\x18\x05 \x01(\tB\x02\x18\x01\x12\x19\n\rbuild_version\x18\x06 \x01(\tB\x02\x18\x01\"\xee\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xad\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0c\n\x04MOVE\x10\x02\x1a\x02\x08\x01\x12\x0e\n\x06\x42SDIFF\x10\x03\x1a\x02\x08\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xcf\x01\n\x11\x43owMergeOperation\x12<\n\x04type\x18\x01 \x01(\x0e\x32..chromeos_update_engine.CowMergeOperation.Type\x12\x32\n\nsrc_extent\x18\x02 \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\ndst_extent\x18\x03 \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\"\x14\n\x04Type\x12\x0c\n\x08\x43OW_COPY\x10\x00\"\xc8\x06\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\x12\x0f\n\x07version\x18\x11 \x01(\t\x12\x43\n\x10merge_operations\x18\x12 \x03(\x0b\x32).chromeos_update_engine.CowMergeOperation\x12\x19\n\x11\x65stimate_cow_size\x18\x13 \x01(\x04\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 
\x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"\xa9\x01\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\x12\x18\n\x10snapshot_enabled\x18\x02 \x01(\x08\x12\x14\n\x0cvabc_enabled\x18\x03 \x01(\x08\x12\x1e\n\x16vabc_compression_param\x18\x04 \x01(\t\"c\n\x08\x41pexInfo\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\x03\x12\x15\n\ris_compressed\x18\x03 \x01(\x08\x12\x19\n\x11\x64\x65\x63ompressed_size\x18\x04 \x01(\x03\"C\n\x0c\x41pexMetadata\x12\x33\n\tapex_info\x18\x01 \x03(\x0b\x32 .chromeos_update_engine.ApexInfo\"\x9e\x07\n\x14\x44\x65ltaArchiveManifest\x12H\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12O\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12\x42\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12=\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfoB\x02\x18\x01\x12=\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfoB\x02\x18\x01\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadata\x12\x16\n\x0epartial_update\x18\x10 \x01(\x08\x12\x33\n\tapex_info\x18\x11 \x03(\x0b\x32 .chromeos_update_engine.ApexInfoB\x02H\x03')
 )
 
 
@@ -78,11 +78,29 @@
   ],
   containing_type=None,
   serialized_options=None,
-  serialized_start=712,
-  serialized_end=885,
+  serialized_start=775,
+  serialized_end=948,
 )
 _sym_db.RegisterEnumDescriptor(_INSTALLOPERATION_TYPE)
 
+_COWMERGEOPERATION_TYPE = _descriptor.EnumDescriptor(
+  name='Type',
+  full_name='chromeos_update_engine.CowMergeOperation.Type',
+  filename=None,
+  file=DESCRIPTOR,
+  values=[
+    _descriptor.EnumValueDescriptor(
+      name='COW_COPY', index=0, number=0,
+      serialized_options=None,
+      type=None),
+  ],
+  containing_type=None,
+  serialized_options=None,
+  serialized_start=1138,
+  serialized_end=1158,
+)
+_sym_db.RegisterEnumDescriptor(_COWMERGEOPERATION_TYPE)
+
 
 _EXTENT = _descriptor.Descriptor(
   name='Extent',
@@ -135,7 +153,7 @@
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='data', full_name='chromeos_update_engine.Signatures.Signature.data', index=1,
       number=2, type=12, cpp_type=9, label=1,
@@ -143,6 +161,13 @@
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='unpadded_signature_size', full_name='chromeos_update_engine.Signatures.Signature.unpadded_signature_size', index=2,
+      number=3, type=7, cpp_type=3, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -155,8 +180,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=180,
-  serialized_end=222,
+  serialized_start=181,
+  serialized_end=260,
 )
 
 _SIGNATURES = _descriptor.Descriptor(
@@ -185,8 +210,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=100,
-  serialized_end=222,
+  serialized_start=101,
+  serialized_end=260,
 )
 
 
@@ -223,8 +248,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=224,
-  serialized_end=267,
+  serialized_start=262,
+  serialized_end=305,
 )
 
 
@@ -241,42 +266,42 @@
       has_default_value=False, default_value=_b("").decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='key', full_name='chromeos_update_engine.ImageInfo.key', index=1,
       number=2, type=9, cpp_type=9, label=1,
       has_default_value=False, default_value=_b("").decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='channel', full_name='chromeos_update_engine.ImageInfo.channel', index=2,
       number=3, type=9, cpp_type=9, label=1,
       has_default_value=False, default_value=_b("").decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='version', full_name='chromeos_update_engine.ImageInfo.version', index=3,
       number=4, type=9, cpp_type=9, label=1,
       has_default_value=False, default_value=_b("").decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='build_channel', full_name='chromeos_update_engine.ImageInfo.build_channel', index=4,
       number=5, type=9, cpp_type=9, label=1,
       has_default_value=False, default_value=_b("").decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='build_version', full_name='chromeos_update_engine.ImageInfo.build_version', index=5,
       number=6, type=9, cpp_type=9, label=1,
       has_default_value=False, default_value=_b("").decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -289,8 +314,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=269,
-  serialized_end=388,
+  serialized_start=308,
+  serialized_end=451,
 )
 
 
@@ -377,8 +402,54 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=391,
-  serialized_end=885,
+  serialized_start=454,
+  serialized_end=948,
+)
+
+
+_COWMERGEOPERATION = _descriptor.Descriptor(
+  name='CowMergeOperation',
+  full_name='chromeos_update_engine.CowMergeOperation',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='type', full_name='chromeos_update_engine.CowMergeOperation.type', index=0,
+      number=1, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='src_extent', full_name='chromeos_update_engine.CowMergeOperation.src_extent', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='dst_extent', full_name='chromeos_update_engine.CowMergeOperation.dst_extent', index=2,
+      number=3, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+    _COWMERGEOPERATION_TYPE,
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto2',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=951,
+  serialized_end=1158,
 )
 
 
@@ -501,6 +572,27 @@
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='version', full_name='chromeos_update_engine.PartitionUpdate.version', index=16,
+      number=17, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='merge_operations', full_name='chromeos_update_engine.PartitionUpdate.merge_operations', index=17,
+      number=18, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='estimate_cow_size', full_name='chromeos_update_engine.PartitionUpdate.estimate_cow_size', index=18,
+      number=19, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -513,8 +605,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=888,
-  serialized_end=1615,
+  serialized_start=1161,
+  serialized_end=2001,
 )
 
 
@@ -558,8 +650,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1617,
-  serialized_end=1693,
+  serialized_start=2003,
+  serialized_end=2079,
 )
 
 
@@ -584,6 +676,20 @@
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='vabc_enabled', full_name='chromeos_update_engine.DynamicPartitionMetadata.vabc_enabled', index=2,
+      number=3, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='vabc_compression_param', full_name='chromeos_update_engine.DynamicPartitionMetadata.vabc_compression_param', index=3,
+      number=4, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -596,8 +702,91 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1695,
-  serialized_end=1810,
+  serialized_start=2082,
+  serialized_end=2251,
+)
+
+
+_APEXINFO = _descriptor.Descriptor(
+  name='ApexInfo',
+  full_name='chromeos_update_engine.ApexInfo',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='package_name', full_name='chromeos_update_engine.ApexInfo.package_name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='version', full_name='chromeos_update_engine.ApexInfo.version', index=1,
+      number=2, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='is_compressed', full_name='chromeos_update_engine.ApexInfo.is_compressed', index=2,
+      number=3, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='decompressed_size', full_name='chromeos_update_engine.ApexInfo.decompressed_size', index=3,
+      number=4, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto2',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=2253,
+  serialized_end=2352,
+)
+
+
+_APEXMETADATA = _descriptor.Descriptor(
+  name='ApexMetadata',
+  full_name='chromeos_update_engine.ApexMetadata',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='apex_info', full_name='chromeos_update_engine.ApexMetadata.apex_info', index=0,
+      number=1, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto2',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=2354,
+  serialized_end=2421,
 )
 
 
@@ -614,14 +803,14 @@
       has_default_value=False, default_value=[],
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='kernel_install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.kernel_install_operations', index=1,
       number=2, type=11, cpp_type=10, label=3,
       has_default_value=False, default_value=[],
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='block_size', full_name='chromeos_update_engine.DeltaArchiveManifest.block_size', index=2,
       number=3, type=13, cpp_type=3, label=1,
@@ -649,42 +838,42 @@
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='new_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_kernel_info', index=6,
       number=7, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='old_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_rootfs_info', index=7,
       number=8, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='new_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_rootfs_info', index=8,
       number=9, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='old_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_image_info', index=9,
       number=10, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='new_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_image_info', index=10,
       number=11, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='minor_version', full_name='chromeos_update_engine.DeltaArchiveManifest.minor_version', index=11,
       number=12, type=13, cpp_type=3, label=1,
@@ -713,6 +902,20 @@
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='partial_update', full_name='chromeos_update_engine.DeltaArchiveManifest.partial_update', index=15,
+      number=16, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='apex_info', full_name='chromeos_update_engine.DeltaArchiveManifest.apex_info', index=16,
+      number=17, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -725,8 +928,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1813,
-  serialized_end=2630,
+  serialized_start=2424,
+  serialized_end=3350,
 )
 
 _SIGNATURES_SIGNATURE.containing_type = _SIGNATURES
@@ -735,6 +938,10 @@
 _INSTALLOPERATION.fields_by_name['src_extents'].message_type = _EXTENT
 _INSTALLOPERATION.fields_by_name['dst_extents'].message_type = _EXTENT
 _INSTALLOPERATION_TYPE.containing_type = _INSTALLOPERATION
+_COWMERGEOPERATION.fields_by_name['type'].enum_type = _COWMERGEOPERATION_TYPE
+_COWMERGEOPERATION.fields_by_name['src_extent'].message_type = _EXTENT
+_COWMERGEOPERATION.fields_by_name['dst_extent'].message_type = _EXTENT
+_COWMERGEOPERATION_TYPE.containing_type = _COWMERGEOPERATION
 _PARTITIONUPDATE.fields_by_name['new_partition_signature'].message_type = _SIGNATURES_SIGNATURE
 _PARTITIONUPDATE.fields_by_name['old_partition_info'].message_type = _PARTITIONINFO
 _PARTITIONUPDATE.fields_by_name['new_partition_info'].message_type = _PARTITIONINFO
@@ -743,7 +950,9 @@
 _PARTITIONUPDATE.fields_by_name['hash_tree_extent'].message_type = _EXTENT
 _PARTITIONUPDATE.fields_by_name['fec_data_extent'].message_type = _EXTENT
 _PARTITIONUPDATE.fields_by_name['fec_extent'].message_type = _EXTENT
+_PARTITIONUPDATE.fields_by_name['merge_operations'].message_type = _COWMERGEOPERATION
 _DYNAMICPARTITIONMETADATA.fields_by_name['groups'].message_type = _DYNAMICPARTITIONGROUP
+_APEXMETADATA.fields_by_name['apex_info'].message_type = _APEXINFO
 _DELTAARCHIVEMANIFEST.fields_by_name['install_operations'].message_type = _INSTALLOPERATION
 _DELTAARCHIVEMANIFEST.fields_by_name['kernel_install_operations'].message_type = _INSTALLOPERATION
 _DELTAARCHIVEMANIFEST.fields_by_name['old_kernel_info'].message_type = _PARTITIONINFO
@@ -754,14 +963,18 @@
 _DELTAARCHIVEMANIFEST.fields_by_name['new_image_info'].message_type = _IMAGEINFO
 _DELTAARCHIVEMANIFEST.fields_by_name['partitions'].message_type = _PARTITIONUPDATE
 _DELTAARCHIVEMANIFEST.fields_by_name['dynamic_partition_metadata'].message_type = _DYNAMICPARTITIONMETADATA
+_DELTAARCHIVEMANIFEST.fields_by_name['apex_info'].message_type = _APEXINFO
 DESCRIPTOR.message_types_by_name['Extent'] = _EXTENT
 DESCRIPTOR.message_types_by_name['Signatures'] = _SIGNATURES
 DESCRIPTOR.message_types_by_name['PartitionInfo'] = _PARTITIONINFO
 DESCRIPTOR.message_types_by_name['ImageInfo'] = _IMAGEINFO
 DESCRIPTOR.message_types_by_name['InstallOperation'] = _INSTALLOPERATION
+DESCRIPTOR.message_types_by_name['CowMergeOperation'] = _COWMERGEOPERATION
 DESCRIPTOR.message_types_by_name['PartitionUpdate'] = _PARTITIONUPDATE
 DESCRIPTOR.message_types_by_name['DynamicPartitionGroup'] = _DYNAMICPARTITIONGROUP
 DESCRIPTOR.message_types_by_name['DynamicPartitionMetadata'] = _DYNAMICPARTITIONMETADATA
+DESCRIPTOR.message_types_by_name['ApexInfo'] = _APEXINFO
+DESCRIPTOR.message_types_by_name['ApexMetadata'] = _APEXMETADATA
 DESCRIPTOR.message_types_by_name['DeltaArchiveManifest'] = _DELTAARCHIVEMANIFEST
 _sym_db.RegisterFileDescriptor(DESCRIPTOR)
 
@@ -808,6 +1021,13 @@
   })
 _sym_db.RegisterMessage(InstallOperation)
 
+CowMergeOperation = _reflection.GeneratedProtocolMessageType('CowMergeOperation', (_message.Message,), {
+  'DESCRIPTOR' : _COWMERGEOPERATION,
+  '__module__' : 'update_metadata_pb2'
+  # @@protoc_insertion_point(class_scope:chromeos_update_engine.CowMergeOperation)
+  })
+_sym_db.RegisterMessage(CowMergeOperation)
+
 PartitionUpdate = _reflection.GeneratedProtocolMessageType('PartitionUpdate', (_message.Message,), {
   'DESCRIPTOR' : _PARTITIONUPDATE,
   '__module__' : 'update_metadata_pb2'
@@ -829,6 +1049,20 @@
   })
 _sym_db.RegisterMessage(DynamicPartitionMetadata)
 
+ApexInfo = _reflection.GeneratedProtocolMessageType('ApexInfo', (_message.Message,), {
+  'DESCRIPTOR' : _APEXINFO,
+  '__module__' : 'update_metadata_pb2'
+  # @@protoc_insertion_point(class_scope:chromeos_update_engine.ApexInfo)
+  })
+_sym_db.RegisterMessage(ApexInfo)
+
+ApexMetadata = _reflection.GeneratedProtocolMessageType('ApexMetadata', (_message.Message,), {
+  'DESCRIPTOR' : _APEXMETADATA,
+  '__module__' : 'update_metadata_pb2'
+  # @@protoc_insertion_point(class_scope:chromeos_update_engine.ApexMetadata)
+  })
+_sym_db.RegisterMessage(ApexMetadata)
+
 DeltaArchiveManifest = _reflection.GeneratedProtocolMessageType('DeltaArchiveManifest', (_message.Message,), {
   'DESCRIPTOR' : _DELTAARCHIVEMANIFEST,
   '__module__' : 'update_metadata_pb2'
@@ -838,6 +1072,21 @@
 
 
 DESCRIPTOR._options = None
+_SIGNATURES_SIGNATURE.fields_by_name['version']._options = None
+_IMAGEINFO.fields_by_name['board']._options = None
+_IMAGEINFO.fields_by_name['key']._options = None
+_IMAGEINFO.fields_by_name['channel']._options = None
+_IMAGEINFO.fields_by_name['version']._options = None
+_IMAGEINFO.fields_by_name['build_channel']._options = None
+_IMAGEINFO.fields_by_name['build_version']._options = None
 _INSTALLOPERATION_TYPE.values_by_name["MOVE"]._options = None
 _INSTALLOPERATION_TYPE.values_by_name["BSDIFF"]._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['install_operations']._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['kernel_install_operations']._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['old_kernel_info']._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['new_kernel_info']._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['old_rootfs_info']._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['new_rootfs_info']._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['old_image_info']._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['new_image_info']._options = None
 # @@protoc_insertion_point(module_scope)
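A minimal usage sketch of the messages added above (ApexInfo, ApexMetadata, CowMergeOperation) and the new DeltaArchiveManifest fields (partial_update, apex_info, dynamic_partition_metadata.vabc_enabled), assuming the regenerated update_metadata_pb2 module from this diff is importable; the partition and package names and block numbers below are placeholders, not values from this change:

import update_metadata_pb2

manifest = update_metadata_pb2.DeltaArchiveManifest()
manifest.partial_update = True
manifest.dynamic_partition_metadata.vabc_enabled = True
manifest.dynamic_partition_metadata.vabc_compression_param = 'gz'

# One partition carrying a single COW_COPY merge operation.
part = manifest.partitions.add()
part.partition_name = 'product'  # placeholder partition name
op = part.merge_operations.add()
op.type = update_metadata_pb2.CowMergeOperation.COW_COPY
op.src_extent.start_block = 10
op.src_extent.num_blocks = 4
op.dst_extent.start_block = 20
op.dst_extent.num_blocks = 4
part.estimate_cow_size = 1 << 20

# APEX metadata carried in the manifest.
apex = manifest.apex_info.add()
apex.package_name = 'com.example.apex'  # placeholder package name
apex.version = 2
apex.is_compressed = False

blob = manifest.SerializeToString()
roundtrip = update_metadata_pb2.DeltaArchiveManifest.FromString(blob)
assert roundtrip.apex_info[0].version == 2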
diff --git a/stable/Android.bp b/stable/Android.bp
new file mode 100644
index 0000000..1573ebd
--- /dev/null
+++ b/stable/Android.bp
@@ -0,0 +1,79 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Stable AIDL interface between update_engine and other APEXes
+// ========================================================
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "system_update_engine_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["system_update_engine_license"],
+}
+
+aidl_interface {
+    name: "libupdate_engine_stable",
+
+    // This header library is available to core and product modules.
+    product_available: true,
+
+    srcs: [
+        "android/os/IUpdateEngineStable.aidl",
+        "android/os/IUpdateEngineStableCallback.aidl",
+    ],
+    backend: {
+        cpp: {
+            enabled: true,
+        },
+        java: {
+            enabled: false,
+        },
+        ndk: {
+            enabled: true,
+            apex_available: [
+                "com.android.gki.*",
+            ],
+        },
+    },
+    versions: ["1"],
+}
+
+// update_engine_stable_client (type: executable)
+// ========================================================
+// update_engine console client installed to APEXes.
+cc_binary {
+    name: "update_engine_stable_client",
+    product_specific: true,
+    header_libs: [
+        "libupdate_engine_headers",
+    ],
+    shared_libs: [
+        "libbinder_ndk",
+        "libbase",
+        "liblog",
+    ],
+    static_libs: [
+        "libgflags",
+        "libupdate_engine_stable-V1-ndk_platform",
+    ],
+    srcs: [
+        "update_engine_stable_client.cc",
+    ],
+    apex_available: [
+        "com.android.gki.*",
+    ],
+}
diff --git a/stable/aidl_api/libupdate_engine_stable/1/.hash b/stable/aidl_api/libupdate_engine_stable/1/.hash
new file mode 100644
index 0000000..f21562a
--- /dev/null
+++ b/stable/aidl_api/libupdate_engine_stable/1/.hash
@@ -0,0 +1 @@
+526043ea6cb098d53a9c3e778420e64c4e864d8c
diff --git a/stable/aidl_api/libupdate_engine_stable/1/android/os/IUpdateEngineStable.aidl b/stable/aidl_api/libupdate_engine_stable/1/android/os/IUpdateEngineStable.aidl
new file mode 100644
index 0000000..67db18e
--- /dev/null
+++ b/stable/aidl_api/libupdate_engine_stable/1/android/os/IUpdateEngineStable.aidl
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE.                          //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+//     the interface (from the latest frozen version), the build system will
+//     prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.os;
+interface IUpdateEngineStable {
+  void applyPayloadFd(in ParcelFileDescriptor pfd, in long payload_offset, in long payload_size, in String[] headerKeyValuePairs);
+  boolean bind(android.os.IUpdateEngineStableCallback callback);
+  boolean unbind(android.os.IUpdateEngineStableCallback callback);
+}
diff --git a/stable/aidl_api/libupdate_engine_stable/1/android/os/IUpdateEngineStableCallback.aidl b/stable/aidl_api/libupdate_engine_stable/1/android/os/IUpdateEngineStableCallback.aidl
new file mode 100644
index 0000000..dbca127
--- /dev/null
+++ b/stable/aidl_api/libupdate_engine_stable/1/android/os/IUpdateEngineStableCallback.aidl
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE.                          //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+//     the interface (from the latest frozen version), the build system will
+//     prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.os;
+interface IUpdateEngineStableCallback {
+  oneway void onStatusUpdate(int status_code, float percentage);
+  oneway void onPayloadApplicationComplete(int error_code);
+}
diff --git a/stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStable.aidl b/stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStable.aidl
new file mode 100644
index 0000000..82c3ca5
--- /dev/null
+++ b/stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStable.aidl
@@ -0,0 +1,23 @@
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE.                          //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL interface (or parcelable). Do not try to
+// edit this file. It looks like you are doing that because you have modified
+// an AIDL interface in a backward-incompatible way, e.g., deleting a function
+// from an interface or a field from a parcelable and it broke the build. That
+// breakage is intended.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.os;
+interface IUpdateEngineStable {
+  void applyPayloadFd(in ParcelFileDescriptor pfd, in long payload_offset, in long payload_size, in String[] headerKeyValuePairs);
+  boolean bind(android.os.IUpdateEngineStableCallback callback);
+  boolean unbind(android.os.IUpdateEngineStableCallback callback);
+}
diff --git a/stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStableCallback.aidl b/stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStableCallback.aidl
new file mode 100644
index 0000000..4c72b49
--- /dev/null
+++ b/stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStableCallback.aidl
@@ -0,0 +1,22 @@
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE.                          //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL interface (or parcelable). Do not try to
+// edit this file. It looks like you are doing that because you have modified
+// an AIDL interface in a backward-incompatible way, e.g., deleting a function
+// from an interface or a field from a parcelable and it broke the build. That
+// breakage is intended.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.os;
+interface IUpdateEngineStableCallback {
+  oneway void onStatusUpdate(int status_code, float percentage);
+  oneway void onPayloadApplicationComplete(int error_code);
+}
diff --git a/stable/android/os/IUpdateEngineStable.aidl b/stable/android/os/IUpdateEngineStable.aidl
new file mode 100644
index 0000000..b3b6674
--- /dev/null
+++ b/stable/android/os/IUpdateEngineStable.aidl
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.os;
+
+import android.os.IUpdateEngineStableCallback;
+import android.os.ParcelFileDescriptor;
+
+/**
+ * The stable interface exposed by the update engine daemon.
+ */
+interface IUpdateEngineStable {
+  /**
+   * Apply the given payload as provided in the given file descriptor.
+   *
+   * See {@link #bind(IUpdateEngineStableCallback)} for status updates.
+   *
+   * @param pfd The file descriptor opened at the payload file. Note that the daemon must have
+   *   enough permission to operate on the file descriptor.
+   * @param payload_offset offset into pfd where the payload binary starts.
+   * @param payload_size length after payload_offset to read from pfd. If 0, it will be auto
+   *   detected.
+   * @param headerKeyValuePairs additional header key value pairs, in the format of "key=value".
+   * @see android.os.UpdateEngine#applyPayload(android.content.res.AssetFileDescriptor, String[])
+   */
+  void applyPayloadFd(in ParcelFileDescriptor pfd,
+                      in long payload_offset,
+                      in long payload_size,
+                      in String[] headerKeyValuePairs);
+
+  /**
+   * Bind a callback for status updates on payload application.
+   *
+   * At any given time, only one callback can be bound. If a callback is already bound,
+   * subsequent binding will fail and return false until the bound callback is unbound. That is,
+   * binding is first-come, first-served.
+   *
+   * A bound callback may be unbound explicitly by calling
+   * {@link #unbind(IUpdateEngineStableCallback)}, or
+   * implicitly when the process implementing the callback dies.
+   *
+   * @param callback See {@link IUpdateEngineStableCallback}
+   * @return true if binding is successful, false otherwise.
+   * @see android.os.UpdateEngine#bind(android.os.UpdateEngineCallback)
+   */
+  boolean bind(IUpdateEngineStableCallback callback);
+
+  /**
+   * Unbind a possibly bound callback.
+   *
+   * If the provided callback does not match the previously bound callback, unbinding fails.
+   *
+   * Note that a callback may also be unbound when the process implementing the callback dies.
+   * Hence, a client usually does not need to explicitly unbind a callback unless it wants to change
+   * the bound callback.
+   *
+   * @param callback The callback to be unbound. See {@link IUpdateEngineStableCallback}.
+   * @return true if unbinding is successful, false otherwise.
+   * @see android.os.UpdateEngine#unbind(android.os.UpdateEngineCallback)
+   */
+  boolean unbind(IUpdateEngineStableCallback callback);
+}
diff --git a/stable/android/os/IUpdateEngineStableCallback.aidl b/stable/android/os/IUpdateEngineStableCallback.aidl
new file mode 100644
index 0000000..d8fc333
--- /dev/null
+++ b/stable/android/os/IUpdateEngineStableCallback.aidl
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.os;
+
+/**
+ * The stable Callback interface for IUpdateEngineStable.
+ */
+oneway interface IUpdateEngineStableCallback {
+  /**
+   * Invoked when a payload is being applied and there is a status update.
+   *
+   * @param status_code see {@link android.os.UpdateEngine.UpdateStatusConstants}.
+   * @param percentage percentage of progress of the current stage.
+   * @see android.os.UpdateEngineCallback#onStatusUpdate(int, float)
+   */
+  void onStatusUpdate(int status_code, float percentage);
+
+  /**
+   * Invoked when a payload has finished being applied.
+   *
+   * @param error_code see {@link android.os.UpdateEngine.ErrorCodeConstants}
+   * @see android.os.UpdateEngineCallback#onPayloadApplicationComplete(int)
+   */
+  void onPayloadApplicationComplete(int error_code);
+}
diff --git a/stable/update_engine_stable_client.cc b/stable/update_engine_stable_client.cc
new file mode 100644
index 0000000..17f66b6
--- /dev/null
+++ b/stable/update_engine_stable_client.cc
@@ -0,0 +1,187 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// update_engine console client installed to APEXes for scripts to invoke
+// directly. Uses the stable API.
+
+#include <fcntl.h>
+#include <sysexits.h>
+#include <unistd.h>
+
+#include <vector>
+
+#include <aidl/android/os/BnUpdateEngineStableCallback.h>
+#include <aidl/android/os/IUpdateEngineStable.h>
+#include <android-base/logging.h>
+#include <android-base/strings.h>
+#include <android/binder_manager.h>
+#include <android/binder_process.h>
+#include <android/binder_ibinder.h>
+#include <common/error_code.h>
+#include <gflags/gflags.h>
+
+namespace chromeos_update_engine::internal {
+
+DEFINE_string(payload,
+              "file:///path/to/payload.bin",
+              "The file URI to the update payload to use, or path to the file");
+DEFINE_int64(offset,
+             0,
+             "The offset in the payload where the CrAU update starts.");
+DEFINE_int64(size,
+             0,
+             "The size of the CrAU part of the payload. If 0 is passed, it "
+             "will be autodetected.");
+DEFINE_string(headers,
+              "",
+              "A list of key-value pairs, one element of the list per line.");
+
+[[noreturn]] int Exit(int return_code) {
+  LOG(INFO) << "Exit: " << return_code;
+  exit(return_code);
+}
+// Called whenever the UpdateEngine daemon dies.
+void UpdateEngineServiceDied(void*) {
+  LOG(ERROR) << "UpdateEngineService died.";
+  Exit(EX_SOFTWARE);
+}
+
+class UpdateEngineClientAndroid {
+ public:
+  UpdateEngineClientAndroid() = default;
+  int Run();
+
+ private:
+  class UECallback : public aidl::android::os::BnUpdateEngineStableCallback {
+   public:
+    UECallback() = default;
+
+    // android::os::BnUpdateEngineStableCallback overrides.
+    ndk::ScopedAStatus onStatusUpdate(int status_code, float progress) override;
+    ndk::ScopedAStatus onPayloadApplicationComplete(int error_code) override;
+  };
+
+  static std::vector<std::string> ParseHeaders(const std::string& arg);
+
+  const ndk::ScopedAIBinder_DeathRecipient death_recipient_{
+      AIBinder_DeathRecipient_new(&UpdateEngineServiceDied)};
+  std::shared_ptr<aidl::android::os::IUpdateEngineStable> service_;
+  std::shared_ptr<aidl::android::os::BnUpdateEngineStableCallback> callback_;
+};
+
+ndk::ScopedAStatus UpdateEngineClientAndroid::UECallback::onStatusUpdate(
+    int status_code, float progress) {
+  LOG(INFO) << "onStatusUpdate(" << status_code << ", " << progress << ")";
+  return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus
+UpdateEngineClientAndroid::UECallback::onPayloadApplicationComplete(
+    int error_code) {
+  LOG(INFO) << "onPayloadApplicationComplete(" << error_code << ")";
+  auto code = static_cast<ErrorCode>(error_code);
+  Exit((code == ErrorCode::kSuccess || code == ErrorCode::kUpdatedButNotActive)
+           ? EX_OK
+           : EX_SOFTWARE);
+}
+
+int UpdateEngineClientAndroid::Run() {
+  service_ = aidl::android::os::IUpdateEngineStable::fromBinder(ndk::SpAIBinder(
+      AServiceManager_getService("android.os.UpdateEngineStableService")));
+  if (service_ == nullptr) {
+    LOG(ERROR)
+        << "Failed to get IUpdateEngineStable binder from service manager.";
+    return EX_SOFTWARE;
+  }
+
+  // Register a callback object with the service.
+  callback_ = ndk::SharedRefBase::make<UECallback>();
+  bool bound;
+  if (!service_->bind(callback_, &bound).isOk() || !bound) {
+    LOG(ERROR) << "Failed to bind() the UpdateEngine daemon.";
+    return EX_SOFTWARE;
+  }
+
+  auto headers = ParseHeaders(FLAGS_headers);
+  ndk::ScopedAStatus status;
+  const char* payload_path;
+  std::string file_prefix = "file://";
+  if (android::base::StartsWith(FLAGS_payload, file_prefix)) {
+    payload_path = FLAGS_payload.data() + file_prefix.length();
+  } else {
+    payload_path = FLAGS_payload.data();
+  }
+  ndk::ScopedFileDescriptor ufd(
+      TEMP_FAILURE_RETRY(open(payload_path, O_RDONLY)));
+  if (ufd.get() < 0) {
+    PLOG(ERROR) << "Can't open " << payload_path;
+    return EX_SOFTWARE;
+  }
+  status = service_->applyPayloadFd(ufd, FLAGS_offset, FLAGS_size, headers);
+  if (!status.isOk()) {
+    LOG(ERROR) << "Cannot apply payload: " << status.getDescription();
+    return EX_SOFTWARE;
+  }
+
+  // When following update status changes, exit if the update_engine daemon
+  // dies.
+  if (AIBinder_linkToDeath(service_->asBinder().get(),
+                           death_recipient_.get(),
+                           nullptr) != STATUS_OK) {
+    return EX_SOFTWARE;
+  }
+
+  return EX_OK;
+}
+
+std::vector<std::string> UpdateEngineClientAndroid::ParseHeaders(
+    const std::string& arg) {
+  std::vector<std::string> lines = android::base::Split(arg, "\n");
+  std::vector<std::string> headers;
+  for (const auto& line : lines) {
+    auto header = android::base::Trim(line);
+    if (!header.empty()) {
+      headers.push_back(header);
+    }
+  }
+  return headers;
+}
+
+}  // namespace chromeos_update_engine::internal
+
+int main(int argc, char** argv) {
+  android::base::InitLogging(argv);
+  gflags::ParseCommandLineFlags(&argc, &argv, true);
+
+  // Unlike other update_engine* processes that use message loops,
+  // update_engine_stable_client uses a thread pool model. However, the number
+  // of threads is limited to 1; that is, 0 additional threads should be spawned.
+  // This avoids some race conditions.
+  if (!ABinderProcess_setThreadPoolMaxThreadCount(0)) {
+    LOG(ERROR) << "Cannot set thread pool max thread count";
+    return EX_SOFTWARE;
+  }
+  ABinderProcess_startThreadPool();
+
+  chromeos_update_engine::internal::UpdateEngineClientAndroid client{};
+  int code = client.Run();
+  if (code != EX_OK)
+    return code;
+
+  ABinderProcess_joinThreadPool();
+  LOG(ERROR) << "Exited from joinThreadPool.";
+  return EX_SOFTWARE;
+}
diff --git a/tar_bunzip2.gni b/tar_bunzip2.gni
new file mode 100644
index 0000000..5d90167
--- /dev/null
+++ b/tar_bunzip2.gni
@@ -0,0 +1,31 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+template("tar_bunzip2") {
+  forward_variables_from(invoker, [ "image_out_dir" ])
+  out_dir = "${root_gen_dir}/${image_out_dir}"
+
+  action_foreach(target_name) {
+    sources = invoker.sources
+    script = "//common-mk/file_generator_wrapper.py"
+    outputs = [ "${out_dir}/{{source_name_part}}.flag" ]
+    args = [
+      "sh",
+      "-c",
+      "tar -xvf \"{{source}}\" -C \"${out_dir}\" && touch ${out_dir}/{{source_name_part}}.flag",
+    ]
+  }
+}
diff --git a/tar_bunzip2.gypi b/tar_bunzip2.gypi
deleted file mode 100644
index 4d1be28..0000000
--- a/tar_bunzip2.gypi
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-{
-  'variables': {
-    'out_dir': '<(SHARED_INTERMEDIATE_DIR)/<(image_out_dir)',
-  },
-  'rules': [
-    {
-      'rule_name': 'tar-bunzip2',
-      'extension': 'bz2',
-      'outputs': [
-        # The .flag file is used to mark the timestamp of the file extraction
-        # and re-run this action if a new .bz2 file is generated.
-        '<(out_dir)/<(RULE_INPUT_ROOT).flag',
-      ],
-      'action': [
-        'sh',
-        '-c',
-        'tar -xvf "<(RULE_INPUT_PATH)" -C "<(out_dir)" && touch <(out_dir)/<(RULE_INPUT_ROOT).flag',
-      ],
-      'msvs_cygwin_shell': 0,
-      'process_outputs_as_sources': 1,
-      'message': 'Unpacking file <(RULE_INPUT_PATH)',
-    },
-  ],
-}
diff --git a/test_config.xml b/test_config.xml
index 2639e7f..fe3cbfd 100644
--- a/test_config.xml
+++ b/test_config.xml
@@ -16,13 +16,14 @@
 <configuration description="Config to run update_engine_unittests on device">
     <option name="test-suite-tag" value="apct" />
     <option name="test-suite-tag" value="apct-native" />
+    <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer" />
     <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
         <option name="cleanup" value="true" />
-        <option name="push" value="update_engine_unittests->/data/local/tmp/update_engine_unittests" />
+        <option name="push" value="update_engine_unittests->/data/nativetest/update_engine_unittests" />
     </target_preparer>
 
     <test class="com.android.tradefed.testtype.GTest" >
-        <option name="native-test-device-path" value="/data/local/tmp" />
+        <option name="native-test-device-path" value="/data/nativetest" />
         <!-- The following rules avoid test runner from calling the following helper executables
              directly as gtests. -->
         <option name="file-exclusion-filter-regex" value=".*/delta_generator$" />
diff --git a/test_http_server.cc b/test_http_server.cc
index 4536f37..a2f1e05 100644
--- a/test_http_server.cc
+++ b/test_http_server.cc
@@ -189,7 +189,8 @@
   ret = WriteString(fd,
                     string("HTTP/1.1 ") + Itoa(return_code) + " " +
                         GetHttpResponseDescription(return_code) +
-                        EOL "Content-Type: application/octet-stream" EOL);
+                        EOL "Content-Type: application/octet-stream" EOL
+                            "Connection: close" EOL);
   if (ret < 0)
     return -1;
   written += ret;
@@ -406,6 +407,7 @@
   if ((ret = WriteString(fd, "HTTP/1.1 " + Itoa(code) + " " + status + EOL)) <
       0)
     return;
+  WriteString(fd, "Connection: close" EOL);
   WriteString(fd, "Location: " + url + EOL);
 }
 
@@ -658,5 +660,4 @@
       LOG(FATAL) << "ERROR on accept";
     HandleConnection(client_fd);
   }
-  return 0;
 }
diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc
deleted file mode 100644
index 579c736..0000000
--- a/update_attempter_unittest.cc
+++ /dev/null
@@ -1,1569 +0,0 @@
-//
-// Copyright (C) 2012 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/update_attempter.h"
-
-#include <stdint.h>
-
-#include <memory>
-
-#include <base/files/file_util.h>
-#include <base/message_loop/message_loop.h>
-#include <brillo/message_loops/base_message_loop.h>
-#include <brillo/message_loops/message_loop.h>
-#include <brillo/message_loops/message_loop_utils.h>
-#include <gtest/gtest.h>
-#include <policy/libpolicy.h>
-#include <policy/mock_device_policy.h>
-#include <policy/mock_libpolicy.h>
-
-#include "update_engine/common/dlcservice_interface.h"
-#include "update_engine/common/fake_clock.h"
-#include "update_engine/common/fake_prefs.h"
-#include "update_engine/common/mock_action.h"
-#include "update_engine/common/mock_action_processor.h"
-#include "update_engine/common/mock_http_fetcher.h"
-#include "update_engine/common/mock_prefs.h"
-#include "update_engine/common/platform_constants.h"
-#include "update_engine/common/prefs.h"
-#include "update_engine/common/test_utils.h"
-#include "update_engine/common/utils.h"
-#include "update_engine/fake_system_state.h"
-#include "update_engine/mock_p2p_manager.h"
-#include "update_engine/mock_payload_state.h"
-#include "update_engine/mock_service_observer.h"
-#include "update_engine/payload_consumer/filesystem_verifier_action.h"
-#include "update_engine/payload_consumer/install_plan.h"
-#include "update_engine/payload_consumer/payload_constants.h"
-#include "update_engine/payload_consumer/postinstall_runner_action.h"
-#include "update_engine/update_boot_flags_action.h"
-
-using base::Time;
-using base::TimeDelta;
-using chromeos_update_manager::EvalStatus;
-using chromeos_update_manager::StagingSchedule;
-using chromeos_update_manager::UpdateCheckParams;
-using policy::DevicePolicy;
-using std::string;
-using std::unique_ptr;
-using std::vector;
-using testing::_;
-using testing::DoAll;
-using testing::Field;
-using testing::InSequence;
-using testing::Ne;
-using testing::NiceMock;
-using testing::Pointee;
-using testing::Property;
-using testing::Return;
-using testing::ReturnPointee;
-using testing::ReturnRef;
-using testing::SaveArg;
-using testing::SetArgPointee;
-using update_engine::UpdateAttemptFlags;
-using update_engine::UpdateEngineStatus;
-using update_engine::UpdateStatus;
-
-namespace chromeos_update_engine {
-
-namespace {
-
-class MockDlcService : public DlcServiceInterface {
- public:
-  MOCK_METHOD1(GetInstalled, bool(vector<string>*));
-};
-
-}  // namespace
-
-const char kRollbackVersion[] = "10575.39.2";
-
-// Test a subclass rather than the main class directly so that we can mock out
-// methods within the class. There're explicit unit tests for the mocked out
-// methods.
-class UpdateAttempterUnderTest : public UpdateAttempter {
- public:
-  explicit UpdateAttempterUnderTest(SystemState* system_state)
-      : UpdateAttempter(system_state, nullptr) {}
-
-  // Wrap the update scheduling method, allowing us to opt out of scheduled
-  // updates for testing purposes.
-  bool ScheduleUpdates() override {
-    schedule_updates_called_ = true;
-    if (do_schedule_updates_) {
-      UpdateAttempter::ScheduleUpdates();
-    } else {
-      LOG(INFO) << "[TEST] Update scheduling disabled.";
-    }
-    return true;
-  }
-  void EnableScheduleUpdates() { do_schedule_updates_ = true; }
-  void DisableScheduleUpdates() { do_schedule_updates_ = false; }
-
-  // Indicates whether ScheduleUpdates() was called.
-  bool schedule_updates_called() const { return schedule_updates_called_; }
-
-  // Need to expose forced_omaha_url_ so we can test it.
-  const string& forced_omaha_url() const { return forced_omaha_url_; }
-
- private:
-  bool schedule_updates_called_ = false;
-  bool do_schedule_updates_ = true;
-};
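-// Usage sketch: a test that needs to drive the attempter without the
-// DefaultPolicy immediately rescheduling another check can call
-// DisableScheduleUpdates() up front and later assert
-// schedule_updates_called() to confirm the scheduling hook was still
-// reached; PingOmahaTest below follows this pattern.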
-
-class UpdateAttempterTest : public ::testing::Test {
- protected:
-  UpdateAttempterTest()
-      : certificate_checker_(fake_system_state_.mock_prefs(),
-                             &openssl_wrapper_) {
-    // Override system state members.
-    fake_system_state_.set_connection_manager(&mock_connection_manager);
-    fake_system_state_.set_update_attempter(&attempter_);
-    fake_system_state_.set_dlcservice(&mock_dlcservice_);
-    loop_.SetAsCurrent();
-
-    certificate_checker_.Init();
-
-    attempter_.set_forced_update_pending_callback(
-        new base::Callback<void(bool, bool)>(base::Bind([](bool, bool) {})));
-    // Finish initializing the attempter.
-    attempter_.Init();
-  }
-
-  void SetUp() override {
-    EXPECT_NE(nullptr, attempter_.system_state_);
-    EXPECT_EQ(0, attempter_.http_response_code_);
-    EXPECT_EQ(UpdateStatus::IDLE, attempter_.status_);
-    EXPECT_EQ(0.0, attempter_.download_progress_);
-    EXPECT_EQ(0, attempter_.last_checked_time_);
-    EXPECT_EQ("0.0.0.0", attempter_.new_version_);
-    EXPECT_EQ(0ULL, attempter_.new_payload_size_);
-    processor_ = new NiceMock<MockActionProcessor>();
-    attempter_.processor_.reset(processor_);  // Transfers ownership.
-    prefs_ = fake_system_state_.mock_prefs();
-
-    // Set up store/load semantics of P2P properties via the mock PayloadState.
-    actual_using_p2p_for_downloading_ = false;
-    EXPECT_CALL(*fake_system_state_.mock_payload_state(),
-                SetUsingP2PForDownloading(_))
-        .WillRepeatedly(SaveArg<0>(&actual_using_p2p_for_downloading_));
-    EXPECT_CALL(*fake_system_state_.mock_payload_state(),
-                GetUsingP2PForDownloading())
-        .WillRepeatedly(ReturnPointee(&actual_using_p2p_for_downloading_));
-    actual_using_p2p_for_sharing_ = false;
-    EXPECT_CALL(*fake_system_state_.mock_payload_state(),
-                SetUsingP2PForSharing(_))
-        .WillRepeatedly(SaveArg<0>(&actual_using_p2p_for_sharing_));
-    EXPECT_CALL(*fake_system_state_.mock_payload_state(),
-                GetUsingP2PForSharing())
-        .WillRepeatedly(ReturnPointee(&actual_using_p2p_for_sharing_));
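-    // The SaveArg/ReturnPointee pairs above give the mock PayloadState real
-    // store/load behavior: a SetUsingP2PFor*() call stores its argument in
-    // the local bool, and the matching getter returns whatever that bool
-    // currently holds, so tests can observe the values through
-    // actual_using_p2p_for_downloading() / actual_using_p2p_for_sharing().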
-  }
-
- public:
-  void ScheduleQuitMainLoop();
-
-  // Callbacks to run the different tests from the main loop.
-  void UpdateTestStart();
-  void UpdateTestVerify();
-  void RollbackTestStart(bool enterprise_rollback, bool valid_slot);
-  void RollbackTestVerify();
-  void PingOmahaTestStart();
-  void ReadScatterFactorFromPolicyTestStart();
-  void DecrementUpdateCheckCountTestStart();
-  void NoScatteringDoneDuringManualUpdateTestStart();
-  void P2PNotEnabledStart();
-  void P2PEnabledStart();
-  void P2PEnabledInteractiveStart();
-  void P2PEnabledStartingFailsStart();
-  void P2PEnabledHousekeepingFailsStart();
-  void ResetRollbackHappenedStart(bool is_consumer,
-                                  bool is_policy_loaded,
-                                  bool expected_reset);
-  // Staging related callbacks.
-  void SetUpStagingTest(const StagingSchedule& schedule, FakePrefs* prefs);
-  void CheckStagingOff();
-  void StagingSetsPrefsAndTurnsOffScatteringStart();
-  void StagingOffIfInteractiveStart();
-  void StagingOffIfOobeStart();
-
-  bool actual_using_p2p_for_downloading() {
-    return actual_using_p2p_for_downloading_;
-  }
-  bool actual_using_p2p_for_sharing() { return actual_using_p2p_for_sharing_; }
-
-  base::MessageLoopForIO base_loop_;
-  brillo::BaseMessageLoop loop_{&base_loop_};
-
-  FakeSystemState fake_system_state_;
-  UpdateAttempterUnderTest attempter_{&fake_system_state_};
-  OpenSSLWrapper openssl_wrapper_;
-  CertificateChecker certificate_checker_;
-  MockDlcService mock_dlcservice_;
-
-  NiceMock<MockActionProcessor>* processor_;
-  NiceMock<MockPrefs>* prefs_;  // Shortcut to fake_system_state_->mock_prefs().
-  NiceMock<MockConnectionManager> mock_connection_manager;
-
-  bool actual_using_p2p_for_downloading_;
-  bool actual_using_p2p_for_sharing_;
-};
-
-void UpdateAttempterTest::ScheduleQuitMainLoop() {
-  loop_.PostTask(
-      FROM_HERE,
-      base::Bind([](brillo::BaseMessageLoop* loop) { loop->BreakLoop(); },
-                 base::Unretained(&loop_)));
-}
-
-TEST_F(UpdateAttempterTest, ActionCompletedDownloadTest) {
-  unique_ptr<MockHttpFetcher> fetcher(new MockHttpFetcher("", 0, nullptr));
-  fetcher->FailTransfer(503);  // Sets the HTTP response code.
-  DownloadAction action(prefs_,
-                        nullptr,
-                        nullptr,
-                        nullptr,
-                        fetcher.release(),
-                        false /* interactive */);
-  EXPECT_CALL(*prefs_, GetInt64(kPrefsDeltaUpdateFailures, _)).Times(0);
-  attempter_.ActionCompleted(nullptr, &action, ErrorCode::kSuccess);
-  EXPECT_EQ(UpdateStatus::FINALIZING, attempter_.status());
-  EXPECT_EQ(0.0, attempter_.download_progress_);
-  ASSERT_EQ(nullptr, attempter_.error_event_.get());
-}
-
-TEST_F(UpdateAttempterTest, ActionCompletedErrorTest) {
-  MockAction action;
-  EXPECT_CALL(action, Type()).WillRepeatedly(Return("MockAction"));
-  attempter_.status_ = UpdateStatus::DOWNLOADING;
-  EXPECT_CALL(*prefs_, GetInt64(kPrefsDeltaUpdateFailures, _))
-      .WillOnce(Return(false));
-  attempter_.ActionCompleted(nullptr, &action, ErrorCode::kError);
-  ASSERT_NE(nullptr, attempter_.error_event_.get());
-}
-
-TEST_F(UpdateAttempterTest, DownloadProgressAccumulationTest) {
-  // Simple test case where all the values match (nothing was skipped).
-  uint64_t bytes_progressed_1 = 1024 * 1024;  // 1MB
-  uint64_t bytes_progressed_2 = 1024 * 1024;  // 1MB
-  uint64_t bytes_received_1 = bytes_progressed_1;
-  uint64_t bytes_received_2 = bytes_received_1 + bytes_progressed_2;
-  uint64_t bytes_total = 20 * 1024 * 1024;  // 20MB
-
-  double progress_1 =
-      static_cast<double>(bytes_received_1) / static_cast<double>(bytes_total);
-  double progress_2 =
-      static_cast<double>(bytes_received_2) / static_cast<double>(bytes_total);
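-  // With these inputs the expected fractions are 1 MiB / 20 MiB = 0.05 after
-  // the first call and 2 MiB / 20 MiB = 0.10 after the second; the
-  // EXPECT_CALLs below pin the broadcast progress to exactly these values.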
-
-  EXPECT_EQ(0.0, attempter_.download_progress_);
-  // This is set via inspecting the InstallPlan payloads when the
-  // OmahaResponseAction is completed
-  attempter_.new_payload_size_ = bytes_total;
-  NiceMock<MockServiceObserver> observer;
-  EXPECT_CALL(observer,
-              SendStatusUpdate(AllOf(
-                  Field(&UpdateEngineStatus::progress, progress_1),
-                  Field(&UpdateEngineStatus::status, UpdateStatus::DOWNLOADING),
-                  Field(&UpdateEngineStatus::new_size_bytes, bytes_total))));
-  EXPECT_CALL(observer,
-              SendStatusUpdate(AllOf(
-                  Field(&UpdateEngineStatus::progress, progress_2),
-                  Field(&UpdateEngineStatus::status, UpdateStatus::DOWNLOADING),
-                  Field(&UpdateEngineStatus::new_size_bytes, bytes_total))));
-  attempter_.AddObserver(&observer);
-  attempter_.BytesReceived(bytes_progressed_1, bytes_received_1, bytes_total);
-  EXPECT_EQ(progress_1, attempter_.download_progress_);
-  // This iteration validates that a later set of updates to the variables is
-  // properly handled (so that |getStatus()| will return the same progress info
-  // as the callback is receiving).
-  attempter_.BytesReceived(bytes_progressed_2, bytes_received_2, bytes_total);
-  EXPECT_EQ(progress_2, attempter_.download_progress_);
-}
-
-TEST_F(UpdateAttempterTest, ChangeToDownloadingOnReceivedBytesTest) {
-  // The transition into UpdateStatus::DOWNLOADING happens when the
-  // first bytes are received.
-  uint64_t bytes_progressed = 1024 * 1024;    // 1MB
-  uint64_t bytes_received = 2 * 1024 * 1024;  // 2MB
-  uint64_t bytes_total = 20 * 1024 * 1024;    // 20MB
-  attempter_.status_ = UpdateStatus::CHECKING_FOR_UPDATE;
-  // This is set via inspecting the InstallPlan payloads when the
-  // OmahaResponseAction is completed
-  attempter_.new_payload_size_ = bytes_total;
-  EXPECT_EQ(0.0, attempter_.download_progress_);
-  NiceMock<MockServiceObserver> observer;
-  EXPECT_CALL(observer,
-              SendStatusUpdate(AllOf(
-                  Field(&UpdateEngineStatus::status, UpdateStatus::DOWNLOADING),
-                  Field(&UpdateEngineStatus::new_size_bytes, bytes_total))));
-  attempter_.AddObserver(&observer);
-  attempter_.BytesReceived(bytes_progressed, bytes_received, bytes_total);
-  EXPECT_EQ(UpdateStatus::DOWNLOADING, attempter_.status_);
-}
-
-TEST_F(UpdateAttempterTest, BroadcastCompleteDownloadTest) {
-  // There is a special case to ensure that at 100% downloaded,
-  // download_progress_ is updated and that the value is broadcast. This test
-  // confirms that.
-  uint64_t bytes_progressed = 0;              // ignored
-  uint64_t bytes_received = 5 * 1024 * 1024;  // equals bytes_total (100%)
-  uint64_t bytes_total = 5 * 1024 * 1024;     // 5MB
-  attempter_.status_ = UpdateStatus::DOWNLOADING;
-  attempter_.new_payload_size_ = bytes_total;
-  EXPECT_EQ(0.0, attempter_.download_progress_);
-  NiceMock<MockServiceObserver> observer;
-  EXPECT_CALL(observer,
-              SendStatusUpdate(AllOf(
-                  Field(&UpdateEngineStatus::progress, 1.0),
-                  Field(&UpdateEngineStatus::status, UpdateStatus::DOWNLOADING),
-                  Field(&UpdateEngineStatus::new_size_bytes, bytes_total))));
-  attempter_.AddObserver(&observer);
-  attempter_.BytesReceived(bytes_progressed, bytes_received, bytes_total);
-  EXPECT_EQ(1.0, attempter_.download_progress_);
-}
-
-TEST_F(UpdateAttempterTest, ActionCompletedOmahaRequestTest) {
-  unique_ptr<MockHttpFetcher> fetcher(new MockHttpFetcher("", 0, nullptr));
-  fetcher->FailTransfer(500);  // Sets the HTTP response code.
-  OmahaRequestAction action(
-      &fake_system_state_, nullptr, std::move(fetcher), false);
-  ObjectCollectorAction<OmahaResponse> collector_action;
-  BondActions(&action, &collector_action);
-  OmahaResponse response;
-  response.poll_interval = 234;
-  action.SetOutputObject(response);
-  EXPECT_CALL(*prefs_, GetInt64(kPrefsDeltaUpdateFailures, _)).Times(0);
-  attempter_.ActionCompleted(nullptr, &action, ErrorCode::kSuccess);
-  EXPECT_EQ(500, attempter_.http_response_code());
-  EXPECT_EQ(UpdateStatus::IDLE, attempter_.status());
-  EXPECT_EQ(234U, attempter_.server_dictated_poll_interval_);
-  ASSERT_TRUE(attempter_.error_event_.get() == nullptr);
-}
-
-TEST_F(UpdateAttempterTest, ConstructWithUpdatedMarkerTest) {
-  FakePrefs fake_prefs;
-  string boot_id;
-  EXPECT_TRUE(utils::GetBootId(&boot_id));
-  fake_prefs.SetString(kPrefsUpdateCompletedOnBootId, boot_id);
-  fake_system_state_.set_prefs(&fake_prefs);
-  attempter_.Init();
-  EXPECT_EQ(UpdateStatus::UPDATED_NEED_REBOOT, attempter_.status());
-}
-
-TEST_F(UpdateAttempterTest, GetErrorCodeForActionTest) {
-  EXPECT_EQ(ErrorCode::kSuccess,
-            GetErrorCodeForAction(nullptr, ErrorCode::kSuccess));
-
-  FakeSystemState fake_system_state;
-  OmahaRequestAction omaha_request_action(
-      &fake_system_state, nullptr, nullptr, false);
-  EXPECT_EQ(ErrorCode::kOmahaRequestError,
-            GetErrorCodeForAction(&omaha_request_action, ErrorCode::kError));
-  OmahaResponseHandlerAction omaha_response_handler_action(&fake_system_state_);
-  EXPECT_EQ(
-      ErrorCode::kOmahaResponseHandlerError,
-      GetErrorCodeForAction(&omaha_response_handler_action, ErrorCode::kError));
-  FilesystemVerifierAction filesystem_verifier_action;
-  EXPECT_EQ(
-      ErrorCode::kFilesystemVerifierError,
-      GetErrorCodeForAction(&filesystem_verifier_action, ErrorCode::kError));
-  PostinstallRunnerAction postinstall_runner_action(
-      fake_system_state.fake_boot_control(), fake_system_state.fake_hardware());
-  EXPECT_EQ(
-      ErrorCode::kPostinstallRunnerError,
-      GetErrorCodeForAction(&postinstall_runner_action, ErrorCode::kError));
-  MockAction action_mock;
-  EXPECT_CALL(action_mock, Type()).WillOnce(Return("MockAction"));
-  EXPECT_EQ(ErrorCode::kError,
-            GetErrorCodeForAction(&action_mock, ErrorCode::kError));
-}
-
-TEST_F(UpdateAttempterTest, DisableDeltaUpdateIfNeededTest) {
-  attempter_.omaha_request_params_->set_delta_okay(true);
-  EXPECT_CALL(*prefs_, GetInt64(kPrefsDeltaUpdateFailures, _))
-      .WillOnce(Return(false));
-  attempter_.DisableDeltaUpdateIfNeeded();
-  EXPECT_TRUE(attempter_.omaha_request_params_->delta_okay());
-  EXPECT_CALL(*prefs_, GetInt64(kPrefsDeltaUpdateFailures, _))
-      .WillOnce(
-          DoAll(SetArgPointee<1>(UpdateAttempter::kMaxDeltaUpdateFailures - 1),
-                Return(true)));
-  attempter_.DisableDeltaUpdateIfNeeded();
-  EXPECT_TRUE(attempter_.omaha_request_params_->delta_okay());
-  EXPECT_CALL(*prefs_, GetInt64(kPrefsDeltaUpdateFailures, _))
-      .WillOnce(
-          DoAll(SetArgPointee<1>(UpdateAttempter::kMaxDeltaUpdateFailures),
-                Return(true)));
-  attempter_.DisableDeltaUpdateIfNeeded();
-  EXPECT_FALSE(attempter_.omaha_request_params_->delta_okay());
-  EXPECT_CALL(*prefs_, GetInt64(_, _)).Times(0);
-  attempter_.DisableDeltaUpdateIfNeeded();
-  EXPECT_FALSE(attempter_.omaha_request_params_->delta_okay());
-}
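-// A way to read the test above: delta updates stay enabled until the failure
-// count reaches kMaxDeltaUpdateFailures, and once delta_okay has been turned
-// off the pref is not consulted again (the final EXPECT_CALL allows zero
-// GetInt64 calls), so the disabled state is presumably sticky for the
-// lifetime of the attempter.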
-
-TEST_F(UpdateAttempterTest, MarkDeltaUpdateFailureTest) {
-  EXPECT_CALL(*prefs_, GetInt64(kPrefsDeltaUpdateFailures, _))
-      .WillOnce(Return(false))
-      .WillOnce(DoAll(SetArgPointee<1>(-1), Return(true)))
-      .WillOnce(DoAll(SetArgPointee<1>(1), Return(true)))
-      .WillOnce(
-          DoAll(SetArgPointee<1>(UpdateAttempter::kMaxDeltaUpdateFailures),
-                Return(true)));
-  EXPECT_CALL(*prefs_, SetInt64(Ne(kPrefsDeltaUpdateFailures), _))
-      .WillRepeatedly(Return(true));
-  EXPECT_CALL(*prefs_, SetInt64(kPrefsDeltaUpdateFailures, 1)).Times(2);
-  EXPECT_CALL(*prefs_, SetInt64(kPrefsDeltaUpdateFailures, 2));
-  EXPECT_CALL(*prefs_,
-              SetInt64(kPrefsDeltaUpdateFailures,
-                       UpdateAttempter::kMaxDeltaUpdateFailures + 1));
-  for (int i = 0; i < 4; i++)
-    attempter_.MarkDeltaUpdateFailure();
-}
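-// The expected writes above presumably follow from MarkDeltaUpdateFailure()
-// treating a failed read or a negative stored value as zero: both of the
-// first two iterations write 0 + 1 = 1, the third writes 1 + 1 = 2, and the
-// fourth writes kMaxDeltaUpdateFailures + 1.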
-
-TEST_F(UpdateAttempterTest, ScheduleErrorEventActionNoEventTest) {
-  EXPECT_CALL(*processor_, EnqueueAction(_)).Times(0);
-  EXPECT_CALL(*processor_, StartProcessing()).Times(0);
-  EXPECT_CALL(*fake_system_state_.mock_payload_state(), UpdateFailed(_))
-      .Times(0);
-  OmahaResponse response;
-  string url1 = "http://url1";
-  response.packages.push_back({.payload_urls = {url1, "https://url"}});
-  EXPECT_CALL(*(fake_system_state_.mock_payload_state()), GetCurrentUrl())
-      .WillRepeatedly(Return(url1));
-  fake_system_state_.mock_payload_state()->SetResponse(response);
-  attempter_.ScheduleErrorEventAction();
-  EXPECT_EQ(url1, fake_system_state_.mock_payload_state()->GetCurrentUrl());
-}
-
-TEST_F(UpdateAttempterTest, ScheduleErrorEventActionTest) {
-  EXPECT_CALL(*processor_,
-              EnqueueAction(Pointee(Property(
-                  &AbstractAction::Type, OmahaRequestAction::StaticType()))));
-  EXPECT_CALL(*processor_, StartProcessing());
-  ErrorCode err = ErrorCode::kError;
-  EXPECT_CALL(*fake_system_state_.mock_payload_state(), UpdateFailed(err));
-  attempter_.error_event_.reset(new OmahaEvent(
-      OmahaEvent::kTypeUpdateComplete, OmahaEvent::kResultError, err));
-  attempter_.ScheduleErrorEventAction();
-  EXPECT_EQ(UpdateStatus::REPORTING_ERROR_EVENT, attempter_.status());
-}
-
-namespace {
-// Actions that will be built as part of an update check.
-const string kUpdateActionTypes[] = {  // NOLINT(runtime/string)
-    OmahaRequestAction::StaticType(),
-    OmahaResponseHandlerAction::StaticType(),
-    UpdateBootFlagsAction::StaticType(),
-    OmahaRequestAction::StaticType(),
-    DownloadAction::StaticType(),
-    OmahaRequestAction::StaticType(),
-    FilesystemVerifierAction::StaticType(),
-    PostinstallRunnerAction::StaticType(),
-    OmahaRequestAction::StaticType()};
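-// OmahaRequestAction appears several times in the list above; the repeated
-// entries are presumably the event/ping requests issued between the main
-// phases (after the response is handled, after the download finishes, and
-// once the update is complete), while the first entry is the actual update
-// check.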
-
-// Actions that will be built as part of a user-initiated rollback.
-const string kRollbackActionTypes[] = {  // NOLINT(runtime/string)
-    InstallPlanAction::StaticType(),
-    PostinstallRunnerAction::StaticType(),
-};
-
-const StagingSchedule kValidStagingSchedule = {
-    {4, 10}, {10, 40}, {19, 70}, {26, 100}};
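-// Each entry above is assumed to be a {days, percentage} pair, so the longest
-// possible wait under this schedule is 26 days;
-// StagingSetsPrefsAndTurnsOffScatteringStart() below only checks that the
-// computed wait period is strictly positive and expressed in whole days.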
-
-}  // namespace
-
-void UpdateAttempterTest::UpdateTestStart() {
-  attempter_.set_http_response_code(200);
-
-  // Expect that the device policy is loaded by the UpdateAttempter at some
-  // point by calling RefreshDevicePolicy.
-  auto device_policy = std::make_unique<policy::MockDevicePolicy>();
-  EXPECT_CALL(*device_policy, LoadPolicy())
-      .Times(testing::AtLeast(1))
-      .WillRepeatedly(Return(true));
-  attempter_.policy_provider_.reset(
-      new policy::PolicyProvider(std::move(device_policy)));
-
-  {
-    InSequence s;
-    for (size_t i = 0; i < arraysize(kUpdateActionTypes); ++i) {
-      EXPECT_CALL(*processor_,
-                  EnqueueAction(Pointee(
-                      Property(&AbstractAction::Type, kUpdateActionTypes[i]))));
-    }
-    EXPECT_CALL(*processor_, StartProcessing());
-  }
-
-  attempter_.Update("", "", "", "", false, false, false);
-  loop_.PostTask(FROM_HERE,
-                 base::Bind(&UpdateAttempterTest::UpdateTestVerify,
-                            base::Unretained(this)));
-}
-
-void UpdateAttempterTest::UpdateTestVerify() {
-  EXPECT_EQ(0, attempter_.http_response_code());
-  EXPECT_EQ(&attempter_, processor_->delegate());
-  EXPECT_EQ(UpdateStatus::CHECKING_FOR_UPDATE, attempter_.status());
-  loop_.BreakLoop();
-}
-
-void UpdateAttempterTest::RollbackTestStart(bool enterprise_rollback,
-                                            bool valid_slot) {
-  // Create a device policy so that we can change settings.
-  auto device_policy = std::make_unique<policy::MockDevicePolicy>();
-  EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true));
-  fake_system_state_.set_device_policy(device_policy.get());
-  if (enterprise_rollback) {
-    // We return an empty owner as this is an enterprise-enrolled device.
-    EXPECT_CALL(*device_policy, GetOwner(_))
-        .WillRepeatedly(DoAll(SetArgPointee<0>(string("")), Return(true)));
-  } else {
-    // We return a fake owner as this is an owned consumer device.
-    EXPECT_CALL(*device_policy, GetOwner(_))
-        .WillRepeatedly(DoAll(SetArgPointee<0>(string("fake.mail@fake.com")),
-                              Return(true)));
-  }
-
-  attempter_.policy_provider_.reset(
-      new policy::PolicyProvider(std::move(device_policy)));
-
-  if (valid_slot) {
-    BootControlInterface::Slot rollback_slot = 1;
-    LOG(INFO) << "Test Mark Bootable: "
-              << BootControlInterface::SlotName(rollback_slot);
-    fake_system_state_.fake_boot_control()->SetSlotBootable(rollback_slot,
-                                                            true);
-  }
-
-  bool is_rollback_allowed = false;
-
-  // We only allow rollback on devices that are not enterprise enrolled and
-  // which have a valid slot to roll back to.
-  if (!enterprise_rollback && valid_slot) {
-    is_rollback_allowed = true;
-  }
-
-  if (is_rollback_allowed) {
-    InSequence s;
-    for (size_t i = 0; i < arraysize(kRollbackActionTypes); ++i) {
-      EXPECT_CALL(*processor_,
-                  EnqueueAction(Pointee(Property(&AbstractAction::Type,
-                                                 kRollbackActionTypes[i]))));
-    }
-    EXPECT_CALL(*processor_, StartProcessing());
-
-    EXPECT_TRUE(attempter_.Rollback(true));
-    loop_.PostTask(FROM_HERE,
-                   base::Bind(&UpdateAttempterTest::RollbackTestVerify,
-                              base::Unretained(this)));
-  } else {
-    EXPECT_FALSE(attempter_.Rollback(true));
-    loop_.BreakLoop();
-  }
-}
-
-void UpdateAttempterTest::RollbackTestVerify() {
-  // Verifies the actions that were enqueued.
-  EXPECT_EQ(&attempter_, processor_->delegate());
-  EXPECT_EQ(UpdateStatus::ATTEMPTING_ROLLBACK, attempter_.status());
-  EXPECT_EQ(0U, attempter_.install_plan_->partitions.size());
-  EXPECT_EQ(attempter_.install_plan_->powerwash_required, true);
-  loop_.BreakLoop();
-}
-
-TEST_F(UpdateAttempterTest, UpdateTest) {
-  UpdateTestStart();
-  loop_.Run();
-}
-
-TEST_F(UpdateAttempterTest, RollbackTest) {
-  loop_.PostTask(FROM_HERE,
-                 base::Bind(&UpdateAttempterTest::RollbackTestStart,
-                            base::Unretained(this),
-                            false,
-                            true));
-  loop_.Run();
-}
-
-TEST_F(UpdateAttempterTest, InvalidSlotRollbackTest) {
-  loop_.PostTask(FROM_HERE,
-                 base::Bind(&UpdateAttempterTest::RollbackTestStart,
-                            base::Unretained(this),
-                            false,
-                            false));
-  loop_.Run();
-}
-
-TEST_F(UpdateAttempterTest, EnterpriseRollbackTest) {
-  loop_.PostTask(FROM_HERE,
-                 base::Bind(&UpdateAttempterTest::RollbackTestStart,
-                            base::Unretained(this),
-                            true,
-                            true));
-  loop_.Run();
-}
-
-void UpdateAttempterTest::PingOmahaTestStart() {
-  EXPECT_CALL(*processor_,
-              EnqueueAction(Pointee(Property(
-                  &AbstractAction::Type, OmahaRequestAction::StaticType()))));
-  EXPECT_CALL(*processor_, StartProcessing());
-  attempter_.PingOmaha();
-  ScheduleQuitMainLoop();
-}
-
-TEST_F(UpdateAttempterTest, PingOmahaTest) {
-  EXPECT_FALSE(attempter_.waiting_for_scheduled_check_);
-  EXPECT_FALSE(attempter_.schedule_updates_called());
-  // Disable scheduling of subsequent checks; we're using the DefaultPolicy in
-  // testing, which is more permissive than we want to handle here.
-  attempter_.DisableScheduleUpdates();
-  loop_.PostTask(FROM_HERE,
-                 base::Bind(&UpdateAttempterTest::PingOmahaTestStart,
-                            base::Unretained(this)));
-  brillo::MessageLoopRunMaxIterations(&loop_, 100);
-  EXPECT_EQ(UpdateStatus::UPDATED_NEED_REBOOT, attempter_.status());
-  EXPECT_TRUE(attempter_.schedule_updates_called());
-}
-
-TEST_F(UpdateAttempterTest, CreatePendingErrorEventTest) {
-  MockAction action;
-  const ErrorCode kCode = ErrorCode::kDownloadTransferError;
-  attempter_.CreatePendingErrorEvent(&action, kCode);
-  ASSERT_NE(nullptr, attempter_.error_event_.get());
-  EXPECT_EQ(OmahaEvent::kTypeUpdateComplete, attempter_.error_event_->type);
-  EXPECT_EQ(OmahaEvent::kResultError, attempter_.error_event_->result);
-  EXPECT_EQ(
-      static_cast<ErrorCode>(static_cast<int>(kCode) |
-                             static_cast<int>(ErrorCode::kTestOmahaUrlFlag)),
-      attempter_.error_event_->error_code);
-}
-
-TEST_F(UpdateAttempterTest, CreatePendingErrorEventResumedTest) {
-  attempter_.install_plan_.reset(new InstallPlan);
-  attempter_.install_plan_->is_resume = true;
-  MockAction action;
-  const ErrorCode kCode = ErrorCode::kInstallDeviceOpenError;
-  attempter_.CreatePendingErrorEvent(&action, kCode);
-  ASSERT_NE(nullptr, attempter_.error_event_.get());
-  EXPECT_EQ(OmahaEvent::kTypeUpdateComplete, attempter_.error_event_->type);
-  EXPECT_EQ(OmahaEvent::kResultError, attempter_.error_event_->result);
-  EXPECT_EQ(
-      static_cast<ErrorCode>(static_cast<int>(kCode) |
-                             static_cast<int>(ErrorCode::kResumedFlag) |
-                             static_cast<int>(ErrorCode::kTestOmahaUrlFlag)),
-      attempter_.error_event_->error_code);
-}
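-// Both CreatePendingErrorEvent tests above rely on ErrorCode being a bit
-// field: the event's error_code is the base code OR'ed with
-// kTestOmahaUrlFlag (presumably because the fake/test environment is treated
-// as using a test Omaha URL), plus kResumedFlag when the install plan is a
-// resume.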
-
-TEST_F(UpdateAttempterTest, P2PNotStartedAtStartupWhenNotEnabled) {
-  MockP2PManager mock_p2p_manager;
-  fake_system_state_.set_p2p_manager(&mock_p2p_manager);
-  mock_p2p_manager.fake().SetP2PEnabled(false);
-  EXPECT_CALL(mock_p2p_manager, EnsureP2PRunning()).Times(0);
-  attempter_.UpdateEngineStarted();
-}
-
-TEST_F(UpdateAttempterTest, P2PNotStartedAtStartupWhenEnabledButNotSharing) {
-  MockP2PManager mock_p2p_manager;
-  fake_system_state_.set_p2p_manager(&mock_p2p_manager);
-  mock_p2p_manager.fake().SetP2PEnabled(true);
-  EXPECT_CALL(mock_p2p_manager, EnsureP2PRunning()).Times(0);
-  attempter_.UpdateEngineStarted();
-}
-
-TEST_F(UpdateAttempterTest, P2PStartedAtStartupWhenEnabledAndSharing) {
-  MockP2PManager mock_p2p_manager;
-  fake_system_state_.set_p2p_manager(&mock_p2p_manager);
-  mock_p2p_manager.fake().SetP2PEnabled(true);
-  mock_p2p_manager.fake().SetCountSharedFilesResult(1);
-  EXPECT_CALL(mock_p2p_manager, EnsureP2PRunning());
-  attempter_.UpdateEngineStarted();
-}
-
-TEST_F(UpdateAttempterTest, P2PNotEnabled) {
-  loop_.PostTask(FROM_HERE,
-                 base::Bind(&UpdateAttempterTest::P2PNotEnabledStart,
-                            base::Unretained(this)));
-  loop_.Run();
-}
-
-void UpdateAttempterTest::P2PNotEnabledStart() {
-  // If P2P is not enabled, check that we do not attempt housekeeping
-  // and do not convey that p2p is to be used.
-  MockP2PManager mock_p2p_manager;
-  fake_system_state_.set_p2p_manager(&mock_p2p_manager);
-  mock_p2p_manager.fake().SetP2PEnabled(false);
-  EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()).Times(0);
-  attempter_.Update("", "", "", "", false, false, false);
-  EXPECT_FALSE(actual_using_p2p_for_downloading_);
-  EXPECT_FALSE(actual_using_p2p_for_sharing());
-  ScheduleQuitMainLoop();
-}
-
-TEST_F(UpdateAttempterTest, P2PEnabledStartingFails) {
-  loop_.PostTask(FROM_HERE,
-                 base::Bind(&UpdateAttempterTest::P2PEnabledStartingFailsStart,
-                            base::Unretained(this)));
-  loop_.Run();
-}
-
-void UpdateAttempterTest::P2PEnabledStartingFailsStart() {
-  // If p2p is enabled but starting it fails, ensure we don't do
-  // any housekeeping and do not convey that p2p should be used.
-  MockP2PManager mock_p2p_manager;
-  fake_system_state_.set_p2p_manager(&mock_p2p_manager);
-  mock_p2p_manager.fake().SetP2PEnabled(true);
-  mock_p2p_manager.fake().SetEnsureP2PRunningResult(false);
-  mock_p2p_manager.fake().SetPerformHousekeepingResult(false);
-  EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()).Times(0);
-  attempter_.Update("", "", "", "", false, false, false);
-  EXPECT_FALSE(actual_using_p2p_for_downloading());
-  EXPECT_FALSE(actual_using_p2p_for_sharing());
-  ScheduleQuitMainLoop();
-}
-
-TEST_F(UpdateAttempterTest, P2PEnabledHousekeepingFails) {
-  loop_.PostTask(
-      FROM_HERE,
-      base::Bind(&UpdateAttempterTest::P2PEnabledHousekeepingFailsStart,
-                 base::Unretained(this)));
-  loop_.Run();
-}
-
-void UpdateAttempterTest::P2PEnabledHousekeepingFailsStart() {
-  // If p2p is enabled and starting it works but housekeeping fails, ensure
-  // we do not convey that p2p is to be used.
-  MockP2PManager mock_p2p_manager;
-  fake_system_state_.set_p2p_manager(&mock_p2p_manager);
-  mock_p2p_manager.fake().SetP2PEnabled(true);
-  mock_p2p_manager.fake().SetEnsureP2PRunningResult(true);
-  mock_p2p_manager.fake().SetPerformHousekeepingResult(false);
-  EXPECT_CALL(mock_p2p_manager, PerformHousekeeping());
-  attempter_.Update("", "", "", "", false, false, false);
-  EXPECT_FALSE(actual_using_p2p_for_downloading());
-  EXPECT_FALSE(actual_using_p2p_for_sharing());
-  ScheduleQuitMainLoop();
-}
-
-TEST_F(UpdateAttempterTest, P2PEnabled) {
-  loop_.PostTask(FROM_HERE,
-                 base::Bind(&UpdateAttempterTest::P2PEnabledStart,
-                            base::Unretained(this)));
-  loop_.Run();
-}
-
-void UpdateAttempterTest::P2PEnabledStart() {
-  MockP2PManager mock_p2p_manager;
-  fake_system_state_.set_p2p_manager(&mock_p2p_manager);
-  // If P2P is enabled and starting it works, check that we performed
-  // housekeeping and that we convey p2p should be used.
-  mock_p2p_manager.fake().SetP2PEnabled(true);
-  mock_p2p_manager.fake().SetEnsureP2PRunningResult(true);
-  mock_p2p_manager.fake().SetPerformHousekeepingResult(true);
-  EXPECT_CALL(mock_p2p_manager, PerformHousekeeping());
-  attempter_.Update("", "", "", "", false, false, false);
-  EXPECT_TRUE(actual_using_p2p_for_downloading());
-  EXPECT_TRUE(actual_using_p2p_for_sharing());
-  ScheduleQuitMainLoop();
-}
-
-TEST_F(UpdateAttempterTest, P2PEnabledInteractive) {
-  loop_.PostTask(FROM_HERE,
-                 base::Bind(&UpdateAttempterTest::P2PEnabledInteractiveStart,
-                            base::Unretained(this)));
-  loop_.Run();
-}
-
-void UpdateAttempterTest::P2PEnabledInteractiveStart() {
-  MockP2PManager mock_p2p_manager;
-  fake_system_state_.set_p2p_manager(&mock_p2p_manager);
-  // For an interactive check, if P2P is enabled and starting it
-  // works, check that we performed housekeeping and that we convey
-  // p2p should be used for sharing but NOT for downloading.
-  mock_p2p_manager.fake().SetP2PEnabled(true);
-  mock_p2p_manager.fake().SetEnsureP2PRunningResult(true);
-  mock_p2p_manager.fake().SetPerformHousekeepingResult(true);
-  EXPECT_CALL(mock_p2p_manager, PerformHousekeeping());
-  attempter_.Update("",
-                    "",
-                    "",
-                    "",
-                    false,
-                    false,
-                    /*interactive=*/true);
-  EXPECT_FALSE(actual_using_p2p_for_downloading());
-  EXPECT_TRUE(actual_using_p2p_for_sharing());
-  ScheduleQuitMainLoop();
-}
-
-TEST_F(UpdateAttempterTest, ReadScatterFactorFromPolicy) {
-  loop_.PostTask(
-      FROM_HERE,
-      base::Bind(&UpdateAttempterTest::ReadScatterFactorFromPolicyTestStart,
-                 base::Unretained(this)));
-  loop_.Run();
-}
-
-// Tests that the scatter_factor_in_seconds value is properly fetched
-// from the device policy.
-void UpdateAttempterTest::ReadScatterFactorFromPolicyTestStart() {
-  int64_t scatter_factor_in_seconds = 36000;
-
-  auto device_policy = std::make_unique<policy::MockDevicePolicy>();
-  EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true));
-  fake_system_state_.set_device_policy(device_policy.get());
-
-  EXPECT_CALL(*device_policy, GetScatterFactorInSeconds(_))
-      .WillRepeatedly(
-          DoAll(SetArgPointee<0>(scatter_factor_in_seconds), Return(true)));
-
-  attempter_.policy_provider_.reset(
-      new policy::PolicyProvider(std::move(device_policy)));
-
-  attempter_.Update("", "", "", "", false, false, false);
-  EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds());
-
-  ScheduleQuitMainLoop();
-}
-
-TEST_F(UpdateAttempterTest, DecrementUpdateCheckCountTest) {
-  loop_.PostTask(
-      FROM_HERE,
-      base::Bind(&UpdateAttempterTest::DecrementUpdateCheckCountTestStart,
-                 base::Unretained(this)));
-  loop_.Run();
-}
-
-void UpdateAttempterTest::DecrementUpdateCheckCountTestStart() {
-  // Tests that the scatter_factor_in_seconds value is properly fetched
-  // from the device policy and is decremented if value > 0.
-  int64_t initial_value = 5;
-  FakePrefs fake_prefs;
-  attempter_.prefs_ = &fake_prefs;
-
-  fake_system_state_.fake_hardware()->SetIsOOBEComplete(Time::UnixEpoch());
-
-  EXPECT_TRUE(fake_prefs.SetInt64(kPrefsUpdateCheckCount, initial_value));
-
-  int64_t scatter_factor_in_seconds = 10;
-
-  auto device_policy = std::make_unique<policy::MockDevicePolicy>();
-  EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true));
-  fake_system_state_.set_device_policy(device_policy.get());
-
-  EXPECT_CALL(*device_policy, GetScatterFactorInSeconds(_))
-      .WillRepeatedly(
-          DoAll(SetArgPointee<0>(scatter_factor_in_seconds), Return(true)));
-
-  attempter_.policy_provider_.reset(
-      new policy::PolicyProvider(std::move(device_policy)));
-
-  attempter_.Update("", "", "", "", false, false, false);
-  EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds());
-
-  // Make sure the file still exists.
-  EXPECT_TRUE(fake_prefs.Exists(kPrefsUpdateCheckCount));
-
-  int64_t new_value;
-  EXPECT_TRUE(fake_prefs.GetInt64(kPrefsUpdateCheckCount, &new_value));
-  EXPECT_EQ(initial_value - 1, new_value);
-
-  EXPECT_TRUE(
-      attempter_.omaha_request_params_->update_check_count_wait_enabled());
-
-  // However, if the count is already 0, it's not decremented. Test that.
-  initial_value = 0;
-  EXPECT_TRUE(fake_prefs.SetInt64(kPrefsUpdateCheckCount, initial_value));
-  attempter_.Update("", "", "", "", false, false, false);
-  EXPECT_TRUE(fake_prefs.Exists(kPrefsUpdateCheckCount));
-  EXPECT_TRUE(fake_prefs.GetInt64(kPrefsUpdateCheckCount, &new_value));
-  EXPECT_EQ(initial_value, new_value);
-
-  ScheduleQuitMainLoop();
-}
-
-TEST_F(UpdateAttempterTest, NoScatteringDoneDuringManualUpdateTestStart) {
-  loop_.PostTask(
-      FROM_HERE,
-      base::Bind(
-          &UpdateAttempterTest::NoScatteringDoneDuringManualUpdateTestStart,
-          base::Unretained(this)));
-  loop_.Run();
-}
-
-void UpdateAttempterTest::NoScatteringDoneDuringManualUpdateTestStart() {
-  // Tests that no scattering logic is enabled if the update check
-  // is manually done (as opposed to a scheduled update check).
-  int64_t initial_value = 8;
-  FakePrefs fake_prefs;
-  attempter_.prefs_ = &fake_prefs;
-
-  fake_system_state_.fake_hardware()->SetIsOOBEComplete(Time::UnixEpoch());
-  fake_system_state_.set_prefs(&fake_prefs);
-
-  EXPECT_TRUE(
-      fake_prefs.SetInt64(kPrefsWallClockScatteringWaitPeriod, initial_value));
-  EXPECT_TRUE(fake_prefs.SetInt64(kPrefsUpdateCheckCount, initial_value));
-
-  // Make sure scatter_factor is non-zero, as scattering is disabled
-  // otherwise.
-  int64_t scatter_factor_in_seconds = 50;
-
-  auto device_policy = std::make_unique<policy::MockDevicePolicy>();
-  EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true));
-  fake_system_state_.set_device_policy(device_policy.get());
-
-  EXPECT_CALL(*device_policy, GetScatterFactorInSeconds(_))
-      .WillRepeatedly(
-          DoAll(SetArgPointee<0>(scatter_factor_in_seconds), Return(true)));
-
-  attempter_.policy_provider_.reset(
-      new policy::PolicyProvider(std::move(device_policy)));
-
-  // Trigger an interactive check so we can test that scattering is disabled.
-  attempter_.Update("",
-                    "",
-                    "",
-                    "",
-                    false,
-                    false,
-                    /*interactive=*/true);
-  EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds());
-
-  // Make sure scattering is disabled for manual (i.e. user initiated) update
-  // checks and all artifacts are removed.
-  EXPECT_FALSE(
-      attempter_.omaha_request_params_->wall_clock_based_wait_enabled());
-  EXPECT_FALSE(fake_prefs.Exists(kPrefsWallClockScatteringWaitPeriod));
-  EXPECT_EQ(0, attempter_.omaha_request_params_->waiting_period().InSeconds());
-  EXPECT_FALSE(
-      attempter_.omaha_request_params_->update_check_count_wait_enabled());
-  EXPECT_FALSE(fake_prefs.Exists(kPrefsUpdateCheckCount));
-
-  ScheduleQuitMainLoop();
-}
-
-void UpdateAttempterTest::SetUpStagingTest(const StagingSchedule& schedule,
-                                           FakePrefs* prefs) {
-  attempter_.prefs_ = prefs;
-  fake_system_state_.set_prefs(prefs);
-
-  int64_t initial_value = 8;
-  EXPECT_TRUE(
-      prefs->SetInt64(kPrefsWallClockScatteringWaitPeriod, initial_value));
-  EXPECT_TRUE(prefs->SetInt64(kPrefsUpdateCheckCount, initial_value));
-  attempter_.scatter_factor_ = TimeDelta::FromSeconds(20);
-
-  auto device_policy = std::make_unique<policy::MockDevicePolicy>();
-  EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true));
-  fake_system_state_.set_device_policy(device_policy.get());
-  EXPECT_CALL(*device_policy, GetDeviceUpdateStagingSchedule(_))
-      .WillRepeatedly(DoAll(SetArgPointee<0>(schedule), Return(true)));
-
-  attempter_.policy_provider_.reset(
-      new policy::PolicyProvider(std::move(device_policy)));
-}
-
-TEST_F(UpdateAttempterTest, StagingSetsPrefsAndTurnsOffScattering) {
-  loop_.PostTask(
-      FROM_HERE,
-      base::Bind(
-          &UpdateAttempterTest::StagingSetsPrefsAndTurnsOffScatteringStart,
-          base::Unretained(this)));
-  loop_.Run();
-}
-
-void UpdateAttempterTest::StagingSetsPrefsAndTurnsOffScatteringStart() {
-  // Tests that staging sets its prefs properly and turns off scattering.
-  fake_system_state_.fake_hardware()->SetIsOOBEComplete(Time::UnixEpoch());
-  FakePrefs fake_prefs;
-  SetUpStagingTest(kValidStagingSchedule, &fake_prefs);
-
-  attempter_.Update("", "", "", "", false, false, false);
-  // Check that prefs have the correct values.
-  int64_t update_count;
-  EXPECT_TRUE(fake_prefs.GetInt64(kPrefsUpdateCheckCount, &update_count));
-  int64_t waiting_time_days;
-  EXPECT_TRUE(fake_prefs.GetInt64(kPrefsWallClockStagingWaitPeriod,
-                                  &waiting_time_days));
-  EXPECT_GT(waiting_time_days, 0);
-  // Update count should have been decremented.
-  EXPECT_EQ(7, update_count);
-  // Check that Omaha parameters were updated correctly.
-  EXPECT_TRUE(
-      attempter_.omaha_request_params_->update_check_count_wait_enabled());
-  EXPECT_TRUE(
-      attempter_.omaha_request_params_->wall_clock_based_wait_enabled());
-  EXPECT_EQ(waiting_time_days,
-            attempter_.omaha_request_params_->waiting_period().InDays());
-  // Check class variables.
-  EXPECT_EQ(waiting_time_days, attempter_.staging_wait_time_.InDays());
-  EXPECT_EQ(kValidStagingSchedule, attempter_.staging_schedule_);
-  // Check that scattering is turned off
-  EXPECT_EQ(0, attempter_.scatter_factor_.InSeconds());
-  EXPECT_FALSE(fake_prefs.Exists(kPrefsWallClockScatteringWaitPeriod));
-
-  ScheduleQuitMainLoop();
-}
-
-void UpdateAttempterTest::CheckStagingOff() {
-  // Check that all prefs were removed.
-  EXPECT_FALSE(attempter_.prefs_->Exists(kPrefsUpdateCheckCount));
-  EXPECT_FALSE(attempter_.prefs_->Exists(kPrefsWallClockScatteringWaitPeriod));
-  EXPECT_FALSE(attempter_.prefs_->Exists(kPrefsWallClockStagingWaitPeriod));
-  // Check that the Omaha parameters have the correct value.
-  EXPECT_EQ(0, attempter_.omaha_request_params_->waiting_period().InDays());
-  EXPECT_EQ(attempter_.omaha_request_params_->waiting_period(),
-            attempter_.staging_wait_time_);
-  EXPECT_FALSE(
-      attempter_.omaha_request_params_->update_check_count_wait_enabled());
-  EXPECT_FALSE(
-      attempter_.omaha_request_params_->wall_clock_based_wait_enabled());
-  // Check that scattering is turned off too.
-  EXPECT_EQ(0, attempter_.scatter_factor_.InSeconds());
-}
-
-TEST_F(UpdateAttempterTest, StagingOffIfInteractive) {
-  loop_.PostTask(FROM_HERE,
-                 base::Bind(&UpdateAttempterTest::StagingOffIfInteractiveStart,
-                            base::Unretained(this)));
-  loop_.Run();
-}
-
-void UpdateAttempterTest::StagingOffIfInteractiveStart() {
-  // Tests that staging is turned off when an interactive update is requested.
-  fake_system_state_.fake_hardware()->SetIsOOBEComplete(Time::UnixEpoch());
-  FakePrefs fake_prefs;
-  SetUpStagingTest(kValidStagingSchedule, &fake_prefs);
-
-  attempter_.Update("", "", "", "", false, false, /* interactive = */ true);
-  CheckStagingOff();
-
-  ScheduleQuitMainLoop();
-}
-
-TEST_F(UpdateAttempterTest, StagingOffIfOobe) {
-  loop_.PostTask(FROM_HERE,
-                 base::Bind(&UpdateAttempterTest::StagingOffIfOobeStart,
-                            base::Unretained(this)));
-  loop_.Run();
-}
-
-void UpdateAttempterTest::StagingOffIfOobeStart() {
-  // Tests that staging is turned off if OOBE hasn't been completed.
-  fake_system_state_.fake_hardware()->SetIsOOBEEnabled(true);
-  fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
-  FakePrefs fake_prefs;
-  SetUpStagingTest(kValidStagingSchedule, &fake_prefs);
-
-  attempter_.Update("", "", "", "", false, false, /* interactive = */ true);
-  CheckStagingOff();
-
-  ScheduleQuitMainLoop();
-}
-
-// Checks that we only report daily metrics at most every 24 hours.
-TEST_F(UpdateAttempterTest, ReportDailyMetrics) {
-  FakeClock fake_clock;
-  FakePrefs fake_prefs;
-
-  fake_system_state_.set_clock(&fake_clock);
-  fake_system_state_.set_prefs(&fake_prefs);
-
-  Time epoch = Time::FromInternalValue(0);
-  fake_clock.SetWallclockTime(epoch);
-
-  // If there is no kPrefsDailyMetricsLastReportedAt state variable,
-  // we should report.
-  EXPECT_TRUE(attempter_.CheckAndReportDailyMetrics());
-  // We should not report again if no time has passed.
-  EXPECT_FALSE(attempter_.CheckAndReportDailyMetrics());
-
-  // We should not report if only 10 hours has passed.
-  fake_clock.SetWallclockTime(epoch + TimeDelta::FromHours(10));
-  EXPECT_FALSE(attempter_.CheckAndReportDailyMetrics());
-
-  // We should not report if only 24 hours - 1 sec has passed.
-  fake_clock.SetWallclockTime(epoch + TimeDelta::FromHours(24) -
-                              TimeDelta::FromSeconds(1));
-  EXPECT_FALSE(attempter_.CheckAndReportDailyMetrics());
-
-  // We should report if 24 hours has passed.
-  fake_clock.SetWallclockTime(epoch + TimeDelta::FromHours(24));
-  EXPECT_TRUE(attempter_.CheckAndReportDailyMetrics());
-
-  // But then we should not report again..
-  EXPECT_FALSE(attempter_.CheckAndReportDailyMetrics());
-
-  // .. until another 24 hours has passed
-  fake_clock.SetWallclockTime(epoch + TimeDelta::FromHours(47));
-  EXPECT_FALSE(attempter_.CheckAndReportDailyMetrics());
-  fake_clock.SetWallclockTime(epoch + TimeDelta::FromHours(48));
-  EXPECT_TRUE(attempter_.CheckAndReportDailyMetrics());
-  EXPECT_FALSE(attempter_.CheckAndReportDailyMetrics());
-
-  // .. and another 24 hours
-  fake_clock.SetWallclockTime(epoch + TimeDelta::FromHours(71));
-  EXPECT_FALSE(attempter_.CheckAndReportDailyMetrics());
-  fake_clock.SetWallclockTime(epoch + TimeDelta::FromHours(72));
-  EXPECT_TRUE(attempter_.CheckAndReportDailyMetrics());
-  EXPECT_FALSE(attempter_.CheckAndReportDailyMetrics());
-
-  // If the span between the time of reporting and the present time is
-  // negative, we report anyway. This resets the timestamp and avoids an edge
-  // condition whereby a distant point in the future stored in the state
-  // variable would result in us never reporting again.
-  fake_clock.SetWallclockTime(epoch + TimeDelta::FromHours(71));
-  EXPECT_TRUE(attempter_.CheckAndReportDailyMetrics());
-  EXPECT_FALSE(attempter_.CheckAndReportDailyMetrics());
-
-  // In this case we should not update until the clock reads 71 + 24 = 95.
-  // Check that.
-  fake_clock.SetWallclockTime(epoch + TimeDelta::FromHours(94));
-  EXPECT_FALSE(attempter_.CheckAndReportDailyMetrics());
-  fake_clock.SetWallclockTime(epoch + TimeDelta::FromHours(95));
-  EXPECT_TRUE(attempter_.CheckAndReportDailyMetrics());
-  EXPECT_FALSE(attempter_.CheckAndReportDailyMetrics());
-}
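-// The tail of the test above exercises the negative-span reset: at hour 72
-// the last-reported timestamp is 72, so moving the clock back to hour 71
-// makes the span negative, forces a report, and presumably rewrites the
-// stored timestamp to 71; that is why the next report is not due until
-// hour 71 + 24 = 95.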
-
-TEST_F(UpdateAttempterTest, BootTimeInUpdateMarkerFile) {
-  FakeClock fake_clock;
-  fake_clock.SetBootTime(Time::FromTimeT(42));
-  fake_system_state_.set_clock(&fake_clock);
-  FakePrefs fake_prefs;
-  fake_system_state_.set_prefs(&fake_prefs);
-  attempter_.Init();
-
-  Time boot_time;
-  EXPECT_FALSE(attempter_.GetBootTimeAtUpdate(&boot_time));
-
-  attempter_.WriteUpdateCompletedMarker();
-
-  EXPECT_TRUE(attempter_.GetBootTimeAtUpdate(&boot_time));
-  EXPECT_EQ(boot_time.ToTimeT(), 42);
-}
-
-TEST_F(UpdateAttempterTest, AnyUpdateSourceAllowedUnofficial) {
-  fake_system_state_.fake_hardware()->SetIsOfficialBuild(false);
-  EXPECT_TRUE(attempter_.IsAnyUpdateSourceAllowed());
-}
-
-TEST_F(UpdateAttempterTest, AnyUpdateSourceAllowedOfficialDevmode) {
-  fake_system_state_.fake_hardware()->SetIsOfficialBuild(true);
-  fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(true);
-  EXPECT_TRUE(attempter_.IsAnyUpdateSourceAllowed());
-}
-
-TEST_F(UpdateAttempterTest, AnyUpdateSourceDisallowedOfficialNormal) {
-  fake_system_state_.fake_hardware()->SetIsOfficialBuild(true);
-  fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(false);
-  EXPECT_FALSE(attempter_.IsAnyUpdateSourceAllowed());
-}
-
-TEST_F(UpdateAttempterTest, CheckForUpdateAUDlcTest) {
-  fake_system_state_.fake_hardware()->SetIsOfficialBuild(true);
-  fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(false);
-
-  const string dlc_module_id = "a_dlc_module_id";
-  vector<string> dlc_module_ids = {dlc_module_id};
-  ON_CALL(mock_dlcservice_, GetInstalled(testing::_))
-      .WillByDefault(DoAll(testing::SetArgPointee<0>(dlc_module_ids),
-                           testing::Return(true)));
-
-  attempter_.CheckForUpdate("", "autest", UpdateAttemptFlags::kNone);
-  EXPECT_EQ(attempter_.dlc_module_ids_.size(), 1);
-  EXPECT_EQ(attempter_.dlc_module_ids_[0], dlc_module_id);
-}
-
-TEST_F(UpdateAttempterTest, CheckForUpdateAUTest) {
-  fake_system_state_.fake_hardware()->SetIsOfficialBuild(true);
-  fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(false);
-  attempter_.CheckForUpdate("", "autest", UpdateAttemptFlags::kNone);
-  EXPECT_EQ(constants::kOmahaDefaultAUTestURL, attempter_.forced_omaha_url());
-}
-
-TEST_F(UpdateAttempterTest, CheckForUpdateScheduledAUTest) {
-  fake_system_state_.fake_hardware()->SetIsOfficialBuild(true);
-  fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(false);
-  attempter_.CheckForUpdate("", "autest-scheduled", UpdateAttemptFlags::kNone);
-  EXPECT_EQ(constants::kOmahaDefaultAUTestURL, attempter_.forced_omaha_url());
-}
-
-TEST_F(UpdateAttempterTest, CheckForInstallTest) {
-  fake_system_state_.fake_hardware()->SetIsOfficialBuild(true);
-  fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(false);
-  attempter_.CheckForInstall({}, "autest");
-  EXPECT_EQ(constants::kOmahaDefaultAUTestURL, attempter_.forced_omaha_url());
-
-  attempter_.CheckForInstall({}, "autest-scheduled");
-  EXPECT_EQ(constants::kOmahaDefaultAUTestURL, attempter_.forced_omaha_url());
-
-  attempter_.CheckForInstall({}, "http://omaha.phishing");
-  EXPECT_EQ("", attempter_.forced_omaha_url());
-}
-
-TEST_F(UpdateAttempterTest, InstallSetsStatusIdle) {
-  attempter_.CheckForInstall({}, "http://foo.bar");
-  attempter_.status_ = UpdateStatus::DOWNLOADING;
-  EXPECT_TRUE(attempter_.is_install_);
-  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
-  UpdateEngineStatus status;
-  attempter_.GetStatus(&status);
-  // Should set status to idle after an install operation.
-  EXPECT_EQ(UpdateStatus::IDLE, status.status);
-}
-
-TEST_F(UpdateAttempterTest, RollbackAfterInstall) {
-  attempter_.is_install_ = true;
-  attempter_.Rollback(false);
-  EXPECT_FALSE(attempter_.is_install_);
-}
-
-TEST_F(UpdateAttempterTest, UpdateAfterInstall) {
-  attempter_.is_install_ = true;
-  attempter_.CheckForUpdate("", "", UpdateAttemptFlags::kNone);
-  EXPECT_FALSE(attempter_.is_install_);
-}
-
-TEST_F(UpdateAttempterTest, TargetVersionPrefixSetAndReset) {
-  attempter_.CalculateUpdateParams("", "", "", "1234", false, false, false);
-  EXPECT_EQ("1234",
-            fake_system_state_.request_params()->target_version_prefix());
-
-  attempter_.CalculateUpdateParams("", "", "", "", false, false, false);
-  EXPECT_TRUE(
-      fake_system_state_.request_params()->target_version_prefix().empty());
-}
-
-TEST_F(UpdateAttempterTest, RollbackAllowedSetAndReset) {
-  attempter_.CalculateUpdateParams("",
-                                   "",
-                                   "",
-                                   "1234",
-                                   /*rollback_allowed=*/true,
-                                   false,
-                                   false);
-  EXPECT_TRUE(fake_system_state_.request_params()->rollback_allowed());
-
-  attempter_.CalculateUpdateParams("",
-                                   "",
-                                   "",
-                                   "1234",
-                                   /*rollback_allowed=*/false,
-                                   false,
-                                   false);
-  EXPECT_FALSE(fake_system_state_.request_params()->rollback_allowed());
-}
-
-TEST_F(UpdateAttempterTest, UpdateDeferredByPolicyTest) {
-  // Construct an OmahaResponseHandlerAction that has processed an InstallPlan,
-  // but the update is being deferred by the Policy.
-  OmahaResponseHandlerAction response_action(&fake_system_state_);
-  response_action.install_plan_.version = "a.b.c.d";
-  response_action.install_plan_.system_version = "b.c.d.e";
-  response_action.install_plan_.payloads.push_back(
-      {.size = 1234ULL, .type = InstallPayloadType::kFull});
-  // Inform the UpdateAttempter that the OmahaResponseHandlerAction has
-  // completed, with the deferred-update error code.
-  attempter_.ActionCompleted(
-      nullptr, &response_action, ErrorCode::kOmahaUpdateDeferredPerPolicy);
-  {
-    UpdateEngineStatus status;
-    attempter_.GetStatus(&status);
-    EXPECT_EQ(UpdateStatus::UPDATE_AVAILABLE, status.status);
-    EXPECT_TRUE(attempter_.install_plan_);
-    EXPECT_EQ(attempter_.install_plan_->version, status.new_version);
-    EXPECT_EQ(attempter_.install_plan_->system_version,
-              status.new_system_version);
-    EXPECT_EQ(attempter_.install_plan_->payloads[0].size,
-              status.new_size_bytes);
-  }
-  // An "error" event should have been created to tell Omaha that the update is
-  // being deferred.
-  EXPECT_TRUE(nullptr != attempter_.error_event_);
-  EXPECT_EQ(OmahaEvent::kTypeUpdateComplete, attempter_.error_event_->type);
-  EXPECT_EQ(OmahaEvent::kResultUpdateDeferred, attempter_.error_event_->result);
-  ErrorCode expected_code = static_cast<ErrorCode>(
-      static_cast<int>(ErrorCode::kOmahaUpdateDeferredPerPolicy) |
-      static_cast<int>(ErrorCode::kTestOmahaUrlFlag));
-  EXPECT_EQ(expected_code, attempter_.error_event_->error_code);
-  // End the processing
-  attempter_.ProcessingDone(nullptr, ErrorCode::kOmahaUpdateDeferredPerPolicy);
-  // Validate the state of the attempter.
-  {
-    UpdateEngineStatus status;
-    attempter_.GetStatus(&status);
-    EXPECT_EQ(UpdateStatus::REPORTING_ERROR_EVENT, status.status);
-    EXPECT_EQ(response_action.install_plan_.version, status.new_version);
-    EXPECT_EQ(response_action.install_plan_.system_version,
-              status.new_system_version);
-    EXPECT_EQ(response_action.install_plan_.payloads[0].size,
-              status.new_size_bytes);
-  }
-}
-
-TEST_F(UpdateAttempterTest, UpdateIsNotRunningWhenUpdateAvailable) {
-  EXPECT_FALSE(attempter_.IsUpdateRunningOrScheduled());
-  // Verify that an in-progress update with UPDATE_AVAILABLE is considered
-  // running.
-  attempter_.status_ = UpdateStatus::UPDATE_AVAILABLE;
-  EXPECT_TRUE(attempter_.IsUpdateRunningOrScheduled());
-}
-
-TEST_F(UpdateAttempterTest, UpdateAttemptFlagsCachedAtUpdateStart) {
-  attempter_.SetUpdateAttemptFlags(UpdateAttemptFlags::kFlagRestrictDownload);
-
-  UpdateCheckParams params = {.updates_enabled = true};
-  attempter_.OnUpdateScheduled(EvalStatus::kSucceeded, params);
-
-  EXPECT_EQ(UpdateAttemptFlags::kFlagRestrictDownload,
-            attempter_.GetCurrentUpdateAttemptFlags());
-}
-
-TEST_F(UpdateAttempterTest, RollbackNotAllowed) {
-  UpdateCheckParams params = {.updates_enabled = true,
-                              .rollback_allowed = false};
-  attempter_.OnUpdateScheduled(EvalStatus::kSucceeded, params);
-  EXPECT_FALSE(fake_system_state_.request_params()->rollback_allowed());
-}
-
-TEST_F(UpdateAttempterTest, RollbackAllowed) {
-  UpdateCheckParams params = {.updates_enabled = true,
-                              .rollback_allowed = true};
-  attempter_.OnUpdateScheduled(EvalStatus::kSucceeded, params);
-  EXPECT_TRUE(fake_system_state_.request_params()->rollback_allowed());
-}
-
-TEST_F(UpdateAttempterTest, InteractiveUpdateUsesPassedRestrictions) {
-  attempter_.SetUpdateAttemptFlags(UpdateAttemptFlags::kFlagRestrictDownload);
-
-  attempter_.CheckForUpdate("", "", UpdateAttemptFlags::kNone);
-  EXPECT_EQ(UpdateAttemptFlags::kNone,
-            attempter_.GetCurrentUpdateAttemptFlags());
-}
-
-TEST_F(UpdateAttempterTest, NonInteractiveUpdateUsesSetRestrictions) {
-  attempter_.SetUpdateAttemptFlags(UpdateAttemptFlags::kNone);
-
-  // This tests that when CheckForUpdate() is called with the non-interactive
-  // flag set, it doesn't change the current UpdateAttemptFlags.
-  attempter_.CheckForUpdate("",
-                            "",
-                            UpdateAttemptFlags::kFlagNonInteractive |
-                                UpdateAttemptFlags::kFlagRestrictDownload);
-  EXPECT_EQ(UpdateAttemptFlags::kNone,
-            attempter_.GetCurrentUpdateAttemptFlags());
-}
-
-void UpdateAttempterTest::ResetRollbackHappenedStart(bool is_consumer,
-                                                     bool is_policy_loaded,
-                                                     bool expected_reset) {
-  EXPECT_CALL(*fake_system_state_.mock_payload_state(), GetRollbackHappened())
-      .WillRepeatedly(Return(true));
-  auto mock_policy_provider =
-      std::make_unique<NiceMock<policy::MockPolicyProvider>>();
-  EXPECT_CALL(*mock_policy_provider, IsConsumerDevice())
-      .WillRepeatedly(Return(is_consumer));
-  EXPECT_CALL(*mock_policy_provider, device_policy_is_loaded())
-      .WillRepeatedly(Return(is_policy_loaded));
-  const policy::MockDevicePolicy device_policy;
-  EXPECT_CALL(*mock_policy_provider, GetDevicePolicy())
-      .WillRepeatedly(ReturnRef(device_policy));
-  EXPECT_CALL(*fake_system_state_.mock_payload_state(),
-              SetRollbackHappened(false))
-      .Times(expected_reset ? 1 : 0);
-  attempter_.policy_provider_ = std::move(mock_policy_provider);
-  attempter_.Update("", "", "", "", false, false, false);
-  ScheduleQuitMainLoop();
-}
-
-TEST_F(UpdateAttempterTest, ResetRollbackHappenedOobe) {
-  loop_.PostTask(FROM_HERE,
-                 base::Bind(&UpdateAttempterTest::ResetRollbackHappenedStart,
-                            base::Unretained(this),
-                            /*is_consumer=*/false,
-                            /*is_policy_loaded=*/false,
-                            /*expected_reset=*/false));
-  loop_.Run();
-}
-
-TEST_F(UpdateAttempterTest, ResetRollbackHappenedConsumer) {
-  loop_.PostTask(FROM_HERE,
-                 base::Bind(&UpdateAttempterTest::ResetRollbackHappenedStart,
-                            base::Unretained(this),
-                            /*is_consumer=*/true,
-                            /*is_policy_loaded=*/false,
-                            /*expected_reset=*/true));
-  loop_.Run();
-}
-
-TEST_F(UpdateAttempterTest, ResetRollbackHappenedEnterprise) {
-  loop_.PostTask(FROM_HERE,
-                 base::Bind(&UpdateAttempterTest::ResetRollbackHappenedStart,
-                            base::Unretained(this),
-                            /*is_consumer=*/false,
-                            /*is_policy_loaded=*/true,
-                            /*expected_reset=*/true));
-  loop_.Run();
-}
-
-TEST_F(UpdateAttempterTest, SetRollbackHappenedRollback) {
-  attempter_.install_plan_.reset(new InstallPlan);
-  attempter_.install_plan_->is_rollback = true;
-
-  EXPECT_CALL(*fake_system_state_.mock_payload_state(),
-              SetRollbackHappened(true))
-      .Times(1);
-  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
-}
-
-TEST_F(UpdateAttempterTest, SetRollbackHappenedNotRollback) {
-  attempter_.install_plan_.reset(new InstallPlan);
-  attempter_.install_plan_->is_rollback = false;
-
-  EXPECT_CALL(*fake_system_state_.mock_payload_state(),
-              SetRollbackHappened(true))
-      .Times(0);
-  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
-}
-
-TEST_F(UpdateAttempterTest, RollbackMetricsRollbackSuccess) {
-  attempter_.install_plan_.reset(new InstallPlan);
-  attempter_.install_plan_->is_rollback = true;
-  attempter_.install_plan_->version = kRollbackVersion;
-
-  EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
-              ReportEnterpriseRollbackMetrics(true, kRollbackVersion))
-      .Times(1);
-  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
-}
-
-TEST_F(UpdateAttempterTest, RollbackMetricsNotRollbackSuccess) {
-  attempter_.install_plan_.reset(new InstallPlan);
-  attempter_.install_plan_->is_rollback = false;
-  attempter_.install_plan_->version = kRollbackVersion;
-
-  EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
-              ReportEnterpriseRollbackMetrics(_, _))
-      .Times(0);
-  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
-}
-
-TEST_F(UpdateAttempterTest, RollbackMetricsRollbackFailure) {
-  attempter_.install_plan_.reset(new InstallPlan);
-  attempter_.install_plan_->is_rollback = true;
-  attempter_.install_plan_->version = kRollbackVersion;
-
-  EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
-              ReportEnterpriseRollbackMetrics(false, kRollbackVersion))
-      .Times(1);
-  MockAction action;
-  attempter_.CreatePendingErrorEvent(&action, ErrorCode::kRollbackNotPossible);
-  attempter_.ProcessingDone(nullptr, ErrorCode::kRollbackNotPossible);
-}
-
-TEST_F(UpdateAttempterTest, RollbackMetricsNotRollbackFailure) {
-  attempter_.install_plan_.reset(new InstallPlan);
-  attempter_.install_plan_->is_rollback = false;
-  attempter_.install_plan_->version = kRollbackVersion;
-
-  EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
-              ReportEnterpriseRollbackMetrics(_, _))
-      .Times(0);
-  MockAction action;
-  attempter_.CreatePendingErrorEvent(&action, ErrorCode::kRollbackNotPossible);
-  attempter_.ProcessingDone(nullptr, ErrorCode::kRollbackNotPossible);
-}
-
-TEST_F(UpdateAttempterTest, TimeToUpdateAppliedMetricFailure) {
-  EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
-              ReportEnterpriseUpdateSeenToDownloadDays(_, _))
-      .Times(0);
-  attempter_.ProcessingDone(nullptr, ErrorCode::kOmahaUpdateDeferredPerPolicy);
-}
-
-TEST_F(UpdateAttempterTest, TimeToUpdateAppliedOnNonEnterprise) {
-  auto device_policy = std::make_unique<policy::MockDevicePolicy>();
-  fake_system_state_.set_device_policy(device_policy.get());
-  // Make device policy return that this is not enterprise enrolled
-  EXPECT_CALL(*device_policy, IsEnterpriseEnrolled()).WillOnce(Return(false));
-
-  // Ensure that the metric is not recorded.
-  EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
-              ReportEnterpriseUpdateSeenToDownloadDays(_, _))
-      .Times(0);
-  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
-}
-
-TEST_F(UpdateAttempterTest,
-       TimeToUpdateAppliedWithTimeRestrictionMetricSuccess) {
-  constexpr int kDaysToUpdate = 15;
-  auto device_policy = std::make_unique<policy::MockDevicePolicy>();
-  fake_system_state_.set_device_policy(device_policy.get());
-  // Make device policy return that this is enterprise enrolled
-  EXPECT_CALL(*device_policy, IsEnterpriseEnrolled()).WillOnce(Return(true));
-  // Pretend that there's a time restriction policy in place
-  EXPECT_CALL(*device_policy, GetDisallowedTimeIntervals(_))
-      .WillOnce(Return(true));
-
-  FakePrefs fake_prefs;
-  Time update_first_seen_at = Time::Now();
-  fake_prefs.SetInt64(kPrefsUpdateFirstSeenAt,
-                      update_first_seen_at.ToInternalValue());
-
-  FakeClock fake_clock;
-  Time update_finished_at =
-      update_first_seen_at + TimeDelta::FromDays(kDaysToUpdate);
-  fake_clock.SetWallclockTime(update_finished_at);
-
-  fake_system_state_.set_clock(&fake_clock);
-  fake_system_state_.set_prefs(&fake_prefs);
-
-  EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
-              ReportEnterpriseUpdateSeenToDownloadDays(true, kDaysToUpdate))
-      .Times(1);
-  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
-}
-
-TEST_F(UpdateAttempterTest,
-       TimeToUpdateAppliedWithoutTimeRestrictionMetricSuccess) {
-  constexpr int kDaysToUpdate = 15;
-  auto device_policy = std::make_unique<policy::MockDevicePolicy>();
-  fake_system_state_.set_device_policy(device_policy.get());
-  // Make device policy return that this is enterprise enrolled
-  EXPECT_CALL(*device_policy, IsEnterpriseEnrolled()).WillOnce(Return(true));
-  // Pretend that there's no time restriction policy in place
-  EXPECT_CALL(*device_policy, GetDisallowedTimeIntervals(_))
-      .WillOnce(Return(false));
-
-  FakePrefs fake_prefs;
-  Time update_first_seen_at = Time::Now();
-  fake_prefs.SetInt64(kPrefsUpdateFirstSeenAt,
-                      update_first_seen_at.ToInternalValue());
-
-  FakeClock fake_clock;
-  Time update_finished_at =
-      update_first_seen_at + TimeDelta::FromDays(kDaysToUpdate);
-  fake_clock.SetWallclockTime(update_finished_at);
-
-  fake_system_state_.set_clock(&fake_clock);
-  fake_system_state_.set_prefs(&fake_prefs);
-
-  EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
-              ReportEnterpriseUpdateSeenToDownloadDays(false, kDaysToUpdate))
-      .Times(1);
-  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
-}
-
-}  // namespace chromeos_update_engine
diff --git a/update_boot_flags_action_unittest.cc b/update_boot_flags_action_unittest.cc
index 1b2bfa5..26cbe90 100644
--- a/update_boot_flags_action_unittest.cc
+++ b/update_boot_flags_action_unittest.cc
@@ -22,18 +22,17 @@
 #include <base/bind.h>
 #include <gtest/gtest.h>
 
-#include "update_engine/fake_system_state.h"
+#include "update_engine/common/fake_boot_control.h"
 
 namespace chromeos_update_engine {
 
 class UpdateBootFlagsActionTest : public ::testing::Test {
- public:
-  FakeSystemState fake_system_state_;
+ protected:
+  FakeBootControl boot_control_;
 };
 
 TEST_F(UpdateBootFlagsActionTest, SimpleTest) {
-  auto boot_control = fake_system_state_.fake_boot_control();
-  auto action = std::make_unique<UpdateBootFlagsAction>(boot_control);
+  auto action = std::make_unique<UpdateBootFlagsAction>(&boot_control_);
   ActionProcessor processor;
   processor.EnqueueAction(std::move(action));
 
@@ -49,9 +48,8 @@
   UpdateBootFlagsAction::updated_boot_flags_ = false;
   UpdateBootFlagsAction::is_running_ = false;
 
-  auto boot_control = fake_system_state_.fake_boot_control();
-  auto action1 = std::make_unique<UpdateBootFlagsAction>(boot_control);
-  auto action2 = std::make_unique<UpdateBootFlagsAction>(boot_control);
+  auto action1 = std::make_unique<UpdateBootFlagsAction>(&boot_control_);
+  auto action2 = std::make_unique<UpdateBootFlagsAction>(&boot_control_);
   ActionProcessor processor1, processor2;
   processor1.EnqueueAction(std::move(action1));
   processor2.EnqueueAction(std::move(action2));
diff --git a/update_engine-client.gyp b/update_engine-client.gyp
deleted file mode 100644
index 588fc63..0000000
--- a/update_engine-client.gyp
+++ /dev/null
@@ -1,41 +0,0 @@
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-{
-  'targets': [
-    # update_engine client library generated headers. Used by other daemons and
-    # by the update_engine_client console program to interact with
-    # update_engine.
-    {
-      'target_name': 'libupdate_engine-client-headers',
-      'type': 'none',
-      'actions': [
-        {
-          'action_name': 'update_engine_client-dbus-proxies',
-          'variables': {
-            'dbus_service_config': 'dbus_bindings/dbus-service-config.json',
-            'proxy_output_file': 'include/update_engine/dbus-proxies.h',
-            'mock_output_file': 'include/update_engine/dbus-proxy-mocks.h',
-            'proxy_path_in_mocks': 'update_engine/dbus-proxies.h',
-          },
-          'sources': [
-            'dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml',
-          ],
-          'includes': ['../../../platform2/common-mk/generate-dbus-proxies.gypi'],
-        },
-      ],
-    },
-  ],
-}
diff --git a/update_engine.conf b/update_engine.conf
index af213ad..b6ca3c4 100644
--- a/update_engine.conf
+++ b/update_engine.conf
@@ -1,2 +1,2 @@
 PAYLOAD_MAJOR_VERSION=2
-PAYLOAD_MINOR_VERSION=6
+PAYLOAD_MINOR_VERSION=7
diff --git a/update_engine.conf.chromeos b/update_engine.conf.chromeos
new file mode 100644
index 0000000..af213ad
--- /dev/null
+++ b/update_engine.conf.chromeos
@@ -0,0 +1,2 @@
+PAYLOAD_MAJOR_VERSION=2
+PAYLOAD_MINOR_VERSION=6
diff --git a/update_engine.gyp b/update_engine.gyp
deleted file mode 100644
index c2c0c62..0000000
--- a/update_engine.gyp
+++ /dev/null
@@ -1,655 +0,0 @@
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# TODO: Rename these files to pass this check.
-# gyplint: disable=GypLintSourceFileNames
-{
-  'variables': {
-    'USE_chrome_network_proxy': '1',
-    'USE_chrome_kiosk_app': '1',
-  },
-  'target_defaults': {
-    'variables': {
-      'deps': [
-        'libbrillo-<(libbase_ver)',
-        'libchrome-<(libbase_ver)',
-        # system_api depends on protobuf (or protobuf-lite). It must appear
-        # before protobuf here or the linker flags won't be in the right
-        # order.
-        'system_api',
-        'protobuf-lite',
-      ],
-      # The -DUSE_* flags are passed from platform2.py. We use sane defaults
-      # here when these USE flags are not defined. You can set the default value
-      # for the USE flag in the ebuild.
-      'USE_hwid_override%': '0',
-    },
-    'cflags': [
-      '-g',
-      '-ffunction-sections',
-      '-Wall',
-      '-Wextra',
-      '-Werror',
-      '-Wno-unused-parameter',
-    ],
-    'cflags_cc': [
-      '-fno-strict-aliasing',
-      '-Wnon-virtual-dtor',
-    ],
-    'ldflags': [
-      '-Wl,--gc-sections',
-    ],
-    'defines': [
-      '__CHROMEOS__',
-      '_FILE_OFFSET_BITS=64',
-      '_POSIX_C_SOURCE=199309L',
-      'USE_BINDER=<(USE_binder)',
-      'USE_DBUS=<(USE_dbus)',
-      'USE_FEC=0',
-      'USE_HWID_OVERRIDE=<(USE_hwid_override)',
-      'USE_CHROME_KIOSK_APP=<(USE_chrome_kiosk_app)',
-      'USE_CHROME_NETWORK_PROXY=<(USE_chrome_network_proxy)',
-      'USE_MTD=<(USE_mtd)',
-      'USE_OMAHA=1',
-      'USE_SHILL=1',
-    ],
-    'include_dirs': [
-      # We need this include dir because we include all the local code as
-      # "update_engine/...".
-      '<(platform2_root)/../aosp/system',
-      '<(platform2_root)/../aosp/system/update_engine/client_library/include',
-    ],
-  },
-  'targets': [
-    # Protobufs.
-    {
-      'target_name': 'update_metadata-protos',
-      'type': 'static_library',
-      'variables': {
-        'proto_in_dir': '.',
-        'proto_out_dir': 'include/update_engine',
-      },
-      'sources': [
-        'update_metadata.proto',
-      ],
-      'includes': ['../../../platform2/common-mk/protoc.gypi'],
-    },
-    # Chrome D-Bus bindings.
-    {
-      'target_name': 'update_engine-dbus-adaptor',
-      'type': 'none',
-      'variables': {
-        'dbus_adaptors_out_dir': 'include/dbus_bindings',
-        'dbus_xml_extension': 'dbus-xml',
-      },
-      'sources': [
-        'dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml',
-      ],
-      'includes': ['../../../platform2/common-mk/generate-dbus-adaptors.gypi'],
-    },
-    {
-      'target_name': 'update_engine-dbus-kiosk-app-client',
-      'type': 'none',
-      'actions': [{
-        'action_name': 'update_engine-dbus-kiosk-app-client-action',
-        'variables': {
-          'mock_output_file': 'include/kiosk-app/dbus-proxy-mocks.h',
-          'proxy_output_file': 'include/kiosk-app/dbus-proxies.h',
-        },
-        'sources': [
-          'dbus_bindings/org.chromium.KioskAppService.dbus-xml',
-        ],
-        'includes': ['../../../platform2/common-mk/generate-dbus-proxies.gypi'],
-      }],
-    },
-    # The payload application component and common dependencies.
-    {
-      'target_name': 'libpayload_consumer',
-      'type': 'static_library',
-      'dependencies': [
-        'update_metadata-protos',
-      ],
-      # TODO(deymo): Remove unused dependencies once we stop including files
-      # from the root directory.
-      'variables': {
-        'exported_deps': [
-          'libcrypto',
-          'xz-embedded',
-          'libbspatch',
-          'libpuffpatch',
-        ],
-        'deps': ['<@(exported_deps)'],
-      },
-      'all_dependent_settings': {
-        'variables': {
-          'deps': [
-            '<@(exported_deps)',
-          ],
-        },
-      },
-      'link_settings': {
-        'variables': {
-          'deps': [
-            '<@(exported_deps)',
-          ],
-        },
-        'libraries': [
-          '-lbz2',
-          '-lrt',
-        ],
-      },
-      'sources': [
-        'common/action_processor.cc',
-        'common/boot_control_stub.cc',
-        'common/clock.cc',
-        'common/constants.cc',
-        'common/cpu_limiter.cc',
-        'common/error_code_utils.cc',
-        'common/hash_calculator.cc',
-        'common/http_common.cc',
-        'common/http_fetcher.cc',
-        'common/hwid_override.cc',
-        'common/multi_range_http_fetcher.cc',
-        'common/platform_constants_chromeos.cc',
-        'common/prefs.cc',
-        'common/proxy_resolver.cc',
-        'common/subprocess.cc',
-        'common/terminator.cc',
-        'common/utils.cc',
-        'payload_consumer/bzip_extent_writer.cc',
-        'payload_consumer/cached_file_descriptor.cc',
-        'payload_consumer/delta_performer.cc',
-        'payload_consumer/download_action.cc',
-        'payload_consumer/extent_reader.cc',
-        'payload_consumer/extent_writer.cc',
-        'payload_consumer/file_descriptor.cc',
-        'payload_consumer/file_descriptor_utils.cc',
-        'payload_consumer/file_writer.cc',
-        'payload_consumer/filesystem_verifier_action.cc',
-        'payload_consumer/install_plan.cc',
-        'payload_consumer/mount_history.cc',
-        'payload_consumer/payload_constants.cc',
-        'payload_consumer/payload_metadata.cc',
-        'payload_consumer/payload_verifier.cc',
-        'payload_consumer/postinstall_runner_action.cc',
-        'payload_consumer/verity_writer_stub.cc',
-        'payload_consumer/xz_extent_writer.cc',
-      ],
-      'conditions': [
-        ['USE_mtd == 1', {
-          'sources': [
-            'payload_consumer/mtd_file_descriptor.cc',
-          ],
-          'link_settings': {
-            'libraries': [
-              '-lmtdutils',
-            ],
-          },
-        }],
-      ],
-    },
-    # The main daemon static_library with all the code used to check for updates
-    # with Omaha and expose a DBus daemon.
-    {
-      'target_name': 'libupdate_engine',
-      'type': 'static_library',
-      'dependencies': [
-        'libpayload_consumer',
-        'update_metadata-protos',
-        'update_engine-dbus-adaptor',
-      ],
-      'variables': {
-        'exported_deps': [
-          'dbus-1',
-          'expat',
-          'libcurl',
-          'libdebugd-client',
-          'libmetrics-<(libbase_ver)',
-          'libpower_manager-client',
-          'libsession_manager-client',
-          'libshill-client',
-          'libssl',
-          'libupdate_engine-client',
-          'vboot_host',
-        ],
-        'conditions':[
-          ['USE_dlc == 1', {
-            'exported_deps' : [
-              'libdlcservice-client',
-            ],
-          }],
-        ],
-        'deps': ['<@(exported_deps)'],
-      },
-      'all_dependent_settings': {
-        'variables': {
-          'deps': [
-            '<@(exported_deps)',
-          ],
-        },
-      },
-      'link_settings': {
-        'variables': {
-          'deps': [
-            '<@(exported_deps)',
-          ],
-        },
-        'libraries': [
-          '-lbz2',
-          '-lpolicy-<(libbase_ver)',
-          '-lrootdev',
-          '-lrt',
-        ],
-      },
-      'sources': [
-        'boot_control_chromeos.cc',
-        'certificate_checker.cc',
-        'common_service.cc',
-        'connection_manager.cc',
-        'connection_utils.cc',
-        'daemon.cc',
-        'dbus_connection.cc',
-        'dbus_service.cc',
-        'hardware_chromeos.cc',
-        'image_properties_chromeos.cc',
-        'libcurl_http_fetcher.cc',
-        'metrics_reporter_omaha.cc',
-        'metrics_utils.cc',
-        'omaha_request_action.cc',
-        'omaha_request_params.cc',
-        'omaha_response_handler_action.cc',
-        'omaha_utils.cc',
-        'p2p_manager.cc',
-        'payload_state.cc',
-        'power_manager_chromeos.cc',
-        'real_system_state.cc',
-        'shill_proxy.cc',
-        'update_attempter.cc',
-        'update_boot_flags_action.cc',
-        'update_manager/boxed_value.cc',
-        'update_manager/chromeos_policy.cc',
-        'update_manager/default_policy.cc',
-        'update_manager/enough_slots_ab_updates_policy_impl.cc',
-        'update_manager/enterprise_device_policy_impl.cc',
-        'update_manager/evaluation_context.cc',
-        'update_manager/interactive_update_policy_impl.cc',
-        'update_manager/next_update_check_policy_impl.cc',
-        'update_manager/official_build_check_policy_impl.cc',
-        'update_manager/out_of_box_experience_policy_impl.cc',
-        'update_manager/policy.cc',
-        'update_manager/policy_test_utils.cc',
-        'update_manager/real_config_provider.cc',
-        'update_manager/real_device_policy_provider.cc',
-        'update_manager/real_random_provider.cc',
-        'update_manager/real_shill_provider.cc',
-        'update_manager/real_system_provider.cc',
-        'update_manager/real_time_provider.cc',
-        'update_manager/real_updater_provider.cc',
-        'update_manager/staging_utils.cc',
-        'update_manager/state_factory.cc',
-        'update_manager/update_manager.cc',
-        'update_manager/update_time_restrictions_policy_impl.cc',
-        'update_manager/weekly_time.cc',
-        'update_status_utils.cc',
-      ],
-      'conditions': [
-        ['USE_chrome_network_proxy == 1', {
-          'sources': [
-            'chrome_browser_proxy_resolver.cc',
-          ],
-        }],
-        ['USE_chrome_kiosk_app == 1', {
-          'dependencies': [
-            'update_engine-dbus-kiosk-app-client',
-          ],
-        }],
-        ['USE_dlc == 1', {
-          'sources': [
-            'dlcservice_chromeos.cc',
-          ],
-        }],
-        ['USE_dlc == 0', {
-          'sources': [
-            'common/dlcservice_stub.cc',
-          ],
-        }],
-      ],
-    },
-    # update_engine daemon.
-    {
-      'target_name': 'update_engine',
-      'type': 'executable',
-      'dependencies': [
-        'libupdate_engine',
-      ],
-      'sources': [
-        'main.cc',
-      ],
-    },
-    # update_engine client library.
-    {
-      'target_name': 'libupdate_engine_client',
-      'type': 'static_library',
-      'variables': {
-        'deps': [
-          'dbus-1',
-          'libupdate_engine-client',
-        ],
-      },
-      'sources': [
-        'client_library/client.cc',
-        'client_library/client_dbus.cc',
-        'update_status_utils.cc',
-      ],
-      'include_dirs': [
-        'client_library/include',
-      ],
-    },
-    # update_engine console client.
-    {
-      'target_name': 'update_engine_client',
-      'type': 'executable',
-      'dependencies': [
-        'libupdate_engine_client',
-      ],
-      'sources': [
-        'common/error_code_utils.cc',
-        'omaha_utils.cc',
-        'update_engine_client.cc',
-      ],
-    },
-    # server-side code. This is used for delta_generator and unittests but not
-    # for any client code.
-    {
-      'target_name': 'libpayload_generator',
-      'type': 'static_library',
-      'dependencies': [
-        'libpayload_consumer',
-        'update_metadata-protos',
-      ],
-      'variables': {
-        'exported_deps': [
-          'ext2fs',
-          'libbsdiff',
-          'libpuffdiff',
-          'liblzma',
-        ],
-        'deps': ['<@(exported_deps)'],
-      },
-      'all_dependent_settings': {
-        'variables': {
-          'deps': [
-            '<@(exported_deps)',
-          ],
-        },
-      },
-      'link_settings': {
-        'variables': {
-          'deps': [
-            '<@(exported_deps)',
-          ],
-        },
-      },
-      'sources': [
-        'common/file_fetcher.cc',
-        'payload_generator/ab_generator.cc',
-        'payload_generator/annotated_operation.cc',
-        'payload_generator/blob_file_writer.cc',
-        'payload_generator/block_mapping.cc',
-        'payload_generator/boot_img_filesystem.cc',
-        'payload_generator/bzip.cc',
-        'payload_generator/cycle_breaker.cc',
-        'payload_generator/deflate_utils.cc',
-        'payload_generator/delta_diff_generator.cc',
-        'payload_generator/delta_diff_utils.cc',
-        'payload_generator/ext2_filesystem.cc',
-        'payload_generator/extent_ranges.cc',
-        'payload_generator/extent_utils.cc',
-        'payload_generator/full_update_generator.cc',
-        'payload_generator/graph_types.cc',
-        'payload_generator/graph_utils.cc',
-        'payload_generator/inplace_generator.cc',
-        'payload_generator/mapfile_filesystem.cc',
-        'payload_generator/payload_file.cc',
-        'payload_generator/payload_generation_config_chromeos.cc',
-        'payload_generator/payload_generation_config.cc',
-        'payload_generator/payload_signer.cc',
-        'payload_generator/raw_filesystem.cc',
-        'payload_generator/squashfs_filesystem.cc',
-        'payload_generator/tarjan.cc',
-        'payload_generator/topological_sort.cc',
-        'payload_generator/xz_chromeos.cc',
-      ],
-    },
-    # server-side delta generator.
-    {
-      'target_name': 'delta_generator',
-      'type': 'executable',
-      'dependencies': [
-        'libpayload_consumer',
-        'libpayload_generator',
-      ],
-      'link_settings': {
-        'ldflags!': [
-          '-pie',
-        ],
-      },
-      'sources': [
-        'payload_generator/generate_delta_main.cc',
-      ],
-    },
-    {
-      'target_name': 'update_engine_test_libs',
-      'type': 'static_library',
-      'variables': {
-        'deps': [
-          'libshill-client-test',
-        ],
-      },
-      'dependencies': [
-        'libupdate_engine',
-      ],
-      'includes': [
-        '../../../platform2/common-mk/common_test.gypi',
-      ],
-      'sources': [
-        'common/fake_prefs.cc',
-        'common/mock_http_fetcher.cc',
-        'common/test_utils.cc',
-        'fake_shill_proxy.cc',
-        'fake_system_state.cc',
-        'payload_consumer/fake_file_descriptor.cc',
-        'payload_generator/fake_filesystem.cc',
-        'update_manager/umtest_utils.cc',
-      ],
-    },
-  ],
-  'conditions': [
-    ['USE_test == 1', {
-      'targets': [
-        # Public keys used for unit testing.
-        {
-          'target_name': 'update_engine-testkeys',
-          'type': 'none',
-          'variables': {
-            'openssl_pem_in_dir': '.',
-            'openssl_pem_out_dir': 'include/update_engine',
-          },
-          'sources': [
-            'unittest_key.pem',
-            'unittest_key2.pem',
-            'unittest_key_RSA4096.pem',
-          ],
-          'includes': ['../../../platform2/common-mk/openssl_pem.gypi'],
-        },
-        # Unpacks sample images used for testing.
-        {
-          'target_name': 'update_engine-test_images',
-          'type': 'none',
-          'variables': {
-            'image_out_dir': '.',
-          },
-          'sources': [
-            'sample_images/sample_images.tar.bz2',
-          ],
-          'includes': ['tar_bunzip2.gypi'],
-        },
-        # Test HTTP Server.
-        {
-          'target_name': 'test_http_server',
-          'type': 'executable',
-          'sources': [
-            'common/http_common.cc',
-            'test_http_server.cc',
-          ],
-        },
-        # Test subprocess helper.
-        {
-          'target_name': 'test_subprocess',
-          'type': 'executable',
-          'sources': [
-            'test_subprocess.cc',
-          ],
-        },
-        # Main unittest file.
-        {
-          'target_name': 'update_engine_unittests',
-          'type': 'executable',
-          'variables': {
-            'deps': [
-              'libbrillo-test-<(libbase_ver)',
-              'libchrome-test-<(libbase_ver)',
-              'libdebugd-client-test',
-              'libpower_manager-client-test',
-              'libsession_manager-client-test',
-              'libshill-client-test',
-            ],
-          },
-          'dependencies': [
-            'libupdate_engine',
-            'libpayload_generator',
-            'update_engine_test_libs',
-          ],
-          'sources': [
-            'boot_control_chromeos_unittest.cc',
-            'certificate_checker_unittest.cc',
-            'common/action_pipe_unittest.cc',
-            'common/action_processor_unittest.cc',
-            'common/action_unittest.cc',
-            'common/cpu_limiter_unittest.cc',
-            'common/hash_calculator_unittest.cc',
-            'common/http_fetcher_unittest.cc',
-            'common/hwid_override_unittest.cc',
-            'common/prefs_unittest.cc',
-            'common/proxy_resolver_unittest.cc',
-            'common/subprocess_unittest.cc',
-            'common/terminator_unittest.cc',
-            'common/utils_unittest.cc',
-            'common_service_unittest.cc',
-            'connection_manager_unittest.cc',
-            'hardware_chromeos_unittest.cc',
-            'image_properties_chromeos_unittest.cc',
-            'metrics_reporter_omaha_unittest.cc',
-            'metrics_utils_unittest.cc',
-            'omaha_request_action_unittest.cc',
-            'omaha_request_params_unittest.cc',
-            'omaha_response_handler_action_unittest.cc',
-            'omaha_utils_unittest.cc',
-            'p2p_manager_unittest.cc',
-            'payload_consumer/bzip_extent_writer_unittest.cc',
-            'payload_consumer/cached_file_descriptor_unittest.cc',
-            'payload_consumer/delta_performer_integration_test.cc',
-            'payload_consumer/delta_performer_unittest.cc',
-            'payload_consumer/download_action_unittest.cc',
-            'payload_consumer/extent_reader_unittest.cc',
-            'payload_consumer/extent_writer_unittest.cc',
-            'payload_consumer/file_descriptor_utils_unittest.cc',
-            'payload_consumer/file_writer_unittest.cc',
-            'payload_consumer/filesystem_verifier_action_unittest.cc',
-            'payload_consumer/postinstall_runner_action_unittest.cc',
-            'payload_consumer/xz_extent_writer_unittest.cc',
-            'payload_generator/ab_generator_unittest.cc',
-            'payload_generator/blob_file_writer_unittest.cc',
-            'payload_generator/block_mapping_unittest.cc',
-            'payload_generator/boot_img_filesystem_unittest.cc',
-            'payload_generator/cycle_breaker_unittest.cc',
-            'payload_generator/deflate_utils_unittest.cc',
-            'payload_generator/delta_diff_utils_unittest.cc',
-            'payload_generator/ext2_filesystem_unittest.cc',
-            'payload_generator/extent_ranges_unittest.cc',
-            'payload_generator/extent_utils_unittest.cc',
-            'payload_generator/full_update_generator_unittest.cc',
-            'payload_generator/graph_utils_unittest.cc',
-            'payload_generator/inplace_generator_unittest.cc',
-            'payload_generator/mapfile_filesystem_unittest.cc',
-            'payload_generator/payload_file_unittest.cc',
-            'payload_generator/payload_generation_config_unittest.cc',
-            'payload_generator/payload_signer_unittest.cc',
-            'payload_generator/squashfs_filesystem_unittest.cc',
-            'payload_generator/tarjan_unittest.cc',
-            'payload_generator/topological_sort_unittest.cc',
-            'payload_generator/zip_unittest.cc',
-            'payload_state_unittest.cc',
-            'testrunner.cc',
-            'update_attempter_unittest.cc',
-            'update_boot_flags_action_unittest.cc',
-            'update_manager/boxed_value_unittest.cc',
-            'update_manager/chromeos_policy_unittest.cc',
-            'update_manager/evaluation_context_unittest.cc',
-            'update_manager/generic_variables_unittest.cc',
-            'update_manager/prng_unittest.cc',
-            'update_manager/real_device_policy_provider_unittest.cc',
-            'update_manager/real_random_provider_unittest.cc',
-            'update_manager/real_shill_provider_unittest.cc',
-            'update_manager/real_system_provider_unittest.cc',
-            'update_manager/real_time_provider_unittest.cc',
-            'update_manager/real_updater_provider_unittest.cc',
-            'update_manager/staging_utils_unittest.cc',
-            'update_manager/update_manager_unittest.cc',
-            'update_manager/update_time_restrictions_policy_impl_unittest.cc',
-            'update_manager/variable_unittest.cc',
-            'update_manager/weekly_time_unittest.cc',
-          ],
-        },
-      ],
-    }],
-    # Fuzzer target.
-    ['USE_fuzzer == 1', {
-      'targets': [
-        {
-          'target_name': 'update_engine_omaha_request_action_fuzzer',
-          'type': 'executable',
-          'variables': {
-            'deps': [
-              'libbrillo-test-<(libbase_ver)',
-              'libchrome-test-<(libbase_ver)',
-            ],
-          },
-          'includes': [
-            '../../../platform2/common-mk/common_fuzzer.gypi',
-          ],
-          'dependencies': [
-            'libupdate_engine',
-            'update_engine_test_libs',
-          ],
-          'sources': [
-            'omaha_request_action_fuzzer.cc',
-          ],
-        },
-      ],
-    }],
-  ],
-}
diff --git a/update_manager/android_things_policy.cc b/update_manager/android_things_policy.cc
deleted file mode 100644
index 4afcf12..0000000
--- a/update_manager/android_things_policy.cc
+++ /dev/null
@@ -1,182 +0,0 @@
-//
-// Copyright (C) 2017 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/update_manager/android_things_policy.h"
-
-#include <string>
-#include <vector>
-
-#include <base/logging.h>
-#include <base/time/time.h>
-
-#include "update_engine/update_manager/api_restricted_downloads_policy_impl.h"
-#include "update_engine/update_manager/enough_slots_ab_updates_policy_impl.h"
-#include "update_engine/update_manager/interactive_update_policy_impl.h"
-#include "update_engine/update_manager/official_build_check_policy_impl.h"
-
-using base::Time;
-using chromeos_update_engine::ErrorCode;
-using std::string;
-using std::vector;
-
-namespace chromeos_update_manager {
-
-const NextUpdateCheckPolicyConstants
-    AndroidThingsPolicy::kNextUpdateCheckPolicyConstants = {
-        .timeout_initial_interval = 7 * 60,
-        .timeout_periodic_interval = 5 * 60 * 60,
-        .timeout_max_backoff_interval = 26 * 60 * 60,
-        .timeout_regular_fuzz = 10 * 60,
-        .attempt_backoff_max_interval_in_days = 16,
-        .attempt_backoff_fuzz_in_hours = 12,
-};
-
-EvalStatus AndroidThingsPolicy::UpdateCheckAllowed(
-    EvaluationContext* ec,
-    State* state,
-    string* error,
-    UpdateCheckParams* result) const {
-  // Set the default return values.
-  result->updates_enabled = true;
-  result->target_channel.clear();
-  result->target_version_prefix.clear();
-  result->rollback_allowed = false;
-  result->rollback_allowed_milestones = -1;
-  result->interactive = false;
-
-  // Build a list of policies to consult.  Note that each policy may modify the
-  // result structure, even if it signals kContinue.
-  EnoughSlotsAbUpdatesPolicyImpl enough_slots_ab_updates_policy;
-  OnlyUpdateOfficialBuildsPolicyImpl only_update_official_builds_policy;
-  InteractiveUpdatePolicyImpl interactive_update_policy;
-  NextUpdateCheckTimePolicyImpl next_update_check_time_policy(
-      kNextUpdateCheckPolicyConstants);
-
-  vector<Policy const*> policies_to_consult = {
-      // Do not perform any updates if there are not enough slots to do
-      // A/B updates
-      &enough_slots_ab_updates_policy,
-
-      // Check to see if an interactive update was requested.
-      &interactive_update_policy,
-
-      // Unofficial builds should not perform periodic update checks.
-      &only_update_official_builds_policy,
-
-      // Ensure that periodic update checks are timed properly.
-      &next_update_check_time_policy,
-  };
-
-  // Now that the list of policy implementations, and the order to consult
-  // them, has been set up, consult them.  If none of the policies makes a
-  // definitive decision about whether or not to check for updates, then allow
-  // the update check to happen.
-  EvalStatus status = ConsultPolicies(policies_to_consult,
-                                      &Policy::UpdateCheckAllowed,
-                                      ec,
-                                      state,
-                                      error,
-                                      result);
-  if (status != EvalStatus::kContinue) {
-    return status;
-  } else {
-    // It is time to check for an update.
-    LOG(INFO) << "Allowing update check.";
-    return EvalStatus::kSucceeded;
-  }
-}
-
-// Uses the |UpdateRestrictions| to determine if the download and apply can
-// occur at this time.
-EvalStatus AndroidThingsPolicy::UpdateCanBeApplied(
-    EvaluationContext* ec,
-    State* state,
-    string* error,
-    ErrorCode* result,
-    chromeos_update_engine::InstallPlan* install_plan) const {
-  // Build a list of policies to consult.  Note that each policy may modify the
-  // result structure, even if it signals kContinue.
-  ApiRestrictedDownloadsPolicyImpl api_restricted_downloads_policy;
-
-  vector<Policy const*> policies_to_consult = {
-      // Do not apply the update if all updates are restricted by the API.
-      &api_restricted_downloads_policy,
-  };
-
-  // Now that the list of policy implementations, and the order to consult
-  // them, has been set up, consult them.  If none of the policies makes a
-  // definitive decision about whether or not to apply the update, then allow
-  // the update to be applied.
-  EvalStatus status = ConsultPolicies(policies_to_consult,
-                                      &Policy::UpdateCanBeApplied,
-                                      ec,
-                                      state,
-                                      error,
-                                      result,
-                                      install_plan);
-  if (EvalStatus::kContinue != status) {
-    return status;
-  } else {
-    // The update can proceed.
-    LOG(INFO) << "Allowing update to be applied.";
-    *result = ErrorCode::kSuccess;
-    return EvalStatus::kSucceeded;
-  }
-}
-
-// Always returns |EvalStatus::kSucceeded|
-EvalStatus AndroidThingsPolicy::UpdateCanStart(EvaluationContext* ec,
-                                               State* state,
-                                               string* error,
-                                               UpdateDownloadParams* result,
-                                               UpdateState update_state) const {
-  // Update is good to go.
-  result->update_can_start = true;
-  return EvalStatus::kSucceeded;
-}
-
-// Always returns |EvalStatus::kSucceeded|
-EvalStatus AndroidThingsPolicy::UpdateDownloadAllowed(EvaluationContext* ec,
-                                                      State* state,
-                                                      string* error,
-                                                      bool* result) const {
-  // By default, we allow updates.
-  *result = true;
-  return EvalStatus::kSucceeded;
-}
-
-// P2P is always disabled.  Returns |result|==|false| and
-// |EvalStatus::kSucceeded|
-EvalStatus AndroidThingsPolicy::P2PEnabled(EvaluationContext* ec,
-                                           State* state,
-                                           string* error,
-                                           bool* result) const {
-  *result = false;
-  return EvalStatus::kSucceeded;
-}
-
-// This will return immediately with |EvalStatus::kSucceeded| and set
-// |result|==|false|
-EvalStatus AndroidThingsPolicy::P2PEnabledChanged(EvaluationContext* ec,
-                                                  State* state,
-                                                  string* error,
-                                                  bool* result,
-                                                  bool prev_result) const {
-  *result = false;
-  return EvalStatus::kSucceeded;
-}
-
-}  // namespace chromeos_update_manager
diff --git a/update_manager/android_things_policy.h b/update_manager/android_things_policy.h
deleted file mode 100644
index 9fd8bc4..0000000
--- a/update_manager/android_things_policy.h
+++ /dev/null
@@ -1,92 +0,0 @@
-//
-// Copyright (C) 2017 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_UPDATE_MANAGER_ANDROID_THINGS_POLICY_H_
-#define UPDATE_ENGINE_UPDATE_MANAGER_ANDROID_THINGS_POLICY_H_
-
-#include <string>
-
-#include "update_engine/update_manager/next_update_check_policy_impl.h"
-#include "update_engine/update_manager/policy_utils.h"
-
-namespace chromeos_update_manager {
-
-// AndroidThingsPolicy implements the policy-related logic used in
-// AndroidThings.
-class AndroidThingsPolicy : public Policy {
- public:
-  AndroidThingsPolicy() = default;
-  ~AndroidThingsPolicy() override = default;
-
-  // Policy overrides.
-  EvalStatus UpdateCheckAllowed(EvaluationContext* ec,
-                                State* state,
-                                std::string* error,
-                                UpdateCheckParams* result) const override;
-
-  // Uses the |UpdateRestrictions| to determine if the download and apply can
-  // occur at this time.
-  EvalStatus UpdateCanBeApplied(
-      EvaluationContext* ec,
-      State* state,
-      std::string* error,
-      chromeos_update_engine::ErrorCode* result,
-      chromeos_update_engine::InstallPlan* install_plan) const override;
-
-  // Always returns |EvalStatus::kSucceeded|
-  EvalStatus UpdateCanStart(EvaluationContext* ec,
-                            State* state,
-                            std::string* error,
-                            UpdateDownloadParams* result,
-                            UpdateState update_state) const override;
-
-  // Always returns |EvalStatus::kSucceeded|
-  EvalStatus UpdateDownloadAllowed(EvaluationContext* ec,
-                                   State* state,
-                                   std::string* error,
-                                   bool* result) const override;
-
-  // P2P is always disabled.  Returns |result|==|false| and
-  // |EvalStatus::kSucceeded|
-  EvalStatus P2PEnabled(EvaluationContext* ec,
-                        State* state,
-                        std::string* error,
-                        bool* result) const override;
-
-  // This will return immediately with |EvalStatus::kSucceeded| and set
-  // |result|==|false|
-  EvalStatus P2PEnabledChanged(EvaluationContext* ec,
-                               State* state,
-                               std::string* error,
-                               bool* result,
-                               bool prev_result) const override;
-
- protected:
-  // Policy override.
-  std::string PolicyName() const override { return "AndroidThingsPolicy"; }
-
- private:
-  friend class UmAndroidThingsPolicyTest;
-  FRIEND_TEST(UmAndroidThingsPolicyTest, UpdateCheckAllowedWaitsForTheTimeout);
-
-  static const NextUpdateCheckPolicyConstants kNextUpdateCheckPolicyConstants;
-
-  DISALLOW_COPY_AND_ASSIGN(AndroidThingsPolicy);
-};
-
-}  // namespace chromeos_update_manager
-
-#endif  // UPDATE_ENGINE_UPDATE_MANAGER_ANDROID_THINGS_POLICY_H_
diff --git a/update_manager/android_things_policy_unittest.cc b/update_manager/android_things_policy_unittest.cc
deleted file mode 100644
index 6961efc..0000000
--- a/update_manager/android_things_policy_unittest.cc
+++ /dev/null
@@ -1,188 +0,0 @@
-//
-// Copyright (C) 2017 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/update_manager/android_things_policy.h"
-
-#include <memory>
-
-#include "update_engine/update_manager/next_update_check_policy_impl.h"
-#include "update_engine/update_manager/policy_test_utils.h"
-
-using base::Time;
-using base::TimeDelta;
-using chromeos_update_engine::ErrorCode;
-using chromeos_update_engine::InstallPlan;
-
-namespace chromeos_update_manager {
-
-class UmAndroidThingsPolicyTest : public UmPolicyTestBase {
- protected:
-  UmAndroidThingsPolicyTest() {
-    policy_ = std::make_unique<AndroidThingsPolicy>();
-  }
-
-  void SetUpDefaultState() override {
-    UmPolicyTestBase::SetUpDefaultState();
-
-    // For the purpose of the tests, this is an official build
-    fake_state_.system_provider()->var_is_official_build()->reset(
-        new bool(true));
-    // NOLINTNEXTLINE(readability/casting)
-    fake_state_.system_provider()->var_num_slots()->reset(new unsigned int(2));
-  }
-
-  // Configures the policy to return a desired value from UpdateCheckAllowed by
-  // faking the current wall clock time as needed. Restores the default state.
-  // This is used when testing policies that depend on this one.
-  virtual void SetUpdateCheckAllowed(bool allow_check) {
-    Time next_update_check;
-    CallMethodWithContext(&NextUpdateCheckTimePolicyImpl::NextUpdateCheckTime,
-                          &next_update_check,
-                          AndroidThingsPolicy::kNextUpdateCheckPolicyConstants);
-    SetUpDefaultState();
-    Time curr_time = next_update_check;
-    if (allow_check)
-      curr_time += TimeDelta::FromSeconds(1);
-    else
-      curr_time -= TimeDelta::FromSeconds(1);
-    fake_clock_.SetWallclockTime(curr_time);
-  }
-};
-
-TEST_F(UmAndroidThingsPolicyTest, UpdateCheckAllowedWaitsForTheTimeout) {
-  // We get the next update_check timestamp from the policy's private method
-  // and then check that the public method respects that value in the normal
-  // case.
-  Time next_update_check;
-  Time last_checked_time =
-      fake_clock_.GetWallclockTime() + TimeDelta::FromMinutes(1234);
-
-  LOG(INFO) << "last_checked_time: " << last_checked_time;
-  fake_state_.updater_provider()->var_last_checked_time()->reset(
-      new Time(last_checked_time));
-  CallMethodWithContext(&NextUpdateCheckTimePolicyImpl::NextUpdateCheckTime,
-                        &next_update_check,
-                        AndroidThingsPolicy::kNextUpdateCheckPolicyConstants);
-  LOG(INFO) << "Next check allowed at: " << next_update_check;
-
-  // Check that the policy blocks until the next_update_check is reached.
-  SetUpDefaultClock();
-  SetUpDefaultState();
-  fake_state_.updater_provider()->var_last_checked_time()->reset(
-      new Time(last_checked_time));
-  fake_clock_.SetWallclockTime(next_update_check - TimeDelta::FromSeconds(1));
-
-  UpdateCheckParams result;
-  ExpectPolicyStatus(
-      EvalStatus::kAskMeAgainLater, &Policy::UpdateCheckAllowed, &result);
-
-  SetUpDefaultClock();
-  SetUpDefaultState();
-  fake_state_.updater_provider()->var_last_checked_time()->reset(
-      new Time(last_checked_time));
-  fake_clock_.SetWallclockTime(next_update_check + TimeDelta::FromSeconds(1));
-  ExpectPolicyStatus(
-      EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
-  EXPECT_TRUE(result.updates_enabled);
-  EXPECT_FALSE(result.interactive);
-}
-
-TEST_F(UmAndroidThingsPolicyTest,
-       UpdateCheckAllowedUpdatesDisabledForUnofficialBuilds) {
-  // UpdateCheckAllowed should return kAskMeAgainLater if this is an unofficial
-  // build; we don't want periodic update checks on developer images.
-
-  fake_state_.system_provider()->var_is_official_build()->reset(
-      new bool(false));
-
-  UpdateCheckParams result;
-  ExpectPolicyStatus(
-      EvalStatus::kAskMeAgainLater, &Policy::UpdateCheckAllowed, &result);
-}
-
-TEST_F(UmAndroidThingsPolicyTest,
-       UpdateCheckAllowedUpdatesDisabledWhenNotEnoughSlotsAbUpdates) {
-  // UpdateCheckAllowed should return false (kSucceeded) if the image booted
-  // without enough slots to do A/B updates.
-
-  // NOLINTNEXTLINE(readability/casting)
-  fake_state_.system_provider()->var_num_slots()->reset(new unsigned int(1));
-
-  UpdateCheckParams result;
-  ExpectPolicyStatus(
-      EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
-  EXPECT_FALSE(result.updates_enabled);
-}
-
-TEST_F(UmAndroidThingsPolicyTest,
-       UpdateCheckAllowedForcedUpdateRequestedInteractive) {
-  // UpdateCheckAllowed should return true because a forced update request was
-  // signaled for an interactive update.
-
-  SetUpdateCheckAllowed(true);
-  fake_state_.updater_provider()->var_forced_update_requested()->reset(
-      new UpdateRequestStatus(UpdateRequestStatus::kInteractive));
-
-  UpdateCheckParams result;
-  ExpectPolicyStatus(
-      EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
-  EXPECT_TRUE(result.updates_enabled);
-  EXPECT_TRUE(result.interactive);
-}
-
-TEST_F(UmAndroidThingsPolicyTest,
-       UpdateCheckAllowedForcedUpdateRequestedPeriodic) {
-  // UpdateCheckAllowed should return true because a forced update request was
-  // signaled for a periodic check.
-
-  SetUpdateCheckAllowed(true);
-  fake_state_.updater_provider()->var_forced_update_requested()->reset(
-      new UpdateRequestStatus(UpdateRequestStatus::kPeriodic));
-
-  UpdateCheckParams result;
-  ExpectPolicyStatus(
-      EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
-  EXPECT_TRUE(result.updates_enabled);
-  EXPECT_FALSE(result.interactive);
-}
-
-TEST_F(UmAndroidThingsPolicyTest, UpdateCanBeAppliedOk) {
-  // UpdateCanBeApplied should return kSucceeded in the base case
-
-  InstallPlan plan;
-  ErrorCode result;
-  ExpectPolicyStatus(
-      EvalStatus::kSucceeded, &Policy::UpdateCanBeApplied, &result, &plan);
-
-  EXPECT_EQ(ErrorCode::kSuccess, result);
-}
-
-TEST_F(UmAndroidThingsPolicyTest, UpdateCanBeAppliedRestricted) {
-  // UpdateCanBeApplied should return kOmahaUpdateDeferredPerPolicy
-  // when the restricted flag is set in the Updater.
-
-  fake_state_.updater_provider()->var_update_restrictions()->reset(
-      new UpdateRestrictions(UpdateRestrictions::kRestrictDownloading));
-
-  InstallPlan plan;
-  ErrorCode result;
-  ExpectPolicyStatus(
-      EvalStatus::kSucceeded, &Policy::UpdateCanBeApplied, &result, &plan);
-
-  EXPECT_EQ(ErrorCode::kOmahaUpdateDeferredPerPolicy, result);
-}
-
-}  // namespace chromeos_update_manager
diff --git a/update_manager/boxed_value.cc b/update_manager/boxed_value.cc
index cee1ece..907eb95 100644
--- a/update_manager/boxed_value.cc
+++ b/update_manager/boxed_value.cc
@@ -23,9 +23,10 @@
 
 #include <base/strings/string_number_conversions.h>
 #include <base/time/time.h>
+#include <base/version.h>
 
+#include "update_engine/common/connection_utils.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/connection_utils.h"
 #include "update_engine/update_manager/rollback_prefs.h"
 #include "update_engine/update_manager/shill_provider.h"
 #include "update_engine/update_manager/updater_provider.h"
@@ -51,41 +52,25 @@
 template <>
 string BoxedValue::ValuePrinter<int>(const void* value) {
   const int* val = reinterpret_cast<const int*>(value);
-#if BASE_VER < 576279
-  return base::IntToString(*val);
-#else
   return base::NumberToString(*val);
-#endif
 }
 
 template <>
 string BoxedValue::ValuePrinter<unsigned int>(const void* value) {
   const unsigned int* val = reinterpret_cast<const unsigned int*>(value);
-#if BASE_VER < 576279
-  return base::UintToString(*val);
-#else
   return base::NumberToString(*val);
-#endif
 }
 
 template <>
 string BoxedValue::ValuePrinter<int64_t>(const void* value) {
   const int64_t* val = reinterpret_cast<const int64_t*>(value);
-#if BASE_VER < 576279
-  return base::Int64ToString(*val);
-#else
   return base::NumberToString(*val);
-#endif
 }
 
 template <>
 string BoxedValue::ValuePrinter<uint64_t>(const void* value) {
   const uint64_t* val = reinterpret_cast<const uint64_t*>(value);
-#if BASE_VER < 576279
-  return base::Uint64ToString(*val);
-#else
   return base::NumberToString(*val);
-#endif
 }
 
 template <>
@@ -97,11 +82,7 @@
 template <>
 string BoxedValue::ValuePrinter<double>(const void* value) {
   const double* val = reinterpret_cast<const double*>(value);
-#if BASE_VER < 576279
-  return base::DoubleToString(*val);
-#else
   return base::NumberToString(*val);
-#endif
 }
 
 template <>
@@ -167,8 +148,6 @@
       return "Rollback and powerwash";
     case RollbackToTargetVersion::kRollbackAndRestoreIfPossible:
       return "Rollback and restore if possible";
-    case RollbackToTargetVersion::kRollbackOnlyIfRestorePossible:
-      return "Rollback only if restore is possible";
     case RollbackToTargetVersion::kMaxValue:
       NOTREACHED();
       return "Max value";
@@ -199,6 +178,8 @@
       return "Reporting Error Event";
     case Stage::kAttemptingRollback:
       return "Attempting Rollback";
+    case Stage::kCleanupPreviousUpdate:
+      return "Cleanup Previous Update";
   }
   NOTREACHED();
   return "Unknown";
@@ -254,4 +235,30 @@
   return retval;
 }
 
+template <>
+string BoxedValue::ValuePrinter<ChannelDowngradeBehavior>(const void* value) {
+  const ChannelDowngradeBehavior* val =
+      reinterpret_cast<const ChannelDowngradeBehavior*>(value);
+  switch (*val) {
+    case ChannelDowngradeBehavior::kUnspecified:
+      return "Unspecified";
+    case ChannelDowngradeBehavior::kWaitForVersionToCatchUp:
+      return "Wait for the target channel to catch up";
+    case ChannelDowngradeBehavior::kRollback:
+      return "Roll back and powerwash on channel downgrade";
+    case ChannelDowngradeBehavior::kAllowUserToConfigure:
+      return "User decides on channel downgrade behavior";
+  }
+  NOTREACHED();
+  return "Unknown";
+}
+
+template <>
+string BoxedValue::ValuePrinter<base::Version>(const void* value) {
+  const base::Version* val = reinterpret_cast<const base::Version*>(value);
+  if (val->IsValid())
+    return val->GetString();
+  return "Unknown";
+}
+
 }  // namespace chromeos_update_manager
diff --git a/update_manager/boxed_value_unittest.cc b/update_manager/boxed_value_unittest.cc
index 2fa94ec..5b87a7b 100644
--- a/update_manager/boxed_value_unittest.cc
+++ b/update_manager/boxed_value_unittest.cc
@@ -168,11 +168,6 @@
       BoxedValue(new ConnectionType(ConnectionType::kEthernet)).ToString());
   EXPECT_EQ("wifi",
             BoxedValue(new ConnectionType(ConnectionType::kWifi)).ToString());
-  EXPECT_EQ("wimax",
-            BoxedValue(new ConnectionType(ConnectionType::kWimax)).ToString());
-  EXPECT_EQ(
-      "bluetooth",
-      BoxedValue(new ConnectionType(ConnectionType::kBluetooth)).ToString());
   EXPECT_EQ(
       "cellular",
       BoxedValue(new ConnectionType(ConnectionType::kCellular)).ToString());
@@ -215,18 +210,13 @@
       BoxedValue(new RollbackToTargetVersion(
                      RollbackToTargetVersion::kRollbackAndRestoreIfPossible))
           .ToString());
-  EXPECT_EQ(
-      "Rollback only if restore is possible",
-      BoxedValue(new RollbackToTargetVersion(
-                     RollbackToTargetVersion::kRollbackOnlyIfRestorePossible))
-          .ToString());
 }
 
 TEST(UmBoxedValueTest, SetConnectionTypeToString) {
   set<ConnectionType>* set1 = new set<ConnectionType>;
-  set1->insert(ConnectionType::kWimax);
+  set1->insert(ConnectionType::kCellular);
   set1->insert(ConnectionType::kEthernet);
-  EXPECT_EQ("ethernet,wimax", BoxedValue(set1).ToString());
+  EXPECT_EQ("ethernet,cellular", BoxedValue(set1).ToString());
 
   set<ConnectionType>* set2 = new set<ConnectionType>;
   set2->insert(ConnectionType::kWifi);
diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc
index 1fa8636..24b8293 100644
--- a/update_manager/chromeos_policy.cc
+++ b/update_manager/chromeos_policy.cc
@@ -17,6 +17,7 @@
 #include "update_engine/update_manager/chromeos_policy.h"
 
 #include <algorithm>
+#include <memory>
 #include <set>
 #include <string>
 #include <vector>
@@ -48,6 +49,7 @@
 using std::min;
 using std::set;
 using std::string;
+using std::unique_ptr;
 using std::vector;
 
 namespace {
@@ -154,6 +156,7 @@
     case ErrorCode::kUnresolvedHostRecovered:
     case ErrorCode::kNotEnoughSpace:
     case ErrorCode::kDeviceCorrupted:
+    case ErrorCode::kPackageExcludedFromUpdate:
       LOG(INFO) << "Not changing URL index or failure count due to error "
                 << chromeos_update_engine::utils::ErrorCodeToString(err_code)
                 << " (" << static_cast<int>(err_code) << ")";
@@ -190,6 +193,10 @@
 
 namespace chromeos_update_manager {
 
+unique_ptr<Policy> GetSystemPolicy() {
+  return std::make_unique<ChromeOSPolicy>();
+}
+
 const NextUpdateCheckPolicyConstants
     ChromeOSPolicy::kNextUpdateCheckPolicyConstants = {
         .timeout_initial_interval = 7 * 60,
@@ -210,10 +217,13 @@
   // Set the default return values.
   result->updates_enabled = true;
   result->target_channel.clear();
+  result->lts_tag.clear();
   result->target_version_prefix.clear();
   result->rollback_allowed = false;
   result->rollback_allowed_milestones = -1;
+  result->rollback_on_channel_downgrade = false;
   result->interactive = false;
+  result->quick_fix_build_token.clear();
 
   EnoughSlotsAbUpdatesPolicyImpl enough_slots_ab_updates_policy;
   EnterpriseDevicePolicyImpl enterprise_device_policy;
@@ -456,92 +466,6 @@
   return EvalStatus::kSucceeded;
 }
 
-// TODO(garnold) Logic in this method is based on
-// ConnectionManager::IsUpdateAllowedOver(); be sure to deprecate the latter.
-//
-// TODO(garnold) The current logic generally treats the list of allowed
-// connections coming from the device policy as a whitelist, meaning that it
-// can only be used for enabling connections, but not to disable them. Further,
-// certain connection types (like Bluetooth) cannot be enabled even by policy.
-// In effect, the only thing that device policy can change is to enable
-// updates over a cellular network (disabled by default). We may want to
-// revisit this semantics, allowing greater flexibility in defining specific
-// permissions over all types of networks.
-EvalStatus ChromeOSPolicy::UpdateDownloadAllowed(EvaluationContext* ec,
-                                                 State* state,
-                                                 string* error,
-                                                 bool* result) const {
-  // Get the current connection type.
-  ShillProvider* const shill_provider = state->shill_provider();
-  const ConnectionType* conn_type_p =
-      ec->GetValue(shill_provider->var_conn_type());
-  POLICY_CHECK_VALUE_AND_FAIL(conn_type_p, error);
-  ConnectionType conn_type = *conn_type_p;
-
-  // If we're tethering, treat it as a cellular connection.
-  if (conn_type != ConnectionType::kCellular) {
-    const ConnectionTethering* conn_tethering_p =
-        ec->GetValue(shill_provider->var_conn_tethering());
-    POLICY_CHECK_VALUE_AND_FAIL(conn_tethering_p, error);
-    if (*conn_tethering_p == ConnectionTethering::kConfirmed)
-      conn_type = ConnectionType::kCellular;
-  }
-
-  // By default, we allow updates for all connection types, with exceptions as
-  // noted below. This also determines whether a device policy can override the
-  // default.
-  *result = true;
-  bool device_policy_can_override = false;
-  switch (conn_type) {
-    case ConnectionType::kBluetooth:
-      *result = false;
-      break;
-
-    case ConnectionType::kCellular:
-      *result = false;
-      device_policy_can_override = true;
-      break;
-
-    case ConnectionType::kUnknown:
-      if (error)
-        *error = "Unknown connection type";
-      return EvalStatus::kFailed;
-
-    default:
-      break;  // Nothing to do.
-  }
-
-  // If update is allowed, we're done.
-  if (*result)
-    return EvalStatus::kSucceeded;
-
-  // Check whether the device policy specifically allows this connection.
-  if (device_policy_can_override) {
-    DevicePolicyProvider* const dp_provider = state->device_policy_provider();
-    const bool* device_policy_is_loaded_p =
-        ec->GetValue(dp_provider->var_device_policy_is_loaded());
-    if (device_policy_is_loaded_p && *device_policy_is_loaded_p) {
-      const set<ConnectionType>* allowed_conn_types_p =
-          ec->GetValue(dp_provider->var_allowed_connection_types_for_update());
-      if (allowed_conn_types_p) {
-        if (allowed_conn_types_p->count(conn_type)) {
-          *result = true;
-          return EvalStatus::kSucceeded;
-        }
-      } else if (conn_type == ConnectionType::kCellular) {
-        // Local user settings can allow updates over cellular iff a policy was
-        // loaded but no allowed connections were specified in it.
-        const bool* update_over_cellular_allowed_p =
-            ec->GetValue(state->updater_provider()->var_cellular_enabled());
-        if (update_over_cellular_allowed_p && *update_over_cellular_allowed_p)
-          *result = true;
-      }
-    }
-  }
-
-  return (*result ? EvalStatus::kSucceeded : EvalStatus::kAskMeAgainLater);
-}
-
 EvalStatus ChromeOSPolicy::P2PEnabled(EvaluationContext* ec,
                                       State* state,
                                       string* error,
@@ -560,8 +484,9 @@
     if (policy_au_p2p_enabled_p) {
       enabled = *policy_au_p2p_enabled_p;
     } else {
-      const string* policy_owner_p = ec->GetValue(dp_provider->var_owner());
-      if (!policy_owner_p || policy_owner_p->empty())
+      const bool* policy_has_owner_p =
+          ec->GetValue(dp_provider->var_has_owner());
+      if (!policy_has_owner_p || !*policy_has_owner_p)
         enabled = true;
     }
   }
@@ -595,7 +520,6 @@
     string* error,
     UpdateBackoffAndDownloadUrlResult* result,
     const UpdateState& update_state) const {
-  // Sanity checks.
   DCHECK_GE(update_state.download_errors_max, 0);
 
   // Set default result values.
@@ -667,7 +591,7 @@
   Time prev_err_time;
   bool is_first = true;
   for (const auto& err_tuple : update_state.download_errors) {
-    // Do some sanity checks.
+    // Do some validation checks.
     int used_url_idx = get<0>(err_tuple);
     if (is_first && url_idx >= 0 && used_url_idx != url_idx) {
       LOG(WARNING) << "First URL in error log (" << used_url_idx
diff --git a/update_manager/chromeos_policy.h b/update_manager/chromeos_policy.h
index ded5164..3c196da 100644
--- a/update_manager/chromeos_policy.h
+++ b/update_manager/chromeos_policy.h
@@ -72,11 +72,6 @@
                             UpdateDownloadParams* result,
                             UpdateState update_state) const override;
 
-  EvalStatus UpdateDownloadAllowed(EvaluationContext* ec,
-                                   State* state,
-                                   std::string* error,
-                                   bool* result) const override;
-
   EvalStatus P2PEnabled(EvaluationContext* ec,
                         State* state,
                         std::string* error,
diff --git a/update_manager/chromeos_policy_unittest.cc b/update_manager/chromeos_policy_unittest.cc
index 5341ebb..5bd416d 100644
--- a/update_manager/chromeos_policy_unittest.cc
+++ b/update_manager/chromeos_policy_unittest.cc
@@ -109,7 +109,7 @@
       curr_time += TimeDelta::FromSeconds(1);
     else
       curr_time -= TimeDelta::FromSeconds(1);
-    fake_clock_.SetWallclockTime(curr_time);
+    fake_clock_->SetWallclockTime(curr_time);
   }
 
   // Sets the policies required for a kiosk app to control Chrome OS version:
@@ -180,7 +180,7 @@
   // case.
   Time next_update_check;
   Time last_checked_time =
-      fake_clock_.GetWallclockTime() + TimeDelta::FromMinutes(1234);
+      fake_clock_->GetWallclockTime() + TimeDelta::FromMinutes(1234);
 
   fake_state_.updater_provider()->var_last_checked_time()->reset(
       new Time(last_checked_time));
@@ -195,7 +195,7 @@
   SetUpDefaultState();
   fake_state_.updater_provider()->var_last_checked_time()->reset(
       new Time(last_checked_time));
-  fake_clock_.SetWallclockTime(next_update_check - TimeDelta::FromSeconds(1));
+  fake_clock_->SetWallclockTime(next_update_check - TimeDelta::FromSeconds(1));
   ExpectPolicyStatus(
       EvalStatus::kAskMeAgainLater, &Policy::UpdateCheckAllowed, &result);
 
@@ -203,7 +203,7 @@
   SetUpDefaultState();
   fake_state_.updater_provider()->var_last_checked_time()->reset(
       new Time(last_checked_time));
-  fake_clock_.SetWallclockTime(next_update_check + TimeDelta::FromSeconds(1));
+  fake_clock_->SetWallclockTime(next_update_check + TimeDelta::FromSeconds(1));
   ExpectPolicyStatus(
       EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
@@ -216,7 +216,7 @@
   // Ensure that update is not allowed even if wait period is satisfied.
   Time next_update_check;
   Time last_checked_time =
-      fake_clock_.GetWallclockTime() + TimeDelta::FromMinutes(1234);
+      fake_clock_->GetWallclockTime() + TimeDelta::FromMinutes(1234);
 
   fake_state_.updater_provider()->var_last_checked_time()->reset(
       new Time(last_checked_time));
@@ -228,7 +228,7 @@
   SetUpDefaultState();
   fake_state_.updater_provider()->var_last_checked_time()->reset(
       new Time(last_checked_time));
-  fake_clock_.SetWallclockTime(next_update_check + TimeDelta::FromSeconds(1));
+  fake_clock_->SetWallclockTime(next_update_check + TimeDelta::FromSeconds(1));
   fake_state_.system_provider()->var_is_oobe_complete()->reset(new bool(false));
 
   UpdateCheckParams result;
@@ -240,7 +240,7 @@
   SetUpDefaultState();
   fake_state_.updater_provider()->var_last_checked_time()->reset(
       new Time(last_checked_time));
-  fake_clock_.SetWallclockTime(next_update_check + TimeDelta::FromSeconds(1));
+  fake_clock_->SetWallclockTime(next_update_check + TimeDelta::FromSeconds(1));
   ExpectPolicyStatus(
       EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
@@ -262,6 +262,10 @@
       new bool(false));
   fake_state_.device_policy_provider()->var_release_channel()->reset(
       new string("foo-channel"));
+  fake_state_.device_policy_provider()->var_release_lts_tag()->reset(
+      new string("foo-hint"));
+  fake_state_.device_policy_provider()->var_quick_fix_build_token()->reset(
+      new string("foo-token"));
 
   UpdateCheckParams result;
   ExpectPolicyStatus(
@@ -270,6 +274,8 @@
   EXPECT_EQ("1.2", result.target_version_prefix);
   EXPECT_EQ(5, result.rollback_allowed_milestones);
   EXPECT_EQ("foo-channel", result.target_channel);
+  EXPECT_EQ("foo-hint", result.lts_tag);
+  EXPECT_EQ("foo-token", result.quick_fix_build_token);
   EXPECT_FALSE(result.interactive);
 }
 
@@ -284,12 +290,6 @@
       true, RollbackToTargetVersion::kRollbackAndRestoreIfPossible));
 }
 
-TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedRollbackOnlyIfRestorePossible) {
-  // We're not allowed to do rollback until we support data save and restore.
-  EXPECT_FALSE(TestRollbackAllowed(
-      true, RollbackToTargetVersion::kRollbackOnlyIfRestorePossible));
-}
-
 TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedRollbackDisabled) {
   EXPECT_FALSE(TestRollbackAllowed(true, RollbackToTargetVersion::kDisabled));
 }
@@ -344,6 +344,26 @@
       EvalStatus::kAskMeAgainLater, &Policy::UpdateCheckAllowed, &result);
 }
 
+TEST_F(UmChromeOSPolicyTest, TestUpdateCheckIntervalTimeout) {
+  fake_state_.updater_provider()
+      ->var_test_update_check_interval_timeout()
+      ->reset(new int64_t(10));
+  fake_state_.system_provider()->var_is_official_build()->reset(
+      new bool(false));
+
+  // The first time, update should not be allowed.
+  UpdateCheckParams result;
+  ExpectPolicyStatus(
+      EvalStatus::kAskMeAgainLater, &Policy::UpdateCheckAllowed, &result);
+
+  // After moving the time forward past the update check interval, the update
+  // should now be allowed.
+  fake_clock_->SetWallclockTime(fake_clock_->GetWallclockTime() +
+                                TimeDelta::FromSeconds(11));
+  ExpectPolicyStatus(
+      EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
+}
+
 TEST_F(UmChromeOSPolicyTest,
        UpdateCheckAllowedUpdatesDisabledWhenNotEnoughSlotsAbUpdates) {
   // UpdateCheckAllowed should return false (kSucceeded) if the image booted
@@ -552,7 +572,7 @@
 
   SetUpdateCheckAllowed(false);
 
-  const Time curr_time = fake_clock_.GetWallclockTime();
+  const Time curr_time = fake_clock_->GetWallclockTime();
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromSeconds(10));
   update_state.download_errors_max = 1;
   update_state.download_errors.emplace_back(
@@ -582,7 +602,7 @@
 
   SetUpdateCheckAllowed(false);
 
-  const Time curr_time = fake_clock_.GetWallclockTime();
+  const Time curr_time = fake_clock_->GetWallclockTime();
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromSeconds(10));
   update_state.download_errors_max = 1;
   update_state.download_errors.emplace_back(
@@ -615,7 +635,7 @@
 
   SetUpdateCheckAllowed(false);
 
-  const Time curr_time = fake_clock_.GetWallclockTime();
+  const Time curr_time = fake_clock_->GetWallclockTime();
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromSeconds(10));
   update_state.download_errors_max = 1;
   update_state.download_errors.emplace_back(
@@ -649,7 +669,7 @@
 
   SetUpdateCheckAllowed(false);
 
-  const Time curr_time = fake_clock_.GetWallclockTime();
+  const Time curr_time = fake_clock_->GetWallclockTime();
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromSeconds(10));
   update_state.download_errors_max = 1;
   update_state.download_errors.emplace_back(
@@ -682,7 +702,7 @@
 
   SetUpdateCheckAllowed(false);
 
-  const Time curr_time = fake_clock_.GetWallclockTime();
+  const Time curr_time = fake_clock_->GetWallclockTime();
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromSeconds(10));
   update_state.download_errors_max = 1;
   update_state.download_errors.emplace_back(
@@ -715,7 +735,7 @@
 
   SetUpdateCheckAllowed(false);
 
-  const Time curr_time = fake_clock_.GetWallclockTime();
+  const Time curr_time = fake_clock_->GetWallclockTime();
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromSeconds(10));
   update_state.download_errors_max = 1;
   update_state.download_errors.emplace_back(
@@ -748,7 +768,7 @@
 
   SetUpdateCheckAllowed(false);
 
-  const Time curr_time = fake_clock_.GetWallclockTime();
+  const Time curr_time = fake_clock_->GetWallclockTime();
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromSeconds(10));
   update_state.download_errors_max = 1;
   update_state.download_errors.emplace_back(
@@ -1124,7 +1144,7 @@
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromMinutes(10));
   update_state.p2p_num_attempts = 1;
   update_state.p2p_first_attempted =
-      fake_clock_.GetWallclockTime() -
+      fake_clock_->GetWallclockTime() -
       TimeDelta::FromSeconds(ChromeOSPolicy::kMaxP2PAttemptsPeriodInSeconds +
                              1);
   UpdateDownloadParams result;
@@ -1198,7 +1218,7 @@
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromMinutes(10));
   update_state.num_checks = 5;
   update_state.download_urls.emplace_back("http://another/fake/url/");
-  Time t = fake_clock_.GetWallclockTime() - TimeDelta::FromSeconds(12);
+  Time t = fake_clock_->GetWallclockTime() - TimeDelta::FromSeconds(12);
   for (int i = 0; i < 5; i++) {
     update_state.download_errors.emplace_back(
         0, ErrorCode::kDownloadTransferError, t);
@@ -1227,7 +1247,7 @@
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromMinutes(10));
   update_state.num_checks = 10;
   update_state.download_urls.emplace_back("http://another/fake/url/");
-  Time t = fake_clock_.GetWallclockTime() - TimeDelta::FromSeconds(12);
+  Time t = fake_clock_->GetWallclockTime() - TimeDelta::FromSeconds(12);
   for (int i = 0; i < 11; i++) {
     update_state.download_errors.emplace_back(
         0, ErrorCode::kDownloadTransferError, t);
@@ -1259,7 +1279,7 @@
   update_state.download_errors.emplace_back(
       0,
       ErrorCode::kPayloadHashMismatchError,
-      fake_clock_.GetWallclockTime() - TimeDelta::FromSeconds(1));
+      fake_clock_->GetWallclockTime() - TimeDelta::FromSeconds(1));
 
   // Check that the UpdateCanStart returns true.
   UpdateDownloadParams result;
@@ -1288,7 +1308,7 @@
   update_state.download_errors.emplace_back(
       1,
       ErrorCode::kPayloadHashMismatchError,
-      fake_clock_.GetWallclockTime() - TimeDelta::FromSeconds(1));
+      fake_clock_->GetWallclockTime() - TimeDelta::FromSeconds(1));
 
   // Check that the UpdateCanStart returns true.
   UpdateDownloadParams result;
@@ -1371,7 +1391,7 @@
 
   // Override specific device policy attributes.
   fake_state_.device_policy_provider()->var_au_p2p_enabled()->reset(nullptr);
-  fake_state_.device_policy_provider()->var_owner()->reset(nullptr);
+  fake_state_.device_policy_provider()->var_has_owner()->reset(new bool(false));
   fake_state_.device_policy_provider()->var_http_downloads_enabled()->reset(
       new bool(false));
 
@@ -1389,148 +1409,6 @@
   EXPECT_FALSE(result.do_increment_failures);
 }
 
-TEST_F(UmChromeOSPolicyTest, UpdateDownloadAllowedEthernetDefault) {
-  // Ethernet is always allowed.
-
-  fake_state_.shill_provider()->var_conn_type()->reset(
-      new ConnectionType(ConnectionType::kEthernet));
-
-  bool result;
-  ExpectPolicyStatus(
-      EvalStatus::kSucceeded, &Policy::UpdateDownloadAllowed, &result);
-  EXPECT_TRUE(result);
-}
-
-TEST_F(UmChromeOSPolicyTest, UpdateDownloadAllowedWifiDefault) {
-  // Wifi is allowed if not tethered.
-
-  fake_state_.shill_provider()->var_conn_type()->reset(
-      new ConnectionType(ConnectionType::kWifi));
-
-  bool result;
-  ExpectPolicyStatus(
-      EvalStatus::kSucceeded, &Policy::UpdateDownloadAllowed, &result);
-  EXPECT_TRUE(result);
-}
-
-TEST_F(UmChromeOSPolicyTest,
-       UpdateCurrentConnectionNotAllowedWifiTetheredDefault) {
-  // Tethered wifi is not allowed by default.
-
-  fake_state_.shill_provider()->var_conn_type()->reset(
-      new ConnectionType(ConnectionType::kWifi));
-  fake_state_.shill_provider()->var_conn_tethering()->reset(
-      new ConnectionTethering(ConnectionTethering::kConfirmed));
-
-  bool result;
-  ExpectPolicyStatus(
-      EvalStatus::kAskMeAgainLater, &Policy::UpdateDownloadAllowed, &result);
-}
-
-TEST_F(UmChromeOSPolicyTest, UpdateDownloadAllowedWifiTetheredPolicyOverride) {
-  // Tethered wifi can be allowed by policy.
-
-  fake_state_.shill_provider()->var_conn_type()->reset(
-      new ConnectionType(ConnectionType::kWifi));
-  fake_state_.shill_provider()->var_conn_tethering()->reset(
-      new ConnectionTethering(ConnectionTethering::kConfirmed));
-  set<ConnectionType> allowed_connections;
-  allowed_connections.insert(ConnectionType::kCellular);
-  fake_state_.device_policy_provider()
-      ->var_allowed_connection_types_for_update()
-      ->reset(new set<ConnectionType>(allowed_connections));
-
-  bool result;
-  ExpectPolicyStatus(
-      EvalStatus::kSucceeded, &Policy::UpdateDownloadAllowed, &result);
-  EXPECT_TRUE(result);
-}
-
-TEST_F(UmChromeOSPolicyTest, UpdateDownloadAllowedWimaxDefault) {
-  // Wimax is always allowed.
-
-  fake_state_.shill_provider()->var_conn_type()->reset(
-      new ConnectionType(ConnectionType::kWifi));
-
-  bool result;
-  ExpectPolicyStatus(
-      EvalStatus::kSucceeded, &Policy::UpdateDownloadAllowed, &result);
-  EXPECT_TRUE(result);
-}
-
-TEST_F(UmChromeOSPolicyTest,
-       UpdateCurrentConnectionNotAllowedBluetoothDefault) {
-  // Bluetooth is never allowed.
-
-  fake_state_.shill_provider()->var_conn_type()->reset(
-      new ConnectionType(ConnectionType::kBluetooth));
-
-  bool result;
-  ExpectPolicyStatus(
-      EvalStatus::kAskMeAgainLater, &Policy::UpdateDownloadAllowed, &result);
-}
-
-TEST_F(UmChromeOSPolicyTest,
-       UpdateCurrentConnectionNotAllowedBluetoothPolicyCannotOverride) {
-  // Bluetooth cannot be allowed even by policy.
-
-  fake_state_.shill_provider()->var_conn_type()->reset(
-      new ConnectionType(ConnectionType::kBluetooth));
-  set<ConnectionType> allowed_connections;
-  allowed_connections.insert(ConnectionType::kBluetooth);
-  fake_state_.device_policy_provider()
-      ->var_allowed_connection_types_for_update()
-      ->reset(new set<ConnectionType>(allowed_connections));
-
-  bool result;
-  ExpectPolicyStatus(
-      EvalStatus::kAskMeAgainLater, &Policy::UpdateDownloadAllowed, &result);
-}
-
-TEST_F(UmChromeOSPolicyTest, UpdateCurrentConnectionNotAllowedCellularDefault) {
-  // Cellular is not allowed by default.
-
-  fake_state_.shill_provider()->var_conn_type()->reset(
-      new ConnectionType(ConnectionType::kCellular));
-
-  bool result;
-  ExpectPolicyStatus(
-      EvalStatus::kAskMeAgainLater, &Policy::UpdateDownloadAllowed, &result);
-}
-
-TEST_F(UmChromeOSPolicyTest, UpdateDownloadAllowedCellularPolicyOverride) {
-  // Update over cellular can be enabled by policy.
-
-  fake_state_.shill_provider()->var_conn_type()->reset(
-      new ConnectionType(ConnectionType::kCellular));
-  set<ConnectionType> allowed_connections;
-  allowed_connections.insert(ConnectionType::kCellular);
-  fake_state_.device_policy_provider()
-      ->var_allowed_connection_types_for_update()
-      ->reset(new set<ConnectionType>(allowed_connections));
-
-  bool result;
-  ExpectPolicyStatus(
-      EvalStatus::kSucceeded, &Policy::UpdateDownloadAllowed, &result);
-  EXPECT_TRUE(result);
-}
-
-TEST_F(UmChromeOSPolicyTest, UpdateDownloadAllowedCellularUserOverride) {
-  // Update over cellular can be enabled by user settings, but only if policy
-  // is present and does not determine allowed connections.
-
-  fake_state_.shill_provider()->var_conn_type()->reset(
-      new ConnectionType(ConnectionType::kCellular));
-  set<ConnectionType> allowed_connections;
-  allowed_connections.insert(ConnectionType::kCellular);
-  fake_state_.updater_provider()->var_cellular_enabled()->reset(new bool(true));
-
-  bool result;
-  ExpectPolicyStatus(
-      EvalStatus::kSucceeded, &Policy::UpdateDownloadAllowed, &result);
-  EXPECT_TRUE(result);
-}
-
 TEST_F(UmChromeOSPolicyTest, UpdateCanStartAllowedScatteringSupressedDueToP2P) {
   // The UpdateCanStart policy returns true; scattering should have applied, but
   // P2P download is allowed. Scattering values are nonetheless returned, and so
@@ -1565,7 +1443,7 @@
 
   SetUpdateCheckAllowed(false);
 
-  const Time curr_time = fake_clock_.GetWallclockTime();
+  const Time curr_time = fake_clock_->GetWallclockTime();
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromSeconds(10));
   update_state.download_errors_max = 1;
   update_state.download_errors.emplace_back(
@@ -1616,7 +1494,7 @@
 
 TEST_F(UmChromeOSPolicyTest, P2PEnabledAllowedDeviceEnterpriseEnrolled) {
   fake_state_.device_policy_provider()->var_au_p2p_enabled()->reset(nullptr);
-  fake_state_.device_policy_provider()->var_owner()->reset(nullptr);
+  fake_state_.device_policy_provider()->var_has_owner()->reset(new bool(false));
 
   bool result;
   ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::P2PEnabled, &result);
@@ -1631,7 +1509,7 @@
 
 TEST_F(UmChromeOSPolicyTest,
        UpdateCanBeAppliedForcedUpdatesDisablesTimeRestrictions) {
-  Time curr_time = fake_clock_.GetWallclockTime();
+  Time curr_time = fake_clock_->GetWallclockTime();
   fake_state_.updater_provider()->var_forced_update_requested()->reset(
       new UpdateRequestStatus(UpdateRequestStatus::kInteractive));
   // Should return kAskMeAgainLater when updated are not forced.
@@ -1644,7 +1522,7 @@
 }
 
 TEST_F(UmChromeOSPolicyTest, UpdateCanBeAppliedFailsInDisallowedTime) {
-  Time curr_time = fake_clock_.GetWallclockTime();
+  Time curr_time = fake_clock_->GetWallclockTime();
   TestDisallowedTimeIntervals(
       {WeeklyTimeInterval(
           WeeklyTime::FromTime(curr_time),
@@ -1654,7 +1532,7 @@
 }
 
 TEST_F(UmChromeOSPolicyTest, UpdateCanBeAppliedOutsideDisallowedTime) {
-  Time curr_time = fake_clock_.GetWallclockTime();
+  Time curr_time = fake_clock_->GetWallclockTime();
   TestDisallowedTimeIntervals(
       {WeeklyTimeInterval(
           WeeklyTime::FromTime(curr_time - TimeDelta::FromHours(3)),
@@ -1664,7 +1542,7 @@
 }
 
 TEST_F(UmChromeOSPolicyTest, UpdateCanBeAppliedPassesOnNonKiosk) {
-  Time curr_time = fake_clock_.GetWallclockTime();
+  Time curr_time = fake_clock_->GetWallclockTime();
   TestDisallowedTimeIntervals(
       {WeeklyTimeInterval(
           WeeklyTime::FromTime(curr_time),
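
Throughout this test file, fake_clock_ becomes a pointer borrowed from the fake system state rather than a member object. A sketch of the fixture wiring this implies, following the pattern visible later in this patch in evaluation_context_unittest.cc (the fixture body and the timestamp are illustrative only):

    void SetUp() override {
      // FakeSystemState owns the FakeClock; tests borrow a raw pointer to it.
      chromeos_update_engine::FakeSystemState::CreateInstance();
      fake_clock_ = chromeos_update_engine::FakeSystemState::Get()->fake_clock();
      fake_clock_->SetWallclockTime(base::Time::FromTimeT(1280102400));
    }
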
diff --git a/update_manager/default_policy.cc b/update_manager/default_policy.cc
index 81ab795..0713e06 100644
--- a/update_manager/default_policy.cc
+++ b/update_manager/default_policy.cc
@@ -14,10 +14,12 @@
 // limitations under the License.
 //
 
+#include "update_engine/common/system_state.h"
 #include "update_engine/update_manager/default_policy.h"
 
 using chromeos_update_engine::ErrorCode;
 using chromeos_update_engine::InstallPlan;
+using chromeos_update_engine::SystemState;
 
 namespace {
 
@@ -31,19 +33,19 @@
 
 namespace chromeos_update_manager {
 
-DefaultPolicy::DefaultPolicy(chromeos_update_engine::ClockInterface* clock)
-    : clock_(clock), aux_state_(new DefaultPolicyState()) {}
-
 EvalStatus DefaultPolicy::UpdateCheckAllowed(EvaluationContext* ec,
                                              State* state,
                                              std::string* error,
                                              UpdateCheckParams* result) const {
   result->updates_enabled = true;
   result->target_channel.clear();
+  result->lts_tag.clear();
   result->target_version_prefix.clear();
   result->rollback_allowed = false;
   result->rollback_allowed_milestones = -1;  // No version rolls should happen.
+  result->rollback_on_channel_downgrade = false;
   result->interactive = false;
+  result->quick_fix_build_token.clear();
 
   // Ensure that the minimum interval is set. If there's no clock, this defaults
   // to always allowing the update.
@@ -51,8 +53,8 @@
       ec->IsMonotonicTimeGreaterThan(
           aux_state_->last_check_allowed_time() +
           base::TimeDelta::FromSeconds(kCheckIntervalInSeconds))) {
-    if (clock_)
-      aux_state_->set_last_check_allowed_time(clock_->GetMonotonicTime());
+    aux_state_->set_last_check_allowed_time(
+        SystemState::Get()->clock()->GetMonotonicTime());
     return EvalStatus::kSucceeded;
   }
 
@@ -87,14 +89,6 @@
   return EvalStatus::kSucceeded;
 }
 
-EvalStatus DefaultPolicy::UpdateDownloadAllowed(EvaluationContext* ec,
-                                                State* state,
-                                                std::string* error,
-                                                bool* result) const {
-  *result = true;
-  return EvalStatus::kSucceeded;
-}
-
 EvalStatus DefaultPolicy::P2PEnabled(EvaluationContext* ec,
                                      State* state,
                                      std::string* error,
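
DefaultPolicy now reads time through the process-wide SystemState singleton instead of an injected ClockInterface. A minimal sketch of that access pattern, using only calls that appear in this change (the free function itself is hypothetical):

    #include <base/time/time.h>

    #include "update_engine/common/system_state.h"

    // Hypothetical helper: fetch the monotonic time the way DefaultPolicy and
    // EvaluationContext now do, via the SystemState singleton.
    base::Time NowMonotonic() {
      return chromeos_update_engine::SystemState::Get()->clock()->GetMonotonicTime();
    }
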
diff --git a/update_manager/default_policy.h b/update_manager/default_policy.h
index 1b284f4..c93bb46 100644
--- a/update_manager/default_policy.h
+++ b/update_manager/default_policy.h
@@ -22,7 +22,6 @@
 
 #include <base/time/time.h>
 
-#include "update_engine/common/clock_interface.h"
 #include "update_engine/update_manager/policy.h"
 
 namespace chromeos_update_manager {
@@ -60,9 +59,8 @@
 // actual policy being used by the UpdateManager.
 class DefaultPolicy : public Policy {
  public:
-  explicit DefaultPolicy(chromeos_update_engine::ClockInterface* clock);
-  DefaultPolicy() : DefaultPolicy(nullptr) {}
-  ~DefaultPolicy() override {}
+  DefaultPolicy() : aux_state_(new DefaultPolicyState()) {}
+  ~DefaultPolicy() override = default;
 
   // Policy overrides.
   EvalStatus UpdateCheckAllowed(EvaluationContext* ec,
@@ -83,11 +81,6 @@
                             UpdateDownloadParams* result,
                             UpdateState update_state) const override;
 
-  EvalStatus UpdateDownloadAllowed(EvaluationContext* ec,
-                                   State* state,
-                                   std::string* error,
-                                   bool* result) const override;
-
   EvalStatus P2PEnabled(EvaluationContext* ec,
                         State* state,
                         std::string* error,
@@ -104,9 +97,6 @@
   std::string PolicyName() const override { return "DefaultPolicy"; }
 
  private:
-  // A clock interface.
-  chromeos_update_engine::ClockInterface* clock_;
-
   // An auxiliary state object.
   std::unique_ptr<DefaultPolicyState> aux_state_;
 
diff --git a/update_manager/device_policy_provider.h b/update_manager/device_policy_provider.h
index 873282e..5112f68 100644
--- a/update_manager/device_policy_provider.h
+++ b/update_manager/device_policy_provider.h
@@ -21,6 +21,7 @@
 #include <string>
 
 #include <base/time/time.h>
+#include <base/version.h>
 #include <policy/libpolicy.h>
 
 #include "update_engine/update_manager/provider.h"
@@ -44,6 +45,8 @@
 
   virtual Variable<bool>* var_release_channel_delegated() = 0;
 
+  virtual Variable<std::string>* var_release_lts_tag() = 0;
+
   virtual Variable<bool>* var_update_disabled() = 0;
 
   virtual Variable<std::string>* var_target_version_prefix() = 0;
@@ -66,9 +69,9 @@
   virtual Variable<std::set<chromeos_update_engine::ConnectionType>>*
   var_allowed_connection_types_for_update() = 0;
 
-  // Variable stating the name of the device owner. For enterprise enrolled
-  // devices, this will be an empty string.
-  virtual Variable<std::string>* var_owner() = 0;
+  // Variable stating whether the device has an owner. For enterprise-enrolled
+  // devices, this is false since the owner is stored as an empty string.
+  virtual Variable<bool>* var_has_owner() = 0;
 
   virtual Variable<bool>* var_http_downloads_enabled() = 0;
 
@@ -85,6 +88,19 @@
   virtual Variable<WeeklyTimeIntervalVector>*
   var_disallowed_time_intervals() = 0;
 
+  // Variable that determines whether we should powerwash and roll back on
+  // channel downgrade for enrolled devices.
+  virtual Variable<ChannelDowngradeBehavior>*
+  var_channel_downgrade_behavior() = 0;
+
+  // Variable that contains the minimum required Chrome OS version, expressed
+  // as a Chrome OS version number.
+  virtual Variable<base::Version>* var_device_minimum_version() = 0;
+
+  // Variable that contains a token which maps to a Chrome OS Quick Fix Build to
+  // which the device would be updated if not blocked by another policy.
+  virtual Variable<std::string>* var_quick_fix_build_token() = 0;
+
  protected:
   DevicePolicyProvider() {}
 
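
The provider grows several variables in this change: var_release_lts_tag(), var_has_owner(), var_channel_downgrade_behavior(), var_device_minimum_version(), and var_quick_fix_build_token(). A sketch of seeding them in a test, mirroring the fake-provider idiom used by the unit tests in this patch (the literal values are illustrative):

    // Inside a UmPolicyTestBase-style test body; fake_state_ and the
    // reset(new T(...)) idiom come from the surrounding tests.
    fake_state_.device_policy_provider()->var_release_lts_tag()->reset(
        new std::string("lts"));
    fake_state_.device_policy_provider()->var_has_owner()->reset(new bool(false));
    fake_state_.device_policy_provider()->var_channel_downgrade_behavior()->reset(
        new ChannelDowngradeBehavior(ChannelDowngradeBehavior::kRollback));
    fake_state_.device_policy_provider()->var_device_minimum_version()->reset(
        new base::Version("13000.0.0"));
    fake_state_.device_policy_provider()->var_quick_fix_build_token()->reset(
        new std::string("token"));
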
diff --git a/update_manager/enterprise_device_policy_impl.cc b/update_manager/enterprise_device_policy_impl.cc
index a3430ef..b9a11e1 100644
--- a/update_manager/enterprise_device_policy_impl.cc
+++ b/update_manager/enterprise_device_policy_impl.cc
@@ -62,13 +62,37 @@
           ec->GetValue(system_provider->var_kiosk_required_platform_version());
       if (!kiosk_required_platform_version_p) {
         LOG(INFO) << "Kiosk app required platform version is not fetched, "
-                     "blocking update checks";
+                     "blocking update checks.";
         return EvalStatus::kAskMeAgainLater;
+      } else if (kiosk_required_platform_version_p->empty()) {
+        // The platform version could not be fetched several times. Update
+        // based on |DeviceMinimumVersion| instead (crbug.com/1048931).
+        const base::Version* device_minimum_version_p =
+            ec->GetValue(dp_provider->var_device_minimum_version());
+        const base::Version* current_version_p(
+            ec->GetValue(system_provider->var_chromeos_version()));
+        if (device_minimum_version_p && device_minimum_version_p->IsValid() &&
+            current_version_p && current_version_p->IsValid() &&
+            *current_version_p > *device_minimum_version_p) {
+          // Do not update if the current version is newer than the minimum
+          // version.
+          LOG(INFO) << "Reading kiosk app required platform version failed "
+                       "repeatedly but current version is newer than "
+                       "DeviceMinimumVersion. Blocking update checks. "
+                       "Current version: "
+                    << *current_version_p
+                    << " DeviceMinimumVersion: " << *device_minimum_version_p;
+          return EvalStatus::kAskMeAgainLater;
+        }
+        LOG(WARNING) << "Reading kiosk app required platform version failed "
+                        "repeatedly. Attempting an update without it now.";
+        // An empty string for |target_version_prefix| allows arbitrary updates.
+        result->target_version_prefix = "";
+      } else {
+        result->target_version_prefix = *kiosk_required_platform_version_p;
+        LOG(INFO) << "Allow kiosk app to control Chrome version policy is set, "
+                  << "target version is " << result->target_version_prefix;
       }
-
-      result->target_version_prefix = *kiosk_required_platform_version_p;
-      LOG(INFO) << "Allow kiosk app to control Chrome version policy is set, "
-                << "target version is " << result->target_version_prefix;
       // TODO(hunyadym): Add support for allowing rollback using the manifest
       // (if policy doesn't specify otherwise).
     } else {
@@ -91,22 +115,18 @@
         case RollbackToTargetVersion::kDisabled:
           LOG(INFO) << "Policy disables rollbacks.";
           result->rollback_allowed = false;
+          result->rollback_data_save_requested = false;
           break;
         case RollbackToTargetVersion::kRollbackAndPowerwash:
           LOG(INFO) << "Policy allows rollbacks with powerwash.";
           result->rollback_allowed = true;
+          result->rollback_data_save_requested = false;
           break;
         case RollbackToTargetVersion::kRollbackAndRestoreIfPossible:
           LOG(INFO)
               << "Policy allows rollbacks, also tries to restore if possible.";
-          // We don't support restore yet, but policy still allows rollback.
           result->rollback_allowed = true;
-          break;
-        case RollbackToTargetVersion::kRollbackOnlyIfRestorePossible:
-          LOG(INFO) << "Policy only allows rollbacks if restore is possible.";
-          // We don't support restore yet, policy doesn't allow rollback in this
-          // case.
-          result->rollback_allowed = false;
+          result->rollback_data_save_requested = true;
           break;
         case RollbackToTargetVersion::kMaxValue:
           NOTREACHED();
@@ -121,14 +141,35 @@
     if (rollback_allowed_milestones_p)
       result->rollback_allowed_milestones = *rollback_allowed_milestones_p;
 
-    // Determine whether a target channel is dictated by policy.
+    // Determine whether a target channel is dictated by policy and whether we
+    // should roll back if that channel is more stable.
     const bool* release_channel_delegated_p =
         ec->GetValue(dp_provider->var_release_channel_delegated());
     if (release_channel_delegated_p && !(*release_channel_delegated_p)) {
       const string* release_channel_p =
           ec->GetValue(dp_provider->var_release_channel());
-      if (release_channel_p)
+      if (release_channel_p) {
         result->target_channel = *release_channel_p;
+        const ChannelDowngradeBehavior* channel_downgrade_behavior_p =
+            ec->GetValue(dp_provider->var_channel_downgrade_behavior());
+        if (channel_downgrade_behavior_p &&
+            *channel_downgrade_behavior_p ==
+                ChannelDowngradeBehavior::kRollback) {
+          result->rollback_on_channel_downgrade = true;
+        }
+      }
+    }
+
+    const string* release_lts_tag_p =
+        ec->GetValue(dp_provider->var_release_lts_tag());
+    if (release_lts_tag_p) {
+      result->lts_tag = *release_lts_tag_p;
+    }
+
+    const string* quick_fix_build_token_p =
+        ec->GetValue(dp_provider->var_quick_fix_build_token());
+    if (quick_fix_build_token_p) {
+      result->quick_fix_build_token = *quick_fix_build_token_p;
     }
   }
   return EvalStatus::kContinue;
diff --git a/update_manager/enterprise_device_policy_impl_unittest.cc b/update_manager/enterprise_device_policy_impl_unittest.cc
new file mode 100644
index 0000000..30f54b1
--- /dev/null
+++ b/update_manager/enterprise_device_policy_impl_unittest.cc
@@ -0,0 +1,170 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_manager/enterprise_device_policy_impl.h"
+
+#include <memory>
+
+#include "update_engine/update_manager/policy_test_utils.h"
+
+namespace chromeos_update_manager {
+
+class UmEnterpriseDevicePolicyImplTest : public UmPolicyTestBase {
+ protected:
+  UmEnterpriseDevicePolicyImplTest() : UmPolicyTestBase() {
+    policy_ = std::make_unique<EnterpriseDevicePolicyImpl>();
+  }
+
+  void SetUpDefaultState() override {
+    UmPolicyTestBase::SetUpDefaultState();
+
+    fake_state_.device_policy_provider()->var_device_policy_is_loaded()->reset(
+        new bool(true));
+  }
+};
+
+TEST_F(UmEnterpriseDevicePolicyImplTest, KioskAppVersionSet) {
+  fake_state_.device_policy_provider()->var_update_disabled()->reset(
+      new bool(true));
+  fake_state_.device_policy_provider()
+      ->var_allow_kiosk_app_control_chrome_version()
+      ->reset(new bool(true));
+
+  fake_state_.system_provider()->var_kiosk_required_platform_version()->reset(
+      new std::string("1234.5.6"));
+
+  UpdateCheckParams result;
+  ExpectPolicyStatus(
+      EvalStatus::kContinue, &Policy::UpdateCheckAllowed, &result);
+  EXPECT_EQ(result.target_version_prefix, "1234.5.6");
+}
+
+TEST_F(UmEnterpriseDevicePolicyImplTest, KioskAppVersionUnreadableNoUpdate) {
+  fake_state_.device_policy_provider()->var_update_disabled()->reset(
+      new bool(true));
+  fake_state_.device_policy_provider()
+      ->var_allow_kiosk_app_control_chrome_version()
+      ->reset(new bool(true));
+
+  fake_state_.system_provider()->var_kiosk_required_platform_version()->reset(
+      nullptr);
+
+  UpdateCheckParams result;
+  ExpectPolicyStatus(
+      EvalStatus::kAskMeAgainLater, &Policy::UpdateCheckAllowed, &result);
+}
+
+TEST_F(UmEnterpriseDevicePolicyImplTest, KioskAppVersionUnreadableUpdate) {
+  fake_state_.device_policy_provider()->var_update_disabled()->reset(
+      new bool(true));
+  fake_state_.device_policy_provider()
+      ->var_allow_kiosk_app_control_chrome_version()
+      ->reset(new bool(true));
+
+  // The real variable returns an empty string after several unsuccessful
+  // reading attempts. Fake this by setting it directly to an empty string.
+  fake_state_.system_provider()->var_kiosk_required_platform_version()->reset(
+      new std::string(""));
+
+  UpdateCheckParams result;
+  ExpectPolicyStatus(
+      EvalStatus::kContinue, &Policy::UpdateCheckAllowed, &result);
+  EXPECT_EQ(result.target_version_prefix, "");
+}
+
+TEST_F(UmEnterpriseDevicePolicyImplTest,
+       KioskAppVersionUnreadableUpdateWithMinVersion) {
+  fake_state_.device_policy_provider()->var_update_disabled()->reset(
+      new bool(true));
+  fake_state_.device_policy_provider()
+      ->var_allow_kiosk_app_control_chrome_version()
+      ->reset(new bool(true));
+
+  // The real variable returns an empty string after several unsuccessful
+  // reading attempts. Fake this by setting it directly to an empty string.
+  fake_state_.system_provider()->var_kiosk_required_platform_version()->reset(
+      new std::string(""));
+  // Update if the minimum version is above the current OS version.
+  fake_state_.device_policy_provider()->var_device_minimum_version()->reset(
+      new base::Version("2.0.0"));
+  fake_state_.system_provider()->var_chromeos_version()->reset(
+      new base::Version("1.0.0"));
+
+  UpdateCheckParams result;
+  ExpectPolicyStatus(
+      EvalStatus::kContinue, &Policy::UpdateCheckAllowed, &result);
+  EXPECT_EQ(result.target_version_prefix, "");
+}
+
+TEST_F(UmEnterpriseDevicePolicyImplTest,
+       KioskAppVersionUnreadableNoUpdateWithMinVersion) {
+  fake_state_.device_policy_provider()->var_update_disabled()->reset(
+      new bool(true));
+  fake_state_.device_policy_provider()
+      ->var_allow_kiosk_app_control_chrome_version()
+      ->reset(new bool(true));
+
+  // The real variable returns an empty string after several unsuccessful
+  // reading attempts. Fake this by setting it directly to an empty string.
+  fake_state_.system_provider()->var_kiosk_required_platform_version()->reset(
+      new std::string(""));
+  // Block update if the minimum version is below the current OS version.
+  fake_state_.device_policy_provider()->var_device_minimum_version()->reset(
+      new base::Version("1.0.0"));
+  fake_state_.system_provider()->var_chromeos_version()->reset(
+      new base::Version("2.0.0"));
+
+  UpdateCheckParams result;
+  ExpectPolicyStatus(
+      EvalStatus::kAskMeAgainLater, &Policy::UpdateCheckAllowed, &result);
+}
+
+TEST_F(UmEnterpriseDevicePolicyImplTest, ChannelDowngradeBehaviorNoRollback) {
+  fake_state_.device_policy_provider()->var_release_channel_delegated()->reset(
+      new bool(false));
+  fake_state_.device_policy_provider()->var_release_channel()->reset(
+      new std::string("stable-channel"));
+
+  UpdateCheckParams result;
+  ExpectPolicyStatus(
+      EvalStatus::kContinue, &Policy::UpdateCheckAllowed, &result);
+  EXPECT_FALSE(result.rollback_on_channel_downgrade);
+}
+
+TEST_F(UmEnterpriseDevicePolicyImplTest, ChannelDowngradeBehaviorRollback) {
+  fake_state_.device_policy_provider()->var_release_channel_delegated()->reset(
+      new bool(false));
+  fake_state_.device_policy_provider()->var_release_channel()->reset(
+      new std::string("stable-channel"));
+  fake_state_.device_policy_provider()->var_channel_downgrade_behavior()->reset(
+      new ChannelDowngradeBehavior(ChannelDowngradeBehavior::kRollback));
+
+  UpdateCheckParams result;
+  ExpectPolicyStatus(
+      EvalStatus::kContinue, &Policy::UpdateCheckAllowed, &result);
+  EXPECT_TRUE(result.rollback_on_channel_downgrade);
+}
+
+TEST_F(UmEnterpriseDevicePolicyImplTest, QuickFixBuildToken) {
+  fake_state_.device_policy_provider()->var_quick_fix_build_token()->reset(
+      new std::string("token"));
+  UpdateCheckParams result;
+  ExpectPolicyStatus(
+      EvalStatus::kContinue, &Policy::UpdateCheckAllowed, &result);
+  EXPECT_EQ(result.quick_fix_build_token, "token");
+}
+
+}  // namespace chromeos_update_manager
diff --git a/update_manager/enterprise_rollback_policy_impl.cc b/update_manager/enterprise_rollback_policy_impl.cc
new file mode 100644
index 0000000..ab4e38c
--- /dev/null
+++ b/update_manager/enterprise_rollback_policy_impl.cc
@@ -0,0 +1,40 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_manager/enterprise_rollback_policy_impl.h"
+
+using chromeos_update_engine::ErrorCode;
+using chromeos_update_engine::InstallPlan;
+using std::string;
+
+namespace chromeos_update_manager {
+
+EvalStatus EnterpriseRollbackPolicyImpl::UpdateCanBeApplied(
+    EvaluationContext* ec,
+    State* state,
+    string* error,
+    ErrorCode* result,
+    InstallPlan* install_plan) const {
+  if (install_plan && install_plan->is_rollback) {
+    LOG(INFO)
+        << "Update is enterprise rollback, allowing update to be applied.";
+    *result = ErrorCode::kSuccess;
+    return EvalStatus::kSucceeded;
+  }
+  return EvalStatus::kContinue;
+}
+
+}  // namespace chromeos_update_manager
diff --git a/update_manager/enterprise_rollback_policy_impl.h b/update_manager/enterprise_rollback_policy_impl.h
new file mode 100644
index 0000000..bcaf95e
--- /dev/null
+++ b/update_manager/enterprise_rollback_policy_impl.h
@@ -0,0 +1,56 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_UPDATE_MANAGER_ENTERPRISE_ROLLBACK_POLICY_IMPL_H_
+#define UPDATE_ENGINE_UPDATE_MANAGER_ENTERPRISE_ROLLBACK_POLICY_IMPL_H_
+
+#include <string>
+
+#include "update_engine/common/error_code.h"
+#include "update_engine/payload_consumer/install_plan.h"
+#include "update_engine/update_manager/policy_utils.h"
+
+namespace chromeos_update_manager {
+
+// If the update is an enterprise rollback, this policy should not block it
+// from being applied.
+class EnterpriseRollbackPolicyImpl : public PolicyImplBase {
+ public:
+  EnterpriseRollbackPolicyImpl() = default;
+  ~EnterpriseRollbackPolicyImpl() override = default;
+
+  // Policy overrides.
+  EvalStatus UpdateCanBeApplied(
+      EvaluationContext* ec,
+      State* state,
+      std::string* error,
+      chromeos_update_engine::ErrorCode* result,
+      chromeos_update_engine::InstallPlan* install_plan) const override;
+
+ protected:
+  std::string PolicyName() const override {
+    return "EnterpriseRollbackPolicyImpl";
+  }
+
+ private:
+  EnterpriseRollbackPolicyImpl(const EnterpriseRollbackPolicyImpl&) = delete;
+  EnterpriseRollbackPolicyImpl& operator=(const EnterpriseRollbackPolicyImpl&) =
+      delete;
+};
+
+}  // namespace chromeos_update_manager
+
+#endif  // UPDATE_ENGINE_UPDATE_MANAGER_ENTERPRISE_ROLLBACK_POLICY_IMPL_H_
diff --git a/update_manager/enterprise_rollback_policy_impl_unittest.cc b/update_manager/enterprise_rollback_policy_impl_unittest.cc
new file mode 100644
index 0000000..5cc5c75
--- /dev/null
+++ b/update_manager/enterprise_rollback_policy_impl_unittest.cc
@@ -0,0 +1,56 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <memory>
+
+#include "update_engine/update_manager/enterprise_rollback_policy_impl.h"
+#include "update_engine/update_manager/policy_test_utils.h"
+#include "update_engine/update_manager/weekly_time.h"
+
+using chromeos_update_engine::ErrorCode;
+using chromeos_update_engine::InstallPlan;
+
+namespace chromeos_update_manager {
+
+class UmEnterpriseRollbackPolicyImplTest : public UmPolicyTestBase {
+ protected:
+  UmEnterpriseRollbackPolicyImplTest() {
+    policy_ = std::make_unique<EnterpriseRollbackPolicyImpl>();
+  }
+};
+
+TEST_F(UmEnterpriseRollbackPolicyImplTest,
+       ContinueWhenUpdateIsNotEnterpriseRollback) {
+  InstallPlan install_plan{.is_rollback = false};
+  ErrorCode result;
+  ExpectPolicyStatus(EvalStatus::kContinue,
+                     &Policy::UpdateCanBeApplied,
+                     &result,
+                     &install_plan);
+}
+
+TEST_F(UmEnterpriseRollbackPolicyImplTest,
+       SuccessWhenUpdateIsEnterpriseRollback) {
+  InstallPlan install_plan{.is_rollback = true};
+  ErrorCode result;
+  ExpectPolicyStatus(EvalStatus::kSucceeded,
+                     &Policy::UpdateCanBeApplied,
+                     &result,
+                     &install_plan);
+  EXPECT_EQ(result, ErrorCode::kSuccess);
+}
+
+}  // namespace chromeos_update_manager
diff --git a/update_manager/evaluation_context-inl.h b/update_manager/evaluation_context-inl.h
index 59d85da..82861fa 100644
--- a/update_manager/evaluation_context-inl.h
+++ b/update_manager/evaluation_context-inl.h
@@ -39,7 +39,7 @@
   std::string errmsg;
   const T* result =
       var->GetValue(RemainingTime(evaluation_monotonic_deadline_), &errmsg);
-  if (result == nullptr) {
+  if (result == nullptr && !var->IsMissingOk()) {
     LOG(WARNING) << "Error reading Variable " << var->GetName() << ": \""
                  << errmsg << "\"";
   }
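
GetValue() now suppresses the warning when the variable reports that a missing value is expected. The guard can be read in isolation roughly as follows; GetValue(), GetName(), and IsMissingOk() are the calls shown in this hunk, while the wrapper function is purely illustrative:

    // Illustrative wrapper around the new guard: only warn when a null result
    // is not an expected outcome for this variable.
    template <typename T>
    const T* GetValueOrWarn(Variable<T>* var, base::TimeDelta timeout) {
      std::string errmsg;
      const T* result = var->GetValue(timeout, &errmsg);
      if (result == nullptr && !var->IsMissingOk()) {
        LOG(WARNING) << "Error reading Variable " << var->GetName() << ": \""
                     << errmsg << "\"";
      }
      return result;
    }
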
diff --git a/update_manager/evaluation_context.cc b/update_manager/evaluation_context.cc
index e796fec..b86f41c 100644
--- a/update_manager/evaluation_context.cc
+++ b/update_manager/evaluation_context.cc
@@ -27,6 +27,7 @@
 #include <base/strings/string_util.h>
 #include <base/values.h>
 
+#include "update_engine/common/system_state.h"
 #include "update_engine/common/utils.h"
 
 using base::Callback;
@@ -34,7 +35,7 @@
 using base::Time;
 using base::TimeDelta;
 using brillo::MessageLoop;
-using chromeos_update_engine::ClockInterface;
+using chromeos_update_engine::SystemState;
 using std::string;
 using std::unique_ptr;
 
@@ -65,12 +66,10 @@
 namespace chromeos_update_manager {
 
 EvaluationContext::EvaluationContext(
-    ClockInterface* clock,
     TimeDelta evaluation_timeout,
     TimeDelta expiration_timeout,
     unique_ptr<Callback<void(EvaluationContext*)>> unregister_cb)
-    : clock_(clock),
-      evaluation_timeout_(evaluation_timeout),
+    : evaluation_timeout_(evaluation_timeout),
       expiration_timeout_(expiration_timeout),
       unregister_cb_(std::move(unregister_cb)),
       weak_ptr_factory_(this) {
@@ -98,13 +97,15 @@
 TimeDelta EvaluationContext::RemainingTime(Time monotonic_deadline) const {
   if (monotonic_deadline.is_max())
     return TimeDelta::Max();
-  TimeDelta remaining = monotonic_deadline - clock_->GetMonotonicTime();
+  TimeDelta remaining =
+      monotonic_deadline - SystemState::Get()->clock()->GetMonotonicTime();
   return std::max(remaining, TimeDelta());
 }
 
 Time EvaluationContext::MonotonicDeadline(TimeDelta timeout) {
-  return (timeout.is_max() ? Time::Max()
-                           : clock_->GetMonotonicTime() + timeout);
+  return (timeout.is_max()
+              ? Time::Max()
+              : SystemState::Get()->clock()->GetMonotonicTime() + timeout);
 }
 
 void EvaluationContext::ValueChanged(BaseVariable* var) {
@@ -139,8 +140,9 @@
 }
 
 void EvaluationContext::ResetEvaluation() {
-  evaluation_start_wallclock_ = clock_->GetWallclockTime();
-  evaluation_start_monotonic_ = clock_->GetMonotonicTime();
+  const auto* clock = SystemState::Get()->clock();
+  evaluation_start_wallclock_ = clock->GetWallclockTime();
+  evaluation_start_monotonic_ = clock->GetMonotonicTime();
   reevaluation_time_wallclock_ = Time::Max();
   reevaluation_time_monotonic_ = Time::Max();
   evaluation_monotonic_deadline_ = MonotonicDeadline(evaluation_timeout_);
diff --git a/update_manager/evaluation_context.h b/update_manager/evaluation_context.h
index c68c430..3460f2a 100644
--- a/update_manager/evaluation_context.h
+++ b/update_manager/evaluation_context.h
@@ -23,12 +23,10 @@
 
 #include <base/bind.h>
 #include <base/callback.h>
-#include <base/memory/ref_counted.h>
 #include <base/memory/weak_ptr.h>
 #include <base/time/time.h>
 #include <brillo/message_loops/message_loop.h>
 
-#include "update_engine/common/clock_interface.h"
 #include "update_engine/update_manager/boxed_value.h"
 #include "update_engine/update_manager/variable.h"
 
@@ -46,7 +44,7 @@
 //
 // Example:
 //
-//   scoped_refptr<EvaluationContext> ec = new EvaluationContext(...);
+//   auto ec = std::make_shared<EvaluationContext>(...);
 //
 //   ...
 //   // The following call to ResetEvaluation() is optional. Use it to reset the
@@ -62,18 +60,14 @@
 //   // If the provided |closure| wants to re-evaluate the policy, it should
 //   // call ec->ResetEvaluation() to start a new evaluation.
 //
-class EvaluationContext : public base::RefCounted<EvaluationContext>,
-                          private BaseVariable::ObserverInterface {
+class EvaluationContext : private BaseVariable::ObserverInterface {
  public:
   EvaluationContext(
-      chromeos_update_engine::ClockInterface* clock,
       base::TimeDelta evaluation_timeout,
       base::TimeDelta expiration_timeout,
       std::unique_ptr<base::Callback<void(EvaluationContext*)>> unregister_cb);
-  EvaluationContext(chromeos_update_engine::ClockInterface* clock,
-                    base::TimeDelta evaluation_timeout)
+  explicit EvaluationContext(base::TimeDelta evaluation_timeout)
       : EvaluationContext(
-            clock,
             evaluation_timeout,
             base::TimeDelta::Max(),
             std::unique_ptr<base::Callback<void(EvaluationContext*)>>()) {}
@@ -174,9 +168,6 @@
   // Whether the evaluation context has indeed expired.
   bool is_expired_ = false;
 
-  // Pointer to the mockable clock interface;
-  chromeos_update_engine::ClockInterface* const clock_;
-
   // The timestamps when the evaluation of this EvaluationContext started,
   // corresponding to ClockInterface::GetWallclockTime() and
   // ClockInterface::GetMonotonicTime(), respectively. These values are reset
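
With the clock argument gone and reference counting dropped, the header's usage example now builds the context with std::make_shared. A one-line sketch using the remaining single-argument constructor (the five-second timeout is an arbitrary illustration):

    // Matches the updated example comment: no clock parameter, shared ownership.
    auto ec = std::make_shared<EvaluationContext>(base::TimeDelta::FromSeconds(5));
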
diff --git a/update_manager/evaluation_context_unittest.cc b/update_manager/evaluation_context_unittest.cc
index eb42eb7..fdb408b 100644
--- a/update_manager/evaluation_context_unittest.cc
+++ b/update_manager/evaluation_context_unittest.cc
@@ -26,6 +26,7 @@
 #include <gtest/gtest.h>
 
 #include "update_engine/common/fake_clock.h"
+#include "update_engine/cros/fake_system_state.h"
 #include "update_engine/update_manager/fake_variable.h"
 #include "update_engine/update_manager/generic_variables.h"
 #include "update_engine/update_manager/mock_variable.h"
@@ -39,6 +40,8 @@
 using brillo::MessageLoopRunMaxIterations;
 using brillo::MessageLoopRunUntil;
 using chromeos_update_engine::FakeClock;
+using chromeos_update_engine::FakeSystemState;
+using std::shared_ptr;
 using std::string;
 using std::unique_ptr;
 using testing::_;
@@ -59,14 +62,14 @@
 }
 
 template <typename T>
-void ReadVar(scoped_refptr<EvaluationContext> ec, Variable<T>* var) {
+void ReadVar(shared_ptr<EvaluationContext> ec, Variable<T>* var) {
   ec->GetValue(var);
 }
 
 // Runs |evaluation|; if the value pointed by |count_p| is greater than zero,
 // decrement it and schedule a reevaluation; otherwise, writes true to |done_p|.
 void EvaluateRepeatedly(Closure evaluation,
-                        scoped_refptr<EvaluationContext> ec,
+                        shared_ptr<EvaluationContext> ec,
                         int* count_p,
                         bool* done_p) {
   evaluation.Run();
@@ -87,16 +90,17 @@
 class UmEvaluationContextTest : public ::testing::Test {
  protected:
   void SetUp() override {
+    FakeSystemState::CreateInstance();
+    fake_clock_ = FakeSystemState::Get()->fake_clock();
     loop_.SetAsCurrent();
     // Apr 22, 2009 19:25:00 UTC (this is a random reference point).
-    fake_clock_.SetMonotonicTime(Time::FromTimeT(1240428300));
+    fake_clock_->SetMonotonicTime(Time::FromTimeT(1240428300));
     // Mar 2, 2006 1:23:45 UTC.
-    fake_clock_.SetWallclockTime(Time::FromTimeT(1141262625));
-    eval_ctx_ = new EvaluationContext(
-        &fake_clock_,
+    fake_clock_->SetWallclockTime(Time::FromTimeT(1141262625));
+    eval_ctx_.reset(new EvaluationContext(
         default_timeout_,
         default_timeout_,
-        unique_ptr<base::Callback<void(EvaluationContext*)>>(nullptr));
+        unique_ptr<base::Callback<void(EvaluationContext*)>>(nullptr)));
   }
 
   void TearDown() override {
@@ -125,8 +129,8 @@
   TimeDelta default_timeout_ = TimeDelta::FromSeconds(5);
 
   brillo::FakeMessageLoop loop_{nullptr};
-  FakeClock fake_clock_;
-  scoped_refptr<EvaluationContext> eval_ctx_;
+  FakeClock* fake_clock_;
+  shared_ptr<EvaluationContext> eval_ctx_;
 
   // FakeVariables used for testing the EvaluationContext. These are required
   // here to prevent them from going away *before* the EvaluationContext under
@@ -210,13 +214,7 @@
   fake_const_var_.reset(new string("Hello world!"));
   EXPECT_EQ(*eval_ctx_->GetValue(&fake_const_var_), "Hello world!");
 
-  EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(
-#if BASE_VER < 576279
-      Bind(&base::DoNothing)
-#else
-      base::DoNothing()
-#endif
-          ));
+  EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing()));
 }
 
 // Test that reevaluation occurs when an async variable it depends on changes.
@@ -286,23 +284,11 @@
   EXPECT_TRUE(value);
 
   // Ensure that we cannot reschedule an evaluation.
-  EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(
-#if BASE_VER < 576279
-      Bind(&base::DoNothing)
-#else
-      base::DoNothing()
-#endif
-          ));
+  EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing()));
 
   // Ensure that we can reschedule an evaluation after resetting expiration.
   eval_ctx_->ResetExpiration();
-  EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(
-#if BASE_VER < 576279
-      Bind(&base::DoNothing)
-#else
-      base::DoNothing()
-#endif
-          ));
+  EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing()));
 }
 
 // Test that we clear the events when destroying the EvaluationContext.
@@ -348,13 +334,7 @@
   fake_poll_var_.reset(new string("Polled value"));
   eval_ctx_->GetValue(&fake_async_var_);
   eval_ctx_->GetValue(&fake_poll_var_);
-  EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(
-#if BASE_VER < 576279
-      Bind(&base::DoNothing)
-#else
-      base::DoNothing()
-#endif
-          ));
+  EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing()));
   // TearDown() checks for leaked observers on this async_variable, which means
   // that our object is still alive after removing its reference.
 }
@@ -388,8 +368,8 @@
 }
 
 TEST_F(UmEvaluationContextTest, TimeoutUpdatesWithMonotonicTime) {
-  fake_clock_.SetMonotonicTime(fake_clock_.GetMonotonicTime() +
-                               TimeDelta::FromSeconds(1));
+  fake_clock_->SetMonotonicTime(fake_clock_->GetMonotonicTime() +
+                                TimeDelta::FromSeconds(1));
 
   TimeDelta timeout = default_timeout_ - TimeDelta::FromSeconds(1);
 
@@ -398,9 +378,9 @@
 }
 
 TEST_F(UmEvaluationContextTest, ResetEvaluationResetsTimesWallclock) {
-  Time cur_time = fake_clock_.GetWallclockTime();
+  Time cur_time = fake_clock_->GetWallclockTime();
   // Advance the time on the clock but don't call ResetEvaluation yet.
-  fake_clock_.SetWallclockTime(cur_time + TimeDelta::FromSeconds(4));
+  fake_clock_->SetWallclockTime(cur_time + TimeDelta::FromSeconds(4));
 
   EXPECT_TRUE(eval_ctx_->IsWallclockTimeGreaterThan(cur_time -
                                                     TimeDelta::FromSeconds(1)));
@@ -410,7 +390,7 @@
   // Call ResetEvaluation now, which should use the new evaluation time.
   eval_ctx_->ResetEvaluation();
 
-  cur_time = fake_clock_.GetWallclockTime();
+  cur_time = fake_clock_->GetWallclockTime();
   EXPECT_TRUE(eval_ctx_->IsWallclockTimeGreaterThan(cur_time -
                                                     TimeDelta::FromSeconds(1)));
   EXPECT_FALSE(eval_ctx_->IsWallclockTimeGreaterThan(cur_time));
@@ -419,9 +399,9 @@
 }
 
 TEST_F(UmEvaluationContextTest, ResetEvaluationResetsTimesMonotonic) {
-  Time cur_time = fake_clock_.GetMonotonicTime();
+  Time cur_time = fake_clock_->GetMonotonicTime();
   // Advance the time on the clock but don't call ResetEvaluation yet.
-  fake_clock_.SetMonotonicTime(cur_time + TimeDelta::FromSeconds(4));
+  fake_clock_->SetMonotonicTime(cur_time + TimeDelta::FromSeconds(4));
 
   EXPECT_TRUE(eval_ctx_->IsMonotonicTimeGreaterThan(cur_time -
                                                     TimeDelta::FromSeconds(1)));
@@ -431,7 +411,7 @@
   // Call ResetEvaluation now, which should use the new evaluation time.
   eval_ctx_->ResetEvaluation();
 
-  cur_time = fake_clock_.GetMonotonicTime();
+  cur_time = fake_clock_->GetMonotonicTime();
   EXPECT_TRUE(eval_ctx_->IsMonotonicTimeGreaterThan(cur_time -
                                                     TimeDelta::FromSeconds(1)));
   EXPECT_FALSE(eval_ctx_->IsMonotonicTimeGreaterThan(cur_time));
@@ -442,33 +422,21 @@
 TEST_F(UmEvaluationContextTest,
        IsWallclockTimeGreaterThanSignalsTriggerReevaluation) {
   EXPECT_FALSE(eval_ctx_->IsWallclockTimeGreaterThan(
-      fake_clock_.GetWallclockTime() + TimeDelta::FromSeconds(1)));
+      fake_clock_->GetWallclockTime() + TimeDelta::FromSeconds(1)));
 
   // The "false" from IsWallclockTimeGreaterThan means that's not that timestamp
   // yet, so this should schedule a callback for when that happens.
-  EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(
-#if BASE_VER < 576279
-      Bind(&base::DoNothing)
-#else
-      base::DoNothing()
-#endif
-          ));
+  EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing()));
 }
 
 TEST_F(UmEvaluationContextTest,
        IsMonotonicTimeGreaterThanSignalsTriggerReevaluation) {
   EXPECT_FALSE(eval_ctx_->IsMonotonicTimeGreaterThan(
-      fake_clock_.GetMonotonicTime() + TimeDelta::FromSeconds(1)));
+      fake_clock_->GetMonotonicTime() + TimeDelta::FromSeconds(1)));
 
   // The "false" from IsMonotonicTimeGreaterThan means that's not that timestamp
   // yet, so this should schedule a callback for when that happens.
-  EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(
-#if BASE_VER < 576279
-      Bind(&base::DoNothing)
-#else
-      base::DoNothing()
-#endif
-          ));
+  EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing()));
 }
 
 TEST_F(UmEvaluationContextTest,
@@ -476,18 +444,12 @@
   // IsWallclockTimeGreaterThan() should ignore timestamps on the past for
   // reevaluation.
   EXPECT_TRUE(eval_ctx_->IsWallclockTimeGreaterThan(
-      fake_clock_.GetWallclockTime() - TimeDelta::FromSeconds(20)));
+      fake_clock_->GetWallclockTime() - TimeDelta::FromSeconds(20)));
   EXPECT_TRUE(eval_ctx_->IsWallclockTimeGreaterThan(
-      fake_clock_.GetWallclockTime() - TimeDelta::FromSeconds(1)));
+      fake_clock_->GetWallclockTime() - TimeDelta::FromSeconds(1)));
 
   // Callback should not be scheduled.
-  EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(
-#if BASE_VER < 576279
-      Bind(&base::DoNothing)
-#else
-      base::DoNothing()
-#endif
-          ));
+  EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing()));
 }
 
 TEST_F(UmEvaluationContextTest,
@@ -495,18 +457,12 @@
   // IsMonotonicTimeGreaterThan() should ignore timestamps on the past for
   // reevaluation.
   EXPECT_TRUE(eval_ctx_->IsMonotonicTimeGreaterThan(
-      fake_clock_.GetMonotonicTime() - TimeDelta::FromSeconds(20)));
+      fake_clock_->GetMonotonicTime() - TimeDelta::FromSeconds(20)));
   EXPECT_TRUE(eval_ctx_->IsMonotonicTimeGreaterThan(
-      fake_clock_.GetMonotonicTime() - TimeDelta::FromSeconds(1)));
+      fake_clock_->GetMonotonicTime() - TimeDelta::FromSeconds(1)));
 
   // Callback should not be scheduled.
-  EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(
-#if BASE_VER < 576279
-      Bind(&base::DoNothing)
-#else
-      base::DoNothing()
-#endif
-          ));
+  EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing()));
 }
 
 TEST_F(UmEvaluationContextTest, DumpContext) {
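// A minimal standalone sketch of the no-op-callback pattern that base::DoNothing()
// provides in newer libchrome revisions: one helper converts to any void-returning
// callback type, which is why the BASE_VER conditionals above collapse to a single
// expression. Names below are hypothetical, assuming only the C++ standard library
// rather than the real base:: API.
#include <functional>
#include <iostream>

struct DoNothingSketch {
  // Implicitly convertible to any std::function whose return type is void.
  template <typename... Args>
  operator std::function<void(Args...)>() const {
    return [](Args...) {};
  }
};

// Hypothetical consumer that expects a completion callback, standing in for
// EvaluationContext::RunOnValueChangeOrTimeout().
bool RunOnValueChangeOrTimeoutSketch(std::function<void(int)> callback) {
  callback(42);  // A no-op callback simply returns.
  return true;
}

int main() {
  // One expression fits every callback signature, with no version check.
  std::cout << RunOnValueChangeOrTimeoutSketch(DoNothingSketch()) << '\n';
  return 0;
}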
diff --git a/update_manager/fake_device_policy_provider.h b/update_manager/fake_device_policy_provider.h
index 7cd4d7b..762bfc5 100644
--- a/update_manager/fake_device_policy_provider.h
+++ b/update_manager/fake_device_policy_provider.h
@@ -42,6 +42,10 @@
     return &var_release_channel_delegated_;
   }
 
+  FakeVariable<std::string>* var_release_lts_tag() override {
+    return &var_release_lts_tag_;
+  }
+
   FakeVariable<bool>* var_update_disabled() override {
     return &var_update_disabled_;
   }
@@ -68,7 +72,7 @@
     return &var_allowed_connection_types_for_update_;
   }
 
-  FakeVariable<std::string>* var_owner() override { return &var_owner_; }
+  FakeVariable<bool>* var_has_owner() override { return &var_has_owner_; }
 
   FakeVariable<bool>* var_http_downloads_enabled() override {
     return &var_http_downloads_enabled_;
@@ -91,6 +95,19 @@
     return &var_disallowed_time_intervals_;
   }
 
+  FakeVariable<ChannelDowngradeBehavior>* var_channel_downgrade_behavior()
+      override {
+    return &var_channel_downgrade_behavior_;
+  }
+
+  FakeVariable<base::Version>* var_device_minimum_version() override {
+    return &var_device_minimum_version_;
+  }
+
+  FakeVariable<std::string>* var_quick_fix_build_token() override {
+    return &var_quick_fix_build_token_;
+  }
+
  private:
   FakeVariable<bool> var_device_policy_is_loaded_{"policy_is_loaded",
                                                   kVariableModePoll};
@@ -98,6 +115,8 @@
                                                  kVariableModePoll};
   FakeVariable<bool> var_release_channel_delegated_{"release_channel_delegated",
                                                     kVariableModePoll};
+  FakeVariable<std::string> var_release_lts_tag_{"release_lts_tag",
+                                                 kVariableModePoll};
   FakeVariable<bool> var_update_disabled_{"update_disabled", kVariableModePoll};
   FakeVariable<std::string> var_target_version_prefix_{"target_version_prefix",
                                                        kVariableModePoll};
@@ -110,7 +129,7 @@
   FakeVariable<std::set<chromeos_update_engine::ConnectionType>>
       var_allowed_connection_types_for_update_{
           "allowed_connection_types_for_update", kVariableModePoll};
-  FakeVariable<std::string> var_owner_{"owner", kVariableModePoll};
+  FakeVariable<bool> var_has_owner_{"owner", kVariableModePoll};
   FakeVariable<bool> var_http_downloads_enabled_{"http_downloads_enabled",
                                                  kVariableModePoll};
   FakeVariable<bool> var_au_p2p_enabled_{"au_p2p_enabled", kVariableModePoll};
@@ -119,7 +138,13 @@
   FakeVariable<std::string> var_auto_launched_kiosk_app_id_{
       "auto_launched_kiosk_app_id", kVariableModePoll};
   FakeVariable<WeeklyTimeIntervalVector> var_disallowed_time_intervals_{
-      "disallowed_time_intervals", kVariableModePoll};
+      "disallowed_time_intervals", kVariableModeAsync};
+  FakeVariable<ChannelDowngradeBehavior> var_channel_downgrade_behavior_{
+      "channel_downgrade_behavior", kVariableModePoll};
+  FakeVariable<base::Version> var_device_minimum_version_{
+      "device_minimum_version", kVariableModePoll};
+  FakeVariable<std::string> var_quick_fix_build_token_{"quick_fix_build_token",
+                                                       kVariableModePoll};
 
   DISALLOW_COPY_AND_ASSIGN(FakeDevicePolicyProvider);
 };
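// A minimal standalone sketch of the FakeVariable pattern these fake providers
// rely on: each variable is a named slot the test can reset() to a new value, or
// to nullptr for "unset", while the code under test only reads the current value.
// Simplified stand-in with hypothetical names, not the real Variable/FakeVariable
// API or its poll/async modes.
#include <iostream>
#include <memory>
#include <string>

template <typename T>
class FakeVariableSketch {
 public:
  explicit FakeVariableSketch(std::string name) : name_(std::move(name)) {}

  // Takes ownership of |value|; pass nullptr to mark the variable as unset.
  void reset(T* value) { value_.reset(value); }

  const T* value() const { return value_.get(); }
  const std::string& name() const { return name_; }

 private:
  std::string name_;
  std::unique_ptr<T> value_;
};

int main() {
  FakeVariableSketch<std::string> release_lts_tag("release_lts_tag");
  std::cout << (release_lts_tag.value() == nullptr) << ' ';  // Unset: prints 1.
  release_lts_tag.reset(new std::string("lts"));
  std::cout << *release_lts_tag.value() << '\n';             // Prints "lts".
  return 0;
}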
diff --git a/update_manager/fake_system_provider.h b/update_manager/fake_system_provider.h
index f54951b..b320c01 100644
--- a/update_manager/fake_system_provider.h
+++ b/update_manager/fake_system_provider.h
@@ -50,6 +50,10 @@
     return &var_kiosk_required_platform_version_;
   }
 
+  FakeVariable<base::Version>* var_chromeos_version() override {
+    return &var_version_;
+  }
+
  private:
   FakeVariable<bool> var_is_normal_boot_mode_{"is_normal_boot_mode",
                                               kVariableModeConst};
@@ -60,6 +64,8 @@
   FakeVariable<unsigned int> var_num_slots_{"num_slots", kVariableModePoll};
   FakeVariable<std::string> var_kiosk_required_platform_version_{
       "kiosk_required_platform_version", kVariableModePoll};
+  FakeVariable<base::Version> var_version_{"chromeos_version",
+                                           kVariableModePoll};
 
   DISALLOW_COPY_AND_ASSIGN(FakeSystemProvider);
 };
diff --git a/update_manager/fake_update_manager.h b/update_manager/fake_update_manager.h
index 173b1a9..b880582 100644
--- a/update_manager/fake_update_manager.h
+++ b/update_manager/fake_update_manager.h
@@ -26,13 +26,12 @@
 
 class FakeUpdateManager : public UpdateManager {
  public:
-  explicit FakeUpdateManager(chromeos_update_engine::ClockInterface* clock)
-      : UpdateManager(clock,
-                      base::TimeDelta::FromSeconds(5),
+  FakeUpdateManager()
+      : UpdateManager(base::TimeDelta::FromSeconds(5),
                       base::TimeDelta::FromHours(1),
                       new FakeState()) {
     // The FakeUpdateManager uses a DefaultPolicy.
-    set_policy(new DefaultPolicy(clock));
+    set_policy(new DefaultPolicy());
   }
 
   // UpdateManager overrides.
diff --git a/update_manager/fake_updater_provider.h b/update_manager/fake_updater_provider.h
index 7295765..d967f42 100644
--- a/update_manager/fake_updater_provider.h
+++ b/update_manager/fake_updater_provider.h
@@ -83,6 +83,10 @@
     return &var_update_restrictions_;
   }
 
+  FakeVariable<int64_t>* var_test_update_check_interval_timeout() override {
+    return &var_test_update_check_interval_timeout_;
+  }
+
  private:
   FakeVariable<base::Time> var_updater_started_time_{"updater_started_time",
                                                      kVariableModePoll};
@@ -108,6 +112,8 @@
       "forced_update_requested", kVariableModeAsync};
   FakeVariable<UpdateRestrictions> var_update_restrictions_{
       "update_restrictions", kVariableModePoll};
+  FakeVariable<int64_t> var_test_update_check_interval_timeout_{
+      "test_update_check_interval_timeout", kVariableModePoll};
 
   DISALLOW_COPY_AND_ASSIGN(FakeUpdaterProvider);
 };
diff --git a/update_manager/minimum_version_policy_impl.cc b/update_manager/minimum_version_policy_impl.cc
new file mode 100644
index 0000000..fb94ee4
--- /dev/null
+++ b/update_manager/minimum_version_policy_impl.cc
@@ -0,0 +1,56 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_manager/minimum_version_policy_impl.h"
+
+#include <base/version.h>
+
+using chromeos_update_engine::ErrorCode;
+using chromeos_update_engine::InstallPlan;
+
+namespace chromeos_update_manager {
+
+EvalStatus MinimumVersionPolicyImpl::UpdateCanBeApplied(
+    EvaluationContext* ec,
+    State* state,
+    std::string* error,
+    ErrorCode* result,
+    InstallPlan* install_plan) const {
+  const base::Version* current_version(
+      ec->GetValue(state->system_provider()->var_chromeos_version()));
+  if (current_version == nullptr || !current_version->IsValid()) {
+    LOG(WARNING) << "Unable to access current version";
+    return EvalStatus::kContinue;
+  }
+
+  const base::Version* minimum_version = ec->GetValue(
+      state->device_policy_provider()->var_device_minimum_version());
+  if (minimum_version == nullptr || !minimum_version->IsValid()) {
+    LOG(WARNING) << "Unable to access minimum version";
+    return EvalStatus::kContinue;
+  }
+
+  if (*current_version < *minimum_version) {
+    LOG(INFO) << "Updating from version less than minimum required"
+                 ", allowing update to be applied.";
+    *result = ErrorCode::kSuccess;
+    return EvalStatus::kSucceeded;
+  }
+
+  return EvalStatus::kContinue;
+}
+
+}  // namespace chromeos_update_manager
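// A minimal standalone sketch of the version gate above: parse dotted version
// strings, treat missing or unparsable input as invalid (continue), and report
// success only when the current version is strictly below the required minimum.
// Plain STL stand-in for base::Version; names and parsing details are assumptions.
#include <iostream>
#include <optional>
#include <sstream>
#include <string>
#include <vector>

std::optional<std::vector<int>> ParseVersion(const std::string& s) {
  std::vector<int> parts;
  std::stringstream ss(s);
  std::string item;
  while (std::getline(ss, item, '.')) {
    if (item.empty() ||
        item.find_first_not_of("0123456789") != std::string::npos)
      return std::nullopt;  // Mirrors base::Version::IsValid() returning false.
    parts.push_back(std::stoi(item));
  }
  return parts.empty() ? std::nullopt : std::make_optional(parts);
}

enum class Decision { kContinue, kSucceeded };

Decision UpdateCanBeAppliedSketch(const std::string& current,
                                  const std::string& minimum) {
  auto cur = ParseVersion(current);
  auto min = ParseVersion(minimum);
  if (!cur || !min) return Decision::kContinue;  // Missing/invalid: defer.
  // Lexicographic component comparison, a simplification of base::Version order.
  return *cur < *min ? Decision::kSucceeded : Decision::kContinue;
}

int main() {
  std::cout << (UpdateCanBeAppliedSketch("13315.60.12", "13315.60.15") ==
                Decision::kSucceeded)   // Below the minimum: allow the update.
            << (UpdateCanBeAppliedSketch("13315.woops.12", "13315.60.15") ==
                Decision::kContinue)    // Invalid current version: continue.
            << '\n';
  return 0;
}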
diff --git a/update_manager/minimum_version_policy_impl.h b/update_manager/minimum_version_policy_impl.h
new file mode 100644
index 0000000..600d624
--- /dev/null
+++ b/update_manager/minimum_version_policy_impl.h
@@ -0,0 +1,54 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_UPDATE_MANAGER_MINIMUM_VERSION_POLICY_IMPL_H_
+#define UPDATE_ENGINE_UPDATE_MANAGER_MINIMUM_VERSION_POLICY_IMPL_H_
+
+#include <string>
+
+#include "update_engine/common/error_code.h"
+#include "update_engine/payload_consumer/install_plan.h"
+#include "update_engine/update_manager/policy_utils.h"
+
+namespace chromeos_update_manager {
+
+// Checks whether the update is being applied from a version lower than the
+// minimum required one.
+class MinimumVersionPolicyImpl : public PolicyImplBase {
+ public:
+  MinimumVersionPolicyImpl() = default;
+  ~MinimumVersionPolicyImpl() override = default;
+
+  // If the current version is less than the minimum required one, this policy
+  // allows the update to be applied rather than blocking it.
+  EvalStatus UpdateCanBeApplied(
+      EvaluationContext* ec,
+      State* state,
+      std::string* error,
+      chromeos_update_engine::ErrorCode* result,
+      chromeos_update_engine::InstallPlan* install_plan) const override;
+
+ protected:
+  std::string PolicyName() const override { return "MinimumVersionPolicyImpl"; }
+
+ private:
+  MinimumVersionPolicyImpl(const MinimumVersionPolicyImpl&) = delete;
+  MinimumVersionPolicyImpl& operator=(const MinimumVersionPolicyImpl&) = delete;
+};
+
+}  // namespace chromeos_update_manager
+
+#endif  // UPDATE_ENGINE_UPDATE_MANAGER_MINIMUM_VERSION_POLICY_IMPL_H_
diff --git a/update_manager/minimum_version_policy_impl_unittest.cc b/update_manager/minimum_version_policy_impl_unittest.cc
new file mode 100644
index 0000000..8e4dba5
--- /dev/null
+++ b/update_manager/minimum_version_policy_impl_unittest.cc
@@ -0,0 +1,111 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <memory>
+
+#include "update_engine/update_manager/minimum_version_policy_impl.h"
+#include "update_engine/update_manager/policy_test_utils.h"
+
+using chromeos_update_engine::ErrorCode;
+using chromeos_update_engine::InstallPlan;
+
+namespace {
+
+const char* kInvalidVersion = "13315.woops.12";
+const char* kOldVersion = "13315.60.12";
+const char* kNewVersion = "13315.60.15";
+
+}  // namespace
+
+namespace chromeos_update_manager {
+
+class UmMinimumVersionPolicyImplTest : public UmPolicyTestBase {
+ protected:
+  UmMinimumVersionPolicyImplTest() {
+    policy_ = std::make_unique<MinimumVersionPolicyImpl>();
+  }
+
+  void SetCurrentVersion(const std::string& version) {
+    fake_state_.system_provider()->var_chromeos_version()->reset(
+        new base::Version(version));
+  }
+
+  void SetMinimumVersion(const std::string& version) {
+    fake_state_.device_policy_provider()->var_device_minimum_version()->reset(
+        new base::Version(version));
+  }
+
+  void TestPolicy(const EvalStatus& expected_status) {
+    InstallPlan install_plan;
+    ErrorCode result;
+    ExpectPolicyStatus(
+        expected_status, &Policy::UpdateCanBeApplied, &result, &install_plan);
+    if (expected_status == EvalStatus::kSucceeded)
+      EXPECT_EQ(result, ErrorCode::kSuccess);
+  }
+};
+
+TEST_F(UmMinimumVersionPolicyImplTest, ContinueWhenCurrentVersionIsNotSet) {
+  SetMinimumVersion(kNewVersion);
+
+  TestPolicy(EvalStatus::kContinue);
+}
+
+TEST_F(UmMinimumVersionPolicyImplTest, ContinueWhenCurrentVersionIsInvalid) {
+  SetCurrentVersion(kInvalidVersion);
+  SetMinimumVersion(kNewVersion);
+
+  TestPolicy(EvalStatus::kContinue);
+}
+
+TEST_F(UmMinimumVersionPolicyImplTest, ContinueWhenMinimumVersionIsNotSet) {
+  SetCurrentVersion(kOldVersion);
+
+  TestPolicy(EvalStatus::kContinue);
+}
+
+TEST_F(UmMinimumVersionPolicyImplTest, ContinueWhenMinimumVersionIsInvalid) {
+  SetCurrentVersion(kOldVersion);
+  SetMinimumVersion(kInvalidVersion);
+
+  TestPolicy(EvalStatus::kContinue);
+}
+
+TEST_F(UmMinimumVersionPolicyImplTest,
+       ContinueWhenCurrentVersionIsGreaterThanMinimumVersion) {
+  SetCurrentVersion(kNewVersion);
+  SetMinimumVersion(kOldVersion);
+
+  TestPolicy(EvalStatus::kContinue);
+}
+
+TEST_F(UmMinimumVersionPolicyImplTest,
+       ContinueWhenCurrentVersionIsEqualToMinimumVersion) {
+  SetCurrentVersion(kNewVersion);
+  SetMinimumVersion(kNewVersion);
+
+  TestPolicy(EvalStatus::kContinue);
+}
+
+TEST_F(UmMinimumVersionPolicyImplTest,
+       SuccessWhenCurrentVersionIsLessThanMinimumVersion) {
+  SetCurrentVersion(kOldVersion);
+  SetMinimumVersion(kNewVersion);
+
+  TestPolicy(EvalStatus::kSucceeded);
+}
+
+}  // namespace chromeos_update_manager
diff --git a/update_manager/mock_policy.h b/update_manager/mock_policy.h
index 46b6c78..3c6313f 100644
--- a/update_manager/mock_policy.h
+++ b/update_manager/mock_policy.h
@@ -29,8 +29,7 @@
 // A mocked implementation of Policy.
 class MockPolicy : public Policy {
  public:
-  explicit MockPolicy(chromeos_update_engine::ClockInterface* clock)
-      : default_policy_(clock) {
+  MockPolicy() {
     // We defer to the corresponding DefaultPolicy methods, by default.
     ON_CALL(*this,
             UpdateCheckAllowed(testing::_, testing::_, testing::_, testing::_))
@@ -46,11 +45,6 @@
                 testing::_, testing::_, testing::_, testing::_, testing::_))
         .WillByDefault(
             testing::Invoke(&default_policy_, &DefaultPolicy::UpdateCanStart));
-    ON_CALL(
-        *this,
-        UpdateDownloadAllowed(testing::_, testing::_, testing::_, testing::_))
-        .WillByDefault(testing::Invoke(&default_policy_,
-                                       &DefaultPolicy::UpdateDownloadAllowed));
     ON_CALL(*this, P2PEnabled(testing::_, testing::_, testing::_, testing::_))
         .WillByDefault(
             testing::Invoke(&default_policy_, &DefaultPolicy::P2PEnabled));
@@ -60,8 +54,6 @@
         .WillByDefault(testing::Invoke(&default_policy_,
                                        &DefaultPolicy::P2PEnabledChanged));
   }
-
-  MockPolicy() : MockPolicy(nullptr) {}
   ~MockPolicy() override {}
 
   // Policy overrides.
diff --git a/update_manager/mock_update_manager.h b/update_manager/mock_update_manager.h
new file mode 100644
index 0000000..06e17d8
--- /dev/null
+++ b/update_manager/mock_update_manager.h
@@ -0,0 +1,44 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_UPDATE_MANAGER_MOCK_UPDATE_MANAGER_H_
+#define UPDATE_ENGINE_UPDATE_MANAGER_MOCK_UPDATE_MANAGER_H_
+
+#include <string>
+
+#include "update_engine/update_manager/update_manager.h"
+
+#include <gmock/gmock.h>
+
+namespace chromeos_update_manager {
+
+class MockUpdateManager : public UpdateManager {
+ public:
+  MockUpdateManager()
+      : UpdateManager(base::TimeDelta(), base::TimeDelta(), nullptr) {}
+
+  MOCK_METHOD2(
+      AsyncPolicyRequestUpdateCheckAllowed,
+      void(base::Callback<void(EvalStatus, const UpdateCheckParams& result)>
+               callback,
+           EvalStatus (Policy::*policy_method)(
+               EvaluationContext*, State*, std::string*, UpdateCheckParams*)
+               const));
+};
+
+}  // namespace chromeos_update_manager
+
+#endif  // UPDATE_ENGINE_UPDATE_MANAGER_MOCK_UPDATE_MANAGER_H_
diff --git a/update_manager/next_update_check_policy_impl.cc b/update_manager/next_update_check_policy_impl.cc
index 6f9748e..0a78718 100644
--- a/update_manager/next_update_check_policy_impl.cc
+++ b/update_manager/next_update_check_policy_impl.cc
@@ -72,6 +72,11 @@
       ec->GetValue(updater_provider->var_updater_started_time());
   POLICY_CHECK_VALUE_AND_FAIL(updater_started_time, error);
 
+  // This value is used only for testing; it is deleted after the first time
+  // it is read.
+  const int64_t* interval_timeout =
+      ec->GetValue(updater_provider->var_test_update_check_interval_timeout());
+
   const Time* last_checked_time =
       ec->GetValue(updater_provider->var_last_checked_time());
 
@@ -83,13 +88,21 @@
   // If this is the first attempt, compute and return an initial value.
   if (last_checked_time == nullptr ||
       *last_checked_time < *updater_started_time) {
-    *next_update_check = *updater_started_time +
-                         FuzzedInterval(&prng,
-                                        constants.timeout_initial_interval,
-                                        constants.timeout_regular_fuzz);
+    TimeDelta time_diff =
+        interval_timeout == nullptr
+            ? FuzzedInterval(&prng,
+                             constants.timeout_initial_interval,
+                             constants.timeout_regular_fuzz)
+            : TimeDelta::FromSeconds(*interval_timeout);
+    *next_update_check = *updater_started_time + time_diff;
     return EvalStatus::kSucceeded;
   }
 
+  if (interval_timeout != nullptr) {
+    *next_update_check =
+        *last_checked_time + TimeDelta::FromSeconds(*interval_timeout);
+    return EvalStatus::kSucceeded;
+  }
   // Check whether the server is enforcing a poll interval; if not, this value
   // will be zero.
   const unsigned int* server_dictated_poll_interval =
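// A minimal standalone sketch of the interval selection added above: when the
// test-only override is present it replaces the fuzzed initial/periodic delay,
// so integration tests can force a quick recheck. The PRNG and the interval
// constants below are simplified stand-ins; names are hypothetical.
#include <cstdint>
#include <iostream>
#include <optional>
#include <random>

int64_t FuzzedIntervalSketch(std::mt19937* prng, int64_t interval,
                             int64_t fuzz) {
  // Uniformly pick a delay in [interval - fuzz/2, interval + fuzz/2] seconds.
  std::uniform_int_distribution<int64_t> dist(interval - fuzz / 2,
                                              interval + fuzz / 2);
  return dist(*prng);
}

int64_t NextCheckDelaySketch(std::optional<int64_t> test_interval_timeout,
                             std::mt19937* prng) {
  const int64_t kInitialInterval = 7 * 60;  // Assumed default, in seconds.
  const int64_t kRegularFuzz = 10 * 60;     // Assumed default, in seconds.
  return test_interval_timeout
             ? *test_interval_timeout  // Deterministic delay for tests.
             : FuzzedIntervalSketch(prng, kInitialInterval, kRegularFuzz);
}

int main() {
  std::mt19937 prng(12345);
  std::cout << "fuzzed: " << NextCheckDelaySketch(std::nullopt, &prng) << "s, "
            << "test override: " << NextCheckDelaySketch(30, &prng) << "s\n";
  return 0;
}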
diff --git a/update_manager/next_update_check_policy_impl_unittest.cc b/update_manager/next_update_check_policy_impl_unittest.cc
index 58aff66..d80063d 100644
--- a/update_manager/next_update_check_policy_impl_unittest.cc
+++ b/update_manager/next_update_check_policy_impl_unittest.cc
@@ -52,14 +52,14 @@
   // Set the last update time so it'll appear as if this is a first update check
   // in the lifetime of the current updater.
   fake_state_.updater_provider()->var_last_checked_time()->reset(
-      new Time(fake_clock_.GetWallclockTime() - TimeDelta::FromMinutes(10)));
+      new Time(fake_clock_->GetWallclockTime() - TimeDelta::FromMinutes(10)));
 
   CallMethodWithContext(&NextUpdateCheckTimePolicyImpl::NextUpdateCheckTime,
                         &next_update_check,
                         policy_test_constants);
 
-  EXPECT_LE(fake_clock_.GetWallclockTime(), next_update_check);
-  EXPECT_GE(fake_clock_.GetWallclockTime() +
+  EXPECT_LE(fake_clock_->GetWallclockTime(), next_update_check);
+  EXPECT_GE(fake_clock_->GetWallclockTime() +
                 TimeDelta::FromSeconds(
                     policy_test_constants.timeout_initial_interval +
                     policy_test_constants.timeout_regular_fuzz / 2),
@@ -75,12 +75,12 @@
                         &next_update_check,
                         policy_test_constants);
 
-  EXPECT_LE(fake_clock_.GetWallclockTime() +
+  EXPECT_LE(fake_clock_->GetWallclockTime() +
                 TimeDelta::FromSeconds(
                     policy_test_constants.timeout_periodic_interval -
                     policy_test_constants.timeout_regular_fuzz / 2),
             next_update_check);
-  EXPECT_GE(fake_clock_.GetWallclockTime() +
+  EXPECT_GE(fake_clock_->GetWallclockTime() +
                 TimeDelta::FromSeconds(
                     policy_test_constants.timeout_periodic_interval +
                     policy_test_constants.timeout_regular_fuzz / 2),
@@ -103,11 +103,11 @@
 
   int expected_interval = policy_test_constants.timeout_periodic_interval * 4;
   EXPECT_LE(
-      fake_clock_.GetWallclockTime() +
+      fake_clock_->GetWallclockTime() +
           TimeDelta::FromSeconds(expected_interval - expected_interval / 2),
       next_update_check);
   EXPECT_GE(
-      fake_clock_.GetWallclockTime() +
+      fake_clock_->GetWallclockTime() +
           TimeDelta::FromSeconds(expected_interval + expected_interval / 2),
       next_update_check);
 }
@@ -129,10 +129,10 @@
                &next_update_check,
                policy_test_constants);
 
-  EXPECT_LE(fake_clock_.GetWallclockTime() +
+  EXPECT_LE(fake_clock_->GetWallclockTime() +
                 TimeDelta::FromSeconds(kInterval - kInterval / 2),
             next_update_check);
-  EXPECT_GE(fake_clock_.GetWallclockTime() +
+  EXPECT_GE(fake_clock_->GetWallclockTime() +
                 TimeDelta::FromSeconds(kInterval + kInterval / 2),
             next_update_check);
 }
@@ -148,12 +148,12 @@
                &next_update_check,
                policy_test_constants);
 
-  EXPECT_LE(fake_clock_.GetWallclockTime() +
+  EXPECT_LE(fake_clock_->GetWallclockTime() +
                 TimeDelta::FromSeconds(
                     policy_test_constants.timeout_max_backoff_interval -
                     policy_test_constants.timeout_max_backoff_interval / 2),
             next_update_check);
-  EXPECT_GE(fake_clock_.GetWallclockTime() +
+  EXPECT_GE(fake_clock_->GetWallclockTime() +
                 TimeDelta::FromSeconds(
                     policy_test_constants.timeout_max_backoff_interval +
                     policy_test_constants.timeout_max_backoff_interval / 2),
diff --git a/update_manager/official_build_check_policy_impl.cc b/update_manager/official_build_check_policy_impl.cc
index 096f7bf..e80c09f 100644
--- a/update_manager/official_build_check_policy_impl.cc
+++ b/update_manager/official_build_check_policy_impl.cc
@@ -27,8 +27,16 @@
   const bool* is_official_build_p =
       ec->GetValue(state->system_provider()->var_is_official_build());
   if (is_official_build_p != nullptr && !(*is_official_build_p)) {
-    LOG(INFO) << "Unofficial build, blocking periodic update checks.";
-    return EvalStatus::kAskMeAgainLater;
+    const int64_t* interval_timeout_p = ec->GetValue(
+        state->updater_provider()->var_test_update_check_interval_timeout());
+    // |interval_timeout_p| is used only in testing, to allow periodic update
+    // checks on unofficial images.
+    if (interval_timeout_p == nullptr) {
+      LOG(INFO) << "Unofficial build, blocking periodic update checks.";
+      return EvalStatus::kAskMeAgainLater;
+    }
+    LOG(INFO) << "Unofficial build, but periodic update check interval "
+              << "timeout is defined, so update is not blocked.";
   }
   return EvalStatus::kContinue;
 }
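// A minimal standalone sketch of the gate above: unofficial builds block
// periodic update checks unless the test-only interval timeout is present, in
// which case the check is allowed to proceed. Plain STL; names are hypothetical.
#include <cstdint>
#include <iostream>
#include <optional>

enum class EvalStatusSketch { kContinue, kAskMeAgainLater };

EvalStatusSketch OnlyUpdateOfficialBuildsSketch(
    std::optional<bool> is_official_build,
    std::optional<int64_t> test_interval_timeout) {
  if (is_official_build.has_value() && !*is_official_build) {
    if (!test_interval_timeout.has_value()) {
      // Unofficial build, no test override: block the periodic check.
      return EvalStatusSketch::kAskMeAgainLater;
    }
    // Unofficial build, but a test interval is defined: fall through.
  }
  return EvalStatusSketch::kContinue;
}

int main() {
  std::cout
      << (OnlyUpdateOfficialBuildsSketch(false, std::nullopt) ==
          EvalStatusSketch::kAskMeAgainLater)    // Blocked.
      << (OnlyUpdateOfficialBuildsSketch(false, 30) ==
          EvalStatusSketch::kContinue)           // Allowed for testing.
      << (OnlyUpdateOfficialBuildsSketch(true, std::nullopt) ==
          EvalStatusSketch::kContinue) << '\n';  // Official builds unaffected.
  return 0;
}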
diff --git a/update_manager/policy.h b/update_manager/policy.h
index 5d65d9a..fb4a129 100644
--- a/update_manager/policy.h
+++ b/update_manager/policy.h
@@ -17,6 +17,7 @@
 #ifndef UPDATE_ENGINE_UPDATE_MANAGER_POLICY_H_
 #define UPDATE_ENGINE_UPDATE_MANAGER_POLICY_H_
 
+#include <memory>
 #include <string>
 #include <tuple>
 #include <vector>
@@ -42,24 +43,35 @@
 // Parameters of an update check. These parameters are determined by the
 // UpdateCheckAllowed policy.
 struct UpdateCheckParams {
-  bool updates_enabled;  // Whether the auto-updates are enabled on this build.
+  // Whether the auto-updates are enabled on this build.
+  bool updates_enabled{true};
 
   // Attributes pertaining to the case where update checks are allowed.
   //
   // A target version prefix, if imposed by policy; otherwise, an empty string.
   std::string target_version_prefix;
   // Specifies whether rollback images are allowed by device policy.
-  bool rollback_allowed;
+  bool rollback_allowed{false};
+  // Specifies if rollbacks should attempt to preserve some system state.
+  bool rollback_data_save_requested{false};
   // Specifies the number of Chrome milestones rollback should be allowed,
   // starting from the stable version at any time. Value is -1 if unspecified
   // (e.g. no device policy is available yet), in this case no version
   // roll-forward should happen.
-  int rollback_allowed_milestones;
+  int rollback_allowed_milestones{0};
+  // Whether a rollback with data save should be initiated on channel
+  // downgrade (e.g. beta to stable).
+  bool rollback_on_channel_downgrade{false};
   // A target channel, if so imposed by policy; otherwise, an empty string.
   std::string target_channel;
+  // The channel hint, e.g. LTS (Long Term Support), if imposed by policy.
+  std::string lts_tag;
+  // Specifies a token which maps to a Chrome OS Quick Fix Build, if imposed by
+  // policy; otherwise, an empty string.
+  std::string quick_fix_build_token;
 
   // Whether the allowed update is interactive (user-initiated) or periodic.
-  bool interactive;
+  bool interactive{false};
 };
 
 // Input arguments to UpdateCanStart.
@@ -218,9 +230,6 @@
     if (reinterpret_cast<typeof(&Policy::UpdateCanStart)>(policy_method) ==
         &Policy::UpdateCanStart)
       return class_name + "UpdateCanStart";
-    if (reinterpret_cast<typeof(&Policy::UpdateDownloadAllowed)>(
-            policy_method) == &Policy::UpdateDownloadAllowed)
-      return class_name + "UpdateDownloadAllowed";
     if (reinterpret_cast<typeof(&Policy::P2PEnabled)>(policy_method) ==
         &Policy::P2PEnabled)
       return class_name + "P2PEnabled";
@@ -269,17 +278,6 @@
                                     UpdateDownloadParams* result,
                                     UpdateState update_state) const = 0;
 
-  // Checks whether downloading of an update is allowed; currently, this checks
-  // whether the network connection type is suitable for updating over.  May
-  // consult the shill provider as well as the device policy (if available).
-  // Returns |EvalStatus::kSucceeded|, setting |result| according to whether or
-  // not the current connection can be used; on error, returns
-  // |EvalStatus::kFailed| and sets |error| accordingly.
-  virtual EvalStatus UpdateDownloadAllowed(EvaluationContext* ec,
-                                           State* state,
-                                           std::string* error,
-                                           bool* result) const = 0;
-
   // Checks whether P2P is enabled. This may consult device policy and other
   // global settings.
   virtual EvalStatus P2PEnabled(EvaluationContext* ec,
@@ -308,6 +306,9 @@
   DISALLOW_COPY_AND_ASSIGN(Policy);
 };
 
+// Get system dependent policy implementation.
+std::unique_ptr<Policy> GetSystemPolicy();
+
 }  // namespace chromeos_update_manager
 
 #endif  // UPDATE_ENGINE_UPDATE_MANAGER_POLICY_H_
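// A minimal sketch of why the brace initializers added to UpdateCheckParams
// matter: with in-class defaults, a freshly constructed params object already
// carries defined values, so a policy that sets only some fields cannot leave
// the rest uninitialized. Field names mirror the struct above; the surrounding
// harness is a stand-in.
#include <iostream>
#include <string>

struct UpdateCheckParamsSketch {
  bool updates_enabled{true};
  std::string target_version_prefix;
  bool rollback_allowed{false};
  bool rollback_data_save_requested{false};
  int rollback_allowed_milestones{0};
  bool rollback_on_channel_downgrade{false};
  std::string target_channel;
  std::string lts_tag;
  std::string quick_fix_build_token;
  bool interactive{false};
};

int main() {
  UpdateCheckParamsSketch params;  // No policy has run yet: defaults hold.
  std::cout << std::boolalpha << params.updates_enabled << ' '
            << params.rollback_allowed_milestones << ' '
            << params.interactive << '\n';  // true 0 false
  return 0;
}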
diff --git a/update_manager/policy_test_utils.cc b/update_manager/policy_test_utils.cc
index 5491e00..e8961b1 100644
--- a/update_manager/policy_test_utils.cc
+++ b/update_manager/policy_test_utils.cc
@@ -20,11 +20,13 @@
 #include <tuple>
 #include <vector>
 
+#include "update_engine/cros/fake_system_state.h"
 #include "update_engine/update_manager/next_update_check_policy_impl.h"
 
 using base::Time;
 using base::TimeDelta;
 using chromeos_update_engine::ErrorCode;
+using chromeos_update_engine::FakeSystemState;
 using std::string;
 using std::tuple;
 using std::vector;
@@ -33,8 +35,10 @@
 
 void UmPolicyTestBase::SetUp() {
   loop_.SetAsCurrent();
+  FakeSystemState::CreateInstance();
+  fake_clock_ = FakeSystemState::Get()->fake_clock();
   SetUpDefaultClock();
-  eval_ctx_ = new EvaluationContext(&fake_clock_, TimeDelta::FromSeconds(5));
+  eval_ctx_.reset(new EvaluationContext(TimeDelta::FromSeconds(5)));
   SetUpDefaultState();
 }
 
@@ -44,12 +48,12 @@
 
 // Sets the clock to fixed values.
 void UmPolicyTestBase::SetUpDefaultClock() {
-  fake_clock_.SetMonotonicTime(Time::FromInternalValue(12345678L));
-  fake_clock_.SetWallclockTime(Time::FromInternalValue(12345678901234L));
+  fake_clock_->SetMonotonicTime(Time::FromInternalValue(12345678L));
+  fake_clock_->SetWallclockTime(Time::FromInternalValue(12345678901234L));
 }
 
 void UmPolicyTestBase::SetUpDefaultTimeProvider() {
-  Time current_time = fake_clock_.GetWallclockTime();
+  Time current_time = FakeSystemState::Get()->clock()->GetWallclockTime();
   base::Time::Exploded exploded;
   current_time.LocalExplode(&exploded);
   fake_state_.time_provider()->var_curr_hour()->reset(new int(exploded.hour));
@@ -61,9 +65,9 @@
 
 void UmPolicyTestBase::SetUpDefaultState() {
   fake_state_.updater_provider()->var_updater_started_time()->reset(
-      new Time(fake_clock_.GetWallclockTime()));
+      new Time(fake_clock_->GetWallclockTime()));
   fake_state_.updater_provider()->var_last_checked_time()->reset(
-      new Time(fake_clock_.GetWallclockTime()));
+      new Time(fake_clock_->GetWallclockTime()));
   fake_state_.updater_provider()->var_consecutive_failed_update_checks()->reset(
       new unsigned int(0));  // NOLINT(readability/casting)
   fake_state_.updater_provider()->var_server_dictated_poll_interval()->reset(
@@ -78,7 +82,8 @@
 // Returns a default UpdateState structure:
 UpdateState UmPolicyTestBase::GetDefaultUpdateState(
     TimeDelta first_seen_period) {
-  Time first_seen_time = fake_clock_.GetWallclockTime() - first_seen_period;
+  Time first_seen_time =
+      FakeSystemState::Get()->clock()->GetWallclockTime() - first_seen_period;
   UpdateState update_state = UpdateState();
 
   // This is a non-interactive check returning a delta payload, seen for the
diff --git a/update_manager/policy_test_utils.h b/update_manager/policy_test_utils.h
index eb5758f..72bd3bc 100644
--- a/update_manager/policy_test_utils.h
+++ b/update_manager/policy_test_utils.h
@@ -91,9 +91,9 @@
   }
 
   brillo::FakeMessageLoop loop_{nullptr};
-  chromeos_update_engine::FakeClock fake_clock_;
+  chromeos_update_engine::FakeClock* fake_clock_;
   FakeState fake_state_;
-  scoped_refptr<EvaluationContext> eval_ctx_;
+  std::shared_ptr<EvaluationContext> eval_ctx_;
   std::unique_ptr<Policy> policy_;
 };
 
diff --git a/update_manager/policy_utils.h b/update_manager/policy_utils.h
index 3204780..aedb90c 100644
--- a/update_manager/policy_utils.h
+++ b/update_manager/policy_utils.h
@@ -55,7 +55,6 @@
     EvalStatus status =
         (policy->*policy_method)(ec, state, error, result, args...);
     if (status != EvalStatus::kContinue) {
-      LOG(INFO) << "decision by " << policy->PolicyRequestName(policy_method);
       return status;
     }
   }
@@ -93,13 +92,6 @@
     return EvalStatus::kContinue;
   };
 
-  EvalStatus UpdateDownloadAllowed(EvaluationContext* ec,
-                                   State* state,
-                                   std::string* error,
-                                   bool* result) const override {
-    return EvalStatus::kContinue;
-  };
-
   EvalStatus P2PEnabled(EvaluationContext* ec,
                         State* state,
                         std::string* error,
diff --git a/update_manager/real_device_policy_provider.cc b/update_manager/real_device_policy_provider.cc
index 586ee3e..e7b964b 100644
--- a/update_manager/real_device_policy_provider.cc
+++ b/update_manager/real_device_policy_provider.cc
@@ -25,8 +25,8 @@
 #include <base/time/time.h>
 #include <policy/device_policy.h>
 
+#include "update_engine/common/connection_utils.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/connection_utils.h"
 #include "update_engine/update_manager/generic_variables.h"
 
 using base::TimeDelta;
@@ -105,10 +105,12 @@
 
 template <typename T>
 void RealDevicePolicyProvider::UpdateVariable(
-    AsyncCopyVariable<T>* var, bool (DevicePolicy::*getter_method)(T*) const) {
+    AsyncCopyVariable<T>* var,
+    // NOLINTNEXTLINE(readability/casting)
+    bool (DevicePolicy::*getter)(T*) const) {
   T new_value;
   if (policy_provider_->device_policy_is_loaded() &&
-      (policy_provider_->GetDevicePolicy().*getter_method)(&new_value)) {
+      (policy_provider_->GetDevicePolicy().*getter)(&new_value)) {
     var->SetValue(new_value);
   } else {
     var->UnsetValue();
@@ -118,10 +120,10 @@
 template <typename T>
 void RealDevicePolicyProvider::UpdateVariable(
     AsyncCopyVariable<T>* var,
-    bool (RealDevicePolicyProvider::*getter_method)(T*) const) {
+    bool (RealDevicePolicyProvider::*getter)(T*) const) {
   T new_value;
   if (policy_provider_->device_policy_is_loaded() &&
-      (this->*getter_method)(&new_value)) {
+      (this->*getter)(&new_value)) {
     var->SetValue(new_value);
   } else {
     var->UnsetValue();
@@ -198,6 +200,30 @@
   return true;
 }
 
+bool RealDevicePolicyProvider::ConvertHasOwner(bool* has_owner) const {
+  string owner;
+  if (!policy_provider_->GetDevicePolicy().GetOwner(&owner)) {
+    return false;
+  }
+  *has_owner = !owner.empty();
+  return true;
+}
+
+bool RealDevicePolicyProvider::ConvertChannelDowngradeBehavior(
+    ChannelDowngradeBehavior* channel_downgrade_behavior) const {
+  int behavior;
+  if (!policy_provider_->GetDevicePolicy().GetChannelDowngradeBehavior(
+          &behavior)) {
+    return false;
+  }
+  if (behavior < static_cast<int>(ChannelDowngradeBehavior::kFirstValue) ||
+      behavior > static_cast<int>(ChannelDowngradeBehavior::kLastValue)) {
+    return false;
+  }
+  *channel_downgrade_behavior = static_cast<ChannelDowngradeBehavior>(behavior);
+  return true;
+}
+
 void RealDevicePolicyProvider::RefreshDevicePolicy() {
   if (!policy_provider_->Reload()) {
     LOG(INFO) << "No device policies/settings present.";
@@ -209,6 +235,7 @@
   UpdateVariable(&var_release_channel_, &DevicePolicy::GetReleaseChannel);
   UpdateVariable(&var_release_channel_delegated_,
                  &DevicePolicy::GetReleaseChannelDelegated);
+  UpdateVariable(&var_release_lts_tag_, &DevicePolicy::GetReleaseLtsTag);
   UpdateVariable(&var_update_disabled_, &DevicePolicy::GetUpdateDisabled);
   UpdateVariable(&var_target_version_prefix_,
                  &DevicePolicy::GetTargetVersionPrefix);
@@ -225,7 +252,7 @@
   UpdateVariable(
       &var_allowed_connection_types_for_update_,
       &RealDevicePolicyProvider::ConvertAllowedConnectionTypesForUpdate);
-  UpdateVariable(&var_owner_, &DevicePolicy::GetOwner);
+  UpdateVariable(&var_has_owner_, &RealDevicePolicyProvider::ConvertHasOwner);
   UpdateVariable(&var_http_downloads_enabled_,
                  &DevicePolicy::GetHttpDownloadsEnabled);
   UpdateVariable(&var_au_p2p_enabled_, &DevicePolicy::GetAuP2PEnabled);
@@ -235,6 +262,12 @@
                  &DevicePolicy::GetAutoLaunchedKioskAppId);
   UpdateVariable(&var_disallowed_time_intervals_,
                  &RealDevicePolicyProvider::ConvertDisallowedTimeIntervals);
+  UpdateVariable(&var_channel_downgrade_behavior_,
+                 &RealDevicePolicyProvider::ConvertChannelDowngradeBehavior);
+  UpdateVariable(&var_device_minimum_version_,
+                 &DevicePolicy::GetHighestDeviceMinimumVersion);
+  UpdateVariable(&var_quick_fix_build_token_,
+                 &DevicePolicy::GetDeviceQuickFixBuildToken);
 }
 
 }  // namespace chromeos_update_manager
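// A minimal standalone sketch of the two converters added above: the owner
// string collapses to a has-owner boolean, and the raw policy integer is
// accepted only inside the ChannelDowngradeBehavior range. The enum values and
// bounds below are assumptions for illustration, not the real policy values.
#include <iostream>
#include <optional>
#include <string>

enum class ChannelDowngradeBehaviorSketch {
  kUnspecified = 0,
  kWaitForVersionToCatchUp = 1,
  kRollback = 2,
  kAllowUserToConfigure = 3,
  kFirstValue = kUnspecified,
  kLastValue = kAllowUserToConfigure,
};

bool ConvertHasOwnerSketch(const std::string& owner, bool* has_owner) {
  *has_owner = !owner.empty();  // Enterprise-enrolled devices report no owner.
  return true;
}

std::optional<ChannelDowngradeBehaviorSketch> ConvertChannelDowngradeSketch(
    int behavior) {
  if (behavior <
          static_cast<int>(ChannelDowngradeBehaviorSketch::kFirstValue) ||
      behavior > static_cast<int>(ChannelDowngradeBehaviorSketch::kLastValue))
    return std::nullopt;  // Out-of-range values leave the variable unset.
  return static_cast<ChannelDowngradeBehaviorSketch>(behavior);
}

int main() {
  bool has_owner = false;
  ConvertHasOwnerSketch("abc@test.org", &has_owner);
  std::cout << std::boolalpha << has_owner << ' '
            << ConvertChannelDowngradeSketch(-1).has_value() << ' '
            << ConvertChannelDowngradeSketch(2).has_value() << '\n';
  return 0;
}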
diff --git a/update_manager/real_device_policy_provider.h b/update_manager/real_device_policy_provider.h
index bda4cff..0f84b77 100644
--- a/update_manager/real_device_policy_provider.h
+++ b/update_manager/real_device_policy_provider.h
@@ -34,7 +34,7 @@
 
 namespace chromeos_update_manager {
 
-// DevicePolicyProvider concrete implementation.
+// |DevicePolicyProvider| concrete implementation.
 class RealDevicePolicyProvider : public DevicePolicyProvider {
  public:
 #if USE_DBUS
@@ -64,6 +64,10 @@
     return &var_release_channel_delegated_;
   }
 
+  Variable<std::string>* var_release_lts_tag() override {
+    return &var_release_lts_tag_;
+  }
+
   Variable<bool>* var_update_disabled() override {
     return &var_update_disabled_;
   }
@@ -89,7 +93,7 @@
     return &var_allowed_connection_types_for_update_;
   }
 
-  Variable<std::string>* var_owner() override { return &var_owner_; }
+  Variable<bool>* var_has_owner() override { return &var_has_owner_; }
 
   Variable<bool>* var_http_downloads_enabled() override {
     return &var_http_downloads_enabled_;
@@ -109,16 +113,30 @@
     return &var_disallowed_time_intervals_;
   }
 
+  Variable<ChannelDowngradeBehavior>* var_channel_downgrade_behavior()
+      override {
+    return &var_channel_downgrade_behavior_;
+  }
+
+  Variable<base::Version>* var_device_minimum_version() override {
+    return &var_device_minimum_version_;
+  }
+
+  Variable<std::string>* var_quick_fix_build_token() override {
+    return &var_quick_fix_build_token_;
+  }
+
  private:
   FRIEND_TEST(UmRealDevicePolicyProviderTest, RefreshScheduledTest);
   FRIEND_TEST(UmRealDevicePolicyProviderTest, NonExistentDevicePolicyReloaded);
   FRIEND_TEST(UmRealDevicePolicyProviderTest, ValuesUpdated);
+  FRIEND_TEST(UmRealDevicePolicyProviderTest, HasOwnerConverted);
 
-  // A static handler for the PropertyChangedCompleted signal from the session
+  // A static handler for the |PropertyChangedCompleted| signal from the session
   // manager used as a callback.
   void OnPropertyChangedCompletedSignal(const std::string& success);
 
-  // Called when the signal in UpdateEngineLibcrosProxyResolvedInterface is
+  // Called when the signal in |UpdateEngineLibcrosProxyResolvedInterface| is
   // connected.
   void OnSignalConnected(const std::string& interface_name,
                          const std::string& signal_name,
@@ -134,36 +152,46 @@
   // passed, which is a DevicePolicy getter method.
   template <typename T>
   void UpdateVariable(AsyncCopyVariable<T>* var,
-                      bool (policy::DevicePolicy::*getter_method)(T*) const);
+                      bool (policy::DevicePolicy::*getter)(T*) const);
 
   // Updates the async variable |var| based on the result value of the getter
   // method passed, which is a wrapper getter on this class.
   template <typename T>
   void UpdateVariable(AsyncCopyVariable<T>* var,
-                      bool (RealDevicePolicyProvider::*getter_method)(T*)
-                          const);
+                      bool (RealDevicePolicyProvider::*getter)(T*) const);
 
-  // Wrapper for DevicePolicy::GetRollbackToTargetVersion() that converts the
-  // result to RollbackToTargetVersion.
+  // Wrapper for |DevicePolicy::GetRollbackToTargetVersion()| that converts the
+  // result to |RollbackToTargetVersion|.
   bool ConvertRollbackToTargetVersion(
       RollbackToTargetVersion* rollback_to_target_version) const;
 
-  // Wrapper for DevicePolicy::GetScatterFactorInSeconds() that converts the
-  // result to a base::TimeDelta. It returns the same value as
-  // GetScatterFactorInSeconds().
+  // Wrapper for |DevicePolicy::GetScatterFactorInSeconds()| that converts the
+  // result to a |base::TimeDelta|. It returns the same value as
+  // |GetScatterFactorInSeconds()|.
   bool ConvertScatterFactor(base::TimeDelta* scatter_factor) const;
 
-  // Wrapper for DevicePolicy::GetAllowedConnectionTypesForUpdate() that
-  // converts the result to a set of ConnectionType elements instead of strings.
+  // Wrapper for |DevicePolicy::GetAllowedConnectionTypesForUpdate()| that
+  // converts the result to a set of |ConnectionType| elements instead of
+  // strings.
   bool ConvertAllowedConnectionTypesForUpdate(
       std::set<chromeos_update_engine::ConnectionType>* allowed_types) const;
 
-  // Wrapper for DevicePolicy::GetUpdateTimeRestrictions() that converts
-  // the DevicePolicy::WeeklyTimeInterval structs to WeeklyTimeInterval objects,
-  // which offer more functionality.
+  // Wrapper for |DevicePolicy::GetUpdateTimeRestrictions()| that converts
+  // the |DevicePolicy::WeeklyTimeInterval| structs to |WeeklyTimeInterval|
+  // objects, which offer more functionality.
   bool ConvertDisallowedTimeIntervals(
       WeeklyTimeIntervalVector* disallowed_intervals_out) const;
 
+  // Wrapper for |DevicePolicy::GetOwner()| that converts the result to a
+  // boolean indicating whether the device has an owner. (Enterprise-enrolled
+  // devices do not have an owner.)
+  bool ConvertHasOwner(bool* has_owner) const;
+
+  // Wrapper for |DevicePolicy::GetChannelDowngradeBehavior| that converts the
+  // result to |ChannelDowngradeBehavior|.
+  bool ConvertChannelDowngradeBehavior(
+      ChannelDowngradeBehavior* channel_downgrade_behavior) const;
+
   // Used for fetching information about the device policy.
   policy::PolicyProvider* policy_provider_;
 
@@ -181,10 +209,11 @@
   AsyncCopyVariable<bool> var_device_policy_is_loaded_{"policy_is_loaded",
                                                        false};
 
-  // Variables mapping the exposed methods from the policy::DevicePolicy.
+  // Variables mapping the exposed methods from the |policy::DevicePolicy|.
   AsyncCopyVariable<std::string> var_release_channel_{"release_channel"};
   AsyncCopyVariable<bool> var_release_channel_delegated_{
       "release_channel_delegated"};
+  AsyncCopyVariable<std::string> var_release_lts_tag_{"release_lts_tag"};
   AsyncCopyVariable<bool> var_update_disabled_{"update_disabled"};
   AsyncCopyVariable<std::string> var_target_version_prefix_{
       "target_version_prefix"};
@@ -196,7 +225,7 @@
   AsyncCopyVariable<std::set<chromeos_update_engine::ConnectionType>>
       var_allowed_connection_types_for_update_{
           "allowed_connection_types_for_update"};
-  AsyncCopyVariable<std::string> var_owner_{"owner"};
+  AsyncCopyVariable<bool> var_has_owner_{"owner"};
   AsyncCopyVariable<bool> var_http_downloads_enabled_{"http_downloads_enabled"};
   AsyncCopyVariable<bool> var_au_p2p_enabled_{"au_p2p_enabled"};
   AsyncCopyVariable<bool> var_allow_kiosk_app_control_chrome_version_{
@@ -205,6 +234,12 @@
       "update_time_restrictions"};
   AsyncCopyVariable<std::string> var_auto_launched_kiosk_app_id_{
       "auto_launched_kiosk_app_id"};
+  AsyncCopyVariable<ChannelDowngradeBehavior> var_channel_downgrade_behavior_{
+      "channel_downgrade_behavior"};
+  AsyncCopyVariable<base::Version> var_device_minimum_version_{
+      "device_minimum_version"};
+  AsyncCopyVariable<std::string> var_quick_fix_build_token_{
+      "quick_fix_build_token"};
 
   DISALLOW_COPY_AND_ASSIGN(RealDevicePolicyProvider);
 };
diff --git a/update_manager/real_device_policy_provider_unittest.cc b/update_manager/real_device_policy_provider_unittest.cc
index 0d7b0d0..fd55859 100644
--- a/update_manager/real_device_policy_provider_unittest.cc
+++ b/update_manager/real_device_policy_provider_unittest.cc
@@ -34,7 +34,7 @@
 
 #include "update_engine/common/test_utils.h"
 #if USE_DBUS
-#include "update_engine/dbus_test_utils.h"
+#include "update_engine/cros/dbus_test_utils.h"
 #endif  // USE_DBUS
 #include "update_engine/update_manager/umtest_utils.h"
 
@@ -177,6 +177,7 @@
 
   UmTestUtils::ExpectVariableNotSet(provider_->var_release_channel());
   UmTestUtils::ExpectVariableNotSet(provider_->var_release_channel_delegated());
+  UmTestUtils::ExpectVariableNotSet(provider_->var_release_lts_tag());
   UmTestUtils::ExpectVariableNotSet(provider_->var_update_disabled());
   UmTestUtils::ExpectVariableNotSet(provider_->var_target_version_prefix());
   UmTestUtils::ExpectVariableNotSet(
@@ -186,7 +187,7 @@
   UmTestUtils::ExpectVariableNotSet(provider_->var_scatter_factor());
   UmTestUtils::ExpectVariableNotSet(
       provider_->var_allowed_connection_types_for_update());
-  UmTestUtils::ExpectVariableNotSet(provider_->var_owner());
+  UmTestUtils::ExpectVariableNotSet(provider_->var_has_owner());
   UmTestUtils::ExpectVariableNotSet(provider_->var_http_downloads_enabled());
   UmTestUtils::ExpectVariableNotSet(provider_->var_au_p2p_enabled());
   UmTestUtils::ExpectVariableNotSet(
@@ -194,6 +195,9 @@
   UmTestUtils::ExpectVariableNotSet(
       provider_->var_auto_launched_kiosk_app_id());
   UmTestUtils::ExpectVariableNotSet(provider_->var_disallowed_time_intervals());
+  UmTestUtils::ExpectVariableNotSet(
+      provider_->var_channel_downgrade_behavior());
+  UmTestUtils::ExpectVariableNotSet(provider_->var_quick_fix_build_token());
 }
 
 TEST_F(UmRealDevicePolicyProviderTest, ValuesUpdated) {
@@ -230,6 +234,26 @@
       string("myapp"), provider_->var_auto_launched_kiosk_app_id());
 }
 
+TEST_F(UmRealDevicePolicyProviderTest, HasOwnerConverted) {
+  SetUpExistentDevicePolicy();
+  EXPECT_TRUE(provider_->Init());
+  loop_.RunOnce(false);
+  Mock::VerifyAndClearExpectations(&mock_policy_provider_);
+
+  EXPECT_CALL(mock_device_policy_, GetOwner(_))
+      .Times(2)
+      .WillOnce(DoAll(SetArgPointee<0>(string("")), Return(true)))
+      .WillOnce(DoAll(SetArgPointee<0>(string("abc@test.org")), Return(true)));
+
+  // Enterprise enrolled device.
+  provider_->RefreshDevicePolicy();
+  UmTestUtils::ExpectVariableHasValue(false, provider_->var_has_owner());
+
+  // Has a device owner.
+  provider_->RefreshDevicePolicy();
+  UmTestUtils::ExpectVariableHasValue(true, provider_->var_has_owner());
+}
+
 TEST_F(UmRealDevicePolicyProviderTest, RollbackToTargetVersionConverted) {
   SetUpExistentDevicePolicy();
   EXPECT_CALL(mock_device_policy_, GetRollbackToTargetVersion(_))
@@ -324,14 +348,14 @@
 #else
       .Times(1)
 #endif  // USE_DBUS
-      .WillRepeatedly(DoAll(
-          SetArgPointee<0>(set<string>{"bluetooth", "wifi", "not-a-type"}),
-          Return(true)));
+      .WillRepeatedly(
+          DoAll(SetArgPointee<0>(set<string>{"ethernet", "wifi", "not-a-type"}),
+                Return(true)));
   EXPECT_TRUE(provider_->Init());
   loop_.RunOnce(false);
 
   UmTestUtils::ExpectVariableHasValue(
-      set<ConnectionType>{ConnectionType::kWifi, ConnectionType::kBluetooth},
+      set<ConnectionType>{ConnectionType::kWifi, ConnectionType::kEthernet},
       provider_->var_allowed_connection_types_for_update());
 }
 
@@ -356,4 +380,83 @@
       provider_->var_disallowed_time_intervals());
 }
 
+TEST_F(UmRealDevicePolicyProviderTest, ChannelDowngradeBehaviorConverted) {
+  SetUpExistentDevicePolicy();
+  EXPECT_CALL(mock_device_policy_, GetChannelDowngradeBehavior(_))
+#if USE_DBUS
+      .Times(2)
+#else
+      .Times(1)
+#endif  // USE_DBUS
+      .WillRepeatedly(DoAll(SetArgPointee<0>(static_cast<int>(
+                                ChannelDowngradeBehavior::kRollback)),
+                            Return(true)));
+  EXPECT_TRUE(provider_->Init());
+  loop_.RunOnce(false);
+
+  UmTestUtils::ExpectVariableHasValue(
+      ChannelDowngradeBehavior::kRollback,
+      provider_->var_channel_downgrade_behavior());
+}
+
+TEST_F(UmRealDevicePolicyProviderTest, ChannelDowngradeBehaviorTooSmall) {
+  SetUpExistentDevicePolicy();
+  EXPECT_CALL(mock_device_policy_, GetChannelDowngradeBehavior(_))
+#if USE_DBUS
+      .Times(2)
+#else
+      .Times(1)
+#endif  // USE_DBUS
+      .WillRepeatedly(DoAll(SetArgPointee<0>(-1), Return(true)));
+  EXPECT_TRUE(provider_->Init());
+  loop_.RunOnce(false);
+
+  UmTestUtils::ExpectVariableNotSet(
+      provider_->var_channel_downgrade_behavior());
+}
+
+TEST_F(UmRealDevicePolicyProviderTest, ChannelDowngradeBehaviorTooLarge) {
+  SetUpExistentDevicePolicy();
+  EXPECT_CALL(mock_device_policy_, GetChannelDowngradeBehavior(_))
+#if USE_DBUS
+      .Times(2)
+#else
+      .Times(1)
+#endif  // USE_DBUS
+      .WillRepeatedly(DoAll(SetArgPointee<0>(10), Return(true)));
+  EXPECT_TRUE(provider_->Init());
+  loop_.RunOnce(false);
+
+  UmTestUtils::ExpectVariableNotSet(
+      provider_->var_channel_downgrade_behavior());
+}
+
+TEST_F(UmRealDevicePolicyProviderTest, DeviceMinimumVersionPolicySet) {
+  SetUpExistentDevicePolicy();
+
+  base::Version device_minimum_version("13315.60.12");
+
+  EXPECT_CALL(mock_device_policy_, GetHighestDeviceMinimumVersion(_))
+      .WillRepeatedly(
+          DoAll(SetArgPointee<0>(device_minimum_version), Return(true)));
+  EXPECT_TRUE(provider_->Init());
+  loop_.RunOnce(false);
+
+  UmTestUtils::ExpectVariableHasValue(device_minimum_version,
+                                      provider_->var_device_minimum_version());
+}
+
+TEST_F(UmRealDevicePolicyProviderTest, DeviceQuickFixBuildTokenSet) {
+  SetUpExistentDevicePolicy();
+
+  EXPECT_CALL(mock_device_policy_, GetDeviceQuickFixBuildToken(_))
+      .WillRepeatedly(
+          DoAll(SetArgPointee<0>(string("some_token")), Return(true)));
+  EXPECT_TRUE(provider_->Init());
+  loop_.RunOnce(false);
+
+  UmTestUtils::ExpectVariableHasValue(string("some_token"),
+                                      provider_->var_quick_fix_build_token());
+}
+
 }  // namespace chromeos_update_manager
diff --git a/update_manager/real_shill_provider.cc b/update_manager/real_shill_provider.cc
index 0144603..4d067fd 100644
--- a/update_manager/real_shill_provider.cc
+++ b/update_manager/real_shill_provider.cc
@@ -24,6 +24,7 @@
 #include <shill/dbus-constants.h>
 #include <shill/dbus-proxies.h>
 
+using chromeos_update_engine::SystemState;
 using chromeos_update_engine::connection_utils::ParseConnectionType;
 using org::chromium::flimflam::ManagerProxyInterface;
 using org::chromium::flimflam::ServiceProxyInterface;
@@ -97,7 +98,8 @@
   bool is_connected =
       (default_service_path_.IsValid() && default_service_path_.value() != "/");
   var_is_connected_.SetValue(is_connected);
-  var_conn_last_changed_.SetValue(clock_->GetWallclockTime());
+  var_conn_last_changed_.SetValue(
+      SystemState::Get()->clock()->GetWallclockTime());
 
   if (!is_connected) {
     var_conn_type_.UnsetValue();
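// A minimal standalone sketch of the dependency change above: instead of
// injecting a ClockInterface* into each provider, callers read the clock from
// a process-wide SystemState-style singleton, and tests install a fake
// instance up front. Hypothetical names; not the real SystemState API.
#include <cassert>
#include <ctime>
#include <iostream>

class ClockSketch {
 public:
  virtual ~ClockSketch() = default;
  virtual std::time_t GetWallclockTime() const { return std::time(nullptr); }
};

class FakeClockSketch : public ClockSketch {
 public:
  void SetWallclockTime(std::time_t t) { now_ = t; }
  std::time_t GetWallclockTime() const override { return now_; }

 private:
  std::time_t now_ = 0;
};

class SystemStateSketch {
 public:
  // Tests call this once in SetUp(); production code would install a real
  // clock during initialization.
  static void CreateInstanceForTest(ClockSketch* clock) { clock_ = clock; }
  static ClockSketch* clock() {
    assert(clock_ != nullptr);
    return clock_;
  }

 private:
  static ClockSketch* clock_;
};

ClockSketch* SystemStateSketch::clock_ = nullptr;

// Provider code no longer stores a clock member; it just asks the singleton.
std::time_t ConnLastChangedSketch() {
  return SystemStateSketch::clock()->GetWallclockTime();
}

int main() {
  FakeClockSketch fake_clock;
  fake_clock.SetWallclockTime(1240428300);  // The same test reference point.
  SystemStateSketch::CreateInstanceForTest(&fake_clock);
  std::cout << ConnLastChangedSketch() << '\n';  // Prints 1240428300.
  return 0;
}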
diff --git a/update_manager/real_shill_provider.h b/update_manager/real_shill_provider.h
index ec5c570..cd53d92 100644
--- a/update_manager/real_shill_provider.h
+++ b/update_manager/real_shill_provider.h
@@ -27,8 +27,8 @@
 #include <base/time/time.h>
 #include <dbus/object_path.h>
 
-#include "update_engine/common/clock_interface.h"
-#include "update_engine/shill_proxy_interface.h"
+#include "update_engine/common/system_state.h"
+#include "update_engine/cros/shill_proxy_interface.h"
 #include "update_engine/update_manager/generic_variables.h"
 #include "update_engine/update_manager/shill_provider.h"
 
@@ -37,9 +37,9 @@
 // ShillProvider concrete implementation.
 class RealShillProvider : public ShillProvider {
  public:
-  RealShillProvider(chromeos_update_engine::ShillProxyInterface* shill_proxy,
-                    chromeos_update_engine::ClockInterface* clock)
-      : shill_proxy_(shill_proxy), clock_(clock) {}
+  explicit RealShillProvider(
+      chromeos_update_engine::ShillProxyInterface* shill_proxy)
+      : shill_proxy_(shill_proxy) {}
 
   ~RealShillProvider() override = default;
 
@@ -81,9 +81,6 @@
   // The mockable interface to access the shill DBus proxies.
   std::unique_ptr<chromeos_update_engine::ShillProxyInterface> shill_proxy_;
 
-  // A clock abstraction (mockable).
-  chromeos_update_engine::ClockInterface* const clock_;
-
   // The provider's variables.
   AsyncCopyVariable<bool> var_is_connected_{"is_connected"};
   AsyncCopyVariable<chromeos_update_engine::ConnectionType> var_conn_type_{
diff --git a/update_manager/real_shill_provider_unittest.cc b/update_manager/real_shill_provider_unittest.cc
index dcc729a..9a2d8a8 100644
--- a/update_manager/real_shill_provider_unittest.cc
+++ b/update_manager/real_shill_provider_unittest.cc
@@ -27,17 +27,17 @@
 #include <shill/dbus-proxies.h>
 #include <shill/dbus-proxy-mocks.h>
 
-#include "update_engine/common/fake_clock.h"
 #include "update_engine/common/test_utils.h"
-#include "update_engine/dbus_test_utils.h"
-#include "update_engine/fake_shill_proxy.h"
+#include "update_engine/cros/dbus_test_utils.h"
+#include "update_engine/cros/fake_shill_proxy.h"
+#include "update_engine/cros/fake_system_state.h"
 #include "update_engine/update_manager/umtest_utils.h"
 
 using base::Time;
 using base::TimeDelta;
 using chromeos_update_engine::ConnectionTethering;
 using chromeos_update_engine::ConnectionType;
-using chromeos_update_engine::FakeClock;
+using chromeos_update_engine::FakeSystemState;
 using org::chromium::flimflam::ManagerProxyMock;
 using org::chromium::flimflam::ServiceProxyMock;
 using std::unique_ptr;
@@ -51,8 +51,6 @@
 // Fake service paths.
 const char* const kFakeEthernetServicePath = "/fake/ethernet/service";
 const char* const kFakeWifiServicePath = "/fake/wifi/service";
-const char* const kFakeWimaxServicePath = "/fake/wimax/service";
-const char* const kFakeBluetoothServicePath = "/fake/bluetooth/service";
 const char* const kFakeCellularServicePath = "/fake/cellular/service";
 const char* const kFakeVpnServicePath = "/fake/vpn/service";
 const char* const kFakeUnknownServicePath = "/fake/unknown/service";
@@ -65,10 +63,10 @@
  protected:
   // Initialize the RealShillProvider under test.
   void SetUp() override {
-    fake_clock_.SetWallclockTime(InitTime());
+    FakeSystemState::Get()->fake_clock()->SetWallclockTime(InitTime());
     loop_.SetAsCurrent();
     fake_shill_proxy_ = new chromeos_update_engine::FakeShillProxy();
-    provider_.reset(new RealShillProvider(fake_shill_proxy_, &fake_clock_));
+    provider_.reset(new RealShillProvider(fake_shill_proxy_));
 
     ManagerProxyMock* manager_proxy_mock = fake_shill_proxy_->GetManagerProxy();
 
@@ -129,11 +127,12 @@
   void SendDefaultServiceSignal(const std::string& service_path,
                                 Time* conn_change_time_p) {
     const Time conn_change_time = ConnChangedTime();
-    fake_clock_.SetWallclockTime(conn_change_time);
+    FakeSystemState::Get()->fake_clock()->SetWallclockTime(conn_change_time);
     ASSERT_TRUE(manager_property_changed_.IsHandlerRegistered());
     manager_property_changed_.signal_callback().Run(
         shill::kDefaultServiceProperty, dbus::ObjectPath(service_path));
-    fake_clock_.SetWallclockTime(conn_change_time + TimeDelta::FromSeconds(5));
+    FakeSystemState::Get()->fake_clock()->SetWallclockTime(
+        conn_change_time + TimeDelta::FromSeconds(5));
     if (conn_change_time_p)
       *conn_change_time_p = conn_change_time;
   }
@@ -204,7 +203,6 @@
   }
 
   brillo::FakeMessageLoop loop_{nullptr};
-  FakeClock fake_clock_;
   chromeos_update_engine::FakeShillProxy* fake_shill_proxy_;
 
   // The registered signal handler for the signal Manager.PropertyChanged.
@@ -317,21 +315,6 @@
       kFakeWifiServicePath, shill::kTypeWifi, ConnectionType::kWifi);
 }
 
-// Test that Wimax connection is identified correctly.
-TEST_F(UmRealShillProviderTest, ReadConnTypeWimax) {
-  InitWithDefaultService("/");
-  SetupConnectionAndTestType(
-      kFakeWimaxServicePath, shill::kTypeWimax, ConnectionType::kWimax);
-}
-
-// Test that Bluetooth connection is identified correctly.
-TEST_F(UmRealShillProviderTest, ReadConnTypeBluetooth) {
-  InitWithDefaultService("/");
-  SetupConnectionAndTestType(kFakeBluetoothServicePath,
-                             shill::kTypeBluetooth,
-                             ConnectionType::kBluetooth);
-}
-
 // Test that Cellular connection is identified correctly.
 TEST_F(UmRealShillProviderTest, ReadConnTypeCellular) {
   InitWithDefaultService("/");
diff --git a/update_manager/real_system_provider.cc b/update_manager/real_system_provider.cc
index a900071..34397f3 100644
--- a/update_manager/real_system_provider.cc
+++ b/update_manager/real_system_provider.cc
@@ -20,14 +20,17 @@
 #include <base/callback.h>
 #include <base/logging.h>
 #include <base/time/time.h>
-#if USE_CHROME_KIOSK_APP
 #include <kiosk-app/dbus-proxies.h>
-#endif  // USE_CHROME_KIOSK_APP
 
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/hardware_interface.h"
+#include "update_engine/common/system_state.h"
 #include "update_engine/common/utils.h"
+#include "update_engine/cros/omaha_request_params.h"
 #include "update_engine/update_manager/generic_variables.h"
 #include "update_engine/update_manager/variable.h"
 
+using chromeos_update_engine::SystemState;
 using std::string;
 
 namespace chromeos_update_manager {
@@ -64,9 +67,10 @@
     std::unique_ptr<T> result(new T());
     if (!func_.Run(result.get())) {
       if (failed_attempts_ >= kRetryPollVariableMaxRetry) {
-        // Give up on the retries, set back the desired polling interval and
-        // return the default.
+        // Give up on the retries and set back the desired polling interval.
         this->SetPollInterval(base_interval_);
+        // Return the default-constructed result instead of |nullptr| to
+        // indicate that the value could not be fetched after the retries.
         return result.release();
       }
       this->SetPollInterval(
@@ -96,19 +100,20 @@
 
 bool RealSystemProvider::Init() {
   var_is_normal_boot_mode_.reset(new ConstCopyVariable<bool>(
-      "is_normal_boot_mode", hardware_->IsNormalBootMode()));
+      "is_normal_boot_mode",
+      SystemState::Get()->hardware()->IsNormalBootMode()));
 
   var_is_official_build_.reset(new ConstCopyVariable<bool>(
-      "is_official_build", hardware_->IsOfficialBuild()));
+      "is_official_build", SystemState::Get()->hardware()->IsOfficialBuild()));
 
   var_is_oobe_complete_.reset(new CallCopyVariable<bool>(
       "is_oobe_complete",
       base::Bind(&chromeos_update_engine::HardwareInterface::IsOOBEComplete,
-                 base::Unretained(hardware_),
+                 base::Unretained(SystemState::Get()->hardware()),
                  nullptr)));
 
   var_num_slots_.reset(new ConstCopyVariable<unsigned int>(
-      "num_slots", boot_control_->GetNumSlots()));
+      "num_slots", SystemState::Get()->boot_control()->GetNumSlots()));
 
   var_kiosk_required_platform_version_.reset(new RetryPollVariable<string>(
       "kiosk_required_platform_version",
@@ -116,12 +121,15 @@
       base::Bind(&RealSystemProvider::GetKioskAppRequiredPlatformVersion,
                  base::Unretained(this))));
 
+  var_chromeos_version_.reset(new ConstCopyVariable<base::Version>(
+      "chromeos_version",
+      base::Version(SystemState::Get()->request_params()->app_version())));
+
   return true;
 }
 
 bool RealSystemProvider::GetKioskAppRequiredPlatformVersion(
     string* required_platform_version) {
-#if USE_CHROME_KIOSK_APP
   brillo::ErrorPtr error;
   if (!kiosk_app_proxy_->GetRequiredPlatformVersion(required_platform_version,
                                                     &error)) {
@@ -129,7 +137,6 @@
     required_platform_version->clear();
     return false;
   }
-#endif  // USE_CHROME_KIOSK_APP
 
   return true;
 }
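
The comments in RetryPollVariable::GetValue() above describe a give-up policy: failed fetches report "not set" until the retry budget is exhausted, after which the default-constructed value is returned and the base polling interval is restored. A compact, self-contained sketch of that policy under assumed names (RetryPoller; the retry limit is passed in), not the real RetryPollVariable API:

#include <cstdio>
#include <functional>
#include <memory>
#include <string>

template <typename T>
class RetryPoller {
 public:
  RetryPoller(int max_retry, std::function<bool(T*)> fetch)
      : max_retry_(max_retry), fetch_(std::move(fetch)) {}

  // Returns the fetched value on success, nullptr while retries remain, or a
  // default-constructed T once all retries have failed.
  std::unique_ptr<T> GetValue() {
    auto result = std::make_unique<T>();
    if (fetch_(result.get())) {
      failed_attempts_ = 0;
      return result;
    }
    if (++failed_attempts_ > max_retry_) {
      // Give up: hand back the default value rather than nullptr.
      return result;
    }
    return nullptr;
  }

 private:
  const int max_retry_;
  std::function<bool(T*)> fetch_;
  int failed_attempts_ = 0;
};

int main() {
  // A fetcher that always fails, as in the "unreadable platform version"
  // unittest below: five reads stay unset, the sixth returns "".
  RetryPoller<std::string> poller(5, [](std::string*) { return false; });
  for (int i = 0; i < 6; ++i) {
    auto value = poller.GetValue();
    std::printf("read %d: %s\n", i,
                value ? (value->empty() ? "\"\" (gave up)" : value->c_str())
                      : "nullptr (retrying)");
  }
  return 0;
}
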
diff --git a/update_manager/real_system_provider.h b/update_manager/real_system_provider.h
index 114c6ea..558d3be 100644
--- a/update_manager/real_system_provider.h
+++ b/update_manager/real_system_provider.h
@@ -20,8 +20,8 @@
 #include <memory>
 #include <string>
 
-#include "update_engine/common/boot_control_interface.h"
-#include "update_engine/common/hardware_interface.h"
+#include <base/version.h>
+
 #include "update_engine/update_manager/system_provider.h"
 
 namespace org {
@@ -36,18 +36,8 @@
 class RealSystemProvider : public SystemProvider {
  public:
   RealSystemProvider(
-      chromeos_update_engine::HardwareInterface* hardware,
-      chromeos_update_engine::BootControlInterface* boot_control,
       org::chromium::KioskAppServiceInterfaceProxyInterface* kiosk_app_proxy)
-      : hardware_(hardware),
-#if USE_CHROME_KIOSK_APP
-        boot_control_(boot_control),
-        kiosk_app_proxy_(kiosk_app_proxy) {
-  }
-#else
-        boot_control_(boot_control) {
-  }
-#endif  // USE_CHROME_KIOSK_APP
+      : kiosk_app_proxy_(kiosk_app_proxy) {}
 
   // Initializes the provider and returns whether it succeeded.
   bool Init();
@@ -72,6 +62,10 @@
     return var_kiosk_required_platform_version_.get();
   }
 
+  Variable<base::Version>* var_chromeos_version() override {
+    return var_chromeos_version_.get();
+  }
+
  private:
   bool GetKioskAppRequiredPlatformVersion(
       std::string* required_platform_version);
@@ -81,12 +75,9 @@
   std::unique_ptr<Variable<bool>> var_is_oobe_complete_;
   std::unique_ptr<Variable<unsigned int>> var_num_slots_;
   std::unique_ptr<Variable<std::string>> var_kiosk_required_platform_version_;
+  std::unique_ptr<Variable<base::Version>> var_chromeos_version_;
 
-  chromeos_update_engine::HardwareInterface* const hardware_;
-  chromeos_update_engine::BootControlInterface* const boot_control_;
-#if USE_CHROME_KIOSK_APP
   org::chromium::KioskAppServiceInterfaceProxyInterface* const kiosk_app_proxy_;
-#endif  // USE_CHROME_KIOSK_APP
 
   DISALLOW_COPY_AND_ASSIGN(RealSystemProvider);
 };
diff --git a/update_manager/real_system_provider_unittest.cc b/update_manager/real_system_provider_unittest.cc
index f654f7a..9abcad0 100644
--- a/update_manager/real_system_provider_unittest.cc
+++ b/update_manager/real_system_provider_unittest.cc
@@ -21,55 +21,44 @@
 #include <base/time/time.h>
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
+#include <kiosk-app/dbus-proxies.h>
+#include <kiosk-app/dbus-proxy-mocks.h>
 
 #include "update_engine/common/fake_boot_control.h"
 #include "update_engine/common/fake_hardware.h"
+#include "update_engine/cros/fake_system_state.h"
 #include "update_engine/update_manager/umtest_utils.h"
-#if USE_CHROME_KIOSK_APP
-#include "kiosk-app/dbus-proxies.h"
-#include "kiosk-app/dbus-proxy-mocks.h"
 
+using chromeos_update_engine::FakeSystemState;
 using org::chromium::KioskAppServiceInterfaceProxyMock;
-#endif  // USE_CHROME_KIOSK_APP
 using std::unique_ptr;
 using testing::_;
 using testing::DoAll;
 using testing::Return;
 using testing::SetArgPointee;
 
-#if USE_CHROME_KIOSK_APP
 namespace {
 const char kRequiredPlatformVersion[] = "1234.0.0";
 }  // namespace
-#endif  // USE_CHROME_KIOSK_APP
 
 namespace chromeos_update_manager {
 
 class UmRealSystemProviderTest : public ::testing::Test {
  protected:
   void SetUp() override {
-#if USE_CHROME_KIOSK_APP
+    FakeSystemState::CreateInstance();
     kiosk_app_proxy_mock_.reset(new KioskAppServiceInterfaceProxyMock());
     ON_CALL(*kiosk_app_proxy_mock_, GetRequiredPlatformVersion(_, _, _))
         .WillByDefault(
             DoAll(SetArgPointee<0>(kRequiredPlatformVersion), Return(true)));
 
-    provider_.reset(new RealSystemProvider(
-        &fake_hardware_, &fake_boot_control_, kiosk_app_proxy_mock_.get()));
-#else
-    provider_.reset(
-        new RealSystemProvider(&fake_hardware_, &fake_boot_control_, nullptr));
-#endif  // USE_CHROME_KIOSK_APP
+    provider_.reset(new RealSystemProvider(kiosk_app_proxy_mock_.get()));
     EXPECT_TRUE(provider_->Init());
   }
 
-  chromeos_update_engine::FakeHardware fake_hardware_;
-  chromeos_update_engine::FakeBootControl fake_boot_control_;
   unique_ptr<RealSystemProvider> provider_;
 
-#if USE_CHROME_KIOSK_APP
   unique_ptr<KioskAppServiceInterfaceProxyMock> kiosk_app_proxy_mock_;
-#endif  // USE_CHROME_KIOSK_APP
 };
 
 TEST_F(UmRealSystemProviderTest, InitTest) {
@@ -77,19 +66,29 @@
   EXPECT_NE(nullptr, provider_->var_is_official_build());
   EXPECT_NE(nullptr, provider_->var_is_oobe_complete());
   EXPECT_NE(nullptr, provider_->var_kiosk_required_platform_version());
+  EXPECT_NE(nullptr, provider_->var_chromeos_version());
 }
 
 TEST_F(UmRealSystemProviderTest, IsOOBECompleteTrue) {
-  fake_hardware_.SetIsOOBEComplete(base::Time());
+  FakeSystemState::Get()->fake_hardware()->SetIsOOBEComplete(base::Time());
   UmTestUtils::ExpectVariableHasValue(true, provider_->var_is_oobe_complete());
 }
 
 TEST_F(UmRealSystemProviderTest, IsOOBECompleteFalse) {
-  fake_hardware_.UnsetIsOOBEComplete();
+  FakeSystemState::Get()->fake_hardware()->UnsetIsOOBEComplete();
   UmTestUtils::ExpectVariableHasValue(false, provider_->var_is_oobe_complete());
 }
 
-#if USE_CHROME_KIOSK_APP
+TEST_F(UmRealSystemProviderTest, VersionFromRequestParams) {
+  FakeSystemState::Get()->request_params()->set_app_version("1.2.3");
+  // Call |Init| again to pick up the version.
+  EXPECT_TRUE(provider_->Init());
+
+  base::Version version("1.2.3");
+  UmTestUtils::ExpectVariableHasValue(version,
+                                      provider_->var_chromeos_version());
+}
+
 TEST_F(UmRealSystemProviderTest, KioskRequiredPlatformVersion) {
   UmTestUtils::ExpectVariableHasValue(
       std::string(kRequiredPlatformVersion),
@@ -119,11 +118,21 @@
       std::string(kRequiredPlatformVersion),
       provider_->var_kiosk_required_platform_version());
 }
-#else
-TEST_F(UmRealSystemProviderTest, KioskRequiredPlatformVersion) {
+
+TEST_F(UmRealSystemProviderTest, KioskRequiredPlatformVersionRepeatedFailure) {
+  // Simulate an unreadable platform version. The variable should return a
+  // null pointer |kRetryPollVariableMaxRetry| times and then return an empty
+  // string to indicate that it gave up.
+  constexpr int kNumMethodCalls = 5;
+  EXPECT_CALL(*kiosk_app_proxy_mock_, GetRequiredPlatformVersion)
+      .Times(kNumMethodCalls + 1)
+      .WillRepeatedly(Return(false));
+  for (int i = 0; i < kNumMethodCalls; ++i) {
+    UmTestUtils::ExpectVariableNotSet(
+        provider_->var_kiosk_required_platform_version());
+  }
   UmTestUtils::ExpectVariableHasValue(
-      std::string(), provider_->var_kiosk_required_platform_version());
+      std::string(""), provider_->var_kiosk_required_platform_version());
 }
-#endif  // USE_CHROME_KIOSK_APP
 
 }  // namespace chromeos_update_manager
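
The repeated-failure test above pairs an exact call count with a repeated action. A tiny gmock sketch (hypothetical VersionSource/MockVersionSource types, not the kiosk-app proxy) of how Times(N) bounds the number of allowed calls while WillRepeatedly(Return(false)) supplies the action for each of them:

#include <string>

#include <gmock/gmock.h>
#include <gtest/gtest.h>

using testing::Return;

class VersionSource {
 public:
  virtual ~VersionSource() = default;
  virtual bool GetVersion(std::string* version) const = 0;
};

class MockVersionSource : public VersionSource {
 public:
  MOCK_METHOD(bool, GetVersion, (std::string*), (const, override));
};

TEST(CardinalitySketchTest, BoundedRepeatedFailure) {
  MockVersionSource source;
  constexpr int kCalls = 3;
  // No argument matcher: any arguments; exactly kCalls calls are allowed.
  EXPECT_CALL(source, GetVersion)
      .Times(kCalls)
      .WillRepeatedly(Return(false));

  std::string unused;
  for (int i = 0; i < kCalls; ++i)
    EXPECT_FALSE(source.GetVersion(&unused));
  // One more call here would trip the cardinality check when the mock is
  // verified at the end of the test.
}
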
diff --git a/update_manager/real_time_provider.cc b/update_manager/real_time_provider.cc
index efd1747..2b71fa0 100644
--- a/update_manager/real_time_provider.cc
+++ b/update_manager/real_time_provider.cc
@@ -20,11 +20,11 @@
 
 #include <base/time/time.h>
 
-#include "update_engine/common/clock_interface.h"
+#include "update_engine/common/system_state.h"
 
 using base::Time;
 using base::TimeDelta;
-using chromeos_update_engine::ClockInterface;
+using chromeos_update_engine::SystemState;
 using std::string;
 
 namespace chromeos_update_manager {
@@ -34,13 +34,13 @@
  public:
   // TODO(garnold) Turn this into an async variable with the needed callback
   // logic for when its value changes.
-  CurrDateVariable(const string& name, ClockInterface* clock)
-      : Variable<Time>(name, TimeDelta::FromHours(1)), clock_(clock) {}
+  explicit CurrDateVariable(const string& name)
+      : Variable<Time>(name, TimeDelta::FromHours(1)) {}
 
  protected:
   virtual const Time* GetValue(TimeDelta /* timeout */, string* /* errmsg */) {
     Time::Exploded now_exp;
-    clock_->GetWallclockTime().LocalExplode(&now_exp);
+    SystemState::Get()->clock()->GetWallclockTime().LocalExplode(&now_exp);
     now_exp.hour = now_exp.minute = now_exp.second = now_exp.millisecond = 0;
     Time* now = new Time();
     bool success = Time::FromLocalExploded(now_exp, now);
@@ -49,8 +49,6 @@
   }
 
  private:
-  ClockInterface* clock_;
-
   DISALLOW_COPY_AND_ASSIGN(CurrDateVariable);
 };
 
@@ -59,44 +57,40 @@
  public:
   // TODO(garnold) Turn this into an async variable with the needed callback
   // logic for when its value changes.
-  CurrHourVariable(const string& name, ClockInterface* clock)
-      : Variable<int>(name, TimeDelta::FromMinutes(5)), clock_(clock) {}
+  explicit CurrHourVariable(const string& name)
+      : Variable<int>(name, TimeDelta::FromMinutes(5)) {}
 
  protected:
   virtual const int* GetValue(TimeDelta /* timeout */, string* /* errmsg */) {
     Time::Exploded exploded;
-    clock_->GetWallclockTime().LocalExplode(&exploded);
+    SystemState::Get()->clock()->GetWallclockTime().LocalExplode(&exploded);
     return new int(exploded.hour);
   }
 
  private:
-  ClockInterface* clock_;
-
   DISALLOW_COPY_AND_ASSIGN(CurrHourVariable);
 };
 
 class CurrMinuteVariable : public Variable<int> {
  public:
-  CurrMinuteVariable(const string& name, ClockInterface* clock)
-      : Variable<int>(name, TimeDelta::FromSeconds(15)), clock_(clock) {}
+  explicit CurrMinuteVariable(const string& name)
+      : Variable<int>(name, TimeDelta::FromSeconds(15)) {}
 
  protected:
   virtual const int* GetValue(TimeDelta /* timeout */, string* /* errmsg */) {
     Time::Exploded exploded;
-    clock_->GetWallclockTime().LocalExplode(&exploded);
+    SystemState::Get()->clock()->GetWallclockTime().LocalExplode(&exploded);
     return new int(exploded.minute);
   }
 
  private:
-  ClockInterface* clock_;
-
   DISALLOW_COPY_AND_ASSIGN(CurrMinuteVariable);
 };
 
 bool RealTimeProvider::Init() {
-  var_curr_date_.reset(new CurrDateVariable("curr_date", clock_));
-  var_curr_hour_.reset(new CurrHourVariable("curr_hour", clock_));
-  var_curr_minute_.reset(new CurrMinuteVariable("curr_minute", clock_));
+  var_curr_date_.reset(new CurrDateVariable("curr_date"));
+  var_curr_hour_.reset(new CurrHourVariable("curr_hour"));
+  var_curr_minute_.reset(new CurrMinuteVariable("curr_minute"));
   return true;
 }
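
The date/hour/minute variables above all explode the wall-clock time and keep only the fields they report, with CurrDateVariable zeroing the sub-day fields to land on local midnight. A plain-C++ approximation using <ctime> instead of base::Time (so the types differ from the code above, but the truncation logic is the same idea):

#include <cstdio>
#include <ctime>

int main() {
  std::time_t now = std::time(nullptr);
  std::tm local = *std::localtime(&now);

  // curr_hour / curr_minute are read straight from the exploded local time.
  int curr_hour = local.tm_hour;
  int curr_minute = local.tm_min;

  // curr_date: zero out the sub-day fields to get local midnight, mirroring
  // how CurrDateVariable clears hour/minute/second/millisecond.
  std::tm midnight = local;
  midnight.tm_hour = midnight.tm_min = midnight.tm_sec = 0;
  std::time_t curr_date = std::mktime(&midnight);

  std::printf("curr_date=%lld curr_hour=%d curr_minute=%d\n",
              static_cast<long long>(curr_date), curr_hour, curr_minute);
  return 0;
}
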
 
diff --git a/update_manager/real_time_provider.h b/update_manager/real_time_provider.h
index 40dab36..58b0fa5 100644
--- a/update_manager/real_time_provider.h
+++ b/update_manager/real_time_provider.h
@@ -21,7 +21,6 @@
 
 #include <base/time/time.h>
 
-#include "update_engine/common/clock_interface.h"
 #include "update_engine/update_manager/time_provider.h"
 
 namespace chromeos_update_manager {
@@ -29,8 +28,7 @@
 // TimeProvider concrete implementation.
 class RealTimeProvider : public TimeProvider {
  public:
-  explicit RealTimeProvider(chromeos_update_engine::ClockInterface* clock)
-      : clock_(clock) {}
+  RealTimeProvider() = default;
 
   // Initializes the provider and returns whether it succeeded.
   bool Init();
@@ -44,9 +42,6 @@
   Variable<int>* var_curr_minute() override { return var_curr_minute_.get(); }
 
  private:
-  // A clock abstraction (fakeable).
-  chromeos_update_engine::ClockInterface* const clock_;
-
   std::unique_ptr<Variable<base::Time>> var_curr_date_;
   std::unique_ptr<Variable<int>> var_curr_hour_;
   std::unique_ptr<Variable<int>> var_curr_minute_;
diff --git a/update_manager/real_time_provider_unittest.cc b/update_manager/real_time_provider_unittest.cc
index ce2a718..f8ed0d2 100644
--- a/update_manager/real_time_provider_unittest.cc
+++ b/update_manager/real_time_provider_unittest.cc
@@ -22,11 +22,11 @@
 #include <base/time/time.h>
 #include <gtest/gtest.h>
 
-#include "update_engine/common/fake_clock.h"
+#include "update_engine/cros/fake_system_state.h"
 #include "update_engine/update_manager/umtest_utils.h"
 
 using base::Time;
-using chromeos_update_engine::FakeClock;
+using chromeos_update_engine::FakeSystemState;
 using std::unique_ptr;
 
 namespace chromeos_update_manager {
@@ -34,8 +34,9 @@
 class UmRealTimeProviderTest : public ::testing::Test {
  protected:
   void SetUp() override {
+    FakeSystemState::CreateInstance();
     // The provider initializes correctly.
-    provider_.reset(new RealTimeProvider(&fake_clock_));
+    provider_.reset(new RealTimeProvider());
     ASSERT_NE(nullptr, provider_.get());
     ASSERT_TRUE(provider_->Init());
   }
@@ -56,7 +57,6 @@
     return time;
   }
 
-  FakeClock fake_clock_;
   unique_ptr<RealTimeProvider> provider_;
 };
 
@@ -71,7 +71,7 @@
   Time expected;
   ignore_result(Time::FromLocalExploded(exploded, &expected));
 
-  fake_clock_.SetWallclockTime(now);
+  FakeSystemState::Get()->fake_clock()->SetWallclockTime(now);
   UmTestUtils::ExpectVariableHasValue(expected, provider_->var_curr_date());
 }
 
@@ -79,7 +79,7 @@
   const Time now = CurrTime();
   Time::Exploded expected;
   now.LocalExplode(&expected);
-  fake_clock_.SetWallclockTime(now);
+  FakeSystemState::Get()->fake_clock()->SetWallclockTime(now);
   UmTestUtils::ExpectVariableHasValue(expected.hour,
                                       provider_->var_curr_hour());
 }
@@ -88,7 +88,7 @@
   const Time now = CurrTime();
   Time::Exploded expected;
   now.LocalExplode(&expected);
-  fake_clock_.SetWallclockTime(now);
+  FakeSystemState::Get()->fake_clock()->SetWallclockTime(now);
   UmTestUtils::ExpectVariableHasValue(expected.minute,
                                       provider_->var_curr_minute());
 }
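
These tests, like the other provider tests in this diff, depend on the SystemState singleton being replaced by FakeSystemState::CreateInstance() before the provider is constructed, so that calls such as SystemState::Get()->clock() land on a fake. A minimal sketch of that singleton-plus-fake arrangement under hypothetical names (SystemStateSketch, FakeSystemStateSketch, FakeClock); the real update_engine classes expose far more than a clock:

#include <cassert>
#include <cstdio>

class ClockInterface {
 public:
  virtual ~ClockInterface() = default;
  virtual long GetWallclockTime() const = 0;  // Seconds, for brevity.
};

class SystemStateSketch {
 public:
  static SystemStateSketch* Get() { return instance_; }
  ClockInterface* clock() const { return clock_; }

 protected:
  static inline SystemStateSketch* instance_ = nullptr;
  ClockInterface* clock_ = nullptr;
};

class FakeClock : public ClockInterface {
 public:
  long GetWallclockTime() const override { return wallclock_; }
  void SetWallclockTime(long t) { wallclock_ = t; }

 private:
  long wallclock_ = 0;
};

class FakeSystemStateSketch : public SystemStateSketch {
 public:
  // Installs the fake as the global instance, as the tests do in SetUp().
  static void CreateInstance() {
    static FakeSystemStateSketch fake;
    fake.clock_ = &fake.fake_clock_;
    instance_ = &fake;
  }
  static FakeSystemStateSketch* Get() {
    return static_cast<FakeSystemStateSketch*>(instance_);
  }
  FakeClock* fake_clock() { return &fake_clock_; }

 private:
  FakeClock fake_clock_;
};

// A provider no longer needs a clock injected through its constructor.
long CurrentWallclock() {
  return SystemStateSketch::Get()->clock()->GetWallclockTime();
}

int main() {
  FakeSystemStateSketch::CreateInstance();
  FakeSystemStateSketch::Get()->fake_clock()->SetWallclockTime(12345);
  assert(CurrentWallclock() == 12345);
  std::printf("ok\n");
  return 0;
}
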
diff --git a/update_manager/real_updater_provider.cc b/update_manager/real_updater_provider.cc
index 134db69..5b76332 100644
--- a/update_manager/real_updater_provider.cc
+++ b/update_manager/real_updater_provider.cc
@@ -18,6 +18,7 @@
 
 #include <inttypes.h>
 
+#include <algorithm>
 #include <string>
 
 #include <base/bind.h>
@@ -26,10 +27,10 @@
 #include <update_engine/dbus-constants.h>
 
 #include "update_engine/client_library/include/update_engine/update_status.h"
-#include "update_engine/common/clock_interface.h"
 #include "update_engine/common/prefs.h"
-#include "update_engine/omaha_request_params.h"
-#include "update_engine/update_attempter.h"
+#include "update_engine/common/system_state.h"
+#include "update_engine/cros/omaha_request_params.h"
+#include "update_engine/cros/update_attempter.h"
 #include "update_engine/update_status_utils.h"
 
 using base::StringPrintf;
@@ -48,25 +49,16 @@
 template <typename T>
 class UpdaterVariableBase : public Variable<T> {
  public:
-  UpdaterVariableBase(const string& name,
-                      VariableMode mode,
-                      SystemState* system_state)
-      : Variable<T>(name, mode), system_state_(system_state) {}
-
- protected:
-  // The system state used for pulling information from the updater.
-  inline SystemState* system_state() const { return system_state_; }
-
- private:
-  SystemState* const system_state_;
+  UpdaterVariableBase(const string& name, VariableMode mode)
+      : Variable<T>(name, mode) {}
 };
 
 // Helper class for issuing a GetStatus() to the UpdateAttempter.
 class GetStatusHelper {
  public:
-  GetStatusHelper(SystemState* system_state, string* errmsg) {
-    is_success_ =
-        system_state->update_attempter()->GetStatus(&update_engine_status_);
+  explicit GetStatusHelper(string* errmsg) {
+    is_success_ = SystemState::Get()->update_attempter()->GetStatus(
+        &update_engine_status_);
     if (!is_success_ && errmsg) {
       *errmsg = "Failed to get a status update from the update engine";
     }
@@ -96,12 +88,12 @@
 // A variable reporting the time when a last update check was issued.
 class LastCheckedTimeVariable : public UpdaterVariableBase<Time> {
  public:
-  LastCheckedTimeVariable(const string& name, SystemState* system_state)
-      : UpdaterVariableBase<Time>(name, kVariableModePoll, system_state) {}
+  explicit LastCheckedTimeVariable(const string& name)
+      : UpdaterVariableBase<Time>(name, kVariableModePoll) {}
 
  private:
   const Time* GetValue(TimeDelta /* timeout */, string* errmsg) override {
-    GetStatusHelper raw(system_state(), errmsg);
+    GetStatusHelper raw(errmsg);
     if (!raw.is_success())
       return nullptr;
 
@@ -115,12 +107,12 @@
 // between 0.0 and 1.0.
 class ProgressVariable : public UpdaterVariableBase<double> {
  public:
-  ProgressVariable(const string& name, SystemState* system_state)
-      : UpdaterVariableBase<double>(name, kVariableModePoll, system_state) {}
+  explicit ProgressVariable(const string& name)
+      : UpdaterVariableBase<double>(name, kVariableModePoll) {}
 
  private:
   const double* GetValue(TimeDelta /* timeout */, string* errmsg) override {
-    GetStatusHelper raw(system_state(), errmsg);
+    GetStatusHelper raw(errmsg);
     if (!raw.is_success())
       return nullptr;
 
@@ -141,8 +133,8 @@
 // A variable reporting the stage in which the update process is.
 class StageVariable : public UpdaterVariableBase<Stage> {
  public:
-  StageVariable(const string& name, SystemState* system_state)
-      : UpdaterVariableBase<Stage>(name, kVariableModePoll, system_state) {}
+  explicit StageVariable(const string& name)
+      : UpdaterVariableBase<Stage>(name, kVariableModePoll) {}
 
  private:
   struct CurrOpStrToStage {
@@ -169,10 +161,12 @@
      Stage::kReportingErrorEvent},
     {update_engine::kUpdateStatusAttemptingRollback,
      Stage::kAttemptingRollback},
+    {update_engine::kUpdateStatusCleanupPreviousUpdate,
+     Stage::kCleanupPreviousUpdate},
 };
 
 const Stage* StageVariable::GetValue(TimeDelta /* timeout */, string* errmsg) {
-  GetStatusHelper raw(system_state(), errmsg);
+  GetStatusHelper raw(errmsg);
   if (!raw.is_success())
     return nullptr;
 
@@ -188,12 +182,12 @@
 // A variable reporting the version number that an update is updating to.
 class NewVersionVariable : public UpdaterVariableBase<string> {
  public:
-  NewVersionVariable(const string& name, SystemState* system_state)
-      : UpdaterVariableBase<string>(name, kVariableModePoll, system_state) {}
+  explicit NewVersionVariable(const string& name)
+      : UpdaterVariableBase<string>(name, kVariableModePoll) {}
 
  private:
   const string* GetValue(TimeDelta /* timeout */, string* errmsg) override {
-    GetStatusHelper raw(system_state(), errmsg);
+    GetStatusHelper raw(errmsg);
     if (!raw.is_success())
       return nullptr;
 
@@ -206,12 +200,12 @@
 // A variable reporting the size of the update being processed in bytes.
 class PayloadSizeVariable : public UpdaterVariableBase<uint64_t> {
  public:
-  PayloadSizeVariable(const string& name, SystemState* system_state)
-      : UpdaterVariableBase<uint64_t>(name, kVariableModePoll, system_state) {}
+  explicit PayloadSizeVariable(const string& name)
+      : UpdaterVariableBase<uint64_t>(name, kVariableModePoll) {}
 
  private:
   const uint64_t* GetValue(TimeDelta /* timeout */, string* errmsg) override {
-    GetStatusHelper raw(system_state(), errmsg);
+    GetStatusHelper raw(errmsg);
     if (!raw.is_success())
       return nullptr;
 
@@ -230,20 +224,20 @@
 // policy request.
 class UpdateCompletedTimeVariable : public UpdaterVariableBase<Time> {
  public:
-  UpdateCompletedTimeVariable(const string& name, SystemState* system_state)
-      : UpdaterVariableBase<Time>(name, kVariableModePoll, system_state) {}
+  explicit UpdateCompletedTimeVariable(const string& name)
+      : UpdaterVariableBase<Time>(name, kVariableModePoll) {}
 
  private:
   const Time* GetValue(TimeDelta /* timeout */, string* errmsg) override {
     Time update_boottime;
-    if (!system_state()->update_attempter()->GetBootTimeAtUpdate(
+    if (!SystemState::Get()->update_attempter()->GetBootTimeAtUpdate(
             &update_boottime)) {
       if (errmsg)
         *errmsg = "Update completed time could not be read";
       return nullptr;
     }
 
-    chromeos_update_engine::ClockInterface* clock = system_state()->clock();
+    const auto* clock = SystemState::Get()->clock();
     Time curr_boottime = clock->GetBootTime();
     if (curr_boottime < update_boottime) {
       if (errmsg)
@@ -260,12 +254,12 @@
 // Variables reporting the current image channel.
 class CurrChannelVariable : public UpdaterVariableBase<string> {
  public:
-  CurrChannelVariable(const string& name, SystemState* system_state)
-      : UpdaterVariableBase<string>(name, kVariableModePoll, system_state) {}
+  explicit CurrChannelVariable(const string& name)
+      : UpdaterVariableBase<string>(name, kVariableModePoll) {}
 
  private:
   const string* GetValue(TimeDelta /* timeout */, string* errmsg) override {
-    OmahaRequestParams* request_params = system_state()->request_params();
+    OmahaRequestParams* request_params = SystemState::Get()->request_params();
     string channel = request_params->current_channel();
     if (channel.empty()) {
       if (errmsg)
@@ -281,12 +275,12 @@
 // Variables reporting the new image channel.
 class NewChannelVariable : public UpdaterVariableBase<string> {
  public:
-  NewChannelVariable(const string& name, SystemState* system_state)
-      : UpdaterVariableBase<string>(name, kVariableModePoll, system_state) {}
+  explicit NewChannelVariable(const string& name)
+      : UpdaterVariableBase<string>(name, kVariableModePoll) {}
 
  private:
   const string* GetValue(TimeDelta /* timeout */, string* errmsg) override {
-    OmahaRequestParams* request_params = system_state()->request_params();
+    OmahaRequestParams* request_params = SystemState::Get()->request_params();
     string channel = request_params->target_channel();
     if (channel.empty()) {
       if (errmsg)
@@ -305,24 +299,25 @@
       public chromeos_update_engine::PrefsInterface::ObserverInterface {
  public:
   BooleanPrefVariable(const string& name,
-                      chromeos_update_engine::PrefsInterface* prefs,
                       const char* key,
                       bool default_val)
       : AsyncCopyVariable<bool>(name),
-        prefs_(prefs),
         key_(key),
         default_val_(default_val) {
-    prefs->AddObserver(key, this);
+    SystemState::Get()->prefs()->AddObserver(key, this);
     OnPrefSet(key);
   }
-  ~BooleanPrefVariable() { prefs_->RemoveObserver(key_, this); }
+  ~BooleanPrefVariable() {
+    SystemState::Get()->prefs()->RemoveObserver(key_, this);
+  }
 
  private:
   // Reads the actual value from the Prefs instance and updates the Variable
   // value.
   void OnPrefSet(const string& key) override {
     bool result = default_val_;
-    if (prefs_ && prefs_->Exists(key_) && !prefs_->GetBoolean(key_, &result))
+    auto* prefs = SystemState::Get()->prefs();
+    if (prefs->Exists(key_) && !prefs->GetBoolean(key_, &result))
       result = default_val_;
     // AsyncCopyVariable will take care of values that didn't change.
     SetValue(result);
@@ -330,8 +325,6 @@
 
   void OnPrefDeleted(const string& key) override { SetValue(default_val_); }
 
-  chromeos_update_engine::PrefsInterface* prefs_;
-
   // The Boolean preference key and default value.
   const char* const key_;
   const bool default_val_;
@@ -343,16 +336,16 @@
 class ConsecutiveFailedUpdateChecksVariable
     : public UpdaterVariableBase<unsigned int> {
  public:
-  ConsecutiveFailedUpdateChecksVariable(const string& name,
-                                        SystemState* system_state)
-      : UpdaterVariableBase<unsigned int>(
-            name, kVariableModePoll, system_state) {}
+  explicit ConsecutiveFailedUpdateChecksVariable(const string& name)
+      : UpdaterVariableBase<unsigned int>(name, kVariableModePoll) {}
 
  private:
   const unsigned int* GetValue(TimeDelta /* timeout */,
                                string* /* errmsg */) override {
-    return new unsigned int(
-        system_state()->update_attempter()->consecutive_failed_update_checks());
+    // NOLINTNEXTLINE(readability/casting)
+    return new unsigned int(SystemState::Get()
+                                ->update_attempter()
+                                ->consecutive_failed_update_checks());
   }
 
   DISALLOW_COPY_AND_ASSIGN(ConsecutiveFailedUpdateChecksVariable);
@@ -362,16 +355,16 @@
 class ServerDictatedPollIntervalVariable
     : public UpdaterVariableBase<unsigned int> {
  public:
-  ServerDictatedPollIntervalVariable(const string& name,
-                                     SystemState* system_state)
-      : UpdaterVariableBase<unsigned int>(
-            name, kVariableModePoll, system_state) {}
+  explicit ServerDictatedPollIntervalVariable(const string& name)
+      : UpdaterVariableBase<unsigned int>(name, kVariableModePoll) {}
 
  private:
   const unsigned int* GetValue(TimeDelta /* timeout */,
                                string* /* errmsg */) override {
-    return new unsigned int(
-        system_state()->update_attempter()->server_dictated_poll_interval());
+    // NOLINTNEXTLINE(readability/casting)
+    return new unsigned int(SystemState::Get()
+                                ->update_attempter()
+                                ->server_dictated_poll_interval());
   }
 
   DISALLOW_COPY_AND_ASSIGN(ServerDictatedPollIntervalVariable);
@@ -381,10 +374,10 @@
 class ForcedUpdateRequestedVariable
     : public UpdaterVariableBase<UpdateRequestStatus> {
  public:
-  ForcedUpdateRequestedVariable(const string& name, SystemState* system_state)
+  explicit ForcedUpdateRequestedVariable(const string& name)
       : UpdaterVariableBase<UpdateRequestStatus>::UpdaterVariableBase(
-            name, kVariableModeAsync, system_state) {
-    system_state->update_attempter()->set_forced_update_pending_callback(
+            name, kVariableModeAsync) {
+    SystemState::Get()->update_attempter()->set_forced_update_pending_callback(
         new base::Callback<void(bool, bool)>(  // NOLINT(readability/function)
             base::Bind(&ForcedUpdateRequestedVariable::Reset,
                        base::Unretained(this))));
@@ -416,15 +409,14 @@
 class UpdateRestrictionsVariable
     : public UpdaterVariableBase<UpdateRestrictions> {
  public:
-  UpdateRestrictionsVariable(const string& name, SystemState* system_state)
-      : UpdaterVariableBase<UpdateRestrictions>(
-            name, kVariableModePoll, system_state) {}
+  explicit UpdateRestrictionsVariable(const string& name)
+      : UpdaterVariableBase<UpdateRestrictions>(name, kVariableModePoll) {}
 
  private:
   const UpdateRestrictions* GetValue(TimeDelta /* timeout */,
                                      string* /* errmsg */) override {
     UpdateAttemptFlags attempt_flags =
-        system_state()->update_attempter()->GetCurrentUpdateAttemptFlags();
+        SystemState::Get()->update_attempter()->GetCurrentUpdateAttemptFlags();
     UpdateRestrictions restriction_flags = UpdateRestrictions::kNone;
     // Don't blindly copy the whole value, test and set bits that should
     // transfer from one set of flags to the other.
@@ -439,39 +431,73 @@
   DISALLOW_COPY_AND_ASSIGN(UpdateRestrictionsVariable);
 };
 
+// A variable class for reading timeout interval prefs value.
+class TestUpdateCheckIntervalTimeoutVariable : public Variable<int64_t> {
+ public:
+  explicit TestUpdateCheckIntervalTimeoutVariable(const string& name)
+      : Variable<int64_t>(name, kVariableModePoll), read_count_(0) {
+    SetMissingOk();
+  }
+  ~TestUpdateCheckIntervalTimeoutVariable() = default;
+
+ private:
+  const int64_t* GetValue(TimeDelta /* timeout */,
+                          string* /* errmsg */) override {
+    auto key = chromeos_update_engine::kPrefsTestUpdateCheckIntervalTimeout;
+    auto* prefs = SystemState::Get()->prefs();
+    int64_t result;
+    if (prefs->Exists(key) && prefs->GetInt64(key, &result)) {
+      // This specific value is used for testing only, so it should not be kept
+      // around and should be deleted after a few reads.
+      if (++read_count_ > 5)
+        prefs->Delete(key);
+
+      // Limit the timeout interval to 10 minutes so it is not abused if it is
+      // seen on official images.
+      return new int64_t(std::min(result, static_cast<int64_t>(10 * 60)));
+    }
+    return nullptr;
+  }
+
+  // Counts how many times this variable is read. This is used to delete the
+  // underlying file defining the variable after a certain number of reads in
+  // order to prevent any abuse of this variable.
+  int read_count_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestUpdateCheckIntervalTimeoutVariable);
+};
+
 // RealUpdaterProvider methods.
 
-RealUpdaterProvider::RealUpdaterProvider(SystemState* system_state)
-    : system_state_(system_state),
-      var_updater_started_time_("updater_started_time",
-                                system_state->clock()->GetWallclockTime()),
-      var_last_checked_time_(
-          new LastCheckedTimeVariable("last_checked_time", system_state_)),
-      var_update_completed_time_(new UpdateCompletedTimeVariable(
-          "update_completed_time", system_state_)),
-      var_progress_(new ProgressVariable("progress", system_state_)),
-      var_stage_(new StageVariable("stage", system_state_)),
-      var_new_version_(new NewVersionVariable("new_version", system_state_)),
-      var_payload_size_(new PayloadSizeVariable("payload_size", system_state_)),
-      var_curr_channel_(new CurrChannelVariable("curr_channel", system_state_)),
-      var_new_channel_(new NewChannelVariable("new_channel", system_state_)),
-      var_p2p_enabled_(
-          new BooleanPrefVariable("p2p_enabled",
-                                  system_state_->prefs(),
-                                  chromeos_update_engine::kPrefsP2PEnabled,
-                                  false)),
+RealUpdaterProvider::RealUpdaterProvider()
+    : var_updater_started_time_(
+          "updater_started_time",
+          SystemState::Get()->clock()->GetWallclockTime()),
+      var_last_checked_time_(new LastCheckedTimeVariable("last_checked_time")),
+      var_update_completed_time_(
+          new UpdateCompletedTimeVariable("update_completed_time")),
+      var_progress_(new ProgressVariable("progress")),
+      var_stage_(new StageVariable("stage")),
+      var_new_version_(new NewVersionVariable("new_version")),
+      var_payload_size_(new PayloadSizeVariable("payload_size")),
+      var_curr_channel_(new CurrChannelVariable("curr_channel")),
+      var_new_channel_(new NewChannelVariable("new_channel")),
+      var_p2p_enabled_(new BooleanPrefVariable(
+          "p2p_enabled", chromeos_update_engine::kPrefsP2PEnabled, false)),
       var_cellular_enabled_(new BooleanPrefVariable(
           "cellular_enabled",
-          system_state_->prefs(),
           chromeos_update_engine::kPrefsUpdateOverCellularPermission,
           false)),
       var_consecutive_failed_update_checks_(
           new ConsecutiveFailedUpdateChecksVariable(
-              "consecutive_failed_update_checks", system_state_)),
+              "consecutive_failed_update_checks")),
       var_server_dictated_poll_interval_(new ServerDictatedPollIntervalVariable(
-          "server_dictated_poll_interval", system_state_)),
-      var_forced_update_requested_(new ForcedUpdateRequestedVariable(
-          "forced_update_requested", system_state_)),
-      var_update_restrictions_(new UpdateRestrictionsVariable(
-          "update_restrictions", system_state_)) {}
+          "server_dictated_poll_interval")),
+      var_forced_update_requested_(
+          new ForcedUpdateRequestedVariable("forced_update_requested")),
+      var_update_restrictions_(
+          new UpdateRestrictionsVariable("update_restrictions")),
+      var_test_update_check_interval_timeout_(
+          new TestUpdateCheckIntervalTimeoutVariable(
+              "test_update_check_interval_timeout")) {}
 }  // namespace chromeos_update_manager
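
TestUpdateCheckIntervalTimeoutVariable above protects a test-only pref in two ways: the reported interval is capped at 10 minutes and the backing pref is deleted after a handful of reads. A self-contained sketch of those two safeguards over a hypothetical in-memory prefs store (not the PrefsInterface API):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <map>
#include <optional>
#include <string>

class InMemoryPrefs {
 public:
  bool Exists(const std::string& key) const { return values_.count(key) > 0; }
  bool GetInt64(const std::string& key, int64_t* out) const {
    auto it = values_.find(key);
    if (it == values_.end())
      return false;
    *out = it->second;
    return true;
  }
  void SetInt64(const std::string& key, int64_t value) { values_[key] = value; }
  void Delete(const std::string& key) { values_.erase(key); }

 private:
  std::map<std::string, int64_t> values_;
};

class TestIntervalTimeout {
 public:
  explicit TestIntervalTimeout(InMemoryPrefs* prefs) : prefs_(prefs) {}

  // Returns the clamped timeout, or nullopt once the pref is gone.
  std::optional<int64_t> Read() {
    const std::string key = "test-update-check-interval-timeout";
    int64_t result;
    if (!prefs_->Exists(key) || !prefs_->GetInt64(key, &result))
      return std::nullopt;
    // Test-only value: drop the pref after a few reads so it cannot linger.
    if (++read_count_ > 5)
      prefs_->Delete(key);
    // Never report more than 10 minutes, even if the pref says otherwise.
    return std::min<int64_t>(result, 10 * 60);
  }

 private:
  InMemoryPrefs* const prefs_;
  int read_count_ = 0;
};

int main() {
  InMemoryPrefs prefs;
  prefs.SetInt64("test-update-check-interval-timeout", 11 * 60);
  TestIntervalTimeout timeout(&prefs);
  // Reads 0-5 report the capped value (600 s); read 6 finds the pref deleted.
  for (int i = 0; i < 7; ++i) {
    auto value = timeout.Read();
    std::printf("read %d -> %s\n", i,
                value ? std::to_string(*value).c_str() : "unset");
  }
  return 0;
}
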
diff --git a/update_manager/real_updater_provider.h b/update_manager/real_updater_provider.h
index 1b46895..24298d7 100644
--- a/update_manager/real_updater_provider.h
+++ b/update_manager/real_updater_provider.h
@@ -20,7 +20,6 @@
 #include <memory>
 #include <string>
 
-#include "update_engine/system_state.h"
 #include "update_engine/update_manager/generic_variables.h"
 #include "update_engine/update_manager/updater_provider.h"
 
@@ -34,8 +33,7 @@
   // guarantees that parts of the system state can be mocked out at any time
   // during testing. We further assume that, by the time Init() is called, the
   // system state object is fully populated and usable.
-  explicit RealUpdaterProvider(
-      chromeos_update_engine::SystemState* system_state);
+  RealUpdaterProvider();
 
   // Initializes the provider and returns whether it succeeded.
   bool Init() { return true; }
@@ -94,10 +92,11 @@
     return var_update_restrictions_.get();
   }
 
- private:
-  // A pointer to the update engine's system state aggregator.
-  chromeos_update_engine::SystemState* system_state_;
+  Variable<int64_t>* var_test_update_check_interval_timeout() override {
+    return var_test_update_check_interval_timeout_.get();
+  }
 
+ private:
   // Variable implementations.
   ConstCopyVariable<base::Time> var_updater_started_time_;
   std::unique_ptr<Variable<base::Time>> var_last_checked_time_;
@@ -114,6 +113,7 @@
   std::unique_ptr<Variable<unsigned int>> var_server_dictated_poll_interval_;
   std::unique_ptr<Variable<UpdateRequestStatus>> var_forced_update_requested_;
   std::unique_ptr<Variable<UpdateRestrictions>> var_update_restrictions_;
+  std::unique_ptr<Variable<int64_t>> var_test_update_check_interval_timeout_;
 
   DISALLOW_COPY_AND_ASSIGN(RealUpdaterProvider);
 };
diff --git a/update_manager/real_updater_provider_unittest.cc b/update_manager/real_updater_provider_unittest.cc
index fb7a763..4afe7fc 100644
--- a/update_manager/real_updater_provider_unittest.cc
+++ b/update_manager/real_updater_provider_unittest.cc
@@ -23,16 +23,13 @@
 #include <gtest/gtest.h>
 #include <update_engine/dbus-constants.h>
 
-#include "update_engine/common/fake_clock.h"
-#include "update_engine/common/fake_prefs.h"
-#include "update_engine/fake_system_state.h"
-#include "update_engine/mock_update_attempter.h"
-#include "update_engine/omaha_request_params.h"
+#include "update_engine/cros/fake_system_state.h"
+#include "update_engine/cros/mock_update_attempter.h"
+#include "update_engine/cros/omaha_request_params.h"
 #include "update_engine/update_manager/umtest_utils.h"
 
 using base::Time;
 using base::TimeDelta;
-using chromeos_update_engine::FakeClock;
 using chromeos_update_engine::FakePrefs;
 using chromeos_update_engine::FakeSystemState;
 using chromeos_update_engine::OmahaRequestParams;
@@ -100,10 +97,8 @@
 class UmRealUpdaterProviderTest : public ::testing::Test {
  protected:
   void SetUp() override {
-    fake_clock_ = fake_sys_state_.fake_clock();
-    fake_sys_state_.set_prefs(&fake_prefs_);
-    provider_.reset(new RealUpdaterProvider(&fake_sys_state_));
-    ASSERT_NE(nullptr, provider_.get());
+    FakeSystemState::CreateInstance();
+    provider_.reset(new RealUpdaterProvider());
     // Check that the provider initializes correctly.
     ASSERT_TRUE(provider_->Init());
   }
@@ -117,31 +112,31 @@
     const Time kCurrBootTime = (valid ? kUpdateBootTime + kDurationSinceUpdate
                                       : kUpdateBootTime - kDurationSinceUpdate);
     const Time kCurrWallclockTime = FixedTime();
-    EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
+    EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(),
                 GetBootTimeAtUpdate(_))
         .WillOnce(DoAll(SetArgPointee<0>(kUpdateBootTime), Return(true)));
-    fake_clock_->SetBootTime(kCurrBootTime);
-    fake_clock_->SetWallclockTime(kCurrWallclockTime);
+    FakeSystemState::Get()->fake_clock()->SetBootTime(kCurrBootTime);
+    FakeSystemState::Get()->fake_clock()->SetWallclockTime(kCurrWallclockTime);
     return kCurrWallclockTime - kDurationSinceUpdate;
   }
 
-  FakeSystemState fake_sys_state_;
-  FakeClock* fake_clock_;  // Short for fake_sys_state_.fake_clock()
-  FakePrefs fake_prefs_;
   unique_ptr<RealUpdaterProvider> provider_;
 };
 
 TEST_F(UmRealUpdaterProviderTest, UpdaterStartedTimeIsWallclockTime) {
-  fake_clock_->SetWallclockTime(Time::FromDoubleT(123.456));
-  fake_clock_->SetMonotonicTime(Time::FromDoubleT(456.123));
-  // Run SetUp again to re-setup the provider under test to use these values.
-  SetUp();
+  FakeSystemState::Get()->fake_clock()->SetWallclockTime(
+      Time::FromDoubleT(123.456));
+  FakeSystemState::Get()->fake_clock()->SetMonotonicTime(
+      Time::FromDoubleT(456.123));
+  // Re-create the provider under test so that it picks up these values.
+  provider_.reset(new RealUpdaterProvider());
+  ASSERT_TRUE(provider_->Init());
   UmTestUtils::ExpectVariableHasValue(Time::FromDoubleT(123.456),
                                       provider_->var_updater_started_time());
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetLastCheckedTimeOkay) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetStatus(_))
       .WillOnce(DoAll(
           ActionSetUpdateEngineStatusLastCheckedTime(FixedTime().ToTimeT()),
           Return(true)));
@@ -150,49 +145,49 @@
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetLastCheckedTimeFailNoValue) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetStatus(_))
       .WillOnce(Return(false));
   UmTestUtils::ExpectVariableNotSet(provider_->var_last_checked_time());
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetProgressOkayMin) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetStatus(_))
       .WillOnce(DoAll(ActionSetUpdateEngineStatusProgress(0.0), Return(true)));
   UmTestUtils::ExpectVariableHasValue(0.0, provider_->var_progress());
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetProgressOkayMid) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetStatus(_))
       .WillOnce(DoAll(ActionSetUpdateEngineStatusProgress(0.3), Return(true)));
   UmTestUtils::ExpectVariableHasValue(0.3, provider_->var_progress());
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetProgressOkayMax) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetStatus(_))
       .WillOnce(DoAll(ActionSetUpdateEngineStatusProgress(1.0), Return(true)));
   UmTestUtils::ExpectVariableHasValue(1.0, provider_->var_progress());
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetProgressFailNoValue) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetStatus(_))
       .WillOnce(Return(false));
   UmTestUtils::ExpectVariableNotSet(provider_->var_progress());
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetProgressFailTooSmall) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetStatus(_))
       .WillOnce(DoAll(ActionSetUpdateEngineStatusProgress(-2.0), Return(true)));
   UmTestUtils::ExpectVariableNotSet(provider_->var_progress());
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetProgressFailTooBig) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetStatus(_))
       .WillOnce(DoAll(ActionSetUpdateEngineStatusProgress(2.0), Return(true)));
   UmTestUtils::ExpectVariableNotSet(provider_->var_progress());
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetStageOkayIdle) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetStatus(_))
       .WillOnce(DoAll(
           ActionSetUpdateEngineStatusStatus(update_engine::UpdateStatus::IDLE),
           Return(true)));
@@ -200,7 +195,7 @@
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetStageOkayCheckingForUpdate) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetStatus(_))
       .WillOnce(DoAll(ActionSetUpdateEngineStatusStatus(
                           update_engine::UpdateStatus::CHECKING_FOR_UPDATE),
                       Return(true)));
@@ -209,7 +204,7 @@
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetStageOkayUpdateAvailable) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetStatus(_))
       .WillOnce(DoAll(ActionSetUpdateEngineStatusStatus(
                           update_engine::UpdateStatus::UPDATE_AVAILABLE),
                       Return(true)));
@@ -218,7 +213,7 @@
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetStageOkayDownloading) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetStatus(_))
       .WillOnce(DoAll(ActionSetUpdateEngineStatusStatus(
                           update_engine::UpdateStatus::DOWNLOADING),
                       Return(true)));
@@ -227,7 +222,7 @@
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetStageOkayVerifying) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetStatus(_))
       .WillOnce(DoAll(ActionSetUpdateEngineStatusStatus(
                           update_engine::UpdateStatus::VERIFYING),
                       Return(true)));
@@ -236,7 +231,7 @@
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetStageOkayFinalizing) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetStatus(_))
       .WillOnce(DoAll(ActionSetUpdateEngineStatusStatus(
                           update_engine::UpdateStatus::FINALIZING),
                       Return(true)));
@@ -245,7 +240,7 @@
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetStageOkayUpdatedNeedReboot) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetStatus(_))
       .WillOnce(DoAll(ActionSetUpdateEngineStatusStatus(
                           update_engine::UpdateStatus::UPDATED_NEED_REBOOT),
                       Return(true)));
@@ -254,7 +249,7 @@
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetStageOkayReportingErrorEvent) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetStatus(_))
       .WillOnce(DoAll(ActionSetUpdateEngineStatusStatus(
                           update_engine::UpdateStatus::REPORTING_ERROR_EVENT),
                       Return(true)));
@@ -263,7 +258,7 @@
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetStageOkayAttemptingRollback) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetStatus(_))
       .WillOnce(DoAll(ActionSetUpdateEngineStatusStatus(
                           update_engine::UpdateStatus::ATTEMPTING_ROLLBACK),
                       Return(true)));
@@ -272,13 +267,13 @@
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetStageFailNoValue) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetStatus(_))
       .WillOnce(Return(false));
   UmTestUtils::ExpectVariableNotSet(provider_->var_stage());
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetNewVersionOkay) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetStatus(_))
       .WillOnce(
           DoAll(ActionSetUpdateEngineStatusNewVersion("1.2.0"), Return(true)));
   UmTestUtils::ExpectVariableHasValue(string("1.2.0"),
@@ -286,13 +281,13 @@
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetNewVersionFailNoValue) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetStatus(_))
       .WillOnce(Return(false));
   UmTestUtils::ExpectVariableNotSet(provider_->var_new_version());
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetPayloadSizeOkayZero) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetStatus(_))
       .WillOnce(DoAll(
           ActionSetUpdateEngineStatusNewSizeBytes(static_cast<uint64_t>(0)),
           Return(true)));
@@ -301,7 +296,7 @@
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetPayloadSizeOkayArbitrary) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetStatus(_))
       .WillOnce(DoAll(ActionSetUpdateEngineStatusNewSizeBytes(
                           static_cast<uint64_t>(567890)),
                       Return(true)));
@@ -310,7 +305,7 @@
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetPayloadSizeOkayTwoGigabytes) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetStatus(_))
       .WillOnce(DoAll(ActionSetUpdateEngineStatusNewSizeBytes(
                           static_cast<uint64_t>(1) << 31),
                       Return(true)));
@@ -319,44 +314,44 @@
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetPayloadSizeFailNoValue) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetStatus(_))
       .WillOnce(Return(false));
   UmTestUtils::ExpectVariableNotSet(provider_->var_payload_size());
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetCurrChannelOkay) {
   const string kChannelName("foo-channel");
-  OmahaRequestParams request_params(&fake_sys_state_);
-  request_params.Init("", "", false);
+  OmahaRequestParams request_params;
+  request_params.Init("", "", {});
   request_params.set_current_channel(kChannelName);
-  fake_sys_state_.set_request_params(&request_params);
+  FakeSystemState::Get()->set_request_params(&request_params);
   UmTestUtils::ExpectVariableHasValue(kChannelName,
                                       provider_->var_curr_channel());
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetCurrChannelFailEmpty) {
-  OmahaRequestParams request_params(&fake_sys_state_);
-  request_params.Init("", "", false);
+  OmahaRequestParams request_params;
+  request_params.Init("", "", {});
   request_params.set_current_channel("");
-  fake_sys_state_.set_request_params(&request_params);
+  FakeSystemState::Get()->set_request_params(&request_params);
   UmTestUtils::ExpectVariableNotSet(provider_->var_curr_channel());
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetNewChannelOkay) {
   const string kChannelName("foo-channel");
-  OmahaRequestParams request_params(&fake_sys_state_);
-  request_params.Init("", "", false);
+  OmahaRequestParams request_params;
+  request_params.Init("", "", {});
   request_params.set_target_channel(kChannelName);
-  fake_sys_state_.set_request_params(&request_params);
+  FakeSystemState::Get()->set_request_params(&request_params);
   UmTestUtils::ExpectVariableHasValue(kChannelName,
                                       provider_->var_new_channel());
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetNewChannelFailEmpty) {
-  OmahaRequestParams request_params(&fake_sys_state_);
-  request_params.Init("", "", false);
+  OmahaRequestParams request_params;
+  request_params.Init("", "", {});
   request_params.set_target_channel("");
-  fake_sys_state_.set_request_params(&request_params);
+  FakeSystemState::Get()->set_request_params(&request_params);
   UmTestUtils::ExpectVariableNotSet(provider_->var_new_channel());
 }
 
@@ -365,22 +360,26 @@
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetP2PEnabledOkayPrefReadsFalse) {
-  fake_prefs_.SetBoolean(chromeos_update_engine::kPrefsP2PEnabled, false);
+  FakeSystemState::Get()->fake_prefs()->SetBoolean(
+      chromeos_update_engine::kPrefsP2PEnabled, false);
   UmTestUtils::ExpectVariableHasValue(false, provider_->var_p2p_enabled());
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetP2PEnabledReadWhenInitialized) {
-  fake_prefs_.SetBoolean(chromeos_update_engine::kPrefsP2PEnabled, true);
-  SetUp();
+  FakeSystemState::Get()->fake_prefs()->SetBoolean(
+      chromeos_update_engine::kPrefsP2PEnabled, true);
+  provider_.reset(new RealUpdaterProvider());
+  ASSERT_TRUE(provider_->Init());
   UmTestUtils::ExpectVariableHasValue(true, provider_->var_p2p_enabled());
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetP2PEnabledUpdated) {
-  fake_prefs_.SetBoolean(chromeos_update_engine::kPrefsP2PEnabled, false);
+  auto* fake_prefs = FakeSystemState::Get()->fake_prefs();
+  fake_prefs->SetBoolean(chromeos_update_engine::kPrefsP2PEnabled, false);
   UmTestUtils::ExpectVariableHasValue(false, provider_->var_p2p_enabled());
-  fake_prefs_.SetBoolean(chromeos_update_engine::kPrefsP2PEnabled, true);
+  fake_prefs->SetBoolean(chromeos_update_engine::kPrefsP2PEnabled, true);
   UmTestUtils::ExpectVariableHasValue(true, provider_->var_p2p_enabled());
-  fake_prefs_.Delete(chromeos_update_engine::kPrefsP2PEnabled);
+  fake_prefs->Delete(chromeos_update_engine::kPrefsP2PEnabled);
   UmTestUtils::ExpectVariableHasValue(false, provider_->var_p2p_enabled());
 }
 
@@ -389,7 +388,7 @@
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetCellularEnabledOkayPrefReadsTrue) {
-  fake_prefs_.SetBoolean(
+  FakeSystemState::Get()->fake_prefs()->SetBoolean(
       chromeos_update_engine::kPrefsUpdateOverCellularPermission, true);
   UmTestUtils::ExpectVariableHasValue(true, provider_->var_cellular_enabled());
 }
@@ -401,7 +400,8 @@
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetUpdateCompletedTimeFailNoValue) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetBootTimeAtUpdate(_))
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(),
+              GetBootTimeAtUpdate(_))
       .WillOnce(Return(false));
   UmTestUtils::ExpectVariableNotSet(provider_->var_update_completed_time());
 }
@@ -413,7 +413,7 @@
 
 TEST_F(UmRealUpdaterProviderTest, GetConsecutiveFailedUpdateChecks) {
   const unsigned int kNumFailedChecks = 3;
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(),
               consecutive_failed_update_checks())
       .WillRepeatedly(Return(kNumFailedChecks));
   UmTestUtils::ExpectVariableHasValue(
@@ -422,7 +422,7 @@
 
 TEST_F(UmRealUpdaterProviderTest, GetServerDictatedPollInterval) {
   const unsigned int kPollInterval = 2 * 60 * 60;  // Two hours.
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(),
               server_dictated_poll_interval())
       .WillRepeatedly(Return(kPollInterval));
   UmTestUtils::ExpectVariableHasValue(
@@ -430,7 +430,7 @@
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetUpdateRestrictions) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(),
               GetCurrentUpdateAttemptFlags())
       .WillRepeatedly(Return(UpdateAttemptFlags::kFlagRestrictDownload |
                              UpdateAttemptFlags::kFlagNonInteractive));
@@ -439,10 +439,36 @@
 }
 
 TEST_F(UmRealUpdaterProviderTest, GetUpdateRestrictionsNone) {
-  EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
+  EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(),
               GetCurrentUpdateAttemptFlags())
       .WillRepeatedly(Return(UpdateAttemptFlags::kNone));
   UmTestUtils::ExpectVariableHasValue(UpdateRestrictions::kNone,
                                       provider_->var_update_restrictions());
 }
+
+TEST_F(UmRealUpdaterProviderTest, TestUpdateCheckIntervalTimeout) {
+  UmTestUtils::ExpectVariableNotSet(
+      provider_->var_test_update_check_interval_timeout());
+  auto* fake_prefs = FakeSystemState::Get()->fake_prefs();
+  fake_prefs->SetInt64(
+      chromeos_update_engine::kPrefsTestUpdateCheckIntervalTimeout, 1);
+  UmTestUtils::ExpectVariableHasValue(
+      static_cast<int64_t>(1),
+      provider_->var_test_update_check_interval_timeout());
+
+  // Make sure the value does not exceed a threshold of 10 minutes.
+  fake_prefs->SetInt64(
+      chromeos_update_engine::kPrefsTestUpdateCheckIntervalTimeout, 11 * 60);
+  // The next 5 reads should return valid values.
+  for (int i = 0; i < 5; ++i)
+    UmTestUtils::ExpectVariableHasValue(
+        static_cast<int64_t>(10 * 60),
+        provider_->var_test_update_check_interval_timeout());
+
+  // Make sure the value is not cached anywhere and has been deleted. The
+  // variable is allowed to be read 6 times.
+  UmTestUtils::ExpectVariableNotSet(
+      provider_->var_test_update_check_interval_timeout());
+}
+
 }  // namespace chromeos_update_manager
diff --git a/update_manager/rollback_prefs.h b/update_manager/rollback_prefs.h
index 11d09d6..6cbc447 100644
--- a/update_manager/rollback_prefs.h
+++ b/update_manager/rollback_prefs.h
@@ -31,9 +31,21 @@
   kDisabled = 1,
   kRollbackAndPowerwash = 2,
   kRollbackAndRestoreIfPossible = 3,
-  kRollbackOnlyIfRestorePossible = 4,
   // This value must be the last entry.
-  kMaxValue = 5
+  kMaxValue = 4
+};
+
+// Whether the device should do rollback and powerwash on channel downgrade.
+// Matches chrome_device_policy.proto's
+// |AutoUpdateSettingsProto::ChannelDowngradeBehavior|.
+enum class ChannelDowngradeBehavior {
+  kUnspecified = 0,
+  kWaitForVersionToCatchUp = 1,
+  kRollback = 2,
+  kAllowUserToConfigure = 3,
+  // These values must be kept up to date.
+  kFirstValue = kUnspecified,
+  kLastValue = kAllowUserToConfigure
 };
 
 }  // namespace chromeos_update_manager
diff --git a/update_manager/shill_provider.h b/update_manager/shill_provider.h
index c7bb2e2..ebe7a3a 100644
--- a/update_manager/shill_provider.h
+++ b/update_manager/shill_provider.h
@@ -19,7 +19,7 @@
 
 #include <base/time/time.h>
 
-#include "update_engine/connection_utils.h"
+#include "update_engine/common/connection_utils.h"
 #include "update_engine/update_manager/provider.h"
 #include "update_engine/update_manager/variable.h"
 
diff --git a/update_manager/staging_utils.cc b/update_manager/staging_utils.cc
index 4835ab2..a992975 100644
--- a/update_manager/staging_utils.cc
+++ b/update_manager/staging_utils.cc
@@ -26,12 +26,10 @@
 
 #include "update_engine/common/constants.h"
 #include "update_engine/common/hardware_interface.h"
-#include "update_engine/common/prefs_interface.h"
-#include "update_engine/system_state.h"
+#include "update_engine/common/system_state.h"
 
 using base::TimeDelta;
 using chromeos_update_engine::kPrefsWallClockStagingWaitPeriod;
-using chromeos_update_engine::PrefsInterface;
 using chromeos_update_engine::SystemState;
 using policy::DevicePolicy;
 
@@ -99,7 +97,6 @@
 }
 
 StagingCase CalculateStagingCase(const DevicePolicy* device_policy,
-                                 PrefsInterface* prefs,
                                  TimeDelta* staging_wait_time,
                                  StagingSchedule* staging_schedule) {
   // Check that the schedule in the device policy is correct.
@@ -129,7 +126,8 @@
   int64_t wait_period_in_days;
   // There exists a persisted value that is valid. That is, it is positive and
   // no larger than the maximum number of staging days set by the user.
-  if (prefs->GetInt64(kPrefsWallClockStagingWaitPeriod, &wait_period_in_days) &&
+  if (SystemState::Get()->prefs()->GetInt64(kPrefsWallClockStagingWaitPeriod,
+                                            &wait_period_in_days) &&
       wait_period_in_days > 0 && wait_period_in_days <= max_days) {
     *staging_wait_time = TimeDelta::FromDays(wait_period_in_days);
     return StagingCase::kSetStagingFromPref;
diff --git a/update_manager/staging_utils.h b/update_manager/staging_utils.h
index e91bfeb..0de1dfd 100644
--- a/update_manager/staging_utils.h
+++ b/update_manager/staging_utils.h
@@ -62,7 +62,6 @@
 // contain the previous staging schedule, if there is a new schedule found, its
 // value will be replaced with the new one.
 StagingCase CalculateStagingCase(const policy::DevicePolicy* device_policy,
-                                 chromeos_update_engine::PrefsInterface* prefs,
                                  base::TimeDelta* staging_wait_time,
                                  StagingSchedule* staging_schedule);
 
diff --git a/update_manager/staging_utils_unittest.cc b/update_manager/staging_utils_unittest.cc
index 8d75acd..126617f 100644
--- a/update_manager/staging_utils_unittest.cc
+++ b/update_manager/staging_utils_unittest.cc
@@ -24,10 +24,10 @@
 #include <policy/mock_device_policy.h>
 
 #include "update_engine/common/constants.h"
-#include "update_engine/common/fake_prefs.h"
+#include "update_engine/cros/fake_system_state.h"
 
 using base::TimeDelta;
-using chromeos_update_engine::FakePrefs;
+using chromeos_update_engine::FakeSystemState;
 using chromeos_update_engine::kPrefsWallClockStagingWaitPeriod;
 using testing::_;
 using testing::DoAll;
@@ -44,6 +44,7 @@
 class StagingUtilsScheduleTest : public testing::Test {
  protected:
   void SetUp() override {
+    FakeSystemState::CreateInstance();
     test_wait_time_ = TimeDelta();
     test_staging_schedule_ = StagingSchedule();
   }
@@ -55,14 +56,13 @@
   }
 
   void SetPersistedStagingVal(int64_t wait_time) {
-    EXPECT_TRUE(
-        fake_prefs_.SetInt64(kPrefsWallClockStagingWaitPeriod, wait_time));
+    EXPECT_TRUE(FakeSystemState::Get()->fake_prefs()->SetInt64(
+        kPrefsWallClockStagingWaitPeriod, wait_time));
   }
 
   void TestStagingCase(const StagingCase& expected) {
     EXPECT_EQ(expected,
               CalculateStagingCase(&device_policy_,
-                                   &fake_prefs_,
                                    &test_wait_time_,
                                    &test_staging_schedule_));
   }
@@ -75,7 +75,6 @@
   policy::MockDevicePolicy device_policy_;
   TimeDelta test_wait_time_;
   StagingSchedule test_staging_schedule_;
-  FakePrefs fake_prefs_;
 };
 
 // Last element should be 100, if not return false.
diff --git a/update_manager/state_factory.cc b/update_manager/state_factory.cc
index 78cec6a..0ab4f7b 100644
--- a/update_manager/state_factory.cc
+++ b/update_manager/state_factory.cc
@@ -23,34 +23,31 @@
 #include <session_manager/dbus-proxies.h>
 #endif  // USE_DBUS
 
-#include "update_engine/common/clock_interface.h"
 #if USE_DBUS
-#include "update_engine/dbus_connection.h"
+#include "update_engine/cros/dbus_connection.h"
 #endif  // USE_DBUS
+#include "update_engine/common/system_state.h"
+#include "update_engine/cros/shill_proxy.h"
 #include "update_engine/update_manager/fake_shill_provider.h"
 #include "update_engine/update_manager/real_config_provider.h"
 #include "update_engine/update_manager/real_device_policy_provider.h"
 #include "update_engine/update_manager/real_random_provider.h"
+#include "update_engine/update_manager/real_shill_provider.h"
 #include "update_engine/update_manager/real_state.h"
 #include "update_engine/update_manager/real_system_provider.h"
 #include "update_engine/update_manager/real_time_provider.h"
 #include "update_engine/update_manager/real_updater_provider.h"
-#if USE_SHILL
-#include "update_engine/shill_proxy.h"
-#include "update_engine/update_manager/real_shill_provider.h"
-#endif  // USE_SHILL
 
+using chromeos_update_engine::SystemState;
 using std::unique_ptr;
 
 namespace chromeos_update_manager {
 
 State* DefaultStateFactory(
     policy::PolicyProvider* policy_provider,
-    org::chromium::KioskAppServiceInterfaceProxyInterface* kiosk_app_proxy,
-    chromeos_update_engine::SystemState* system_state) {
-  chromeos_update_engine::ClockInterface* const clock = system_state->clock();
+    org::chromium::KioskAppServiceInterfaceProxyInterface* kiosk_app_proxy) {
   unique_ptr<RealConfigProvider> config_provider(
-      new RealConfigProvider(system_state->hardware()));
+      new RealConfigProvider(SystemState::Get()->hardware()));
 #if USE_DBUS
   scoped_refptr<dbus::Bus> bus =
       chromeos_update_engine::DBusConnection::Get()->GetDBus();
@@ -62,25 +59,18 @@
   unique_ptr<RealDevicePolicyProvider> device_policy_provider(
       new RealDevicePolicyProvider(policy_provider));
 #endif  // USE_DBUS
-#if USE_SHILL
   unique_ptr<RealShillProvider> shill_provider(
-      new RealShillProvider(new chromeos_update_engine::ShillProxy(), clock));
-#else
-  unique_ptr<FakeShillProvider> shill_provider(new FakeShillProvider());
-#endif  // USE_SHILL
+      new RealShillProvider(new chromeos_update_engine::ShillProxy()));
   unique_ptr<RealRandomProvider> random_provider(new RealRandomProvider());
-  unique_ptr<RealSystemProvider> system_provider(new RealSystemProvider(
-      system_state->hardware(), system_state->boot_control(), kiosk_app_proxy));
+  unique_ptr<RealSystemProvider> system_provider(
+      new RealSystemProvider(kiosk_app_proxy));
 
-  unique_ptr<RealTimeProvider> time_provider(new RealTimeProvider(clock));
-  unique_ptr<RealUpdaterProvider> updater_provider(
-      new RealUpdaterProvider(system_state));
+  unique_ptr<RealTimeProvider> time_provider(new RealTimeProvider());
+  unique_ptr<RealUpdaterProvider> updater_provider(new RealUpdaterProvider());
 
   if (!(config_provider->Init() && device_policy_provider->Init() &&
         random_provider->Init() &&
-#if USE_SHILL
         shill_provider->Init() &&
-#endif  // USE_SHILL
         system_provider->Init() && time_provider->Init() &&
         updater_provider->Init())) {
     LOG(ERROR) << "Error initializing providers";
diff --git a/update_manager/state_factory.h b/update_manager/state_factory.h
index 1c1c1d9..c53bb9c 100644
--- a/update_manager/state_factory.h
+++ b/update_manager/state_factory.h
@@ -17,7 +17,6 @@
 #ifndef UPDATE_ENGINE_UPDATE_MANAGER_STATE_FACTORY_H_
 #define UPDATE_ENGINE_UPDATE_MANAGER_STATE_FACTORY_H_
 
-#include "update_engine/system_state.h"
 #include "update_engine/update_manager/state.h"
 
 namespace org {
@@ -35,8 +34,7 @@
 // to initialize.
 State* DefaultStateFactory(
     policy::PolicyProvider* policy_provider,
-    org::chromium::KioskAppServiceInterfaceProxyInterface* kiosk_app_proxy,
-    chromeos_update_engine::SystemState* system_state);
+    org::chromium::KioskAppServiceInterfaceProxyInterface* kiosk_app_proxy);
 
 }  // namespace chromeos_update_manager
 
diff --git a/update_manager/system_provider.h b/update_manager/system_provider.h
index 13e188b..8eb14e3 100644
--- a/update_manager/system_provider.h
+++ b/update_manager/system_provider.h
@@ -17,6 +17,10 @@
 #ifndef UPDATE_ENGINE_UPDATE_MANAGER_SYSTEM_PROVIDER_H_
 #define UPDATE_ENGINE_UPDATE_MANAGER_SYSTEM_PROVIDER_H_
 
+#include <string>
+
+#include <base/version.h>
+
 #include "update_engine/update_manager/provider.h"
 #include "update_engine/update_manager/variable.h"
 
@@ -46,6 +50,9 @@
   // with zero delay kiosk app if any.
   virtual Variable<std::string>* var_kiosk_required_platform_version() = 0;
 
+  // Chrome OS version number as provided by |ImageProperties|.
+  virtual Variable<base::Version>* var_chromeos_version() = 0;
+
  protected:
   SystemProvider() {}
 
diff --git a/update_manager/update_manager-inl.h b/update_manager/update_manager-inl.h
index e9dee3f..045ecff 100644
--- a/update_manager/update_manager-inl.h
+++ b/update_manager/update_manager-inl.h
@@ -49,7 +49,6 @@
   ec->ResetEvaluation();
 
   const std::string policy_name = policy_->PolicyRequestName(policy_method);
-  LOG(INFO) << policy_name << ": START";
 
   // First try calling the actual policy.
   std::string error;
@@ -71,14 +70,12 @@
     }
   }
 
-  LOG(INFO) << policy_name << ": END";
-
   return status;
 }
 
 template <typename R, typename... Args>
 void UpdateManager::OnPolicyReadyToEvaluate(
-    scoped_refptr<EvaluationContext> ec,
+    std::shared_ptr<EvaluationContext> ec,
     base::Callback<void(EvalStatus status, const R& result)> callback,
     EvalStatus (Policy::*policy_method)(
         EvaluationContext*, State*, std::string*, R*, Args...) const,
@@ -119,8 +116,7 @@
         EvaluationContext*, State*, std::string*, R*, ExpectedArgs...) const,
     R* result,
     ActualArgs... args) {
-  scoped_refptr<EvaluationContext> ec(
-      new EvaluationContext(clock_, evaluation_timeout_));
+  auto ec = std::make_shared<EvaluationContext>(evaluation_timeout_);
   // A PolicyRequest always consists of a single evaluation on a new
   // EvaluationContext.
   // IMPORTANT: To ensure that ActualArgs can be converted to ExpectedArgs, we
@@ -141,15 +137,14 @@
     EvalStatus (Policy::*policy_method)(
         EvaluationContext*, State*, std::string*, R*, ExpectedArgs...) const,
     ActualArgs... args) {
-  scoped_refptr<EvaluationContext> ec = new EvaluationContext(
-      clock_,
+  auto ec = std::make_shared<EvaluationContext>(
       evaluation_timeout_,
       expiration_timeout_,
       std::unique_ptr<base::Callback<void(EvaluationContext*)>>(
           new base::Callback<void(EvaluationContext*)>(
               base::Bind(&UpdateManager::UnregisterEvalContext,
                          weak_ptr_factory_.GetWeakPtr()))));
-  if (!ec_repo_.insert(ec.get()).second) {
+  if (!ec_repo_.insert(ec).second) {
     LOG(ERROR) << "Failed to register evaluation context; this is a bug.";
   }
 
diff --git a/update_manager/update_manager.cc b/update_manager/update_manager.cc
index 5dfc09c..dbb6b33 100644
--- a/update_manager/update_manager.cc
+++ b/update_manager/update_manager.cc
@@ -15,32 +15,18 @@
 //
 
 #include "update_engine/update_manager/update_manager.h"
-
-#ifdef __ANDROID__
-#include "update_engine/update_manager/android_things_policy.h"
-#else
-#include "update_engine/update_manager/chromeos_policy.h"
-#endif  // __ANDROID__
 #include "update_engine/update_manager/state.h"
 
 namespace chromeos_update_manager {
 
-UpdateManager::UpdateManager(chromeos_update_engine::ClockInterface* clock,
-                             base::TimeDelta evaluation_timeout,
+UpdateManager::UpdateManager(base::TimeDelta evaluation_timeout,
                              base::TimeDelta expiration_timeout,
                              State* state)
-    : default_policy_(clock),
+    : policy_(GetSystemPolicy()),
       state_(state),
-      clock_(clock),
       evaluation_timeout_(evaluation_timeout),
       expiration_timeout_(expiration_timeout),
-      weak_ptr_factory_(this) {
-#ifdef __ANDROID__
-  policy_.reset(new AndroidThingsPolicy());
-#else
-  policy_.reset(new ChromeOSPolicy());
-#endif  // __ANDROID__
-}
+      weak_ptr_factory_(this) {}
 
 UpdateManager::~UpdateManager() {
   // Remove pending main loop events associated with any of the outstanding
@@ -50,8 +36,19 @@
     ec->RemoveObserversAndTimeout();
 }
 
+void UpdateManager::AsyncPolicyRequestUpdateCheckAllowed(
+    base::Callback<void(EvalStatus, const UpdateCheckParams& result)> callback,
+    EvalStatus (Policy::*policy_method)(
+        EvaluationContext*, State*, std::string*, UpdateCheckParams*) const) {
+  AsyncPolicyRequest(callback, policy_method);
+}
+
 void UpdateManager::UnregisterEvalContext(EvaluationContext* ec) {
-  if (!ec_repo_.erase(ec)) {
+  // Since |ec_repo_|'s compare function is based on the value of the raw
+  // pointer |ec|, we can just wrap |ec| in a non-owning |shared_ptr| (note the
+  // no-op deleter) and pass it along to be erased.
+  if (!ec_repo_.erase(
+          std::shared_ptr<EvaluationContext>(ec, [](EvaluationContext*) {}))) {
     LOG(ERROR) << "Unregistering an unknown evaluation context, this is a bug.";
   }
 }
diff --git a/update_manager/update_manager.h b/update_manager/update_manager.h
index b0fd97f..e266b57 100644
--- a/update_manager/update_manager.h
+++ b/update_manager/update_manager.h
@@ -22,10 +22,9 @@
 #include <string>
 
 #include <base/callback.h>
-#include <base/memory/ref_counted.h>
 #include <base/time/time.h>
 
-#include "update_engine/common/clock_interface.h"
+#include "update_engine/common/system_state.h"
 #include "update_engine/update_manager/default_policy.h"
 #include "update_engine/update_manager/evaluation_context.h"
 #include "update_engine/update_manager/policy.h"
@@ -33,22 +32,31 @@
 
 namespace chromeos_update_manager {
 
-// Comparator for scoped_refptr objects.
-template <typename T>
-struct ScopedRefPtrLess {
-  bool operator()(const scoped_refptr<T>& first,
-                  const scoped_refptr<T>& second) const {
-    return first.get() < second.get();
-  }
+// For simplicity, please do not move this class into a new file.
+// This pure virtual class exists purely for testing. The reason is that
+// |UpdateManager|'s member functions are templatized, which does not play
+// nicely with testing (mocking + faking). Whenever a specialized member of
+// |UpdateManager| must be tested, please add a corresponding specialized
+// member function to this class for testing.
+class SpecializedPolicyRequestInterface {
+ public:
+  virtual ~SpecializedPolicyRequestInterface() = default;
+
+  virtual void AsyncPolicyRequestUpdateCheckAllowed(
+      base::Callback<void(EvalStatus, const UpdateCheckParams& result)>
+          callback,
+      EvalStatus (Policy::*policy_method)(EvaluationContext*,
+                                          State*,
+                                          std::string*,
+                                          UpdateCheckParams*) const) = 0;
 };
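+
+// As a rough sketch of how this helps testing, a test can depend on this
+// interface instead of the templatized UpdateManager. |FakePolicyRequester|
+// below is a hypothetical test double, not part of this change:
+//
+//   class FakePolicyRequester : public SpecializedPolicyRequestInterface {
+//    public:
+//     void AsyncPolicyRequestUpdateCheckAllowed(
+//         base::Callback<void(EvalStatus, const UpdateCheckParams& result)>
+//             callback,
+//         EvalStatus (Policy::*)(EvaluationContext*,
+//                                State*,
+//                                std::string*,
+//                                UpdateCheckParams*) const) override {
+//       // Report a fixed result instead of evaluating a real policy.
+//       callback.Run(EvalStatus::kSucceeded, UpdateCheckParams());
+//     }
+//   };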
 
 // The main Update Manager singleton class.
-class UpdateManager {
+class UpdateManager : public SpecializedPolicyRequestInterface {
  public:
   // Creates the UpdateManager instance, assuming ownership of the provided
   // |state|.
-  UpdateManager(chromeos_update_engine::ClockInterface* clock,
-                base::TimeDelta evaluation_timeout,
+  UpdateManager(base::TimeDelta evaluation_timeout,
                 base::TimeDelta expiration_timeout,
                 State* state);
 
@@ -91,6 +99,14 @@
           EvaluationContext*, State*, std::string*, R*, ExpectedArgs...) const,
       ActualArgs... args);
 
+  void AsyncPolicyRequestUpdateCheckAllowed(
+      base::Callback<void(EvalStatus, const UpdateCheckParams& result)>
+          callback,
+      EvalStatus (Policy::*policy_method)(EvaluationContext*,
+                                          State*,
+                                          std::string*,
+                                          UpdateCheckParams*) const) override;
+
  protected:
   // The UpdateManager receives ownership of the passed Policy instance.
   void set_policy(const Policy* policy) { policy_.reset(policy); }
@@ -125,7 +141,7 @@
   // the evaluation will be re-scheduled to be called later.
   template <typename R, typename... Args>
   void OnPolicyReadyToEvaluate(
-      scoped_refptr<EvaluationContext> ec,
+      std::shared_ptr<EvaluationContext> ec,
       base::Callback<void(EvalStatus status, const R& result)> callback,
       EvalStatus (Policy::*policy_method)(
           EvaluationContext*, State*, std::string*, R*, Args...) const,
@@ -145,9 +161,6 @@
   // State Providers.
   std::unique_ptr<State> state_;
 
-  // Pointer to the mockable clock interface;
-  chromeos_update_engine::ClockInterface* clock_;
-
   // Timeout for a policy evaluation.
   const base::TimeDelta evaluation_timeout_;
 
@@ -159,9 +172,7 @@
   // destructed; alternatively, when the UpdateManager instance is destroyed, it
   // will remove all pending events associated with all outstanding contexts
   // (which should, in turn, trigger their destruction).
-  std::set<scoped_refptr<EvaluationContext>,
-           ScopedRefPtrLess<EvaluationContext>>
-      ec_repo_;
+  std::set<std::shared_ptr<EvaluationContext>> ec_repo_;
 
   base::WeakPtrFactory<UpdateManager> weak_ptr_factory_;
 
diff --git a/update_manager/update_manager_unittest.cc b/update_manager/update_manager_unittest.cc
index f1a8d17..a02d7ef 100644
--- a/update_manager/update_manager_unittest.cc
+++ b/update_manager/update_manager_unittest.cc
@@ -34,7 +34,7 @@
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
 
-#include "update_engine/common/fake_clock.h"
+#include "update_engine/cros/fake_system_state.h"
 #include "update_engine/update_manager/default_policy.h"
 #include "update_engine/update_manager/fake_state.h"
 #include "update_engine/update_manager/mock_policy.h"
@@ -48,6 +48,7 @@
 using brillo::MessageLoopRunMaxIterations;
 using chromeos_update_engine::ErrorCode;
 using chromeos_update_engine::FakeClock;
+using chromeos_update_engine::FakeSystemState;
 using std::pair;
 using std::string;
 using std::tuple;
@@ -80,11 +81,10 @@
  protected:
   void SetUp() override {
     loop_.SetAsCurrent();
+    FakeSystemState::CreateInstance();
     fake_state_ = new FakeState();
-    umut_.reset(new UpdateManager(&fake_clock_,
-                                  TimeDelta::FromSeconds(5),
-                                  TimeDelta::FromSeconds(1),
-                                  fake_state_));
+    umut_.reset(new UpdateManager(
+        TimeDelta::FromSeconds(5), TimeDelta::FromSeconds(1), fake_state_));
   }
 
   void TearDown() override { EXPECT_FALSE(loop_.PendingTasks()); }
@@ -92,7 +92,6 @@
   base::SimpleTestClock test_clock_;
   brillo::FakeMessageLoop loop_{&test_clock_};
   FakeState* fake_state_;  // Owned by the umut_.
-  FakeClock fake_clock_;
   unique_ptr<UpdateManager> umut_;
 };
 
@@ -291,13 +290,14 @@
 }
 
 TEST_F(UmUpdateManagerTest, AsyncPolicyRequestTimesOut) {
+  auto* fake_clock = FakeSystemState::Get()->fake_clock();
   // Set up an async policy call to exceed its expiration timeout, make sure
   // that the default policy was not used (no callback) and that evaluation is
   // reattempted.
   int num_called = 0;
   umut_->set_policy(new DelayPolicy(
       0,
-      fake_clock_.GetWallclockTime() + TimeDelta::FromSeconds(3),
+      fake_clock->GetWallclockTime() + TimeDelta::FromSeconds(3),
       &num_called));
 
   vector<pair<EvalStatus, UpdateCheckParams>> calls;
@@ -314,7 +314,7 @@
   // ensure that reevaluation occurred but callback was not invoked (i.e.
   // default policy was not consulted).
   test_clock_.Advance(TimeDelta::FromSeconds(2));
-  fake_clock_.SetWallclockTime(fake_clock_.GetWallclockTime() +
+  fake_clock->SetWallclockTime(fake_clock->GetWallclockTime() +
                                TimeDelta::FromSeconds(2));
   MessageLoopRunMaxIterations(MessageLoop::current(), 10);
   EXPECT_EQ(2, num_called);
@@ -322,7 +322,7 @@
   // Wait for reevaluation due to delay to happen, ensure that it occurs and
   // that the callback is invoked.
   test_clock_.Advance(TimeDelta::FromSeconds(2));
-  fake_clock_.SetWallclockTime(fake_clock_.GetWallclockTime() +
+  fake_clock->SetWallclockTime(fake_clock->GetWallclockTime() +
                                TimeDelta::FromSeconds(2));
   MessageLoopRunMaxIterations(MessageLoop::current(), 10);
   EXPECT_EQ(3, num_called);
diff --git a/update_manager/update_time_restrictions_monitor.cc b/update_manager/update_time_restrictions_monitor.cc
new file mode 100644
index 0000000..00e6ec3
--- /dev/null
+++ b/update_manager/update_time_restrictions_monitor.cc
@@ -0,0 +1,132 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_manager/update_time_restrictions_monitor.h"
+
+#include <base/bind.h>
+#include <base/time/time.h>
+
+#include "update_engine/common/system_state.h"
+
+using base::TimeDelta;
+using brillo::MessageLoop;
+using chromeos_update_engine::SystemState;
+
+namespace chromeos_update_manager {
+
+namespace {
+
+const WeeklyTimeInterval* FindNextNearestInterval(
+    const WeeklyTimeIntervalVector& intervals, const WeeklyTime& now) {
+  const WeeklyTimeInterval* result_interval = nullptr;
+  // As we are dealing with weekly time here, the maximum duration can be one
+  // week.
+  TimeDelta duration_till_next_interval = TimeDelta::FromDays(7);
+  for (const auto& interval : intervals) {
+    if (interval.InRange(now)) {
+      return &interval;
+    }
+    const TimeDelta current_duration = now.GetDurationTo(interval.start());
+    if (current_duration < duration_till_next_interval) {
+      result_interval = &interval;
+      duration_till_next_interval = current_duration;
+    }
+  }
+  return result_interval;
+}
+
+WeeklyTime Now() {
+  return WeeklyTime::FromTime(SystemState::Get()->clock()->GetWallclockTime());
+}
+
+}  // namespace
+
+UpdateTimeRestrictionsMonitor::UpdateTimeRestrictionsMonitor(
+    DevicePolicyProvider* device_policy_provider, Delegate* delegate)
+    : evaluation_context_(/* evaluation_timeout = */ TimeDelta::Max(),
+                          /* expiration_timeout = */ TimeDelta::Max(),
+                          /* unregister_cb = */ {}),
+      device_policy_provider_(device_policy_provider),
+      delegate_(delegate),
+      weak_ptr_factory_(this) {
+  if (device_policy_provider_ != nullptr && delegate_ != nullptr)
+    StartMonitoring();
+}
+
+UpdateTimeRestrictionsMonitor::~UpdateTimeRestrictionsMonitor() {
+  StopMonitoring();
+}
+
+void UpdateTimeRestrictionsMonitor::StartMonitoring() {
+  DCHECK(device_policy_provider_);
+  const WeeklyTimeIntervalVector* new_intervals = evaluation_context_.GetValue(
+      device_policy_provider_->var_disallowed_time_intervals());
+  if (new_intervals && !new_intervals->empty())
+    WaitForRestrictedIntervalStarts(*new_intervals);
+
+  const bool is_registered = evaluation_context_.RunOnValueChangeOrTimeout(
+      base::Bind(&UpdateTimeRestrictionsMonitor::OnIntervalsChanged,
+                 base::Unretained(this)));
+  DCHECK(is_registered);
+}
+
+void UpdateTimeRestrictionsMonitor::WaitForRestrictedIntervalStarts(
+    const WeeklyTimeIntervalVector& restricted_time_intervals) {
+  DCHECK(!restricted_time_intervals.empty());
+
+  const WeeklyTimeInterval* current_interval =
+      FindNextNearestInterval(restricted_time_intervals, Now());
+  if (current_interval == nullptr) {
+    LOG(WARNING) << "Could not find next nearest restricted interval.";
+    return;
+  }
+
+  // If |current_interval| happens right now, set delay to zero.
+  const TimeDelta duration_till_start =
+      current_interval->InRange(Now())
+          ? TimeDelta::FromMicroseconds(0)
+          : Now().GetDurationTo(current_interval->start());
+  LOG(INFO) << "Found restricted interval starting at "
+            << (SystemState::Get()->clock()->GetWallclockTime() +
+                duration_till_start);
+
+  timeout_event_ = MessageLoop::current()->PostDelayedTask(
+      FROM_HERE,
+      base::Bind(&UpdateTimeRestrictionsMonitor::HandleRestrictedIntervalStarts,
+                 weak_ptr_factory_.GetWeakPtr()),
+      duration_till_start);
+}
+
+void UpdateTimeRestrictionsMonitor::HandleRestrictedIntervalStarts() {
+  timeout_event_ = MessageLoop::kTaskIdNull;
+  if (delegate_)
+    delegate_->OnRestrictedIntervalStarts();
+}
+
+void UpdateTimeRestrictionsMonitor::StopMonitoring() {
+  MessageLoop::current()->CancelTask(timeout_event_);
+  timeout_event_ = MessageLoop::kTaskIdNull;
+}
+
+void UpdateTimeRestrictionsMonitor::OnIntervalsChanged() {
+  DCHECK(!evaluation_context_.is_expired());
+
+  StopMonitoring();
+  evaluation_context_.ResetEvaluation();
+  StartMonitoring();
+}
+
+}  // namespace chromeos_update_manager
diff --git a/update_manager/update_time_restrictions_monitor.h b/update_manager/update_time_restrictions_monitor.h
new file mode 100644
index 0000000..034ac87
--- /dev/null
+++ b/update_manager/update_time_restrictions_monitor.h
@@ -0,0 +1,105 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_UPDATE_MANAGER_UPDATE_TIME_RESTRICTIONS_MONITOR_H_
+#define UPDATE_ENGINE_UPDATE_MANAGER_UPDATE_TIME_RESTRICTIONS_MONITOR_H_
+
+#include <memory>
+
+#include <base/memory/weak_ptr.h>
+#include <brillo/message_loops/message_loop.h>
+
+#include "update_engine/update_manager/device_policy_provider.h"
+#include "update_engine/update_manager/evaluation_context.h"
+#include "update_engine/update_manager/weekly_time.h"
+
+namespace chromeos_update_manager {
+
+// Represents a monitor tracking start of restricted time intervals during which
+// update download is not allowed. It reads |var_disallowed_time_intervals|,
+// chooses the next interval according to current time, awaits its start and
+// notifies the delegate. If the chosen interval is already happening, the
+// monitor notifies immediately. The monitor will never notify the delegate
+// while the current list of restricted intervals is empty.
+//
+// The monitor detects changes in the restricted intervals and handles the
+// change according to the following cases:
+// 1. No restricted time intervals or none of the intervals is in progress -> no
+//    new restricted intervals or none of the new intervals matches the current
+//    time.
+//    The monitor starts tracking the next interval from the new ones, if any.
+// 2. No restricted time intervals or none of the intervals is in progress ->
+//    there is a new interval matching current time.
+//    The monitor shall pick this new interval and notify the delegate
+//    immediately about the start of the restricted interval.
+class UpdateTimeRestrictionsMonitor {
+ public:
+  // Interface to handle start of a restricted time interval.
+  class Delegate {
+   public:
+    virtual ~Delegate() = default;
+
+    virtual void OnRestrictedIntervalStarts() = 0;
+  };
+
+  // Creates an instance and starts monitoring the next nearest restricted time
+  // interval, if present. If no intervals are available yet, the monitor will
+  // be idle until the intervals list changes.
+  UpdateTimeRestrictionsMonitor(DevicePolicyProvider* device_policy_provider,
+                                Delegate* delegate);
+
+  UpdateTimeRestrictionsMonitor(const UpdateTimeRestrictionsMonitor&) = delete;
+  UpdateTimeRestrictionsMonitor& operator=(
+      const UpdateTimeRestrictionsMonitor&) = delete;
+
+  ~UpdateTimeRestrictionsMonitor();
+
+  bool IsMonitoringInterval() {
+    return timeout_event_ != brillo::MessageLoop::kTaskIdNull;
+  }
+
+ private:
+  // Starts monitoring the start of nearest restricted time interval if present
+  // and any change in restricted time intervals from policy.
+  void StartMonitoring();
+  void WaitForRestrictedIntervalStarts(
+      const WeeklyTimeIntervalVector& restricted_time_intervals);
+
+  // Called when current time lies within a restricted interval.
+  void HandleRestrictedIntervalStarts();
+
+  // Stop monitoring any restricted intervals.
+  void StopMonitoring();
+
+  // Called upon change of restricted intervals.
+  void OnIntervalsChanged();
+
+  // To access restricted time intervals from |device_policy_provider_|.
+  EvaluationContext evaluation_context_;
+
+  DevicePolicyProvider* const device_policy_provider_;
+  Delegate* const delegate_;
+
+  // The TaskId returned by the message loop identifying the timeout callback.
+  // Used for cancelling the timeout callback.
+  brillo::MessageLoop::TaskId timeout_event_{brillo::MessageLoop::kTaskIdNull};
+
+  base::WeakPtrFactory<UpdateTimeRestrictionsMonitor> weak_ptr_factory_;
+};
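+
+// A minimal usage sketch (illustrative only; |MyDelegate| and
+// |device_policy_provider| are hypothetical, not part of this change):
+//
+//   class MyDelegate : public UpdateTimeRestrictionsMonitor::Delegate {
+//    public:
+//     void OnRestrictedIntervalStarts() override {
+//       LOG(INFO) << "Entered a disallowed interval; pause the download.";
+//     }
+//   };
+//
+//   MyDelegate delegate;
+//   UpdateTimeRestrictionsMonitor monitor(device_policy_provider, &delegate);
+//   // |delegate| is notified when a restricted interval begins, or
+//   // immediately if one is already in progress.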
+
+}  // namespace chromeos_update_manager
+
+#endif  // UPDATE_ENGINE_UPDATE_MANAGER_UPDATE_TIME_RESTRICTIONS_MONITOR_H_
diff --git a/update_manager/update_time_restrictions_monitor_unittest.cc b/update_manager/update_time_restrictions_monitor_unittest.cc
new file mode 100644
index 0000000..2e474e2
--- /dev/null
+++ b/update_manager/update_time_restrictions_monitor_unittest.cc
@@ -0,0 +1,279 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <memory>
+
+#include <base/optional.h>
+#include <base/time/time.h>
+#include <base/test/simple_test_clock.h>
+#include <brillo/message_loops/fake_message_loop.h>
+#include <brillo/message_loops/message_loop_utils.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/cros/fake_system_state.h"
+#include "update_engine/update_manager/fake_state.h"
+#include "update_engine/update_manager/update_time_restrictions_monitor.h"
+
+using brillo::FakeMessageLoop;
+using brillo::MessageLoop;
+using brillo::MessageLoopRunMaxIterations;
+using chromeos_update_engine::FakeSystemState;
+
+namespace chromeos_update_manager {
+
+namespace {
+
+constexpr base::TimeDelta kDurationOffset = base::TimeDelta::FromMinutes(1);
+constexpr base::TimeDelta kHourDuration = base::TimeDelta::FromHours(1);
+constexpr base::TimeDelta kMinuteDuration = base::TimeDelta::FromMinutes(1);
+// Initial time: Monday, May 4th 2020 10:13 AM before interval.
+constexpr base::Time::Exploded kInitialTimeBeforeInterval{
+    2020, 5, 0, 4, 10, 13, 0, 0};
+// Initial time: Monday, May 4th 2020 10:20 AM within interval.
+constexpr base::Time::Exploded kInitialTimeWithinInterval{
+    2020, 5, 0, 4, 10, 20, 0, 0};
+const int current_restricted_interval_index = 0;
+
+const WeeklyTimeIntervalVector kTestOneDisallowedTimeIntervals{
+    // Monday 8:15 AM to Monday 9:30 AM.
+    WeeklyTimeInterval(WeeklyTime(1, kHourDuration * 8 + kMinuteDuration * 15),
+                       WeeklyTime(1, kHourDuration * 9 + kMinuteDuration * 30)),
+};
+
+const WeeklyTimeIntervalVector kTestTwoDisallowedTimeIntervals{
+    // Monday 10:15 AM to Monday 3:30 PM.
+    WeeklyTimeInterval(
+        WeeklyTime(1, kHourDuration * 10 + kMinuteDuration * 15),
+        WeeklyTime(1, kHourDuration * 15 + kMinuteDuration * 30)),
+    // Wednesday 8:30 PM to Thursday 8:40 AM.
+    WeeklyTimeInterval(WeeklyTime(3, kHourDuration * 20 + kMinuteDuration * 30),
+                       WeeklyTime(4, kHourDuration * 8 + kMinuteDuration * 40)),
+};
+
+}  // namespace
+
+class MockUpdateTimeRestrictionsMonitorDelegate
+    : public UpdateTimeRestrictionsMonitor::Delegate {
+ public:
+  virtual ~MockUpdateTimeRestrictionsMonitorDelegate() = default;
+
+  MOCK_METHOD0(OnRestrictedIntervalStarts, void());
+};
+
+class UmUpdateTimeRestrictionsMonitorTest : public ::testing::Test {
+ protected:
+  UmUpdateTimeRestrictionsMonitorTest() {
+    fake_loop_.SetAsCurrent();
+    FakeSystemState::CreateInstance();
+  }
+
+  void TearDown() override { EXPECT_FALSE(fake_loop_.PendingTasks()); }
+
+  bool SetNow(const base::Time::Exploded& exploded_now) {
+    base::Time now;
+    if (!base::Time::FromLocalExploded(exploded_now, &now))
+      return false;
+
+    test_clock_.SetNow(now);
+    FakeSystemState::Get()->fake_clock()->SetWallclockTime(now);
+    return true;
+  }
+
+  void AdvanceAfterTimestamp(const WeeklyTime& timestamp) {
+    const WeeklyTime now = WeeklyTime::FromTime(test_clock_.Now());
+    const base::TimeDelta duration =
+        now.GetDurationTo(timestamp) + kDurationOffset;
+    test_clock_.Advance(duration);
+    FakeSystemState::Get()->fake_clock()->SetWallclockTime(test_clock_.Now());
+  }
+
+  void VerifyExpectationsOnDelegate() {
+    testing::Mock::VerifyAndClearExpectations(&mock_delegate_);
+  }
+
+  void UpdateRestrictedIntervals(const WeeklyTimeIntervalVector& policy_value) {
+    auto* policy_variable =
+        fake_state_.device_policy_provider()->var_disallowed_time_intervals();
+    policy_variable->reset(new WeeklyTimeIntervalVector(policy_value));
+    policy_variable->NotifyValueChanged();
+  }
+
+  bool IsMonitoringInterval() {
+    return monitor_.has_value() && monitor_.value().IsMonitoringInterval();
+  }
+
+  void BuildMonitorAndVerify(const WeeklyTimeIntervalVector* policy_value,
+                             bool expect_delegate_called,
+                             bool expect_monitoring) {
+    if (expect_delegate_called)
+      EXPECT_CALL(mock_delegate_, OnRestrictedIntervalStarts()).Times(1);
+    else
+      EXPECT_CALL(mock_delegate_, OnRestrictedIntervalStarts()).Times(0);
+
+    fake_state_.device_policy_provider()
+        ->var_disallowed_time_intervals()
+        ->reset(policy_value != nullptr
+                    ? new WeeklyTimeIntervalVector(*policy_value)
+                    : nullptr);
+    monitor_.emplace(fake_state_.device_policy_provider(), &mock_delegate_);
+    if (expect_delegate_called)
+      MessageLoopRunMaxIterations(MessageLoop::current(), 10);
+    VerifyExpectationsOnDelegate();
+
+    if (expect_monitoring)
+      EXPECT_TRUE(IsMonitoringInterval());
+    else
+      EXPECT_FALSE(IsMonitoringInterval());
+  }
+
+  base::SimpleTestClock test_clock_;
+  FakeMessageLoop fake_loop_{&test_clock_};
+  FakeState fake_state_;
+  MockUpdateTimeRestrictionsMonitorDelegate mock_delegate_;
+  base::Optional<UpdateTimeRestrictionsMonitor> monitor_;
+};
+
+TEST_F(UmUpdateTimeRestrictionsMonitorTest, PolicyIsNotSet) {
+  BuildMonitorAndVerify(
+      nullptr, /*expect_delegate_called=*/false, /*expect_monitoring=*/false);
+}
+
+TEST_F(UmUpdateTimeRestrictionsMonitorTest, PolicyHasEmptyIntervalList) {
+  WeeklyTimeIntervalVector empty_policy;
+  BuildMonitorAndVerify(&empty_policy,
+                        /*expect_delegate_called=*/false,
+                        /*expect_monitoring=*/false);
+}
+
+TEST_F(UmUpdateTimeRestrictionsMonitorTest,
+       CurrentTimeOutsideOfRestrictedInterval) {
+  ASSERT_TRUE(SetNow(kInitialTimeBeforeInterval));
+  BuildMonitorAndVerify(&kTestTwoDisallowedTimeIntervals,
+                        /*expect_delegate_called=*/false,
+                        /*expect_monitoring=*/true);
+
+  // Monitor should only notify once time passes the start of the interval.
+  EXPECT_CALL(mock_delegate_, OnRestrictedIntervalStarts()).Times(1);
+  AdvanceAfterTimestamp(
+      kTestTwoDisallowedTimeIntervals[current_restricted_interval_index]
+          .start());
+  MessageLoopRunMaxIterations(MessageLoop::current(), 10);
+  VerifyExpectationsOnDelegate();
+}
+
+TEST_F(UmUpdateTimeRestrictionsMonitorTest,
+       CurrentTimeWithinRestrictedInterval) {
+  // Monitor should notify start when it is built with current
+  // time within interval.
+  ASSERT_TRUE(SetNow(kInitialTimeWithinInterval));
+  BuildMonitorAndVerify(&kTestTwoDisallowedTimeIntervals,
+                        /*expect_delegate_called=*/true,
+                        /*expect_monitoring=*/false);
+}
+
+TEST_F(UmUpdateTimeRestrictionsMonitorTest,
+       PolicyChangeFromNotSetToOutsideInterval) {
+  // Build monitor with empty initial list of intervals.
+  BuildMonitorAndVerify(
+      nullptr, /*expect_delegate_called=*/false, /*expect_monitoring=*/false);
+
+  // Monitor should not do any notification right after intervals update.
+  ASSERT_TRUE(SetNow(kInitialTimeBeforeInterval));
+  EXPECT_CALL(mock_delegate_, OnRestrictedIntervalStarts()).Times(0);
+  UpdateRestrictedIntervals(kTestTwoDisallowedTimeIntervals);
+  MessageLoopRunMaxIterations(MessageLoop::current(), 10);
+  VerifyExpectationsOnDelegate();
+  EXPECT_TRUE(IsMonitoringInterval());
+
+  // Advance time into the new interval and check that notification happens.
+  EXPECT_CALL(mock_delegate_, OnRestrictedIntervalStarts()).Times(1);
+  AdvanceAfterTimestamp(
+      kTestTwoDisallowedTimeIntervals[current_restricted_interval_index]
+          .start());
+  MessageLoopRunMaxIterations(MessageLoop::current(), 10);
+  VerifyExpectationsOnDelegate();
+}
+
+TEST_F(UmUpdateTimeRestrictionsMonitorTest,
+       PolicyChangeFromNotSetToWithinInterval) {
+  // Build monitor with empty initial list of intervals.
+  BuildMonitorAndVerify(
+      nullptr, /*expect_delegate_called=*/false, /*expect_monitoring=*/false);
+
+  // Set the current time inside the upcoming new interval and update the
+  // intervals. Monitor should immediately notify about the started interval.
+  ASSERT_TRUE(SetNow(kInitialTimeWithinInterval));
+  EXPECT_CALL(mock_delegate_, OnRestrictedIntervalStarts()).Times(1);
+  UpdateRestrictedIntervals(kTestTwoDisallowedTimeIntervals);
+  MessageLoopRunMaxIterations(MessageLoop::current(), 10);
+  VerifyExpectationsOnDelegate();
+}
+
+TEST_F(UmUpdateTimeRestrictionsMonitorTest,
+       PolicyChangeFromNotSetToEmptyInterval) {
+  BuildMonitorAndVerify(
+      nullptr, /*expect_delegate_called=*/false, /*expect_monitoring=*/false);
+
+  EXPECT_CALL(mock_delegate_, OnRestrictedIntervalStarts()).Times(0);
+  UpdateRestrictedIntervals(WeeklyTimeIntervalVector());
+  MessageLoopRunMaxIterations(MessageLoop::current(), 10);
+  VerifyExpectationsOnDelegate();
+  EXPECT_FALSE(IsMonitoringInterval());
+}
+
+TEST_F(UmUpdateTimeRestrictionsMonitorTest,
+       PolicyChangeFromOneOutsideIntervalToAnother) {
+  // Build monitor with current time outside the intervals.
+  BuildMonitorAndVerify(&kTestTwoDisallowedTimeIntervals,
+                        /*expect_delegate_called=*/false,
+                        /*expect_monitoring=*/true);
+
+  // Update the intervals so the current time falls outside of them; no
+  // notification should happen yet.
+  EXPECT_CALL(mock_delegate_, OnRestrictedIntervalStarts()).Times(0);
+  UpdateRestrictedIntervals(kTestOneDisallowedTimeIntervals);
+  MessageLoopRunMaxIterations(MessageLoop::current(), 10);
+  VerifyExpectationsOnDelegate();
+
+  // Advance time within new interval. Monitor should notify about started
+  // interval.
+  EXPECT_CALL(mock_delegate_, OnRestrictedIntervalStarts()).Times(1);
+  AdvanceAfterTimestamp(
+      kTestOneDisallowedTimeIntervals[current_restricted_interval_index]
+          .start());
+  MessageLoopRunMaxIterations(MessageLoop::current(), 10);
+  VerifyExpectationsOnDelegate();
+}
+
+TEST_F(UmUpdateTimeRestrictionsMonitorTest,
+       PolicyChangeFromOutsideIntervalToWithin) {
+  ASSERT_TRUE(SetNow(kInitialTimeWithinInterval));
+
+  // Build monitor with current time outside the intervals.
+  BuildMonitorAndVerify(&kTestOneDisallowedTimeIntervals,
+                        /*expect_delegate_called=*/false,
+                        /*expect_monitoring=*/true);
+
+  // Update interval such that current time is within it. Monitor should notify
+  // about started interval.
+  EXPECT_CALL(mock_delegate_, OnRestrictedIntervalStarts()).Times(1);
+  UpdateRestrictedIntervals(kTestTwoDisallowedTimeIntervals);
+  MessageLoopRunMaxIterations(MessageLoop::current(), 10);
+  VerifyExpectationsOnDelegate();
+}
+
+}  // namespace chromeos_update_manager
diff --git a/update_manager/update_time_restrictions_policy_impl_unittest.cc b/update_manager/update_time_restrictions_policy_impl_unittest.cc
index 74e7f3c..f99a285 100644
--- a/update_manager/update_time_restrictions_policy_impl_unittest.cc
+++ b/update_manager/update_time_restrictions_policy_impl_unittest.cc
@@ -60,7 +60,7 @@
 
     Time time;
     EXPECT_TRUE(Time::FromLocalExploded(exploded, &time));
-    fake_clock_.SetWallclockTime(time);
+    fake_clock_->SetWallclockTime(time);
     SetUpDefaultTimeProvider();
     fake_state_.device_policy_provider()
         ->var_disallowed_time_intervals()
diff --git a/update_manager/updater_provider.h b/update_manager/updater_provider.h
index cb62623..86af1c8 100644
--- a/update_manager/updater_provider.h
+++ b/update_manager/updater_provider.h
@@ -36,6 +36,7 @@
   kUpdatedNeedReboot,
   kReportingErrorEvent,
   kAttemptingRollback,
+  kCleanupPreviousUpdate,
 };
 
 enum class UpdateRequestStatus {
@@ -115,6 +116,10 @@
   // for all updates.
   virtual Variable<UpdateRestrictions>* var_update_restrictions() = 0;
 
+  // A variable that returns the number of seconds for the first update check to
+  // happen.
+  virtual Variable<int64_t>* var_test_update_check_interval_timeout() = 0;
+
  protected:
   UpdaterProvider() {}
 
diff --git a/update_manager/variable.h b/update_manager/variable.h
index 6c7d350..9ac7dae 100644
--- a/update_manager/variable.h
+++ b/update_manager/variable.h
@@ -83,6 +83,10 @@
   // variable. In other case, it returns 0.
   base::TimeDelta GetPollInterval() const { return poll_interval_; }
 
+  // Returns true if the value for this variable is expected to be missing
+  // sometimes, so confusing error logs can be avoided.
+  bool IsMissingOk() const { return missing_ok_; }
+
   // Adds and removes observers for value changes on the variable. This only
   // works for kVariableAsync variables since the other modes don't track value
   // changes. Adding the same observer twice has no effect.
@@ -115,6 +119,8 @@
     poll_interval_ = poll_interval;
   }
 
+  void SetMissingOk() { missing_ok_ = true; }
+
   // Calls ValueChanged on all the observers.
   void NotifyValueChanged() {
     // Fire all the observer methods from the main loop as single call. In order
@@ -140,7 +146,8 @@
       : name_(name),
         mode_(mode),
         poll_interval_(mode == kVariableModePoll ? poll_interval
-                                                 : base::TimeDelta()) {}
+                                                 : base::TimeDelta()),
+        missing_ok_(false) {}
 
   void OnValueChangedNotification() {
     // A ValueChanged() method can change the list of observers, for example
@@ -174,6 +181,9 @@
   // The list of value changes observers.
   std::list<BaseVariable::ObserverInterface*> observer_list_;
 
+  // Defines whether this variable is expected to have no value.
+  bool missing_ok_;
+
   DISALLOW_COPY_AND_ASSIGN(BaseVariable);
 };
 
diff --git a/update_metadata.proto b/update_metadata.proto
index 9bc0d8a..93e4e2e 100644
--- a/update_metadata.proto
+++ b/update_metadata.proto
@@ -78,7 +78,7 @@
 //   new partition.
 // - ZERO: Write zeros to the destination dst_extents.
 // - DISCARD: Discard the destination dst_extents blocks on the physical medium.
-//   the data read from those block is undefined.
+//   the data read from those blocks is undefined.
 // - REPLACE_XZ: Replace the dst_extents with the contents of the attached
 //   xz file after decompression. The xz file should only use crc32 or no crc at
 //   all to be compatible with xz-embedded.
@@ -153,35 +153,35 @@
 //
 // All fields will be set, if this message is present.
 message ImageInfo {
-  optional string board = 1;
-  optional string key = 2;
-  optional string channel = 3;
-  optional string version = 4;
+  optional string board = 1 [deprecated = true];
+  optional string key = 2 [deprecated = true];
+  optional string channel = 3 [deprecated = true];
+  optional string version = 4 [deprecated = true];
 
   // If these values aren't present, they should be assumed to match
   // the equivalent value above. They are normally only different for
   // special image types such as nplusone images.
-  optional string build_channel = 5;
-  optional string build_version = 6;
+  optional string build_channel = 5 [deprecated = true];
+  optional string build_version = 6 [deprecated = true];
 }
 
 message InstallOperation {
   enum Type {
-    REPLACE = 0;  // Replace destination extents w/ attached data
-    REPLACE_BZ = 1;  // Replace destination extents w/ attached bzipped data
-    MOVE = 2 [deprecated = true];  // Move source extents to destination extents
-    BSDIFF = 3 [deprecated = true];  // The data is a bsdiff binary diff
+    REPLACE = 0;     // Replace destination extents w/ attached data.
+    REPLACE_BZ = 1;  // Replace destination extents w/ attached bzipped data.
+    MOVE = 2 [deprecated = true];    // Move source extents to target extents.
+    BSDIFF = 3 [deprecated = true];  // The data is a bsdiff binary diff.
 
     // On minor version 2 or newer, these operations are supported:
-    SOURCE_COPY = 4; // Copy from source to target partition
-    SOURCE_BSDIFF = 5; // Like BSDIFF, but read from source partition
+    SOURCE_COPY = 4;    // Copy from source to target partition
+    SOURCE_BSDIFF = 5;  // Like BSDIFF, but read from source partition
 
     // On minor version 3 or newer and on major version 2 or newer, these
     // operations are supported:
-    REPLACE_XZ = 8; // Replace destination extents w/ attached xz data.
+    REPLACE_XZ = 8;  // Replace destination extents w/ attached xz data.
 
     // On minor version 4 or newer, these operations are supported:
-    ZERO = 6;  // Write zeros in the destination.
+    ZERO = 6;     // Write zeros in the destination.
     DISCARD = 7;  // Discard the destination blocks, reading as undefined.
     BROTLI_BSDIFF = 10;  // Like SOURCE_BSDIFF, but compressed with brotli.
 
@@ -225,6 +225,22 @@
   optional bytes src_sha256_hash = 9;
 }
 
+// Hints to VAB snapshot to skip writing some blocks if these blocks are
+// identical to the ones on the source image. The src & dst extents for each
+// CowMergeOperation should be contiguous, and they're a subset of an OTA
+// InstallOperation.
+// During the merge, we need to follow the pre-computed sequence to avoid
+// read-after-write hazards, similar to the in-place update scheme.
+message CowMergeOperation {
+  enum Type {
+    COW_COPY = 0;  // identical blocks
+  }
+  optional Type type = 1;
+
+  optional Extent src_extent = 2;
+  optional Extent dst_extent = 3;
+}
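+
+// For illustration only (block numbers are made up), a CowMergeOperation
+// saying that 10 source blocks starting at block 100 are byte-identical to 10
+// destination blocks starting at block 200 would look like this in text
+// format:
+//   type: COW_COPY
+//   src_extent { start_block: 100 num_blocks: 10 }
+//   dst_extent { start_block: 200 num_blocks: 10 }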
+
 // Describes the update to apply to a single partition.
 message PartitionUpdate {
   // A platform-specific name to identify the partition set being updated. For
@@ -288,6 +304,21 @@
 
   // The number of FEC roots.
   optional uint32 fec_roots = 16 [default = 2];
+
+  // Per-partition version used for downgrade detection, added
+  // as an effort to support partial updates. For most partitions,
+  // this is the build timestamp.
+  optional string version = 17;
+
+  // A sorted list of CowMergeOperation. When writing the COW, we can choose to
+  // skip writing the raw bytes for these extents. During the snapshot merge,
+  // the bytes will be read from the source partitions instead.
+  repeated CowMergeOperation merge_operations = 18;
+
+  // Estimated size for COW image. This is used by libsnapshot
+  // as a hint. If set to 0, libsnapshot should use alternative
+  // methods for estimating size.
+  optional uint64 estimate_cow_size = 19;
 }
 
 message DynamicPartitionGroup {
@@ -317,14 +348,44 @@
   // partitions if possible. If this is unset, the update_engine daemon MUST
   // NOT create snapshots for dynamic partitions.
   optional bool snapshot_enabled = 2;
+
+  // If this is set to false, update_engine should not use VABC, regardless of
+  // device support. If this is set to true, update_engine may choose to use
+  // VABC if the device supports it, but this is not guaranteed.
+  // VABC stands for Virtual AB Compression.
+  optional bool vabc_enabled = 3;
+
+  // The compression algorithm used by VABC. Available ones are "gz", "brotli".
+  // See system/core/fs_mgr/libsnapshot/cow_writer.cpp for available options,
+  // as this parameter is ultimately forwarded to libsnapshot's CowWriter.
+  optional string vabc_compression_param = 4;
+
+  // COW version used by VABC. This represents the major version in the COW
+  // header.
+  optional uint32 cow_version = 5;
+}
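+
+// For illustration only (values are made up), a manifest that opts into VABC
+// with brotli compression could carry:
+//   dynamic_partition_metadata {
+//     snapshot_enabled: true
+//     vabc_enabled: true
+//     vabc_compression_param: "brotli"
+//     cow_version: 2
+//   }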
+
+// Definition has been duplicated from
+// $ANDROID_BUILD_TOP/build/tools/releasetools/ota_metadata.proto. Keep in sync.
+message ApexInfo {
+  optional string package_name = 1;
+  optional int64 version = 2;
+  optional bool is_compressed = 3;
+  optional int64 decompressed_size = 4;
+}
+
+// Definition has been duplicated from
+// $ANDROID_BUILD_TOP/build/tools/releasetools/ota_metadata.proto. Keep in sync.
+message ApexMetadata {
+  repeated ApexInfo apex_info = 1;
 }
 
 message DeltaArchiveManifest {
   // Only present in major version = 1. List of install operations for the
   // kernel and rootfs partitions. For major version = 2 see the |partitions|
   // field.
-  repeated InstallOperation install_operations = 1;
-  repeated InstallOperation kernel_install_operations = 2;
+  repeated InstallOperation install_operations = 1 [deprecated = true];
+  repeated InstallOperation kernel_install_operations = 2 [deprecated = true];
 
   // (At time of writing) usually 4096
   optional uint32 block_size = 3 [default = 4096];
@@ -339,15 +400,15 @@
 
   // Only present in major version = 1. Partition metadata used to validate the
   // update. For major version = 2 see the |partitions| field.
-  optional PartitionInfo old_kernel_info = 6;
-  optional PartitionInfo new_kernel_info = 7;
-  optional PartitionInfo old_rootfs_info = 8;
-  optional PartitionInfo new_rootfs_info = 9;
+  optional PartitionInfo old_kernel_info = 6 [deprecated = true];
+  optional PartitionInfo new_kernel_info = 7 [deprecated = true];
+  optional PartitionInfo old_rootfs_info = 8 [deprecated = true];
+  optional PartitionInfo new_rootfs_info = 9 [deprecated = true];
 
   // old_image_info will only be present for delta images.
-  optional ImageInfo old_image_info = 10;
+  optional ImageInfo old_image_info = 10 [deprecated = true];
 
-  optional ImageInfo new_image_info = 11;
+  optional ImageInfo new_image_info = 11 [deprecated = true];
 
   // The minor version, also referred as "delta version", of the payload.
   // Minor version 0 is full payload, everything else is delta payload.
@@ -367,4 +428,11 @@
 
   // Metadata related to all dynamic partitions.
   optional DynamicPartitionMetadata dynamic_partition_metadata = 15;
+
+  // Whether the payload only updates a subset of partitions on the device.
+  optional bool partial_update = 16;
+
+  // Information on compressed APEXes, used to determine how much space is
+  // required for their decompression.
+  repeated ApexInfo apex_info = 17;
 }
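
The stated purpose of apex_info is to size the space needed for decompressing compressed APEXes. Below is a minimal sketch of that computation, assuming the generated proto2 C++ API; the helper name is made up for illustration.

```cpp
#include <cstdint>

#include "update_engine/update_metadata.pb.h"  // Generated header; path may differ.

namespace chromeos_update_engine {

// Hypothetical helper: total bytes needed to decompress every compressed
// APEX listed in the manifest. Uncompressed entries contribute nothing.
uint64_t ApexDecompressionSpaceNeeded(const DeltaArchiveManifest& manifest) {
  uint64_t total = 0;
  for (const ApexInfo& apex : manifest.apex_info()) {
    if (apex.is_compressed() && apex.decompressed_size() > 0)
      total += static_cast<uint64_t>(apex.decompressed_size());
  }
  return total;
}

}  // namespace chromeos_update_engine
```
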
diff --git a/update_status_utils.cc b/update_status_utils.cc
index 11fd299..a702c61 100644
--- a/update_status_utils.cc
+++ b/update_status_utils.cc
@@ -16,12 +16,32 @@
 #include "update_engine/update_status_utils.h"
 
 #include <base/logging.h>
+#include <base/strings/string_number_conversions.h>
+#include <brillo/key_value_store.h>
 #include <update_engine/dbus-constants.h>
 
+using brillo::KeyValueStore;
+using std::string;
+using update_engine::UpdateEngineStatus;
 using update_engine::UpdateStatus;
 
 namespace chromeos_update_engine {
 
+namespace {
+
+// Note: Do not change these strings; autotest depends on these exact string
+// values.
+const char kCurrentOp[] = "CURRENT_OP";
+const char kIsInstall[] = "IS_INSTALL";
+const char kIsEnterpriseRollback[] = "IS_ENTERPRISE_ROLLBACK";
+const char kLastCheckedTime[] = "LAST_CHECKED_TIME";
+const char kNewSize[] = "NEW_SIZE";
+const char kNewVersion[] = "NEW_VERSION";
+const char kProgress[] = "PROGRESS";
+const char kWillPowerwashAfterReboot[] = "WILL_POWERWASH_AFTER_REBOOT";
+
+}  // namespace
+
 const char* UpdateStatusToString(const UpdateStatus& status) {
   switch (status) {
     case UpdateStatus::IDLE:
@@ -54,45 +74,23 @@
   return nullptr;
 }
 
-bool StringToUpdateStatus(const std::string& s, UpdateStatus* status) {
-  if (s == update_engine::kUpdateStatusIdle) {
-    *status = UpdateStatus::IDLE;
-    return true;
-  } else if (s == update_engine::kUpdateStatusCheckingForUpdate) {
-    *status = UpdateStatus::CHECKING_FOR_UPDATE;
-    return true;
-  } else if (s == update_engine::kUpdateStatusUpdateAvailable) {
-    *status = UpdateStatus::UPDATE_AVAILABLE;
-    return true;
-  } else if (s == update_engine::kUpdateStatusNeedPermissionToUpdate) {
-    *status = UpdateStatus::NEED_PERMISSION_TO_UPDATE;
-    return true;
-  } else if (s == update_engine::kUpdateStatusDownloading) {
-    *status = UpdateStatus::DOWNLOADING;
-    return true;
-  } else if (s == update_engine::kUpdateStatusVerifying) {
-    *status = UpdateStatus::VERIFYING;
-    return true;
-  } else if (s == update_engine::kUpdateStatusFinalizing) {
-    *status = UpdateStatus::FINALIZING;
-    return true;
-  } else if (s == update_engine::kUpdateStatusUpdatedNeedReboot) {
-    *status = UpdateStatus::UPDATED_NEED_REBOOT;
-    return true;
-  } else if (s == update_engine::kUpdateStatusReportingErrorEvent) {
-    *status = UpdateStatus::REPORTING_ERROR_EVENT;
-    return true;
-  } else if (s == update_engine::kUpdateStatusAttemptingRollback) {
-    *status = UpdateStatus::ATTEMPTING_ROLLBACK;
-    return true;
-  } else if (s == update_engine::kUpdateStatusDisabled) {
-    *status = UpdateStatus::DISABLED;
-    return true;
-  } else if (s == update_engine::kUpdateStatusCleanupPreviousUpdate) {
-    *status = UpdateStatus::CLEANUP_PREVIOUS_UPDATE;
-    return true;
-  }
-  return false;
+string UpdateEngineStatusToString(const UpdateEngineStatus& status) {
+  KeyValueStore key_value_store;
+
+  key_value_store.SetString(kLastCheckedTime,
+                            base::NumberToString(status.last_checked_time));
+  key_value_store.SetString(kProgress, base::NumberToString(status.progress));
+  key_value_store.SetString(kNewSize,
+                            base::NumberToString(status.new_size_bytes));
+  key_value_store.SetString(kCurrentOp, UpdateStatusToString(status.status));
+  key_value_store.SetString(kNewVersion, status.new_version);
+  key_value_store.SetBoolean(kIsEnterpriseRollback,
+                             status.is_enterprise_rollback);
+  key_value_store.SetBoolean(kIsInstall, status.is_install);
+  key_value_store.SetBoolean(kWillPowerwashAfterReboot,
+                             status.will_powerwash_after_reboot);
+
+  return key_value_store.SaveToString();
 }
 
 }  // namespace chromeos_update_engine
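
Because UpdateEngineStatusToString now emits brillo::KeyValueStore key=value text (as exercised by the unit test below), a receiver can parse it back with the same class. A small sketch under that assumption, using only KeyValueStore calls that exist in libbrillo (LoadFromString, GetString, GetBoolean); the parsing function itself is hypothetical.

```cpp
#include <string>

#include <brillo/key_value_store.h>

// Hypothetical parser for the key=value text produced by
// UpdateEngineStatusToString(); key names mirror the constants defined above.
bool ParseStatusString(const std::string& status_text,
                       std::string* current_op,
                       bool* is_install) {
  brillo::KeyValueStore store;
  if (!store.LoadFromString(status_text))
    return false;
  return store.GetString("CURRENT_OP", current_op) &&
         store.GetBoolean("IS_INSTALL", is_install);
}
```
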
diff --git a/update_status_utils.h b/update_status_utils.h
index 30ae53b..1e3fdde 100644
--- a/update_status_utils.h
+++ b/update_status_utils.h
@@ -25,8 +25,8 @@
 
 const char* UpdateStatusToString(const update_engine::UpdateStatus& status);
 
-bool StringToUpdateStatus(const std::string& update_status_as_string,
-                          update_engine::UpdateStatus* status);
+std::string UpdateEngineStatusToString(
+    const update_engine::UpdateEngineStatus& status);
 
 }  // namespace chromeos_update_engine
 
diff --git a/update_status_utils_unittest.cc b/update_status_utils_unittest.cc
new file mode 100644
index 0000000..228201c
--- /dev/null
+++ b/update_status_utils_unittest.cc
@@ -0,0 +1,53 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_status_utils.h"
+
+#include <string>
+
+#include <gtest/gtest.h>
+
+using std::string;
+
+namespace chromeos_update_engine {
+
+TEST(UpdateStatusUtilsTest, UpdateEngineStatusToStringTest) {
+  // Keep field assignments in same order as they were declared,
+  // to prevent compiler warning, -Wreorder-init-fields.
+  update_engine::UpdateEngineStatus update_engine_status = {
+      .last_checked_time = 156000000,
+      .status = update_engine::UpdateStatus::CHECKING_FOR_UPDATE,
+      .progress = 0.5,
+      .new_size_bytes = 888,
+      .new_version = "12345.0.0",
+      .is_enterprise_rollback = true,
+      .is_install = true,
+      .will_powerwash_after_reboot = true,
+  };
+  string print =
+      R"(CURRENT_OP=UPDATE_STATUS_CHECKING_FOR_UPDATE
+IS_ENTERPRISE_ROLLBACK=true
+IS_INSTALL=true
+LAST_CHECKED_TIME=156000000
+NEW_SIZE=888
+NEW_VERSION=12345.0.0
+PROGRESS=0.5
+WILL_POWERWASH_AFTER_REBOOT=true
+)";
+  EXPECT_EQ(print, UpdateEngineStatusToString(update_engine_status));
+}
+
+}  // namespace chromeos_update_engine