Snap for 8558685 from 07b7186c9f0be06c4924c89366df60de1ae98c26 to tm-frc-ipsec-release

Change-Id: I117af082dd06f4fca0353ae0753ee92a72b3bc7a
diff --git a/Android.bp b/Android.bp
index 5f769ee..cace5b6 100644
--- a/Android.bp
+++ b/Android.bp
@@ -821,6 +821,15 @@
 }
 
 filegroup {
+    name: "update_engine_host_unittest_timeout_srcs",
+    srcs: [
+        "common/action_processor_unittest.cc",
+        "common/file_fetcher_unittest.cc",
+        "payload_generator/delta_diff_utils_unittest.cc",
+    ],
+}
+
+filegroup {
     name: "update_engine_host_unittest_srcs",
     srcs: [
         "common/action_pipe_unittest.cc",
@@ -860,6 +869,8 @@
         "payload_generator/payload_signer_unittest.cc",
         "payload_generator/squashfs_filesystem_unittest.cc",
         "payload_generator/zip_unittest.cc",
+        "payload_consumer/verity_writer_android_unittest.cc",
+        "payload_consumer/xz_extent_writer_unittest.cc",
         "testrunner.cc",
     ],
 }
@@ -876,6 +887,7 @@
     cflags: [
         "-g3",
     ],
+    tidy_timeout_srcs: [":update_engine_host_unittest_timeout_srcs"],
     srcs: [":update_engine_host_unittest_srcs"],
     data: [
         ":ue_unittest_delta_generator",
@@ -890,7 +902,6 @@
         "update_engine.conf",
     ],
     static_libs: [
-        "libcurl",
         "libgmock",
         "libpayload_generator",
     ],
@@ -900,6 +911,82 @@
 // ========================================================
 // Main unittest file.
 cc_test {
+    name: "update_engine_http_unittests",
+    defaults: [
+        "ue_defaults",
+        "liblz4diff_defaults",
+        "update_metadata-protos_exports",
+    ],
+    require_root: true,
+    static_libs: [
+        "libbase",
+        "libbrillo-test-helpers",
+        "libchrome_test_helpers",
+        "libcurl",
+        "libcutils",
+        "libdm",
+        "libgmock",
+        "libz",
+    ],
+    shared_libs: [
+        "libssl",
+        "libcrypto",
+        "libziparchive",
+        "liblog",
+    ],
+
+    data: [
+        ":test_http_server",
+        ":test_subprocess",
+        ":ue_unittest_keys",
+        "otacerts.zip",
+        "unittest_key.pem",
+        "unittest_key2.pem",
+        "unittest_key_RSA4096.pem",
+        "unittest_key_EC.pem",
+    ],
+
+    // We cannot use the default generated AndroidTest.xml because of the use of helper modules
+    // (i.e. test_http_server, test_subprocess, ue_unittest_delta_generator).
+    // test_config: "test_config.xml",
+    test_suites: ["device-tests"],
+
+    srcs: [
+        "aosp/platform_constants_android.cc",
+        "certificate_checker.cc",
+        "common/action_processor.cc",
+        "common/boot_control_stub.cc",
+        "common/error_code_utils.cc",
+        "common/file_fetcher.cc",
+        "common/hash_calculator.cc",
+        "common/http_fetcher.cc",
+        "common/multi_range_http_fetcher.cc",
+        "common/http_common.cc",
+        "common/subprocess.cc",
+        "common/test_utils.cc",
+        "common/utils.cc",
+        "common/proxy_resolver.cc",
+        "libcurl_http_fetcher.cc",
+        "payload_consumer/certificate_parser_android.cc",
+        "payload_consumer/payload_verifier.cc",
+        "payload_generator/payload_signer.cc",
+        "update_status_utils.cc",
+
+        "certificate_checker_unittest.cc",
+        "common/http_fetcher_unittest.cc",
+        "common/mock_http_fetcher.cc",
+        "common/proxy_resolver_unittest.cc",
+        "common/subprocess_unittest.cc",
+        "libcurl_http_fetcher_unittest.cc",
+        "payload_consumer/certificate_parser_android_unittest.cc",
+        "update_status_utils_unittest.cc",
+    ],
+}
+
+// update_engine_unittests (type: executable)
+// ========================================================
+// Main unittest file.
+cc_test {
     name: "update_engine_unittests",
     defaults: [
         "ue_defaults",
@@ -921,8 +1008,6 @@
     ],
 
     data: [
-        ":test_http_server",
-        ":test_subprocess",
         ":ue_unittest_delta_generator",
         ":ue_unittest_disk_imgs",
         ":ue_unittest_erofs_imgs",
@@ -940,6 +1025,13 @@
     test_config: "test_config.xml",
     test_suites: ["device-tests"],
 
+    tidy_timeout_srcs: [
+        ":update_engine_host_unittest_timeout_srcs",
+        "aosp/dynamic_partition_control_android_unittest.cc",
+        "common/http_fetcher_unittest.cc",
+        "payload_consumer/delta_performer_integration_test.cc",
+        "payload_consumer/delta_performer_unittest.cc",
+    ],
     srcs: [
         ":update_engine_host_unittest_srcs",
         "aosp/apex_handler_android_unittest.cc",
@@ -947,17 +1039,11 @@
         "aosp/dynamic_partition_control_android_unittest.cc",
         "aosp/update_attempter_android_integration_test.cc",
         "aosp/update_attempter_android_unittest.cc",
-        "certificate_checker_unittest.cc",
-        "common/http_fetcher_unittest.cc",
-        "common/proxy_resolver_unittest.cc",
-        "common/subprocess_unittest.cc",
         "common/utils_unittest.cc",
         "download_action_android_unittest.cc",
-        "libcurl_http_fetcher_unittest.cc",
         "payload_consumer/bzip_extent_writer_unittest.cc",
         "payload_consumer/cached_file_descriptor_unittest.cc",
         "payload_consumer/cow_writer_file_descriptor_unittest.cc",
-        "payload_consumer/certificate_parser_android_unittest.cc",
         "payload_consumer/delta_performer_integration_test.cc",
         "payload_consumer/delta_performer_unittest.cc",
         "payload_consumer/extent_reader_unittest.cc",
@@ -974,10 +1060,7 @@
         "payload_consumer/postinstall_runner_action_unittest.cc",
         "payload_consumer/snapshot_extent_writer_unittest.cc",
         "payload_consumer/vabc_partition_writer_unittest.cc",
-        "payload_consumer/verity_writer_android_unittest.cc",
         "payload_consumer/xor_extent_writer_unittest.cc",
-        "payload_consumer/xz_extent_writer_unittest.cc",
-        "update_status_utils_unittest.cc",
     ],
 }
 
@@ -1065,3 +1148,25 @@
         export_proto_headers: true,
     },
 }
+
+cc_binary_host {
+    name: "ota_extractor",
+    defaults: [
+        "ue_defaults",
+        "libpayload_consumer_exports",
+    ],
+    srcs: [
+        "aosp/ota_extractor.cc",
+    ],
+    static_libs: [
+        "liblog",
+        "libbrotli",
+        "libbase",
+        "libpayload_consumer",
+        "libpayload_extent_ranges",
+        "libpayload_extent_utils",
+        "libz",
+        "libgflags",
+        "update_metadata-protos",
+    ],
+}
diff --git a/TEST_MAPPING b/TEST_MAPPING
new file mode 100644
index 0000000..3a9a238
--- /dev/null
+++ b/TEST_MAPPING
@@ -0,0 +1,10 @@
+{
+  "presubmit": [
+    {
+      "name": "update_engine_unittests"
+    },
+    {
+      "name": "update_engine_http_unittests"
+    }
+  ]
+}
diff --git a/aosp/dynamic_partition_control_android.cc b/aosp/dynamic_partition_control_android.cc
index 6d33a09..27d1d54 100644
--- a/aosp/dynamic_partition_control_android.cc
+++ b/aosp/dynamic_partition_control_android.cc
@@ -82,6 +82,8 @@
 constexpr char kVirtualAbRetrofit[] = "ro.virtual_ab.retrofit";
 constexpr char kVirtualAbCompressionEnabled[] =
     "ro.virtual_ab.compression.enabled";
+constexpr auto&& kVirtualAbCompressionXorEnabled =
+    "ro.virtual_ab.compression.xor.enabled";
 
 // Currently, android doesn't have a retrofit prop for VAB Compression. However,
 // struct FeatureFlag forces us to determine if a feature is 'retrofit'. So this
@@ -126,6 +128,8 @@
       virtual_ab_(GetFeatureFlag(kVirtualAbEnabled, kVirtualAbRetrofit)),
       virtual_ab_compression_(GetFeatureFlag(kVirtualAbCompressionEnabled,
                                              kVirtualAbCompressionRetrofit)),
+      virtual_ab_compression_xor_(
+          GetFeatureFlag(kVirtualAbCompressionXorEnabled, "")),
       source_slot_(source_slot) {
   if (GetVirtualAbFeatureFlag().IsEnabled()) {
     snapshot_ = SnapshotManager::New();
@@ -152,6 +156,11 @@
   return virtual_ab_compression_;
 }
 
+FeatureFlag
+DynamicPartitionControlAndroid::GetVirtualAbCompressionXorFeatureFlag() {
+  return virtual_ab_compression_xor_;
+}
+
 bool DynamicPartitionControlAndroid::OptimizeOperation(
     const std::string& partition_name,
     const InstallOperation& operation,
@@ -465,6 +474,9 @@
   if (!SetTargetBuildVars(manifest)) {
     return false;
   }
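+  // Clear the cached per-slot dynamic partition lists; the upcoming update
+  // may change the partition layout.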
+  for (auto& list : dynamic_partition_list_) {
+    list.clear();
+  }
 
   // Although the current build supports dynamic partitions, the given payload
   // doesn't use it for target partitions. This could happen when applying a
@@ -1280,6 +1292,9 @@
   if (!GetVirtualAbFeatureFlag().IsEnabled()) {
     return true;
   }
+  for (auto& list : dynamic_partition_list_) {
+    list.clear();
+  }
 
   LOG(INFO) << __func__ << " resetting update state and deleting snapshots.";
   TEST_AND_RETURN_FALSE(prefs != nullptr);
diff --git a/aosp/dynamic_partition_control_android.h b/aosp/dynamic_partition_control_android.h
index cebca07..92761d2 100644
--- a/aosp/dynamic_partition_control_android.h
+++ b/aosp/dynamic_partition_control_android.h
@@ -21,7 +21,7 @@
 #include <set>
 #include <string>
 #include <string_view>
-#include <vector>
+#include <array>
 
 #include <base/files/file_util.h>
 #include <libsnapshot/auto_device.h>
@@ -44,6 +44,7 @@
   FeatureFlag GetDynamicPartitionsFeatureFlag() override;
   FeatureFlag GetVirtualAbFeatureFlag() override;
   FeatureFlag GetVirtualAbCompressionFeatureFlag() override;
+  FeatureFlag GetVirtualAbCompressionXorFeatureFlag() override;
   bool OptimizeOperation(const std::string& partition_name,
                          const InstallOperation& operation,
                          InstallOperation* optimized) override;
@@ -339,6 +340,7 @@
   const FeatureFlag dynamic_partitions_;
   const FeatureFlag virtual_ab_;
   const FeatureFlag virtual_ab_compression_;
+  const FeatureFlag virtual_ab_compression_xor_;
   std::unique_ptr<android::snapshot::ISnapshotManager> snapshot_;
   std::unique_ptr<android::snapshot::AutoDevice> metadata_device_;
   bool target_supports_snapshot_ = false;
@@ -348,7 +350,9 @@
 
   uint32_t source_slot_ = UINT32_MAX;
   uint32_t target_slot_ = UINT32_MAX;
-  std::vector<std::vector<std::string>> dynamic_partition_list_{2UL};
+  // We assume that there are only 2 slots, A and B. This assumption is
+  // unlikely to change in the future, and certainly won't change at runtime.
+  std::array<std::vector<std::string>, 2> dynamic_partition_list_{};
 
   DISALLOW_COPY_AND_ASSIGN(DynamicPartitionControlAndroid);
 };
diff --git a/aosp/mock_dynamic_partition_control_android.h b/aosp/mock_dynamic_partition_control_android.h
index 33ef39c..f55cdf7 100644
--- a/aosp/mock_dynamic_partition_control_android.h
+++ b/aosp/mock_dynamic_partition_control_android.h
@@ -73,6 +73,10 @@
   MOCK_METHOD(std::string, GetSuperPartitionName, (uint32_t), (override));
   MOCK_METHOD(FeatureFlag, GetVirtualAbFeatureFlag, (), (override));
   MOCK_METHOD(FeatureFlag, GetVirtualAbCompressionFeatureFlag, (), (override));
+  MOCK_METHOD(FeatureFlag,
+              GetVirtualAbCompressionXorFeatureFlag,
+              (),
+              (override));
   MOCK_METHOD(bool, FinishUpdate, (bool), (override));
   MOCK_METHOD(bool,
               GetSystemOtherPath,
diff --git a/aosp/ota_extractor.cc b/aosp/ota_extractor.cc
new file mode 100644
index 0000000..4a57370
--- /dev/null
+++ b/aosp/ota_extractor.cc
@@ -0,0 +1,280 @@
+//
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <array>
+#include <cstdint>
+#include <cstdio>
+#include <iterator>
+#include <memory>
+
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+
+#include <android-base/strings.h>
+#include <base/files/file_path.h>
+#include <gflags/gflags.h>
+#include <unistd.h>
+#include <xz.h>
+
+#include "update_engine/common/utils.h"
+#include "update_engine/common/hash_calculator.h"
+#include "update_engine/payload_consumer/file_descriptor.h"
+#include "update_engine/payload_consumer/file_descriptor_utils.h"
+#include "update_engine/payload_consumer/install_operation_executor.h"
+#include "update_engine/payload_consumer/payload_metadata.h"
+#include "update_engine/payload_consumer/verity_writer_android.h"
+#include "update_engine/update_metadata.pb.h"
+
+DEFINE_string(payload, "", "Path to payload.bin");
+DEFINE_string(
+    input_dir,
+    "",
+    "Directory to read input images. Only required for incremental OTAs");
+DEFINE_string(output_dir, "", "Directory to put output images");
+DEFINE_int64(payload_offset,
+             0,
+             "Offset to start of payload.bin. Useful if payload path actually "
+             "points to a .zip file containing payload.bin");
+DEFINE_string(partitions,
+              "",
+              "Comma separated list of partitions to extract, leave empty for "
+              "extracting all partitions");
+
+using chromeos_update_engine::DeltaArchiveManifest;
+using chromeos_update_engine::PayloadMetadata;
+
+namespace chromeos_update_engine {
+
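+// Computes the dm-verity hash tree and FEC data described by |partition|'s
+// verity extents and writes them into |fd|. No-op if the partition carries
+// neither a hash tree nor FEC data.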
+void WriteVerity(const PartitionUpdate& partition,
+                 FileDescriptorPtr fd,
+                 const size_t block_size) {
+  // 512KB buffer, arbitrary value. Larger buffers may improve performance.
+  static constexpr size_t BUFFER_SIZE = 1024 * 512;
+  if (partition.hash_tree_extent().num_blocks() == 0 &&
+      partition.fec_extent().num_blocks() == 0) {
+    return;
+  }
+  InstallPlan::Partition install_part;
+  install_part.block_size = block_size;
+  CHECK(install_part.ParseVerityConfig(partition));
+  VerityWriterAndroid writer;
+  CHECK(writer.Init(install_part));
+  std::array<uint8_t, BUFFER_SIZE> buffer;
+  const auto data_size =
+      install_part.hash_tree_data_offset + install_part.hash_tree_data_size;
+  size_t offset = 0;
+  while (offset < data_size) {
+    const auto bytes_to_read =
+        static_cast<ssize_t>(std::min(BUFFER_SIZE, data_size - offset));
+    ssize_t bytes_read;
+    CHECK(
+        utils::ReadAll(fd, buffer.data(), bytes_to_read, offset, &bytes_read));
+    CHECK_EQ(bytes_read, bytes_to_read)
+        << " Failed to read at offset " << offset << " "
+        << android::base::ErrnoNumberAsString(errno);
+    CHECK(writer.Update(offset, buffer.data(), bytes_read));
+    offset += bytes_read;
+  }
+  CHECK(writer.Finalize(fd.get(), fd.get()));
+}
+
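+// Extracts the requested |partitions| (all partitions if empty) from the
+// payload into |output_dir| as <name>.img, reading source images for
+// incremental operations from |input_dir|. Verifies source, operation data,
+// and final partition hashes along the way.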
+bool ExtractImagesFromOTA(const DeltaArchiveManifest& manifest,
+                          const PayloadMetadata& metadata,
+                          int payload_fd,
+                          size_t payload_offset,
+                          std::string_view input_dir,
+                          std::string_view output_dir,
+                          const std::set<std::string>& partitions) {
+  InstallOperationExecutor executor(manifest.block_size());
+  const size_t data_begin = metadata.GetMetadataSize() +
+                            metadata.GetMetadataSignatureSize() +
+                            payload_offset;
+  const base::FilePath output_dir_path(
+      base::StringPiece(output_dir.data(), output_dir.size()));
+  const base::FilePath input_dir_path(
+      base::StringPiece(input_dir.data(), input_dir.size()));
+  std::vector<unsigned char> blob;
+  for (const auto& partition : manifest.partitions()) {
+    if (!partitions.empty() &&
+        partitions.count(partition.partition_name()) == 0) {
+      continue;
+    }
+    LOG(INFO) << "Extracting partition " << partition.partition_name()
+              << " size: " << partition.new_partition_info().size();
+    const auto output_path =
+        output_dir_path.Append(partition.partition_name() + ".img").value();
+    auto out_fd =
+        std::make_shared<chromeos_update_engine::EintrSafeFileDescriptor>();
+    TEST_AND_RETURN_FALSE_ERRNO(
+        out_fd->Open(output_path.c_str(), O_RDWR | O_CREAT, 0644));
+    auto in_fd =
+        std::make_shared<chromeos_update_engine::EintrSafeFileDescriptor>();
+    if (partition.has_old_partition_info()) {
+      const auto input_path =
+          input_dir_path.Append(partition.partition_name() + ".img").value();
+      LOG(INFO) << "Incremental OTA detected for partition "
+                << partition.partition_name() << " opening source image "
+                << input_path;
+      CHECK(in_fd->Open(input_path.c_str(), O_RDONLY))
+          << " failed to open " << input_path;
+    }
+
+    for (const auto& op : partition.operations()) {
+      if (op.has_src_sha256_hash()) {
+        brillo::Blob actual_hash;
+        TEST_AND_RETURN_FALSE(fd_utils::ReadAndHashExtents(
+            in_fd, op.src_extents(), manifest.block_size(), &actual_hash));
+        CHECK_EQ(HexEncode(ToStringView(actual_hash)),
+                 HexEncode(op.src_sha256_hash()));
+      }
+
+      blob.resize(op.data_length());
+      const auto op_data_offset = data_begin + op.data_offset();
+      ssize_t bytes_read = 0;
+      TEST_AND_RETURN_FALSE(utils::PReadAll(
+          payload_fd, blob.data(), blob.size(), op_data_offset, &bytes_read));
+      if (op.has_data_sha256_hash()) {
+        brillo::Blob actual_hash;
+        TEST_AND_RETURN_FALSE(
+            HashCalculator::RawHashOfData(blob, &actual_hash));
+        CHECK_EQ(HexEncode(ToStringView(actual_hash)),
+                 HexEncode(op.data_sha256_hash()));
+      }
+      auto direct_writer = std::make_unique<DirectExtentWriter>(out_fd);
+      if (op.type() == InstallOperation::ZERO) {
+        TEST_AND_RETURN_FALSE(executor.ExecuteZeroOrDiscardOperation(
+            op, std::move(direct_writer)));
+      } else if (op.type() == InstallOperation::REPLACE ||
+                 op.type() == InstallOperation::REPLACE_BZ ||
+                 op.type() == InstallOperation::REPLACE_XZ) {
+        TEST_AND_RETURN_FALSE(executor.ExecuteReplaceOperation(
+            op, std::move(direct_writer), blob.data(), blob.size()));
+      } else if (op.type() == InstallOperation::SOURCE_COPY) {
+        CHECK(in_fd->IsOpen());
+        TEST_AND_RETURN_FALSE(executor.ExecuteSourceCopyOperation(
+            op, std::move(direct_writer), in_fd));
+      } else {
+        CHECK(in_fd->IsOpen());
+        TEST_AND_RETURN_FALSE(executor.ExecuteDiffOperation(
+            op, std::move(direct_writer), in_fd, blob.data(), blob.size()));
+      }
+    }
+    WriteVerity(partition, out_fd, manifest.block_size());
+    int err =
+        truncate64(output_path.c_str(), partition.new_partition_info().size());
+    if (err) {
+      PLOG(ERROR) << "Failed to truncate " << output_path << " to "
+                  << partition.new_partition_info().size();
+    }
+    brillo::Blob actual_hash;
+    TEST_AND_RETURN_FALSE(
+        HashCalculator::RawHashOfFile(output_path, &actual_hash));
+    CHECK_EQ(HexEncode(ToStringView(actual_hash)),
+             HexEncode(partition.new_partition_info().hash()))
+        << " Partition " << partition.partition_name()
+        << " hash mismatches. Either the source image or OTA package is "
+           "corrupted.";
+  }
+  return true;
+}
+
+}  // namespace chromeos_update_engine
+
+namespace {
+
+bool IsIncrementalOTA(const DeltaArchiveManifest& manifest) {
+  for (const auto& part : manifest.partitions()) {
+    if (part.has_old_partition_info()) {
+      return true;
+    }
+  }
+  return false;
+}
+
+}  // namespace
+
+int main(int argc, char* argv[]) {
+  gflags::SetUsageMessage(
+      "A tool to extract device images from Android OTA packages");
+  gflags::ParseCommandLineFlags(&argc, &argv, true);
+  xz_crc32_init();
+  auto tokens = android::base::Tokenize(FLAGS_partitions, ",");
+  const std::set<std::string> partitions(
+      std::make_move_iterator(tokens.begin()),
+      std::make_move_iterator(tokens.end()));
+  if (FLAGS_payload.empty()) {
+    LOG(ERROR) << "--payload <payload path> is required";
+    return 1;
+  }
+  if (!partitions.empty()) {
+    LOG(INFO) << "Extracting " << android::base::Join(partitions, ", ");
+  }
+  int payload_fd = open(FLAGS_payload.c_str(), O_RDONLY | O_CLOEXEC);
+  if (payload_fd < 0) {
+    PLOG(ERROR) << "Failed to open payload file";
+    return 1;
+  }
+  chromeos_update_engine::ScopedFdCloser closer{&payload_fd};
+  auto payload_size = chromeos_update_engine::utils::FileSize(payload_fd);
+  if (payload_size <= 0) {
+    PLOG(ERROR)
+        << "Couldn't determine size of payload file, or payload file is empty";
+    return 1;
+  }
+
+  PayloadMetadata payload_metadata;
+  auto payload = static_cast<unsigned char*>(
+      mmap(nullptr, payload_size, PROT_READ, MAP_PRIVATE, payload_fd, 0));
+
+  if (payload == MAP_FAILED) {
+    PLOG(ERROR) << "Failed to mmap() payload file";
+    return 1;
+  }
+
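+  // Release the mapping automatically when |munmapper| goes out of scope.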
+  auto munmap_deleter = [payload_size](auto payload) {
+    munmap(payload, payload_size);
+  };
+  std::unique_ptr<unsigned char, decltype(munmap_deleter)> munmapper{
+      payload, munmap_deleter};
+  if (payload_metadata.ParsePayloadHeader(payload + FLAGS_payload_offset,
+                                          payload_size - FLAGS_payload_offset,
+                                          nullptr) !=
+      chromeos_update_engine::MetadataParseResult::kSuccess) {
+    LOG(ERROR) << "Payload header parse failed!";
+    return 1;
+  }
+  DeltaArchiveManifest manifest;
+  if (!payload_metadata.GetManifest(payload + FLAGS_payload_offset,
+                                    payload_size - FLAGS_payload_offset,
+                                    &manifest)) {
+    LOG(ERROR) << "Failed to parse manifest!";
+    return 1;
+  }
+  if (IsIncrementalOTA(manifest) && FLAGS_input_dir.empty()) {
+    LOG(ERROR) << FLAGS_payload
+               << " is an incremental OTA, --input_dir parameter is required.";
+    return 1;
+  }
+  return !ExtractImagesFromOTA(manifest,
+                               payload_metadata,
+                               payload_fd,
+                               FLAGS_payload_offset,
+                               FLAGS_input_dir,
+                               FLAGS_output_dir,
+                               partitions);
+}
diff --git a/aosp/update_attempter_android.cc b/aosp/update_attempter_android.cc
index 4e609d4..a3485ea 100644
--- a/aosp/update_attempter_android.cc
+++ b/aosp/update_attempter_android.cc
@@ -400,6 +400,10 @@
 bool UpdateAttempterAndroid::ResetStatus(brillo::ErrorPtr* error) {
   LOG(INFO) << "Attempting to reset state from "
             << UpdateStatusToString(status_) << " to UpdateStatus::IDLE";
+  if (processor_->IsRunning()) {
+    return LogAndSetError(
+        error, FROM_HERE, "Already processing an update, cancel it first.");
+  }
 
   if (apex_handler_android_ != nullptr) {
     LOG(INFO) << "Cleaning up reserved space for compressed APEX (if any)";
@@ -416,12 +420,12 @@
                           "ClearUpdateCompletedMarker() failed");
   }
 
+  if (!boot_control_->GetDynamicPartitionControl()->ResetUpdate(prefs_)) {
+    LOG(WARNING) << "Failed to reset snapshots. UpdateStatus is IDLE but"
+                  << "space might not be freed.";
+  }
   switch (status_) {
     case UpdateStatus::IDLE: {
-      if (!boot_control_->GetDynamicPartitionControl()->ResetUpdate(prefs_)) {
-        LOG(WARNING) << "Failed to reset snapshots. UpdateStatus is IDLE but"
-                     << "space might not be freed.";
-      }
       return true;
     }
 
diff --git a/common/dynamic_partition_control_interface.h b/common/dynamic_partition_control_interface.h
index e6ebe6a..2c01b1a 100644
--- a/common/dynamic_partition_control_interface.h
+++ b/common/dynamic_partition_control_interface.h
@@ -73,6 +73,8 @@
   // DOES NOT tell you if VABC is used for current OTA update. For that, use
   // UpdateUsesSnapshotCompression.
   virtual FeatureFlag GetVirtualAbCompressionFeatureFlag() = 0;
+  // Return the feature flag for Virtual AB Compression XOR.
+  virtual FeatureFlag GetVirtualAbCompressionXorFeatureFlag() = 0;
 
   // Attempt to optimize |operation|.
   // If successful, |optimized| contains an operation with extents that
diff --git a/common/dynamic_partition_control_stub.cc b/common/dynamic_partition_control_stub.cc
index dd30a8b..6283b1d 100644
--- a/common/dynamic_partition_control_stub.cc
+++ b/common/dynamic_partition_control_stub.cc
@@ -38,6 +38,10 @@
   return FeatureFlag(FeatureFlag::Value::NONE);
 }
 
+FeatureFlag
+DynamicPartitionControlStub::GetVirtualAbCompressionXorFeatureFlag() {
+  return FeatureFlag(FeatureFlag::Value::NONE);
+}
+
 bool DynamicPartitionControlStub::OptimizeOperation(
     const std::string& partition_name,
     const InstallOperation& operation,
diff --git a/common/dynamic_partition_control_stub.h b/common/dynamic_partition_control_stub.h
index 5aa4336..15137d2 100644
--- a/common/dynamic_partition_control_stub.h
+++ b/common/dynamic_partition_control_stub.h
@@ -27,11 +27,12 @@
 
 namespace chromeos_update_engine {
 
-class DynamicPartitionControlStub : public DynamicPartitionControlInterface {
+class DynamicPartitionControlStub final
+    : public DynamicPartitionControlInterface {
  public:
   FeatureFlag GetDynamicPartitionsFeatureFlag() override;
   FeatureFlag GetVirtualAbFeatureFlag() override;
   FeatureFlag GetVirtualAbCompressionFeatureFlag() override;
+  FeatureFlag GetVirtualAbCompressionXorFeatureFlag() override;
   bool OptimizeOperation(const std::string& partition_name,
                          const InstallOperation& operation,
                          InstallOperation* optimized) override;
diff --git a/common/hash_calculator.cc b/common/hash_calculator.cc
index 60812d5..ea56bea 100644
--- a/common/hash_calculator.cc
+++ b/common/hash_calculator.cc
@@ -125,4 +125,18 @@
   return true;
 }
 
+std::string HashCalculator::SHA256Digest(std::string_view blob) {
+  std::vector<unsigned char> hash;
+  HashCalculator::RawHashOfBytes(blob.data(), blob.size(), &hash);
+  return HexEncode(hash);
+}
+
+std::string HashCalculator::SHA256Digest(std::vector<unsigned char> blob) {
+  return SHA256Digest(ToStringView(blob));
+}
+
+std::string HashCalculator::SHA256Digest(std::vector<char> blob) {
+  return SHA256Digest(ToStringView(blob));
+}
+
 }  // namespace chromeos_update_engine
diff --git a/common/hash_calculator.h b/common/hash_calculator.h
index 4426128..dd7b2e8 100644
--- a/common/hash_calculator.h
+++ b/common/hash_calculator.h
@@ -76,6 +76,10 @@
                              off_t length,
                              brillo::Blob* out_hash);
   static bool RawHashOfFile(const std::string& name, brillo::Blob* out_hash);
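+  // Returns the hex-encoded SHA256 digest of |blob|.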
+  static std::string SHA256Digest(std::string_view blob);
+
+  static std::string SHA256Digest(std::vector<unsigned char> blob);
+  static std::string SHA256Digest(std::vector<char> blob);
 
  private:
   // If non-empty, the final raw hash. Will only be set to non-empty when
diff --git a/common/mock_dynamic_partition_control.h b/common/mock_dynamic_partition_control.h
index f3a446a..fd0a5a9 100644
--- a/common/mock_dynamic_partition_control.h
+++ b/common/mock_dynamic_partition_control.h
@@ -34,6 +34,10 @@
   MOCK_METHOD(bool, GetDeviceDir, (std::string*), (override));
   MOCK_METHOD(FeatureFlag, GetDynamicPartitionsFeatureFlag, (), (override));
   MOCK_METHOD(FeatureFlag, GetVirtualAbCompressionFeatureFlag, (), (override));
+  MOCK_METHOD(FeatureFlag,
+              GetVirtualAbCompressionXorFeatureFlag,
+              (),
+              (override));
   MOCK_METHOD(FeatureFlag, GetVirtualAbFeatureFlag, (), (override));
   MOCK_METHOD(bool, FinishUpdate, (bool), (override));
   MOCK_METHOD(std::unique_ptr<FileDescriptor>,
diff --git a/common/mock_http_fetcher.h b/common/mock_http_fetcher.h
index ea5b83d..3d7859b 100644
--- a/common/mock_http_fetcher.h
+++ b/common/mock_http_fetcher.h
@@ -36,7 +36,7 @@
 // MockHttpFetcher will send a chunk of data down in each call to BeginTransfer
 // and Unpause. For the other chunks of data, a callback is put on the run
 // loop and when that's called, another chunk is sent down.
-const size_t kMockHttpFetcherChunkSize(65536);
+static constexpr size_t kMockHttpFetcherChunkSize(65536);
 
 class MockHttpFetcher : public HttpFetcher {
  public:
diff --git a/common/utils.cc b/common/utils.cc
index 794b832..0b76eea 100644
--- a/common/utils.cc
+++ b/common/utils.cc
@@ -1062,16 +1062,16 @@
   return base::NumberToString(base::StringPieceHash()(str_to_convert));
 }
 
-static bool ParseTimestamp(const std::string& str, int64_t* out) {
-  if (!base::StringToInt64(str, out)) {
+static bool ParseTimestamp(std::string_view str, int64_t* out) {
+  if (!base::StringToInt64(base::StringPiece(str.data(), str.size()), out)) {
     LOG(WARNING) << "Invalid timestamp: " << str;
     return false;
   }
   return true;
 }
 
-ErrorCode IsTimestampNewer(const std::string& old_version,
-                           const std::string& new_version) {
+ErrorCode IsTimestampNewer(const std::string_view old_version,
+                           const std::string_view new_version) {
   if (old_version.empty() || new_version.empty()) {
     LOG(WARNING)
         << "One of old/new timestamp is empty, permit update anyway. Old: "
@@ -1117,4 +1117,15 @@
   return base::HexEncode(blob.data(), blob.size());
 }
 
+[[nodiscard]] std::string_view ToStringView(
+    const std::vector<unsigned char>& blob) noexcept {
+  return std::string_view{reinterpret_cast<const char*>(blob.data()),
+                          blob.size()};
+}
+
+[[nodiscard]] std::string_view ToStringView(const void* data,
+                                            size_t size) noexcept {
+  return std::string_view(reinterpret_cast<const char*>(data), size);
+}
+
 }  // namespace chromeos_update_engine
diff --git a/common/utils.h b/common/utils.h
index 0f8da22..201e47e 100644
--- a/common/utils.h
+++ b/common/utils.h
@@ -411,8 +411,8 @@
 // integer.
 // Return kPayloadTimestampError if both are integers but |new_version| <
 // |old_version|.
-ErrorCode IsTimestampNewer(const std::string& old_version,
-                           const std::string& new_version);
+ErrorCode IsTimestampNewer(const std::string_view old_version,
+                           const std::string_view new_version);
 
 std::unique_ptr<android::base::MappedFile> GetReadonlyZeroBlock(size_t size);
 
@@ -545,6 +545,17 @@
   return base::HexEncode(blob.data(), blob.size());
 }
 
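+// Reinterprets |blob|'s bytes as a std::string_view without copying.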
+[[nodiscard]] std::string_view ToStringView(
+    const std::vector<unsigned char>& blob) noexcept;
+
+constexpr std::string_view ToStringView(
+    const std::vector<char>& blob) noexcept {
+  return std::string_view{blob.data(), blob.size()};
+}
+
+[[nodiscard]] std::string_view ToStringView(const void* data,
+                                            size_t size) noexcept;
+
 }  // namespace chromeos_update_engine
 
 #define TEST_AND_RETURN_FALSE_ERRNO(_x)                             \
diff --git a/lz4diff/lz4diff_compress.cc b/lz4diff/lz4diff_compress.cc
index 930954b..ce9082c 100644
--- a/lz4diff/lz4diff_compress.cc
+++ b/lz4diff/lz4diff_compress.cc
@@ -27,6 +27,98 @@
 
 namespace chromeos_update_engine {
 
+bool TryCompressBlob(std::string_view blob,
+                     const std::vector<CompressedBlock>& block_info,
+                     const bool zero_padding_enabled,
+                     const CompressionAlgorithm compression_algo,
+                     const SinkFunc& sink) {
+  size_t uncompressed_size = 0;
+  for (const auto& block : block_info) {
+    CHECK_EQ(uncompressed_size, block.uncompressed_offset)
+        << "Compressed block info is expected to be sorted.";
+    uncompressed_size += block.uncompressed_length;
+  }
+  auto hc = LZ4_createStreamHC();
+  DEFER {
+    if (hc) {
+      LZ4_freeStreamHC(hc);
+      hc = nullptr;
+    }
+  };
+  size_t compressed_offset = 0;
+  Blob block_buffer;
+  for (const auto& block : block_info) {
+    const auto uncompressed_block =
+        blob.substr(block.uncompressed_offset, block.uncompressed_length);
+    if (!block.IsCompressed()) {
+      TEST_EQ(sink(reinterpret_cast<const uint8_t*>(uncompressed_block.data()),
+                   uncompressed_block.size()),
+              uncompressed_block.size());
+      continue;
+    }
+    block_buffer.resize(block.compressed_length);
+    // Execute the increment at end of each loop
+    DEFER {
+      compressed_offset += block.compressed_length;
+      block_buffer.clear();
+    };
+
+    int ret = 0;
+    // The LZ4 spec enforces that the last op of a compressed block must be an
+    // insert op of at least 5 bytes. Compressors will try to conform to that
+    // requirement if the input size is just right. We don't want that, so
+    // always give a little bit more data.
+    switch (int src_size = uncompressed_size - block.uncompressed_offset;
+            compression_algo.type()) {
+      case CompressionAlgorithm::LZ4HC:
+        ret = LZ4_compress_HC_destSize(
+            hc,
+            uncompressed_block.data(),
+            reinterpret_cast<char*>(block_buffer.data()),
+            &src_size,
+            block.compressed_length,
+            compression_algo.level());
+        break;
+      case CompressionAlgorithm::LZ4:
+        ret =
+            LZ4_compress_destSize(uncompressed_block.data(),
+                                  reinterpret_cast<char*>(block_buffer.data()),
+                                  &src_size,
+                                  block.compressed_length);
+        break;
+      default:
+        LOG(ERROR) << "Unrecognized compression algorithm: "
+                   << compression_algo.type();
+        return false;
+    }
+    TEST_GT(ret, 0);
+    const uint64_t bytes_written = ret;
+    // Last block may have trailing zeros
+    TEST_LE(bytes_written, block.compressed_length);
+    if (bytes_written < block.compressed_length) {
+      if (zero_padding_enabled) {
+        const auto padding = block.compressed_length - bytes_written;
+        std::memmove(
+            block_buffer.data() + padding, block_buffer.data(), bytes_written);
+        std::fill(block_buffer.data(), block_buffer.data() + padding, 0);
+
+      } else {
+        std::fill(block_buffer.data() + bytes_written,
+                  block_buffer.data() + block.compressed_length,
+                  0);
+      }
+    }
+    TEST_EQ(sink(block_buffer.data(), block_buffer.size()),
+            block_buffer.size());
+  }
+  // Any trailing data past the compressed blocks is forwarded to the sink.
+  TEST_EQ(
+      sink(reinterpret_cast<const uint8_t*>(blob.data()) + uncompressed_size,
+           blob.size() - uncompressed_size),
+      blob.size() - uncompressed_size);
+  return true;
+}
+
 Blob TryCompressBlob(std::string_view blob,
                      const std::vector<CompressedBlock>& block_info,
                      const bool zero_padding_enabled,
@@ -39,79 +131,20 @@
     uncompressed_size += block.uncompressed_length;
     compressed_size += block.compressed_length;
   }
-  CHECK_EQ(uncompressed_size, blob.size());
-  Blob output(utils::RoundUp(compressed_size, kBlockSize));
-  auto hc = LZ4_createStreamHC();
-  DEFER {
-    if (hc) {
-      LZ4_freeStreamHC(hc);
-      hc = nullptr;
-    }
-  };
-  size_t compressed_offset = 0;
-  for (const auto& block : block_info) {
-    // Execute the increment at end of each loop
-    DEFER { compressed_offset += block.compressed_length; };
-    CHECK_LE(compressed_offset + block.compressed_length, output.size());
-
-    if (!block.IsCompressed()) {
-      std::memcpy(output.data() + compressed_offset,
-                  blob.data() + block.uncompressed_offset,
-                  block.compressed_length);
-      continue;
-    }
-    // LZ4 spec enforces that last op of a compressed block must be an insert op
-    // of at least 5 bytes. Compressors will try to conform to that requirement
-    // if the input size is just right. We don't want that. So always give a
-    // little bit more data.
-    int src_size = uncompressed_size - block.uncompressed_offset;
-    uint64_t bytes_written = 0;
-    switch (compression_algo.type()) {
-      case CompressionAlgorithm::LZ4HC:
-        bytes_written = LZ4_compress_HC_destSize(
-            hc,
-            blob.data() + block.uncompressed_offset,
-            reinterpret_cast<char*>(output.data()) + compressed_offset,
-            &src_size,
-            block.compressed_length,
-            compression_algo.level());
-        break;
-      case CompressionAlgorithm::LZ4:
-        bytes_written = LZ4_compress_destSize(
-            blob.data() + block.uncompressed_offset,
-            reinterpret_cast<char*>(output.data()) + compressed_offset,
-            &src_size,
-            block.compressed_length);
-        break;
-      default:
-        CHECK(false) << "Unrecognized compression algorithm: "
-                     << compression_algo.type();
-        break;
-    }
-    // Last block may have trailing zeros
-    CHECK_LE(bytes_written, block.compressed_length);
-    if (bytes_written < block.compressed_length) {
-      if (zero_padding_enabled) {
-        const auto padding = block.compressed_length - bytes_written;
-        // LOG(INFO) << "Padding: " << padding;
-        CHECK_LE(compressed_offset + padding + bytes_written, output.size());
-        std::memmove(output.data() + compressed_offset + padding,
-                     output.data() + compressed_offset,
-                     bytes_written);
-        CHECK_LE(compressed_offset + padding, output.size());
-        std::fill(output.data() + compressed_offset,
-                  output.data() + compressed_offset + padding,
-                  0);
-
-      } else {
-        std::fill(output.data() + compressed_offset + bytes_written,
-                  output.data() + compressed_offset + block.compressed_length,
-                  0);
-      }
-    }
+  TEST_EQ(uncompressed_size, blob.size());
+  Blob output;
+  output.reserve(utils::RoundUp(compressed_size, kBlockSize));
+  if (!TryCompressBlob(blob,
+                       block_info,
+                       zero_padding_enabled,
+                       compression_algo,
+                       [&output](const uint8_t* data, size_t size) {
+                         output.insert(output.end(), data, data + size);
+                         return size;
+                       })) {
+    return {};
   }
-  // Any trailing data will be copied to the output buffer.
-  output.insert(output.end(), blob.begin() + uncompressed_size, blob.end());
+
   return output;
 }
 
@@ -164,11 +197,6 @@
         block.uncompressed_length,
         block.uncompressed_length);
     if (bytes_decompressed < 0) {
-      Blob cluster_hash;
-      HashCalculator::RawHashOfBytes(
-          cluster.data(), cluster.size(), &cluster_hash);
-      Blob blob_hash;
-      HashCalculator::RawHashOfBytes(blob.data(), blob.size(), &blob_hash);
       LOG(FATAL) << "Failed to decompress, " << bytes_decompressed
                  << ", output_cursor = "
                  << output.size() - block.uncompressed_length
@@ -177,7 +205,8 @@
                  << ", cluster_size = " << block.compressed_length
                  << ", dest capacity = " << block.uncompressed_length
                  << ", input margin = " << inputmargin << " "
-                 << HexEncode(cluster_hash) << " " << HexEncode(blob_hash);
+                 << HashCalculator::SHA256Digest(cluster) << " "
+                 << HashCalculator::SHA256Digest(blob);
       return {};
     }
     compressed_offset += block.compressed_length;
@@ -197,11 +226,6 @@
   return output;
 }
 
-[[nodiscard]] std::string_view ToStringView(const Blob& blob) noexcept {
-  return std::string_view{reinterpret_cast<const char*>(blob.data()),
-                          blob.size()};
-}
-
 Blob TryDecompressBlob(const Blob& blob,
                        const std::vector<CompressedBlock>& block_info,
                        const bool zero_padding_enabled) {
@@ -216,11 +240,6 @@
   return out;
 }
 
-[[nodiscard]] std::string_view ToStringView(const void* data,
-                                            size_t size) noexcept {
-  return std::string_view(reinterpret_cast<const char*>(data), size);
-}
-
 std::ostream& operator<<(std::ostream& out, const CompressedBlockInfo& info) {
   out << "BlockInfo { compressed_length: " << info.compressed_length()
       << ", uncompressed_length: " << info.uncompressed_length()
diff --git a/lz4diff/lz4diff_compress.h b/lz4diff/lz4diff_compress.h
index 7cbb9ac..a1ac8fa 100644
--- a/lz4diff/lz4diff_compress.h
+++ b/lz4diff/lz4diff_compress.h
@@ -18,11 +18,12 @@
 #define UPDATE_ENGINE_LZ4DIFF_LZ4DIFF_COMPRESS_H_
 
 #include "lz4diff_format.h"
-
+#include <functional>
 #include <string_view>
 
 namespace chromeos_update_engine {
 
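+// Output callback: consumes |size| bytes from the given buffer and returns
+// the number of bytes consumed; any value != |size| signals an error.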
+using SinkFunc = std::function<size_t(const uint8_t*, size_t)>;
+
 // |TryCompressBlob| and |TryDecompressBlob| are inverse function of each other.
 // One compresses data into fixed size output chunks, one decompresses fixed
 // size blocks.
@@ -36,6 +37,11 @@
                      const std::vector<CompressedBlock>& block_info,
                      const bool zero_padding_enabled,
                      const CompressionAlgorithm compression_algo);
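+// Streaming variant of the above: emits each recompressed chunk through
+// |sink| instead of returning the whole blob. Returns false on failure.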
+bool TryCompressBlob(std::string_view blob,
+                     const std::vector<CompressedBlock>& block_info,
+                     const bool zero_padding_enabled,
+                     const CompressionAlgorithm compression_algo,
+                     const SinkFunc& sink);
 
 Blob TryDecompressBlob(std::string_view blob,
                        const std::vector<CompressedBlock>& block_info,
@@ -44,11 +50,6 @@
                        const std::vector<CompressedBlock>& block_info,
                        const bool zero_padding_enabled);
 
-[[nodiscard]] std::string_view ToStringView(const Blob& blob) noexcept;
-
-[[nodiscard]] std::string_view ToStringView(const void* data,
-                                            size_t size) noexcept;
-
 std::ostream& operator<<(std::ostream& out, const CompressedBlockInfo& info);
 
 std::ostream& operator<<(std::ostream& out, const CompressedBlock& block);
diff --git a/lz4diff/lz4patch.cc b/lz4diff/lz4patch.cc
index 7766e24..9de6d58 100644
--- a/lz4diff/lz4patch.cc
+++ b/lz4diff/lz4patch.cc
@@ -30,6 +30,7 @@
 
 #include "android-base/strings.h"
 #include "lz4diff/lz4diff.h"
+#include "lz4diff/lz4diff.pb.h"
 #include "lz4diff_compress.h"
 #include "lz4diff_format.h"
 #include "puffin/puffpatch.h"
@@ -168,45 +169,6 @@
   return err == 0;
 }
 
-bool ApplyPostfixPatch(
-    std::string_view recompressed_blob,
-    const google::protobuf::RepeatedPtrField<CompressedBlockInfo>&
-        dst_block_info,
-    Blob* output) {
-  // Output size should be always identical to size of recompressed_blob
-  output->clear();
-  output->reserve(recompressed_blob.size());
-  size_t offset = 0;
-  for (const auto& block_info : dst_block_info) {
-    auto block =
-        recompressed_blob.substr(offset, block_info.compressed_length());
-    if (!block_info.sha256_hash().empty()) {
-      Blob actual_hash;
-      CHECK(HashCalculator::RawHashOfBytes(
-          block.data(), block.size(), &actual_hash));
-      if (ToStringView(actual_hash) != block_info.sha256_hash()) {
-        LOG(ERROR) << "Block " << block_info
-                   << " is corrupted. This usually means the patch generator "
-                      "used a different version of LZ4, or an incompatible LZ4 "
-                      "patch generator was used, or LZ4 produces different "
-                      "output on different platforms. Expected hash: "
-                   << HexEncode(block_info.sha256_hash())
-                   << ", actual hash: " << HexEncode(actual_hash);
-      }
-    }
-    if (!block_info.postfix_bspatch().empty()) {
-      Blob fixed_block;
-      TEST_AND_RETURN_FALSE(
-          bspatch(block, block_info.postfix_bspatch(), &fixed_block));
-      output->insert(output->end(), fixed_block.begin(), fixed_block.end());
-    } else {
-      output->insert(output->end(), block.begin(), block.end());
-    }
-    offset += block_info.compressed_length();
-  }
-  return true;
-}
-
 bool puffpatch(std::string_view input_data,
                std::string_view patch_data,
                Blob* output) {
@@ -219,6 +181,7 @@
 std::vector<CompressedBlock> ToCompressedBlockVec(
     const google::protobuf::RepeatedPtrField<CompressedBlockInfo>& rpf) {
   std::vector<CompressedBlock> ret;
+  ret.reserve(rpf.size());
   for (const auto& block : rpf) {
     auto& info = ret.emplace_back();
     info.compressed_length = block.compressed_length();
@@ -237,6 +200,129 @@
   return false;
 }
 
+size_t GetCompressedSize(
+    const google::protobuf::RepeatedPtrField<CompressedBlockInfo>& info) {
+  size_t compressed_size = 0;
+  for (const auto& block : info) {
+    compressed_size += block.compressed_length();
+  }
+  return compressed_size;
+}
+
+size_t GetDecompressedSize(
+    const google::protobuf::RepeatedPtrField<CompressedBlockInfo>& info) {
+  size_t decompressed_size = 0;
+  for (const auto& block : info) {
+    decompressed_size += block.uncompressed_length();
+  }
+  return decompressed_size;
+}
+
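+// Applies the inner patch (computed over decompressed data) to
+// |decompressed_src|, dispatching on the patch type in the lz4diff header.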
+bool ApplyInnerPatch(Blob decompressed_src,
+                     const Lz4diffPatch& patch,
+                     Blob* decompressed_dst) {
+  switch (patch.pb_header.inner_type()) {
+    case InnerPatchType::BSDIFF:
+      TEST_AND_RETURN_FALSE(bspatch(
+          ToStringView(decompressed_src), patch.inner_patch, decompressed_dst));
+      break;
+    case InnerPatchType::PUFFDIFF:
+      TEST_AND_RETURN_FALSE(puffpatch(
+          ToStringView(decompressed_src), patch.inner_patch, decompressed_dst));
+      break;
+    default:
+      LOG(ERROR) << "Unsupported patch type: " << patch.pb_header.inner_type();
+      return false;
+  }
+  return true;
+}
+
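+// Streaming core of lz4diff patching: decompress |src_data|, apply the inner
+// patch, recompress to the destination layout, and emit through |sink|.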
+// TODO(zhangkelvin) Rewrite this with C++20 coroutines once they're
+// available. Hand-coding CPS is not fun.
+bool Lz4Patch(std::string_view src_data,
+              const Lz4diffPatch& patch,
+              const SinkFunc& sink) {
+  auto decompressed_src = TryDecompressBlob(
+      src_data,
+      ToCompressedBlockVec(patch.pb_header.src_info().block_info()),
+      patch.pb_header.src_info().zero_padding_enabled());
+  TEST_AND_RETURN_FALSE(!decompressed_src.empty());
+  Blob decompressed_dst;
+  const auto decompressed_dst_size =
+      GetDecompressedSize(patch.pb_header.dst_info().block_info());
+  decompressed_dst.reserve(decompressed_dst_size);
+
+  TEST_AND_RETURN_FALSE(
+      ApplyInnerPatch(std::move(decompressed_src), patch, &decompressed_dst));
+
+  if (!HasPosfixPatches(patch)) {
+    return TryCompressBlob(
+        ToStringView(decompressed_dst),
+        ToCompressedBlockVec(patch.pb_header.dst_info().block_info()),
+        patch.pb_header.dst_info().zero_padding_enabled(),
+        patch.pb_header.dst_info().algo(),
+        sink);
+  }
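+  // Wrap |sink| so each recompressed block is hash-checked and, when present,
+  // fixed up with its per-block postfix bspatch before reaching the real
+  // sink; data past the known blocks is forwarded as-is.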
+  auto postfix_patcher =
+      [&sink,
+       block_idx = 0,
+       &dst_block_info = patch.pb_header.dst_info().block_info()](
+          const uint8_t* data, size_t size) mutable -> size_t {
+    if (block_idx >= dst_block_info.size()) {
+      return sink(data, size);
+    }
+    const auto& block_info = dst_block_info[block_idx];
+    TEST_EQ(size, block_info.compressed_length());
+    DEFER { block_idx++; };
+    if (block_info.postfix_bspatch().empty()) {
+      return sink(data, size);
+    }
+    if (!block_info.sha256_hash().empty()) {
+      Blob actual_hash;
+      TEST_AND_RETURN_FALSE(
+          HashCalculator::RawHashOfBytes(data, size, &actual_hash));
+      if (ToStringView(actual_hash) != block_info.sha256_hash()) {
+        LOG(ERROR) << "Block " << block_info
+                   << " is corrupted. This usually means the patch generator "
+                      "used a different version of LZ4, or an incompatible LZ4 "
+                      "patch generator was used, or LZ4 produces different "
+                      "output on different platforms. Expected hash: "
+                   << HexEncode(block_info.sha256_hash())
+                   << ", actual hash: " << HexEncode(actual_hash);
+        return 0;
+      }
+    }
+    Blob fixed_block;
+    TEST_AND_RETURN_FALSE(
+        bspatch(std::string_view(reinterpret_cast<const char*>(data), size),
+                block_info.postfix_bspatch(),
+                &fixed_block));
+    return sink(fixed_block.data(), fixed_block.size());
+  };
+
+  return TryCompressBlob(
+      ToStringView(decompressed_dst),
+      ToCompressedBlockVec(patch.pb_header.dst_info().block_info()),
+      patch.pb_header.dst_info().zero_padding_enabled(),
+      patch.pb_header.dst_info().algo(),
+      postfix_patcher);
+}
+
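+// Convenience overload: collects the streamed output into |output|.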
+bool Lz4Patch(std::string_view src_data,
+              const Lz4diffPatch& patch,
+              Blob* output) {
+  Blob blob;
+  const auto output_size =
+      GetCompressedSize(patch.pb_header.dst_info().block_info());
+  blob.reserve(output_size);
+  TEST_AND_RETURN_FALSE(Lz4Patch(
+      src_data, patch, [&blob](const uint8_t* data, size_t size) -> size_t {
+        blob.insert(blob.end(), data, data + size);
+        return size;
+      }));
+  *output = std::move(blob);
+  return true;
+}
+
 }  // namespace
 
 bool Lz4Patch(std::string_view src_data,
@@ -244,57 +330,15 @@
               Blob* output) {
   Lz4diffPatch patch;
   TEST_AND_RETURN_FALSE(ParseLz4DifffPatch(patch_data, &patch));
+  return Lz4Patch(src_data, patch, output);
+}
 
-  Blob decompressed_dst;
-  // This scope is here just so that |decompressed_src| can be freed earlier
-  // than function scope.
-  // This whole patching algorithm has non-trivial memory usage, as it needs to
-  // load source data in to memory and decompress that. Now both src and
-  // decompressed src data are in memory.
-  // TODO(b/206729162) Make lz4diff more memory efficient and more streaming
-  // friendly.
-  {
-    const auto decompressed_src = TryDecompressBlob(
-        src_data,
-        ToCompressedBlockVec(patch.pb_header.src_info().block_info()),
-        patch.pb_header.src_info().zero_padding_enabled());
-    switch (patch.pb_header.inner_type()) {
-      case InnerPatchType::BSDIFF:
-        TEST_AND_RETURN_FALSE(bspatch(ToStringView(decompressed_src),
-                                      patch.inner_patch,
-                                      &decompressed_dst));
-        break;
-      case InnerPatchType::PUFFDIFF:
-        TEST_AND_RETURN_FALSE(puffpatch(ToStringView(decompressed_src),
-                                        patch.inner_patch,
-                                        &decompressed_dst));
-        break;
-      default:
-        LOG(ERROR) << "Unsupported patch type: "
-                   << patch.pb_header.inner_type();
-        return false;
-    }
-  }
-
-  auto recompressed_dst = TryCompressBlob(
-      ToStringView(decompressed_dst),
-      ToCompressedBlockVec(patch.pb_header.dst_info().block_info()),
-      patch.pb_header.dst_info().zero_padding_enabled(),
-      patch.pb_header.dst_info().algo());
-  TEST_AND_RETURN_FALSE(recompressed_dst.size() > 0);
-  // free memory used by |decompressed_dst|.
-  decompressed_dst = {};
-
-  if (HasPosfixPatches(patch)) {
-    TEST_AND_RETURN_FALSE(
-        ApplyPostfixPatch(ToStringView(recompressed_dst),
-                          patch.pb_header.dst_info().block_info(),
-                          output));
-  } else {
-    *output = std::move(recompressed_dst);
-  }
-
-  return true;
+bool Lz4Patch(std::string_view src_data,
+              std::string_view patch_data,
+              const SinkFunc& sink) {
+  Lz4diffPatch patch;
+  TEST_AND_RETURN_FALSE(ParseLz4DifffPatch(patch_data, &patch));
+  return Lz4Patch(src_data, patch, sink);
 }
 
 bool Lz4Patch(const Blob& src_data, const Blob& patch_data, Blob* output) {
diff --git a/lz4diff/lz4patch.h b/lz4diff/lz4patch.h
index ce49430..8b99c23 100644
--- a/lz4diff/lz4patch.h
+++ b/lz4diff/lz4patch.h
@@ -21,6 +21,11 @@
 #include "lz4diff_format.h"
 
 namespace chromeos_update_engine {
+
+bool Lz4Patch(std::string_view src_data,
+              std::string_view patch_data,
+              const SinkFunc& sink);
+
 bool Lz4Patch(std::string_view src_data,
               std::string_view patch_data,
               Blob* output);
diff --git a/metrics_utils.cc b/metrics_utils.cc
index ade024a..ec35fe2 100644
--- a/metrics_utils.cc
+++ b/metrics_utils.cc
@@ -363,10 +363,12 @@
     return false;
 
   Time system_updated_at = Time::FromInternalValue(stored_value);
-  TimeDelta time_to_reboot = clock->GetMonotonicTime() - system_updated_at;
+  const auto current_time = clock->GetMonotonicTime();
+  TimeDelta time_to_reboot = current_time - system_updated_at;
   if (time_to_reboot.ToInternalValue() < 0) {
-    LOG(ERROR) << "time_to_reboot is negative - system_updated_at: "
-               << utils::ToString(system_updated_at);
+    LOG(WARNING) << "time_to_reboot is negative - system_updated_at: "
+                 << utils::ToString(system_updated_at) << " current time: "
+                 << utils::ToString(current_time);
     return false;
   }
   metrics_reporter->ReportTimeToReboot(time_to_reboot.InMinutes());
diff --git a/payload_consumer/block_extent_writer.cc b/payload_consumer/block_extent_writer.cc
index 6b1fba7..055b485 100644
--- a/payload_consumer/block_extent_writer.cc
+++ b/payload_consumer/block_extent_writer.cc
@@ -42,8 +42,9 @@
 
   if (buffer_.empty() && count >= cur_extent_size) {
     if (!WriteExtent(data, cur_extent, block_size_)) {
-      LOG(ERROR) << "WriteExtent(" << cur_extent.start_block() << ", " << data
-                 << ", " << cur_extent_size << ") failed.";
+      LOG(ERROR) << "WriteExtent(" << cur_extent.start_block() << ", "
+                 << static_cast<const void*>(data) << ", " << cur_extent_size
+                 << ") failed.";
       // return value is expected to be greater than 0. Return 0 to signal error
       // condition
       return 0;
diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index 19877db..fc8858f 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -1117,11 +1117,11 @@
 
   if (calculated_op_hash != expected_op_hash) {
     LOG(ERROR) << "Hash verification failed for operation "
-               << next_operation_num_ << ". Expected hash = ";
-    utils::HexDumpVector(expected_op_hash);
+               << next_operation_num_
+               << ". Expected hash = " << HexEncode(expected_op_hash);
     LOG(ERROR) << "Calculated hash over " << operation.data_length()
-               << " bytes at offset: " << operation.data_offset() << " = ";
-    utils::HexDumpVector(calculated_op_hash);
+               << " bytes at offset: " << operation.data_offset() << " = "
+               << HexEncode(calculated_op_hash);
     return ErrorCode::kDownloadOperationHashMismatch;
   }
 
diff --git a/payload_consumer/install_operation_executor.cc b/payload_consumer/install_operation_executor.cc
index 5318cc3..cd6546f 100644
--- a/payload_consumer/install_operation_executor.cc
+++ b/payload_consumer/install_operation_executor.cc
@@ -255,6 +255,7 @@
           operation, std::move(writer), source_fd, data, count);
     default:
       LOG(ERROR) << "Unexpected operation type when executing diff ops "
+                 << operation.type() << " "
                  << operation.Type_Name(operation.type());
       return false;
   }
@@ -268,12 +269,18 @@
     size_t count) {
   brillo::Blob src_data;
 
-  brillo::Blob dst_data;
   TEST_AND_RETURN_FALSE(utils::ReadExtents(
       source_fd, operation.src_extents(), &src_data, block_size_));
-  TEST_AND_RETURN_FALSE(
-      Lz4Patch(ToStringView(src_data), ToStringView(data, count), &dst_data));
-  return writer->Write(dst_data.data(), dst_data.size());
+  TEST_AND_RETURN_FALSE(Lz4Patch(
+      ToStringView(src_data),
+      ToStringView(data, count),
+      [writer(writer.get())](const uint8_t* data, size_t size) -> size_t {
+        if (!writer->Write(data, size)) {
+          return 0;
+        }
+        return size;
+      }));
+  return true;
 }
 
 bool InstallOperationExecutor::ExecuteSourceBsdiffOperation(
diff --git a/payload_consumer/install_plan.cc b/payload_consumer/install_plan.cc
index db0af4e..91eb53b 100644
--- a/payload_consumer/install_plan.cc
+++ b/payload_consumer/install_plan.cc
@@ -187,6 +187,44 @@
           postinstall_optional == that.postinstall_optional);
 }
 
+bool InstallPlan::Partition::ParseVerityConfig(
+    const PartitionUpdate& partition) {
+  if (partition.has_hash_tree_extent()) {
+    Extent extent = partition.hash_tree_data_extent();
+    hash_tree_data_offset = extent.start_block() * block_size;
+    hash_tree_data_size = extent.num_blocks() * block_size;
+    extent = partition.hash_tree_extent();
+    hash_tree_offset = extent.start_block() * block_size;
+    hash_tree_size = extent.num_blocks() * block_size;
+    uint64_t hash_tree_data_end = hash_tree_data_offset + hash_tree_data_size;
+    if (hash_tree_offset < hash_tree_data_end) {
+      LOG(ERROR) << "Invalid hash tree extents, hash tree data ends at "
+                 << hash_tree_data_end << ", but hash tree starts at "
+                 << hash_tree_offset;
+      return false;
+    }
+    hash_tree_algorithm = partition.hash_tree_algorithm();
+    hash_tree_salt.assign(partition.hash_tree_salt().begin(),
+                          partition.hash_tree_salt().end());
+  }
+  if (partition.has_fec_extent()) {
+    Extent extent = partition.fec_data_extent();
+    fec_data_offset = extent.start_block() * block_size;
+    fec_data_size = extent.num_blocks() * block_size;
+    extent = partition.fec_extent();
+    fec_offset = extent.start_block() * block_size;
+    fec_size = extent.num_blocks() * block_size;
+    uint64_t fec_data_end = fec_data_offset + fec_data_size;
+    if (fec_offset < fec_data_end) {
+      LOG(ERROR) << "Invalid fec extents, fec data ends at " << fec_data_end
+                 << ", but fec starts at " << fec_offset;
+      return false;
+    }
+    fec_roots = partition.fec_roots();
+  }
+  return true;
+}
+
 template <typename PartitionUpdateArray>
 bool InstallPlan::ParseManifestToInstallPlan(
     const PartitionUpdateArray& partitions,
@@ -226,42 +264,11 @@
     install_part.target_hash.assign(info.hash().begin(), info.hash().end());
 
     install_part.block_size = block_size;
-    if (partition.has_hash_tree_extent()) {
-      Extent extent = partition.hash_tree_data_extent();
-      install_part.hash_tree_data_offset = extent.start_block() * block_size;
-      install_part.hash_tree_data_size = extent.num_blocks() * block_size;
-      extent = partition.hash_tree_extent();
-      install_part.hash_tree_offset = extent.start_block() * block_size;
-      install_part.hash_tree_size = extent.num_blocks() * block_size;
-      uint64_t hash_tree_data_end =
-          install_part.hash_tree_data_offset + install_part.hash_tree_data_size;
-      if (install_part.hash_tree_offset < hash_tree_data_end) {
-        LOG(ERROR) << "Invalid hash tree extents, hash tree data ends at "
-                   << hash_tree_data_end << ", but hash tree starts at "
-                   << install_part.hash_tree_offset;
-        *error = ErrorCode::kDownloadNewPartitionInfoError;
-        return false;
-      }
-      install_part.hash_tree_algorithm = partition.hash_tree_algorithm();
-      install_part.hash_tree_salt.assign(partition.hash_tree_salt().begin(),
-                                         partition.hash_tree_salt().end());
-    }
-    if (partition.has_fec_extent()) {
-      Extent extent = partition.fec_data_extent();
-      install_part.fec_data_offset = extent.start_block() * block_size;
-      install_part.fec_data_size = extent.num_blocks() * block_size;
-      extent = partition.fec_extent();
-      install_part.fec_offset = extent.start_block() * block_size;
-      install_part.fec_size = extent.num_blocks() * block_size;
-      uint64_t fec_data_end =
-          install_part.fec_data_offset + install_part.fec_data_size;
-      if (install_part.fec_offset < fec_data_end) {
-        LOG(ERROR) << "Invalid fec extents, fec data ends at " << fec_data_end
-                   << ", but fec starts at " << install_part.fec_offset;
-        *error = ErrorCode::kDownloadNewPartitionInfoError;
-        return false;
-      }
-      install_part.fec_roots = partition.fec_roots();
+    if (!install_part.ParseVerityConfig(partition)) {
+      *error = ErrorCode::kDownloadNewPartitionInfoError;
+      LOG(ERROR) << "Failed to parse partition `" << partition.partition_name()
+                 << "` verity config";
+      return false;
     }
 
     install_plan->partitions.push_back(install_part);
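
ParseVerityConfig() above converts block extents into byte ranges and rejects layouts where the hash tree (or FEC) overlaps the data it covers. A worked example of that arithmetic, assuming the usual 4096-byte block size:

#include <cassert>
#include <cstdint>

int main() {
  constexpr uint64_t kBlockSize = 4096;
  // hash_tree_data_extent = {start_block: 0, num_blocks: 10}
  const uint64_t hash_tree_data_offset = 0 * kBlockSize;  // byte 0
  const uint64_t hash_tree_data_size = 10 * kBlockSize;   // 40960 bytes
  // hash_tree_extent = {start_block: 10, num_blocks: 1}
  const uint64_t hash_tree_offset = 10 * kBlockSize;      // byte 40960
  const uint64_t hash_tree_data_end =
      hash_tree_data_offset + hash_tree_data_size;
  // Same check as the parser: the tree may start exactly at, but never
  // before, the end of its input data.
  assert(hash_tree_offset >= hash_tree_data_end);
  return 0;
}
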
diff --git a/payload_consumer/install_plan.h b/payload_consumer/install_plan.h
index 0278ea5..883aa60 100644
--- a/payload_consumer/install_plan.h
+++ b/payload_consumer/install_plan.h
@@ -156,6 +156,8 @@
     uint64_t fec_offset{0};
     uint64_t fec_size{0};
     uint32_t fec_roots{0};
+
+    bool ParseVerityConfig(const PartitionUpdate&);
   };
   std::vector<Partition> partitions;
 
diff --git a/payload_consumer/partition_writer.cc b/payload_consumer/partition_writer.cc
index b94f8c7..1fb929e 100644
--- a/payload_consumer/partition_writer.cc
+++ b/payload_consumer/partition_writer.cc
@@ -295,6 +295,16 @@
   return std::make_unique<DirectExtentWriter>(target_fd_);
 }
 
+bool PartitionWriter::ValidateSourceHash(const InstallOperation& operation,
+                                         const FileDescriptorPtr source_fd,
+                                         size_t block_size,
+                                         ErrorCode* error) {
+  brillo::Blob source_hash;
+  TEST_AND_RETURN_FALSE_ERRNO(fd_utils::ReadAndHashExtents(
+      source_fd, operation.src_extents(), block_size, &source_hash));
+  return ValidateSourceHash(source_hash, operation, source_fd, error);
+}
+
 bool PartitionWriter::ValidateSourceHash(const brillo::Blob& calculated_hash,
                                          const InstallOperation& operation,
                                          const FileDescriptorPtr source_fd,
diff --git a/payload_consumer/partition_writer.h b/payload_consumer/partition_writer.h
index 89e5884..e620c47 100644
--- a/payload_consumer/partition_writer.h
+++ b/payload_consumer/partition_writer.h
@@ -46,6 +46,10 @@
                                  const InstallOperation& operation,
                                  const FileDescriptorPtr source_fd,
                                  ErrorCode* error);
+  static bool ValidateSourceHash(const InstallOperation& operation,
+                                 const FileDescriptorPtr source_fd,
+                                 size_t block_size,
+                                 ErrorCode* error);
 
   // Perform necessary initialization work before InstallOperation can be
   // applied to this partition
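
The added declaration is a convenience overload: it hashes the source extents itself and then delegates to the existing overload that only compares. The shape of that pattern, with simplified stand-in types (brillo::Blob and the extent reader are replaced by plain vectors):

#include <cstdint>
#include <vector>

struct Op {
  std::vector<uint8_t> src_sha256;  // stand-in for src_sha256_hash()
};

// Existing overload: caller supplies the already-calculated hash.
static bool Validate(const std::vector<uint8_t>& calculated, const Op& op) {
  return calculated == op.src_sha256;
}

// New overload: derive the hash (identity here, for the sketch), then
// delegate to the comparing overload.
static bool Validate(const Op& op, const std::vector<uint8_t>& source_bytes) {
  const std::vector<uint8_t> calculated = source_bytes;  // "hash" it
  return Validate(calculated, op);
}

int main() {
  const Op op{{1, 2, 3}};
  return Validate(op, {1, 2, 3}) ? 0 : 1;
}
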
diff --git a/payload_consumer/vabc_partition_writer.cc b/payload_consumer/vabc_partition_writer.cc
index 9db88a9..8ae0b51 100644
--- a/payload_consumer/vabc_partition_writer.cc
+++ b/payload_consumer/vabc_partition_writer.cc
@@ -33,6 +33,7 @@
 #include "update_engine/payload_consumer/extent_map.h"
 #include "update_engine/payload_consumer/extent_reader.h"
 #include "update_engine/payload_consumer/file_descriptor.h"
+#include "update_engine/payload_consumer/file_descriptor_utils.h"
 #include "update_engine/payload_consumer/install_plan.h"
 #include "update_engine/payload_consumer/partition_writer.h"
 #include "update_engine/payload_consumer/snapshot_extent_writer.h"
@@ -96,7 +97,17 @@
 bool VABCPartitionWriter::Init(const InstallPlan* install_plan,
                                bool source_may_exist,
                                size_t next_op_index) {
-  xor_map_ = ComputeXorMap(partition_update_.merge_operations());
+  if (dynamic_control_->GetVirtualAbCompressionXorFeatureFlag().IsEnabled()) {
+    xor_map_ = ComputeXorMap(partition_update_.merge_operations());
+    if (!xor_map_.empty()) {
+      LOG(INFO) << "Virtual AB Compression with XOR is enabled.";
+    } else {
+      LOG(INFO) << "Device supports Virtual AB Compression with XOR, but the "
+                   "OTA package does not.";
+    }
+  } else {
+    LOG(INFO) << "Virtual AB Compression with XOR is disabled.";
+  }
   TEST_AND_RETURN_FALSE(install_plan != nullptr);
   if (source_may_exist && install_part_.source_size > 0) {
     TEST_AND_RETURN_FALSE(!install_part_.source_path.empty());
@@ -267,9 +278,16 @@
 
 [[nodiscard]] bool VABCPartitionWriter::PerformSourceCopyOperation(
     const InstallOperation& operation, ErrorCode* error) {
-  // TODO(zhangkelvin) Probably just ignore SOURCE_COPY? They should be taken
-  // care of during Init();
-  return true;
+  // COPY ops are already handled during Init(), so no actual work is needed,
+  // but we still want to verify that all blocks contain the expected data.
+  if (!operation.has_src_sha256_hash()) {
+    return true;
+  }
+  auto source_fd = std::make_shared<EintrSafeFileDescriptor>();
+  TEST_AND_RETURN_FALSE_ERRNO(
+      source_fd->Open(install_part_.source_path.c_str(), O_RDONLY));
+  return PartitionWriter::ValidateSourceHash(
+      operation, source_fd, block_size_, error);
 }
 
 bool VABCPartitionWriter::PerformReplaceOperation(const InstallOperation& op,
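
Init() above now consults the device-side feature flag before computing the XOR map, and PerformSourceCopyOperation() re-verifies source blocks instead of silently succeeding. A reduced sketch of the three-way flag/package gating; FeatureFlag here is a stand-in for update_engine's class of the same name:

#include <cstdio>

struct FeatureFlag {
  bool enabled;
  bool IsEnabled() const { return enabled; }
};

int main() {
  const FeatureFlag device_flag{true};
  const bool package_has_xor_ops = false;  // e.g. xor_map_ came back empty
  if (!device_flag.IsEnabled()) {
    std::puts("Virtual AB Compression with XOR is disabled.");
  } else if (package_has_xor_ops) {
    std::puts("Virtual AB Compression with XOR is enabled.");
  } else {
    std::puts("Device supports XOR, but the OTA package does not.");
  }
  return 0;
}
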
diff --git a/payload_consumer/vabc_partition_writer_unittest.cc b/payload_consumer/vabc_partition_writer_unittest.cc
index f331091..20aa75f 100644
--- a/payload_consumer/vabc_partition_writer_unittest.cc
+++ b/payload_consumer/vabc_partition_writer_unittest.cc
@@ -48,7 +48,11 @@
 static constexpr size_t FAKE_PART_SIZE = 4096 * 50;
 class VABCPartitionWriterTest : public ::testing::Test {
  public:
-  void SetUp() override { ftruncate(source_part_.fd, FAKE_PART_SIZE); }
+  void SetUp() override {
+    ftruncate(source_part_.fd, FAKE_PART_SIZE);
+    ON_CALL(dynamic_control_, GetVirtualAbCompressionXorFeatureFlag())
+        .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::NONE)));
+  }
 
  protected:
   CowMergeOperation* AddMergeOp(PartitionUpdate* partition,
@@ -102,6 +106,8 @@
         ON_CALL(*cow_writer, EmitLabel(_)).WillByDefault(Return(true));
         return cow_writer;
       }));
+  EXPECT_CALL(dynamic_control_, GetVirtualAbCompressionXorFeatureFlag())
+      .WillRepeatedly(Return(FeatureFlag(FeatureFlag::Value::LAUNCH)));
   ASSERT_TRUE(writer_.Init(&install_plan_, true, 0));
 }
 
@@ -125,6 +131,8 @@
             ON_CALL(*cow_writer, EmitLabel(_)).WillByDefault(Return(true));
             return cow_writer;
           }));
+  EXPECT_CALL(dynamic_control_, GetVirtualAbCompressionXorFeatureFlag())
+      .WillRepeatedly(Return(FeatureFlag(FeatureFlag::Value::LAUNCH)));
   ASSERT_TRUE(writer_.Init(&install_plan_, true, 0));
 }
 
@@ -224,10 +232,12 @@
             .WillOnce(Return(true));
         return cow_writer;
       }));
+  EXPECT_CALL(dynamic_control_, GetVirtualAbCompressionXorFeatureFlag())
+      .WillRepeatedly(Return(FeatureFlag(FeatureFlag::Value::LAUNCH)));
   VABCPartitionWriter writer_{
       partition_update_, install_part_, &dynamic_control_, kBlockSize};
   ASSERT_TRUE(writer_.Init(&install_plan_, true, 0));
-  auto patch_data = GetNoopBSDIFF(kBlockSize * 5);
+  const auto patch_data = GetNoopBSDIFF(kBlockSize * 5);
   ASSERT_GT(patch_data.size(), 0UL);
   ASSERT_TRUE(writer_.PerformDiffOperation(
       *install_op, nullptr, patch_data.data(), patch_data.size()));
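
The tests above layer a safe ON_CALL default (XOR off, installed in SetUp()) under per-test EXPECT_CALL overrides that return LAUNCH. A self-contained sketch of that gmock layering; MockDynamicControl and the trimmed FeatureFlag are illustrative stand-ins:

#include <gmock/gmock.h>
#include <gtest/gtest.h>

using ::testing::Return;

struct FeatureFlag {
  enum class Value { NONE, LAUNCH };
  explicit FeatureFlag(Value v) : value(v) {}
  Value value;
};

class MockDynamicControl {
 public:
  MOCK_METHOD(FeatureFlag, GetVirtualAbCompressionXorFeatureFlag, (), (const));
};

TEST(FeatureFlagMockTest, ExpectCallOverridesOnCallDefault) {
  MockDynamicControl control;
  // Fixture-wide default: feature off.
  ON_CALL(control, GetVirtualAbCompressionXorFeatureFlag())
      .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::NONE)));
  // Per-test override: feature launched.
  EXPECT_CALL(control, GetVirtualAbCompressionXorFeatureFlag())
      .WillRepeatedly(Return(FeatureFlag(FeatureFlag::Value::LAUNCH)));
  EXPECT_TRUE(control.GetVirtualAbCompressionXorFeatureFlag().value ==
              FeatureFlag::Value::LAUNCH);
}
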
diff --git a/payload_consumer/verity_writer_android.cc b/payload_consumer/verity_writer_android.cc
index b669b4d..91efa3e 100644
--- a/payload_consumer/verity_writer_android.cc
+++ b/payload_consumer/verity_writer_android.cc
@@ -116,19 +116,22 @@
     return false;
   }
   // All hash tree data blocks have been hashed; write the hash tree to disk.
-  LOG(INFO) << "Writing verity hash tree to " << partition_->target_path;
-  TEST_AND_RETURN_FALSE(hash_tree_builder_->BuildHashTree());
-  TEST_AND_RETURN_FALSE_ERRNO(
-      write_fd->Seek(partition_->hash_tree_offset, SEEK_SET));
-  auto success =
-      hash_tree_builder_->WriteHashTree([write_fd](auto data, auto size) {
-        return utils::WriteAll(write_fd, data, size);
-      });
-  // hashtree builder already prints error messages.
-  TEST_AND_RETURN_FALSE(success);
-  hash_tree_builder_.reset();
+  if (hash_tree_builder_) {
+    LOG(INFO) << "Writing verity hash tree to "
+              << partition_->readonly_target_path;
+    TEST_AND_RETURN_FALSE(hash_tree_builder_->BuildHashTree());
+    TEST_AND_RETURN_FALSE_ERRNO(
+        write_fd->Seek(partition_->hash_tree_offset, SEEK_SET));
+    auto success =
+        hash_tree_builder_->WriteHashTree([write_fd](auto data, auto size) {
+          return utils::WriteAll(write_fd, data, size);
+        });
+    // hashtree builder already prints error messages.
+    TEST_AND_RETURN_FALSE(success);
+    hash_tree_builder_.reset();
+  }
   if (partition_->fec_size != 0) {
-    LOG(INFO) << "Writing verity FEC to " << partition_->target_path;
+    LOG(INFO) << "Writing verity FEC to " << partition_->readonly_target_path;
     TEST_AND_RETURN_FALSE(EncodeFEC(read_fd,
                                     write_fd,
                                     partition_->fec_data_offset,
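
The new guard exists because a partition with verity disabled never creates a HashTreeBuilder, so Finalize() must skip the hash-tree phase rather than dereference a null unique_ptr; the FEC phase remains independent. A reduced sketch of that control flow, with a stand-in builder type:

#include <memory>

struct BuilderSketch {  // stand-in for HashTreeBuilder
  bool BuildHashTree() { return true; }
};

bool FinalizeSketch(std::unique_ptr<BuilderSketch>& builder, bool has_fec) {
  if (builder) {  // only build/write the tree when verity is configured
    if (!builder->BuildHashTree()) {
      return false;
    }
    builder.reset();
  }
  if (has_fec) {
    // FEC encoding would run here whether or not a hash tree was written.
  }
  return true;
}

int main() {
  std::unique_ptr<BuilderSketch> no_tree;  // verity disabled for this partition
  return FinalizeSketch(no_tree, /*has_fec=*/true) ? 0 : 1;
}
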
diff --git a/payload_consumer/verity_writer_android_unittest.cc b/payload_consumer/verity_writer_android_unittest.cc
index 693bcda..5ff0189 100644
--- a/payload_consumer/verity_writer_android_unittest.cc
+++ b/payload_consumer/verity_writer_android_unittest.cc
@@ -165,4 +165,22 @@
   ASSERT_EQ(part_data, actual_part);
 }
 
+TEST_F(VerityWriterAndroidTest, HashTreeDisabled) {
+  partition_.hash_tree_size = 0;
+  partition_.hash_tree_data_size = 0;
+  partition_.hash_tree_offset = 0;
+  partition_.hash_tree_data_offset = 0;
+
+  partition_.fec_data_offset = 0;
+  partition_.fec_data_size = 4096;
+  partition_.fec_offset = 4096;
+  partition_.fec_size = 2 * 4096;
+  brillo::Blob part_data(3 * 4096, 0x1);
+  test_utils::WriteFileVector(partition_.target_path, part_data);
+  ASSERT_TRUE(verity_writer_.Init(partition_));
+  ASSERT_TRUE(verity_writer_.Update(0, part_data.data(), part_data.size()));
+  ASSERT_TRUE(
+      verity_writer_.Finalize(partition_fd_.get(), partition_fd_.get()));
+}
+
 }  // namespace chromeos_update_engine
diff --git a/payload_generator/payload_generation_config.h b/payload_generator/payload_generation_config.h
index a7ddee4..7124cb0 100644
--- a/payload_generator/payload_generation_config.h
+++ b/payload_generator/payload_generation_config.h
@@ -25,7 +25,6 @@
 
 #include <brillo/key_value_store.h>
 #include <brillo/secure_blob.h>
-#include <lz4diff/lz4diff.pb.h>
 
 #include "bsdiff/constants.h"
 #include "update_engine/payload_consumer/payload_constants.h"
diff --git a/scripts/simulate_ota.py b/scripts/simulate_ota.py
index 40f463f..bf1fc98 100644
--- a/scripts/simulate_ota.py
+++ b/scripts/simulate_ota.py
@@ -110,6 +110,7 @@
   delta_generator_args.append("--partition_names=" + ":".join(partition_names))
   delta_generator_args.append("--new_partitions=" + ":".join(new_partitions))
 
+  print("Running", " ".join(delta_generator_args))
   subprocess.check_output(delta_generator_args)
 
   valid = True
diff --git a/scripts/update_device.py b/scripts/update_device.py
index db653dc..72cee49 100755
--- a/scripts/update_device.py
+++ b/scripts/update_device.py
@@ -442,6 +442,8 @@
                       help='Perform slot switch for this OTA package')
   parser.add_argument('--perform-reset-slot-switch', action='store_true',
                       help='Perform reset slot switch for this OTA package')
+  parser.add_argument('--wipe-user-data', action='store_true',
+                      help='Wipe userdata after installing OTA')
   args = parser.parse_args()
   logging.basicConfig(
       level=logging.WARNING if args.no_verbose else logging.INFO)
@@ -493,6 +495,8 @@
     args.extra_headers += "\nSWITCH_SLOT_ON_REBOOT=0"
   if args.no_postinstall:
     args.extra_headers += "\nRUN_POST_INSTALL=0"
+  if args.wipe_user_data:
+    args.extra_headers += "\nPOWERWASH=1"
 
   with zipfile.ZipFile(args.otafile) as zfp:
     CARE_MAP_ENTRY_NAME = "care_map.pb"
diff --git a/update_engine.conf b/update_engine.conf
index b6ca3c4..2d3a655 100644
--- a/update_engine.conf
+++ b/update_engine.conf
@@ -1,2 +1,2 @@
 PAYLOAD_MAJOR_VERSION=2
-PAYLOAD_MINOR_VERSION=7
+PAYLOAD_MINOR_VERSION=8
diff --git a/update_metadata.proto b/update_metadata.proto
index d318a62..3f454ad 100644
--- a/update_metadata.proto
+++ b/update_metadata.proto
@@ -146,25 +146,6 @@
   optional bytes hash = 2;
 }
 
-// Describe an image we are based on in a human friendly way.
-// Examples:
-//   dev-channel, x86-alex, 1.2.3, mp-v3
-//   nplusone-channel, x86-alex, 1.2.4, mp-v3, dev-channel, 1.2.3
-//
-// All fields will be set, if this message is present.
-message ImageInfo {
-  optional string board = 1 [deprecated = true];
-  optional string key = 2 [deprecated = true];
-  optional string channel = 3 [deprecated = true];
-  optional string version = 4 [deprecated = true];
-
-  // If these values aren't present, they should be assumed to match
-  // the equivalent value above. They are normally only different for
-  // special image types such as nplusone images.
-  optional string build_channel = 5 [deprecated = true];
-  optional string build_version = 6 [deprecated = true];
-}
-
 message InstallOperation {
   enum Type {
     REPLACE = 0;     // Replace destination extents w/ attached data.
@@ -401,8 +382,7 @@
   // Only present in major version = 1. List of install operations for the
   // kernel and rootfs partitions. For major version = 2 see the |partitions|
   // field.
-  repeated InstallOperation install_operations = 1 [deprecated = true];
-  repeated InstallOperation kernel_install_operations = 2 [deprecated = true];
+  reserved 1, 2;
 
   // (At time of writing) usually 4096
   optional uint32 block_size = 3 [default = 4096];
@@ -415,17 +395,8 @@
   optional uint64 signatures_offset = 4;
   optional uint64 signatures_size = 5;
 
-  // Only present in major version = 1. Partition metadata used to validate the
-  // update. For major version = 2 see the |partitions| field.
-  optional PartitionInfo old_kernel_info = 6 [deprecated = true];
-  optional PartitionInfo new_kernel_info = 7 [deprecated = true];
-  optional PartitionInfo old_rootfs_info = 8 [deprecated = true];
-  optional PartitionInfo new_rootfs_info = 9 [deprecated = true];
-
-  // old_image_info will only be present for delta images.
-  optional ImageInfo old_image_info = 10 [deprecated = true];
-
-  optional ImageInfo new_image_info = 11 [deprecated = true];
+  // Fields deprecated in major version 2.
+  reserved 6, 7, 8, 9, 10, 11;
 
   // The minor version, also referred as "delta version", of the payload.
   // Minor version 0 is full payload, everything else is delta payload.