merge in nyc-release history after reset to nyc-dev
diff --git a/Android.mk b/Android.mk
index cf1310a..29591fa 100644
--- a/Android.mk
+++ b/Android.mk
@@ -552,6 +552,7 @@
ue_libpayload_generator_exported_static_libraries := \
libpayload_consumer \
update_metadata-protos \
+ liblzma \
$(ue_libpayload_consumer_exported_static_libraries) \
$(ue_update_metadata_protos_exported_static_libraries)
ue_libpayload_generator_exported_shared_libraries := \
@@ -580,7 +581,8 @@
payload_generator/payload_signer.cc \
payload_generator/raw_filesystem.cc \
payload_generator/tarjan.cc \
- payload_generator/topological_sort.cc
+ payload_generator/topological_sort.cc \
+ payload_generator/xz_android.cc
ifeq ($(HOST_OS),linux)
# Build for the host.
@@ -596,6 +598,7 @@
LOCAL_STATIC_LIBRARIES := \
libpayload_consumer \
update_metadata-protos \
+ liblzma \
$(ue_libpayload_consumer_exported_static_libraries) \
$(ue_update_metadata_protos_exported_static_libraries)
LOCAL_SHARED_LIBRARIES := \
@@ -620,6 +623,7 @@
LOCAL_STATIC_LIBRARIES := \
libpayload_consumer \
update_metadata-protos \
+ liblzma \
$(ue_libpayload_consumer_exported_static_libraries:-host=) \
$(ue_update_metadata_protos_exported_static_libraries)
LOCAL_SHARED_LIBRARIES := \
@@ -686,6 +690,38 @@
# dependencies are removed or placed behind the USE_DBUS flag.
ifdef BRILLO
+# Private and public keys for unittests.
+# ========================================================
+# Generate a module that installs a prebuilt private key and a module that
+# installs a public key generated from the private key.
+#
+# $(1): The path to the private key in pem format.
+define ue-unittest-keys
+ $(eval include $(CLEAR_VARS)) \
+ $(eval LOCAL_MODULE := ue_$(1).pem) \
+ $(eval LOCAL_MODULE_CLASS := ETC) \
+ $(eval $(if $(filter 1,$(BRILLO)),LOCAL_MODULE_TAGS := eng)) \
+ $(eval LOCAL_SRC_FILES := $(1).pem) \
+ $(eval LOCAL_MODULE_PATH := \
+ $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests) \
+ $(eval LOCAL_MODULE_STEM := $(1).pem) \
+ $(eval include $(BUILD_PREBUILT)) \
+ \
+ $(eval include $(CLEAR_VARS)) \
+ $(eval LOCAL_MODULE := ue_$(1).pub.pem) \
+ $(eval LOCAL_MODULE_CLASS := ETC) \
+ $(eval $(if $(filter 1,$(BRILLO)),LOCAL_MODULE_TAGS := eng)) \
+ $(eval LOCAL_MODULE_PATH := \
+ $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests) \
+ $(eval LOCAL_MODULE_STEM := $(1).pub.pem) \
+ $(eval include $(BUILD_SYSTEM)/base_rules.mk) \
+ $(eval $(LOCAL_BUILT_MODULE) : $(LOCAL_PATH)/$(1).pem ; \
+ openssl rsa -in $$< -pubout -out $$@)
+endef
+
+$(call ue-unittest-keys,unittest_key)
+$(call ue-unittest-keys,unittest_key2)
+
# Sample images for unittests.
# ========================================================
# Generate a prebuilt module that installs a sample image from the compressed
@@ -711,6 +747,15 @@
$(call ue-unittest-sample-image,disk_ext2_4k_empty.img)
$(call ue-unittest-sample-image,disk_ext2_unittest.img)
+# Zlib Fingerprint
+# ========================================================
+include $(CLEAR_VARS)
+LOCAL_MODULE := zlib_fingerprint
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_PATH := $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests
+LOCAL_PREBUILT_MODULE_FILE := $(TARGET_OUT_COMMON_GEN)/zlib_fingerprint
+include $(BUILD_PREBUILT)
+
# test_http_server (type: executable)
# ========================================================
# Test HTTP Server.
@@ -745,7 +790,12 @@
ue_unittest_disk_ext2_1k.img \
ue_unittest_disk_ext2_4k.img \
ue_unittest_disk_ext2_4k_empty.img \
- ue_unittest_disk_ext2_unittest.img
+ ue_unittest_disk_ext2_unittest.img \
+ ue_unittest_key.pem \
+ ue_unittest_key.pub.pem \
+ ue_unittest_key2.pem \
+ ue_unittest_key2.pub.pem \
+ zlib_fingerprint
LOCAL_MODULE_CLASS := EXECUTABLES
LOCAL_CPP_EXTENSION := .cc
LOCAL_CLANG := true
diff --git a/common/subprocess.cc b/common/subprocess.cc
index 61f2d54..9738b1d 100644
--- a/common/subprocess.cc
+++ b/common/subprocess.cc
@@ -33,6 +33,8 @@
#include <brillo/process.h>
#include <brillo/secure_blob.h>
+#include "update_engine/common/utils.h"
+
using brillo::MessageLoop;
using std::string;
using std::unique_ptr;
@@ -71,6 +73,7 @@
// Process.
bool LaunchProcess(const vector<string>& cmd,
uint32_t flags,
+ const vector<int>& output_pipes,
brillo::Process* proc) {
for (const string& arg : cmd)
proc->AddArg(arg);
@@ -84,6 +87,10 @@
env.emplace(key, value);
}
+ for (const int fd : output_pipes) {
+ proc->RedirectUsingPipe(fd, false);
+ }
+ proc->SetCloseUnusedFileDescriptors(true);
proc->RedirectUsingPipe(STDOUT_FILENO, false);
proc->SetPreExecCallback(base::Bind(&SetupChild, env, flags));
@@ -109,26 +116,21 @@
void Subprocess::OnStdoutReady(SubprocessRecord* record) {
char buf[1024];
- ssize_t rc = 0;
+ size_t bytes_read;
do {
- rc = HANDLE_EINTR(read(record->stdout_fd, buf, arraysize(buf)));
- if (rc < 0) {
- // EAGAIN and EWOULDBLOCK are normal return values when there's no more
- // input as we are in non-blocking mode.
- if (errno != EWOULDBLOCK && errno != EAGAIN) {
- PLOG(ERROR) << "Error reading fd " << record->stdout_fd;
- MessageLoop::current()->CancelTask(record->stdout_task_id);
- record->stdout_task_id = MessageLoop::kTaskIdNull;
- }
- } else if (rc == 0) {
- // A value of 0 means that the child closed its end of the pipe and there
- // is nothing else to read from stdout.
+ bytes_read = 0;
+ bool eof;
+ bool ok = utils::ReadAll(
+ record->stdout_fd, buf, arraysize(buf), &bytes_read, &eof);
+ record->stdout.append(buf, bytes_read);
+ if (!ok || eof) {
+ // There was either an error or an EOF condition, so we are done watching
+ // the file descriptor.
MessageLoop::current()->CancelTask(record->stdout_task_id);
record->stdout_task_id = MessageLoop::kTaskIdNull;
- } else {
- record->stdout.append(buf, rc);
+ return;
}
- } while (rc > 0);
+ } while (bytes_read);
}
void Subprocess::ChildExitedCallback(const siginfo_t& info) {
@@ -143,10 +145,6 @@
MessageLoop::current()->CancelTask(record->stdout_task_id);
record->stdout_task_id = MessageLoop::kTaskIdNull;
- // Release and close all the pipes now.
- record->proc.Release();
- record->proc.Reset(0);
-
// Don't print any log if the subprocess exited with exit code 0.
if (info.si_code != CLD_EXITED) {
LOG(INFO) << "Subprocess terminated with si_code " << info.si_code;
@@ -160,20 +158,28 @@
if (!record->callback.is_null()) {
record->callback.Run(info.si_status, record->stdout);
}
+ // Release and close all the pipes after calling the callback so our
+ // redirected pipes are still alive. Releasing the process first makes
+ // Reset(0) not attempt to kill the process, which is already a zombie at this
+ // point.
+ record->proc.Release();
+ record->proc.Reset(0);
+
subprocess_records_.erase(pid_record);
}
pid_t Subprocess::Exec(const vector<string>& cmd,
const ExecCallback& callback) {
- return ExecFlags(cmd, kRedirectStderrToStdout, callback);
+ return ExecFlags(cmd, kRedirectStderrToStdout, {}, callback);
}
pid_t Subprocess::ExecFlags(const vector<string>& cmd,
uint32_t flags,
+ const vector<int>& output_pipes,
const ExecCallback& callback) {
unique_ptr<SubprocessRecord> record(new SubprocessRecord(callback));
- if (!LaunchProcess(cmd, flags, &record->proc)) {
+ if (!LaunchProcess(cmd, flags, output_pipes, &record->proc)) {
LOG(ERROR) << "Failed to launch subprocess";
return 0;
}
@@ -215,6 +221,13 @@
pid_record->second->proc.Release();
}
+int Subprocess::GetPipeFd(pid_t pid, int fd) const {
+ auto pid_record = subprocess_records_.find(pid);
+ if (pid_record == subprocess_records_.end())
+ return -1;
+ return pid_record->second->proc.GetPipe(fd);
+}
+
bool Subprocess::SynchronousExec(const vector<string>& cmd,
int* return_code,
string* stdout) {
@@ -232,7 +245,10 @@
int* return_code,
string* stdout) {
brillo::ProcessImpl proc;
- if (!LaunchProcess(cmd, flags, &proc)) {
+ // It doesn't make sense to redirect some pipes in the synchronous case
+ // because we won't be reading on our end, so we don't expose the output_pipes
+ // in this case.
+ if (!LaunchProcess(cmd, flags, {}, &proc)) {
LOG(ERROR) << "Failed to launch subprocess";
return false;
}
diff --git a/common/subprocess.h b/common/subprocess.h
index 6b952dc..b655fb7 100644
--- a/common/subprocess.h
+++ b/common/subprocess.h
@@ -64,15 +64,26 @@
void Init(brillo::AsynchronousSignalHandlerInterface* async_signal_handler);
// Launches a process in the background and calls the passed |callback| when
- // the process exits.
+ // the process exits. The file descriptors specified in |output_pipes| will
+ // be available in the child as the writer end of a pipe. Use GetPipeFd() to
+ // know the reader end in the parent. Only stdin, stdout, stderr and the file
+ // descriptors in |output_pipes| will be open in the child.
// Returns the process id of the new launched process or 0 in case of failure.
pid_t Exec(const std::vector<std::string>& cmd, const ExecCallback& callback);
pid_t ExecFlags(const std::vector<std::string>& cmd,
uint32_t flags,
+ const std::vector<int>& output_pipes,
const ExecCallback& callback);
// Kills the running process with SIGTERM and ignores the callback.
- void KillExec(pid_t tag);
+ void KillExec(pid_t pid);
+
+ // Return the parent end of the pipe mapped onto |fd| in the child |pid|. This
+ // file descriptor is available until the callback for the child |pid|
+ // returns. After that the file descriptor will be closed. The passed |fd|
+ // must be one of the file descriptors passed to ExecFlags() in
+ // |output_pipes|, otherwise returns -1.
+ int GetPipeFd(pid_t pid, int fd) const;
// Executes a command synchronously. Returns true on success. If |stdout| is
// non-null, the process output is stored in it, otherwise the output is
diff --git a/common/subprocess_unittest.cc b/common/subprocess_unittest.cc
index c6cbab0..5ca44e8 100644
--- a/common/subprocess_unittest.cc
+++ b/common/subprocess_unittest.cc
@@ -37,6 +37,7 @@
#include <brillo/message_loops/message_loop.h>
#include <brillo/message_loops/message_loop_utils.h>
#include <brillo/strings/string_utils.h>
+#include <brillo/unittest_utils.h>
#include <gtest/gtest.h>
#include "update_engine/common/test_utils.h"
@@ -95,6 +96,27 @@
MessageLoop::current()->BreakLoop();
}
+void ExpectedDataOnPipe(const Subprocess* subprocess,
+ pid_t* pid,
+ int child_fd,
+ const string& child_fd_data,
+ int expected_return_code,
+ int return_code,
+ const string& /* output */) {
+ EXPECT_EQ(expected_return_code, return_code);
+
+ // Verify that we can read the data from our end of |child_fd|.
+ int fd = subprocess->GetPipeFd(*pid, child_fd);
+ EXPECT_NE(-1, fd);
+ vector<char> buf(child_fd_data.size() + 1);
+ EXPECT_EQ(static_cast<ssize_t>(child_fd_data.size()),
+ HANDLE_EINTR(read(fd, buf.data(), buf.size())));
+ EXPECT_EQ(child_fd_data,
+ string(buf.begin(), buf.begin() + child_fd_data.size()));
+
+ MessageLoop::current()->BreakLoop();
+}
+
} // namespace
TEST_F(SubprocessTest, IsASingleton) {
@@ -125,10 +147,40 @@
EXPECT_TRUE(subprocess_.ExecFlags(
{kBinPath "/sh", "-c", "echo on stdout; echo on stderr >&2"},
0,
+ {},
base::Bind(&ExpectedResults, 0, "on stdout\n")));
loop_.Run();
}
+TEST_F(SubprocessTest, PipeRedirectFdTest) {
+ pid_t pid;
+ pid = subprocess_.ExecFlags(
+ {kBinPath "/sh", "-c", "echo on pipe >&3"},
+ 0,
+ {3},
+ base::Bind(&ExpectedDataOnPipe, &subprocess_, &pid, 3, "on pipe\n", 0));
+ EXPECT_NE(0, pid);
+
+ // Wrong file descriptor values should return -1.
+ EXPECT_EQ(-1, subprocess_.GetPipeFd(pid, 123));
+ loop_.Run();
+ // Calling GetPipeFd() after the callback runs is invalid.
+ EXPECT_EQ(-1, subprocess_.GetPipeFd(pid, 3));
+}
+
+// Test that a pipe file descriptor open in the parent is not open in the child.
+TEST_F(SubprocessTest, PipeClosedWhenNotRedirectedTest) {
+ brillo::ScopedPipe pipe;
+ const vector<string> cmd = {kBinPath "/sh", "-c",
+ base::StringPrintf("echo on pipe >/proc/self/fd/%d", pipe.writer)};
+ EXPECT_TRUE(subprocess_.ExecFlags(
+ cmd,
+ 0,
+ {},
+ base::Bind(&ExpectedResults, 1, "")));
+ loop_.Run();
+}
+
TEST_F(SubprocessTest, EnvVarsAreFiltered) {
EXPECT_TRUE(
subprocess_.Exec({kUsrBinPath "/env"}, base::Bind(&ExpectedEnvVars)));
diff --git a/common/utils.cc b/common/utils.cc
index 912bc96..304aba2 100644
--- a/common/utils.cc
+++ b/common/utils.cc
@@ -61,6 +61,7 @@
#include "update_engine/common/subprocess.h"
#include "update_engine/payload_consumer/file_descriptor.h"
#include "update_engine/payload_consumer/file_writer.h"
+#include "update_engine/payload_consumer/payload_constants.h"
using base::Time;
using base::TimeDelta;
@@ -188,6 +189,35 @@
return true;
}
+bool ReadAll(
+ int fd, void* buf, size_t count, size_t* out_bytes_read, bool* eof) {
+ char* c_buf = static_cast<char*>(buf);
+ size_t bytes_read = 0;
+ *eof = false;
+ while (bytes_read < count) {
+ ssize_t rc = HANDLE_EINTR(read(fd, c_buf + bytes_read, count - bytes_read));
+ if (rc < 0) {
+ // EAGAIN and EWOULDBLOCK are normal return values when there's no more
+ // input and we are in non-blocking mode.
+ if (errno != EWOULDBLOCK && errno != EAGAIN) {
+ PLOG(ERROR) << "Error reading fd " << fd;
+ *out_bytes_read = bytes_read;
+ return false;
+ }
+ break;
+ } else if (rc == 0) {
+ // A value of 0 means that we reached EOF and there is nothing else to
+ // read from this fd.
+ *eof = true;
+ break;
+ } else {
+ bytes_read += rc;
+ }
+ }
+ *out_bytes_read = bytes_read;
+ return true;
+}
+
bool WriteAll(int fd, const void* buf, size_t count) {
const char* c_buf = static_cast<const char*>(buf);
ssize_t bytes_written = 0;
@@ -1133,6 +1163,19 @@
return false;
}
+bool IsZlibCompatible(const string& fingerprint) {
+ if (fingerprint.size() != sizeof(kCompatibleZlibFingerprint[0]) - 1) {
+ LOG(ERROR) << "Invalid fingerprint: " << fingerprint;
+ return false;
+ }
+ for (auto& f : kCompatibleZlibFingerprint) {
+ if (base::CompareCaseInsensitiveASCII(fingerprint, f) == 0) {
+ return true;
+ }
+ }
+ return false;
+}
+
bool ReadExtents(const string& path, const vector<Extent>& extents,
brillo::Blob* out_data, ssize_t out_data_size,
size_t block_size) {
diff --git a/common/utils.h b/common/utils.h
index df06ef1..686f117 100644
--- a/common/utils.h
+++ b/common/utils.h
@@ -75,6 +75,14 @@
size_t count,
off_t offset);
+// Calls read() repeatedly until |count| bytes are read or EOF or EWOULDBLOCK
+// is reached. Returns whether all read() calls succeeded (including EWOULDBLOCK
+// as a success case), sets |eof| to whether the eof was reached and sets
+// |out_bytes_read| to the actual number of bytes read regardless of the return
+// value.
+bool ReadAll(
+ int fd, void* buf, size_t count, size_t* out_bytes_read, bool* eof);
+
// Calls pread() repeatedly until count bytes are read, or EOF is reached.
// Returns number of bytes read in *bytes_read. Returns true on success.
bool PReadAll(int fd, void* buf, size_t count, off_t offset,
@@ -343,6 +351,9 @@
bool GetMinorVersion(const brillo::KeyValueStore& store,
uint32_t* minor_version);
+// Returns whether zlib |fingerprint| is compatible with zlib we are using.
+bool IsZlibCompatible(const std::string& fingerprint);
+
// This function reads the specified data in |extents| into |out_data|. The
// extents are read from the file at |path|. |out_data_size| is the size of
// |out_data|. Returns false if the number of bytes to read given in
diff --git a/p2p_manager.cc b/p2p_manager.cc
index 734918d..127e5ff 100644
--- a/p2p_manager.cc
+++ b/p2p_manager.cc
@@ -396,7 +396,7 @@
// We expect to run just "p2p-client" and find it in the path.
child_pid_ = Subprocess::Get().ExecFlags(
- cmd, Subprocess::kSearchPath,
+ cmd, Subprocess::kSearchPath, {},
Bind(&LookupData::OnLookupDone, base::Unretained(this)));
if (!child_pid_) {
diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index f490c08..e2613da 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -827,10 +827,8 @@
bool DeltaPerformer::CanPerformInstallOperation(
const chromeos_update_engine::InstallOperation& operation) {
- // Move and source_copy operations don't require any data blob, so they can
- // always be performed.
- if (operation.type() == InstallOperation::MOVE ||
- operation.type() == InstallOperation::SOURCE_COPY)
+ // If we don't have a data blob we can apply it right away.
+ if (!operation.has_data_offset() && !operation.has_data_length())
return true;
// See if we have the entire data blob in the buffer
diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc
index 3d7f8fa..329dc67 100644
--- a/payload_consumer/delta_performer_integration_test.cc
+++ b/payload_consumer/delta_performer_integration_test.cc
@@ -506,8 +506,8 @@
payload_config.is_delta = !full_rootfs;
payload_config.hard_chunk_size = chunk_size;
payload_config.rootfs_partition_size = kRootFSPartitionSize;
- payload_config.major_version = kChromeOSMajorPayloadVersion;
- payload_config.minor_version = minor_version;
+ payload_config.version.major = kChromeOSMajorPayloadVersion;
+ payload_config.version.minor = minor_version;
if (!full_rootfs) {
payload_config.source.partitions.emplace_back(kLegacyPartitionNameRoot);
payload_config.source.partitions.emplace_back(kLegacyPartitionNameKernel);
diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc
index 98a378b..d1918b7 100644
--- a/payload_consumer/delta_performer_unittest.cc
+++ b/payload_consumer/delta_performer_unittest.cc
@@ -56,7 +56,9 @@
extern const char* kUnittestPrivateKeyPath;
extern const char* kUnittestPublicKeyPath;
-static const char* kBogusMetadataSignature1 =
+namespace {
+
+const char kBogusMetadataSignature1[] =
"awSFIUdUZz2VWFiR+ku0Pj00V7bPQPQFYQSXjEXr3vaw3TE4xHV5CraY3/YrZpBv"
"J5z4dSBskoeuaO1TNC/S6E05t+yt36tE4Fh79tMnJ/z9fogBDXWgXLEUyG78IEQr"
"YH6/eBsQGT2RJtBgXIXbZ9W+5G9KmGDoPOoiaeNsDuqHiBc/58OFsrxskH8E6vMS"
@@ -64,7 +66,13 @@
"fjoTeLYZpt+WN65Vu7jJ0cQN8e1y+2yka5112wpRf/LLtPgiAjEZnsoYpLUd7CoV"
"pLRtClp97kN2+tXGNBQqkA==";
-namespace {
+#ifdef __ANDROID__
+const char kZlibFingerprintPath[] =
+ "/data/nativetest/update_engine_unittests/zlib_fingerprint";
+#else
+const char kZlibFingerprintPath[] = "/etc/zlib_fingerprint";
+#endif // __ANDROID__
+
// Different options that determine what we should fill into the
// install_plan.metadata_signature to simulate the contents received in the
// Omaha response.
@@ -130,8 +138,8 @@
blob_data.size()));
PayloadGenerationConfig config;
- config.major_version = major_version;
- config.minor_version = minor_version;
+ config.version.major = major_version;
+ config.version.minor = minor_version;
PayloadFile payload;
EXPECT_TRUE(payload.Init(config));
@@ -812,4 +820,14 @@
EXPECT_EQ(DeltaPerformer::kSupportedMajorPayloadVersion, major_version);
}
+// Test that we recognize our own zlib compressor implementation as supported.
+// All other equivalent implementations should be added to
+// kCompatibleZlibFingerprint.
+TEST_F(DeltaPerformerTest, ZlibFingerprintMatch) {
+ string fingerprint;
+ EXPECT_TRUE(base::ReadFileToString(base::FilePath(kZlibFingerprintPath),
+ &fingerprint));
+ EXPECT_TRUE(utils::IsZlibCompatible(fingerprint));
+}
+
} // namespace chromeos_update_engine
diff --git a/payload_consumer/payload_constants.cc b/payload_consumer/payload_constants.cc
index e47b4f8..6078a74 100644
--- a/payload_consumer/payload_constants.cc
+++ b/payload_consumer/payload_constants.cc
@@ -25,6 +25,7 @@
const uint32_t kInPlaceMinorPayloadVersion = 1;
const uint32_t kSourceMinorPayloadVersion = 2;
const uint32_t kOpSrcHashMinorPayloadVersion = 3;
+const uint32_t kImgdiffMinorPayloadVersion = 4;
const char kLegacyPartitionNameKernel[] = "boot";
const char kLegacyPartitionNameRoot[] = "system";
@@ -32,6 +33,15 @@
const char kDeltaMagic[4] = {'C', 'r', 'A', 'U'};
const char kBspatchPath[] = "bspatch";
+// The zlib in Android and Chrome OS are currently compatible with each other,
+// so they are sharing the same array, but if in the future they are no longer
+// compatible with each other, we could make the same change on the other one to
+// make them compatible again or use ifdef here.
+const char kCompatibleZlibFingerprint[][65] = {
+ "ea973605ccbbdb24f59f449c5f65861a1a9bc7a4353377aaaa06cb3e0f1cfbd7",
+ "3747fa404cceb00a5ec3606fc779510aaa784d5864ab1d5c28b9e267c40aad5c",
+};
+
const char* InstallOperationTypeName(InstallOperation_Type op_type) {
switch (op_type) {
case InstallOperation::BSDIFF:
diff --git a/payload_consumer/payload_constants.h b/payload_consumer/payload_constants.h
index c3cd363..2dbc5fa 100644
--- a/payload_consumer/payload_constants.h
+++ b/payload_consumer/payload_constants.h
@@ -43,6 +43,9 @@
// The minor version that allows per-operation source hash.
extern const uint32_t kOpSrcHashMinorPayloadVersion;
+// The minor version that allows IMGDIFF operation.
+extern const uint32_t kImgdiffMinorPayloadVersion;
+
// The kernel and rootfs partition names used by the BootControlInterface when
// handling update payloads with a major version 1. The names of the updated
@@ -53,6 +56,15 @@
extern const char kBspatchPath[];
extern const char kDeltaMagic[4];
+// The list of compatible SHA256 hashes of zlib source code.
// This is used to check if the source image has a compatible zlib (produces
// the same compressed result given the same input).
+// When a new fingerprint is found, please examine the changes in zlib source
+// carefully and determine if it's still compatible with previous version, if
+// yes then add the new fingerprint to this array, otherwise remove all previous
+// fingerprints in the array first, and only include the new fingerprint.
+extern const char kCompatibleZlibFingerprint[2][65];
+
// A block number denoting a hole on a sparse file. Used on Extents to refer to
// section of blocks not present on disk on a sparse file.
const uint64_t kSparseHole = std::numeric_limits<uint64_t>::max();
diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc
index 7fa7009..db1ec3c 100644
--- a/payload_consumer/postinstall_runner_action.cc
+++ b/payload_consumer/postinstall_runner_action.cc
@@ -16,15 +16,17 @@
#include "update_engine/payload_consumer/postinstall_runner_action.h"
+#include <fcntl.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/mount.h>
#include <sys/types.h>
-#include <vector>
+#include <unistd.h>
#include <base/files/file_path.h>
#include <base/files/file_util.h>
#include <base/logging.h>
+#include <base/strings/string_split.h>
#include <base/strings/string_util.h>
#include "update_engine/common/action_processor.h"
@@ -33,8 +35,19 @@
#include "update_engine/common/subprocess.h"
#include "update_engine/common/utils.h"
+namespace {
+
+// The file descriptor number from the postinstall program's perspective where
+// it can report status updates. This can be any number greater than 2 (stderr),
+// but must be kept in sync with the "bin/postinst_progress" defined in the
+// sample_images.sh file.
+const int kPostinstallStatusFd = 3;
+
+} // namespace
+
namespace chromeos_update_engine {
+using brillo::MessageLoop;
using std::string;
using std::vector;
@@ -50,6 +63,19 @@
}
}
+ // Initialize all the partition weights.
+ partition_weight_.resize(install_plan_.partitions.size());
+ total_weight_ = 0;
+ for (size_t i = 0; i < install_plan_.partitions.size(); ++i) {
+ // TODO(deymo): This code sets the weight to all the postinstall commands,
+ // but we could remember how long they took in the past and use those
+ // values.
+ partition_weight_[i] = install_plan_.partitions[i].run_postinstall;
+ total_weight_ += partition_weight_[i];
+ }
+ accumulated_weight_ = 0;
+ ReportProgress(0);
+
PerformPartitionPostinstall();
}
@@ -127,19 +153,104 @@
// Runs the postinstall script asynchronously to free up the main loop while
// it's running.
- vector<string> command = {abs_path, partition.target_path};
- current_command_ = Subprocess::Get().Exec(
+ vector<string> command = {abs_path};
+#ifdef __ANDROID__
+ // In Brillo and Android, we pass the slot number and status fd.
+ command.push_back(std::to_string(install_plan_.target_slot));
+ command.push_back(std::to_string(kPostinstallStatusFd));
+#else
+ // Chrome OS postinstall expects the target rootfs as the first parameter.
+ command.push_back(partition.target_path);
+#endif // __ANDROID__
+
+ current_command_ = Subprocess::Get().ExecFlags(
command,
+ Subprocess::kRedirectStderrToStdout,
+ {kPostinstallStatusFd},
base::Bind(&PostinstallRunnerAction::CompletePartitionPostinstall,
base::Unretained(this)));
// Subprocess::Exec should never return a negative process id.
CHECK_GE(current_command_, 0);
- if (!current_command_)
+ if (!current_command_) {
CompletePartitionPostinstall(1, "Postinstall didn't launch");
+ return;
+ }
+
+ // Monitor the status file descriptor.
+ progress_fd_ =
+ Subprocess::Get().GetPipeFd(current_command_, kPostinstallStatusFd);
+ int fd_flags = fcntl(progress_fd_, F_GETFL, 0) | O_NONBLOCK;
+ if (HANDLE_EINTR(fcntl(progress_fd_, F_SETFL, fd_flags)) < 0) {
+ PLOG(ERROR) << "Unable to set non-blocking I/O mode on fd " << progress_fd_;
+ }
+
+ progress_task_ = MessageLoop::current()->WatchFileDescriptor(
+ FROM_HERE,
+ progress_fd_,
+ MessageLoop::WatchMode::kWatchRead,
+ true,
+ base::Bind(&PostinstallRunnerAction::OnProgressFdReady,
+ base::Unretained(this)));
}
-void PostinstallRunnerAction::CleanupMount() {
+void PostinstallRunnerAction::OnProgressFdReady() {
+ char buf[1024];
+ size_t bytes_read;
+ do {
+ bytes_read = 0;
+ bool eof;
+ bool ok =
+ utils::ReadAll(progress_fd_, buf, arraysize(buf), &bytes_read, &eof);
+ progress_buffer_.append(buf, bytes_read);
+ // Process every line.
+ vector<string> lines = base::SplitString(
+ progress_buffer_, "\n", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
+ if (!lines.empty()) {
+ progress_buffer_ = lines.back();
+ lines.pop_back();
+ for (const auto& line : lines) {
+ ProcessProgressLine(line);
+ }
+ }
+ if (!ok || eof) {
+ // There was either an error or an EOF condition, so we are done watching
+ // the file descriptor.
+ MessageLoop::current()->CancelTask(progress_task_);
+ progress_task_ = MessageLoop::kTaskIdNull;
+ return;
+ }
+ } while (bytes_read);
+}
+
+bool PostinstallRunnerAction::ProcessProgressLine(const string& line) {
+ double frac = 0;
+ if (sscanf(line.c_str(), "global_progress %lf", &frac) == 1) {
+ ReportProgress(frac);
+ return true;
+ }
+
+ return false;
+}
+
+void PostinstallRunnerAction::ReportProgress(double frac) {
+ if (!delegate_)
+ return;
+ if (current_partition_ >= partition_weight_.size()) {
+ delegate_->ProgressUpdate(1.);
+ return;
+ }
+ if (!isfinite(frac) || frac < 0)
+ frac = 0;
+ if (frac > 1)
+ frac = 1;
+ double postinst_action_progress =
+ (accumulated_weight_ + partition_weight_[current_partition_] * frac) /
+ total_weight_;
+ delegate_->ProgressUpdate(postinst_action_progress);
+}
+
+void PostinstallRunnerAction::Cleanup() {
utils::UnmountFilesystem(fs_mount_dir_);
#ifndef __ANDROID__
if (!base::DeleteFile(base::FilePath(fs_mount_dir_), false)) {
@@ -147,12 +258,19 @@
}
#endif // !__ANDROID__
fs_mount_dir_.clear();
+
+ progress_fd_ = -1;
+ if (progress_task_ != MessageLoop::kTaskIdNull) {
+ MessageLoop::current()->CancelTask(progress_task_);
+ progress_task_ = MessageLoop::kTaskIdNull;
+ }
+ progress_buffer_.clear();
}
void PostinstallRunnerAction::CompletePartitionPostinstall(
int return_code, const string& output) {
current_command_ = 0;
- CleanupMount();
+ Cleanup();
if (return_code != 0) {
LOG(ERROR) << "Postinst command failed with code: " << return_code;
@@ -173,7 +291,10 @@
}
return CompletePostinstall(error_code);
}
+ accumulated_weight_ += partition_weight_[current_partition_];
current_partition_++;
+ ReportProgress(0);
+
PerformPartitionPostinstall();
}
@@ -227,7 +348,7 @@
// the unretained reference to this object.
Subprocess::Get().KillExec(current_command_);
current_command_ = 0;
- CleanupMount();
+ Cleanup();
}
} // namespace chromeos_update_engine
diff --git a/payload_consumer/postinstall_runner_action.h b/payload_consumer/postinstall_runner_action.h
index a626ce6..4cdc47e 100644
--- a/payload_consumer/postinstall_runner_action.h
+++ b/payload_consumer/postinstall_runner_action.h
@@ -18,6 +18,10 @@
#define UPDATE_ENGINE_PAYLOAD_CONSUMER_POSTINSTALL_RUNNER_ACTION_H_
#include <string>
+#include <vector>
+
+#include <brillo/message_loops/message_loop.h>
+#include <gtest/gtest_prod.h>
#include "update_engine/common/action.h"
#include "update_engine/payload_consumer/install_plan.h"
@@ -40,12 +44,24 @@
void ResumeAction() override;
void TerminateProcessing() override;
+ class DelegateInterface {
+ public:
+ virtual ~DelegateInterface() = default;
+
+ // Called whenever there is an overall progress update from the postinstall
+ // programs.
+ virtual void ProgressUpdate(double progress) = 0;
+ };
+
+ void set_delegate(DelegateInterface* delegate) { delegate_ = delegate; }
+
// Debugging/logging
static std::string StaticType() { return "PostinstallRunnerAction"; }
std::string Type() const override { return StaticType(); }
private:
friend class PostinstallRunnerActionTest;
+ FRIEND_TEST(PostinstallRunnerActionTest, ProcessProgressLineTest);
// Special constructor used for testing purposes.
PostinstallRunnerAction(BootControlInterface* boot_control,
@@ -55,8 +71,25 @@
void PerformPartitionPostinstall();
- // Unmount and remove the mountpoint directory if needed.
- void CleanupMount();
+ // Called whenever the |progress_fd_| has data available to read.
+ void OnProgressFdReady();
+
+ // Updates the action progress according to the |line| passed from the
+ // postinstall program. Valid lines are:
+ // global_progress <frac>
+ // <frac> should be between 0.0 and 1.0; sets the progress to the
+ // <frac> value.
+ bool ProcessProgressLine(const std::string& line);
+
+ // Report the progress to the delegate given that the postinstall operation
+ // for |current_partition_| has a current progress of |frac|, a value between
+ // 0 and 1 for that step.
+ void ReportProgress(double frac);
+
+ // Cleanup the setup made when running postinstall for a given partition.
+ // Unmount and remove the mountpoint directory if needed and cleanup the
+ // status file descriptor and message loop task watching for it.
+ void Cleanup();
// Subprocess::Exec callback.
void CompletePartitionPostinstall(int return_code,
@@ -75,6 +108,22 @@
// InstallPlan.
size_t current_partition_{0};
+ // A non-negative value representing the estimated weight of each partition
+ // passed in the install plan. The weight is used to predict the overall
+ // progress from the individual progress of each partition and should
+ // correspond to the time it takes to run it.
+ std::vector<double> partition_weight_;
+
+ // The sum of all the weights in |partition_weight_|.
+ double total_weight_{0};
+
+ // The sum of all the weights in |partition_weight_| up to but not including
+ // the |current_partition_|.
+ double accumulated_weight_{0};
+
+ // The delegate used to notify of progress updates, if any.
+ DelegateInterface* delegate_{nullptr};
+
// The BootControlInerface used to mark the new slot as ready.
BootControlInterface* boot_control_;
@@ -89,6 +138,14 @@
// Postinstall command currently running, or 0 if no program running.
pid_t current_command_{0};
+ // The parent progress file descriptor used to watch for progress reports from
+ // the postinstall program and the task watching for them.
+ int progress_fd_{-1};
+ brillo::MessageLoop::TaskId progress_task_{brillo::MessageLoop::kTaskIdNull};
+
+ // A buffer of a partial read line from the progress file descriptor.
+ std::string progress_buffer_;
+
DISALLOW_COPY_AND_ASSIGN(PostinstallRunnerAction);
};
diff --git a/payload_consumer/postinstall_runner_action_unittest.cc b/payload_consumer/postinstall_runner_action_unittest.cc
index c6c5ecf..d5b05a6 100644
--- a/payload_consumer/postinstall_runner_action_unittest.cc
+++ b/payload_consumer/postinstall_runner_action_unittest.cc
@@ -32,6 +32,7 @@
#include <brillo/bind_lambda.h>
#include <brillo/message_loops/base_message_loop.h>
#include <brillo/message_loops/message_loop_utils.h>
+#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "update_engine/common/constants.h"
@@ -74,6 +75,12 @@
bool processing_stopped_called_{false};
};
+class MockPostinstallRunnerActionDelegate
+ : public PostinstallRunnerAction::DelegateInterface {
+ public:
+ MOCK_METHOD1(ProgressUpdate, void(double progress));
+};
+
class PostinstallRunnerActionTest : public ::testing::Test {
protected:
void SetUp() override {
@@ -161,7 +168,10 @@
string postinstall_image_;
FakeBootControl fake_boot_control_;
- PostinstActionProcessorDelegate delegate_;
+ PostinstActionProcessorDelegate processor_delegate_;
+
+ // The PostinstallRunnerAction delegate receiving the progress updates.
+ PostinstallRunnerAction::DelegateInterface* setup_action_delegate_{nullptr};
// A pointer to the posinstall_runner action and the processor.
PostinstallRunnerAction* postinstall_action_{nullptr};
@@ -188,13 +198,14 @@
PostinstallRunnerAction runner_action(&fake_boot_control_,
powerwash_marker_file_.c_str());
postinstall_action_ = &runner_action;
+ runner_action.set_delegate(setup_action_delegate_);
BondActions(&feeder_action, &runner_action);
ObjectCollectorAction<InstallPlan> collector_action;
BondActions(&runner_action, &collector_action);
processor.EnqueueAction(&feeder_action);
processor.EnqueueAction(&runner_action);
processor.EnqueueAction(&collector_action);
- processor.set_delegate(&delegate_);
+ processor.set_delegate(&processor_delegate_);
loop_.PostTask(FROM_HERE,
base::Bind([&processor] { processor.StartProcessing(); }));
@@ -202,21 +213,43 @@
ASSERT_FALSE(processor.IsRunning());
postinstall_action_ = nullptr;
processor_ = nullptr;
- EXPECT_TRUE(delegate_.processing_stopped_called_ ||
- delegate_.processing_done_called_);
- if (delegate_.processing_done_called_) {
+ EXPECT_TRUE(processor_delegate_.processing_stopped_called_ ||
+ processor_delegate_.processing_done_called_);
+ if (processor_delegate_.processing_done_called_) {
// Sanity check that the code was set when the processor finishes.
- EXPECT_TRUE(delegate_.code_set_);
+ EXPECT_TRUE(processor_delegate_.code_set_);
}
}
+TEST_F(PostinstallRunnerActionTest, ProcessProgressLineTest) {
+ PostinstallRunnerAction action(&fake_boot_control_,
+ powerwash_marker_file_.c_str());
+ testing::StrictMock<MockPostinstallRunnerActionDelegate> mock_delegate_;
+ action.set_delegate(&mock_delegate_);
+
+ action.current_partition_ = 1;
+ action.partition_weight_ = {1, 2, 5};
+ action.accumulated_weight_ = 1;
+ action.total_weight_ = 8;
+
+  // 50% of the second partition is 2/8 = 0.25 of the total.
+ EXPECT_CALL(mock_delegate_, ProgressUpdate(0.25));
+ action.ProcessProgressLine("global_progress 0.5");
+ testing::Mock::VerifyAndClearExpectations(&mock_delegate_);
+
+ // None of these should trigger a progress update.
+ action.ProcessProgressLine("foo_bar");
+ action.ProcessProgressLine("global_progress");
+ action.ProcessProgressLine("global_progress ");
+}
+
// Test that postinstall succeeds in the simple case of running the default
// /postinst command which only exits 0.
TEST_F(PostinstallRunnerActionTest, RunAsRootSimpleTest) {
ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
RunPosinstallAction(loop.dev(), kPostinstallDefaultScript, false);
- EXPECT_EQ(ErrorCode::kSuccess, delegate_.code_);
- EXPECT_TRUE(delegate_.processing_done_called_);
+ EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
+ EXPECT_TRUE(processor_delegate_.processing_done_called_);
// Since powerwash_required was false, this should not trigger a powerwash.
EXPECT_FALSE(utils::FileExists(powerwash_marker_file_.c_str()));
@@ -225,14 +258,14 @@
TEST_F(PostinstallRunnerActionTest, RunAsRootRunSymlinkFileTest) {
ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
RunPosinstallAction(loop.dev(), "bin/postinst_link", false);
- EXPECT_EQ(ErrorCode::kSuccess, delegate_.code_);
+ EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
}
TEST_F(PostinstallRunnerActionTest, RunAsRootPowerwashRequiredTest) {
ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
// Run a simple postinstall program but requiring a powerwash.
RunPosinstallAction(loop.dev(), "bin/postinst_example", true);
- EXPECT_EQ(ErrorCode::kSuccess, delegate_.code_);
+ EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
// Check that the powerwash marker file was set.
string actual_cmd;
@@ -245,7 +278,7 @@
// fail.
TEST_F(PostinstallRunnerActionTest, RunAsRootCantMountTest) {
RunPosinstallAction("/dev/null", kPostinstallDefaultScript, false);
- EXPECT_EQ(ErrorCode::kPostinstallRunnerError, delegate_.code_);
+ EXPECT_EQ(ErrorCode::kPostinstallRunnerError, processor_delegate_.code_);
// In case of failure, Postinstall should not signal a powerwash even if it
// was requested.
@@ -257,7 +290,7 @@
TEST_F(PostinstallRunnerActionTest, RunAsRootErrScriptTest) {
ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
RunPosinstallAction(loop.dev(), "bin/postinst_fail1", false);
- EXPECT_EQ(ErrorCode::kPostinstallRunnerError, delegate_.code_);
+ EXPECT_EQ(ErrorCode::kPostinstallRunnerError, processor_delegate_.code_);
}
// The exit code 3 and 4 are a specials cases that would be reported back to
@@ -265,14 +298,15 @@
TEST_F(PostinstallRunnerActionTest, RunAsRootFirmwareBErrScriptTest) {
ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
RunPosinstallAction(loop.dev(), "bin/postinst_fail3", false);
- EXPECT_EQ(ErrorCode::kPostinstallBootedFromFirmwareB, delegate_.code_);
+ EXPECT_EQ(ErrorCode::kPostinstallBootedFromFirmwareB,
+ processor_delegate_.code_);
}
// Check that you can't specify an absolute path.
TEST_F(PostinstallRunnerActionTest, RunAsRootAbsolutePathNotAllowedTest) {
ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
RunPosinstallAction(loop.dev(), "/etc/../bin/sh", false);
- EXPECT_EQ(ErrorCode::kPostinstallRunnerError, delegate_.code_);
+ EXPECT_EQ(ErrorCode::kPostinstallRunnerError, processor_delegate_.code_);
}
#ifdef __ANDROID__
@@ -281,7 +315,7 @@
TEST_F(PostinstallRunnerActionTest, RunAsRootCheckFileContextsTest) {
ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
RunPosinstallAction(loop.dev(), "bin/self_check_context", false);
- EXPECT_EQ(ErrorCode::kSuccess, delegate_.code_);
+ EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
}
#endif // __ANDROID__
@@ -295,8 +329,8 @@
base::Unretained(this)));
RunPosinstallAction(loop.dev(), "bin/postinst_suspend", false);
// postinst_suspend returns 0 only if it was suspended at some point.
- EXPECT_EQ(ErrorCode::kSuccess, delegate_.code_);
- EXPECT_TRUE(delegate_.processing_done_called_);
+ EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
+ EXPECT_TRUE(processor_delegate_.processing_done_called_);
}
// Test that we can cancel a postinstall action while it is running.
@@ -308,8 +342,28 @@
RunPosinstallAction(loop.dev(), "bin/postinst_suspend", false);
// When canceling the action, the action never finished and therefore we had
// a ProcessingStopped call instead.
- EXPECT_FALSE(delegate_.code_set_);
- EXPECT_TRUE(delegate_.processing_stopped_called_);
+ EXPECT_FALSE(processor_delegate_.code_set_);
+ EXPECT_TRUE(processor_delegate_.processing_stopped_called_);
+}
+
+// Test that we parse and process the progress reports from the progress
+// file descriptor.
+TEST_F(PostinstallRunnerActionTest, RunAsRootProgressUpdatesTest) {
+ testing::StrictMock<MockPostinstallRunnerActionDelegate> mock_delegate_;
+ testing::InSequence s;
+ EXPECT_CALL(mock_delegate_, ProgressUpdate(0));
+
+ // The postinst_progress program will call with 0.25, 0.5 and 1.
+ EXPECT_CALL(mock_delegate_, ProgressUpdate(0.25));
+ EXPECT_CALL(mock_delegate_, ProgressUpdate(0.5));
+ EXPECT_CALL(mock_delegate_, ProgressUpdate(1.));
+
+ EXPECT_CALL(mock_delegate_, ProgressUpdate(1.));
+
+ ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
+ setup_action_delegate_ = &mock_delegate_;
+ RunPosinstallAction(loop.dev(), "bin/postinst_progress", false);
+ EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
}
} // namespace chromeos_update_engine
diff --git a/payload_generator/ab_generator.cc b/payload_generator/ab_generator.cc
index 7caf897..8c736a5 100644
--- a/payload_generator/ab_generator.cc
+++ b/payload_generator/ab_generator.cc
@@ -28,6 +28,7 @@
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/delta_diff_utils.h"
+using chromeos_update_engine::diff_utils::IsAReplaceOperation;
using std::string;
using std::vector;
@@ -46,19 +47,17 @@
size_t soft_chunk_blocks = config.soft_chunk_size / config.block_size;
aops->clear();
- TEST_AND_RETURN_FALSE(diff_utils::DeltaReadPartition(
- aops,
- old_part,
- new_part,
- hard_chunk_blocks,
- soft_chunk_blocks,
- blob_file,
- true)); // src_ops_allowed
+ TEST_AND_RETURN_FALSE(diff_utils::DeltaReadPartition(aops,
+ old_part,
+ new_part,
+ hard_chunk_blocks,
+ soft_chunk_blocks,
+ config.version,
+ blob_file));
LOG(INFO) << "done reading " << new_part.name;
- TEST_AND_RETURN_FALSE(FragmentOperations(aops,
- new_part.path,
- blob_file));
+ TEST_AND_RETURN_FALSE(
+ FragmentOperations(config.version, aops, new_part.path, blob_file));
SortOperationsByDestination(aops);
// Use the soft_chunk_size when merging operations to prevent merging all
@@ -69,12 +68,10 @@
merge_chunk_blocks = hard_chunk_blocks;
}
- TEST_AND_RETURN_FALSE(MergeOperations(aops,
- merge_chunk_blocks,
- new_part.path,
- blob_file));
+ TEST_AND_RETURN_FALSE(MergeOperations(
+ aops, config.version, merge_chunk_blocks, new_part.path, blob_file));
- if (config.minor_version == kOpSrcHashMinorPayloadVersion)
+ if (config.version.minor >= kOpSrcHashMinorPayloadVersion)
TEST_AND_RETURN_FALSE(AddSourceHash(aops, old_part.path));
return true;
@@ -85,24 +82,22 @@
sort(aops->begin(), aops->end(), diff_utils::CompareAopsByDestination);
}
-bool ABGenerator::FragmentOperations(
- vector<AnnotatedOperation>* aops,
- const string& target_part_path,
- BlobFileWriter* blob_file) {
+bool ABGenerator::FragmentOperations(const PayloadVersion& version,
+ vector<AnnotatedOperation>* aops,
+ const string& target_part_path,
+ BlobFileWriter* blob_file) {
vector<AnnotatedOperation> fragmented_aops;
for (const AnnotatedOperation& aop : *aops) {
if (aop.op.type() == InstallOperation::SOURCE_COPY) {
TEST_AND_RETURN_FALSE(SplitSourceCopy(aop, &fragmented_aops));
- } else if ((aop.op.type() == InstallOperation::REPLACE) ||
- (aop.op.type() == InstallOperation::REPLACE_BZ)) {
- TEST_AND_RETURN_FALSE(SplitReplaceOrReplaceBz(aop, &fragmented_aops,
- target_part_path,
- blob_file));
+ } else if (IsAReplaceOperation(aop.op.type())) {
+ TEST_AND_RETURN_FALSE(SplitAReplaceOp(
+ version, aop, target_part_path, &fragmented_aops, blob_file));
} else {
fragmented_aops.push_back(aop);
}
}
- *aops = fragmented_aops;
+ *aops = std::move(fragmented_aops);
return true;
}
@@ -159,15 +154,14 @@
return true;
}
-bool ABGenerator::SplitReplaceOrReplaceBz(
- const AnnotatedOperation& original_aop,
- vector<AnnotatedOperation>* result_aops,
- const string& target_part_path,
- BlobFileWriter* blob_file) {
+bool ABGenerator::SplitAReplaceOp(const PayloadVersion& version,
+ const AnnotatedOperation& original_aop,
+ const string& target_part_path,
+ vector<AnnotatedOperation>* result_aops,
+ BlobFileWriter* blob_file) {
InstallOperation original_op = original_aop.op;
+ TEST_AND_RETURN_FALSE(IsAReplaceOperation(original_op.type()));
const bool is_replace = original_op.type() == InstallOperation::REPLACE;
- TEST_AND_RETURN_FALSE(is_replace ||
- original_op.type() == InstallOperation::REPLACE_BZ);
uint32_t data_offset = original_op.data_offset();
for (int i = 0; i < original_op.dst_extents_size(); i++) {
@@ -188,8 +182,8 @@
AnnotatedOperation new_aop;
new_aop.op = new_op;
new_aop.name = base::StringPrintf("%s:%d", original_aop.name.c_str(), i);
- TEST_AND_RETURN_FALSE(AddDataAndSetType(&new_aop, target_part_path,
- blob_file));
+ TEST_AND_RETURN_FALSE(
+ AddDataAndSetType(&new_aop, version, target_part_path, blob_file));
result_aops->push_back(new_aop);
}
@@ -197,6 +191,7 @@
}
bool ABGenerator::MergeOperations(vector<AnnotatedOperation>* aops,
+ const PayloadVersion& version,
size_t chunk_blocks,
const string& target_part_path,
BlobFileWriter* blob_file) {
@@ -207,6 +202,7 @@
continue;
}
AnnotatedOperation& last_aop = new_aops.back();
+ bool last_is_a_replace = IsAReplaceOperation(last_aop.op.type());
if (last_aop.op.dst_extents_size() <= 0 ||
curr_aop.op.dst_extents_size() <= 0) {
@@ -221,11 +217,11 @@
uint32_t combined_block_count =
last_aop.op.dst_extents(last_dst_idx).num_blocks() +
curr_aop.op.dst_extents(0).num_blocks();
- bool good_op_type = curr_aop.op.type() == InstallOperation::SOURCE_COPY ||
- curr_aop.op.type() == InstallOperation::REPLACE ||
- curr_aop.op.type() == InstallOperation::REPLACE_BZ;
- if (good_op_type &&
- last_aop.op.type() == curr_aop.op.type() &&
+ bool is_a_replace = IsAReplaceOperation(curr_aop.op.type());
+
+ bool is_delta_op = curr_aop.op.type() == InstallOperation::SOURCE_COPY;
+ if (((is_delta_op && (last_aop.op.type() == curr_aop.op.type())) ||
+ (is_a_replace && last_is_a_replace)) &&
last_end_block == curr_start_block &&
combined_block_count <= chunk_blocks) {
// If the operations have the same type (which is a type that we can
@@ -236,34 +232,34 @@
last_aop.name.c_str(),
curr_aop.name.c_str());
- ExtendExtents(last_aop.op.mutable_src_extents(),
- curr_aop.op.src_extents());
- if (curr_aop.op.src_length() > 0)
- last_aop.op.set_src_length(last_aop.op.src_length() +
- curr_aop.op.src_length());
+ if (is_delta_op) {
+ ExtendExtents(last_aop.op.mutable_src_extents(),
+ curr_aop.op.src_extents());
+ if (curr_aop.op.src_length() > 0)
+ last_aop.op.set_src_length(last_aop.op.src_length() +
+ curr_aop.op.src_length());
+ }
ExtendExtents(last_aop.op.mutable_dst_extents(),
curr_aop.op.dst_extents());
if (curr_aop.op.dst_length() > 0)
last_aop.op.set_dst_length(last_aop.op.dst_length() +
curr_aop.op.dst_length());
// Set the data length to zero so we know to add the blob later.
- if (curr_aop.op.type() == InstallOperation::REPLACE ||
- curr_aop.op.type() == InstallOperation::REPLACE_BZ) {
+ if (is_a_replace)
last_aop.op.set_data_length(0);
- }
} else {
// Otherwise just include the extent as is.
new_aops.push_back(curr_aop);
}
}
- // Set the blobs for REPLACE/REPLACE_BZ operations that have been merged.
+ // Set the blobs for REPLACE/REPLACE_BZ/REPLACE_XZ operations that have been
+ // merged.
for (AnnotatedOperation& curr_aop : new_aops) {
if (curr_aop.op.data_length() == 0 &&
- (curr_aop.op.type() == InstallOperation::REPLACE ||
- curr_aop.op.type() == InstallOperation::REPLACE_BZ)) {
- TEST_AND_RETURN_FALSE(AddDataAndSetType(&curr_aop, target_part_path,
- blob_file));
+ IsAReplaceOperation(curr_aop.op.type())) {
+ TEST_AND_RETURN_FALSE(
+ AddDataAndSetType(&curr_aop, version, target_part_path, blob_file));
}
}
@@ -272,10 +268,10 @@
}
bool ABGenerator::AddDataAndSetType(AnnotatedOperation* aop,
+ const PayloadVersion& version,
const string& target_part_path,
BlobFileWriter* blob_file) {
- TEST_AND_RETURN_FALSE(aop->op.type() == InstallOperation::REPLACE ||
- aop->op.type() == InstallOperation::REPLACE_BZ);
+ TEST_AND_RETURN_FALSE(IsAReplaceOperation(aop->op.type()));
brillo::Blob data(aop->op.dst_length());
vector<Extent> dst_extents;
@@ -286,25 +282,16 @@
data.size(),
kBlockSize));
- brillo::Blob data_bz;
- TEST_AND_RETURN_FALSE(BzipCompress(data, &data_bz));
- CHECK(!data_bz.empty());
+ brillo::Blob blob;
+ InstallOperation_Type op_type;
+ TEST_AND_RETURN_FALSE(
+ diff_utils::GenerateBestFullOperation(data, version, &blob, &op_type));
- brillo::Blob* data_p = nullptr;
- InstallOperation_Type new_op_type;
- if (data_bz.size() < data.size()) {
- new_op_type = InstallOperation::REPLACE_BZ;
- data_p = &data_bz;
- } else {
- new_op_type = InstallOperation::REPLACE;
- data_p = &data;
- }
-
- // If the operation doesn't point to a data blob, then we add it.
- if (aop->op.type() != new_op_type ||
- aop->op.data_length() != data_p->size()) {
- aop->op.set_type(new_op_type);
- aop->SetOperationBlob(data_p, blob_file);
+ // If the operation doesn't point to a data blob or points to a data blob of
+  // a different type, then we add it.
+ if (aop->op.type() != op_type || aop->op.data_length() != blob.size()) {
+ aop->op.set_type(op_type);
+ aop->SetOperationBlob(blob, blob_file);
}
return true;
diff --git a/payload_generator/ab_generator.h b/payload_generator/ab_generator.h
index c2837c0..77afb87 100644
--- a/payload_generator/ab_generator.h
+++ b/payload_generator/ab_generator.h
@@ -62,7 +62,8 @@
// The |target_part_path| is the filename of the new image, where the
// destination extents refer to. The blobs of the operations in |aops| should
// reference |blob_file|. |blob_file| are updated if needed.
- static bool FragmentOperations(std::vector<AnnotatedOperation>* aops,
+ static bool FragmentOperations(const PayloadVersion& version,
+ std::vector<AnnotatedOperation>* aops,
const std::string& target_part_path,
BlobFileWriter* blob_file);
@@ -84,25 +85,27 @@
static bool SplitSourceCopy(const AnnotatedOperation& original_aop,
std::vector<AnnotatedOperation>* result_aops);
- // Takes a REPLACE/REPLACE_BZ operation |aop|, and adds one operation for each
- // dst extent in |aop| to |ops|. The new operations added to |ops| will have
- // only one dst extent each, and may be either a REPLACE or REPLACE_BZ
- // depending on whether compression is advantageous.
- static bool SplitReplaceOrReplaceBz(
- const AnnotatedOperation& original_aop,
- std::vector<AnnotatedOperation>* result_aops,
- const std::string& target_part,
- BlobFileWriter* blob_file);
+ // Takes a REPLACE, REPLACE_BZ or REPLACE_XZ operation |aop|, and adds one
+ // operation for each dst extent in |aop| to |ops|. The new operations added
+ // to |ops| will have only one dst extent each, and may be of a different
+ // type depending on whether compression is advantageous.
+ static bool SplitAReplaceOp(const PayloadVersion& version,
+ const AnnotatedOperation& original_aop,
+ const std::string& target_part,
+ std::vector<AnnotatedOperation>* result_aops,
+ BlobFileWriter* blob_file);
// Takes a sorted (by first destination extent) vector of operations |aops|
- // and merges SOURCE_COPY, REPLACE, and REPLACE_BZ operations in that vector.
+  // and merges SOURCE_COPY, REPLACE, REPLACE_BZ and REPLACE_XZ operations in
+ // that vector.
// It will merge two operations if:
- // - They are of the same type.
- // - They are contiguous.
+ // - They are both REPLACE_*, or they are both SOURCE_COPY,
+ // - Their destination blocks are contiguous.
// - Their combined blocks do not exceed |chunk_blocks| blocks.
// Note that unlike other methods, you can't pass a negative number in
// |chunk_blocks|.
static bool MergeOperations(std::vector<AnnotatedOperation>* aops,
+ const PayloadVersion& version,
size_t chunk_blocks,
const std::string& target_part,
BlobFileWriter* blob_file);
@@ -113,14 +116,15 @@
const std::string& source_part_path);
private:
- // Adds the data payload for a REPLACE/REPLACE_BZ operation |aop| by reading
- // its output extents from |target_part_path| and appending a corresponding
- // data blob to |data_fd|. The blob will be compressed if this is smaller than
- // the uncompressed form, and the operation type will be set accordingly.
- // |*blob_file| will be updated as well. If the operation happens to have
- // the right type and already points to a data blob, nothing is written.
- // Caller should only set type and data blob if it's valid.
+ // Adds the data payload for a REPLACE/REPLACE_BZ/REPLACE_XZ operation |aop|
+ // by reading its output extents from |target_part_path| and appending a
+ // corresponding data blob to |blob_file|. The blob will be compressed if this
+ // is smaller than the uncompressed form, and the operation type will be set
+ // accordingly. |*blob_file| will be updated as well. If the operation happens
+ // to have the right type and already points to a data blob, nothing is
+ // written. Caller should only set type and data blob if it's valid.
static bool AddDataAndSetType(AnnotatedOperation* aop,
+ const PayloadVersion& version,
const std::string& target_part_path,
BlobFileWriter* blob_file);
diff --git a/payload_generator/ab_generator_unittest.cc b/payload_generator/ab_generator_unittest.cc
index 60bdf26..224880d 100644
--- a/payload_generator/ab_generator_unittest.cc
+++ b/payload_generator/ab_generator_unittest.cc
@@ -20,8 +20,8 @@
#include <sys/stat.h>
#include <sys/types.h>
-#include <string>
#include <random>
+#include <string>
#include <vector>
#include <gtest/gtest.h>
@@ -122,8 +122,10 @@
// Split the operation.
vector<AnnotatedOperation> result_ops;
- ASSERT_TRUE(ABGenerator::SplitReplaceOrReplaceBz(
- aop, &result_ops, part_path, &blob_file));
+ PayloadVersion version(kChromeOSMajorPayloadVersion,
+ kSourceMinorPayloadVersion);
+ ASSERT_TRUE(ABGenerator::SplitAReplaceOp(
+ version, aop, part_path, &result_ops, &blob_file));
// Check the result.
InstallOperation_Type expected_type =
@@ -288,8 +290,10 @@
BlobFileWriter blob_file(data_fd, &data_file_size);
// Merge the operations.
- EXPECT_TRUE(ABGenerator::MergeOperations(
- &aops, 5, part_path, &blob_file));
+ PayloadVersion version(kChromeOSMajorPayloadVersion,
+ kSourceMinorPayloadVersion);
+ EXPECT_TRUE(
+ ABGenerator::MergeOperations(&aops, version, 5, part_path, &blob_file));
// Check the result.
InstallOperation_Type expected_op_type =
@@ -475,7 +479,9 @@
aops.push_back(third_aop);
BlobFileWriter blob_file(0, nullptr);
- EXPECT_TRUE(ABGenerator::MergeOperations(&aops, 5, "", &blob_file));
+ PayloadVersion version(kChromeOSMajorPayloadVersion,
+ kSourceMinorPayloadVersion);
+ EXPECT_TRUE(ABGenerator::MergeOperations(&aops, version, 5, "", &blob_file));
EXPECT_EQ(1U, aops.size());
InstallOperation first_result_op = aops[0].op;
@@ -512,9 +518,8 @@
// Test to make sure we don't merge operations that shouldn't be merged.
vector<AnnotatedOperation> aops;
InstallOperation first_op;
- first_op.set_type(InstallOperation::REPLACE_BZ);
+ first_op.set_type(InstallOperation::ZERO);
*(first_op.add_dst_extents()) = ExtentForRange(0, 1);
- first_op.set_data_length(kBlockSize);
AnnotatedOperation first_aop;
first_aop.op = first_op;
aops.push_back(first_aop);
@@ -547,7 +552,9 @@
aops.push_back(fourth_aop);
BlobFileWriter blob_file(0, nullptr);
- EXPECT_TRUE(ABGenerator::MergeOperations(&aops, 4, "", &blob_file));
+ PayloadVersion version(kChromeOSMajorPayloadVersion,
+ kSourceMinorPayloadVersion);
+ EXPECT_TRUE(ABGenerator::MergeOperations(&aops, version, 4, "", &blob_file));
// No operations were merged, the number of ops is the same.
EXPECT_EQ(4U, aops.size());
diff --git a/payload_generator/annotated_operation.cc b/payload_generator/annotated_operation.cc
index 984f921..b7f3434 100644
--- a/payload_generator/annotated_operation.cc
+++ b/payload_generator/annotated_operation.cc
@@ -36,12 +36,12 @@
}
} // namespace
-bool AnnotatedOperation::SetOperationBlob(brillo::Blob* blob,
+bool AnnotatedOperation::SetOperationBlob(const brillo::Blob& blob,
BlobFileWriter* blob_file) {
- off_t data_offset = blob_file->StoreBlob(*blob);
+ off_t data_offset = blob_file->StoreBlob(blob);
TEST_AND_RETURN_FALSE(data_offset != -1);
op.set_data_offset(data_offset);
- op.set_data_length(blob->size());
+ op.set_data_length(blob.size());
return true;
}
diff --git a/payload_generator/annotated_operation.h b/payload_generator/annotated_operation.h
index 4076070..653bab0 100644
--- a/payload_generator/annotated_operation.h
+++ b/payload_generator/annotated_operation.h
@@ -35,10 +35,10 @@
// The InstallOperation, as defined by the protobuf.
InstallOperation op;
- // Writes |blob| to the end of |data_fd|, and updates |data_file_size| to
- // match the new size of |data_fd|. It sets the data_offset and data_length
- // in AnnotatedOperation to match the offset and size of |blob| in |data_fd|.
- bool SetOperationBlob(brillo::Blob* blob, BlobFileWriter* blob_file);
+ // Writes |blob| to the end of |blob_file|. It sets the data_offset and
+ // data_length in AnnotatedOperation to match the offset and size of |blob|
+ // in |blob_file|.
+ bool SetOperationBlob(const brillo::Blob& blob, BlobFileWriter* blob_file);
};
// For logging purposes.
diff --git a/payload_generator/bzip.cc b/payload_generator/bzip.cc
index 9040193..c2388ca 100644
--- a/payload_generator/bzip.cc
+++ b/payload_generator/bzip.cc
@@ -24,63 +24,32 @@
#include "update_engine/common/utils.h"
-using std::string;
-
namespace chromeos_update_engine {
-namespace {
-
-// BzipData compresses or decompresses the input to the output.
-// Returns true on success.
-// Use one of BzipBuffToBuff*ompress as the template parameter to BzipData().
-int BzipBuffToBuffDecompress(uint8_t* out,
- uint32_t* out_length,
- const void* in,
- uint32_t in_length) {
- return BZ2_bzBuffToBuffDecompress(
- reinterpret_cast<char*>(out),
- out_length,
- reinterpret_cast<char*>(const_cast<void*>(in)),
- in_length,
- 0, // Silent verbosity
- 0); // Normal algorithm
-}
-
-int BzipBuffToBuffCompress(uint8_t* out,
- uint32_t* out_length,
- const void* in,
- uint32_t in_length) {
- return BZ2_bzBuffToBuffCompress(
- reinterpret_cast<char*>(out),
- out_length,
- reinterpret_cast<char*>(const_cast<void*>(in)),
- in_length,
- 9, // Best compression
- 0, // Silent verbosity
- 0); // Default work factor
-}
-
-template<int F(uint8_t* out,
- uint32_t* out_length,
- const void* in,
- uint32_t in_length)>
-bool BzipData(const void* const in,
- const size_t in_size,
- brillo::Blob* const out) {
+bool BzipCompress(const brillo::Blob& in, brillo::Blob* out) {
TEST_AND_RETURN_FALSE(out);
out->clear();
- if (in_size == 0) {
+ if (in.size() == 0)
return true;
- }
- // Try increasing buffer size until it works
- size_t buf_size = in_size;
+
+ // We expect a compression ratio of about 35% with bzip2, so we start with
+ // that much output space, which will then be doubled if needed.
+ size_t buf_size = 40 + in.size() * 35 / 100;
out->resize(buf_size);
+ // Try increasing buffer size until it works
for (;;) {
if (buf_size > std::numeric_limits<uint32_t>::max())
return false;
uint32_t data_size = buf_size;
- int rc = F(out->data(), &data_size, in, in_size);
+ int rc = BZ2_bzBuffToBuffCompress(
+ reinterpret_cast<char*>(out->data()),
+ &data_size,
+ reinterpret_cast<char*>(const_cast<uint8_t*>(in.data())),
+ in.size(),
+ 9, // Best compression
+ 0, // Silent verbosity
+ 0); // Default work factor
TEST_AND_RETURN_FALSE(rc == BZ_OUTBUFF_FULL || rc == BZ_OK);
if (rc == BZ_OK) {
// we're done!
@@ -94,37 +63,4 @@
}
}
-} // namespace
-
-bool BzipDecompress(const brillo::Blob& in, brillo::Blob* out) {
- return BzipData<BzipBuffToBuffDecompress>(in.data(), in.size(), out);
-}
-
-bool BzipCompress(const brillo::Blob& in, brillo::Blob* out) {
- return BzipData<BzipBuffToBuffCompress>(in.data(), in.size(), out);
-}
-
-namespace {
-template<bool F(const void* const in,
- const size_t in_size,
- brillo::Blob* const out)>
-bool BzipString(const string& str,
- brillo::Blob* out) {
- TEST_AND_RETURN_FALSE(out);
- brillo::Blob temp;
- TEST_AND_RETURN_FALSE(F(str.data(), str.size(), &temp));
- out->clear();
- out->insert(out->end(), temp.begin(), temp.end());
- return true;
-}
-} // namespace
-
-bool BzipCompressString(const string& str, brillo::Blob* out) {
- return BzipString<BzipData<BzipBuffToBuffCompress>>(str, out);
-}
-
-bool BzipDecompressString(const string& str, brillo::Blob* out) {
- return BzipString<BzipData<BzipBuffToBuffDecompress>>(str, out);
-}
-
} // namespace chromeos_update_engine
diff --git a/payload_generator/bzip.h b/payload_generator/bzip.h
index ca9956e..198768f 100644
--- a/payload_generator/bzip.h
+++ b/payload_generator/bzip.h
@@ -17,18 +17,12 @@
#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_BZIP_H_
#define UPDATE_ENGINE_PAYLOAD_GENERATOR_BZIP_H_
-#include <string>
-#include <vector>
-
#include <brillo/secure_blob.h>
namespace chromeos_update_engine {
-// Bzip2 compresses or decompresses str/in to out.
-bool BzipDecompress(const brillo::Blob& in, brillo::Blob* out);
+// Compresses the input buffer |in| into |out| with bzip2.
bool BzipCompress(const brillo::Blob& in, brillo::Blob* out);
-bool BzipCompressString(const std::string& str, brillo::Blob* out);
-bool BzipDecompressString(const std::string& str, brillo::Blob* out);
} // namespace chromeos_update_engine
diff --git a/payload_generator/delta_diff_generator.cc b/payload_generator/delta_diff_generator.cc
index ffe0fe9..7ca42fc5 100644
--- a/payload_generator/delta_diff_generator.cc
+++ b/payload_generator/delta_diff_generator.cc
@@ -55,6 +55,11 @@
const string& output_path,
const string& private_key_path,
uint64_t* metadata_size) {
+ if (!config.version.Validate()) {
+ LOG(ERROR) << "Unsupported major.minor version: " << config.version.major
+ << "." << config.version.minor;
+ return false;
+ }
// Create empty payload file object.
PayloadFile payload;
@@ -101,17 +106,12 @@
LOG_IF(FATAL, old_part.size > new_part.size)
<< "Shirking the filesystem size is not supported at the moment.";
}
- if (config.minor_version == kInPlaceMinorPayloadVersion) {
+ if (config.version.minor == kInPlaceMinorPayloadVersion) {
LOG(INFO) << "Using generator InplaceGenerator().";
strategy.reset(new InplaceGenerator());
- } else if (config.minor_version == kSourceMinorPayloadVersion ||
- config.minor_version == kOpSrcHashMinorPayloadVersion) {
+ } else {
LOG(INFO) << "Using generator ABGenerator().";
strategy.reset(new ABGenerator());
- } else {
- LOG(ERROR) << "Unsupported minor version given for delta payload: "
- << config.minor_version;
- return false;
}
} else {
LOG(INFO) << "Using generator FullUpdateGenerator().";
diff --git a/payload_generator/delta_diff_utils.cc b/payload_generator/delta_diff_utils.cc
index ba1aac4..1884681 100644
--- a/payload_generator/delta_diff_utils.cc
+++ b/payload_generator/delta_diff_utils.cc
@@ -31,6 +31,7 @@
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/extent_ranges.h"
#include "update_engine/payload_generator/extent_utils.h"
+#include "update_engine/payload_generator/xz.h"
using std::map;
using std::string;
@@ -40,6 +41,7 @@
namespace {
const char* const kBsdiffPath = "bsdiff";
+const char* const kImgdiffPath = "imgdiff";
// The maximum destination size allowed for bsdiff. In general, bsdiff should
// work for arbitrary big files, but the payload generation and payload
@@ -48,6 +50,11 @@
// Chrome binary in ASan builders.
const uint64_t kMaxBsdiffDestinationSize = 200 * 1024 * 1024; // bytes
+// The maximum destination size allowed for imgdiff. In general, imgdiff should
+// work for arbitrarily big files, but the payload application is quite memory
+// intensive, so we limit these operations to 50 MiB.
+const uint64_t kMaxImgdiffDestinationSize = 50 * 1024 * 1024; // bytes
+
// Process a range of blocks from |range_start| to |range_end| in the extent at
// position |*idx_p| of |extents|. If |do_remove| is true, this range will be
// removed, which may cause the extent to be trimmed, split or removed entirely.
@@ -146,18 +153,26 @@
return removed_bytes;
}
+// Returns true if the given blob |data| contains gzip header magic.
+bool ContainsGZip(const brillo::Blob& data) {
+ const uint8_t kGZipMagic[] = {0x1f, 0x8b, 0x08, 0x00};
+ return std::search(data.begin(),
+ data.end(),
+ std::begin(kGZipMagic),
+ std::end(kGZipMagic)) != data.end();
+}
+
} // namespace
namespace diff_utils {
-bool DeltaReadPartition(
- vector<AnnotatedOperation>* aops,
- const PartitionConfig& old_part,
- const PartitionConfig& new_part,
- ssize_t hard_chunk_blocks,
- size_t soft_chunk_blocks,
- BlobFileWriter* blob_file,
- bool src_ops_allowed) {
+bool DeltaReadPartition(vector<AnnotatedOperation>* aops,
+ const PartitionConfig& old_part,
+ const PartitionConfig& new_part,
+ ssize_t hard_chunk_blocks,
+ size_t soft_chunk_blocks,
+ const PayloadVersion& version,
+ BlobFileWriter* blob_file) {
ExtentRanges old_visited_blocks;
ExtentRanges new_visited_blocks;
@@ -168,7 +183,7 @@
old_part.fs_interface ? old_part.fs_interface->GetBlockCount() : 0,
new_part.fs_interface->GetBlockCount(),
soft_chunk_blocks,
- src_ops_allowed,
+ version,
blob_file,
&old_visited_blocks,
&new_visited_blocks));
@@ -219,16 +234,15 @@
old_files_map[new_file.name], old_visited_blocks);
old_visited_blocks.AddExtents(old_file_extents);
- TEST_AND_RETURN_FALSE(DeltaReadFile(
- aops,
- old_part.path,
- new_part.path,
- old_file_extents,
- new_file_extents,
- new_file.name, // operation name
- hard_chunk_blocks,
- blob_file,
- src_ops_allowed));
+ TEST_AND_RETURN_FALSE(DeltaReadFile(aops,
+ old_part.path,
+ new_part.path,
+ old_file_extents,
+ new_file_extents,
+ new_file.name, // operation name
+ hard_chunk_blocks,
+ version,
+ blob_file));
}
// Process all the blocks not included in any file. We provided all the unused
// blocks in the old partition as available data.
@@ -250,31 +264,29 @@
// We use the soft_chunk_blocks limit for the <non-file-data> as we don't
// really know the structure of this data and we should not expect it to have
// redundancy between partitions.
- TEST_AND_RETURN_FALSE(DeltaReadFile(
- aops,
- old_part.path,
- new_part.path,
- old_unvisited,
- new_unvisited,
- "<non-file-data>", // operation name
- soft_chunk_blocks,
- blob_file,
- src_ops_allowed));
+ TEST_AND_RETURN_FALSE(DeltaReadFile(aops,
+ old_part.path,
+ new_part.path,
+ old_unvisited,
+ new_unvisited,
+ "<non-file-data>", // operation name
+ soft_chunk_blocks,
+ version,
+ blob_file));
return true;
}
-bool DeltaMovedAndZeroBlocks(
- vector<AnnotatedOperation>* aops,
- const string& old_part,
- const string& new_part,
- size_t old_num_blocks,
- size_t new_num_blocks,
- ssize_t chunk_blocks,
- bool src_ops_allowed,
- BlobFileWriter* blob_file,
- ExtentRanges* old_visited_blocks,
- ExtentRanges* new_visited_blocks) {
+bool DeltaMovedAndZeroBlocks(vector<AnnotatedOperation>* aops,
+ const string& old_part,
+ const string& new_part,
+ size_t old_num_blocks,
+ size_t new_num_blocks,
+ ssize_t chunk_blocks,
+ const PayloadVersion& version,
+ BlobFileWriter* blob_file,
+ ExtentRanges* old_visited_blocks,
+ ExtentRanges* new_visited_blocks) {
vector<BlockMapping::BlockId> old_block_ids;
vector<BlockMapping::BlockId> new_block_ids;
TEST_AND_RETURN_FALSE(MapPartitionBlocks(old_part,
@@ -285,9 +297,10 @@
&old_block_ids,
&new_block_ids));
- // For minor-version=1, we map all the blocks that didn't move, regardless of
- // the contents since they are already copied and no operation is required.
- if (!src_ops_allowed) {
+ // If the update is inplace, we map all the blocks that didn't move,
+ // regardless of the contents since they are already copied and no operation
+ // is required.
+ if (version.InplaceUpdate()) {
uint64_t num_blocks = std::min(old_num_blocks, new_num_blocks);
for (uint64_t block = 0; block < num_blocks; block++) {
if (old_block_ids[block] == new_block_ids[block] &&
@@ -338,25 +351,25 @@
old_blocks_map_it->second.back());
AppendBlockToExtents(&new_identical_blocks, block);
// We can't reuse source blocks in minor version 1 because the cycle
- // breaking algorithm doesn't support that.
- if (!src_ops_allowed)
+ // breaking algorithm used in the in-place update doesn't support that.
+ if (version.InplaceUpdate())
old_blocks_map_it->second.pop_back();
}
// Produce operations for the zero blocks split per output extent.
+ // TODO(deymo): Produce ZERO operations instead of calling DeltaReadFile().
size_t num_ops = aops->size();
new_visited_blocks->AddExtents(new_zeros);
for (Extent extent : new_zeros) {
- TEST_AND_RETURN_FALSE(DeltaReadFile(
- aops,
- "",
- new_part,
- vector<Extent>(), // old_extents
- vector<Extent>{extent}, // new_extents
- "<zeros>",
- chunk_blocks,
- blob_file,
- src_ops_allowed));
+ TEST_AND_RETURN_FALSE(DeltaReadFile(aops,
+ "",
+ new_part,
+ vector<Extent>(), // old_extents
+ vector<Extent>{extent}, // new_extents
+ "<zeros>",
+ chunk_blocks,
+ version,
+ blob_file));
}
LOG(INFO) << "Produced " << (aops->size() - num_ops) << " operations for "
<< BlocksInExtents(new_zeros) << " zeroed blocks";
@@ -376,8 +389,9 @@
aops->emplace_back();
AnnotatedOperation* aop = &aops->back();
aop->name = "<identical-blocks>";
- aop->op.set_type(src_ops_allowed ? InstallOperation::SOURCE_COPY
- : InstallOperation::MOVE);
+ aop->op.set_type(version.OperationAllowed(InstallOperation::SOURCE_COPY)
+ ? InstallOperation::SOURCE_COPY
+ : InstallOperation::MOVE);
uint64_t chunk_num_blocks =
std::min(extent.num_blocks() - op_block_offset,
@@ -407,16 +421,15 @@
return true;
}
-bool DeltaReadFile(
- vector<AnnotatedOperation>* aops,
- const string& old_part,
- const string& new_part,
- const vector<Extent>& old_extents,
- const vector<Extent>& new_extents,
- const string& name,
- ssize_t chunk_blocks,
- BlobFileWriter* blob_file,
- bool src_ops_allowed) {
+bool DeltaReadFile(vector<AnnotatedOperation>* aops,
+ const string& old_part,
+ const string& new_part,
+ const vector<Extent>& old_extents,
+ const vector<Extent>& new_extents,
+ const string& name,
+ ssize_t chunk_blocks,
+ const PayloadVersion& version,
+ BlobFileWriter* blob_file) {
brillo::Blob data;
InstallOperation operation;
@@ -424,20 +437,6 @@
if (chunk_blocks == -1)
chunk_blocks = total_blocks;
- // If bsdiff breaks again, blacklist the problem file by using:
- // bsdiff_allowed = (name != "/foo/bar")
- //
- // TODO(dgarrett): chromium-os:15274 connect this test to the command line.
- bool bsdiff_allowed = true;
- if (static_cast<uint64_t>(chunk_blocks) * kBlockSize >
- kMaxBsdiffDestinationSize) {
- bsdiff_allowed = false;
- }
-
- if (!bsdiff_allowed) {
- LOG(INFO) << "bsdiff blacklisting: " << name;
- }
-
for (uint64_t block_offset = 0; block_offset < total_blocks;
block_offset += chunk_blocks) {
// Split the old/new file in the same chunks. Note that this could drop
@@ -455,10 +454,9 @@
new_part,
old_extents_chunk,
new_extents_chunk,
- bsdiff_allowed,
+ version,
&data,
- &operation,
- src_ops_allowed));
+ &operation));
// Check if the operation writes nothing.
if (operation.dst_extents_size() == 0) {
@@ -484,7 +482,7 @@
// Write the data
if (operation.type() != InstallOperation::MOVE &&
operation.type() != InstallOperation::SOURCE_COPY) {
- TEST_AND_RETURN_FALSE(aop.SetOperationBlob(&data, blob_file));
+ TEST_AND_RETURN_FALSE(aop.SetOperationBlob(data, blob_file));
} else {
TEST_AND_RETURN_FALSE(blob_file->StoreBlob(data) != -1);
}
@@ -493,22 +491,92 @@
return true;
}
+bool GenerateBestFullOperation(const brillo::Blob& new_data,
+ const PayloadVersion& version,
+ brillo::Blob* out_blob,
+ InstallOperation_Type* out_type) {
+ if (new_data.empty())
+ return false;
+
+ if (version.OperationAllowed(InstallOperation::ZERO) &&
+ std::all_of(
+ new_data.begin(), new_data.end(), [](uint8_t x) { return x == 0; })) {
+ // The read buffer is all zeros, so produce a ZERO operation. No need to
+ // check other types of operations in this case.
+ *out_blob = brillo::Blob();
+ *out_type = InstallOperation::ZERO;
+ return true;
+ }
+
+ bool out_blob_set = false;
+
+ // Try compressing |new_data| with xz first.
+ if (version.OperationAllowed(InstallOperation::REPLACE_XZ)) {
+ brillo::Blob new_data_xz;
+ if (XzCompress(new_data, &new_data_xz) && !new_data_xz.empty()) {
+ *out_type = InstallOperation::REPLACE_XZ;
+ *out_blob = std::move(new_data_xz);
+ out_blob_set = true;
+ }
+ }
+
+ // Try compressing it with bzip2.
+ if (version.OperationAllowed(InstallOperation::REPLACE_BZ)) {
+ brillo::Blob new_data_bz;
+ // TODO(deymo): Implement some heuristic to determine if it is worth trying
+ // to compress the blob with bzip2 if we already have a good REPLACE_XZ.
+ if (BzipCompress(new_data, &new_data_bz) && !new_data_bz.empty() &&
+ (!out_blob_set || out_blob->size() > new_data_bz.size())) {
+ // A REPLACE_BZ is better or nothing else was set.
+ *out_type = InstallOperation::REPLACE_BZ;
+ *out_blob = std::move(new_data_bz);
+ out_blob_set = true;
+ }
+ }
+
+ // If nothing else worked or it was badly compressed we try a REPLACE.
+ if (!out_blob_set || out_blob->size() >= new_data.size()) {
+ *out_type = InstallOperation::REPLACE;
+ // This needs to make a copy of the data in the case bzip or xz didn't
+ // compress well, which is not the common case so the performance hit is
+ // low.
+ *out_blob = new_data;
+ }
+ return true;
+}
+
bool ReadExtentsToDiff(const string& old_part,
const string& new_part,
const vector<Extent>& old_extents,
const vector<Extent>& new_extents,
- bool bsdiff_allowed,
+ const PayloadVersion& version,
brillo::Blob* out_data,
- InstallOperation* out_op,
- bool src_ops_allowed) {
+ InstallOperation* out_op) {
InstallOperation operation;
- // Data blob that will be written to delta file.
- const brillo::Blob* data_blob = nullptr;
// We read blocks from old_extents and write blocks to new_extents.
uint64_t blocks_to_read = BlocksInExtents(old_extents);
uint64_t blocks_to_write = BlocksInExtents(new_extents);
+ // Disable bsdiff and imgdiff when the data is too big.
+ bool bsdiff_allowed =
+ version.OperationAllowed(InstallOperation::SOURCE_BSDIFF) ||
+ version.OperationAllowed(InstallOperation::BSDIFF);
+ if (bsdiff_allowed &&
+ blocks_to_read * kBlockSize > kMaxBsdiffDestinationSize) {
+ LOG(INFO) << "bsdiff blacklisted, data too big: "
+ << blocks_to_read * kBlockSize << " bytes";
+ bsdiff_allowed = false;
+ }
+
+ bool imgdiff_allowed = version.OperationAllowed(InstallOperation::IMGDIFF);
+ if (imgdiff_allowed &&
+ blocks_to_read * kBlockSize > kMaxImgdiffDestinationSize) {
+ LOG(INFO) << "imgdiff blacklisted, data too big: "
+ << blocks_to_read * kBlockSize << " bytes";
+ imgdiff_allowed = false;
+ }
+
// Make copies of the extents so we can modify them.
vector<Extent> src_extents = old_extents;
vector<Extent> dst_extents = new_extents;
@@ -522,24 +590,17 @@
kBlockSize));
TEST_AND_RETURN_FALSE(!new_data.empty());
+ // Data blob that will be written to delta file.
+ brillo::Blob data_blob;
- // Using a REPLACE is always an option.
- operation.set_type(InstallOperation::REPLACE);
- data_blob = &new_data;
-
- // Try compressing it with bzip2.
- brillo::Blob new_data_bz;
- TEST_AND_RETURN_FALSE(BzipCompress(new_data, &new_data_bz));
- CHECK(!new_data_bz.empty());
- if (new_data_bz.size() < data_blob->size()) {
- // A REPLACE_BZ is better.
- operation.set_type(InstallOperation::REPLACE_BZ);
- data_blob = &new_data_bz;
- }
+ // Try generating a full operation for the given new data, regardless of the
+ // old_data.
+ InstallOperation_Type op_type;
+ TEST_AND_RETURN_FALSE(
+ GenerateBestFullOperation(new_data, version, &data_blob, &op_type));
+ operation.set_type(op_type);
brillo::Blob old_data;
- brillo::Blob empty_blob;
- brillo::Blob bsdiff_delta;
if (blocks_to_read > 0) {
// Read old data.
TEST_AND_RETURN_FALSE(
@@ -547,38 +608,56 @@
kBlockSize * blocks_to_read, kBlockSize));
if (old_data == new_data) {
// No change in data.
- if (src_ops_allowed) {
- operation.set_type(InstallOperation::SOURCE_COPY);
- } else {
- operation.set_type(InstallOperation::MOVE);
- }
- data_blob = &empty_blob;
- } else if (bsdiff_allowed) {
+ operation.set_type(version.OperationAllowed(InstallOperation::SOURCE_COPY)
+ ? InstallOperation::SOURCE_COPY
+ : InstallOperation::MOVE);
+ data_blob = brillo::Blob();
+ } else if (bsdiff_allowed || imgdiff_allowed) {
// If the source file is considered bsdiff safe (no bsdiff bugs
// triggered), see if BSDIFF encoding is smaller.
base::FilePath old_chunk;
TEST_AND_RETURN_FALSE(base::CreateTemporaryFile(&old_chunk));
ScopedPathUnlinker old_unlinker(old_chunk.value());
- TEST_AND_RETURN_FALSE(
- utils::WriteFile(old_chunk.value().c_str(),
- old_data.data(), old_data.size()));
+ TEST_AND_RETURN_FALSE(utils::WriteFile(
+ old_chunk.value().c_str(), old_data.data(), old_data.size()));
base::FilePath new_chunk;
TEST_AND_RETURN_FALSE(base::CreateTemporaryFile(&new_chunk));
ScopedPathUnlinker new_unlinker(new_chunk.value());
- TEST_AND_RETURN_FALSE(
- utils::WriteFile(new_chunk.value().c_str(),
- new_data.data(), new_data.size()));
+ TEST_AND_RETURN_FALSE(utils::WriteFile(
+ new_chunk.value().c_str(), new_data.data(), new_data.size()));
- TEST_AND_RETURN_FALSE(
- BsdiffFiles(old_chunk.value(), new_chunk.value(), &bsdiff_delta));
- CHECK_GT(bsdiff_delta.size(), static_cast<brillo::Blob::size_type>(0));
- if (bsdiff_delta.size() < data_blob->size()) {
- if (src_ops_allowed) {
- operation.set_type(InstallOperation::SOURCE_BSDIFF);
- } else {
- operation.set_type(InstallOperation::BSDIFF);
+ if (bsdiff_allowed) {
+ brillo::Blob bsdiff_delta;
+ TEST_AND_RETURN_FALSE(DiffFiles(
+ kBsdiffPath, old_chunk.value(), new_chunk.value(), &bsdiff_delta));
+ CHECK_GT(bsdiff_delta.size(), static_cast<brillo::Blob::size_type>(0));
+ if (bsdiff_delta.size() < data_blob.size()) {
+ operation.set_type(
+ version.OperationAllowed(InstallOperation::SOURCE_BSDIFF)
+ ? InstallOperation::SOURCE_BSDIFF
+ : InstallOperation::BSDIFF);
+ data_blob = std::move(bsdiff_delta);
}
- data_blob = &bsdiff_delta;
+ }
+ if (imgdiff_allowed && ContainsGZip(old_data) && ContainsGZip(new_data)) {
+ brillo::Blob imgdiff_delta;
+ // Imgdiff might fail in some cases, only use the result if it succeed,
+ // otherwise print the extents to analyze.
+ if (DiffFiles(kImgdiffPath,
+ old_chunk.value(),
+ new_chunk.value(),
+ &imgdiff_delta) &&
+ imgdiff_delta.size() > 0) {
+ if (imgdiff_delta.size() < data_blob.size()) {
+ operation.set_type(InstallOperation::IMGDIFF);
+ data_blob = std::move(imgdiff_delta);
+ }
+ } else {
+ LOG(ERROR) << "Imgdiff failed with source extents: "
+ << ExtentsToString(src_extents)
+ << ", destination extents: "
+ << ExtentsToString(dst_extents);
+ }
}
}
}
@@ -598,23 +677,23 @@
StoreExtents(dst_extents, operation.mutable_dst_extents());
// Replace operations should not have source extents.
- if (operation.type() == InstallOperation::REPLACE ||
- operation.type() == InstallOperation::REPLACE_BZ) {
+ if (IsAReplaceOperation(operation.type())) {
operation.clear_src_extents();
operation.clear_src_length();
}
- *out_data = std::move(*data_blob);
+ *out_data = std::move(data_blob);
*out_op = operation;
return true;
}
-// Runs the bsdiff tool on two files and returns the resulting delta in
-// 'out'. Returns true on success.
-bool BsdiffFiles(const string& old_file,
- const string& new_file,
- brillo::Blob* out) {
+// Runs the bsdiff or imgdiff tool in |diff_path| on two files and returns the
+// resulting delta in |out|. Returns true on success.
+bool DiffFiles(const string& diff_path,
+ const string& old_file,
+ const string& new_file,
+ brillo::Blob* out) {
const string kPatchFile = "delta.patchXXXXXX";
string patch_file_path;
@@ -622,20 +701,30 @@
utils::MakeTempFile(kPatchFile, &patch_file_path, nullptr));
vector<string> cmd;
- cmd.push_back(kBsdiffPath);
+ cmd.push_back(diff_path);
cmd.push_back(old_file);
cmd.push_back(new_file);
cmd.push_back(patch_file_path);
int rc = 1;
brillo::Blob patch_file;
- TEST_AND_RETURN_FALSE(Subprocess::SynchronousExec(cmd, &rc, nullptr));
- TEST_AND_RETURN_FALSE(rc == 0);
+ string stdout;
+ TEST_AND_RETURN_FALSE(Subprocess::SynchronousExec(cmd, &rc, &stdout));
+ if (rc != 0) {
+ LOG(ERROR) << diff_path << " returned " << rc << std::endl << stdout;
+ return false;
+ }
TEST_AND_RETURN_FALSE(utils::ReadFile(patch_file_path, out));
unlink(patch_file_path.c_str());
return true;
}
+bool IsAReplaceOperation(InstallOperation_Type op_type) {
+ return (op_type == InstallOperation::REPLACE ||
+ op_type == InstallOperation::REPLACE_BZ ||
+ op_type == InstallOperation::REPLACE_XZ);
+}
+
// Returns true if |op| is a no-op operation that doesn't do any useful work
// (e.g., a move operation that copies blocks onto themselves).
bool IsNoopOperation(const InstallOperation& op) {
diff --git a/payload_generator/delta_diff_utils.h b/payload_generator/delta_diff_utils.h
index 5ba9bc7..11b6f79 100644
--- a/payload_generator/delta_diff_utils.h
+++ b/payload_generator/delta_diff_utils.h
@@ -36,27 +36,25 @@
// It uses the files reported by the filesystem in |old_part| and the data
// blocks in that partition (if available) to determine the best way to compress
// the new files (REPLACE, REPLACE_BZ, COPY, BSDIFF) and writes any necessary
-// data to the end of |data_fd| updating |data_file_size| accordingly.
-// |hard_chunk_blocks| and |soft_chunk_blocks| are the hard and soft chunk
-// limits in number of blocks respectively. The soft chunk limit is used to
-// split MOVE and SOURCE_COPY operations and REPLACE_BZ of zeroed blocks, while
-// the hard limit is used to split a file when generating other operations. A
-// value of -1 in |hard_chunk_blocks| means whole files.
+// data to |blob_file|. |hard_chunk_blocks| and |soft_chunk_blocks| are the hard
+// and soft chunk limits in number of blocks respectively. The soft chunk limit
+// is used to split MOVE and SOURCE_COPY operations and REPLACE_BZ of zeroed
+// blocks, while the hard limit is used to split a file when generating other
+// operations. A value of -1 in |hard_chunk_blocks| means whole files.
bool DeltaReadPartition(std::vector<AnnotatedOperation>* aops,
const PartitionConfig& old_part,
const PartitionConfig& new_part,
ssize_t hard_chunk_blocks,
size_t soft_chunk_blocks,
- BlobFileWriter* blob_file,
- bool src_ops_allowed);
+ const PayloadVersion& version,
+ BlobFileWriter* blob_file);
// Create operations in |aops| for identical blocks that moved around in the old
// and new partition and also handle zeroed blocks. The old and new partition
// are stored in the |old_part| and |new_part| files and have |old_num_blocks|
// and |new_num_blocks| respectively. The maximum operation size is
// |chunk_blocks| blocks, or unlimited if |chunk_blocks| is -1. The blobs of the
-// produced operations are stored in the |data_fd| file whose size is updated
-// in the value pointed by |data_file_size|.
+// produced operations are stored in the |blob_file|.
// The collections |old_visited_blocks| and |new_visited_blocks| state what
// blocks already have operations reading or writing them and only operations
// for unvisited blocks are produced by this function updating both collections
@@ -67,7 +65,7 @@
size_t old_num_blocks,
size_t new_num_blocks,
ssize_t chunk_blocks,
- bool src_ops_allowed,
+ const PayloadVersion& version,
BlobFileWriter* blob_file,
ExtentRanges* old_visited_blocks,
ExtentRanges* new_visited_blocks);
@@ -78,8 +76,7 @@
// stored in |new_part| in the blocks described by |new_extents| and, if it
// exists, the old version exists in |old_part| in the blocks described by
// |old_extents|. The operations added to |aops| reference the data blob
-// in the file |data_fd|, which has length *data_file_size. *data_file_size is
-// updated appropriately. Returns true on success.
+// in the |blob_file|. Returns true on success.
bool DeltaReadFile(std::vector<AnnotatedOperation>* aops,
const std::string& old_part,
const std::string& new_part,
@@ -87,32 +84,43 @@
const std::vector<Extent>& new_extents,
const std::string& name,
ssize_t chunk_blocks,
- BlobFileWriter* blob_file,
- bool src_ops_allowed);
+ const PayloadVersion& version,
+ BlobFileWriter* blob_file);
// Reads the blocks |old_extents| from |old_part| (if it exists) and the
// |new_extents| from |new_part| and determines the smallest way to encode
// this |new_extents| for the diff. It stores necessary data in |out_data| and
// fills in |out_op|. If there's no change in old and new files, it creates a
-// MOVE operation. If there is a change, the smallest of REPLACE, REPLACE_BZ,
-// or BSDIFF wins. |new_extents| must not be empty.
-// If |src_ops_allowed| is true, it will emit SOURCE_COPY and SOURCE_BSDIFF
-// operations instead of MOVE and BSDIFF, respectively.
-// Returns true on success.
+// MOVE or SOURCE_COPY operation. If there is a change, the smallest of the
+// operations allowed in the given |version| (REPLACE, REPLACE_BZ, BSDIFF,
+// SOURCE_BSDIFF or IMGDIFF) wins.
+// |new_extents| must not be empty. Returns true on success.
bool ReadExtentsToDiff(const std::string& old_part,
const std::string& new_part,
const std::vector<Extent>& old_extents,
const std::vector<Extent>& new_extents,
- bool bsdiff_allowed,
+ const PayloadVersion& version,
brillo::Blob* out_data,
- InstallOperation* out_op,
- bool src_ops_allowed);
+ InstallOperation* out_op);
-// Runs the bsdiff tool on two files and returns the resulting delta in
-// |out|. Returns true on success.
-bool BsdiffFiles(const std::string& old_file,
- const std::string& new_file,
- brillo::Blob* out);
+// Runs the bsdiff or imgdiff tool in |diff_path| on two files and returns the
+// resulting delta in |out|. Returns true on success.
+bool DiffFiles(const std::string& diff_path,
+ const std::string& old_file,
+ const std::string& new_file,
+ brillo::Blob* out);
+
+// Generates the best allowed full operation to produce |new_data|. The allowed
+// operations are based on |payload_version|. The operation blob will be stored
+// in |out_blob| and the resulting operation type in |out_type|. Returns whether
+// a valid full operation was generated.
+bool GenerateBestFullOperation(const brillo::Blob& new_data,
+ const PayloadVersion& version,
+ brillo::Blob* out_blob,
+ InstallOperation_Type* out_type);
+
+// Returns whether op_type is one of the REPLACE full operations.
+bool IsAReplaceOperation(InstallOperation_Type op_type);
// Returns true if |op| is a no-op operation that doesn't do any useful work
// (e.g., a move operation that copies blocks onto themselves).
diff --git a/payload_generator/delta_diff_utils_unittest.cc b/payload_generator/delta_diff_utils_unittest.cc
index 72349d8..3614d32 100644
--- a/payload_generator/delta_diff_utils_unittest.cc
+++ b/payload_generator/delta_diff_utils_unittest.cc
@@ -125,21 +125,22 @@
}
// Helper function to call DeltaMovedAndZeroBlocks() using this class' data
- // members. This simply avoid repeating all the arguments that never change.
+ // members. This simply avoids repeating all the arguments that never change.
bool RunDeltaMovedAndZeroBlocks(ssize_t chunk_blocks,
- bool src_ops_allowed) {
+ uint32_t minor_version) {
BlobFileWriter blob_file(blob_fd_, &blob_size_);
- return diff_utils::DeltaMovedAndZeroBlocks(
- &aops_,
- old_part_.path,
- new_part_.path,
- old_part_.size / block_size_,
- new_part_.size / block_size_,
- chunk_blocks,
- src_ops_allowed,
- &blob_file,
- &old_visited_blocks_,
- &new_visited_blocks_);
+ PayloadVersion version(kChromeOSMajorPayloadVersion, minor_version);
+ version.imgdiff_allowed = true; // Assume no fingerprint mismatch.
+ return diff_utils::DeltaMovedAndZeroBlocks(&aops_,
+ old_part_.path,
+ new_part_.path,
+ old_part_.size / block_size_,
+ new_part_.size / block_size_,
+ chunk_blocks,
+ version,
+ &blob_file,
+ &old_visited_blocks_,
+ &new_visited_blocks_);
}
// Old and new temporary partitions used in the tests. These are initialized
@@ -178,10 +179,9 @@
new_part_.path,
old_extents,
new_extents,
- true, // bsdiff_allowed
+ PayloadVersion(kChromeOSMajorPayloadVersion, kInPlaceMinorPayloadVersion),
&data,
- &op,
- false)); // src_ops_allowed
+ &op));
EXPECT_TRUE(data.empty());
EXPECT_TRUE(op.has_type());
@@ -238,10 +238,9 @@
new_part_.path,
old_extents,
new_extents,
- true, // bsdiff_allowed
+ PayloadVersion(kChromeOSMajorPayloadVersion, kInPlaceMinorPayloadVersion),
&data,
- &op,
- false)); // src_ops_allowed
+ &op));
EXPECT_TRUE(data.empty());
@@ -302,10 +301,9 @@
new_part_.path,
old_extents,
new_extents,
- true, // bsdiff_allowed
+ PayloadVersion(kChromeOSMajorPayloadVersion, kInPlaceMinorPayloadVersion),
&data,
- &op,
- false)); // src_ops_allowed
+ &op));
EXPECT_FALSE(data.empty());
@@ -322,72 +320,6 @@
EXPECT_EQ(1U, BlocksInExtents(op.dst_extents()));
}
-TEST_F(DeltaDiffUtilsTest, BsdiffNotAllowedTest) {
- // Same setup as the previous test, but this time BSDIFF operations are not
- // allowed.
- brillo::Blob data_blob(kBlockSize);
- test_utils::FillWithData(&data_blob);
-
- // The old file is on a different block than the new one.
- vector<Extent> old_extents = { ExtentForRange(1, 1) };
- vector<Extent> new_extents = { ExtentForRange(2, 1) };
-
- EXPECT_TRUE(WriteExtents(old_part_.path, old_extents, kBlockSize, data_blob));
- // Modify one byte in the new file.
- data_blob[0]++;
- EXPECT_TRUE(WriteExtents(new_part_.path, new_extents, kBlockSize, data_blob));
-
- brillo::Blob data;
- InstallOperation op;
- EXPECT_TRUE(diff_utils::ReadExtentsToDiff(
- old_part_.path,
- new_part_.path,
- old_extents,
- new_extents,
- false, // bsdiff_allowed
- &data,
- &op,
- false)); // src_ops_allowed
-
- EXPECT_FALSE(data.empty());
-
- // The point of this test is that we don't use BSDIFF the way the above
- // did. The rest of the details are to be caught in other tests.
- EXPECT_TRUE(op.has_type());
- EXPECT_NE(InstallOperation::BSDIFF, op.type());
-}
-
-TEST_F(DeltaDiffUtilsTest, BsdiffNotAllowedMoveTest) {
- brillo::Blob data_blob(kBlockSize);
- test_utils::FillWithData(&data_blob);
-
- // The old file is on a different block than the new one.
- vector<Extent> old_extents = { ExtentForRange(1, 1) };
- vector<Extent> new_extents = { ExtentForRange(2, 1) };
-
- EXPECT_TRUE(WriteExtents(old_part_.path, old_extents, kBlockSize, data_blob));
- EXPECT_TRUE(WriteExtents(new_part_.path, new_extents, kBlockSize, data_blob));
-
- brillo::Blob data;
- InstallOperation op;
- EXPECT_TRUE(diff_utils::ReadExtentsToDiff(
- old_part_.path,
- new_part_.path,
- old_extents,
- new_extents,
- false, // bsdiff_allowed
- &data,
- &op,
- false)); // src_ops_allowed
-
- EXPECT_TRUE(data.empty());
-
- // The point of this test is that we can still use a MOVE for a file
- // that is blacklisted.
- EXPECT_TRUE(op.has_type());
- EXPECT_EQ(InstallOperation::MOVE, op.type());
-}
-
TEST_F(DeltaDiffUtilsTest, ReplaceSmallTest) {
// The old file is on a different block than the new one.
vector<Extent> old_extents = { ExtentForRange(1, 1) };
@@ -417,10 +349,10 @@
new_part_.path,
old_extents,
new_extents,
- true, // bsdiff_allowed
+ PayloadVersion(kChromeOSMajorPayloadVersion,
+ kInPlaceMinorPayloadVersion),
&data,
- &op,
- false)); // src_ops_allowed
+ &op));
EXPECT_FALSE(data.empty());
EXPECT_TRUE(op.has_type());
@@ -458,10 +390,9 @@
new_part_.path,
old_extents,
new_extents,
- true, // bsdiff_allowed
+ PayloadVersion(kChromeOSMajorPayloadVersion, kSourceMinorPayloadVersion),
&data,
- &op,
- true)); // src_ops_allowed
+ &op));
EXPECT_TRUE(data.empty());
EXPECT_TRUE(op.has_type());
@@ -491,10 +422,9 @@
new_part_.path,
old_extents,
new_extents,
- true, // bsdiff_allowed
+ PayloadVersion(kChromeOSMajorPayloadVersion, kSourceMinorPayloadVersion),
&data,
- &op,
- true)); // src_ops_allowed
+ &op));
EXPECT_FALSE(data.empty());
EXPECT_TRUE(op.has_type());
@@ -551,7 +481,7 @@
InitializePartitionWithUniqueBlocks(new_part_, block_size_, 42);
EXPECT_TRUE(RunDeltaMovedAndZeroBlocks(-1, // chunk_blocks
- false)); // src_ops_allowed
+ kInPlaceMinorPayloadVersion));
EXPECT_EQ(0U, old_visited_blocks_.blocks());
EXPECT_EQ(0U, new_visited_blocks_.blocks());
@@ -574,7 +504,7 @@
// Most of the blocks rest in the same place, but there's no need for MOVE
// operations on those blocks.
EXPECT_TRUE(RunDeltaMovedAndZeroBlocks(-1, // chunk_blocks
- false)); // src_ops_allowed
+ kInPlaceMinorPayloadVersion));
EXPECT_EQ(kDefaultBlockCount, old_visited_blocks_.blocks());
EXPECT_EQ(kDefaultBlockCount, new_visited_blocks_.blocks());
@@ -604,7 +534,7 @@
brillo::Blob(5 * kBlockSize, 'a')));
EXPECT_TRUE(RunDeltaMovedAndZeroBlocks(10, // chunk_blocks
- true)); // src_ops_allowed
+ kSourceMinorPayloadVersion));
ExtentRanges expected_ranges;
expected_ranges.AddExtent(ExtentForRange(0, 50));
@@ -657,7 +587,7 @@
EXPECT_TRUE(test_utils::WriteFileVector(new_part_.path, partition_data));
EXPECT_TRUE(RunDeltaMovedAndZeroBlocks(-1, // chunk_blocks
- true)); // src_ops_allowed
+ kSourceMinorPayloadVersion));
// There should be only one SOURCE_COPY, for the whole partition and the
// source extents should cover only the first copy of the source file since
@@ -699,7 +629,7 @@
EXPECT_TRUE(WriteExtents(old_part_.path, old_zeros, block_size_, zeros_data));
EXPECT_TRUE(RunDeltaMovedAndZeroBlocks(5, // chunk_blocks
- false)); // src_ops_allowed
+ kInPlaceMinorPayloadVersion));
// Zeroed blocks from old_visited_blocks_ were copied over, so me actually
// use them regardless of the trivial MOVE operation not being emitted.
@@ -754,7 +684,7 @@
new_contents));
EXPECT_TRUE(RunDeltaMovedAndZeroBlocks(-1, // chunk_blocks
- true)); // src_ops_allowed
+ kSourceMinorPayloadVersion));
EXPECT_EQ(permutation.size(), old_visited_blocks_.blocks());
EXPECT_EQ(permutation.size(), new_visited_blocks_.blocks());
diff --git a/payload_generator/extent_utils.cc b/payload_generator/extent_utils.cc
index 1093445..72e4b7c 100644
--- a/payload_generator/extent_utils.cc
+++ b/payload_generator/extent_utils.cc
@@ -16,12 +16,15 @@
#include "update_engine/payload_generator/extent_utils.h"
+#include <inttypes.h>
+
#include <string>
#include <utility>
#include <vector>
#include <base/logging.h>
#include <base/macros.h>
+#include <base/strings/stringprintf.h>
#include "update_engine/payload_consumer/payload_constants.h"
#include "update_engine/payload_generator/annotated_operation.h"
@@ -93,6 +96,14 @@
}
}
+string ExtentsToString(const vector<Extent>& extents) {
+ string ext_str;
+ for (const Extent& e : extents)
+ ext_str += base::StringPrintf(
+ "[%" PRIu64 ", %" PRIu64 "] ", e.start_block(), e.num_blocks());
+ return ext_str;
+}
+
void NormalizeExtents(vector<Extent>* extents) {
vector<Extent> new_extents;
for (const Extent& curr_ext : *extents) {
diff --git a/payload_generator/extent_utils.h b/payload_generator/extent_utils.h
index 91729d5..3e45264 100644
--- a/payload_generator/extent_utils.h
+++ b/payload_generator/extent_utils.h
@@ -17,6 +17,7 @@
#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_EXTENT_UTILS_H_
#define UPDATE_ENGINE_PAYLOAD_GENERATOR_EXTENT_UTILS_H_
+#include <string>
#include <vector>
#include "update_engine/payload_consumer/payload_constants.h"
@@ -76,6 +77,9 @@
void ExtentsToVector(const google::protobuf::RepeatedPtrField<Extent>& extents,
std::vector<Extent>* out_vector);
+// Returns a string representing all extents in |extents|.
+std::string ExtentsToString(const std::vector<Extent>& extents);
+
// Takes a pointer to extents |extents| and extents |extents_to_add|, and
// merges them by adding |extents_to_add| to |extents| and normalizing.
void ExtendExtents(
diff --git a/payload_generator/full_update_generator.cc b/payload_generator/full_update_generator.cc
index 56a3ca8..3240606 100644
--- a/payload_generator/full_update_generator.cc
+++ b/payload_generator/full_update_generator.cc
@@ -31,7 +31,7 @@
#include <brillo/secure_blob.h>
#include "update_engine/common/utils.h"
-#include "update_engine/payload_generator/bzip.h"
+#include "update_engine/payload_generator/delta_diff_utils.h"
using std::vector;
@@ -47,13 +47,18 @@
class ChunkProcessor : public base::DelegateSimpleThread::Delegate {
public:
// Read a chunk of |size| bytes from |fd| starting at offset |offset|.
- ChunkProcessor(int fd, off_t offset, size_t size,
- BlobFileWriter* blob_file, AnnotatedOperation* aop)
- : fd_(fd),
- offset_(offset),
- size_(size),
- blob_file_(blob_file),
- aop_(aop) {}
+ ChunkProcessor(const PayloadVersion& version,
+ int fd,
+ off_t offset,
+ size_t size,
+ BlobFileWriter* blob_file,
+ AnnotatedOperation* aop)
+ : version_(version),
+ fd_(fd),
+ offset_(offset),
+ size_(size),
+ blob_file_(blob_file),
+ aop_(aop) {}
// We use a default move constructor since all the data members are POD types.
ChunkProcessor(ChunkProcessor&&) = default;
~ChunkProcessor() override = default;
@@ -69,6 +74,7 @@
bool ProcessChunk();
// Work parameters.
+ const PayloadVersion& version_;
int fd_;
off_t offset_;
size_t size_;
@@ -95,18 +101,15 @@
offset_,
&bytes_read));
TEST_AND_RETURN_FALSE(bytes_read == static_cast<ssize_t>(size_));
- TEST_AND_RETURN_FALSE(BzipCompress(buffer_in_, &op_blob));
- InstallOperation_Type op_type = InstallOperation::REPLACE_BZ;
+ InstallOperation_Type op_type;
+ TEST_AND_RETURN_FALSE(diff_utils::GenerateBestFullOperation(
+ buffer_in_, version_, &op_blob, &op_type));
- if (op_blob.size() >= buffer_in_.size()) {
- // A REPLACE is cheaper than a REPLACE_BZ in this case.
- op_blob = std::move(buffer_in_);
- op_type = InstallOperation::REPLACE;
- }
-
- TEST_AND_RETURN_FALSE(aop_->SetOperationBlob(&op_blob, blob_file_));
aop_->op.set_type(op_type);
+ if (!op_blob.empty()) {
+ TEST_AND_RETURN_FALSE(aop_->SetOperationBlob(op_blob, blob_file_));
+ }
return true;
}
@@ -173,6 +176,7 @@
dst_extent->set_num_blocks(num_blocks);
chunk_processors.emplace_back(
+ config.version,
in_fd,
static_cast<off_t>(start_block) * config.block_size,
num_blocks * config.block_size,
diff --git a/payload_generator/full_update_generator_unittest.cc b/payload_generator/full_update_generator_unittest.cc
index d5af9d0..9e62de2 100644
--- a/payload_generator/full_update_generator_unittest.cc
+++ b/payload_generator/full_update_generator_unittest.cc
@@ -35,7 +35,7 @@
protected:
void SetUp() override {
config_.is_delta = false;
- config_.minor_version = kFullPayloadMinorVersion;
+ config_.version.minor = kFullPayloadMinorVersion;
config_.hard_chunk_size = 128 * 1024;
config_.block_size = 4096;
@@ -86,8 +86,10 @@
EXPECT_EQ(new_part_chunks, static_cast<int64_t>(aops.size()));
for (off_t i = 0; i < new_part_chunks; ++i) {
EXPECT_EQ(1, aops[i].op.dst_extents_size());
- EXPECT_EQ(static_cast<uint64_t>(i * config_.hard_chunk_size / config_.block_size),
- aops[i].op.dst_extents(0).start_block()) << "i = " << i;
+ EXPECT_EQ(
+ static_cast<uint64_t>(i * config_.hard_chunk_size / config_.block_size),
+ aops[i].op.dst_extents(0).start_block())
+ << "i = " << i;
EXPECT_EQ(config_.hard_chunk_size / config_.block_size,
aops[i].op.dst_extents(0).num_blocks());
if (aops[i].op.type() != InstallOperation::REPLACE) {
diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc
index d4b3641..99af679 100644
--- a/payload_generator/generate_delta_main.cc
+++ b/payload_generator/generate_delta_main.cc
@@ -39,6 +39,7 @@
#include "update_engine/payload_generator/delta_diff_utils.h"
#include "update_engine/payload_generator/payload_generation_config.h"
#include "update_engine/payload_generator/payload_signer.h"
+#include "update_engine/payload_generator/xz.h"
#include "update_engine/update_metadata.pb.h"
// This file contains a simple program that takes an old path, a new path,
@@ -309,6 +310,9 @@
DEFINE_string(properties_file, "",
"If passed, dumps the payload properties of the payload passed "
"in --in_file and exits.");
+ DEFINE_string(zlib_fingerprint, "",
+ "The fingerprint of zlib in the source image in hash string "
+ "format, used to check imgdiff compatibility.");
DEFINE_string(old_channel, "",
"The channel for the old image. 'dev-channel', 'npo-channel', "
@@ -364,6 +368,9 @@
logging::InitLogging(log_settings);
+ // Initialize the Xz compressor.
+ XzCompressInit();
+
vector<int> signature_sizes;
ParseSignatureSizes(FLAGS_signature_size, &signature_sizes);
@@ -509,32 +516,40 @@
CHECK(part.OpenFilesystem());
}
- payload_config.major_version = FLAGS_major_version;
+ payload_config.version.major = FLAGS_major_version;
LOG(INFO) << "Using provided major_version=" << FLAGS_major_version;
if (FLAGS_minor_version == -1) {
// Autodetect minor_version by looking at the update_engine.conf in the old
// image.
if (payload_config.is_delta) {
- payload_config.minor_version = kInPlaceMinorPayloadVersion;
+ payload_config.version.minor = kInPlaceMinorPayloadVersion;
brillo::KeyValueStore store;
uint32_t minor_version;
for (const PartitionConfig& part : payload_config.source.partitions) {
if (part.fs_interface && part.fs_interface->LoadSettings(&store) &&
utils::GetMinorVersion(store, &minor_version)) {
- payload_config.minor_version = minor_version;
+ payload_config.version.minor = minor_version;
break;
}
}
} else {
- payload_config.minor_version = kFullPayloadMinorVersion;
+ payload_config.version.minor = kFullPayloadMinorVersion;
}
- LOG(INFO) << "Auto-detected minor_version=" << payload_config.minor_version;
+ LOG(INFO) << "Auto-detected minor_version=" << payload_config.version.minor;
} else {
- payload_config.minor_version = FLAGS_minor_version;
+ payload_config.version.minor = FLAGS_minor_version;
LOG(INFO) << "Using provided minor_version=" << FLAGS_minor_version;
}
+ if (!FLAGS_zlib_fingerprint.empty()) {
+ if (utils::IsZlibCompatible(FLAGS_zlib_fingerprint)) {
+ payload_config.version.imgdiff_allowed = true;
+ } else {
+ LOG(INFO) << "IMGDIFF operation disabled due to fingerprint mismatch.";
+ }
+ }
+
if (payload_config.is_delta) {
LOG(INFO) << "Generating delta update";
} else {
diff --git a/payload_generator/inplace_generator.cc b/payload_generator/inplace_generator.cc
index 2111748..30dbafc 100644
--- a/payload_generator/inplace_generator.cc
+++ b/payload_generator/inplace_generator.cc
@@ -47,6 +47,10 @@
namespace {
+// The only PayloadVersion supported by this implementation.
+const PayloadVersion kInPlacePayloadVersion{kChromeOSMajorPayloadVersion,
+ kInPlaceMinorPayloadVersion};
+
// This class allocates non-existent temp blocks, starting from
// kTempBlockStart. Other code is responsible for converting these
// temp blocks into real blocks, as the client can't read or write to
@@ -571,8 +575,8 @@
new_extents,
(*graph)[cut.old_dst].aop.name,
-1, // chunk_blocks, forces to have a single operation.
- blob_file,
- false)); // src_ops_allowed
+ kInPlacePayloadVersion,
+ blob_file));
TEST_AND_RETURN_FALSE(new_aop.size() == 1);
TEST_AND_RETURN_FALSE(AddInstallOpToGraph(
graph, cut.old_dst, nullptr, new_aop.front().op, new_aop.front().name));
@@ -791,6 +795,8 @@
BlobFileWriter* blob_file,
vector<AnnotatedOperation>* aops) {
TEST_AND_RETURN_FALSE(old_part.name == new_part.name);
+ TEST_AND_RETURN_FALSE(config.version.major == kInPlacePayloadVersion.major);
+ TEST_AND_RETURN_FALSE(config.version.minor == kInPlacePayloadVersion.minor);
ssize_t hard_chunk_blocks = (config.hard_chunk_size == -1 ? -1 :
config.hard_chunk_size / config.block_size);
@@ -800,14 +806,13 @@
partition_size = config.rootfs_partition_size;
LOG(INFO) << "Delta compressing " << new_part.name << " partition...";
- TEST_AND_RETURN_FALSE(
- diff_utils::DeltaReadPartition(aops,
- old_part,
- new_part,
- hard_chunk_blocks,
- soft_chunk_blocks,
- blob_file,
- false)); // src_ops_allowed
+ TEST_AND_RETURN_FALSE(diff_utils::DeltaReadPartition(aops,
+ old_part,
+ new_part,
+ hard_chunk_blocks,
+ soft_chunk_blocks,
+ config.version,
+ blob_file));
LOG(INFO) << "Done reading " << new_part.name;
TEST_AND_RETURN_FALSE(
diff --git a/payload_generator/payload_file.cc b/payload_generator/payload_file.cc
index d268ab8..de81a39 100644
--- a/payload_generator/payload_file.cc
+++ b/payload_generator/payload_file.cc
@@ -59,10 +59,9 @@
} // namespace
bool PayloadFile::Init(const PayloadGenerationConfig& config) {
- major_version_ = config.major_version;
- TEST_AND_RETURN_FALSE(major_version_ == kChromeOSMajorPayloadVersion ||
- major_version_ == kBrilloMajorPayloadVersion);
- manifest_.set_minor_version(config.minor_version);
+ TEST_AND_RETURN_FALSE(config.version.Validate());
+ major_version_ = config.version.major;
+ manifest_.set_minor_version(config.version.minor);
if (!config.source.ImageInfoIsEmpty())
*(manifest_.mutable_old_image_info()) = config.source.image_info;
@@ -288,7 +287,7 @@
ScopedFileWriterCloser writer_closer(&writer);
uint64_t out_file_size = 0;
- for (auto& part: part_vec_) {
+ for (auto& part : part_vec_) {
for (AnnotatedOperation& aop : part.aops) {
if (!aop.op.has_data_offset())
continue;
diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc
index 813e33c..dc2ced6 100644
--- a/payload_generator/payload_generation_config.cc
+++ b/payload_generator/payload_generation_config.cc
@@ -112,7 +112,72 @@
&& image_info.build_version().empty();
}
+PayloadVersion::PayloadVersion(uint64_t major_version, uint32_t minor_version) {
+ major = major_version;
+ minor = minor_version;
+}
+
+bool PayloadVersion::Validate() const {
+ TEST_AND_RETURN_FALSE(major == kChromeOSMajorPayloadVersion ||
+ major == kBrilloMajorPayloadVersion);
+ TEST_AND_RETURN_FALSE(minor == kFullPayloadMinorVersion ||
+ minor == kInPlaceMinorPayloadVersion ||
+ minor == kSourceMinorPayloadVersion ||
+ minor == kOpSrcHashMinorPayloadVersion ||
+ minor == kImgdiffMinorPayloadVersion);
+ return true;
+}
+
+bool PayloadVersion::OperationAllowed(InstallOperation_Type operation) const {
+ switch (operation) {
+ // Full operations:
+ case InstallOperation::REPLACE:
+ case InstallOperation::REPLACE_BZ:
+ // These operations were included in the original payload format.
+ return true;
+
+ case InstallOperation::REPLACE_XZ:
+ // These operations are included in the major version used in Brillo, but
+ // can also be used with minor version 3 or newer.
+ return major == kBrilloMajorPayloadVersion ||
+ minor >= kOpSrcHashMinorPayloadVersion;
+
+ case InstallOperation::ZERO:
+ case InstallOperation::DISCARD:
+ // The implementation of these operations had a bug in earlier versions
+ // that prevented them from being used in any payload. We will enable
+ // them for delta payloads for now.
+ return minor >= kImgdiffMinorPayloadVersion;
+
+ // Delta operations:
+ case InstallOperation::MOVE:
+ case InstallOperation::BSDIFF:
+ // MOVE and BSDIFF were replaced by SOURCE_COPY and SOURCE_BSDIFF and
+ // should not be used in newer delta versions, since the idempotent checks
+ // were removed.
+ return minor == kInPlaceMinorPayloadVersion;
+
+ case InstallOperation::SOURCE_COPY:
+ case InstallOperation::SOURCE_BSDIFF:
+ return minor >= kSourceMinorPayloadVersion;
+
+ case InstallOperation::IMGDIFF:
+ return minor >= kImgdiffMinorPayloadVersion && imgdiff_allowed;
+ }
+ return false;
+}
+
+bool PayloadVersion::IsDelta() const {
+ return minor != kFullPayloadMinorVersion;
+}
+
+bool PayloadVersion::InplaceUpdate() const {
+ return minor == kInPlaceMinorPayloadVersion;
+}
+
bool PayloadGenerationConfig::Validate() const {
+ TEST_AND_RETURN_FALSE(version.Validate());
+ TEST_AND_RETURN_FALSE(version.IsDelta() == is_delta);
if (is_delta) {
for (const PartitionConfig& part : source.partitions) {
if (!part.path.empty()) {
@@ -123,28 +188,22 @@
TEST_AND_RETURN_FALSE(part.postinstall.IsEmpty());
}
- // Check for the supported minor_version values.
- TEST_AND_RETURN_FALSE(minor_version == kInPlaceMinorPayloadVersion ||
- minor_version == kSourceMinorPayloadVersion ||
- minor_version == kOpSrcHashMinorPayloadVersion);
-
// If new_image_info is present, old_image_info must be present.
TEST_AND_RETURN_FALSE(source.ImageInfoIsEmpty() ==
target.ImageInfoIsEmpty());
} else {
// All the "source" image fields must be empty for full payloads.
TEST_AND_RETURN_FALSE(source.ValidateIsEmpty());
- TEST_AND_RETURN_FALSE(minor_version == kFullPayloadMinorVersion);
}
// In all cases, the target image must exists.
for (const PartitionConfig& part : target.partitions) {
TEST_AND_RETURN_FALSE(part.ValidateExists());
TEST_AND_RETURN_FALSE(part.size % block_size == 0);
- if (minor_version == kInPlaceMinorPayloadVersion &&
+ if (version.minor == kInPlaceMinorPayloadVersion &&
part.name == kLegacyPartitionNameRoot)
TEST_AND_RETURN_FALSE(rootfs_partition_size >= part.size);
- if (major_version == kChromeOSMajorPayloadVersion)
+ if (version.major == kChromeOSMajorPayloadVersion)
TEST_AND_RETURN_FALSE(part.postinstall.IsEmpty());
}
diff --git a/payload_generator/payload_generation_config.h b/payload_generator/payload_generation_config.h
index 21bb89b..2601821 100644
--- a/payload_generator/payload_generation_config.h
+++ b/payload_generator/payload_generation_config.h
@@ -107,6 +107,34 @@
std::vector<PartitionConfig> partitions;
};
+struct PayloadVersion {
+ PayloadVersion() : PayloadVersion(0, 0) {}
+ PayloadVersion(uint64_t major_version, uint32_t minor_version);
+
+ // Returns whether the PayloadVersion is valid.
+ bool Validate() const;
+
+ // Return whether the passed |operation| is allowed by this payload.
+ bool OperationAllowed(InstallOperation_Type operation) const;
+
+ // Whether this payload version is a delta payload.
+ bool IsDelta() const;
+
+ // Tells whether the update is done in-place, that is, whether the operations
+ // read and write from the same partition.
+ bool InplaceUpdate() const;
+
+ // The major version of the payload.
+ uint64_t major;
+
+ // The minor version of the payload.
+ uint32_t minor;
+
+ // Whether the IMGDIFF operation is allowed based on the available compressor
+ // in the delta_generator and the one supported by the target.
+ bool imgdiff_allowed = false;
+};
+
// The PayloadGenerationConfig struct encapsulates all the configuration to
// build the requested payload. This includes information about the old and new
// image as well as the restrictions applied to the payload (like minor-version
@@ -125,11 +153,8 @@
// Wheter the requested payload is a delta payload.
bool is_delta = false;
- // The major_version of the requested payload.
- uint64_t major_version;
-
- // The minor_version of the requested payload.
- uint32_t minor_version;
+ // The major/minor version of the payload.
+ PayloadVersion version;
// The size of the rootfs partition, that not necessarily is the same as the
// filesystem in either source or target version, since there is some space
diff --git a/payload_generator/payload_signer.cc b/payload_generator/payload_signer.cc
index a73b891..824195d 100644
--- a/payload_generator/payload_signer.cc
+++ b/payload_generator/payload_signer.cc
@@ -25,6 +25,7 @@
#include <brillo/data_encoding.h>
#include <brillo/streams/file_stream.h>
#include <brillo/streams/stream.h>
+#include <openssl/err.h>
#include <openssl/pem.h>
#include "update_engine/common/hash_calculator.h"
@@ -346,34 +347,35 @@
const string& private_key_path,
brillo::Blob* out_signature) {
LOG(INFO) << "Signing hash with private key: " << private_key_path;
- string sig_path;
- TEST_AND_RETURN_FALSE(
- utils::MakeTempFile("signature.XXXXXX", &sig_path, nullptr));
- ScopedPathUnlinker sig_path_unlinker(sig_path);
-
- string hash_path;
- TEST_AND_RETURN_FALSE(
- utils::MakeTempFile("hash.XXXXXX", &hash_path, nullptr));
- ScopedPathUnlinker hash_path_unlinker(hash_path);
// We expect unpadded SHA256 hash coming in
TEST_AND_RETURN_FALSE(hash.size() == 32);
brillo::Blob padded_hash(hash);
PayloadVerifier::PadRSA2048SHA256Hash(&padded_hash);
- TEST_AND_RETURN_FALSE(utils::WriteFile(hash_path.c_str(),
- padded_hash.data(),
- padded_hash.size()));
- // This runs on the server, so it's okay to copy out and call openssl
- // executable rather than properly use the library.
- vector<string> cmd = {"openssl", "rsautl", "-raw", "-sign", "-inkey",
- private_key_path, "-in", hash_path, "-out", sig_path};
- int return_code = 0;
- TEST_AND_RETURN_FALSE(Subprocess::SynchronousExec(cmd, &return_code,
- nullptr));
- TEST_AND_RETURN_FALSE(return_code == 0);
+ // The code below executes the equivalent of:
+ //
+ // openssl rsautl -raw -sign -inkey |private_key_path|
+ // -in |padded_hash| -out |out_signature|
- brillo::Blob signature;
- TEST_AND_RETURN_FALSE(utils::ReadFile(sig_path, &signature));
+ FILE* fprikey = fopen(private_key_path.c_str(), "rb");
+ TEST_AND_RETURN_FALSE(fprikey != nullptr);
+ RSA* rsa = PEM_read_RSAPrivateKey(fprikey, nullptr, nullptr, nullptr);
+ fclose(fprikey);
+ TEST_AND_RETURN_FALSE(rsa != nullptr);
+ brillo::Blob signature(RSA_size(rsa));
+ ssize_t signature_size = RSA_private_encrypt(padded_hash.size(),
+ padded_hash.data(),
+ signature.data(),
+ rsa,
+ RSA_NO_PADDING);
+ RSA_free(rsa);
+ if (signature_size < 0) {
+ LOG(ERROR) << "Signing hash failed: "
+ << ERR_error_string(ERR_get_error(), nullptr);
+ return false;
+ }
+ TEST_AND_RETURN_FALSE(static_cast<size_t>(signature_size) ==
+ signature.size());
out_signature->swap(signature);
return true;
}
diff --git a/payload_generator/payload_signer_unittest.cc b/payload_generator/payload_signer_unittest.cc
index ebdd280..096e0e8 100644
--- a/payload_generator/payload_signer_unittest.cc
+++ b/payload_generator/payload_signer_unittest.cc
@@ -141,7 +141,7 @@
&load_metadata_size,
nullptr));
EXPECT_EQ(metadata_size, payload_metadata_blob.size());
- EXPECT_EQ(config.major_version, load_major_version);
+ EXPECT_EQ(config.version.major, load_major_version);
EXPECT_EQ(metadata_size, load_metadata_size);
}
@@ -150,13 +150,13 @@
TEST_F(PayloadSignerTest, LoadPayloadV1Test) {
PayloadGenerationConfig config;
- config.major_version = kChromeOSMajorPayloadVersion;
+ config.version.major = kChromeOSMajorPayloadVersion;
DoWriteAndLoadPayloadTest(config);
}
TEST_F(PayloadSignerTest, LoadPayloadV2Test) {
PayloadGenerationConfig config;
- config.major_version = kBrilloMajorPayloadVersion;
+ config.version.major = kBrilloMajorPayloadVersion;
DoWriteAndLoadPayloadTest(config);
}
@@ -211,7 +211,7 @@
ScopedPathUnlinker payload_path_unlinker(payload_path);
PayloadGenerationConfig config;
- config.major_version = kBrilloMajorPayloadVersion;
+ config.version.major = kBrilloMajorPayloadVersion;
PayloadFile payload;
EXPECT_TRUE(payload.Init(config));
uint64_t metadata_size;
@@ -236,7 +236,7 @@
ScopedPathUnlinker payload_path_unlinker(payload_path);
PayloadGenerationConfig config;
- config.major_version = kBrilloMajorPayloadVersion;
+ config.version.major = kBrilloMajorPayloadVersion;
PayloadFile payload;
EXPECT_TRUE(payload.Init(config));
uint64_t metadata_size;
diff --git a/payload_generator/xz.h b/payload_generator/xz.h
new file mode 100644
index 0000000..6e0a26c
--- /dev/null
+++ b/payload_generator/xz.h
@@ -0,0 +1,34 @@
+//
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_XZ_H_
+#define UPDATE_ENGINE_PAYLOAD_GENERATOR_XZ_H_
+
+#include <brillo/secure_blob.h>
+
+namespace chromeos_update_engine {
+
+// Initialize the xz compression unit. Call once before any call to
+// XzCompress().
+void XzCompressInit();
+
+// Compresses the input buffer |in| into |out| with xz. The compressed stream
+// will be the equivalent of running xz -9 --check=none
+bool XzCompress(const brillo::Blob& in, brillo::Blob* out);
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_PAYLOAD_GENERATOR_XZ_H_
diff --git a/payload_generator/xz_android.cc b/payload_generator/xz_android.cc
new file mode 100644
index 0000000..f3b836d
--- /dev/null
+++ b/payload_generator/xz_android.cc
@@ -0,0 +1,116 @@
+//
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/xz.h"
+
+#include <7zCrc.h>
+#include <Xz.h>
+#include <XzEnc.h>
+
+#include <algorithm>
+
+#include <base/logging.h>
+
+namespace {
+
+bool xz_initialized = false;
+
+// An ISeqInStream implementation that reads all the data from the passed Blob.
+struct BlobReaderStream : public ISeqInStream {
+ explicit BlobReaderStream(const brillo::Blob& data) : data_(data) {
+ Read = &BlobReaderStream::ReadStatic;
+ }
+
+ static SRes ReadStatic(void* p, void* buf, size_t* size) {
+ auto* self =
+ static_cast<BlobReaderStream*>(reinterpret_cast<ISeqInStream*>(p));
+ *size = std::min(*size, self->data_.size() - self->pos_);
+ memcpy(buf, self->data_.data() + self->pos_, *size);
+ self->pos_ += *size;
+ return SZ_OK;
+ }
+
+ const brillo::Blob& data_;
+
+ // The current reader position.
+ size_t pos_ = 0;
+};
+
+// An ISeqOutStream implementation that writes all the data to the passed Blob.
+struct BlobWriterStream : public ISeqOutStream {
+ explicit BlobWriterStream(brillo::Blob* data) : data_(data) {
+ Write = &BlobWriterStream::WriteStatic;
+ }
+
+ static size_t WriteStatic(void* p, const void* buf, size_t size) {
+ auto* self =
+ static_cast<BlobWriterStream*>(reinterpret_cast<ISeqOutStream*>(p));
+ const uint8_t* buffer = reinterpret_cast<const uint8_t*>(buf);
+ self->data_->reserve(self->data_->size() + size);
+ self->data_->insert(self->data_->end(), buffer, buffer + size);
+ return size;
+ }
+
+ brillo::Blob* data_;
+};
+
+} // namespace
+
+namespace chromeos_update_engine {
+
+void XzCompressInit() {
+ if (xz_initialized)
+ return;
+ xz_initialized = true;
+ // Although we don't include a CRC32 for the stream, the xz file header has
+ // a CRC32 of the header itself, which requires the CRC table to be
+ // initialized.
+ CrcGenerateTable();
+}
+
+bool XzCompress(const brillo::Blob& in, brillo::Blob* out) {
+ CHECK(xz_initialized) << "Initialize XzCompress first";
+ out->clear();
+ if (in.empty())
+ return true;
+
+ // Xz compression properties.
+ CXzProps props;
+ XzProps_Init(&props);
+ // No checksum in the xz stream. xz-embedded (used by the decompressor) only
+ // supports CRC32, but we already check the sha-1 of the whole blob during
+ // payload application.
+ props.checkId = XZ_CHECK_NO;
+
+ // LZMA2 compression properties.
+ CLzma2EncProps lzma2Props;
+ props.lzma2Props = &lzma2Props;
+ Lzma2EncProps_Init(&lzma2Props);
+ // LZMA compression "level 6" requires 9 MB of RAM to decompress in the worst
+ // case.
+ lzma2Props.lzmaProps.level = 6;
+ lzma2Props.lzmaProps.numThreads = 1;
+ // The input size data is used to reduce the dictionary size if possible.
+ lzma2Props.lzmaProps.reduceSize = in.size();
+ Lzma2EncProps_Normalize(&lzma2Props);
+
+ BlobWriterStream out_writer(out);
+ BlobReaderStream in_reader(in);
+ SRes res = Xz_Encode(&out_writer, &in_reader, &props, nullptr /* progress */);
+ return res == SZ_OK;
+}
+
+} // namespace chromeos_update_engine
diff --git a/payload_generator/xz_chromeos.cc b/payload_generator/xz_chromeos.cc
new file mode 100644
index 0000000..a8cda4e
--- /dev/null
+++ b/payload_generator/xz_chromeos.cc
@@ -0,0 +1,28 @@
+//
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/xz.h"
+
+namespace chromeos_update_engine {
+
+void XzCompressInit() {}
+
+bool XzCompress(const brillo::Blob& in, brillo::Blob* out) {
+ // No Xz compressor implementation in Chrome OS delta_generator builds.
+ return false;
+}
+
+} // namespace chromeos_update_engine
diff --git a/payload_generator/zip_unittest.cc b/payload_generator/zip_unittest.cc
index 0c95a8f..c38f8b8 100644
--- a/payload_generator/zip_unittest.cc
+++ b/payload_generator/zip_unittest.cc
@@ -20,10 +20,15 @@
#include <string>
#include <vector>
+#include <brillo/make_unique_ptr.h>
#include <gtest/gtest.h>
#include "update_engine/common/test_utils.h"
+#include "update_engine/payload_consumer/bzip_extent_writer.h"
+#include "update_engine/payload_consumer/extent_writer.h"
+#include "update_engine/payload_consumer/xz_extent_writer.h"
#include "update_engine/payload_generator/bzip.h"
+#include "update_engine/payload_generator/xz.h"
using chromeos_update_engine::test_utils::kRandomString;
using std::string;
@@ -31,13 +36,55 @@
namespace chromeos_update_engine {
+namespace {
+
+// ExtentWriter class that writes to memory, used to test the decompression
+// step with the corresponding extent writer.
+class MemoryExtentWriter : public ExtentWriter {
+ public:
+ // Creates the ExtentWriter that will write all the bytes to the passed |data|
+ // blob.
+ explicit MemoryExtentWriter(brillo::Blob* data) : data_(data) {
+ data_->clear();
+ }
+ ~MemoryExtentWriter() override = default;
+
+ bool Init(FileDescriptorPtr fd,
+ const vector<Extent>& extents,
+ uint32_t block_size) override {
+ return true;
+ }
+ bool Write(const void* bytes, size_t count) override {
+ data_->reserve(data_->size() + count);
+ data_->insert(data_->end(),
+ static_cast<const uint8_t*>(bytes),
+ static_cast<const uint8_t*>(bytes) + count);
+ return true;
+ }
+ bool EndImpl() override { return true; }
+
+ private:
+ brillo::Blob* data_;
+};
+
+template <typename W>
+bool DecompressWithWriter(const brillo::Blob& in, brillo::Blob* out) {
+ std::unique_ptr<ExtentWriter> writer(
+ new W(brillo::make_unique_ptr(new MemoryExtentWriter(out))));
+ // Init() parameters are ignored by the testing MemoryExtentWriter.
+ TEST_AND_RETURN_FALSE(writer->Init(nullptr, {}, 1));
+ TEST_AND_RETURN_FALSE(writer->Write(in.data(), in.size()));
+ TEST_AND_RETURN_FALSE(writer->End());
+ return true;
+}
+
+} // namespace
+
template <typename T>
class ZipTest : public ::testing::Test {
public:
- bool ZipDecompress(const brillo::Blob& in, brillo::Blob* out) const = 0;
bool ZipCompress(const brillo::Blob& in, brillo::Blob* out) const = 0;
- bool ZipCompressString(const string& str, brillo::Blob* out) const = 0;
- bool ZipDecompressString(const string& str, brillo::Blob* out) const = 0;
+ bool ZipDecompress(const brillo::Blob& in, brillo::Blob* out) const = 0;
};
class BzipTest {};
@@ -45,34 +92,47 @@
template <>
class ZipTest<BzipTest> : public ::testing::Test {
public:
- bool ZipDecompress(const brillo::Blob& in, brillo::Blob* out) const {
- return BzipDecompress(in, out);
- }
bool ZipCompress(const brillo::Blob& in, brillo::Blob* out) const {
return BzipCompress(in, out);
}
- bool ZipCompressString(const string& str, brillo::Blob* out) const {
- return BzipCompressString(str, out);
- }
- bool ZipDecompressString(const string& str, brillo::Blob* out) const {
- return BzipDecompressString(str, out);
+ bool ZipDecompress(const brillo::Blob& in, brillo::Blob* out) const {
+ return DecompressWithWriter<BzipExtentWriter>(in, out);
}
};
+class XzTest {};
+
+template <>
+class ZipTest<XzTest> : public ::testing::Test {
+ public:
+ bool ZipCompress(const brillo::Blob& in, brillo::Blob* out) const {
+ return XzCompress(in, out);
+ }
+ bool ZipDecompress(const brillo::Blob& in, brillo::Blob* out) const {
+ return DecompressWithWriter<XzExtentWriter>(in, out);
+ }
+};
+
+#ifdef __ANDROID__
+typedef ::testing::Types<BzipTest, XzTest> ZipTestTypes;
+#else
+// Chrome OS implementation of Xz compressor just returns false.
typedef ::testing::Types<BzipTest> ZipTestTypes;
+#endif // __ANDROID__
+
TYPED_TEST_CASE(ZipTest, ZipTestTypes);
-
-
TYPED_TEST(ZipTest, SimpleTest) {
- string in("this should compress well xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
- "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
- "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
- "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
- "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
- "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx");
+ string in_str(
+ "this should compress well xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+ "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+ "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+ "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+ "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+ "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx");
+ brillo::Blob in(in_str.begin(), in_str.end());
brillo::Blob out;
- EXPECT_TRUE(this->ZipCompressString(in, &out));
+ EXPECT_TRUE(this->ZipCompress(in, &out));
EXPECT_LT(out.size(), in.size());
EXPECT_GT(out.size(), 0U);
brillo::Blob decompressed;
@@ -82,32 +142,29 @@
}
TYPED_TEST(ZipTest, PoorCompressionTest) {
- string in(reinterpret_cast<const char*>(kRandomString),
- sizeof(kRandomString));
+ brillo::Blob in(std::begin(kRandomString), std::end(kRandomString));
brillo::Blob out;
- EXPECT_TRUE(this->ZipCompressString(in, &out));
+ EXPECT_TRUE(this->ZipCompress(in, &out));
EXPECT_GT(out.size(), in.size());
- string out_string(out.begin(), out.end());
brillo::Blob decompressed;
- EXPECT_TRUE(this->ZipDecompressString(out_string, &decompressed));
+ EXPECT_TRUE(this->ZipDecompress(out, &decompressed));
EXPECT_EQ(in.size(), decompressed.size());
- EXPECT_TRUE(!memcmp(in.data(), decompressed.data(), in.size()));
+ EXPECT_EQ(in, decompressed);
}
TYPED_TEST(ZipTest, MalformedZipTest) {
- string in(reinterpret_cast<const char*>(kRandomString),
- sizeof(kRandomString));
+ brillo::Blob in(std::begin(kRandomString), std::end(kRandomString));
brillo::Blob out;
- EXPECT_FALSE(this->ZipDecompressString(in, &out));
+ EXPECT_FALSE(this->ZipDecompress(in, &out));
}
TYPED_TEST(ZipTest, EmptyInputsTest) {
- string in;
+ brillo::Blob in;
brillo::Blob out;
- EXPECT_TRUE(this->ZipDecompressString(in, &out));
+ EXPECT_TRUE(this->ZipDecompress(in, &out));
EXPECT_EQ(0U, out.size());
- EXPECT_TRUE(this->ZipCompressString(in, &out));
+ EXPECT_TRUE(this->ZipCompress(in, &out));
EXPECT_EQ(0U, out.size());
}
diff --git a/sample_images/generate_images.sh b/sample_images/generate_images.sh
index b461175..c098e39 100755
--- a/sample_images/generate_images.sh
+++ b/sample_images/generate_images.sh
@@ -166,6 +166,17 @@
exit 1
EOF
+ # A program that reports back progress.
+ sudo tee "${mntdir}"/bin/postinst_progress >/dev/null <<EOF
+#!/etc/../bin/sh
+# These values have exact representation in IEEE 754 so we avoid rounding
+# errors.
+echo global_progress 0.25 >&3
+echo global_progress 0.5 >&3
+echo global_progress 1.0 >&3
+exit 0
+EOF
+
# A postinstall bash program.
sudo tee "${mntdir}"/bin/self_check_context >/dev/null <<EOF
#!/etc/../bin/sh
diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload
index a30d46e..3d102c7 100755
--- a/scripts/brillo_update_payload
+++ b/scripts/brillo_update_payload
@@ -197,6 +197,9 @@
# Path to the postinstall config file in target image if exists.
POSTINSTALL_CONFIG_FILE=""
+# The fingerprint of zlib in the source image.
+ZLIB_FINGERPRINT=""
+
# read_option_int <file.txt> <option_key> [default_value]
#
# Reads the unsigned integer value associated with |option_key| in a key=value
@@ -302,6 +305,11 @@
# updater supports a newer major version.
FORCE_MAJOR_VERSION="1"
+ if [[ "${partitions_array}" == "SRC_PARTITIONS" ]]; then
+ # Copy from zlib_fingerprint in source image to stdout.
+ ZLIB_FINGERPRINT=$(e2cp "${root}":/etc/zlib_fingerprint -)
+ fi
+
# When generating legacy Chrome OS images, we need to use "boot" and "system"
# for the partition names to be compatible with updating Brillo devices with
# Chrome OS images.
@@ -366,6 +374,10 @@
Disabling deltas for this source version."
exit ${EX_UNSUPPORTED_DELTA}
fi
+
+ if [[ "${FORCE_MINOR_VERSION}" -ge 4 ]]; then
+ ZLIB_FINGERPRINT=$(unzip -p "${image}" "META/zlib_fingerprint.txt")
+ fi
else
# Target image
local postinstall_config=$(create_tempfile "postinstall_config.XXXXXX")
@@ -461,6 +473,9 @@
if [[ -n "${FORCE_MINOR_VERSION}" ]]; then
GENERATOR_ARGS+=( --minor_version="${FORCE_MINOR_VERSION}" )
fi
+ if [[ -n "${ZLIB_FINGERPRINT}" ]]; then
+ GENERATOR_ARGS+=( --zlib_fingerprint="${ZLIB_FINGERPRINT}" )
+ fi
fi
if [[ -n "${FORCE_MAJOR_VERSION}" ]]; then
diff --git a/testrunner.cc b/testrunner.cc
index 9e22798..635e120 100644
--- a/testrunner.cc
+++ b/testrunner.cc
@@ -24,12 +24,16 @@
#include <gtest/gtest.h>
#include "update_engine/common/terminator.h"
+#include "update_engine/payload_generator/xz.h"
int main(int argc, char **argv) {
LOG(INFO) << "started";
base::AtExitManager exit_manager;
// xz-embedded requires to initialize its CRC-32 table once on startup.
xz_crc32_init();
+ // The LZMA SDK-based Xz compressor used in the payload generation requires
+ // this one-time initialization.
+ chromeos_update_engine::XzCompressInit();
// TODO(garnold) temporarily cause the unittest binary to exit with status
// code 2 upon catching a SIGTERM. This will help diagnose why the unittest
// binary is perceived as failing by the buildbot. We should revert it to use
diff --git a/update_attempter.cc b/update_attempter.cc
index cfd2425..a0f3bd4 100644
--- a/update_attempter.cc
+++ b/update_attempter.cc
@@ -88,6 +88,10 @@
namespace {
const int kMaxConsecutiveObeyProxyRequests = 20;
+// Minimum threshold to broadcast a status update in progress and time.
+const double kBroadcastThresholdProgress = 0.01; // 1%
+const int kBroadcastThresholdSeconds = 10;
+
// By default autest bypasses scattering. If we want to test scattering,
// use kScheduledAUTestURLRequest. The URL used is same in both cases, but
// different params are passed to CheckForUpdate().
@@ -582,6 +586,7 @@
InstallPlanAction* previous_action) {
shared_ptr<PostinstallRunnerAction> postinstall_runner_action(
new PostinstallRunnerAction(system_state_->boot_control()));
+ postinstall_runner_action->set_delegate(this);
actions_.push_back(shared_ptr<AbstractAction>(postinstall_runner_action));
BondActions(previous_action,
postinstall_runner_action.get());
@@ -1071,17 +1076,14 @@
// from a given URL for the URL skipping logic.
system_state_->payload_state()->DownloadProgress(bytes_progressed);
- double progress = static_cast<double>(bytes_received) /
- static_cast<double>(total);
- // Self throttle based on progress. Also send notifications if
- // progress is too slow.
- const double kDeltaPercent = 0.01; // 1%
- if (status_ != UpdateStatus::DOWNLOADING ||
- bytes_received == total ||
- progress - download_progress_ >= kDeltaPercent ||
- TimeTicks::Now() - last_notify_time_ >= TimeDelta::FromSeconds(10)) {
+ double progress = 0;
+ if (total)
+ progress = static_cast<double>(bytes_received) / static_cast<double>(total);
+ if (status_ != UpdateStatus::DOWNLOADING || bytes_received == total) {
download_progress_ = progress;
SetStatusAndNotify(UpdateStatus::DOWNLOADING);
+ } else {
+ ProgressUpdate(progress);
}
}
@@ -1127,6 +1129,18 @@
return true;
}
+void UpdateAttempter::ProgressUpdate(double progress) {
+ // Self throttle based on progress. Also send notifications if progress is
+ // too slow.
+ if (progress == 1.0 ||
+ progress - download_progress_ >= kBroadcastThresholdProgress ||
+ TimeTicks::Now() - last_notify_time_ >=
+ TimeDelta::FromSeconds(kBroadcastThresholdSeconds)) {
+ download_progress_ = progress;
+ BroadcastStatus();
+ }
+}
+
bool UpdateAttempter::ResetStatus() {
LOG(INFO) << "Attempting to reset state from "
<< UpdateStatusToString(status_) << " to UpdateStatus::IDLE";
diff --git a/update_attempter.h b/update_attempter.h
index bbc4b4e..b045614 100644
--- a/update_attempter.h
+++ b/update_attempter.h
@@ -39,6 +39,7 @@
#include "update_engine/omaha_request_params.h"
#include "update_engine/omaha_response_handler_action.h"
#include "update_engine/payload_consumer/download_action.h"
+#include "update_engine/payload_consumer/postinstall_runner_action.h"
#include "update_engine/proxy_resolver.h"
#include "update_engine/service_observer_interface.h"
#include "update_engine/system_state.h"
@@ -59,7 +60,8 @@
class UpdateAttempter : public ActionProcessorDelegate,
public DownloadActionDelegate,
public CertificateChecker::Observer,
- public WeaveServiceInterface::DelegateInterface {
+ public WeaveServiceInterface::DelegateInterface,
+ public PostinstallRunnerAction::DelegateInterface {
public:
using UpdateStatus = update_engine::UpdateStatus;
static const int kMaxDeltaUpdateFailures;
@@ -108,6 +110,9 @@
std::string* current_channel,
std::string* tracking_channel) override;
+ // PostinstallRunnerAction::DelegateInterface
+ void ProgressUpdate(double progress) override;
+
// Resets the current state to UPDATE_STATUS_IDLE.
// Used by update_engine_client for restarting a new update without
// having to reboot once the previous update has reached
diff --git a/update_attempter_android.cc b/update_attempter_android.cc
index bc9a698..e42f569 100644
--- a/update_attempter_android.cc
+++ b/update_attempter_android.cc
@@ -48,6 +48,10 @@
namespace {
+// Minimum threshold to broadcast a status update in progress and time.
+const double kBroadcastThresholdProgress = 0.01; // 1%
+const int kBroadcastThresholdSeconds = 10;
+
const char* const kErrorDomain = "update_engine";
// TODO(deymo): Convert the different errors to a numeric value to report them
// back on the service error.
@@ -275,7 +279,7 @@
// action succeeded.
const string type = action->Type();
if (type == DownloadAction::StaticType()) {
- download_progress_ = 0.0;
+ download_progress_ = 0;
}
if (code != ErrorCode::kSuccess) {
// If an action failed, the ActionProcessor will cancel the whole thing.
@@ -289,17 +293,14 @@
void UpdateAttempterAndroid::BytesReceived(uint64_t bytes_progressed,
uint64_t bytes_received,
uint64_t total) {
- double progress = 0.;
+ double progress = 0;
if (total)
progress = static_cast<double>(bytes_received) / static_cast<double>(total);
- // Self throttle based on progress. Also send notifications if
- // progress is too slow.
- const double kDeltaPercent = 0.01; // 1%
- if (status_ != UpdateStatus::DOWNLOADING || bytes_received == total ||
- progress - download_progress_ >= kDeltaPercent ||
- TimeTicks::Now() - last_notify_time_ >= TimeDelta::FromSeconds(10)) {
+ if (status_ != UpdateStatus::DOWNLOADING || bytes_received == total) {
download_progress_ = progress;
SetStatusAndNotify(UpdateStatus::DOWNLOADING);
+ } else {
+ ProgressUpdate(progress);
}
}
@@ -313,6 +314,18 @@
// Nothing needs to be done when the download completes.
}
+void UpdateAttempterAndroid::ProgressUpdate(double progress) {
+ // Self throttle based on progress. Also send notifications if progress is
+ // too slow.
+ if (progress == 1.0 ||
+ progress - download_progress_ >= kBroadcastThresholdProgress ||
+ TimeTicks::Now() - last_notify_time_ >=
+ TimeDelta::FromSeconds(kBroadcastThresholdSeconds)) {
+ download_progress_ = progress;
+ SetStatusAndNotify(status_);
+ }
+}
+
void UpdateAttempterAndroid::UpdateBootFlags() {
if (updated_boot_flags_) {
LOG(INFO) << "Already updated boot flags. Skipping.";
@@ -348,7 +361,7 @@
// Reset cpu shares back to normal.
cpu_limiter_.StopLimiter();
- download_progress_ = 0.0;
+ download_progress_ = 0;
actions_.clear();
UpdateStatus new_status =
(error_code == ErrorCode::kSuccess ? UpdateStatus::UPDATED_NEED_REBOOT
diff --git a/update_attempter_android.h b/update_attempter_android.h
index 0d74d56..4fdac2c 100644
--- a/update_attempter_android.h
+++ b/update_attempter_android.h
@@ -32,6 +32,7 @@
#include "update_engine/common/hardware_interface.h"
#include "update_engine/common/prefs_interface.h"
#include "update_engine/payload_consumer/download_action.h"
+#include "update_engine/payload_consumer/postinstall_runner_action.h"
#include "update_engine/service_delegate_android_interface.h"
#include "update_engine/service_observer_interface.h"
@@ -39,9 +40,11 @@
class DaemonStateAndroid;
-class UpdateAttempterAndroid : public ServiceDelegateAndroidInterface,
- public ActionProcessorDelegate,
- public DownloadActionDelegate {
+class UpdateAttempterAndroid
+ : public ServiceDelegateAndroidInterface,
+ public ActionProcessorDelegate,
+ public DownloadActionDelegate,
+ public PostinstallRunnerAction::DelegateInterface {
public:
using UpdateStatus = update_engine::UpdateStatus;
@@ -80,6 +83,9 @@
bool ShouldCancel(ErrorCode* cancel_reason) override;
void DownloadComplete() override;
+ // PostinstallRunnerAction::DelegateInterface
+ void ProgressUpdate(double progress) override;
+
private:
// Asynchronously marks the current slot as successful if needed. If already
// marked as good, CompleteUpdateBootFlags() is called starting the action
diff --git a/update_engine.gyp b/update_engine.gyp
index 07e6f6c..3a1d635 100644
--- a/update_engine.gyp
+++ b/update_engine.gyp
@@ -397,6 +397,7 @@
'payload_generator/raw_filesystem.cc',
'payload_generator/tarjan.cc',
'payload_generator/topological_sort.cc',
+ 'payload_generator/xz_chromeos.cc',
],
},
# server-side delta generator.