Merge pie-platform-release to aosp-master - DO NOT MERGE

Change-Id: I843aae83fcfa68767c4225b1638462a34c7d91bd
diff --git a/.classpath b/.classpath
new file mode 100644
index 0000000..ff66d5a
--- /dev/null
+++ b/.classpath
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<classpath>
+  <classpathentry kind="src" path="shared/src/main/java"/>
+  <classpathentry kind="src" path="shared/src/test/java"/>
+  <classpathentry kind="src" path="generator/src/main/java"/>
+  <classpathentry kind="src" path="generator/src/test/java"/>
+  <classpathentry kind="src" path="generator/src/test/resources"/>
+  <classpathentry kind="src" path="applier/src/main/java"/>
+  <classpathentry kind="src" path="applier/src/test/java"/>
+  <classpathentry kind="src" path="applier/src/test/resources"/>
+  <classpathentry kind="src" path="explainer/src/main/java"/>
+  <classpathentry kind="src" path="explainer/src/test/java"/>
+  <classpathentry kind="src" path="integrationtest/src/test/java"/>
+  <classpathentry kind="src" path="sample/src/main/java"/>
+  <classpathentry kind="src" path="tools/src/main/java"/>
+  <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
+  <classpathentry kind="con" path="org.eclipse.jdt.junit.JUNIT_CONTAINER/4"/>
+  <classpathentry kind="output" path="bin"/>
+</classpath>
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..e25096f
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,5 @@
+bin
+.settings
+build
+.gradle
+local-build-repo
diff --git a/.project b/.project
new file mode 100644
index 0000000..53768e5
--- /dev/null
+++ b/.project
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+  <name>archive-patcher</name>
+  <comment></comment>
+  <projects>
+  </projects>
+  <buildSpec>
+    <buildCommand>
+      <name>org.eclipse.jdt.core.javabuilder</name>
+      <arguments>
+      </arguments>
+    </buildCommand>
+  </buildSpec>
+  <natures>
+    <nature>org.eclipse.jdt.core.javanature</nature>
+  </natures>
+</projectDescription>
diff --git a/Android.bp b/Android.bp
new file mode 100644
index 0000000..16f50ac
--- /dev/null
+++ b/Android.bp
@@ -0,0 +1,26 @@
+//
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+java_library {
+    name: "archive-patcher",
+    hostdex: true,
+    srcs: [
+        "shared/src/main/java/**/*.java",
+        "applier/src/main/java/**/*.java",
+    ],
+    sdk_version: "core_current",
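+    // archive-patcher should be compatible with all versions of Android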
+    java_version: "1.6",
+}
diff --git a/Android.mk b/Android.mk
deleted file mode 100644
index 6704350..0000000
--- a/Android.mk
+++ /dev/null
@@ -1,35 +0,0 @@
-#
-# Copyright (C) 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-LOCAL_PATH := $(call my-dir)
-
-archive-patcher_shared_src_files := $(call all-java-files-under,shared/src/main/java)
-archive-patcher_applier_src_files := $(call all-java-files-under,applier/src/main/java)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := archive-patcher
-LOCAL_SRC_FILES := $(archive-patcher_shared_src_files) $(archive-patcher_applier_src_files)
-# archive-patcher should be compatible with all versions of Android
-LOCAL_SDK_VERSION := 4
-LOCAL_JAVA_LANGUAGE_VERSION := 1.6
-include $(BUILD_STATIC_JAVA_LIBRARY)
-
-include $(CLEAR_VARS)
-LOCAL_SRC_FILES := $(archive-patcher_shared_src_files) $(archive-patcher_applier_src_files)
-LOCAL_MODULE := archive-patcher-hostdex
-# archive-patcher should be compatible with all versions of Android
-LOCAL_SDK_VERSION := 4
-LOCAL_JAVA_LANGUAGE_VERSION := 1.6
-include $(BUILD_HOST_DALVIK_STATIC_JAVA_LIBRARY)
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..e41154e
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,226 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+===========================================================================
+
+The following applies to DivSuffixSorter.java:
+
+MIT License:
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/METADATA b/METADATA
new file mode 100644
index 0000000..8d38c44
--- /dev/null
+++ b/METADATA
@@ -0,0 +1,19 @@
+name: "archive-patcher"
+description: "Archive-patcher is an open-source project that allows space-efficient patching of zip archives."
+third_party {
+  url {
+    type: HOMEPAGE
+    value: "https://github.com/andrewhayden/archive-patcher"
+  }
+  url {
+    type: ARCHIVE
+    value: "https://github.com/andrewhayden/archive-patcher/archive/1.0.zip"
+  }
+  version: "1.0"
+  license_type: RECIPROCAL
+  last_upgrade_date {
+    year: 2018
+    month: 8
+    day: 24
+  }
+}
diff --git a/README.android b/README.android
deleted file mode 100644
index bd0bd16..0000000
--- a/README.android
+++ /dev/null
@@ -1,6 +0,0 @@
-Any Android specific modifications to upstream archive-patcher should be listed
-here:
-
-(1) Deleted /integrationtest, /tools, /explainer and /generator as these are not
-    intended to be used on Android. Only /shared and /applier remain.
-
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..e7763ee
--- /dev/null
+++ b/README.md
@@ -0,0 +1,394 @@
+# Archive Patcher Documentation
+
+Copyright 2016 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+----
+
+# Table of Contents
+* [Introduction](#introduction)
+* [How It Works](#how-it-works)
+ * [Generating a Patch](#generating-a-patch)
+ * [Applying a Patch](#applying-a-patch)
+ * [Handled Cases](#handled-cases)
+* [Sample Code: Generating a Patch](#sample-code-generating-a-patch)
+* [Sample Code: Applying a Patch](#sample-code-applying-a-patch)
+* [Background](#background)
+* [The File-by-File v1 Patch Format](#the-file-by-file-v1-patch-format)
+ * [Old Archive Uncompression Op](#old-archive-uncompression-op)
+ * [New Archive Recompression Op](#new-archive-recompression-op)
+ * [Compression Settings](#compression-settings)
+ * [Compatibility Window](#compatibility-window)
+ * [Delta Descriptor Record](#delta-descriptor-record)
+* [Appendix](#appendix)
+ * [Interesting Obstacles to Patching Archives](#interesting-obstacles-to-patching-archives)
+ * [Areas For Improvement](#areas-for-improvement)
+* [Acknowledgements](#acknowledgements)
+
+# Introduction
+**Archive-patcher is an open-source project that allows space-efficient patching of zip archives.** Many common distribution formats (such as jar and apk) are valid zip archives; archive-patcher works with all of them.
+
+Because the patching process examines each individual file within the input archives, we refer to the process as **File-by-File patching** and an individual patch generated by that process as a **File-by-File patch**. Archive-patcher processes almost all zip files, but it is most efficient for zip files created with "standard" tools like PKWARE's 'zip', Oracle's 'jar', and Google's 'aapt'.
+
+By design, **File-by-File patches are uncompressed**. This allows freedom in choosing the best compression algorithms for a given use case. It is usually best to compress the patches for storage or transport.
+
+> *Note: Archive-patcher does not currently handle 'zip64' archives (archives supporting more than 65,535 files or containing files larger than 4GB in size).*
+
+# How It Works
+Archive-patcher **transforms** archives into a **delta-friendly space** to generate and apply a delta. This transformation involves uncompressing the compressed content that has changed, while leaving everything else alone. The patch applier then recompresses the content that has changed to produce a perfect binary copy of the new archive - the original input to the patch generator. In v1, bsdiff is the delta algorithm used within the delta-friendly space. Much more information on this subject is available in the [Appendix](#appendix).
+
+Diagrams and examples follow. In these examples we will use an old archive and a new archive, each containing 3 files: foo.txt, bar.xml, and baz.lib:
+
+* **foo.txt** has changed its content between the old and new archives. It is uncompressed from both the old and new archives during transformation to the delta-friendly space. This will allow the delta between v1 and v2 of the file to be encoded efficiently.
+* **bar.xml** has also changed its content between the old and new archives. It is already uncompressed in the old and new archives, so it is left alone during transformation to the delta-friendly space. The delta between v1 and v2 of the file can already be encoded efficiently.
+* **baz.lib** has *not* changed between the old and new archives. It is left alone during transformation to the delta-friendly space because it has not changed and the delta for an unchanged file is trivially empty.
+
+## Generating a Patch
+1. Determine which files in the new archive have changed from the old archive.
+2. Determine which of the changed files from (1) have deflate settings that can be determined and record those settings.
+3. Determine the original offsets and lengths of all files in (2) in both the old and new archives.
+4. Create delta-friendly versions of both the old and new archives, uncompressing the files from (2). The resulting intermediate artifacts are called **delta-friendly blobs**; they are no longer valid zip archives.
+5. Generate a delta between the old and new delta-friendly blobs from (4).
+6. Output the patch carrying the data from (2), (3) and (5).
+
+```
+File-by-File v1: Patch Generation Overview
+
+
+                      Delta-Friendly       Delta-Friendly
+   Old Archive           Old Blob             New Blob            New Archive
+ ----------------    ----------------     ----------------    ----------------
+ |   foo.txt    |    |   foo.txt    |     |   foo.txt    |    |   foo.txt    |
+ |   version 1  |    |   version 1  |     |   version 2  |    |   version 2  |
+ | (compressed) |    |(uncompressed)|     |(uncompressed)|    | (compressed) |
+ |--------------|    |              |     |              |    |--------------|
+ |   bar.xml    |    |              |     |              |    |   bar.xml    |
+ |   version 1  |    |--------------|     |--------------|    |   version 2  |
+ |(uncompressed)|--->|   bar.xml    |--┬--|   bar.xml    |<---|(uncompressed)|
+ |--------------|    |   version 1  |  |  |   version 2  |    |--------------|
+ |   baz.lib    |    |(uncompressed)|  |  |(uncompressed)|    |   baz.lib    |
+ |   version 1  |    |--------------|  |  |--------------|    |   version 1  |
+ | (compressed) |    |   baz.lib    |  |  |   baz.lib    |    | (compressed) |
+ ----------------    |   version 1  |  |  |   version 1  |    ----------------
+        |            | (compressed) |  |  | (compressed) |            |
+        |            ----------------  |  ----------------            |
+        v                              v                              v
+ ----------------                 ----------                  ----------------
+ |Uncompression |                 | delta  |                  |Recompression |
+ |   metadata   |                 ----------                  |   metadata   |
+ ----------------                      |                      ----------------
+        |                              v                              |
+        |                   ----------------------                    |
+        └------------------>|  File-by-File v1   |<-------------------┘
+                            |       Patch        |
+                            ----------------------
+```
+
+## Applying a Patch
+1. Reconstruct the delta-friendly old blob using information from the patch.
+2. Apply the delta to the delta-friendly old blob generated in (1). This generates the delta-friendly new blob.
+3. Recompress the files in the delta-friendly new blob using information from the patch. The result is the "new archive" that was the original input to the patch generator.
+
+```
+File-by-File v1: Patch Application Overview
+
+
+                      Delta-Friendly       Delta-Friendly
+   Old Archive           Old Blob             New Blob           New Archive
+ ----------------    ----------------     ---------------     ----------------
+ |   foo.txt    |    |   foo.txt    |     |   foo.txt    |    |   foo.txt    |
+ |   version 1  |    |   version 1  |     |   version 2  |    |   version 2  |
+ | (compressed) |    |(uncompressed)|     |(uncompressed)|    | (compressed) |
+ |--------------|    |              |     |              |    |--------------|
+ |   bar.xml    |    |              |     |              |    |   bar.xml    |
+ |   version 1  |    |--------------|     |--------------|    |   version 2  |
+ |(uncompressed)|-┬->|   bar.xml    |     |   bar.xml    |-┬->|(uncompressed)|
+ |--------------| |  |   version 1  |     |   version 2  | |  |--------------|
+ |   baz.lib    | |  |(uncompressed)|     |(uncompressed)| |  |   baz.lib    |
+ |   version 1  | |  |--------------|     |--------------| |  |   version 1  |
+ | (compressed) | |  |   baz.lib    |     |   baz.lib    | |  | (compressed) |
+ ---------------- |  |   version 1  |     |   version 1  | |  ----------------
+                  |  | (compressed) |     | (compressed) | |
+                  |  ----------------     ---------------- |
+                  |         |                    ^         |
+ ---------------- |         |     ----------     |         |  ----------------
+ |Uncompression |-┘         └---->| delta  |-----┘         └--|Recompression |
+ |   metadata   |                 ----------                  |   metadata   |
+ ----------------                      ^                      ----------------
+        ^                              |                              ^
+        |                   ----------------------                    |
+        └-------------------|  File-by-File v1   |--------------------┘
+                            |       Patch        |
+                            ----------------------
+```
+
+## Handled Cases
+The examples above used two simple archives with 3 common files to help explain the process, but the implementation handles significantly more nuance. It searches for and handles changes of many types, including trickier edge cases such as a file that changes compression level, becomes compressed or uncompressed, or is renamed without changes.
+
+Files that are only in the *new* archive are always left alone, and the delta usually encodes them as a literal copy. Files that are only in the *old* archive are similarly left alone, and the delta usually just discards their bytes completely. And of course, files whose deflate settings cannot be inferred are left alone, since they cannot be recompressed and are therefore required to remain in their existing compressed form.
+
+> *Note: The v1 implementation does not detect files that are renamed and changed at the same time. This is the domain of similar-file detection, a feature deemed desirable - but not critical - for v1.*
+
+# Sample Code: Generating a Patch
+The following code snippet illustrates how to generate a patch and compress it with deflate compression. The example in the subsequent section shows how to apply such a patch.
+
+```java
+import com.google.archivepatcher.generator.FileByFileV1DeltaGenerator;
+import com.google.archivepatcher.shared.DefaultDeflateCompatibilityWindow;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.util.zip.Deflater;
+import java.util.zip.DeflaterOutputStream;
+
+/** Generate a patch; args are old file path, new file path, and patch file path. */
+public class SamplePatchGenerator {
+  public static void main(String... args) throws Exception {
+    if (!new DefaultDeflateCompatibilityWindow().isCompatible()) {
+      System.err.println("zlib not compatible on this system");
+      System.exit(-1);
+    }
+    File oldFile = new File(args[0]); // must be a zip archive
+    File newFile = new File(args[1]); // must be a zip archive
+    Deflater compressor = new Deflater(9, true); // to compress the patch
+    try (FileOutputStream patchOut = new FileOutputStream(args[2]);
+        DeflaterOutputStream compressedPatchOut =
+            new DeflaterOutputStream(patchOut, compressor, 32768)) {
+      new FileByFileV1DeltaGenerator().generateDelta(oldFile, newFile, compressedPatchOut);
+      compressedPatchOut.finish();
+      compressedPatchOut.flush();
+    } finally {
+      compressor.end();
+    }
+  }
+}
+```
+
+# Sample Code: Applying a Patch
+The following code snippet illustrates how to apply a patch that was compressed with deflate compression, as in the previous example.
+
+```java
+import com.google.archivepatcher.applier.FileByFileV1DeltaApplier;
+import com.google.archivepatcher.shared.DefaultDeflateCompatibilityWindow;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.util.zip.Inflater;
+import java.util.zip.InflaterInputStream;
+
+/** Apply a patch; args are old file path, patch file path, and new file path. */
+public class SamplePatchApplier {
+  public static void main(String... args) throws Exception {
+    if (!new DefaultDeflateCompatibilityWindow().isCompatible()) {
+      System.err.println("zlib not compatible on this system");
+      System.exit(-1);
+    }
+    File oldFile = new File(args[0]); // must be a zip archive
+    Inflater uncompressor = new Inflater(true); // to uncompress the patch
+    try (FileInputStream compressedPatchIn = new FileInputStream(args[1]);
+        InflaterInputStream patchIn =
+            new InflaterInputStream(compressedPatchIn, uncompressor, 32768);
+        FileOutputStream newFileOut = new FileOutputStream(args[2])) {
+      new FileByFileV1DeltaApplier().applyDelta(oldFile, patchIn, newFileOut);
+    } finally {
+      uncompressor.end();
+    }
+  }
+}
+```
+
+# Background
+Patching software exists primarily to make updating software or data files **spatially efficient**. This is accomplished by figuring out what has changed between the inputs (usually an old version and a new version of a given file) and transmitting **only the changes** instead of transmitting the entire file. For example, if we wanted to update a dictionary with one new definition, it's much more efficient to send just the one updated definition than to send along a brand new dictionary! A number of excellent algorithms exist to do just this - diff, bsdiff, xdelta and many more.
+
+In order to generate **spatially efficient** patches for zip archives, the content within the zip archives needs to be uncompressed. This necessitates recompressing after applying a patch, and this in turn requires knowing the settings that were originally used to compress the data within the zip archive and being able to reproduce them exactly. These three problems are what make patching zip archives a unique challenge, and their solutions are what make archive-patcher interesting. If you'd like to read more about this now, skip down to [Interesting Obstacles to Patching Archives](#interesting-obstacles-to-patching-archives).
+
+# The File-by-File v1 Patch Format
+The v1 patch format is a sequence of bytes described below. Care has been taken to make the format friendly to streaming, so the order of fields in the patch is intended to reflect the order of operations needed to apply the patch. Unless otherwise noted, the following constraints apply:
+
+* All integer fields contain **unsigned**, **big endian** values. However:
+ * 32-bit integer fields have a maximum value of 2^31 - 1 (due to limitations in Java)
+ * 64-bit integer fields have a maximum value of 2^63 - 1 (due to limitations in Java)
+
+```
+|------------------------------------------------------|
+| Versioned Identifier (8 bytes) (UTF-8 text)          | Literal: "GFbFv1_0"
+|------------------------------------------------------|
+| Flags (4 bytes) (currently unused, but reserved)     |
+|------------------------------------------------------|
+| Delta-friendly old archive size (8 bytes) (uint64)   |
+|------------------------------------------------------|
+| Num old archive uncompression ops (4 bytes) (uint32) |
+|------------------------------------------------------|
+| Old archive uncompression op 1...n (variable length) | (see definition below)
+|------------------------------------------------------|
+| Num new archive recompression ops (4 bytes) (uint32) |
+|------------------------------------------------------|
+| New archive recompression op 1...n (variable length) | (see definition below)
+|------------------------------------------------------|
+| Num delta descriptor records (4 bytes) (uint32)      |
+|------------------------------------------------------|
+| Delta descriptor record 1...n (variable length)      | (see definition below)
+|------------------------------------------------------|
+| Delta 1...n (variable length)                        | (see definition below)
+|------------------------------------------------------|
+```
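+
+To make the layout concrete, here is a minimal sketch (not part of the library; class and variable names are illustrative) that reads the fixed header fields with standard Java I/O. Because the spec caps 32-bit fields at 2^31 - 1 and 64-bit fields at 2^63 - 1, Java's signed `readInt` and `readLong` are safe to use.
+
+```java
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
+
+/** Illustrative sketch: reads the fixed File-by-File v1 header fields. */
+public class PatchHeaderSketch {
+  public static void readHeader(InputStream in) throws IOException {
+    DataInputStream data = new DataInputStream(in);
+    byte[] identifier = new byte[8];
+    data.readFully(identifier);
+    if (!"GFbFv1_0".equals(new String(identifier, StandardCharsets.UTF_8))) {
+      throw new IOException("not a File-by-File v1 patch");
+    }
+    int flags = data.readInt();                  // reserved, currently unused
+    long deltaFriendlyOldSize = data.readLong(); // delta-friendly old archive size
+    int numUncompressionOps = data.readInt();    // count of old archive uncompression ops
+    // ... the uncompression ops themselves follow (next section)
+  }
+}
+```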
+
+## Old Archive Uncompression Op
+The number of these entries is determined by the "Num old archive uncompression ops" field previously defined. Each entry consists of an offset (from the beginning of the old archive) and a number of bytes to uncompress. Important notes:
+
+* Entries must be ordered in ascending order by offset. This allows the transformation of the old archive into the delta-friendly space to be done by reading the old archive as a stream, instead of requiring random access.
+* Entries must not overlap (for sanity)
+* Areas of the old archive that are not included in any uncompression op will be left alone; they represent arbitrary data that should **not** be uncompressed, such as zip structural components or blocks of data that are already stored without compression.
+
+```
+|------------------------------------------------------|
+| Offset of first byte to uncompress (8 bytes) (uint64)|
+|------------------------------------------------------|
+| Number of bytes to uncompress (8 bytes) (uint64)     |
+|------------------------------------------------------|
+```
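+
+A hedged sketch of reading these entries and enforcing the ordering and non-overlap constraints (again, names are illustrative and this is not the library's actual code):
+
+```java
+import java.io.DataInputStream;
+import java.io.IOException;
+
+/** Illustrative sketch: reads 'count' uncompression ops as {offset, length} pairs. */
+final class UncompressionOpReader {
+  static long[][] readOps(DataInputStream in, int count) throws IOException {
+    long[][] ops = new long[count][2];
+    long previousEnd = -1; // inclusive offset of the last byte of the previous entry
+    for (int i = 0; i < count; i++) {
+      long offset = in.readLong(); // offset of first byte to uncompress
+      long length = in.readLong(); // number of bytes to uncompress
+      // Entries must be in ascending order by offset and must not overlap.
+      if (offset <= previousEnd) {
+        throw new IOException("uncompression ops out of order or overlapping");
+      }
+      ops[i][0] = offset;
+      ops[i][1] = length;
+      previousEnd = offset + length - 1;
+    }
+    return ops;
+  }
+}
+```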
+
+## New Archive Recompression Op
+The number of these entries is determined by the "Num new archive recompression ops" field previously defined. Like an old archive uncompression op, each entry consists of an offset - but this time from the beginning of the delta-friendly new blob. This is followed by the number of bytes to compress, and finally a compression settings field. Important notes:
+
+* Entries must be ordered in ascending order by offset. This allows the output from the delta apply process (which creates the delta-friendly new blob) to be piped to an intelligent partially-compressing stream that is seeded with the knowledge of which ranges to recompress and the settings to use for each. This avoids the need to write the delta-friendly new blob to persistent storage, an important optimization.
+* Entries must not overlap (for sanity)
+* Areas of the new archive that are not included in any recompression op will be copied through from the delta-friendly new blob without modification. These represent arbitrary data that should **not** be compressed, such as zip structural components or blocks of data that are stored without compression in the new archive.
+
+```
+|------------------------------------------------------|
+| Offset of first byte to compress (8 bytes) (uint64)  |
+|------------------------------------------------------|
+| Number of bytes to compress (8 bytes) (uint64)       |
+|------------------------------------------------------|
+| Compression settings (4 bytes)                       | (see definition below)
+|------------------------------------------------------|
+```
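+
+The corresponding read logic differs from an uncompression op only by the trailing compression settings field; a brief sketch (illustrative, not the library's code):
+
+```java
+import java.io.DataInputStream;
+import java.io.IOException;
+
+/** Illustrative sketch: reads one recompression op, including its settings. */
+final class RecompressionOpReader {
+  static void readOp(DataInputStream in) throws IOException {
+    long offset = in.readLong();          // offset into the delta-friendly new blob
+    long length = in.readLong();          // number of bytes to recompress
+    int windowId = in.readUnsignedByte(); // compatibility window ID (0 in v1)
+    int level = in.readUnsignedByte();    // deflate level, 1-9
+    int strategy = in.readUnsignedByte(); // deflate strategy, 0-2
+    int wrapMode = in.readUnsignedByte(); // 0 = wrap, 1 = nowrap
+    // ... hand these values to the recompressing output stream
+  }
+}
+```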
+
+## Compression Settings
+The compression settings define the deflate level (in the range 1 to 9, inclusive), the deflate strategy (in the range 0 to 2, inclusive) and the wrapping mode (wrap or nowrap). The settings are specific to a **compatibility window**, discussed in the next section in more detail.
+
+> *In practice almost all entries in zip archives have strategy 0 (the default) and wrapping mode 'nowrap'. The other strategies are primarily used in-situ, e.g., the compression used within the PNG format; wrapping, on the other hand, is almost exclusively used in gzip operations.*
+
+```
+|------------------------------------------------------|
+| Compatibility window ID (1 byte) (uint8)             | (see definition below)
+|------------------------------------------------------|
+| Deflate level (1 byte) (uint8) (range: [1,9])        |
+|------------------------------------------------------|
+| Deflate strategy (1 byte) (uint8) (range: [0,2])     |
+|------------------------------------------------------|
+| Wrap mode (1 byte) (uint8) (0=wrap, 1=nowrap)        |
+|------------------------------------------------------|
+```
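+
+These fields map directly onto `java.util.zip`. A minimal sketch of turning a settings record into a configured `Deflater` (assuming the fields were read as in the sketches above):
+
+```java
+import java.util.zip.Deflater;
+
+/** Illustrative sketch: builds a Deflater from a v1 compression settings record. */
+final class SettingsToDeflater {
+  static Deflater fromSettings(int level, int strategy, int wrapMode) {
+    // nowrap=true produces raw deflate output (no zlib header or checksum).
+    Deflater deflater = new Deflater(level, wrapMode == 1);
+    // In java.util.zip: DEFAULT_STRATEGY=0, FILTERED=1, HUFFMAN_ONLY=2.
+    deflater.setStrategy(strategy);
+    return deflater;
+  }
+}
+```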
+
+## Compatibility Window
+A compatibility window specifies a compression algorithm along with a range of versions and platforms upon which it is known to produce predictable and consistent output. That is, all implementations within a given compatibility window must produce *identical output* for any *identical inputs* consisting of bytes to be compressed along with the compression settings (level, strategy, wrapping mode).
+
+In File-by-File v1, there is only one compatibility window defined. It is **the default deflate compatibility window**, having **ID=0** (all other values reserved for future expansion), and it specifies the following configuration:
+
+* Algorithm: deflate (zlib)
+* Window length: 32,768 bytes (hardcoded and implied, not explicitly set)
+* Valid compression levels: 1 through 9 (0 means store, and is unused)
+* Valid strategies: 0, 1, or 2 (java.util.zip does not support any later strategies)
+* Valid wrapping modes: wrap, nowrap
+
+The default compatibility window is compatible with the following runtime environments based on empirical testing. Other environments may be compatible, but the ones in this table are known to be.
+
+Runtime Environment | OS | Hardware Architectures | Min Version | Max Version | Notes
+--- | --- | --- | --- | --- | ---
+Sun/Oracle JRE (including OpenJDK) | Linux | x64 | 1.7 (07 Jul, 2011) | None known as of September 2016 | Still compatible as of 1.8, the latest as of August 2016. Versions prior to 1.7 have different level_flags (see [zlib change](https://github.com/madler/zlib/commit/086e982175da84b3db958191031380794315f95f)).
+Dalvik/ART | Android | armeabi­v7a, arm64­v8a, x86 | API 15 (19 Oct, 2011) | None known as of September 2016 | Still compatible as of API 24 (Nougat), the latest as of September 2016. Versions prior to API 15 (Ice Cream Sandwich) used a smaller sliding window size (see [AOSP change](https://android.googlesource.com/platform/libcore/+/909a18fd6628cee6718865a7b7bf2534ea25f5ec%5E%21/#F0)).
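+
+Applications can verify at runtime that the local zlib falls within the default compatibility window before generating or applying a patch, as the sample code earlier does:
+
+```java
+import com.google.archivepatcher.shared.DefaultDeflateCompatibilityWindow;
+
+/** Illustrative sketch: gate patching on compatibility of the local zlib. */
+final class CompatibilityCheck {
+  static boolean runtimeIsCompatible() {
+    // Deflates a built-in corpus under every settings combination and compares
+    // the resulting fingerprints against known-good values (see Problem #3 below).
+    return new DefaultDeflateCompatibilityWindow().isCompatible();
+  }
+}
+```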
+
+## Delta Descriptor Record
+Delta descriptor records are grouped together before any of the actual deltas. In File-by-File v1 there is always exactly one delta, so there is exactly one delta descriptor record followed immediately by the delta data. Conceptually, the descriptor defines input and output regions of the archives along with a delta to be applied to those regions (reading from one, and writing to the other).
+
+> *In subsequent versions there may be arbitrarily many deltas. When there is more than one delta, all the descriptors are listed in a contiguous block followed by all of the deltas themselves, also in a contiguous block. This allows the patch applier to preprocess the list of all deltas that are going to be applied and allocate resources accordingly. As with the other descriptors, these must be ordered by ascending offset and overlaps are not allowed.*
+
+```
+|------------------------------------------------------|
+| Delta format ID (1 byte) (uint8)                     |
+|------------------------------------------------------|
+| Old delta-friendly region start (8 bytes) (uint64)   |
+|------------------------------------------------------|
+| Old delta-friendly region length (8 bytes) (uint64)  |
+|------------------------------------------------------|
+| New delta-friendly region start (8 bytes) (uint64)   |
+|------------------------------------------------------|
+| New delta-friendly region length (8 bytes) (uint64)  |
+|------------------------------------------------------|
+| Delta length (8 bytes) (uint64)                      |
+|------------------------------------------------------|
+```
+
+Descriptions of the fields within this record are a little more complex than those in the other parts of the patch:
+
+* **Delta format**: The only delta format in File-by-File v1 is **bsdiff**, having **ID=0**.
+* **Old delta-friendly region start**: The offset into the old archive (*after* transformation *into* the delta-friendly space) to which the delta applies. In File-by-File v1, this is always zero.
+* **Old delta-friendly region length**: The number of bytes in the old archive (again, *after* transformation *into* the delta-friendly space) to which the delta applies. In File-by-File v1, this is always the length of the old archive in the delta-friendly space.
+* **New delta-friendly region start**: The offset into the new archive (*before* transformation *out of* the delta-friendly space) to which the delta applies. In File-by-File v1, this is always zero.
+* **New delta-friendly region length**: The number of bytes in the new archive (again, *before* transformation *out of* the delta-friendly space) to which the delta applies. In File-by-File v1, this is always the length of the new archive in the delta-friendly space.
+* **Delta length**: The number of bytes in the actual delta (e.g., a bsdiff patch) that needs to be applied to the regions defined above. The type of the delta is determined by the delta format, also defined above.
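+
+Reading a descriptor is straightforward; a short sketch (illustrative names, not the library's code):
+
+```java
+import java.io.DataInputStream;
+import java.io.IOException;
+
+/** Illustrative sketch: reads one delta descriptor record. */
+final class DeltaDescriptorReader {
+  static void read(DataInputStream in) throws IOException {
+    int deltaFormat = in.readUnsignedByte(); // 0 = bsdiff in v1
+    long oldRegionStart = in.readLong();     // always 0 in v1
+    long oldRegionLength = in.readLong();    // length of the old delta-friendly blob in v1
+    long newRegionStart = in.readLong();     // always 0 in v1
+    long newRegionLength = in.readLong();    // length of the new delta-friendly blob in v1
+    long deltaLength = in.readLong();        // bytes of delta data following the descriptors
+  }
+}
+```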
+
+# Appendix
+
+## Interesting Obstacles to Patching Archives
+
+### Problem #1: Spatial Efficiency
+**Problem**: Zip files make patching hard because compression obscures the changes. Deflate, the compression algorithm used most widely in zip archives, uses a 32k "sliding window" to compress, carrying state with it as it goes. Because state is carried along, even small changes to the data that is being compressed can result in drastic changes to the bytes that are output - even if the size remains similar. If you change the definition of 'aardvark' in our imaginary dictionary (from back in the [Background](#background) section) and zip both the old and new copies, the resulting zip files will be about the **same size**, but will have very **different bytes**. If you try to generate a patch between the two zip files with the same algorithm you used before (e.g., bsdiff), you'll find that the resulting patch file is much, much larger - probably about the same size as one of the zip files. This is because the files are too dissimilar to express any changes succinctly, so the patching algorithm ends up having to embed a copy of almost the entire file.
+
+**Solution**: Archive-patcher **transforms** the input archives into what we refer to as **delta-friendly space** where changed files are stored uncompressed, allowing diffing algorithms like bsdiff to function far more effectively.
+
+> *Note: There are techniques that can be applied to deflate to isolate changes and stop them from causing the entire output to be different, such as those used in rsync-friendly gzip. However, zip archives created with such techniques are uncommon - and tend to be slightly larger in size.*
+
+### Problem #2: Correctness When Generating Patches
+**Problem**: In order for the generated patch to be correct, we need to know the **original deflate settings** that were used for any changed content that we plan to uncompress during the transformation to the delta-friendly space. This is necessary so that the patch applier can **recompress** that changed content after applying the delta, such that the resulting archive is exactly the same as the input to the patch generator. The deflate settings we care about are the **level**, **strategy**, and **wrap mode**.
+
+**Solution**: Archive-patcher iteratively recompresses each piece of changed content with different deflate settings, looking for a perfect match. The search is ordered based on empirical data and one of the first 3 guesses is extremely likely to succeed. Because deflate has a stateful and small sliding window, mismatches are quickly identified and discarded. If a match *is* found, the corresponding settings are added to the patch stream and the content is uncompressed in-place as previously described; if a match *is not* found then the content is left compressed (because we lack any way to tell the patch applier how to recompress it later).
+
+> *Note: While it is possible to change other settings for deflate (like the size of its sliding window), in practice this is almost never done. Content that has been compressed with other settings changed will be left compressed during patch generation.*
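+
+A greatly simplified sketch of this search follows. The real generator streams the comparison and abandons a guess at the first mismatched byte, and it orders its guesses by empirical likelihood; this illustrative version just tries every combination exposed by java.util.zip:
+
+```java
+import java.io.ByteArrayOutputStream;
+import java.util.Arrays;
+import java.util.zip.Deflater;
+
+/** Illustrative sketch: search for deflate settings that reproduce 'target'. */
+final class DeflateSettingsGuesser {
+  /** Returns {level, strategy, nowrap ? 1 : 0}, or null if no settings match. */
+  static int[] guess(byte[] uncompressed, byte[] target) {
+    for (int nowrap = 1; nowrap >= 0; nowrap--) {
+      for (int strategy = 0; strategy <= 2; strategy++) {
+        for (int level = 1; level <= 9; level++) {
+          Deflater deflater = new Deflater(level, nowrap == 1);
+          deflater.setStrategy(strategy);
+          deflater.setInput(uncompressed);
+          deflater.finish();
+          ByteArrayOutputStream out = new ByteArrayOutputStream();
+          byte[] buffer = new byte[32768];
+          // Stop early once the output is already longer than the target.
+          while (!deflater.finished() && out.size() <= target.length) {
+            int numBytes = deflater.deflate(buffer);
+            out.write(buffer, 0, numBytes);
+          }
+          deflater.end();
+          if (out.size() == target.length && Arrays.equals(out.toByteArray(), target)) {
+            return new int[] {level, strategy, nowrap};
+          }
+        }
+      }
+    }
+    return null; // no match: leave this entry compressed in the patch
+  }
+}
+```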
+
+### Problem #3: Correctness When Applying Patches
+**Problem**: The patch applier needs to know that it can reproduce deflate output in exactly the same way as the patch generator did. If this is not possible, patching will fail. The biggest risk is that the patch applier's implementation of deflate differs in some way from that of the patch generator that detected the deflate settings. Any deviation will cause the output to diverge from the original input to the patch generator. Archive-patcher relies on the java.util.zip package which in turn wraps a copy of zlib that ships with the JRE. It is this version of zlib that provides the implementation of deflate.
+
+**Solution**: Archive-patcher contains a ~9000 byte **corpus** of text that produces a unique output for every possible combination of deflate settings that are exposed through the java.util.zip interface (level, strategy, and wrapping mode). These outputs are digested to produce "fingerprints" for each combination of deflate settings on a given version of the zlib library; these fingerprints are then hard-coded into the application. The patch applier checks the local zlib implementation's suitability by repeating the process, deflating the corpus with each combination of java.util.zip settings and digesting the results, then checks that the resulting fingerprints match the hard-coded values.
+
+> *Note: At the time of this writing (September, 2016), all zlib versions since 1.2.0.4 (dated 10 August 2003) have identical fingerprints. This includes every version of Sun/Oracle Java from 1.6.0 onwards on x86 and x86_64 as well as all versions of the Android Open Source Project from 4.0 onward on x86, arm32 and arm64. Other platforms may also be compatible but have not been tested.*
+
+> *Note: This solution is somewhat brittle, but is demonstrably suitable to cover 13 years of zlib updates. Compatibility may be extended in a future version by bundling specific versions of zlib with the application to avoid a dependency upon the zlib in the JRE as necessary.*
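+
+A sketch of the fingerprinting idea for a single settings combination follows; the corpus, the digest choice (SHA-256 here), and all names are illustrative rather than the library's actual values:
+
+```java
+import java.io.ByteArrayOutputStream;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.util.zip.Deflater;
+
+/** Illustrative sketch: fingerprint one deflate configuration over a corpus. */
+final class CorpusFingerprinter {
+  static byte[] fingerprint(byte[] corpus, int level, int strategy, boolean nowrap)
+      throws NoSuchAlgorithmException {
+    Deflater deflater = new Deflater(level, nowrap);
+    deflater.setStrategy(strategy);
+    deflater.setInput(corpus);
+    deflater.finish();
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    byte[] buffer = new byte[32768];
+    while (!deflater.finished()) {
+      int numBytes = deflater.deflate(buffer);
+      out.write(buffer, 0, numBytes);
+    }
+    deflater.end();
+    // Digest the deflated bytes; the applier compares these fingerprints
+    // against values hard-coded at build time.
+    return MessageDigest.getInstance("SHA-256").digest(out.toByteArray());
+  }
+}
+```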
+
+## Areas For Improvement
+The File-by-File v1 patching process dramatically improves the spatial efficiency of patches for zip archives, but there are many improvements that can still be made. Here are a few of the more obvious ones that did not make it into v1, but are good candidates for inclusion into later versions:
+
+* Support for detecting "similar" files between the old and new archives to handle renames that are coupled with content changes.
+* Support for additional versions of zlib or other implementations of deflate.
+* Support for other archive formats.
+* Support for other delta algorithms.
+* Support for more than one delta (i.e., applying different algorithms to different regions of the archives).
+* Support for file-specific transformations (i.e., delta-friendly optimization of different files types during the transformation into the delta-friendly space).
+* Support for partial decompression (i.e., only undoing the Huffman coding steps of deflate and operating on the LZ77 instruction stream during patch generation, allowing a much faster "recompression" step that skips LZ77).
+
+# Acknowledgements
+Major software contributions, in alphabetical order:
+
+* Andrew Hayden - design, implementation, documentation
+* Anthony Morris - code reviews, cleanups, div suffix sort port, and invaluable suggestions
+* Glenn Hartmann - code reviews, initial bsdiff port and quick suffix sort port, bsdiff cleanups
+
+Additionally, we wish to acknowledge the following, also in alphabetical order:
+
+* Colin Percival - the author of [bsdiff](http://www.daemonology.net/bsdiff/)
+* Mark Adler - the author of [zlib](http://www.zlib.net)
+* N. Jesper Larsson and Kunihiko Sadakane - for their paper "[Faster Suffix Sorting](http://www.larsson.dogma.net/ssrev-tr.pdf)", basis of the quick suffix sort algorithm
+* PKWARE, Inc. - creators and stewards of the [zip specification](https://support.pkware.com/display/PKZIP/APPNOTE)
+* Yuta Mori - for the C implementation of the "div suffix sort" ([libdivsufsort](http://code.google.com/p/libdivsufsort/))
diff --git a/README.version b/README.version
deleted file mode 100644
index 0ac7840..0000000
--- a/README.version
+++ /dev/null
@@ -1,5 +0,0 @@
-URL: https://github.com/andrewhayden/archive-patcher/archive/8ffe39d965862e3659c68208efa9147adcaea3bb.zip
-Version: 8ffe39d965862e3659c68208efa9147adcaea3bb
-BugComponent: 129875
-Owners: anamariac, admo
-
diff --git a/applier/build.gradle b/applier/build.gradle
new file mode 100644
index 0000000..a055da4
--- /dev/null
+++ b/applier/build.gradle
@@ -0,0 +1,22 @@
+// applier module
+
+apply plugin: 'java'
+
+dependencies {
+    compile project(':shared')
+
+    testCompile 'junit:junit:4.12'
+    testCompile project(':sharedtest')
+}
+
+task copyTestResources(type: Copy) {
+    // AS/IntelliJ workaround: https://code.google.com/p/android/issues/detail?id=64887#c26
+    if (System.properties['idea.platform.prefix'] != null) {
+        from sourceSets.test.resources
+        into sourceSets.test.output.classesDir
+    }
+}
+
+processTestResources.dependsOn copyTestResources
+
+// EOF
diff --git a/applier/src/main/java/com/google/archivepatcher/applier/FileByFileV1DeltaApplier.java b/applier/src/main/java/com/google/archivepatcher/applier/FileByFileV1DeltaApplier.java
index d0a578b..8ed25fe 100644
--- a/applier/src/main/java/com/google/archivepatcher/applier/FileByFileV1DeltaApplier.java
+++ b/applier/src/main/java/com/google/archivepatcher/applier/FileByFileV1DeltaApplier.java
@@ -17,7 +17,6 @@
 import com.google.archivepatcher.applier.bsdiff.BsDiffDeltaApplier;
 import com.google.archivepatcher.shared.DeltaFriendlyFile;
 import com.google.archivepatcher.shared.RandomAccessFileOutputStream;
-
 import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
@@ -39,11 +38,24 @@
   private final File tempDir;
 
   /**
+   * Creates a new delta applier that will use the default temp directory for working files. This is
+   * equivalent to calling {@link #FileByFileV1DeltaApplier(File)} with a <code>null</code> file
+   * argument.
+   */
+  public FileByFileV1DeltaApplier() {
+    this(null);
+  }
+
+  /**
    * Creates a new delta applier that will use the specified temp directory.
+   *
    * @param tempDir a temp directory where the delta-friendly old blob can be written during the
-   * patch application process
+   *     patch application process; if null, the system's default temporary directory is used
    */
   public FileByFileV1DeltaApplier(File tempDir) {
+    if (tempDir == null) {
+      tempDir = new File(System.getProperty("java.io.tmpdir"));
+    }
     this.tempDir = tempDir;
   }
 
diff --git a/applier/src/test/java/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_internal_blob_a.bin b/applier/src/test/resources/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_internal_blob_a.bin
similarity index 100%
rename from applier/src/test/java/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_internal_blob_a.bin
rename to applier/src/test/resources/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_internal_blob_a.bin
diff --git a/applier/src/test/java/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_internal_blob_b.bin b/applier/src/test/resources/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_internal_blob_b.bin
similarity index 100%
rename from applier/src/test/java/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_internal_blob_b.bin
rename to applier/src/test/resources/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_internal_blob_b.bin
diff --git a/applier/src/test/java/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_internal_patch_a_to_b.bin b/applier/src/test/resources/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_internal_patch_a_to_b.bin
similarity index 100%
rename from applier/src/test/java/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_internal_patch_a_to_b.bin
rename to applier/src/test/resources/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_internal_patch_a_to_b.bin
Binary files differ
diff --git a/applier/src/test/java/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_minimal_blob_a.bin b/applier/src/test/resources/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_minimal_blob_a.bin
similarity index 100%
rename from applier/src/test/java/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_minimal_blob_a.bin
rename to applier/src/test/resources/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_minimal_blob_a.bin
Binary files differ
diff --git a/applier/src/test/java/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_minimal_blob_b.bin b/applier/src/test/resources/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_minimal_blob_b.bin
similarity index 100%
rename from applier/src/test/java/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_minimal_blob_b.bin
rename to applier/src/test/resources/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_minimal_blob_b.bin
Binary files differ
diff --git a/applier/src/test/java/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_minimal_patch_a_to_b.bin b/applier/src/test/resources/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_minimal_patch_a_to_b.bin
similarity index 100%
rename from applier/src/test/java/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_minimal_patch_a_to_b.bin
rename to applier/src/test/resources/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_minimal_patch_a_to_b.bin
Binary files differ
diff --git a/applier/src/test/java/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_partial_a.txt b/applier/src/test/resources/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_partial_a.txt
similarity index 100%
rename from applier/src/test/java/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_partial_a.txt
rename to applier/src/test/resources/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_partial_a.txt
diff --git a/applier/src/test/java/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_partial_b.bin b/applier/src/test/resources/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_partial_b.bin
similarity index 100%
rename from applier/src/test/java/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_partial_b.bin
rename to applier/src/test/resources/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_partial_b.bin
diff --git a/build.gradle b/build.gradle
index f6a1fea..8518ccc 100644
--- a/build.gradle
+++ b/build.gradle
@@ -1,11 +1,14 @@
-apply plugin: 'java'
-
-sourceSets {
-    main {
-        java.srcDirs = ['shared/src/main/java', 'applier/src/main/java']
+buildscript {
+    repositories {
+        jcenter()
     }
-    test {
-        java.srcDirs = ['shared/src/test/java', 'applier/src/test/java']
+    dependencies {
+        classpath 'com.android.tools.build:gradle:2.2.0-rc2'
     }
 }
 
+allprojects {
+    repositories {
+        jcenter()
+    }
+}
diff --git a/explainer/build.gradle b/explainer/build.gradle
new file mode 100644
index 0000000..d2746f9
--- /dev/null
+++ b/explainer/build.gradle
@@ -0,0 +1,11 @@
+// explainer module
+apply plugin: 'java'
+
+dependencies {
+    compile project(':generator')
+    compile project(':shared')
+
+    testCompile 'junit:junit:4.12'
+    testCompile project(':sharedtest')
+}
+// EOF
diff --git a/explainer/src/main/java/com/google/archivepatcher/explainer/EntryExplanation.java b/explainer/src/main/java/com/google/archivepatcher/explainer/EntryExplanation.java
new file mode 100644
index 0000000..39243ae
--- /dev/null
+++ b/explainer/src/main/java/com/google/archivepatcher/explainer/EntryExplanation.java
@@ -0,0 +1,99 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.explainer;
+
+import com.google.archivepatcher.generator.ByteArrayHolder;
+import com.google.archivepatcher.generator.RecommendationReason;
+
+/**
+ * The explanation for a single entry that was considered during patch generation.
+ */
+public class EntryExplanation {
+  /**
+   * The path of the entry in the new archive.
+   */
+  private final ByteArrayHolder path;
+
+  /**
+   * True if the entry only exists in the new archive.
+   */
+  private final boolean isNew;
+
+  /**
+   * If the entry is not new, the reason for its inclusion in or exclusion from the patch.
+   */
+  private final RecommendationReason reasonIncludedIfNotNew;
+
+  /**
+   * The <strong>approximate</strong> size of the entry in the patch stream.
+   */
+  private final long compressedSizeInPatch;
+
+  /**
+   * Construct a new explanation for an entry.
+   * @param path the path of the entry in the new archive
+   * @param isNew true if the entry only exists in the new archive
+   * @param reasonIncludedIfNotNew when isNew is false, the reason that the entry is included
+   * @param compressedSizeInPatch the <strong>approximate</strong> size of the entry in the patch
+   * stream
+   */
+  public EntryExplanation(
+      ByteArrayHolder path,
+      boolean isNew,
+      RecommendationReason reasonIncludedIfNotNew,
+      long compressedSizeInPatch) {
+    this.path = path;
+    this.isNew = isNew;
+    this.reasonIncludedIfNotNew = reasonIncludedIfNotNew;
+    this.compressedSizeInPatch = compressedSizeInPatch;
+  }
+
+  /**
+   * Returns the path of the entry in the new archive.
+   * @return as described
+   */
+  public ByteArrayHolder getPath() {
+    return path;
+  }
+
+  /**
+   * Returns true if the entry only exists in the new archive.
+   * @return as described
+   */
+  public boolean isNew() {
+    return isNew;
+  }
+
+  /**
+   * When {@link #isNew()} is false, the reason that the entry is included.
+   * @return as described
+   */
+  public RecommendationReason getReasonIncludedIfNotNew() {
+    return reasonIncludedIfNotNew;
+  }
+
+  /**
+   * Returns the <strong>approximate</strong> size of the entry in the patch stream. This number is
+   * <strong>not</strong> guaranteed to be precise. Patch generation is complex, and in some cases
+   * the patching process may use arbitrary bytes from arbitrary locations in the old archive to
+   * populate bytes in the new archive. Other factors may also contribute to inaccuracies, such as
+   * overhead in the patch format itself or in the compression technology, and so on.
+   * @return as described
+   */
+  public long getCompressedSizeInPatch() {
+    return compressedSizeInPatch;
+  }
+}
diff --git a/explainer/src/main/java/com/google/archivepatcher/explainer/PatchExplainer.java b/explainer/src/main/java/com/google/archivepatcher/explainer/PatchExplainer.java
new file mode 100644
index 0000000..8a01fa8
--- /dev/null
+++ b/explainer/src/main/java/com/google/archivepatcher/explainer/PatchExplainer.java
@@ -0,0 +1,298 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.explainer;
+
+import com.google.archivepatcher.generator.ByteArrayHolder;
+import com.google.archivepatcher.generator.DeltaGenerator;
+import com.google.archivepatcher.generator.MinimalZipArchive;
+import com.google.archivepatcher.generator.MinimalZipEntry;
+import com.google.archivepatcher.generator.PreDiffExecutor;
+import com.google.archivepatcher.generator.PreDiffPlan;
+import com.google.archivepatcher.generator.QualifiedRecommendation;
+import com.google.archivepatcher.generator.RecommendationModifier;
+import com.google.archivepatcher.generator.RecommendationReason;
+import com.google.archivepatcher.generator.TempFileHolder;
+import com.google.archivepatcher.shared.Compressor;
+import com.google.archivepatcher.shared.CountingOutputStream;
+import com.google.archivepatcher.shared.DeflateUncompressor;
+import com.google.archivepatcher.shared.RandomAccessFileInputStream;
+import com.google.archivepatcher.shared.Uncompressor;
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/** Explains where the data in a patch would come from. */
+// TODO: Add explicit logic for renames
+public class PatchExplainer {
+  /**
+   * A stream that discards everything written to it.
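+   * Serves as the sink beneath the {@link CountingOutputStream} in
+   * {@link #getCompressedSize(File, long, long, Compressor)}, so compressed bytes are counted
+   * without being stored.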
+   */
+  private static class NullOutputStream extends OutputStream {
+    @Override
+    public void write(int b) throws IOException {
+      // Nothing.
+    }
+
+    @Override
+    public void write(byte[] b) throws IOException {
+      // Nothing.
+    }
+
+    @Override
+    public void write(byte[] b, int off, int len) throws IOException {
+      // Nothing.
+    }
+  }
+
+  /**
+   * The compressor to use for compressing patch content.
+   */
+  private final Compressor compressor;
+
+  /**
+   * The delta generator to use for generating uncompressed patch content.
+   */
+  private final DeltaGenerator deltaGenerator;
+
+  /**
+   * Construct a new patch explainer that will use the specified {@link Compressor} to establish
+   * compressed patch size estimates and the specified {@link DeltaGenerator} to generate the deltas
+   * for the patch.
+   * @param compressor the compressor to use
+   * @param deltaGenerator the delta generator to use
+   */
+  public PatchExplainer(Compressor compressor, DeltaGenerator deltaGenerator) {
+    this.compressor = compressor;
+    this.deltaGenerator = deltaGenerator;
+  }
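+
+  // Usage sketch (hedged; assumes a Compressor and a DeltaGenerator implementation are
+  // available on the classpath, e.g. the shared DeflateCompressor and a bsdiff-based
+  // DeltaGenerator):
+  //   PatchExplainer explainer = new PatchExplainer(compressor, deltaGenerator);
+  //   List<EntryExplanation> explanations = explainer.explainPatch(oldFile, newFile);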
+
+  /**
+   * Explains the patch that would be generated for the specified input files.
+   *
+   * @param oldFile the old file
+   * @param newFile the new file
+   * @param recommendationModifiers optionally, {@link RecommendationModifier}s to use during patch
+   *     planning. If null, a normal patch is generated.
+   * @return a list of the explanations for each entry that would be included in the patch
+   * @throws IOException if unable to read data
+   * @throws InterruptedException if any thread interrupts this thread
+   */
+  public List<EntryExplanation> explainPatch(
+      File oldFile, File newFile, RecommendationModifier... recommendationModifiers)
+      throws IOException, InterruptedException {
+    List<EntryExplanation> result = new ArrayList<>();
+
+    // Isolate entries that are only found in the new archive.
+    Map<ByteArrayHolder, MinimalZipEntry> allOldEntries = mapEntries(oldFile);
+    Map<ByteArrayHolder, MinimalZipEntry> allNewEntries = mapEntries(newFile);
+    Map<ByteArrayHolder, MinimalZipEntry> completelyNewEntries = new HashMap<>(allNewEntries);
+    completelyNewEntries.keySet().removeAll(allOldEntries.keySet());
+
+    // Now calculate the costs for the new files and track them in the explanations returned.
+    for (Map.Entry<ByteArrayHolder, MinimalZipEntry> entry : completelyNewEntries.entrySet()) {
+      long compressedSize = getCompressedSize(newFile, entry.getValue(), compressor);
+      result.add(
+          new EntryExplanation(
+              new ByteArrayHolder(entry.getValue().getFileNameBytes()),
+              true,
+              null,
+              compressedSize));
+    }
+
+    Uncompressor uncompressor = new DeflateUncompressor();
+    PreDiffExecutor.Builder builder =
+        new PreDiffExecutor.Builder().readingOriginalFiles(oldFile, newFile);
+    for (RecommendationModifier modifier : recommendationModifiers) {
+      builder.withRecommendationModifier(modifier);
+    }
+    PreDiffExecutor executor = builder.build();
+    PreDiffPlan plan = executor.prepareForDiffing();
+    try (TempFileHolder oldTemp = new TempFileHolder();
+        TempFileHolder newTemp = new TempFileHolder();
+        TempFileHolder deltaTemp = new TempFileHolder()) {
+      for (QualifiedRecommendation qualifiedRecommendation : plan.getQualifiedRecommendations()) {
+
+        // Short-circuit for identical resources.
+        if (qualifiedRecommendation.getReason()
+            == RecommendationReason.COMPRESSED_BYTES_IDENTICAL) {
+          // Patch size should be effectively zero.
+          result.add(
+              new EntryExplanation(
+                  new ByteArrayHolder(qualifiedRecommendation.getNewEntry().getFileNameBytes()),
+                  false,
+                  qualifiedRecommendation.getReason(),
+                  0L));
+          continue;
+        }
+
+        if (qualifiedRecommendation.getOldEntry().getCrc32OfUncompressedData()
+                == qualifiedRecommendation.getNewEntry().getCrc32OfUncompressedData()
+            && qualifiedRecommendation.getOldEntry().getUncompressedSize()
+                == qualifiedRecommendation.getNewEntry().getUncompressedSize()) {
+          // If the path, size and CRC32 are the same, assume it's a match. Patch size should be
+          // effectively zero.
+          result.add(
+              new EntryExplanation(
+                  new ByteArrayHolder(qualifiedRecommendation.getNewEntry().getFileNameBytes()),
+                  false,
+                  qualifiedRecommendation.getReason(),
+                  0L));
+          continue;
+        }
+
+        // Everything past here is a resource that has changed in some way.
+        // NB: This magically takes care of RecommendationReason.RESOURCE_CONSTRAINED. The logic
+        // below will keep the RESOURCE_CONSTRAINED entries compressed, running the delta on their
+        // compressed contents, and the resulting explanation will preserve the RESOURCE_CONSTRAINED
+        // reason. This will correctly attribute the size of these blobs to the RESOURCE_CONSTRAINED
+        // category.
+
+        // Get the inputs ready for running a delta: uncompress/copy the *old* content as necessary.
+        long oldOffset = qualifiedRecommendation.getOldEntry().getFileOffsetOfCompressedData();
+        long oldLength = qualifiedRecommendation.getOldEntry().getCompressedSize();
+        if (qualifiedRecommendation.getRecommendation().uncompressOldEntry) {
+          uncompress(oldFile, oldOffset, oldLength, uncompressor, oldTemp.file);
+        } else {
+          extractCopy(oldFile, oldOffset, oldLength, oldTemp.file);
+        }
+
+        // Get the inputs ready for running a delta: uncompress/copy the *new* content as necessary.
+        long newOffset = qualifiedRecommendation.getNewEntry().getFileOffsetOfCompressedData();
+        long newLength = qualifiedRecommendation.getNewEntry().getCompressedSize();
+        if (qualifiedRecommendation.getRecommendation().uncompressNewEntry) {
+          uncompress(newFile, newOffset, newLength, uncompressor, newTemp.file);
+        } else {
+          extractCopy(newFile, newOffset, newLength, newTemp.file);
+        }
+
+        // File is actually changed (or transitioned between compressed and uncompressed forms).
+        // Generate and compress a delta.
+        try (FileOutputStream deltaOut = new FileOutputStream(deltaTemp.file);
+            BufferedOutputStream bufferedDeltaOut = new BufferedOutputStream(deltaOut)) {
+          deltaGenerator.generateDelta(oldTemp.file, newTemp.file, bufferedDeltaOut);
+          bufferedDeltaOut.flush();
+          long compressedDeltaSize =
+              getCompressedSize(deltaTemp.file, 0, deltaTemp.file.length(), compressor);
+          result.add(
+              new EntryExplanation(
+                  new ByteArrayHolder(qualifiedRecommendation.getNewEntry().getFileNameBytes()),
+                  false,
+                  qualifiedRecommendation.getReason(),
+                  compressedDeltaSize));
+        }
+      }
+    }
+
+    return result;
+  }
+
+  /**
+   * Determines the size of the entry if it were compressed with the specified compressor.
+   * @param file the file to read from
+   * @param entry the entry to estimate the size of
+   * @param compressor the compressor to use for compressing
+   * @return the size of the entry if compressed with the specified compressor
+   * @throws IOException if anything goes wrong
+   */
+  private long getCompressedSize(File file, MinimalZipEntry entry, Compressor compressor)
+      throws IOException {
+    return getCompressedSize(
+        file, entry.getFileOffsetOfCompressedData(), entry.getCompressedSize(), compressor);
+  }
+
+  /**
+   * Uncompress the specified content to a new file.
+   * @param source the file to read from
+   * @param offset the offset at which to start reading
+   * @param length the number of bytes to uncompress
+   * @param uncompressor the uncompressor to use
+   * @param dest the file to write the uncompressed bytes to
+   * @throws IOException if anything goes wrong
+   */
+  private void uncompress(
+      File source, long offset, long length, Uncompressor uncompressor, File dest)
+      throws IOException {
+    try (RandomAccessFileInputStream rafis =
+            new RandomAccessFileInputStream(source, offset, length);
+        FileOutputStream out = new FileOutputStream(dest);
+        BufferedOutputStream bufferedOut = new BufferedOutputStream(out)) {
+      uncompressor.uncompress(rafis, bufferedOut);
+    }
+  }
+
+  /**
+   * Extract a copy of the specified content to a new file.
+   * @param source the file to read from
+   * @param offset the offset at which to start reading
+   * @param length the number of bytes to copy
+   * @param dest the file to write the copied bytes to
+   * @throws IOException if anything goes wrong
+   */
+  private void extractCopy(File source, long offset, long length, File dest) throws IOException {
+    try (RandomAccessFileInputStream rafis =
+            new RandomAccessFileInputStream(source, offset, length);
+        FileOutputStream out = new FileOutputStream(dest);
+        BufferedOutputStream bufferedOut = new BufferedOutputStream(out)) {
+      byte[] buffer = new byte[32768];
+      int numRead = 0;
+      while ((numRead = rafis.read(buffer)) >= 0) {
+        bufferedOut.write(buffer, 0, numRead);
+      }
+      bufferedOut.flush();
+    }
+  }
+
+  /**
+   * Compresses an arbitrary range of bytes in the given file and returns the compressed size.
+   * @param file the file to read from
+   * @param offset the offset in the file to start reading from
+   * @param length the number of bytes to read from the input file
+   * @param compressor the compressor to use for compressing
+   * @return the size of the entry if compressed with the specified compressor
+   * @throws IOException if anything goes wrong
+   */
+  private long getCompressedSize(File file, long offset, long length, Compressor compressor)
+      throws IOException {
+    try (OutputStream sink = new NullOutputStream();
+        CountingOutputStream counter = new CountingOutputStream(sink);
+        RandomAccessFileInputStream rafis = new RandomAccessFileInputStream(file, offset, length)) {
+      compressor.compress(rafis, counter);
+      counter.flush();
+      return counter.getNumBytesWritten();
+    }
+  }
+
+  /**
+   * Convert a file into a map whose keys are {@link ByteArrayHolder} objects containing the entry
+   * paths and whose values are the corresponding {@link MinimalZipEntry} objects.
+   * @param file the file to scan, which must be a valid zip archive
+   * @return the mapping, as described
+   * @throws IOException if anything goes wrong
+   */
+  private static Map<ByteArrayHolder, MinimalZipEntry> mapEntries(File file) throws IOException {
+    List<MinimalZipEntry> allEntries = MinimalZipArchive.listEntries(file);
+    Map<ByteArrayHolder, MinimalZipEntry> result = new HashMap<>(allEntries.size());
+    for (MinimalZipEntry entry : allEntries) {
+      result.put(new ByteArrayHolder(entry.getFileNameBytes()), entry);
+    }
+    return result;
+  }
+}
diff --git a/explainer/src/main/java/com/google/archivepatcher/explainer/PatchExplanation.java b/explainer/src/main/java/com/google/archivepatcher/explainer/PatchExplanation.java
new file mode 100644
index 0000000..ce394df
--- /dev/null
+++ b/explainer/src/main/java/com/google/archivepatcher/explainer/PatchExplanation.java
@@ -0,0 +1,291 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.explainer;
+
+import com.google.archivepatcher.generator.RecommendationReason;
+import com.google.archivepatcher.generator.TotalRecompressionLimiter;
+import java.io.PrintWriter;
+import java.io.UnsupportedEncodingException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * Aggregate explanation for a collection of {@link EntryExplanation} objects. This class is a
+ * convenience for tools that process the output of {@link PatchExplainer}.
+ */
+public class PatchExplanation {
+
+  /**
+   * A {@link Comparator} that performs lexical sorting of the path names within
+   * {@link EntryExplanation} objects by treating them as UTF-8 strings.
+   */
+  private static class EntryExplanationLexicalComparator implements Comparator<EntryExplanation> {
+    @Override
+    public int compare(EntryExplanation o1, EntryExplanation o2) {
+      return path(o1).compareTo(path(o2));
+    }
+  }
+
+  /**
+   * All entries that are new (only present in the new archive).
+   */
+  private final List<EntryExplanation> explainedAsNew;
+
+  /**
+   * All entries that are changed (present in both the old and new archive, but different content).
+   */
+  private final List<EntryExplanation> explainedAsChanged;
+
+  /**
+   * All entries that are either unchanged or have zero cost in the patch (i.e., a copy or rename).
+   */
+  private final List<EntryExplanation> explainedAsUnchangedOrFree;
+
+  /**
+   * All entries that could have been uncompressed for patching but have been prevented due to
+   * resource constraints by a {@link TotalRecompressionLimiter}.
+   */
+  private final List<EntryExplanation> explainedAsResourceConstrained;
+
+  /**
+   * The sum total of the sizes of all the entries that are new in the patch stream.
+   */
+  private final long estimatedNewSize;
+
+  /**
+   * The sum total of the sizes of all the entries that are changed in the patch stream.
+   */
+  private final long estimatedChangedSize;
+
+  /**
+   * The sum total of the sizes of all the entries that were changed but that have been prevented
+   * from being uncompressed during patch generation due to resource constraints by a {@link
+   * TotalRecompressionLimiter}.
+   */
+  private final long estimatedResourceConstrainedSize;
+
+  /**
+   * Constructs a new aggregate explanation for the specified {@link EntryExplanation}s.
+   * @param entryExplanations the explanations for all of the individual entries in the patch
+   */
+  public PatchExplanation(List<EntryExplanation> entryExplanations) {
+    List<EntryExplanation> tempExplainedAsNew = new ArrayList<>();
+    List<EntryExplanation> tempExplainedAsChanged = new ArrayList<>();
+    List<EntryExplanation> tempExplainedAsUnchangedOrFree = new ArrayList<>();
+    List<EntryExplanation> tempExplainedAsResourceConstrained = new ArrayList<>();
+    long tempEstimatedNewSize = 0;
+    long tempEstimatedChangedSize = 0;
+    long tempEstimatedResourceConstrainedSize = 0;
+    for (EntryExplanation explanation : entryExplanations) {
+      if (explanation.isNew()) {
+        tempEstimatedNewSize += explanation.getCompressedSizeInPatch();
+        tempExplainedAsNew.add(explanation);
+      } else if (explanation.getReasonIncludedIfNotNew()
+          == RecommendationReason.RESOURCE_CONSTRAINED) {
+        tempEstimatedResourceConstrainedSize += explanation.getCompressedSizeInPatch();
+        tempExplainedAsResourceConstrained.add(explanation);
+      } else if (explanation.getCompressedSizeInPatch() > 0) {
+        tempEstimatedChangedSize += explanation.getCompressedSizeInPatch();
+        tempExplainedAsChanged.add(explanation);
+      } else {
+        tempExplainedAsUnchangedOrFree.add(explanation);
+      }
+    }
+    Comparator<EntryExplanation> comparator = new EntryExplanationLexicalComparator();
+    Collections.sort(tempExplainedAsNew, comparator);
+    Collections.sort(tempExplainedAsChanged, comparator);
+    Collections.sort(tempExplainedAsUnchangedOrFree, comparator);
+    Collections.sort(tempExplainedAsResourceConstrained, comparator);
+    explainedAsNew = Collections.unmodifiableList(tempExplainedAsNew);
+    explainedAsChanged = Collections.unmodifiableList(tempExplainedAsChanged);
+    explainedAsUnchangedOrFree = Collections.unmodifiableList(tempExplainedAsUnchangedOrFree);
+    explainedAsResourceConstrained =
+        Collections.unmodifiableList(tempExplainedAsResourceConstrained);
+    estimatedNewSize = tempEstimatedNewSize;
+    estimatedChangedSize = tempEstimatedChangedSize;
+    estimatedResourceConstrainedSize = tempEstimatedResourceConstrainedSize;
+  }
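+
+  // Classification example (hypothetical values): an explanation with isNew == false, reason
+  // RESOURCE_CONSTRAINED and compressedSizeInPatch == 500 is added to
+  // explainedAsResourceConstrained and contributes 500 to estimatedResourceConstrainedSize;
+  // one with a non-constrained reason and compressedSizeInPatch == 0 lands in
+  // explainedAsUnchangedOrFree and contributes to no total.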
+
+  /**
+   * Returns a read-only view of all entries that are new (only present in the new archive), sorted
+   * in ascending order lexicographically by path.
+   * @return as described
+   */
+  public List<EntryExplanation> getExplainedAsNew() {
+    return explainedAsNew;
+  }
+
+  /**
+   * Returns a read-only view of all entries that are changed (present in both the old and new
+   * archive, but different content), sorted in ascending order lexicographically by path.
+   * @return as described
+   */
+  public List<EntryExplanation> getExplainedAsChanged() {
+    return explainedAsChanged;
+  }
+
+  /**
+   * Returns a read-only view of all entries that are either unchanged or have zero cost in the
+   * patch
+   * (i.e., a copy or rename), sorted in ascending order lexicographically by path.
+   * @return as described
+   */
+  public List<EntryExplanation> getExplainedAsUnchangedOrFree() {
+    return explainedAsUnchangedOrFree;
+  }
+
+  /**
+   * Returns a read-only view of all entries that could have been uncompressed for patching but have
+   * been prevented due to resource constraints by a {@link TotalRecompressionLimiter}, sorted in
+   * ascending order lexicographically by path.
+   *
+   * @return as described
+   */
+  public List<EntryExplanation> getExplainedAsResourceConstrained() {
+    return explainedAsResourceConstrained;
+  }
+
+  /**
+   * Returns the sum total of the sizes of all the entries that are new in the patch stream. As
+   * noted in {@link EntryExplanation#getCompressedSizeInPatch()}, this is an
+   * <strong>approximation</strong>.
+   * @return as described
+   */
+  public long getEstimatedNewSize() {
+    return estimatedNewSize;
+  }
+
+  /**
+   * Returns the sum total of the sizes of all the entries that are changed in the patch stream. As
+   * noted in {@link EntryExplanation#getCompressedSizeInPatch()}, this is an
+   * <strong>approximation</strong>.
+   * @return as described
+   */
+  public long getEstimatedChangedSize() {
+    return estimatedChangedSize;
+  }
+
+  /**
+   * Returns the sum total of the sizes of all entries that could have been uncompressed for
+   * patching but have been prevented due to resource constraints by a {@link
+   * TotalRecompressionLimiter}. As noted in {@link EntryExplanation#getCompressedSizeInPatch()},
+   * this is an <strong>approximation</strong>.
+   *
+   * @return as described
+   */
+  public long getEstimatedResourceConstrainedSize() {
+    return estimatedResourceConstrainedSize;
+  }
+
+  /**
+   * Writes a JSON representation of the data to the specified {@link PrintWriter}. The data has the
+   * following form: <code>
+   * <br>{
+   * <br>&nbsp;&nbsp;estimatedNewSize: &lt;number&gt;,
+   * <br>&nbsp;&nbsp;estimatedChangedSize: &lt;number&gt;,
+   * <br>&nbsp;&nbsp;estimatedResourceConstrainedSize: &lt;number&gt;,
+   * <br>&nbsp;&nbsp;explainedAsNew: [
+   * <br>&nbsp;&nbsp;&nbsp;&nbsp;&lt;entry_list&gt;
+   * <br>&nbsp;&nbsp;],
+   * <br>&nbsp;&nbsp;explainedAsChanged: [
+   * <br>&nbsp;&nbsp;&nbsp;&nbsp;&lt;entry_list&gt;
+   * <br>&nbsp;&nbsp;],
+   * <br>&nbsp;&nbsp;explainedAsUnchangedOrFree: [
+   * <br>&nbsp;&nbsp;&nbsp;&nbsp;&lt;entry_list&gt;
+   * <br>&nbsp;&nbsp;],
+   * <br>&nbsp;&nbsp;explainedAsResourceConstrained: [
+   * <br>&nbsp;&nbsp;&nbsp;&nbsp;&lt;entry_list&gt;
+   * <br>&nbsp;&nbsp;]
+   * <br>}
+   * </code> <br>
+   * Where <code>&lt;entry_list&gt;</code> is a list of zero or more entries of the following form:
+   * <code>
+   * <br>{ path: '&lt;path_string&gt;', isNew: &lt;true|false&gt;,
+   * reasonIncluded: &lt;undefined|'&lt;reason_string&gt;'&gt;, compressedSizeInPatch: &lt;number&gt;
+   * }
+   * </code>
+   *
+   * @param writer the writer to write the JSON to
+   */
+  public void writeJson(PrintWriter writer) {
+    StringBuilder buffer = new StringBuilder(); // For convenience
+    buffer.append("{\n");
+    buffer.append("  estimatedNewSize: ").append(getEstimatedNewSize()).append(",\n");
+    buffer.append("  estimatedChangedSize: ").append(getEstimatedChangedSize()).append(",\n");
+    buffer
+        .append("  estimatedResourceConstrainedSize: ")
+        .append(getEstimatedResourceConstrainedSize())
+        .append(",\n");
+    dumpJson(getExplainedAsNew(), "explainedAsNew", buffer, "  ");
+    buffer.append(",\n");
+    dumpJson(getExplainedAsChanged(), "explainedAsChanged", buffer, "  ");
+    buffer.append(",\n");
+    dumpJson(getExplainedAsUnchangedOrFree(), "explainedAsUnchangedOrFree", buffer, "  ");
+    buffer.append(",\n");
+    dumpJson(getExplainedAsResourceConstrained(), "explainedAsResourceConstrained", buffer, "  ");
+    buffer.append("\n");
+    buffer.append("}");
+    writer.write(buffer.toString());
+    writer.flush();
+  }
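+
+  // Example of the emitted text (abridged, hypothetical numbers). Note that the output is
+  // JSON-like rather than strictly valid JSON: keys are unquoted and strings use single quotes.
+  //   {
+  //     estimatedNewSize: 1000,
+  //     estimatedChangedSize: 0,
+  //     estimatedResourceConstrainedSize: 0,
+  //     explainedAsNew: [
+  //       { path: '/path1', isNew: true, reasonIncluded: undefined, compressedSizeInPatch: 1000 }
+  //     ],
+  //     ...
+  //   }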
+
+  private void dumpJson(
+      List<EntryExplanation> explanations, String listName, StringBuilder buffer, String indent) {
+    buffer.append(indent).append(listName).append(": [\n");
+    Iterator<EntryExplanation> iterator = explanations.iterator();
+    while (iterator.hasNext()) {
+      EntryExplanation explanation = iterator.next();
+      dumpJson(explanation, buffer, indent + "  ");
+      if (iterator.hasNext()) {
+        buffer.append(",");
+      }
+      buffer.append("\n");
+    }
+    buffer.append(indent).append("]");
+  }
+
+  private void dumpJson(EntryExplanation entryExplanation, StringBuilder buffer, String indent) {
+    String reasonString =
+        entryExplanation.isNew()
+            ? "undefined"
+            : "'" + entryExplanation.getReasonIncludedIfNotNew().toString() + "'";
+    buffer
+        .append(indent)
+        .append("{ ")
+        .append("path: '")
+        .append(path(entryExplanation))
+        .append("', ")
+        .append("isNew: ")
+        .append(entryExplanation.isNew())
+        .append(", ")
+        .append("reasonIncluded: ")
+        .append(reasonString)
+        .append(", ")
+        .append("compressedSizeInPatch: ")
+        .append(entryExplanation.getCompressedSizeInPatch())
+        .append(" }");
+  }
+
+  /**
+   * Returns the path from an {@link EntryExplanation} as a UTF-8 string.
+   * @param explanation the {@link EntryExplanation} to extract the path from
+   * @return as described
+   */
+  private static String path(EntryExplanation explanation) {
+    try {
+      return new String(explanation.getPath().getData(), "UTF-8");
+    } catch (UnsupportedEncodingException e) {
+      throw new RuntimeException("System doesn't support UTF-8", e);
+    }
+  }
+}
diff --git a/explainer/src/test/java/com/google/archivepatcher/explainer/PatchExplainerTest.java b/explainer/src/test/java/com/google/archivepatcher/explainer/PatchExplainerTest.java
new file mode 100644
index 0000000..1a5a379
--- /dev/null
+++ b/explainer/src/test/java/com/google/archivepatcher/explainer/PatchExplainerTest.java
@@ -0,0 +1,424 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.explainer;
+
+import com.google.archivepatcher.generator.ByteArrayHolder;
+import com.google.archivepatcher.generator.DeltaGenerator;
+import com.google.archivepatcher.generator.MinimalZipArchive;
+import com.google.archivepatcher.generator.MinimalZipEntry;
+import com.google.archivepatcher.generator.RecommendationReason;
+import com.google.archivepatcher.generator.TotalRecompressionLimiter;
+import com.google.archivepatcher.shared.Compressor;
+import com.google.archivepatcher.shared.UnitTestZipArchive;
+import com.google.archivepatcher.shared.UnitTestZipEntry;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.UnsupportedEncodingException;
+import java.util.Collections;
+import java.util.List;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/**
+ * Tests for {@link PatchExplainer}.
+ */
+@RunWith(JUnit4.class)
+@SuppressWarnings("javadoc")
+public class PatchExplainerTest {
+
+  // All the A and B entries consist of a chunk of text followed by a standard corpus of text from
+  // the DefaultDeflateCompatibilityDiviner that ensures the tests will be able to discriminate
+  // between any compression level. Without this additional corpus text, multiple compression levels
+  // can match the entry and the unit tests would not be accurate.
+  private static final UnitTestZipEntry ENTRY_A1_LEVEL_6 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/path A", 6, "entry A1", null);
+  private static final UnitTestZipEntry ENTRY_A1_LEVEL_9 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/path A", 9, "entry A1", null);
+  private static final UnitTestZipEntry ENTRY_A1_STORED =
+      UnitTestZipArchive.makeUnitTestZipEntry("/path A", 0, "entry A", null);
+  private static final UnitTestZipEntry ENTRY_A2_LEVEL_9 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/path A", 9, "entry A2", null);
+  private static final UnitTestZipEntry ENTRY_A2_STORED =
+      UnitTestZipArchive.makeUnitTestZipEntry("/path A", 0, "entry A2", null);
+  private static final UnitTestZipEntry ENTRY_B_LEVEL_6 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/path B", 6, "entry B", null);
+
+  /**
+   * A "compressor" that always outputs the same exact string regardless of the input and asserts
+   * that the input is exactly as expected.
+   */
+  private static class FakeCompressor implements Compressor {
+    static final String OUTPUT = "fakecompressor output";
+    private final byte[] expectedInput;
+
+    public FakeCompressor(byte[] expectedInput) {
+      this.expectedInput = expectedInput;
+    }
+
+    @Override
+    public void compress(InputStream uncompressedIn, OutputStream compressedOut)
+        throws IOException {
+      byte[] readBuffer = new byte[32768];
+      int numRead = 0;
+      ByteArrayOutputStream actualInput = new ByteArrayOutputStream();
+      while ((numRead = uncompressedIn.read(readBuffer)) >= 0) {
+        actualInput.write(readBuffer, 0, numRead);
+      }
+      Assert.assertArrayEquals(expectedInput, actualInput.toByteArray());
+      compressedOut.write(OUTPUT.getBytes("US-ASCII"));
+    }
+  }
+
+  /**
+   * A "delta generator" that always outputs the same exact string regardless of the inputs and
+   * asserts that the input is exactly as expected.
+   */
+  private static class FakeDeltaGenerator implements DeltaGenerator {
+    static final String OUTPUT = "fakedeltagenerator output";
+    private final byte[] expectedOld;
+    private final byte[] expectedNew;
+
+    public FakeDeltaGenerator(byte[] expectedOld, byte[] expectedNew) {
+      this.expectedOld = expectedOld;
+      this.expectedNew = expectedNew;
+    }
+
+    @Override
+    public void generateDelta(File oldBlob, File newBlob, OutputStream deltaOut)
+        throws IOException {
+      assertFileEquals(oldBlob, expectedOld);
+      assertFileEquals(newBlob, expectedNew);
+      deltaOut.write(OUTPUT.getBytes("US-ASCII"));
+    }
+
+    private void assertFileEquals(File file, byte[] expected) throws IOException {
+      byte[] actual = new byte[(int) file.length()];
+      try (FileInputStream fileIn = new FileInputStream(file);
+          DataInputStream dataIn = new DataInputStream(fileIn)) {
+        dataIn.readFully(actual);
+      }
+      Assert.assertArrayEquals(expected, actual);
+    }
+  }
+
+  /**
+   * Temporary old file.
+   */
+  private File oldFile = null;
+
+  /**
+   * Temporary new file.
+   */
+  private File newFile = null;
+
+  @Before
+  public void setup() throws IOException {
+    oldFile = File.createTempFile("patchexplainertest", "old");
+    newFile = File.createTempFile("patchexplainertest", "new");
+  }
+
+  @After
+  public void tearDown() {
+    if (oldFile != null) {
+      try {
+        oldFile.delete();
+      } catch (Exception ignored) {
+        // Nothing
+      }
+    }
+    if (newFile != null) {
+      try {
+        newFile.delete();
+      } catch (Exception ignored) {
+        // Nothing
+      }
+    }
+  }
+
+  @Test
+  public void testExplainPatch_CompressedBytesIdentical() throws Exception {
+    byte[] bytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A1_LEVEL_6));
+    save(bytes, oldFile);
+    save(bytes, newFile);
+    PatchExplainer explainer = new PatchExplainer(null, null);
+    List<EntryExplanation> explanations = explainer.explainPatch(oldFile, newFile);
+
+    EntryExplanation expected =
+        new EntryExplanation(
+            path(ENTRY_A1_LEVEL_6), false, RecommendationReason.COMPRESSED_BYTES_IDENTICAL, 0);
+    checkExplanation(explanations, expected);
+  }
+
+  @Test
+  public void testExplainPatch_CompressedBytesChanged_UncompressedUnchanged() throws Exception {
+    byte[] oldBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A1_LEVEL_6));
+    byte[] newBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A1_LEVEL_9));
+    save(oldBytes, oldFile);
+    save(newBytes, newFile);
+    PatchExplainer explainer = new PatchExplainer(null, null);
+    List<EntryExplanation> explanations = explainer.explainPatch(oldFile, newFile);
+    // The compressed bytes changed, but the uncompressed bytes are the same. Thus the patch size
+    // should be zero, because the entries are actually identical in the delta-friendly files.
+    // Additionally no diffing or compression should be performed.
+    EntryExplanation expected =
+        new EntryExplanation(
+            path(ENTRY_A1_LEVEL_9), false, RecommendationReason.COMPRESSED_BYTES_CHANGED, 0L);
+    checkExplanation(explanations, expected);
+  }
+
+  @Test
+  public void testExplainPatch_CompressedBytesChanged_UncompressedChanged() throws Exception {
+    byte[] oldBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A1_LEVEL_6));
+    byte[] newBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A2_LEVEL_9));
+    save(oldBytes, oldFile);
+    save(newBytes, newFile);
+    FakeDeltaGenerator fakeDeltaGenerator =
+        new FakeDeltaGenerator(
+            ENTRY_A1_LEVEL_6.getUncompressedBinaryContent(),
+            ENTRY_A2_LEVEL_9.getUncompressedBinaryContent());
+    FakeCompressor fakeCompressor =
+        new FakeCompressor(FakeDeltaGenerator.OUTPUT.getBytes("US-ASCII"));
+    PatchExplainer explainer = new PatchExplainer(fakeCompressor, fakeDeltaGenerator);
+    List<EntryExplanation> explanations = explainer.explainPatch(oldFile, newFile);
+    // The compressed bytes changed, and so did the uncompressed bytes. The patch size should be
+    // non-zero because the entries are not identical in the delta-friendly files.
+    EntryExplanation expected =
+        new EntryExplanation(
+            path(ENTRY_A2_LEVEL_9),
+            false,
+            RecommendationReason.COMPRESSED_BYTES_CHANGED,
+            FakeCompressor.OUTPUT.length());
+    checkExplanation(explanations, expected);
+  }
+
+  @Test
+  public void testExplainPatch_CompressedBytesChanged_UncompressedChanged_Limited()
+      throws Exception {
+    // Just like above, but this time with a TotalRecompressionLimit that changes the result.
+    TotalRecompressionLimiter limiter = new TotalRecompressionLimiter(1); // 1 byte limit!
+    byte[] oldBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A1_LEVEL_6));
+    byte[] newBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A2_LEVEL_9));
+    save(oldBytes, oldFile);
+    save(newBytes, newFile);
+    // Note that we will expect a diff based on the COMPRESSED bytes, not the UNCOMPRESSED bytes,
+    // because the limiter will force uncompression to be suppressed.
+    FakeDeltaGenerator fakeDeltaGenerator =
+        new FakeDeltaGenerator(
+            ENTRY_A1_LEVEL_6.getCompressedBinaryContent(),
+            ENTRY_A2_LEVEL_9.getCompressedBinaryContent());
+    FakeCompressor fakeCompressor =
+        new FakeCompressor(FakeDeltaGenerator.OUTPUT.getBytes("US-ASCII"));
+    PatchExplainer explainer = new PatchExplainer(fakeCompressor, fakeDeltaGenerator);
+    List<EntryExplanation> explanations = explainer.explainPatch(oldFile, newFile, limiter);
+    // The uncompressed bytes are not the same. The patch plan will want to uncompress the entries,
+    // but the limiter will prevent it.
+    EntryExplanation expected =
+        new EntryExplanation(
+            path(ENTRY_A2_LEVEL_9),
+            false,
+            RecommendationReason.RESOURCE_CONSTRAINED,
+            FakeCompressor.OUTPUT.length());
+    checkExplanation(explanations, expected);
+  }
+
+  @Test
+  public void testExplainPatch_BothEntriesUncompressed_BytesUnchanged() throws Exception {
+    byte[] oldBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A1_STORED));
+    byte[] newBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A1_STORED));
+    save(oldBytes, oldFile);
+    save(newBytes, newFile);
+    PatchExplainer explainer = new PatchExplainer(null, null);
+    List<EntryExplanation> explanations = explainer.explainPatch(oldFile, newFile);
+    // The uncompressed bytes are the same. Thus the patch size should be zero, because the entries
+    // are identical in the delta-friendly files. Additionally no diffing or compression should be
+    // performed.
+    EntryExplanation expected =
+        new EntryExplanation(
+            path(ENTRY_A1_STORED), false, RecommendationReason.BOTH_ENTRIES_UNCOMPRESSED, 0L);
+    checkExplanation(explanations, expected);
+  }
+
+  @Test
+  public void testExplainPatch_BothEntriesUncompressed_BytesChanged() throws Exception {
+    byte[] oldBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A1_STORED));
+    byte[] newBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A2_STORED));
+    save(oldBytes, oldFile);
+    save(newBytes, newFile);
+    FakeDeltaGenerator fakeDeltaGenerator =
+        new FakeDeltaGenerator(
+            ENTRY_A1_STORED.getUncompressedBinaryContent(),
+            ENTRY_A2_STORED.getUncompressedBinaryContent());
+    FakeCompressor fakeCompressor =
+        new FakeCompressor(FakeDeltaGenerator.OUTPUT.getBytes("US-ASCII"));
+    PatchExplainer explainer = new PatchExplainer(fakeCompressor, fakeDeltaGenerator);
+    List<EntryExplanation> explanations = explainer.explainPatch(oldFile, newFile);
+    // The uncompressed bytes are not the same. Thus the patch size should be non-zero.
+    EntryExplanation expected =
+        new EntryExplanation(
+            path(ENTRY_A2_STORED),
+            false,
+            RecommendationReason.BOTH_ENTRIES_UNCOMPRESSED,
+            FakeCompressor.OUTPUT.length());
+    checkExplanation(explanations, expected);
+  }
+
+  @Test
+  public void testExplainPatch_CompressedChangedToUncompressed() throws Exception {
+    byte[] oldBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A1_LEVEL_9));
+    byte[] newBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A1_STORED));
+    save(oldBytes, oldFile);
+    save(newBytes, newFile);
+    FakeDeltaGenerator fakeDeltaGenerator =
+        new FakeDeltaGenerator(
+            ENTRY_A1_LEVEL_9.getUncompressedBinaryContent(),
+            ENTRY_A1_STORED.getUncompressedBinaryContent());
+    FakeCompressor fakeCompressor =
+        new FakeCompressor(FakeDeltaGenerator.OUTPUT.getBytes("US-ASCII"));
+    PatchExplainer explainer = new PatchExplainer(fakeCompressor, fakeDeltaGenerator);
+    List<EntryExplanation> explanations = explainer.explainPatch(oldFile, newFile);
+    EntryExplanation expected =
+        new EntryExplanation(
+            path(ENTRY_A1_STORED),
+            false,
+            RecommendationReason.COMPRESSED_CHANGED_TO_UNCOMPRESSED,
+            FakeCompressor.OUTPUT.length());
+    checkExplanation(explanations, expected);
+  }
+
+  @Test
+  public void testExplainPatch_UncompressedChangedToCompressed() throws Exception {
+    byte[] oldBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A1_STORED));
+    byte[] newBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A1_LEVEL_6));
+    save(oldBytes, oldFile);
+    save(newBytes, newFile);
+    FakeDeltaGenerator fakeDeltaGenerator =
+        new FakeDeltaGenerator(
+            ENTRY_A1_STORED.getUncompressedBinaryContent(),
+            ENTRY_A1_LEVEL_6.getUncompressedBinaryContent());
+    FakeCompressor fakeCompressor =
+        new FakeCompressor(FakeDeltaGenerator.OUTPUT.getBytes("US-ASCII"));
+    PatchExplainer explainer = new PatchExplainer(fakeCompressor, fakeDeltaGenerator);
+    List<EntryExplanation> explanations = explainer.explainPatch(oldFile, newFile);
+    EntryExplanation expected =
+        new EntryExplanation(
+            path(ENTRY_A1_LEVEL_6),
+            false,
+            RecommendationReason.UNCOMPRESSED_CHANGED_TO_COMPRESSED,
+            FakeCompressor.OUTPUT.length());
+    checkExplanation(explanations, expected);
+  }
+
+  @Test
+  public void testExplainPatch_Unsuitable() throws Exception {
+    byte[] oldBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A1_STORED));
+    byte[] newBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A1_LEVEL_6));
+    save(oldBytes, oldFile);
+    save(newBytes, newFile);
+
+    // Corrupt the data in newFile and re-save. This will make the entry un-divinable.
+    MinimalZipEntry newEntry = MinimalZipArchive.listEntries(newFile).get(0);
+    newBytes[(int) newEntry.getFileOffsetOfCompressedData()] = (byte) 0xff;
+    save(newBytes, newFile);
+    byte[] justNewData = new byte[(int) newEntry.getCompressedSize()];
+    System.arraycopy(
+        newBytes,
+        (int) newEntry.getFileOffsetOfCompressedData(),
+        justNewData,
+        0,
+        (int) newEntry.getCompressedSize());
+
+    FakeDeltaGenerator fakeDeltaGenerator =
+        new FakeDeltaGenerator(ENTRY_A1_STORED.getUncompressedBinaryContent(), justNewData);
+    FakeCompressor fakeCompressor =
+        new FakeCompressor(FakeDeltaGenerator.OUTPUT.getBytes("US-ASCII"));
+    PatchExplainer explainer = new PatchExplainer(fakeCompressor, fakeDeltaGenerator);
+    List<EntryExplanation> explanations = explainer.explainPatch(oldFile, newFile);
+    EntryExplanation expected =
+        new EntryExplanation(
+            path(ENTRY_A1_LEVEL_6),
+            false,
+            RecommendationReason.UNSUITABLE,
+            FakeCompressor.OUTPUT.length());
+    checkExplanation(explanations, expected);
+  }
+
+  @Test
+  public void testExplainPatch_NewFile() throws Exception {
+    byte[] oldBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A1_LEVEL_6));
+    byte[] newBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_B_LEVEL_6));
+    save(oldBytes, oldFile);
+    save(newBytes, newFile);
+    FakeCompressor fakeCompressor =
+        new FakeCompressor(ENTRY_B_LEVEL_6.getCompressedBinaryContent());
+    PatchExplainer explainer = new PatchExplainer(fakeCompressor, null);
+    List<EntryExplanation> explanations = explainer.explainPatch(oldFile, newFile);
+    EntryExplanation expected =
+        new EntryExplanation(
+            path(ENTRY_B_LEVEL_6),
+            true, // isNew
+            null, // recommendation reason (null because the file is new)
+            FakeCompressor.OUTPUT.length());
+    checkExplanation(explanations, expected);
+  }
+
+  /**
+   * Check that the specified list of explanations has exactly one explanation and that it matches
+   * the expected explanation.
+   * @param explanations the explanations created by the {@link PatchExplainer}
+   * @param expected the expected explanation
+   */
+  private void checkExplanation(List<EntryExplanation> explanations, EntryExplanation expected) {
+    Assert.assertEquals(1, explanations.size());
+    EntryExplanation actual = explanations.get(0);
+    Assert.assertEquals(expected.getPath(), actual.getPath());
+    Assert.assertEquals(expected.isNew(), actual.isNew());
+    Assert.assertEquals(expected.getReasonIncludedIfNotNew(), actual.getReasonIncludedIfNotNew());
+    Assert.assertEquals(expected.getCompressedSizeInPatch(), actual.getCompressedSizeInPatch());
+  }
+
+  /**
+   * Convenience method to convert a {@link UnitTestZipEntry}'s path information into a
+   * {@link ByteArrayHolder}.
+   * @param entry the entry to get the path out of
+   * @return the path as a {@link ByteArrayHolder}
+   * @throws UnsupportedEncodingException if the system doesn't support US-ASCII. No, seriously.
+   */
+  private static ByteArrayHolder path(UnitTestZipEntry entry) throws UnsupportedEncodingException {
+    return new ByteArrayHolder(entry.path.getBytes("US-ASCII"));
+  }
+
+  /**
+   * Save the specified data to the specified file.
+   * @param data the data to save
+   * @param file the file to save to
+   * @throws IOException if saving fails
+   */
+  private static void save(byte[] data, File file) throws IOException {
+    try (FileOutputStream out = new FileOutputStream(file)) {
+      out.write(data);
+      out.flush();
+    }
+  }
+}
diff --git a/explainer/src/test/java/com/google/archivepatcher/explainer/PatchExplanationTest.java b/explainer/src/test/java/com/google/archivepatcher/explainer/PatchExplanationTest.java
new file mode 100644
index 0000000..38c2b2a
--- /dev/null
+++ b/explainer/src/test/java/com/google/archivepatcher/explainer/PatchExplanationTest.java
@@ -0,0 +1,142 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.explainer;
+
+import com.google.archivepatcher.generator.ByteArrayHolder;
+import com.google.archivepatcher.generator.RecommendationReason;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.UnsupportedEncodingException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Tests for {@link PatchExplanation}.
+ */
+@RunWith(JUnit4.class)
+@SuppressWarnings("javadoc")
+public final class PatchExplanationTest {
+
+  // Construct 6 entries:
+  //   1 and 2 are classified as new (they don't exist in the old archive).
+  //   3 and 4 are changed entries that are not free (there is a delta to be calculated).
+  //   5 and 6 are unchanged or free (identical uncompressed or compressed bytes).
+  // This is enough to populate three of the lists in the PatchExplanation with 2 entries each, and
+  // check that ordering and summing is working properly.
+  private static final EntryExplanation EXPLANATION_1_NEW =
+      makeExplanation("/path1", true, null, 1000);
+  private static final EntryExplanation EXPLANATION_2_NEW =
+      makeExplanation("/path2", true, null, 2000);
+  private static final EntryExplanation EXPLANATION_3_CHANGED_NOT_FREE =
+      makeExplanation("/path3", false, RecommendationReason.COMPRESSED_BYTES_CHANGED, 3000);
+  private static final EntryExplanation EXPLANATION_4_CHANGED_NOT_FREE =
+      makeExplanation("/path4", false, RecommendationReason.BOTH_ENTRIES_UNCOMPRESSED, 4000);
+  private static final EntryExplanation EXPLANATION_5_CHANGED_BUT_FREE =
+      makeExplanation("/path5", false, RecommendationReason.COMPRESSED_BYTES_CHANGED, 0);
+  private static final EntryExplanation EXPLANATION_6_UNCHANGED =
+      makeExplanation("/path6", false, RecommendationReason.COMPRESSED_BYTES_IDENTICAL, 0);
+
+  private static final List<EntryExplanation> ALL_EXPLANATIONS =
+      Collections.unmodifiableList(
+          Arrays.asList(
+              EXPLANATION_1_NEW,
+              EXPLANATION_2_NEW,
+              EXPLANATION_3_CHANGED_NOT_FREE,
+              EXPLANATION_4_CHANGED_NOT_FREE,
+              EXPLANATION_5_CHANGED_BUT_FREE,
+              EXPLANATION_6_UNCHANGED));
+
+  private static final List<EntryExplanation> EXPECTED_NEW_EXPLANATIONS =
+      Collections.unmodifiableList(Arrays.asList(EXPLANATION_1_NEW, EXPLANATION_2_NEW));
+  private static final long EXPECTED_NEW_SIZE =
+      EXPLANATION_1_NEW.getCompressedSizeInPatch() + EXPLANATION_2_NEW.getCompressedSizeInPatch();
+
+  private static final List<EntryExplanation> EXPECTED_CHANGED_EXPLANATIONS =
+      Collections.unmodifiableList(
+          Arrays.asList(EXPLANATION_3_CHANGED_NOT_FREE, EXPLANATION_4_CHANGED_NOT_FREE));
+  private static final long EXPECTED_CHANGED_SIZE =
+      EXPLANATION_3_CHANGED_NOT_FREE.getCompressedSizeInPatch()
+          + EXPLANATION_4_CHANGED_NOT_FREE.getCompressedSizeInPatch();
+
+  private static final List<EntryExplanation> EXPECTED_UNCHANGED_OR_FREE_EXPLANATIONS =
+      Collections.unmodifiableList(
+          Arrays.asList(EXPLANATION_5_CHANGED_BUT_FREE, EXPLANATION_6_UNCHANGED));
+
+  private static EntryExplanation makeExplanation(
+      String path,
+      boolean isNew,
+      RecommendationReason reasonIncludedIfNotNew,
+      long compressedSizeInPatch) {
+    try {
+      ByteArrayHolder pathHolder = new ByteArrayHolder(path.getBytes("UTF-8"));
+      return new EntryExplanation(pathHolder, isNew, reasonIncludedIfNotNew, compressedSizeInPatch);
+    } catch (UnsupportedEncodingException e) {
+      throw new RuntimeException("System doesn't support UTF-8", e);
+    }
+  }
+
+  @Test
+  public void testConstructor_Simple() {
+    PatchExplanation patchExplanation = new PatchExplanation(ALL_EXPLANATIONS);
+    Assert.assertEquals(EXPECTED_NEW_EXPLANATIONS, patchExplanation.getExplainedAsNew());
+    Assert.assertEquals(EXPECTED_CHANGED_EXPLANATIONS, patchExplanation.getExplainedAsChanged());
+    Assert.assertEquals(
+        EXPECTED_UNCHANGED_OR_FREE_EXPLANATIONS, patchExplanation.getExplainedAsUnchangedOrFree());
+    Assert.assertEquals(EXPECTED_NEW_SIZE, patchExplanation.getEstimatedNewSize());
+    Assert.assertEquals(EXPECTED_CHANGED_SIZE, patchExplanation.getEstimatedChangedSize());
+  }
+
+  @Test
+  public void testConstructor_Reversed() {
+    List<EntryExplanation> reversed = new ArrayList<>(ALL_EXPLANATIONS);
+    Collections.reverse(reversed);
+    PatchExplanation patchExplanation = new PatchExplanation(reversed);
+    // Order should remain the same despite reversing the inputs.
+    Assert.assertEquals(EXPECTED_NEW_EXPLANATIONS, patchExplanation.getExplainedAsNew());
+    Assert.assertEquals(EXPECTED_CHANGED_EXPLANATIONS, patchExplanation.getExplainedAsChanged());
+    Assert.assertEquals(
+        EXPECTED_UNCHANGED_OR_FREE_EXPLANATIONS, patchExplanation.getExplainedAsUnchangedOrFree());
+    Assert.assertEquals(EXPECTED_NEW_SIZE, patchExplanation.getEstimatedNewSize());
+    Assert.assertEquals(EXPECTED_CHANGED_SIZE, patchExplanation.getEstimatedChangedSize());
+  }
+
+  @Test
+  public void testToJson() throws IOException {
+    // We lack a proper JSON parser in the vanilla JRE, so short of string matching there's nothing
+    // to be done here other than ensuring the output is non-empty and looks reasonably sane.
+    PatchExplanation patchExplanation = new PatchExplanation(ALL_EXPLANATIONS);
+    try (ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+        PrintWriter writer = new PrintWriter(buffer)) {
+      patchExplanation.writeJson(writer);
+      writer.flush();
+      String asString = buffer.toString();
+      Assert.assertTrue(asString.startsWith("{"));
+      Assert.assertFalse(asString.isEmpty());
+      for (EntryExplanation explanation : ALL_EXPLANATIONS) {
+        Assert.assertTrue(asString.contains(new String(explanation.getPath().getData(), "UTF-8")));
+      }
+      Assert.assertTrue(asString.endsWith("}"));
+    }
+  }
+}
diff --git a/generator/build.gradle b/generator/build.gradle
new file mode 100644
index 0000000..3f8c8af
--- /dev/null
+++ b/generator/build.gradle
@@ -0,0 +1,22 @@
+// generator module
+
+apply plugin: 'java'
+
+dependencies {
+    compile project(':shared')
+
+    testCompile 'junit:junit:4.12'
+    testCompile project(':sharedtest')
+}
+
+task copyTestResources(type: Copy) {
+    // AS/IntelliJ workaround: https://code.google.com/p/android/issues/detail?id=64887#c26
+    if (System.properties['idea.platform.prefix'] != null) {
+        from sourceSets.test.resources
+        into sourceSets.test.output.classesDir
+    }
+}
+
+processTestResources.dependsOn copyTestResources
+
+// EOF
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/ByteArrayHolder.java b/generator/src/main/java/com/google/archivepatcher/generator/ByteArrayHolder.java
new file mode 100644
index 0000000..ffa9efb
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/ByteArrayHolder.java
@@ -0,0 +1,63 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import java.util.Arrays;
+
+/**
+ * Holds an array of bytes, implementing {@link #equals(Object)} and {@link #hashCode()} with deep
+ * comparisons. This is intended primarily to allow raw, uninterpreted paths from
+ * {@link MinimalZipEntry#getFileNameBytes()} to be used as map keys safely.
+ */
+public class ByteArrayHolder {
+  /**
+   * The backing byte array.
+   */
+  private final byte[] data;
+
+  /**
+   * Construct a new wrapper around the specified bytes.
+   * @param data the byte array
+   */
+  public ByteArrayHolder(byte[] data) {
+    this.data = data;
+  }
+
+  /**
+   * Returns the actual byte array that backs this holder.
+   * @return the array
+   */
+  public byte[] getData() {
+    return data;
+  }
+
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result = prime * result + Arrays.hashCode(data);
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) return true;
+    if (obj == null) return false;
+    if (getClass() != obj.getClass()) return false;
+    ByteArrayHolder other = (ByteArrayHolder) obj;
+    return Arrays.equals(data, other.data);
+  }
+}
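
For illustration only (not part of the patch): a minimal, self-contained sketch of why this
wrapper matters. Raw byte[] map keys compare by identity, so value-equal arrays miss on lookup;
ByteArrayHolder compares by content. The path used here is hypothetical.

    import com.google.archivepatcher.generator.ByteArrayHolder;
    import java.util.HashMap;
    import java.util.Map;

    class ByteArrayHolderSketch {
      public static void main(String[] args) {
        byte[] pathA = "assets/logo.png".getBytes();
        byte[] pathB = "assets/logo.png".getBytes(); // equal content, distinct instance

        Map<byte[], String> rawKeys = new HashMap<>();
        rawKeys.put(pathA, "entry");
        System.out.println(rawKeys.get(pathB)); // null: byte[] uses identity equality

        Map<ByteArrayHolder, String> heldKeys = new HashMap<>();
        heldKeys.put(new ByteArrayHolder(pathA), "entry");
        System.out.println(heldKeys.get(new ByteArrayHolder(pathB))); // "entry"
      }
    }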
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/DefaultDeflateCompressionDiviner.java b/generator/src/main/java/com/google/archivepatcher/generator/DefaultDeflateCompressionDiviner.java
new file mode 100644
index 0000000..636504d
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/DefaultDeflateCompressionDiviner.java
@@ -0,0 +1,265 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import com.google.archivepatcher.shared.DefaultDeflateCompatibilityWindow;
+import com.google.archivepatcher.shared.JreDeflateParameters;
+import com.google.archivepatcher.shared.MultiViewInputStreamFactory;
+import com.google.archivepatcher.shared.RandomAccessFileInputStream;
+import com.google.archivepatcher.shared.RandomAccessFileInputStreamFactory;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.zip.Deflater;
+import java.util.zip.DeflaterOutputStream;
+import java.util.zip.Inflater;
+import java.util.zip.InflaterInputStream;
+import java.util.zip.ZipException;
+
+/**
+ * Divines information about the compression used for a resource that has been compressed with a
+ * deflate-compatible algorithm. This implementation produces results that are valid within the
+ * {@link DefaultDeflateCompatibilityWindow}.
+ */
+public class DefaultDeflateCompressionDiviner {
+
+  /**
+   * The levels to try for each strategy, in the order to attempt them.
+   */
+  private final Map<Integer, List<Integer>> levelsByStrategy = getLevelsByStrategy();
+
+  /**
+   * A simple struct that contains a {@link MinimalZipEntry} describing a specific entry from a zip
+   * archive along with an optional accompanying {@link JreDeflateParameters} describing the
+   * original compression settings that were used to generate the compressed data in that entry.
+   */
+  public static class DivinationResult {
+    /**
+     * The {@link MinimalZipEntry} for the result; never null.
+     */
+    public final MinimalZipEntry minimalZipEntry;
+
+    /**
+     * The {@link JreDeflateParameters} for the result, possibly null. This value is only set if
+     * {@link MinimalZipEntry#isDeflateCompressed()} is true <em>and</em> the compression settings
+     * were successfully divined.
+     */
+    public final JreDeflateParameters divinedParameters;
+
+    /**
+     * Creates a new result with the specified fields.
+     * @param minimalZipEntry the zip entry
+     * @param divinedParameters the parameters
+     */
+    public DivinationResult(
+        MinimalZipEntry minimalZipEntry, JreDeflateParameters divinedParameters) {
+      if (minimalZipEntry == null) {
+        throw new IllegalArgumentException("minimalZipEntry cannot be null");
+      }
+      this.minimalZipEntry = minimalZipEntry;
+      this.divinedParameters = divinedParameters;
+    }
+  }
+
+  /**
+   * Load the specified archive and attempt to divine deflate parameters for all entries within.
+   * @param archiveFile the archive file to work on
+   * @return a list of results for each entry in the archive, in file order (not central directory
+   * order). There is exactly one result per entry, regardless of whether or not that entry is
+   * compressed. Callers can filter results by checking
+   * {@link MinimalZipEntry#getCompressionMethod()} to see if the result is or is not compressed,
+   * and by checking whether a non-null {@link JreDeflateParameters} was obtained.
+   * @throws IOException if unable to read or parse the file
+   * @see DivinationResult 
+   */
+  public List<DivinationResult> divineDeflateParameters(File archiveFile) throws IOException {
+    List<DivinationResult> results = new ArrayList<DivinationResult>();
+    for (MinimalZipEntry minimalZipEntry : MinimalZipArchive.listEntries(archiveFile)) {
+      JreDeflateParameters divinedParameters = null;
+      if (minimalZipEntry.isDeflateCompressed()) {
+        // TODO(andrewhayden): Reuse streams to avoid churning file descriptors
+        RandomAccessFileInputStreamFactory rafisFactory =
+            new RandomAccessFileInputStreamFactory(
+                archiveFile,
+                minimalZipEntry.getFileOffsetOfCompressedData(),
+                minimalZipEntry.getCompressedSize());
+        divinedParameters = divineDeflateParameters(rafisFactory);
+      }
+      results.add(new DivinationResult(minimalZipEntry, divinedParameters));
+    }
+    return results;
+  }
+
+  /**
+   * Returns an unmodifiable map whose keys are deflate strategies and whose values are the levels
+   * that make sense to try with the corresponding strategy, in the recommended testing order.
+   *
+   * <ul>
+   *   <li>For strategy 0, levels 1 through 9 (inclusive) are included.
+   *   <li>For strategy 1, levels 4 through 9 (inclusive) are included. Levels 1, 2 and 3 are
+ *       excluded because they behave the same as they do under strategy 0.
+   *   <li>For strategy 2, only level 1 is included because the level is ignored under strategy 2.
+   * </ul>
+   *
+   * @return such a mapping
+   */
+  protected Map<Integer, List<Integer>> getLevelsByStrategy() {
+    final Map<Integer, List<Integer>> levelsByStrategy = new HashMap<Integer, List<Integer>>();
+    // The best order for the levels is simply the order of popularity in the world, which is
+    // expected to be default (6), maximum compression (9), and fastest (1).
+    // The rest of the levels are rarely encountered and their order is mostly irrelevant.
+    levelsByStrategy.put(0, Collections.unmodifiableList(Arrays.asList(6, 9, 1, 4, 2, 3, 5, 7, 8)));
+    levelsByStrategy.put(1, Collections.unmodifiableList(Arrays.asList(6, 9, 4, 5, 7, 8)));
+    levelsByStrategy.put(2, Collections.singletonList(1));
+    return Collections.unmodifiableMap(levelsByStrategy);
+  }
+
+  /**
+   * Determines the original {@link JreDeflateParameters} that were used to compress a given piece
+   * of deflated data.
+   * @param compressedDataInputStreamFactory a {@link MultiViewInputStreamFactory} that can provide
+   * multiple independent {@link InputStream} instances for the compressed data; the streams
+   * produced must support {@link InputStream#mark(int)} and it is recommended that
+   * {@link RandomAccessFileInputStream} instances be provided for efficiency if a backing file is
+   * available. The stream will be reset for each recompression attempt that is required.
+   * @return the parameters that can be used to replicate the compressed data in the
+   * {@link DefaultDeflateCompatibilityWindow}, if any; otherwise <code>null</code>. Note that
+   * <code>null</code> is also returned in the case of <em>corrupt</em> zip data since, by
+   * definition, it cannot be replicated via any combination of normal deflate parameters.
+   * @throws IOException if there is a problem reading the data, e.g. if the file contents are
+   * changed while reading
+   */
+  public JreDeflateParameters divineDeflateParameters(
+      MultiViewInputStreamFactory<?> compressedDataInputStreamFactory) throws IOException {
+    InputStream compressedDataIn = compressedDataInputStreamFactory.newStream();
+    if (!compressedDataIn.markSupported()) {
+      try {
+        compressedDataIn.close();
+      } catch (Exception ignored) {
+        // Nothing to do.
+      }
+      throw new IllegalArgumentException("input stream must support mark(int)");
+    }
+
+    // Record the input stream position to return to it each time a prediction is needed.
+    compressedDataIn.mark(0); // The argument to mark is ignored and irrelevant
+
+    // Make a copy of the stream for matching bytes of compressed input
+    InputStream matchingCompressedDataIn = compressedDataInputStreamFactory.newStream();
+    matchingCompressedDataIn.mark(0); // The argument to mark is ignored and irrelevant
+
+    byte[] copyBuffer = new byte[32768];
+    try {
+      // Iterate over all relevant combinations of nowrap, strategy and level.
+      for (boolean nowrap : new boolean[] {true, false}) {
+        Inflater inflater = new Inflater(nowrap);
+        Deflater deflater = new Deflater(0, nowrap);
+        for (int strategy : new int[] {0, 1, 2}) {
+          deflater.setStrategy(strategy);
+          // Strategy 2 does not have the concept of levels, so vacuously call it 1.
+          List<Integer> levelsToSearch = levelsByStrategy.get(strategy);
+          for (int levelIndex = 0; levelIndex < levelsToSearch.size(); levelIndex++) {
+            int level = levelsToSearch.get(levelIndex);
+            deflater.setLevel(level);
+            inflater.reset();
+            deflater.reset();
+            compressedDataIn.reset();
+            matchingCompressedDataIn.reset();
+            try {
+              if (matches(
+                  compressedDataIn, inflater, deflater, matchingCompressedDataIn, copyBuffer)) {
+                return JreDeflateParameters.of(level, strategy, nowrap);
+              }
+            } catch (ZipException e) {
+              // Parse error in input. The only possibilities are corruption or the wrong nowrap.
+              // Skip all remaining levels and strategies.
+              levelIndex = levelsToSearch.size();
+              strategy = 2;
+            }
+          } // end of iteration on level
+        } // end of iteration on strategy
+      } // end of iteration on nowrap
+    } finally {
+      try {
+        compressedDataIn.close();
+      } catch (Exception ignored) {
+        // Don't care.
+      }
+      try {
+        matchingCompressedDataIn.close();
+      } catch (Exception ignored) {
+        // Don't care.
+      }
+    }
+    return null;
+  }
+
+  /**
+   * Checks whether the specified deflater produces the same compressed data as the byte stream in
+   * compressedDataIn, returning true if so.
+   * @param compressedDataIn the stream of compressed data to read and reproduce
+   * @param inflater the inflater for uncompressing the stream
+   * @param deflater the deflater for recompressing the output of the inflater
+   * @param matchingStreamInput an independent but identical view of the bytes in compressedDataIn
+   * @param copyBuffer buffer to use for copying bytes between the inflater and the deflater
+   * @return true if the specified deflater reproduces the bytes in compressedDataIn, otherwise
+   * false
+   * @throws IOException if anything goes wrong; in particular, {@link ZipException} is thrown if
+   * there is a problem parsing compressedDataIn
+   */
+  private boolean matches(
+      InputStream compressedDataIn,
+      Inflater inflater,
+      Deflater deflater,
+      InputStream matchingStreamInput,
+      byte[] copyBuffer)
+      throws IOException {
+    MatchingOutputStream matcher = new MatchingOutputStream(matchingStreamInput, 32768);
+    // This stream will deliberately be left open because closing it would close the
+    // underlying compressedDataIn stream, which is not desired.
+    InflaterInputStream inflaterIn = new InflaterInputStream(compressedDataIn, inflater, 32768);
+    DeflaterOutputStream out = new DeflaterOutputStream(matcher, deflater, 32768);
+    int numRead = 0;
+    try {
+      while ((numRead = inflaterIn.read(copyBuffer)) >= 0) {
+        out.write(copyBuffer, 0, numRead);
+      }
+      // When done, all bytes have been successfully recompressed. For sanity, check that
+      // the matcher has consumed the same number of bytes and arrived at EOF as well.
+      out.finish();
+      out.flush();
+      matcher.expectEof();
+      // At this point the data in the compressed output stream was a perfect match for the
+      // data in the compressed input stream; the answer has been found.
+      return true;
+    } catch (MismatchException e) {
+      // Fast-fail case when the compressed output stream doesn't match the compressed input
+      // stream. These are not the parameters you're looking for!
+    } finally {
+      try {
+        out.close();
+      } catch (Exception ignored) {
+        // Don't care.
+      }
+    }
+    return false;
+  }
+}
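
For illustration only (not part of the patch): a minimal sketch of running the diviner over an
archive, assuming a file named old.zip exists. Stored entries, and deflated data that cannot be
replicated within the compatibility window, report null parameters.

    import com.google.archivepatcher.generator.DefaultDeflateCompressionDiviner;
    import com.google.archivepatcher.generator.DefaultDeflateCompressionDiviner.DivinationResult;
    import java.io.File;
    import java.io.IOException;
    import java.util.List;

    class DivinerSketch {
      public static void main(String[] args) throws IOException {
        DefaultDeflateCompressionDiviner diviner = new DefaultDeflateCompressionDiviner();
        List<DivinationResult> results = diviner.divineDeflateParameters(new File("old.zip"));
        for (DivinationResult result : results) {
          // divinedParameters is null when the settings could not be divined.
          System.out.println(
              result.minimalZipEntry.getFileName() + " -> " + result.divinedParameters);
        }
      }
    }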
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/DeltaFriendlyOldBlobSizeLimiter.java b/generator/src/main/java/com/google/archivepatcher/generator/DeltaFriendlyOldBlobSizeLimiter.java
new file mode 100644
index 0000000..4cafebc
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/DeltaFriendlyOldBlobSizeLimiter.java
@@ -0,0 +1,118 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+
+/**
+ * Limits the size of the delta-friendly old blob, which is an implicit limitation on the amount of
+ * temp space required to apply a patch.
+ *
+ * <p>This class implements the following algorithm:
+ *
+ * <ol>
+ *   <li>Check the size of the old archive and subtract it from the maximum size; this is the
+ *       number of bytes that can be used to uncompress entries in the delta-friendly old file.
+ *   <li>Identify all of the {@link QualifiedRecommendation}s that have {@link
+ *       Recommendation#uncompressOldEntry} set to <code>true</code>. These identify all the entries
+ *       that would be uncompressed in the delta-friendly old file.
+ *   <li>Sort those {@link QualifiedRecommendation}s in order of decreasing uncompressed size.
+ *   <li>Iterate over the list in order. For each entry, calculate the difference between the
+ *       uncompressed size and the compressed size; this is the number of bytes that would be
+ *       consumed to transform the data from compressed to uncompressed in the delta-friendly old
+ *       file. If the number of bytes that would be consumed is less than the number of bytes
+ *       remaining before hitting the cap, retain it; else, discard it.
+ *   <li>Return the resulting list of the retained entries. Note that the order of this list may not
+ *       be the same as the input order (i.e., it has been sorted in order of decreasing
+ *       uncompressed size).
+ * </ol>
+ */
+public class DeltaFriendlyOldBlobSizeLimiter implements RecommendationModifier {
+
+  /** The maximum size of the delta-friendly old blob. */
+  private final long maxSizeBytes;
+
+  private static final Comparator<QualifiedRecommendation> COMPARATOR =
+      new UncompressedOldEntrySizeComparator();
+
+  /**
+   * Create a new limiter that will restrict the total size of the delta-friendly old blob.
+   *
+   * @param maxSizeBytes the maximum size of the delta-friendly old blob
+   */
+  public DeltaFriendlyOldBlobSizeLimiter(long maxSizeBytes) {
+    if (maxSizeBytes < 0) {
+      throw new IllegalArgumentException("maxSizeBytes must be non-negative: " + maxSizeBytes);
+    }
+    this.maxSizeBytes = maxSizeBytes;
+  }
+
+  @Override
+  public List<QualifiedRecommendation> getModifiedRecommendations(
+      File oldFile, File newFile, List<QualifiedRecommendation> originalRecommendations) {
+
+    List<QualifiedRecommendation> sorted = sortRecommendations(originalRecommendations);
+
+    List<QualifiedRecommendation> result = new ArrayList<>(sorted.size());
+    long bytesRemaining = maxSizeBytes - oldFile.length();
+    for (QualifiedRecommendation originalRecommendation : sorted) {
+      if (!originalRecommendation.getRecommendation().uncompressOldEntry) {
+        // Keep the original recommendation; no need to track size since it won't be uncompressed.
+        result.add(originalRecommendation);
+      } else {
+        long extraBytesConsumed =
+            originalRecommendation.getOldEntry().getUncompressedSize()
+                - originalRecommendation.getOldEntry().getCompressedSize();
+        if (bytesRemaining - extraBytesConsumed >= 0) {
+          // Keep the original recommendation, but also subtract from the remaining space.
+          result.add(originalRecommendation);
+          bytesRemaining -= extraBytesConsumed;
+        } else {
+          // Update the recommendation to prevent uncompressing this tuple.
+          result.add(
+              new QualifiedRecommendation(
+                  originalRecommendation.getOldEntry(),
+                  originalRecommendation.getNewEntry(),
+                  Recommendation.UNCOMPRESS_NEITHER,
+                  RecommendationReason.RESOURCE_CONSTRAINED));
+        }
+      }
+    }
+    return result;
+  }
+
+  private static List<QualifiedRecommendation> sortRecommendations(
+      List<QualifiedRecommendation> originalRecommendations) {
+    List<QualifiedRecommendation> sorted =
+        new ArrayList<QualifiedRecommendation>(originalRecommendations);
+    Collections.sort(sorted, COMPARATOR);
+    Collections.reverse(sorted);
+    return sorted;
+  }
+
+  /** Helper class implementing the sort order described in the class documentation. */
+  private static class UncompressedOldEntrySizeComparator
+      implements Comparator<QualifiedRecommendation> {
+    @Override
+    public int compare(QualifiedRecommendation qr1, QualifiedRecommendation qr2) {
+      return Long.compare(
+          qr1.getOldEntry().getUncompressedSize(), qr2.getOldEntry().getUncompressedSize());
+    }
+  }
+}
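
For illustration only (not part of the patch): a minimal sketch wiring the limiter into patch
generation so the delta-friendly old blob stays within roughly 100 MiB. The file names are
hypothetical.

    import com.google.archivepatcher.generator.DeltaFriendlyOldBlobSizeLimiter;
    import com.google.archivepatcher.generator.FileByFileV1DeltaGenerator;
    import java.io.File;
    import java.io.FileOutputStream;
    import java.io.IOException;

    class LimiterSketch {
      public static void main(String[] args) throws IOException, InterruptedException {
        // Entries keep their uncompress recommendation, largest first, until the budget runs out.
        FileByFileV1DeltaGenerator generator =
            new FileByFileV1DeltaGenerator(new DeltaFriendlyOldBlobSizeLimiter(100L * 1024 * 1024));
        try (FileOutputStream patchOut = new FileOutputStream("patch.raw")) {
          generator.generateDelta(new File("old.zip"), new File("new.zip"), patchOut);
        }
      }
    }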
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/DeltaGenerator.java b/generator/src/main/java/com/google/archivepatcher/generator/DeltaGenerator.java
new file mode 100644
index 0000000..03cd2b5
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/DeltaGenerator.java
@@ -0,0 +1,37 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ * An interface to be implemented by delta generators.
+ */
+public interface DeltaGenerator {
+  /**
+   * Generates a delta in deltaOut that can be applied to oldBlob to produce newBlob.
+   *
+   * @param oldBlob the old blob
+   * @param newBlob the new blob
+   * @param deltaOut the stream to write the delta to
+   * @throws IOException in the event of an I/O error reading the input files or writing to the
+   *     delta output stream
+   * @throws InterruptedException if any thread has interrupted the current thread
+   */
+  public void generateDelta(File oldBlob, File newBlob, OutputStream deltaOut)
+      throws IOException, InterruptedException;
+}
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/FileByFileV1DeltaGenerator.java b/generator/src/main/java/com/google/archivepatcher/generator/FileByFileV1DeltaGenerator.java
new file mode 100644
index 0000000..8c81761
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/FileByFileV1DeltaGenerator.java
@@ -0,0 +1,100 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import com.google.archivepatcher.generator.bsdiff.BsDiffDeltaGenerator;
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Generates file-by-file patches.
+ */
+public class FileByFileV1DeltaGenerator implements DeltaGenerator {
+
+  /** Optional modifiers for planning and patch generation. */
+  private final List<RecommendationModifier> recommendationModifiers;
+
+  /**
+   * Constructs a new generator for File-by-File v1 patches, using the specified configuration.
+   *
+   * @param recommendationModifiers optionally, {@link RecommendationModifier}s to use for modifying
+   *     the planning phase of patch generation. These can be used to, e.g., limit the total amount
+   *     of recompression that a patch applier needs to do. Modifiers are applied in the order they
+   *     are specified.
+   */
+  public FileByFileV1DeltaGenerator(RecommendationModifier... recommendationModifiers) {
+    if (recommendationModifiers != null) {
+      this.recommendationModifiers =
+          Collections.unmodifiableList(Arrays.asList(recommendationModifiers));
+    } else {
+      this.recommendationModifiers = Collections.emptyList();
+    }
+  }
+
+  /**
+   * Generate a V1 patch for the specified input files and write the patch to the specified {@link
+   * OutputStream}. The written patch is <em>raw</em>, i.e. it has not been compressed. Compression
+   * should almost always be applied to the patch, either right in the specified {@link
+   * OutputStream} or in a post-processing step, prior to transmitting the patch to the patch
+   * applier.
+   *
+   * @param oldFile the original old file to read (will not be modified)
+   * @param newFile the original new file to read (will not be modified)
+   * @param patchOut the stream to write the patch to
+   * @throws IOException if unable to complete the operation due to an I/O error
+   * @throws InterruptedException if any thread has interrupted the current thread
+   */
+  @Override
+  public void generateDelta(File oldFile, File newFile, OutputStream patchOut)
+      throws IOException, InterruptedException {
+    try (TempFileHolder deltaFriendlyOldFile = new TempFileHolder();
+        TempFileHolder deltaFriendlyNewFile = new TempFileHolder();
+        TempFileHolder deltaFile = new TempFileHolder();
+        FileOutputStream deltaFileOut = new FileOutputStream(deltaFile.file);
+        BufferedOutputStream bufferedDeltaOut = new BufferedOutputStream(deltaFileOut)) {
+      PreDiffExecutor.Builder builder =
+          new PreDiffExecutor.Builder()
+              .readingOriginalFiles(oldFile, newFile)
+              .writingDeltaFriendlyFiles(deltaFriendlyOldFile.file, deltaFriendlyNewFile.file);
+      for (RecommendationModifier modifier : recommendationModifiers) {
+        builder.withRecommendationModifier(modifier);
+      }
+      PreDiffExecutor executor = builder.build();
+      PreDiffPlan preDiffPlan = executor.prepareForDiffing();
+      DeltaGenerator deltaGenerator = getDeltaGenerator();
+      deltaGenerator.generateDelta(
+          deltaFriendlyOldFile.file, deltaFriendlyNewFile.file, bufferedDeltaOut);
+      bufferedDeltaOut.close();
+      PatchWriter patchWriter =
+          new PatchWriter(
+              preDiffPlan,
+              deltaFriendlyOldFile.file.length(),
+              deltaFriendlyNewFile.file.length(),
+              deltaFile.file);
+      patchWriter.writeV1Patch(patchOut);
+    }
+  }
+
+  // Visible for testing only
+  protected DeltaGenerator getDeltaGenerator() {
+    return new BsDiffDeltaGenerator();
+  }
+}
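
For illustration only (not part of the patch): a minimal sketch of the compression advice in the
generateDelta Javadoc above, writing the raw patch through a DeflaterOutputStream so the bytes
handed to the transport are compressed. The file names are hypothetical.

    import com.google.archivepatcher.generator.FileByFileV1DeltaGenerator;
    import java.io.File;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.util.zip.Deflater;
    import java.util.zip.DeflaterOutputStream;

    class CompressedPatchSketch {
      public static void main(String[] args) throws IOException, InterruptedException {
        FileByFileV1DeltaGenerator generator = new FileByFileV1DeltaGenerator();
        Deflater deflater = new Deflater(Deflater.BEST_COMPRESSION, true /* nowrap */);
        try (FileOutputStream fileOut = new FileOutputStream("patch.deflated");
            DeflaterOutputStream patchOut = new DeflaterOutputStream(fileOut, deflater)) {
          generator.generateDelta(new File("old.zip"), new File("new.zip"), patchOut);
        } finally {
          deflater.end(); // Release native zlib resources.
        }
      }
    }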
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/MatchingOutputStream.java b/generator/src/main/java/com/google/archivepatcher/generator/MatchingOutputStream.java
new file mode 100644
index 0000000..f1369d2
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/MatchingOutputStream.java
@@ -0,0 +1,99 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+/**
+ * A simple {@link OutputStream} that requires all bytes that are written to match bytes from a
+ * specified corresponding {@link InputStream}. Any length or content mismatch results in a
+ * {@link MismatchException} being thrown.
+ * <p>
+ * Every call to one of the write(...) methods results in reading the same total number of bytes
+ * from the {@link InputStream}. The bytes in both streams must match exactly.
+ */
+public class MatchingOutputStream extends OutputStream {
+
+  /**
+   * The bytes to match against.
+   */
+  private final InputStream expectedBytesStream;
+
+  /**
+   * The buffer for reading bytes from the input stream for matching.
+   */
+  private final byte[] buffer;
+
+  /**
+   * Constructs a new stream that will match against the specified {@link InputStream}.
+   * @param expectedBytesStream stream of bytes to expect to see
+   * @param matchBufferSize the number of bytes to reserve for matching against the specified
+   * {@link InputStream}. This must be greater than or equal to 1.
+   */
+  public MatchingOutputStream(InputStream expectedBytesStream, int matchBufferSize) {
+    if (matchBufferSize < 1) {
+      throw new IllegalArgumentException("buffer size must be >= 1");
+    }
+    this.expectedBytesStream = expectedBytesStream;
+    this.buffer = new byte[matchBufferSize];
+  }
+
+  @Override
+  public void write(int b) throws IOException {
+    int expected = expectedBytesStream.read();
+    if (expected == -1) {
+      throw new MismatchException("EOF reached in expectedBytesStream");
+    }
+    if (expected != b) {
+      throw new MismatchException("Data does not match");
+    }
+  }
+
+  @Override
+  public void write(byte[] b) throws IOException {
+    write(b, 0, b.length);
+  }
+
+  @Override
+  public void write(byte[] dataToWrite, int offset, int length) throws IOException {
+    int numReadSoFar = 0;
+    while (numReadSoFar < length) {
+      int maxToRead = Math.min(buffer.length, length - numReadSoFar);
+      int numReadThisLoop = expectedBytesStream.read(buffer, 0, maxToRead);
+      if (numReadThisLoop == -1) {
+        throw new MismatchException("EOF reached in expectedBytesStream");
+      }
+      for (int matchCount = 0; matchCount < numReadThisLoop; matchCount++) {
+        if (buffer[matchCount] != dataToWrite[offset + numReadSoFar + matchCount]) {
+          throw new MismatchException("Data does not match");
+        }
+      }
+      numReadSoFar += numReadThisLoop;
+    }
+  }
+
+  /**
+   * Expects the end-of-file to be reached in the associated {@link InputStream}.
+   * @throws IOException if the end-of-file has not yet been reached in the associated
+   * {@link InputStream}
+   */
+  public void expectEof() throws IOException {
+    if (expectedBytesStream.read() != -1) {
+      throw new MismatchException("EOF not reached in expectedBytesStream");
+    }
+  }
+}
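
For illustration only (not part of the patch): a minimal sketch of the matching contract.
Identical bytes pass silently; a divergent byte, or bytes written past the end of the expected
stream, raise MismatchException.

    import com.google.archivepatcher.generator.MatchingOutputStream;
    import java.io.ByteArrayInputStream;
    import java.io.IOException;

    class MatchingSketch {
      public static void main(String[] args) throws IOException {
        byte[] expected = new byte[] {1, 2, 3};
        MatchingOutputStream matcher =
            new MatchingOutputStream(new ByteArrayInputStream(expected), 8);
        matcher.write(new byte[] {1, 2, 3}); // matches, no exception
        matcher.expectEof(); // lengths agree, no exception
        matcher.write(9); // throws MismatchException: expected stream is exhausted
      }
    }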
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/MinimalCentralDirectoryMetadata.java b/generator/src/main/java/com/google/archivepatcher/generator/MinimalCentralDirectoryMetadata.java
new file mode 100644
index 0000000..73e7132
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/MinimalCentralDirectoryMetadata.java
@@ -0,0 +1,75 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+/**
+ * Trivial struct containing the critical data about the central directory: the number of entries
+ * it contains, the position within the file at which it starts, and its length.
+ */
+class MinimalCentralDirectoryMetadata {
+  /**
+   * The number of entries in the central directory.
+   */
+  private final int numEntriesInCentralDirectory;
+
+  /**
+   * The file offset of the first byte of the central directory.
+   */
+  private final long offsetOfCentralDirectory;
+
+  /**
+   * The length of the central directory, in bytes.
+   */
+  private final long lengthOfCentralDirectory;
+
+  /**
+   * Constructs a new metadata object with the specified values.
+   * @param numEntriesInCentralDirectory the number of entries in the central directory
+   * @param offsetOfCentralDirectory the file offset of the first byte of the central directory
+   * @param lengthOfCentralDirectory the length of the central directory, in bytes
+   */
+  MinimalCentralDirectoryMetadata(
+      int numEntriesInCentralDirectory,
+      long offsetOfCentralDirectory,
+      long lengthOfCentralDirectory) {
+    this.numEntriesInCentralDirectory = numEntriesInCentralDirectory;
+    this.offsetOfCentralDirectory = offsetOfCentralDirectory;
+    this.lengthOfCentralDirectory = lengthOfCentralDirectory;
+  }
+
+  /**
+   * Returns the number of entries in the central directory.
+   * @return as described
+   */
+  public final int getNumEntriesInCentralDirectory() {
+    return numEntriesInCentralDirectory;
+  }
+
+  /**
+   * Returns the file offset of the first byte of the central directory.
+   * @return as described
+   */
+  public final long getOffsetOfCentralDirectory() {
+    return offsetOfCentralDirectory;
+  }
+
+  /**
+   * Returns the length of the central directory, in bytes.
+   * @return as described
+   */
+  public final long getLengthOfCentralDirectory() {
+    return lengthOfCentralDirectory;
+  }
+}
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/MinimalZipArchive.java b/generator/src/main/java/com/google/archivepatcher/generator/MinimalZipArchive.java
new file mode 100644
index 0000000..177904f
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/MinimalZipArchive.java
@@ -0,0 +1,112 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import com.google.archivepatcher.shared.RandomAccessFileInputStream;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.zip.ZipException;
+
+/**
+ * A simplified, structural representation of a zip or zip-like (jar, apk, etc) archive. The class
+ * provides the minimum structural information needed for patch generation and is not suitable as a
+ * general zip-processing library. In particular, there is little or no verification that any of the
+ * zip structure is correct or sane; it is assumed that the input is sane.
+ */
+public class MinimalZipArchive {
+
+  /**
+   * Sorts {@link MinimalZipEntry} objects by {@link MinimalZipEntry#getFileOffsetOfLocalEntry()} in
+   * ascending order.
+   */
+  private static final Comparator<MinimalZipEntry> LOCAL_ENTRY_OFFSET_COMPARATOR =
+      new Comparator<MinimalZipEntry>() {
+        @Override
+        public int compare(MinimalZipEntry o1, MinimalZipEntry o2) {
+          return Long.compare(o1.getFileOffsetOfLocalEntry(), o2.getFileOffsetOfLocalEntry());
+        }
+      };
+
+  /**
+   * Generate a listing of all of the files in a zip archive in file order and return it. Each entry
+   * is a {@link MinimalZipEntry}, which has just enough information to generate a patch.
+   * @param file the zip file to read
+   * @return such a listing
+   * @throws IOException if anything goes wrong while reading
+   */
+  public static List<MinimalZipEntry> listEntries(File file) throws IOException {
+    try (RandomAccessFileInputStream in = new RandomAccessFileInputStream(file)) {
+      return listEntriesInternal(in);
+    }
+  }
+
+  /**
+   * Internal implementation of {@link #listEntries(File)}.
+   * @param in the input stream to read from
+   * @return see {@link #listEntries(File)}
+   * @throws IOException if anything goes wrong while reading
+   */
+  private static List<MinimalZipEntry> listEntriesInternal(RandomAccessFileInputStream in)
+      throws IOException {
+    // Step 1: Locate the end-of-central-directory record header.
+    long offsetOfEocd = MinimalZipParser.locateStartOfEocd(in, 32768);
+    if (offsetOfEocd == -1) {
+      // Archive is weird, abort.
+      throw new ZipException("EOCD record not found in last 32k of archive, giving up");
+    }
+
+    // Step 2: Parse the end-of-central-directory data to locate the central directory itself
+    in.setRange(offsetOfEocd, in.length() - offsetOfEocd);
+    MinimalCentralDirectoryMetadata centralDirectoryMetadata = MinimalZipParser.parseEocd(in);
+
+    // Step 3: Extract a list of all central directory entries (contiguous data stream)
+    in.setRange(
+        centralDirectoryMetadata.getOffsetOfCentralDirectory(),
+        centralDirectoryMetadata.getLengthOfCentralDirectory());
+    List<MinimalZipEntry> minimalZipEntries =
+        new ArrayList<MinimalZipEntry>(centralDirectoryMetadata.getNumEntriesInCentralDirectory());
+    for (int x = 0; x < centralDirectoryMetadata.getNumEntriesInCentralDirectory(); x++) {
+      minimalZipEntries.add(MinimalZipParser.parseCentralDirectoryEntry(in));
+    }
+
+    // Step 4: Sort the entries in file order, not central directory order.
+    Collections.sort(minimalZipEntries, LOCAL_ENTRY_OFFSET_COMPARATOR);
+
+    // Step 5: Seek out each local entry and calculate the offset of the compressed data within
+    for (int x = 0; x < minimalZipEntries.size(); x++) {
+      MinimalZipEntry entry = minimalZipEntries.get(x);
+      long offsetOfNextEntry;
+      if (x < minimalZipEntries.size() - 1) {
+        // Don't allow reading past the start of the next entry, for sanity.
+        offsetOfNextEntry = minimalZipEntries.get(x + 1).getFileOffsetOfLocalEntry();
+      } else {
+        // Last entry. Don't allow reading into the central directory, for sanity.
+        offsetOfNextEntry = centralDirectoryMetadata.getOffsetOfCentralDirectory();
+      }
+      long rangeLength = offsetOfNextEntry - entry.getFileOffsetOfLocalEntry();
+      in.setRange(entry.getFileOffsetOfLocalEntry(), rangeLength);
+      long relativeDataOffset = MinimalZipParser.parseLocalEntryAndGetCompressedDataOffset(in);
+      entry.setFileOffsetOfCompressedData(entry.getFileOffsetOfLocalEntry() + relativeDataOffset);
+    }
+
+    // Done!
+    return minimalZipEntries;
+  }
+}
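
For illustration only (not part of the patch): a minimal sketch that lists the entries of a
hypothetical archive.zip in file order, printing the offsets and sizes gathered above.

    import com.google.archivepatcher.generator.MinimalZipArchive;
    import com.google.archivepatcher.generator.MinimalZipEntry;
    import java.io.File;
    import java.io.IOException;

    class ListEntriesSketch {
      public static void main(String[] args) throws IOException {
        for (MinimalZipEntry entry : MinimalZipArchive.listEntries(new File("archive.zip"))) {
          System.out.println(
              entry.getFileName()
                  + " localEntry@" + entry.getFileOffsetOfLocalEntry()
                  + " data@" + entry.getFileOffsetOfCompressedData()
                  + " compressed=" + entry.getCompressedSize()
                  + " uncompressed=" + entry.getUncompressedSize());
        }
      }
    }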
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/MinimalZipEntry.java b/generator/src/main/java/com/google/archivepatcher/generator/MinimalZipEntry.java
new file mode 100644
index 0000000..d6342d0
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/MinimalZipEntry.java
@@ -0,0 +1,273 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import java.io.UnsupportedEncodingException;
+import java.util.Arrays;
+
+/**
+ * A class that contains <em>just enough data</em> to generate a patch.
+ */
+public class MinimalZipEntry {
+  /**
+   * The compression method that was used, typically 8 (for deflate) or 0 (for stored).
+   */
+  private final int compressionMethod;
+
+  /**
+   * The CRC32 of the <em>uncompressed</em> data.
+   */
+  private final long crc32OfUncompressedData;
+
+  /**
+   * The size of the data as it exists in the archive. For compressed entries, this is the size of
+   * the compressed data; for uncompressed entries, this is the same as {@link #uncompressedSize}.
+   */
+  private final long compressedSize;
+
+  /**
+   * The size of the <em>uncompressed</em> data.
+   */
+  private final long uncompressedSize;
+
+  /**
+   * The file name for the entry. By convention, names ending with '/' denote directories. The
+   * encoding is controlled by the general purpose flags, bit 11. See {@link #getFileName()} for
+   * more information.
+   */
+  private final byte[] fileNameBytes;
+
+  /**
+   * The value of the 11th bit of the general purpose flag, which controls the encoding of file
+   * names and comments. See {@link #getFileName()} for more information.
+   */
+  private final boolean generalPurposeFlagBit11;
+
+  /**
+   * The file offset at which the first byte of the local entry header begins.
+   */
+  private final long fileOffsetOfLocalEntry;
+
+  /**
+   * The file offset at which the first byte of the data for the entry begins. For compressed data,
+   * this is the first byte of the deflated data; for uncompressed data, this is the first byte of
+   * the uncompressed data.
+   */
+  private long fileOffsetOfCompressedData = -1;
+
+  /**
+   * Create a new Central Directory entry with the corresponding data.
+   * @param compressionMethod the method used to compress the data
+   * @param crc32OfUncompressedData the CRC32 of the uncompressed data
+   * @param compressedSize the size of the data in its compressed form
+   * @param uncompressedSize the size of the data in its uncompressed form
+   * @param fileNameBytes the name of the file, as a byte array; see {@link #getFileName()} for
+   * information on encoding
+   * @param generalPurposeFlagBit11 the value of the 11th bit of the general purpose flag, which
+   * nominally controls the default character encoding for file names and comments; see
+   * {@link #getFileName()} for more information on encoding
+   * @param fileOffsetOfLocalEntry the file offset at which the local entry begins
+   */
+  public MinimalZipEntry(
+      int compressionMethod,
+      long crc32OfUncompressedData,
+      long compressedSize,
+      long uncompressedSize,
+      byte[] fileNameBytes,
+      boolean generalPurposeFlagBit11,
+      long fileOffsetOfLocalEntry) {
+    this.compressionMethod = compressionMethod;
+    this.crc32OfUncompressedData = crc32OfUncompressedData;
+    this.compressedSize = compressedSize;
+    this.uncompressedSize = uncompressedSize;
+    this.fileNameBytes = fileNameBytes == null ? null : fileNameBytes.clone();
+    this.generalPurposeFlagBit11 = generalPurposeFlagBit11;
+    this.fileOffsetOfLocalEntry = fileOffsetOfLocalEntry;
+  }
+
+  /**
+   * Sets the file offset at which the data for this entry begins.
+   * @param offset the offset
+   */
+  public void setFileOffsetOfCompressedData(long offset) {
+    fileOffsetOfCompressedData = offset;
+  }
+
+  /**
+   * Returns the compression method that was used, typically 8 (for deflate) or 0 (for stored).
+   * @return as described
+   */
+  public int getCompressionMethod() {
+    return compressionMethod;
+  }
+
+  /**
+   * Returns the CRC32 of the uncompressed data.
+   * @return as described
+   */
+  public long getCrc32OfUncompressedData() {
+    return crc32OfUncompressedData;
+  }
+
+  /**
+   * Returns the size of the data as it exists in the archive. For compressed entries, this is the
+   * size of the compressed data; for uncompressed entries, this is the same as
+   * {@link #getUncompressedSize()}.
+   * @return as described
+   */
+  public long getCompressedSize() {
+    return compressedSize;
+  }
+
+  /**
+   * Returns the size of the uncompressed data.
+   * @return as described
+   */
+  public long getUncompressedSize() {
+    return uncompressedSize;
+  }
+
+  /**
+   * Returns a copy of the bytes of the file name, exactly the same as they were in the archive
+   * file. See {@link #getFileName()} for an explanation of why this is useful.
+   * @return as described
+   */
+  public byte[] getFileNameBytes() {
+    return fileNameBytes == null ? null : fileNameBytes.clone();
+  }
+
+  /**
+   * Returns a best-effort conversion of the file name into a string, based on strict adherence to
+   * the PKWARE APPNOTE that defines this behavior. If the value of the 11th bit of the general
+   * purpose flag was set to 1, these bytes should be encoded with the UTF8 character set; otherwise
+   * the character set should be Cp437. Adherence to this standard varies significantly, and some
+   * systems use the default character set for the environment instead of Cp437 when writing these
+   * bytes. For such instances, callers can obtain the raw bytes by using
+   * {@link #getFileNameBytes()} instead and checking the value of the 11th bit of the general
+   * purpose bit flag for a hint using {@link #getGeneralPurposeFlagBit11()}. There is also
+   * something called EFS ("0x0008 extra field storage") that specifies additional behavior for
+   * character encoding, but this tool doesn't support it as the use is not standardized.
+   * @return as described
+   */
+  // TODO(andrewhayden): Support EFS
+  public String getFileName() {
+    String charsetName = generalPurposeFlagBit11 ? "UTF8" : "Cp437";
+    try {
+      return new String(fileNameBytes, charsetName);
+    } catch (UnsupportedEncodingException e) {
+      // Cp437 has been supported at least since JDK 1.6.0, so this should rarely occur in practice.
+      // Older versions of the JDK also support Cp437, but as part of charsets.jar, which didn't
+      // ship in every distribution; it is conceivable that those systems might have problems here.
+      throw new RuntimeException("System doesn't support " + charsetName, e);
+    }
+  }
+
+  /**
+   * Returns the value of the 11th bit of the general purpose flag; true for 1, false for 0. See
+   * {@link #getFileName()} for more information on the usefulness of this flag.
+   * @return as described
+   */
+  public boolean getGeneralPurposeFlagBit11() {
+    return generalPurposeFlagBit11;
+  }
+
+  /**
+   * Returns the file offset at which the first byte of the local entry header begins.
+   * @return as described
+   */
+  public long getFileOffsetOfLocalEntry() {
+    return fileOffsetOfLocalEntry;
+  }
+
+  /**
+   * Returns the file offset at which the first byte of the data for the entry begins. For
+   * compressed data, this is the first byte of the deflated data; for uncompressed data, this is
+   * the first byte of the uncompressed data.
+   * @return as described
+   */
+  public long getFileOffsetOfCompressedData() {
+    return fileOffsetOfCompressedData;
+  }
+
+  /**
+   * Convenience method that returns true if and only if the entry is compressed with deflate.
+   * @return as described
+   */
+  public boolean isDeflateCompressed() {
+    // 8 is deflate according to the zip spec.
+    if (getCompressionMethod() != 8) {
+      return false;
+    }
+    // Some tools may list compression method deflate but set level to zero (store), so they will
+    // have a compressed size equal to the uncompressed size. Don't consider such things to be
+    // compressed, even if they are "deflated".
+    return getCompressedSize() != getUncompressedSize();
+  }
+
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result = prime * result + (int) (compressedSize ^ (compressedSize >>> 32));
+    result = prime * result + compressionMethod;
+    result = prime * result + (int) (crc32OfUncompressedData ^ (crc32OfUncompressedData >>> 32));
+    result = prime * result + Arrays.hashCode(fileNameBytes);
+    result =
+        prime * result + (int) (fileOffsetOfCompressedData ^ (fileOffsetOfCompressedData >>> 32));
+    result = prime * result + (int) (fileOffsetOfLocalEntry ^ (fileOffsetOfLocalEntry >>> 32));
+    result = prime * result + (generalPurposeFlagBit11 ? 1231 : 1237);
+    result = prime * result + (int) (uncompressedSize ^ (uncompressedSize >>> 32));
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (getClass() != obj.getClass()) {
+      return false;
+    }
+    MinimalZipEntry other = (MinimalZipEntry) obj;
+    if (compressedSize != other.compressedSize) {
+      return false;
+    }
+    if (compressionMethod != other.compressionMethod) {
+      return false;
+    }
+    if (crc32OfUncompressedData != other.crc32OfUncompressedData) {
+      return false;
+    }
+    if (!Arrays.equals(fileNameBytes, other.fileNameBytes)) {
+      return false;
+    }
+    if (fileOffsetOfCompressedData != other.fileOffsetOfCompressedData) {
+      return false;
+    }
+    if (fileOffsetOfLocalEntry != other.fileOffsetOfLocalEntry) {
+      return false;
+    }
+    if (generalPurposeFlagBit11 != other.generalPurposeFlagBit11) {
+      return false;
+    }
+    if (uncompressedSize != other.uncompressedSize) {
+      return false;
+    }
+    return true;
+  }
+}
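
For illustration only (not part of the patch): a minimal sketch of the encoding caveat documented
on getFileName(), decoding the raw name bytes with a caller-chosen charset when an archive is
suspected of ignoring the Cp437 convention. The helper and its fallback are hypothetical.

    import com.google.archivepatcher.generator.MinimalZipEntry;
    import java.io.UnsupportedEncodingException;

    class EntryNameSketch {
      // Hypothetical helper: trust getFileName() only when bit 11 promises UTF-8,
      // otherwise decode the raw bytes with the charset the caller believes was used.
      static String nameWithFallback(MinimalZipEntry entry, String fallbackCharset)
          throws UnsupportedEncodingException {
        if (entry.getGeneralPurposeFlagBit11()) {
          return entry.getFileName(); // Flag set: bytes are UTF-8 per the APPNOTE.
        }
        return new String(entry.getFileNameBytes(), fallbackCharset);
      }
    }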
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/MinimalZipParser.java b/generator/src/main/java/com/google/archivepatcher/generator/MinimalZipParser.java
new file mode 100644
index 0000000..c83fc4f
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/MinimalZipParser.java
@@ -0,0 +1,306 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import com.google.archivepatcher.shared.RandomAccessFileInputStream;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.zip.ZipException;
+
+/**
+ * A minimal set of zip-parsing utilities just adequate to produce a {@link MinimalZipEntry} and
+ * update it. This parser is neither robust nor exhaustive. The parser is built to understand
+ * version 2.0 of the ZIP specification, with the notable exception that it does not have support
+ * for encrypted central directories.
+ * <p>
+ * The offsets, lengths and fields that this parser understands and exposes are based on version
+ * 6.3.3 of the ZIP specification (the most recent available at the time of this writing), which may
+ * be found at the following URL:
+ * <br><ul><li>https://www.pkware.com/documents/APPNOTE/APPNOTE-6.3.3.TXT</li></ul>
+ * <p>
+ * Please note that the parser does not attempt to verify the version-needed-to-extract field, since
+ * there is no guarantee that all ZIP implementations have set the value correctly to the minimum
+ * needed to truly support extraction.
+ */
+class MinimalZipParser {
+
+  /**
+   * Standard 32-bit signature for a "end-of-central-directory" record in a ZIP-like archive. This
+   * is in little-endian order.
+   */
+  public static final int EOCD_SIGNATURE = 0x06054b50;
+
+  /**
+   * Standard 32-bit signature for a "central directory entry" record in a ZIP-like archive. This is
+   * in little-endian order.
+   */
+  public static final int CENTRAL_DIRECTORY_ENTRY_SIGNATURE = 0x02014b50;
+
+  /**
+   * Standard 32-bit signature for a "local file entry" in a ZIP-like archive. This is in
+   * little-endian order.
+   */
+  public static final int LOCAL_ENTRY_SIGNATURE = 0x04034b50;
+
+  /**
+   * Read exactly one byte, throwing an exception if unsuccessful.
+   * @param in the stream to read from
+   * @return the byte read
+   * @throws IOException if EOF is reached
+   */
+  private static int readByteOrDie(InputStream in) throws IOException {
+    int result = in.read();
+    if (result == -1) {
+      throw new IOException("EOF");
+    }
+    return result;
+  }
+
+  /**
+   * Skips exactly the specified number of bytes, throwing an exception if unsuccessful.
+   * @param in the stream to read from
+   * @param numBytes the number of bytes to skip
+   * @throws IOException if EOF is reached or no more bytes can be skipped
+   */
+  private static void skipOrDie(InputStream in, long numBytes) throws IOException {
+    long numLeft = numBytes;
+    long numSkipped = 0;
+    while ((numSkipped = in.skip(numLeft)) > 0) {
+      numLeft -= numSkipped;
+    }
+    if (numLeft != 0) {
+      throw new IOException("Unable to skip");
+    }
+  }
+
+  /**
+   * Reads 2 bytes from the current offset as an unsigned, 16-bit little-endian value.
+   * @param in the stream to read from
+   * @return the value as a java int
+   * @throws IOException if unable to read
+   */
+  private static int read16BitUnsigned(InputStream in) throws IOException {
+    int value = readByteOrDie(in);
+    value |= readByteOrDie(in) << 8;
+    return value;
+  }
+
+  /**
+   * Reads 4 bytes from the current offset as an unsigned, 32-bit little-endian value.
+   * @param in the stream to read from
+   * @return the value as a java long
+   * @throws IOException if unable to read
+   */
+  private static long read32BitUnsigned(InputStream in) throws IOException {
+    long value = readByteOrDie(in);
+    value |= ((long) readByteOrDie(in)) << 8;
+    value |= ((long) readByteOrDie(in)) << 16;
+    value |= ((long) readByteOrDie(in)) << 24;
+    return value;
+  }
+
+  /**
+   * Read exactly the specified amount of data into the specified buffer, throwing an exception if
+   * unsuccessful.
+   * @param in the stream to read from
+   * @param buffer the buffer to fill
+   * @param offset the offset at which to start writing to the buffer
+   * @param length the number of bytes to place into the buffer from the input stream
+   * @throws IOException if unable to read
+   */
+  private static void readOrDie(InputStream in, byte[] buffer, int offset, int length)
+      throws IOException {
+    if (length < 0) {
+      throw new IllegalArgumentException("length must be >= 0");
+    }
+    int numRead = 0;
+    while (numRead < length) {
+      int readThisRound = in.read(buffer, offset + numRead, length - numRead);
+      if (readThisRound == -1) {
+        throw new IOException("EOF");
+      }
+      numRead += readThisRound;
+    }
+  }
+
+  /**
+   * Parse one central directory entry, starting at the current file position.
+   * @param in the input stream to read from, assumed to start at the first byte of the entry
+   * @return the entry that was parsed
+   * @throws IOException if unable to complete the parsing
+   */
+  public static MinimalZipEntry parseCentralDirectoryEntry(InputStream in) throws IOException {
+    // *** 4 bytes encode the CENTRAL_DIRECTORY_ENTRY_SIGNATURE, verify for sanity
+    // 2 bytes encode the version-made-by, ignore
+    // 2 bytes encode the version-needed-to-extract, ignore
+    // *** 2 bytes encode the general-purpose flags, read for language encoding. [READ THIS]
+    // *** 2 bytes encode the compression method, [READ THIS]
+    // 2 bytes encode the MSDOS last modified file time, ignore
+    // 2 bytes encode the MSDOS last modified file date, ignore
+    // *** 4 bytes encode the CRC32 of the uncompressed data [READ THIS]
+    // *** 4 bytes encode the compressed size [READ THIS]
+    // *** 4 bytes encode the uncompressed size [READ THIS]
+    // *** 2 bytes encode the length of the file name [READ THIS]
+    // *** 2 bytes encode the length of the extras, needed to skip the bytes later [READ THIS]
+    // *** 2 bytes encode the length of the comment, needed to skip the bytes later [READ THIS]
+    // 2 bytes encode the disk number, ignore
+    // 2 bytes encode the internal file attributes, ignore
+    // 4 bytes encode the external file attributes, ignore
+    // *** 4 bytes encode the offset of the local section entry, where the data is [READ THIS]
+    // n bytes encode the file name
+    // n bytes encode the extras
+    // n bytes encode the comment
+    if (((int) read32BitUnsigned(in)) != CENTRAL_DIRECTORY_ENTRY_SIGNATURE) {
+      throw new ZipException("Bad central directory header");
+    }
+    skipOrDie(in, 2 + 2); // Skip version stuff
+    int generalPurposeFlags = read16BitUnsigned(in);
+    int compressionMethod = read16BitUnsigned(in);
+    skipOrDie(in, 2 + 2); // Skip MSDOS junk
+    long crc32OfUncompressedData = read32BitUnsigned(in);
+    long compressedSize = read32BitUnsigned(in);
+    long uncompressedSize = read32BitUnsigned(in);
+    int fileNameLength = read16BitUnsigned(in);
+    int extrasLength = read16BitUnsigned(in);
+    int commentLength = read16BitUnsigned(in);
+    skipOrDie(in, 2 + 2 + 4); // Skip the disk number and file attributes
+    long fileOffsetOfLocalEntry = read32BitUnsigned(in);
+    byte[] fileNameBuffer = new byte[fileNameLength];
+    readOrDie(in, fileNameBuffer, 0, fileNameBuffer.length);
+    skipOrDie(in, extrasLength + commentLength);
+    // General purpose flag bit 11 is an important hint for the character set used for file names.
+    boolean generalPurposeFlagBit11 = (generalPurposeFlags & (0x1 << 11)) != 0;
+    return new MinimalZipEntry(
+        compressionMethod,
+        crc32OfUncompressedData,
+        compressedSize,
+        uncompressedSize,
+        fileNameBuffer,
+        generalPurposeFlagBit11,
+        fileOffsetOfLocalEntry);
+  }
+
+  /**
+   * Parses one local file entry and returns the offset, relative to the first byte of the local
+   * entry, at which the compressed data begins.
+   * @param in the input stream to read from, assumed to start at the first byte of the entry
+   * @return as described
+   * @throws IOException if unable to complete the parsing
+   */
+  public static long parseLocalEntryAndGetCompressedDataOffset(InputStream in) throws IOException {
+    // *** 4 bytes encode the LOCAL_ENTRY_SIGNATURE, verify for sanity
+    // 2 bytes encode the version-needed-to-extract, ignore
+    // 2 bytes encode the general-purpose flags, ignore
+    // 2 bytes encode the compression method, ignore (redundant with central directory)
+    // 2 bytes encode the MSDOS last modified file time, ignore
+    // 2 bytes encode the MSDOS last modified file date, ignore
+    // 4 bytes encode the CRC32 of the uncompressed data, ignore (redundant with central directory)
+    // 4 bytes encode the compressed size, ignore (redundant with central directory)
+    // 4 bytes encode the uncompressed size, ignore (redundant with central directory)
+    // *** 2 bytes encode the length of the file name, needed to skip the bytes later [READ THIS]
+    // *** 2 bytes encode the length of the extras, needed to skip the bytes later [READ THIS]
+    // The rest is the data, which is the main attraction here.
+    if (((int) read32BitUnsigned(in)) != LOCAL_ENTRY_SIGNATURE) {
+      throw new ZipException("Bad local entry header");
+    }
+    int junkLength = 2 + 2 + 2 + 2 + 2 + 4 + 4 + 4;
+    skipOrDie(in, junkLength); // Skip everything up to the length of the file name
+    final int fileNameLength = read16BitUnsigned(in);
+    final int extrasLength = read16BitUnsigned(in);
+
+    // The file name is already known and will match the central directory, so no need to read it.
+    // The extra field length can be different here versus in the central directory and is used for
+    // things like zipaligning APKs. This single value is the critical part as it dictates where the
+    // actual DATA for the entry begins.
+    return 4 + junkLength + 2 + 2 + fileNameLength + extrasLength;
+  }
+
+  /**
+   * Find the end-of-central-directory record by scanning backwards from the end of a file looking
+   * for the signature of the record.
+   * @param in the file to read from
+   * @param searchBufferLength the length of the search buffer, starting from the end of the file
+   * @return the offset in the file at which the first byte of the EOCD signature is located, or -1
+   * if the signature is not found in the search buffer
+   * @throws IOException if there is a problem reading
+   */
+  public static long locateStartOfEocd(RandomAccessFileInputStream in, int searchBufferLength)
+      throws IOException {
+    final int maxBufferSize = (int) Math.min(searchBufferLength, in.length());
+    final byte[] buffer = new byte[maxBufferSize];
+    final long rangeStart = in.length() - buffer.length;
+    in.setRange(rangeStart, buffer.length);
+    readOrDie(in, buffer, 0, buffer.length);
+    int offset = locateStartOfEocd(buffer);
+    if (offset == -1) {
+      return -1;
+    }
+    return rangeStart + offset;
+  }
+
+  /**
+   * Find the end-of-central-directory record by scanning backwards looking for the signature of the
+   * record.
+   * @param buffer the buffer in which to search
+   * @return the offset in the buffer at which the first byte of the EOCD signature is located, or
+   * -1 if the complete signature is not found
+   */
+  public static int locateStartOfEocd(byte[] buffer) {
+    int last4Bytes = 0; // This is the 32 bits of data from the file
+    for (int offset = buffer.length - 1; offset >= 0; offset--) {
+      last4Bytes <<= 8;
+      last4Bytes |= buffer[offset] & 0xff; // mask to avoid sign extension of the byte
+      if (last4Bytes == EOCD_SIGNATURE) {
+        return offset;
+      }
+    }
+    return -1;
+  }
+
+  /**
+   * Parse the end-of-central-directory record and return the critical information from it.
+   * @param in the input stream to read from, assumed to start at the first byte of the entry
+   * @return the metadata
+   * @throws IOException if unable to read
+   * @throws ZipException if the metadata indicates this is a zip64 archive, which is not supported
+   */
+  public static MinimalCentralDirectoryMetadata parseEocd(InputStream in)
+      throws IOException, ZipException {
+    if (((int) read32BitUnsigned(in)) != EOCD_SIGNATURE) {
+      throw new ZipException("Bad eocd header");
+    }
+
+    // *** 4 bytes encode EOCD_SIGNATURE, ignore (already found and verified).
+    // 2 bytes encode disk number for this archive, ignore.
+    // 2 bytes encode disk number for the central directory, ignore.
+    // 2 bytes encode num entries in the central directory on this disk, ignore.
+    // *** 2 bytes encode num entries in the central directory overall [READ THIS]
+    // *** 4 bytes encode the length of the central directory [READ THIS]
+    // *** 4 bytes encode the file offset of the central directory [READ THIS]
+    // 2 bytes encode the length of the zip file comment, ignore.
+    // Everything else from here to the EOF is the zip file comment, or junk. Ignore.
+    skipOrDie(in, 2 + 2 + 2);
+    int numEntriesInCentralDirectory = read16BitUnsigned(in);
+    if (numEntriesInCentralDirectory == 0xffff) {
+      // If 0xffff, this is a zip64 archive and this code doesn't handle that.
+      throw new ZipException("No support for zip64");
+    }
+    long lengthOfCentralDirectory = read32BitUnsigned(in);
+    long offsetOfCentralDirectory = read32BitUnsigned(in);
+    return new MinimalCentralDirectoryMetadata(
+        numEntriesInCentralDirectory, offsetOfCentralDirectory, lengthOfCentralDirectory);
+  }
+}
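
For illustration, the parsing primitives above compose into a full central-directory walk: scan backwards for the EOCD signature, parse the EOCD to locate the central directory, then parse each entry in turn. This is essentially what MinimalZipArchive.listEntries (used by PreDiffExecutor later in this patch) does. The sketch below assumes same-package access (MinimalZipParser is package-private) and hypothetical getter names on MinimalCentralDirectoryMetadata, which this patch does not show.

    // Sketch only; imports elided, metadata getter names assumed.
    static List<MinimalZipEntry> walkCentralDirectory(File zipFile) throws IOException {
      try (RandomAccessFileInputStream in =
          new RandomAccessFileInputStream(zipFile, 0, zipFile.length())) {
        // The EOCD record is 22 bytes plus a comment of at most 65535 bytes, so a
        // 32k search buffer covers all but pathologically long comments.
        long eocdOffset = MinimalZipParser.locateStartOfEocd(in, 32768);
        if (eocdOffset == -1) {
          throw new ZipException("EOCD not found");
        }
        in.setRange(eocdOffset, in.length() - eocdOffset);
        MinimalCentralDirectoryMetadata metadata = MinimalZipParser.parseEocd(in);
        in.setRange(
            metadata.getOffsetOfCentralDirectory(), // hypothetical getter
            metadata.getLengthOfCentralDirectory()); // hypothetical getter
        List<MinimalZipEntry> entries = new ArrayList<MinimalZipEntry>();
        for (int i = 0; i < metadata.getNumEntriesInCentralDirectory(); i++) { // hypothetical
          entries.add(MinimalZipParser.parseCentralDirectoryEntry(in));
        }
        return entries;
      }
    }
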
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/MismatchException.java b/generator/src/main/java/com/google/archivepatcher/generator/MismatchException.java
new file mode 100644
index 0000000..b56b55d
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/MismatchException.java
@@ -0,0 +1,31 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import java.io.IOException;
+
+/**
+ * Thrown when data does not match expected values.
+ */
+@SuppressWarnings("serial")
+public class MismatchException extends IOException {
+  /**
+   * Construct an exception with the specified message
+   * @param message the message
+   */
+  public MismatchException(String message) {
+    super(message);
+  }
+}
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/PatchWriter.java b/generator/src/main/java/com/google/archivepatcher/generator/PatchWriter.java
new file mode 100644
index 0000000..d6e055f
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/PatchWriter.java
@@ -0,0 +1,138 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import com.google.archivepatcher.shared.JreDeflateParameters;
+import com.google.archivepatcher.shared.PatchConstants;
+import com.google.archivepatcher.shared.TypedRange;
+import java.io.BufferedInputStream;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ * Writes patches.
+ */
+public class PatchWriter {
+  /**
+   * The patch plan.
+   */
+  private final PreDiffPlan plan;
+
+  /**
+   * The expected size of the delta-friendly old file, provided as a convenience for the patch
+   * <strong>applier</strong> to reserve space on the filesystem for applying the patch.
+   */
+  private final long deltaFriendlyOldFileSize;
+
+  /**
+   * The expected size of the delta-friendly new file, provided for forward compatibility.
+   */
+  private final long deltaFriendlyNewFileSize;
+
+  /**
+   * The delta that transforms the old delta-friendly file into the new delta-friendly file.
+   */
+  private final File deltaFile;
+
+  /**
+   * Creates a new patch writer.
+   *
+   * @param plan the patch plan
+   * @param deltaFriendlyOldFileSize the expected size of the delta-friendly old file, provided as a
+   *     convenience for the patch <strong>applier</strong> to reserve space on the filesystem for
+   *     applying the patch
+   * @param deltaFriendlyNewFileSize the expected size of the delta-friendly new file, provided for
+   *     forward compatibility
+   * @param deltaFile the delta that transforms the old delta-friendly file into the new
+   *     delta-friendly file
+   */
+  public PatchWriter(
+      PreDiffPlan plan,
+      long deltaFriendlyOldFileSize,
+      long deltaFriendlyNewFileSize,
+      File deltaFile) {
+    this.plan = plan;
+    this.deltaFriendlyOldFileSize = deltaFriendlyOldFileSize;
+    this.deltaFriendlyNewFileSize = deltaFriendlyNewFileSize;
+    this.deltaFile = deltaFile;
+  }
+
+  /**
+   * Write a v1-style patch to the specified output stream.
+   * @param out the stream to write the patch to
+   * @throws IOException if anything goes wrong
+   */
+  public void writeV1Patch(OutputStream out) throws IOException {
+    // Use DataOutputStream for ease of writing. This is deliberately left open, as closing it
+    // would close the output stream that was passed in, which is not part of the method's
+    // documented behavior.
+    @SuppressWarnings("resource")
+    DataOutputStream dataOut = new DataOutputStream(out);
+
+    dataOut.write(PatchConstants.IDENTIFIER.getBytes("US-ASCII"));
+    dataOut.writeInt(0); // Flags (reserved)
+    dataOut.writeLong(deltaFriendlyOldFileSize);
+
+    // Write out all the delta-friendly old file uncompression instructions
+    dataOut.writeInt(plan.getOldFileUncompressionPlan().size());
+    for (TypedRange<Void> range : plan.getOldFileUncompressionPlan()) {
+      dataOut.writeLong(range.getOffset());
+      dataOut.writeLong(range.getLength());
+    }
+
+    // Write out all the delta-friendly new file recompression instructions
+    dataOut.writeInt(plan.getDeltaFriendlyNewFileRecompressionPlan().size());
+    for (TypedRange<JreDeflateParameters> range : plan.getDeltaFriendlyNewFileRecompressionPlan()) {
+      dataOut.writeLong(range.getOffset());
+      dataOut.writeLong(range.getLength());
+      // Write the deflate information
+      dataOut.write(PatchConstants.CompatibilityWindowId.DEFAULT_DEFLATE.patchValue);
+      dataOut.write(range.getMetadata().level);
+      dataOut.write(range.getMetadata().strategy);
+      dataOut.write(range.getMetadata().nowrap ? 1 : 0);
+    }
+
+    // Now the delta section
+    // First write the number of deltas present in the patch. In v1, there is always exactly one
+    // delta, and it is for the entire input; in future versions there may be multiple deltas, of
+    // arbitrary types.
+    dataOut.writeInt(1);
+    // In v1 the delta format is always bsdiff, so write it unconditionally.
+    dataOut.write(PatchConstants.DeltaFormat.BSDIFF.patchValue);
+
+    // Write the working ranges. In v1 these are always the entire contents of the delta-friendly
+    // old file and the delta-friendly new file. These are for forward compatibility with future
+    // versions that may allow deltas of arbitrary formats to be mapped to arbitrary ranges.
+    dataOut.writeLong(0); // i.e., start of the working range in the delta-friendly old file
+    dataOut.writeLong(deltaFriendlyOldFileSize); // i.e., length of the working range in old
+    dataOut.writeLong(0); // i.e., start of the working range in the delta-friendly new file
+    dataOut.writeLong(deltaFriendlyNewFileSize); // i.e., length of the working range in new
+
+    // Finally, the length of the delta and the delta itself.
+    dataOut.writeLong(deltaFile.length());
+    try (FileInputStream deltaFileIn = new FileInputStream(deltaFile);
+        BufferedInputStream deltaIn = new BufferedInputStream(deltaFileIn)) {
+      byte[] buffer = new byte[32768];
+      int numRead = 0;
+      while ((numRead = deltaIn.read(buffer)) >= 0) {
+        dataOut.write(buffer, 0, numRead);
+      }
+    }
+    dataOut.flush();
+  }
+}
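
To make the v1 layout concrete, the fixed header written above can be read back symmetrically with a DataInputStream; the field order and widths mirror writeV1Patch exactly. This is only a sketch of the format, not the project's patch applier.

    // Sketch only; imports and error handling elided.
    static void readV1PatchHeader(InputStream in) throws IOException {
      DataInputStream dataIn = new DataInputStream(in);
      byte[] expectedIdentifier = PatchConstants.IDENTIFIER.getBytes("US-ASCII");
      byte[] actualIdentifier = new byte[expectedIdentifier.length];
      dataIn.readFully(actualIdentifier);
      if (!Arrays.equals(expectedIdentifier, actualIdentifier)) {
        throw new IOException("Bad identifier");
      }
      dataIn.readInt(); // flags (reserved, currently always zero)
      long deltaFriendlyOldFileSize = dataIn.readLong();
      int numOldFileUncompressionRanges = dataIn.readInt();
      for (int i = 0; i < numOldFileUncompressionRanges; i++) {
        long offset = dataIn.readLong();
        long length = dataIn.readLong();
        // ... record the uncompression range (offset, length) ...
      }
      // The recompression plan, the delta descriptor and the delta bytes follow,
      // in the same order as written by writeV1Patch.
    }
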
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/PreDiffExecutor.java b/generator/src/main/java/com/google/archivepatcher/generator/PreDiffExecutor.java
new file mode 100644
index 0000000..7a83ed6
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/PreDiffExecutor.java
@@ -0,0 +1,233 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import com.google.archivepatcher.generator.DefaultDeflateCompressionDiviner.DivinationResult;
+import com.google.archivepatcher.shared.DeltaFriendlyFile;
+import com.google.archivepatcher.shared.JreDeflateParameters;
+import com.google.archivepatcher.shared.TypedRange;
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Prepares resources for differencing.
+ */
+public class PreDiffExecutor {
+
+  /** A helper class to build a {@link PreDiffExecutor} with a variety of configurations. */
+  public static final class Builder {
+    private File originalOldFile;
+    private File originalNewFile;
+    private File deltaFriendlyOldFile;
+    private File deltaFriendlyNewFile;
+    private List<RecommendationModifier> recommendationModifiers =
+        new ArrayList<RecommendationModifier>();
+
+    /**
+     * Sets the original, read-only input files to the patch generation process. This has to be
+     * called at least once, and both arguments must be non-null.
+     *
+     * @param originalOldFile the original old file to read (will not be modified).
+     * @param originalNewFile the original new file to read (will not be modified).
+     * @return this builder
+     */
+    public Builder readingOriginalFiles(File originalOldFile, File originalNewFile) {
+      if (originalOldFile == null || originalNewFile == null) {
+        throw new IllegalStateException("do not set null original input files");
+      }
+      this.originalOldFile = originalOldFile;
+      this.originalNewFile = originalNewFile;
+      return this;
+    }
+
+    /**
+     * Sets the output files that will hold the delta-friendly intermediate binaries used in patch
+     * generation. If called, both arguments must be non-null.
+     *
+     * @param deltaFriendlyOldFile the intermediate file to write (will be overwritten if it exists)
+     * @param deltaFriendlyNewFile the intermediate file to write (will be overwritten if it exists)
+     * @return this builder
+     */
+    public Builder writingDeltaFriendlyFiles(File deltaFriendlyOldFile, File deltaFriendlyNewFile) {
+      if (deltaFriendlyOldFile == null || deltaFriendlyNewFile == null) {
+        throw new IllegalStateException("do not set null delta-friendly files");
+      }
+      this.deltaFriendlyOldFile = deltaFriendlyOldFile;
+      this.deltaFriendlyNewFile = deltaFriendlyNewFile;
+      return this;
+    }
+
+    /**
+     * Appends an optional {@link RecommendationModifier} to be used during the generation of the
+     * {@link PreDiffPlan} and/or delta-friendly blobs.
+     *
+     * @param recommendationModifier the modifier to set
+     * @return this builder
+     */
+    public Builder withRecommendationModifier(RecommendationModifier recommendationModifier) {
+      if (recommendationModifier == null) {
+        throw new IllegalArgumentException("recommendationModifier cannot be null");
+      }
+      this.recommendationModifiers.add(recommendationModifier);
+      return this;
+    }
+
+    /**
+     * Builds and returns a {@link PreDiffExecutor} according to the current configuration.
+     *
+     * @return the executor
+     */
+    public PreDiffExecutor build() {
+      if (originalOldFile == null) {
+        // readingOriginalFiles() ensures old and new are non-null when called, so check either.
+        throw new IllegalStateException("original input files cannot be null");
+      }
+      return new PreDiffExecutor(
+          originalOldFile,
+          originalNewFile,
+          deltaFriendlyOldFile,
+          deltaFriendlyNewFile,
+          recommendationModifiers);
+    }
+  }
+
+  /** The original old file to read (will not be modified). */
+  private final File originalOldFile;
+
+  /** The original new file to read (will not be modified). */
+  private final File originalNewFile;
+
+  /**
+   * Optional file to write the delta-friendly version of the original old file to (will be created,
+   * overwriting if it already exists). If null, only the read-only planning step can be performed.
+   */
+  private final File deltaFriendlyOldFile;
+
+  /**
+   * Optional file to write the delta-friendly version of the original new file to (will be created,
+   * overwriting if it already exists). If null, only the read-only planning step can be performed.
+   */
+  private final File deltaFriendlyNewFile;
+
+  /**
+   * Optional {@link RecommendationModifier}s to be used for modifying the patch to be generated.
+   */
+  private final List<RecommendationModifier> recommendationModifiers;
+
+  /** Constructs a new PreDiffExecutor to work with the specified configuration. */
+  private PreDiffExecutor(
+      File originalOldFile,
+      File originalNewFile,
+      File deltaFriendlyOldFile,
+      File deltaFriendlyNewFile,
+      List<RecommendationModifier> recommendationModifiers) {
+    this.originalOldFile = originalOldFile;
+    this.originalNewFile = originalNewFile;
+    this.deltaFriendlyOldFile = deltaFriendlyOldFile;
+    this.deltaFriendlyNewFile = deltaFriendlyNewFile;
+    this.recommendationModifiers = recommendationModifiers;
+  }
+
+  /**
+   * Prepare resources for diffing and returns the completed plan.
+   *
+   * @return the plan
+   * @throws IOException if unable to complete the operation due to an I/O error
+   */
+  public PreDiffPlan prepareForDiffing() throws IOException {
+    PreDiffPlan preDiffPlan = generatePreDiffPlan();
+    List<TypedRange<JreDeflateParameters>> deltaFriendlyNewFileRecompressionPlan = null;
+    if (deltaFriendlyOldFile != null) {
+      // Builder.writingDeltaFriendlyFiles() ensures old and new are non-null when called, so a
+      // check on either is sufficient.
+      deltaFriendlyNewFileRecompressionPlan =
+          Collections.unmodifiableList(generateDeltaFriendlyFiles(preDiffPlan));
+    }
+    return new PreDiffPlan(
+        preDiffPlan.getQualifiedRecommendations(),
+        preDiffPlan.getOldFileUncompressionPlan(),
+        preDiffPlan.getNewFileUncompressionPlan(),
+        deltaFriendlyNewFileRecompressionPlan);
+  }
+
+  /**
+   * Generate the delta-friendly files and return the plan for recompressing the delta-friendly new
+   * file back into the original new file.
+   *
+   * @param preDiffPlan the plan to execute
+   * @return as described
+   * @throws IOException if anything goes wrong
+   */
+  private List<TypedRange<JreDeflateParameters>> generateDeltaFriendlyFiles(PreDiffPlan preDiffPlan)
+      throws IOException {
+    try (FileOutputStream out = new FileOutputStream(deltaFriendlyOldFile);
+        BufferedOutputStream bufferedOut = new BufferedOutputStream(out)) {
+      DeltaFriendlyFile.generateDeltaFriendlyFile(
+          preDiffPlan.getOldFileUncompressionPlan(), originalOldFile, bufferedOut);
+    }
+    try (FileOutputStream out = new FileOutputStream(deltaFriendlyNewFile);
+        BufferedOutputStream bufferedOut = new BufferedOutputStream(out)) {
+      return DeltaFriendlyFile.generateDeltaFriendlyFile(
+          preDiffPlan.getNewFileUncompressionPlan(), originalNewFile, bufferedOut);
+    }
+  }
+
+  /**
+   * Analyze the original old and new files and generate a plan to transform them into their
+   * delta-friendly equivalents.
+   *
+   * @return the plan, which does not yet contain information for recompressing the delta-friendly
+   *     new archive.
+   * @throws IOException if anything goes wrong
+   */
+  private PreDiffPlan generatePreDiffPlan() throws IOException {
+    Map<ByteArrayHolder, MinimalZipEntry> originalOldArchiveZipEntriesByPath =
+        new HashMap<ByteArrayHolder, MinimalZipEntry>();
+    Map<ByteArrayHolder, MinimalZipEntry> originalNewArchiveZipEntriesByPath =
+        new HashMap<ByteArrayHolder, MinimalZipEntry>();
+    Map<ByteArrayHolder, JreDeflateParameters> originalNewArchiveJreDeflateParametersByPath =
+        new HashMap<ByteArrayHolder, JreDeflateParameters>();
+
+    for (MinimalZipEntry zipEntry : MinimalZipArchive.listEntries(originalOldFile)) {
+      ByteArrayHolder key = new ByteArrayHolder(zipEntry.getFileNameBytes());
+      originalOldArchiveZipEntriesByPath.put(key, zipEntry);
+    }
+
+    DefaultDeflateCompressionDiviner diviner = new DefaultDeflateCompressionDiviner();
+    for (DivinationResult divinationResult : diviner.divineDeflateParameters(originalNewFile)) {
+      ByteArrayHolder key =
+          new ByteArrayHolder(divinationResult.minimalZipEntry.getFileNameBytes());
+      originalNewArchiveZipEntriesByPath.put(key, divinationResult.minimalZipEntry);
+      originalNewArchiveJreDeflateParametersByPath.put(key, divinationResult.divinedParameters);
+    }
+
+    PreDiffPlanner preDiffPlanner =
+        new PreDiffPlanner(
+            originalOldFile,
+            originalOldArchiveZipEntriesByPath,
+            originalNewFile,
+            originalNewArchiveZipEntriesByPath,
+            originalNewArchiveJreDeflateParametersByPath,
+            recommendationModifiers.toArray(new RecommendationModifier[] {}));
+    return preDiffPlanner.generatePreDiffPlan();
+  }
+}
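
A typical invocation of the builder API above, assuming the caller provides both the original archives and scratch files for the delta-friendly intermediates (paths are illustrative):

    // Sketch only; imports and exception handling elided.
    File oldArchive = new File("old.zip");
    File newArchive = new File("new.zip");
    File deltaFriendlyOld = File.createTempFile("deltafriendly", "old");
    File deltaFriendlyNew = File.createTempFile("deltafriendly", "new");
    PreDiffExecutor executor =
        new PreDiffExecutor.Builder()
            .readingOriginalFiles(oldArchive, newArchive)
            .writingDeltaFriendlyFiles(deltaFriendlyOld, deltaFriendlyNew)
            .build();
    PreDiffPlan plan = executor.prepareForDiffing();
    // Because delta-friendly files were requested, the returned plan also carries
    // the delta-friendly new file recompression plan.
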
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/PreDiffPlan.java b/generator/src/main/java/com/google/archivepatcher/generator/PreDiffPlan.java
new file mode 100644
index 0000000..b87e678
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/PreDiffPlan.java
@@ -0,0 +1,156 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import java.util.Iterator;
+import java.util.List;
+
+import com.google.archivepatcher.shared.JreDeflateParameters;
+import com.google.archivepatcher.shared.TypedRange;
+
+/**
+ * A plan for transforming the old and the new archive prior to running a diffing algorithm and for
+ * recompressing the delta-friendly new archive afterwards.
+ * <p>
+ * The plan for uncompressing the old file is a {@link List} of {@link TypedRange} entries with void
+ * metadata. This describes the chunks of the old file that need to be uncompressed prior to
+ * diffing, in file order. The file produced by executing this plan is the "delta-friendly" old
+ * archive.
+ * <p>
+ * The plan for uncompressing the new file is similarly a {@link List} of {@link TypedRange}
+ * entries, but this time the metadata is of the type {@link JreDeflateParameters}. This describes
+ * the chunks of the new file that need to be uncompressed prior to diffing, in file order. The
+ * {@link JreDeflateParameters} metadata indicate the settings that need to be used to generate the
+ * inverse transform (the delta friendly new file recompression plan; see below). The file produced
+ * by executing this plan is the "delta-friendly" new archive.
+ * <p>
+ * The plan for recompressing the delta-friendly new archive is again a {@link List} of
+ * {@link TypedRange} entries with {@link JreDeflateParameters} metadata. This describes the chunks
+ * of the delta-friendly new file that need to be recompressed after diffing, again in file order.
+ * The {@link JreDeflateParameters} metadata indicate the settings to use during recompression. The
+ * file produced by executing this plan is the new archive, i.e. it reverses the transform of the
+ * new file uncompression plan.
+ * <p>
+ * Finally, a {@link List} of all the {@link QualifiedRecommendation}s upon which all the plans are
+ * based is available via {@link #getQualifiedRecommendations()}.
+ */
+public class PreDiffPlan {
+  /**
+   * The plan for uncompressing the old file, in file order.
+   */
+  private final List<TypedRange<Void>> oldFileUncompressionPlan;
+
+  /**
+   * The plan for uncompressing the new file, in file order.
+   */
+  private final List<TypedRange<JreDeflateParameters>> newFileUncompressionPlan;
+
+  /**
+   * The plan for recompressing the delta-friendly new file, in file order.
+   */
+  private final List<TypedRange<JreDeflateParameters>> deltaFriendlyNewFileRecompressionPlan;
+
+  /**
+   * The recommendations upon which the plans are based.
+   */
+  private final List<QualifiedRecommendation> qualifiedRecommendations;
+
+  /**
+   * Constructs a new plan.
+   * @param qualifiedRecommendations the recommendations upon which the plans are based
+   * @param oldFileUncompressionPlan the plan for uncompressing the old file, in file order
+   * @param newFileUncompressionPlan the plan for uncompressing the new file, in file order
+   */
+  public PreDiffPlan(
+      List<QualifiedRecommendation> qualifiedRecommendations,
+      List<TypedRange<Void>> oldFileUncompressionPlan,
+      List<TypedRange<JreDeflateParameters>> newFileUncompressionPlan) {
+    this(qualifiedRecommendations, oldFileUncompressionPlan, newFileUncompressionPlan, null);
+  }
+
+  /**
+   * Constructs a new plan.
+   * @param qualifiedRecommendations the recommendations upon which the plans are based
+   * @param oldFileUncompressionPlan the plan for uncompressing the old file, in file order
+   * @param newFileUncompressionPlan the plan for uncompressing the new file, in file order
+   * @param deltaFriendlyNewFileRecompressionPlan the plan for recompressing the delta-friendly
+   * new file, in file order
+   */
+  public PreDiffPlan(
+      List<QualifiedRecommendation> qualifiedRecommendations,
+      List<TypedRange<Void>> oldFileUncompressionPlan,
+      List<TypedRange<JreDeflateParameters>> newFileUncompressionPlan,
+      List<TypedRange<JreDeflateParameters>> deltaFriendlyNewFileRecompressionPlan) {
+    ensureOrdered(oldFileUncompressionPlan);
+    ensureOrdered(newFileUncompressionPlan);
+    ensureOrdered(deltaFriendlyNewFileRecompressionPlan);
+    this.qualifiedRecommendations = qualifiedRecommendations;
+    this.oldFileUncompressionPlan = oldFileUncompressionPlan;
+    this.newFileUncompressionPlan = newFileUncompressionPlan;
+    this.deltaFriendlyNewFileRecompressionPlan = deltaFriendlyNewFileRecompressionPlan;
+  }
+
+  /**
+   * Ensures that the lists passed into the constructors are ordered and throws an exception if
+   * they are not. Null lists and lists whose size is less than 2 are ignored.
+   * @param list the list to check
+   */
+  private <T> void ensureOrdered(List<TypedRange<T>> list) {
+    if (list != null && list.size() >= 2) {
+      Iterator<TypedRange<T>> iterator = list.iterator();
+      TypedRange<T> lastEntry = iterator.next();
+      while (iterator.hasNext()) {
+        TypedRange<T> nextEntry = iterator.next();
+        if (lastEntry.compareTo(nextEntry) > 0) {
+          throw new IllegalArgumentException("List must be ordered");
+        }
+        lastEntry = nextEntry;
+      }
+    }
+  }
+
+  /**
+   * Returns the plan for uncompressing the old file to create the delta-friendly old file.
+   * @return the plan
+   */
+  public final List<TypedRange<Void>> getOldFileUncompressionPlan() {
+    return oldFileUncompressionPlan;
+  }
+
+  /**
+   * Returns the plan for uncompressing the new file to create the delta-friendly new file.
+   * @return the plan
+   */
+  public final List<TypedRange<JreDeflateParameters>> getNewFileUncompressionPlan() {
+    return newFileUncompressionPlan;
+  }
+
+  /**
+   * Returns the plan for recompressing the delta-friendly new file to regenerate the original new
+   * file.
+   * @return the plan
+   */
+  public final List<TypedRange<JreDeflateParameters>> getDeltaFriendlyNewFileRecompressionPlan() {
+    return deltaFriendlyNewFileRecompressionPlan;
+  }
+
+  /**
+   * Returns the recommendations upon which the plans are based.
+   * @return the recommendations
+   */
+  public final List<QualifiedRecommendation> getQualifiedRecommendations() {
+    return qualifiedRecommendations;
+  }
+}
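
Because ensureOrdered runs in every constructor, an out-of-order plan fails fast. A sketch, assuming (as the usage elsewhere in this patch suggests) that TypedRange takes (offset, length, metadata) and compares by offset:

    // Sketch only; imports elided.
    List<TypedRange<Void>> outOfOrder = Arrays.asList(
        new TypedRange<Void>(100, 10, null), // starts at offset 100
        new TypedRange<Void>(0, 10, null)); // starts at offset 0: out of order
    // Throws IllegalArgumentException("List must be ordered"):
    new PreDiffPlan(
        Collections.<QualifiedRecommendation>emptyList(),
        outOfOrder,
        Collections.<TypedRange<JreDeflateParameters>>emptyList());
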
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/PreDiffPlanner.java b/generator/src/main/java/com/google/archivepatcher/generator/PreDiffPlanner.java
new file mode 100644
index 0000000..6b2d1ee
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/PreDiffPlanner.java
@@ -0,0 +1,359 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import com.google.archivepatcher.generator.similarity.Crc32SimilarityFinder;
+import com.google.archivepatcher.generator.similarity.SimilarityFinder;
+import com.google.archivepatcher.shared.JreDeflateParameters;
+import com.google.archivepatcher.shared.RandomAccessFileInputStream;
+import com.google.archivepatcher.shared.TypedRange;
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Plans archive transformations to be made prior to differencing.
+ */
+class PreDiffPlanner {
+  /**
+   * The old archive.
+   */
+  private final File oldFile;
+
+  /**
+   * The new archive.
+   */
+  private final File newFile;
+
+  /**
+   * The entries in the old archive, with paths as keys.
+   */
+  private final Map<ByteArrayHolder, MinimalZipEntry> oldArchiveZipEntriesByPath;
+
+  /**
+   * The entries in the new archive, with paths as keys.
+   */
+  private final Map<ByteArrayHolder, MinimalZipEntry> newArchiveZipEntriesByPath;
+
+  /**
+   * The divined parameters for compression of the entries in the new archive, with paths as keys.
+   */
+  private final Map<ByteArrayHolder, JreDeflateParameters> newArchiveJreDeflateParametersByPath;
+
+  /**
+   * Optional {@link RecommendationModifier}s that will be applied after the default recommendations
+   * have been made but before the {@link PreDiffPlan} is constructed.
+   */
+  private final List<RecommendationModifier> recommendationModifiers;
+
+  /**
+   * Constructs a new planner that will work on the specified inputs
+   *
+   * @param oldFile the old file, used to compare bytes between old and new entries as necessary
+   * @param oldArchiveZipEntriesByPath the entries in the old archive, with paths as keys
+   * @param newFile the new file, used to compare bytes between old and new entries as necessary
+   * @param newArchiveZipEntriesByPath the entries in the new archive, with paths as keys
+   * @param newArchiveJreDeflateParametersByPath the {@link JreDeflateParameters} for each entry in
+   *     the new archive, with paths as keys
+   * @param recommendationModifiers optionally, {@link RecommendationModifier}s to be applied after
+   *     the default recommendations have been made but before the {@link PreDiffPlan} is generated
+   *     in {@link #generatePreDiffPlan()}.
+   */
+  PreDiffPlanner(
+      File oldFile,
+      Map<ByteArrayHolder, MinimalZipEntry> oldArchiveZipEntriesByPath,
+      File newFile,
+      Map<ByteArrayHolder, MinimalZipEntry> newArchiveZipEntriesByPath,
+      Map<ByteArrayHolder, JreDeflateParameters> newArchiveJreDeflateParametersByPath,
+      RecommendationModifier... recommendationModifiers) {
+    this.oldFile = oldFile;
+    this.oldArchiveZipEntriesByPath = oldArchiveZipEntriesByPath;
+    this.newFile = newFile;
+    this.newArchiveZipEntriesByPath = newArchiveZipEntriesByPath;
+    this.newArchiveJreDeflateParametersByPath = newArchiveJreDeflateParametersByPath;
+    this.recommendationModifiers =
+          Collections.unmodifiableList(Arrays.asList(recommendationModifiers));
+  }
+
+  /**
+   * Generates and returns the plan for archive transformations to be made prior to differencing.
+   * The resulting {@link PreDiffPlan} has the old and new file uncompression plans set. The
+   * delta-friendly new file recompression plan is <em>not</em> set at this time.
+   * @return the plan
+   * @throws IOException if there are any problems reading the input files
+   */
+  PreDiffPlan generatePreDiffPlan() throws IOException {
+    List<QualifiedRecommendation> recommendations = getDefaultRecommendations();
+    for (RecommendationModifier modifier : recommendationModifiers) {
+      // Allow changing the recommendations based on arbitrary criteria.
+      recommendations = modifier.getModifiedRecommendations(oldFile, newFile, recommendations);
+    }
+
+    // Process recommendations to extract ranges for decompression & recompression
+    Set<TypedRange<Void>> oldFilePlan = new HashSet<>();
+    Set<TypedRange<JreDeflateParameters>> newFilePlan = new HashSet<>();
+    for (QualifiedRecommendation recommendation : recommendations) {
+      if (recommendation.getRecommendation().uncompressOldEntry) {
+        long offset = recommendation.getOldEntry().getFileOffsetOfCompressedData();
+        long length = recommendation.getOldEntry().getCompressedSize();
+        TypedRange<Void> range = new TypedRange<Void>(offset, length, null);
+        oldFilePlan.add(range);
+      }
+      if (recommendation.getRecommendation().uncompressNewEntry) {
+        long offset = recommendation.getNewEntry().getFileOffsetOfCompressedData();
+        long length = recommendation.getNewEntry().getCompressedSize();
+        JreDeflateParameters newJreDeflateParameters =
+            newArchiveJreDeflateParametersByPath.get(
+                new ByteArrayHolder(recommendation.getNewEntry().getFileNameBytes()));
+        TypedRange<JreDeflateParameters> range =
+            new TypedRange<JreDeflateParameters>(offset, length, newJreDeflateParameters);
+        newFilePlan.add(range);
+      }
+    }
+
+    List<TypedRange<Void>> oldFilePlanList = new ArrayList<>(oldFilePlan);
+    Collections.sort(oldFilePlanList);
+    List<TypedRange<JreDeflateParameters>> newFilePlanList = new ArrayList<>(newFilePlan);
+    Collections.sort(newFilePlanList);
+    return new PreDiffPlan(
+        Collections.unmodifiableList(recommendations),
+        Collections.unmodifiableList(oldFilePlanList),
+        Collections.unmodifiableList(newFilePlanList));
+  }
+
+  /**
+   * Analyzes the input files and returns the default recommendations for each entry in the new
+   * archive.
+   *
+   * @return the recommendations
+   * @throws IOException if anything goes wrong
+   */
+  private List<QualifiedRecommendation> getDefaultRecommendations() throws IOException {
+    List<QualifiedRecommendation> recommendations = new ArrayList<>();
+
+    // This will be used to find files that have been renamed, but not modified. This is relatively
+    // cheap to construct as it just requires indexing all entries by the uncompressed CRC32, and
+    // the CRC32 is already available in the ZIP headers.
+    SimilarityFinder trivialRenameFinder =
+        new Crc32SimilarityFinder(oldFile, oldArchiveZipEntriesByPath.values());
+
+    // Iterate over every pair of entries and get a recommendation for what to do.
+    for (Map.Entry<ByteArrayHolder, MinimalZipEntry> newEntry :
+        newArchiveZipEntriesByPath.entrySet()) {
+      ByteArrayHolder newEntryPath = newEntry.getKey();
+      MinimalZipEntry oldZipEntry = oldArchiveZipEntriesByPath.get(newEntryPath);
+      if (oldZipEntry == null) {
+        // The path is only present in the new archive, not in the old archive. Try to find a
+        // similar file in the old archive that can serve as a diff base for the new file.
+        List<MinimalZipEntry> identicalEntriesInOldArchive =
+            trivialRenameFinder.findSimilarFiles(newFile, newEntry.getValue());
+        if (!identicalEntriesInOldArchive.isEmpty()) {
+          // An identical file exists in the old archive at a different path. Use it for the
+          // recommendation and carry on with the normal logic.
+          // All entries in the returned list are identical, so just pick the first one.
+          // NB, in principle it would be optimal to select the file that required the least work
+          // to apply the patch - in practice, it is unlikely that an archive will contain multiple
+          // copies of the same file that are compressed differently, so don't bother with that
+          // degenerate case.
+          oldZipEntry = identicalEntriesInOldArchive.get(0);
+        }
+      }
+
+      // If the attempt to find a suitable diff base for the new entry has failed, oldZipEntry is
+      // null (nothing to do in that case). Otherwise, there is an old entry that is relevant, so
+      // get a recommendation for what to do.
+      if (oldZipEntry != null) {
+        recommendations.add(getRecommendation(oldZipEntry, newEntry.getValue()));
+      }
+    }
+    return recommendations;
+  }
+
+  /**
+   * Determines the right {@link QualifiedRecommendation} for handling the (oldEntry, newEntry)
+   * tuple.
+   * @param oldEntry the entry in the old archive
+   * @param newEntry the entry in the new archive
+   * @return the recommendation
+   * @throws IOException if there are any problems reading the input files
+   */
+  private QualifiedRecommendation getRecommendation(MinimalZipEntry oldEntry, MinimalZipEntry newEntry)
+      throws IOException {
+
+    // Reject anything that is unsuitable for uncompressed diffing.
+    if (unsuitable(oldEntry, newEntry)) {
+      return new QualifiedRecommendation(
+          oldEntry,
+          newEntry,
+          Recommendation.UNCOMPRESS_NEITHER,
+          RecommendationReason.UNSUITABLE);
+    }
+
+    // If both entries are already uncompressed there is nothing to do.
+    if (bothEntriesUncompressed(oldEntry, newEntry)) {
+      return new QualifiedRecommendation(
+          oldEntry,
+          newEntry,
+          Recommendation.UNCOMPRESS_NEITHER,
+          RecommendationReason.BOTH_ENTRIES_UNCOMPRESSED);
+    }
+
+    // The following are now true:
+    // 1. At least one of the entries is compressed.
+    // 2. The old entry is either uncompressed, or is compressed with deflate.
+    // 3. The new entry is either uncompressed, or is reproducibly compressed with deflate.
+
+    if (uncompressedChangedToCompressed(oldEntry, newEntry)) {
+      return new QualifiedRecommendation(
+          oldEntry,
+          newEntry,
+          Recommendation.UNCOMPRESS_NEW,
+          RecommendationReason.UNCOMPRESSED_CHANGED_TO_COMPRESSED);
+    }
+
+    if (compressedChangedToUncompressed(oldEntry, newEntry)) {
+      return new QualifiedRecommendation(
+          oldEntry,
+          newEntry,
+          Recommendation.UNCOMPRESS_OLD,
+          RecommendationReason.COMPRESSED_CHANGED_TO_UNCOMPRESSED);
+    }
+
+    // At this point, both entries must be compressed with deflate.
+    if (compressedBytesChanged(oldEntry, newEntry)) {
+      return new QualifiedRecommendation(
+          oldEntry,
+          newEntry,
+          Recommendation.UNCOMPRESS_BOTH,
+          RecommendationReason.COMPRESSED_BYTES_CHANGED);
+    }
+
+    // If the compressed bytes have not changed, there is no need to do anything.
+    return new QualifiedRecommendation(
+        oldEntry,
+        newEntry,
+        Recommendation.UNCOMPRESS_NEITHER,
+        RecommendationReason.COMPRESSED_BYTES_IDENTICAL);
+  }
+
+  /**
+   * Returns true if the entries are unsuitable for doing an uncompressed diff. This method returns
+   * true if either of the entries is compressed in an unsupported way (a non-deflate compression
+   * algorithm) or if the new entry is compressed in a supported but unreproducible way.
+   * @param oldEntry the entry in the old archive
+   * @param newEntry the entry in the new archive
+   * @return true if unsuitable
+   */
+  private boolean unsuitable(MinimalZipEntry oldEntry, MinimalZipEntry newEntry) {
+    if (oldEntry.getCompressionMethod() != 0 && !oldEntry.isDeflateCompressed()) {
+      // The old entry is compressed in a way that is not supported. It cannot be uncompressed, so
+      // no uncompressed diff is possible; leave both old and new alone.
+      return true;
+    }
+    if (newEntry.getCompressionMethod() != 0 && !newEntry.isDeflateCompressed()) {
+      // The new entry is compressed in a way that is not supported. Same result as above.
+      return true;
+    }
+    JreDeflateParameters newJreDeflateParameters =
+        newArchiveJreDeflateParametersByPath.get(new ByteArrayHolder(newEntry.getFileNameBytes()));
+    if (newEntry.isDeflateCompressed() && newJreDeflateParameters == null) {
+      // The new entry is compressed via deflate, but the parameters were undivinable. Therefore the
+      // new entry cannot be recompressed, so leave both old and new alone.
+      return true;
+    }
+    return false;
+  }
+
+  /**
+   * Returns true if the entries are already optimal for doing an uncompressed diff. This method
+   * returns true if both of the entries are already uncompressed, i.e. are already in the best form
+   * for diffing.
+   * @param oldEntry the entry in the old archive
+   * @param newEntry the entry in the new archive
+   * @return as described
+   */
+  private boolean bothEntriesUncompressed(MinimalZipEntry oldEntry, MinimalZipEntry newEntry) {
+    return oldEntry.getCompressionMethod() == 0 && newEntry.getCompressionMethod() == 0;
+  }
+
+  /**
+   * Returns true if the entry is uncompressed in the old archive and compressed in the new archive.
+   * This method does not check whether or not the compression is reproducible. It is assumed that
+   * any compressed entries encountered are reproducibly compressed.
+   * @param oldEntry the entry in the old archive
+   * @param newEntry the entry in the new archive
+   * @return as described
+   */
+  private boolean uncompressedChangedToCompressed(
+      MinimalZipEntry oldEntry, MinimalZipEntry newEntry) {
+    return oldEntry.getCompressionMethod() == 0 && newEntry.getCompressionMethod() != 0;
+  }
+
+  /**
+   * Returns true if the entry is compressed in the old archive and uncompressed in the new archive.
+   * This method does not check whether or not the compression is reproducible because that
+   * information is irrelevant to this decision (it does not matter whether the compression in the
+   * old archive is reproducible or not, because that data does not need to be recompressed at patch
+   * apply time).
+   * @param oldEntry the entry in the old archive
+   * @param newEntry the entry in the new archive
+   * @return as described
+   */
+  private boolean compressedChangedToUncompressed(
+      MinimalZipEntry oldEntry, MinimalZipEntry newEntry) {
+    return newEntry.getCompressionMethod() == 0 && oldEntry.getCompressionMethod() != 0;
+  }
+
+  /**
+   * Checks if the compressed bytes in the specified entries have changed. No attempt is made to
+   * inflate, this method just examines the raw bytes that represent the content in the specified
+   * entries and returns true if they are different.
+   * @param oldEntry the entry in the old archive
+   * @param newEntry the entry in the new archive
+   * @return true as described above
+   * @throws IOException if unable to read
+   */
+  private boolean compressedBytesChanged(MinimalZipEntry oldEntry, MinimalZipEntry newEntry)
+      throws IOException {
+    if (oldEntry.getCompressedSize() != newEntry.getCompressedSize()) {
+      // Length is not the same, so content cannot match.
+      return true;
+    }
+    byte[] buffer = new byte[4096];
+    int numRead = 0;
+    try (RandomAccessFileInputStream oldRafis =
+            new RandomAccessFileInputStream(
+                oldFile, oldEntry.getFileOffsetOfCompressedData(), oldEntry.getCompressedSize());
+        RandomAccessFileInputStream newRafis =
+            new RandomAccessFileInputStream(
+                newFile, newEntry.getFileOffsetOfCompressedData(), newEntry.getCompressedSize());
+        MatchingOutputStream matcher = new MatchingOutputStream(oldRafis, 4096)) {
+      while ((numRead = newRafis.read(buffer)) >= 0) {
+        try {
+          matcher.write(buffer, 0, numRead);
+        } catch (MismatchException mismatched) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+}
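
The rename detection used by getDefaultRecommendations can also be exercised in isolation with the same API; a sketch, where oldEntries and newEntries stand in for the entry collections the planner builds:

    // Sketch only; imports elided.
    SimilarityFinder finder = new Crc32SimilarityFinder(oldFile, oldEntries);
    for (MinimalZipEntry newEntry : newEntries) {
      List<MinimalZipEntry> matches = finder.findSimilarFiles(newFile, newEntry);
      if (!matches.isEmpty()) {
        // matches.get(0) matches newEntry's uncompressed CRC32 and is treated as
        // identical content, possibly at a different path; it can serve as a diff base.
      }
    }
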
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/QualifiedRecommendation.java b/generator/src/main/java/com/google/archivepatcher/generator/QualifiedRecommendation.java
new file mode 100644
index 0000000..1ae1574
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/QualifiedRecommendation.java
@@ -0,0 +1,153 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+/**
+ * A fully qualified recommendation, consisting of an {@link MinimalZipEntry} from the old file,
+ * a {@link MinimalZipEntry} from the new file, a {@link Recommendation} for how to proceed and a
+ * {@link RecommendationReason} for that recommendation.
+ */
+public class QualifiedRecommendation {
+  /**
+   * The entry in the old file.
+   */
+  private final MinimalZipEntry oldEntry;
+
+  /**
+   * The entry in the new file.
+   */
+  private final MinimalZipEntry newEntry;
+
+  /**
+   * The recommendation for how to proceed on the pair of entries.
+   */
+  private final Recommendation recommendation;
+
+  /**
+   * The reason for the recommendation.
+   */
+  private final RecommendationReason reason;
+
+  /**
+   * Construct a new qualified recommendation with the specified data.
+   * @param oldEntry the entry in the old file
+   * @param newEntry the entry in the new file
+   * @param recommendation the recommendation for this tuple of entries
+   * @param reason the reason for the recommendation
+   */
+  public QualifiedRecommendation(
+      MinimalZipEntry oldEntry,
+      MinimalZipEntry newEntry,
+      Recommendation recommendation,
+      RecommendationReason reason) {
+    super();
+    this.oldEntry = oldEntry;
+    this.newEntry = newEntry;
+    this.recommendation = recommendation;
+    this.reason = reason;
+  }
+
+  /**
+   * Returns the entry in the old file.
+   * @return as described
+   */
+  public MinimalZipEntry getOldEntry() {
+    return oldEntry;
+  }
+
+  /**
+   * Returns the entry in the new file.
+   * @return as described
+   */
+  public MinimalZipEntry getNewEntry() {
+    return newEntry;
+  }
+
+  /**
+   * Returns the recommendation for how to proceed for this tuple of entries.
+   * @return as described
+   */
+  public Recommendation getRecommendation() {
+    return recommendation;
+  }
+
+  /**
+   * Returns the reason for the recommendation.
+   * @return as described
+   */
+  public RecommendationReason getReason() {
+    return reason;
+  }
+
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result = prime * result + ((newEntry == null) ? 0 : newEntry.hashCode());
+    result = prime * result + ((oldEntry == null) ? 0 : oldEntry.hashCode());
+    result = prime * result + ((reason == null) ? 0 : reason.hashCode());
+    result = prime * result + ((recommendation == null) ? 0 : recommendation.hashCode());
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (getClass() != obj.getClass()) {
+      return false;
+    }
+    QualifiedRecommendation other = (QualifiedRecommendation) obj;
+    if (newEntry == null) {
+      if (other.newEntry != null) {
+        return false;
+      }
+    } else if (!newEntry.equals(other.newEntry)) {
+      return false;
+    }
+    if (oldEntry == null) {
+      if (other.oldEntry != null) {
+        return false;
+      }
+    } else if (!oldEntry.equals(other.oldEntry)) {
+      return false;
+    }
+    if (reason != other.reason) {
+      return false;
+    }
+    if (recommendation != other.recommendation) {
+      return false;
+    }
+    return true;
+  }
+
+  @Override
+  public String toString() {
+    return "QualifiedRecommendation [oldEntry="
+        + oldEntry.getFileName()
+        + ", newEntry="
+        + newEntry.getFileName()
+        + ", recommendation="
+        + recommendation
+        + ", reason="
+        + reason
+        + "]";
+  }
+
+}
\ No newline at end of file
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/Recommendation.java b/generator/src/main/java/com/google/archivepatcher/generator/Recommendation.java
new file mode 100644
index 0000000..2197ef9
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/Recommendation.java
@@ -0,0 +1,61 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+/**
+ * Recommendations for how to uncompress entries in old and new archives.
+ */
+public enum Recommendation {
+
+  /**
+   * Uncompress only the old entry.
+   */
+  UNCOMPRESS_OLD(true, false),
+
+  /**
+   * Uncompress only the new entry.
+   */
+  UNCOMPRESS_NEW(false, true),
+
+  /**
+   * Uncompress both the old and new entries.
+   */
+  UNCOMPRESS_BOTH(true, true),
+
+  /**
+   * Uncompress neither entry.
+   */
+  UNCOMPRESS_NEITHER(false, false);
+
+  /**
+   * True if the old entry should be uncompressed.
+   */
+  public final boolean uncompressOldEntry;
+
+  /**
+   * True if the new entry should be uncompressed.
+   */
+  public final boolean uncompressNewEntry;
+
+  /**
+   * Constructs a new recommendation with the specified behaviors.
+   * @param uncompressOldEntry true if the old entry should be uncompressed
+   * @param uncompressNewEntry true if the new entry should be uncompressed
+   */
+  private Recommendation(boolean uncompressOldEntry, boolean uncompressNewEntry) {
+    this.uncompressOldEntry = uncompressOldEntry;
+    this.uncompressNewEntry = uncompressNewEntry;
+  }
+}
\ No newline at end of file
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/RecommendationModifier.java b/generator/src/main/java/com/google/archivepatcher/generator/RecommendationModifier.java
new file mode 100644
index 0000000..364cf63
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/RecommendationModifier.java
@@ -0,0 +1,39 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import java.io.File;
+import java.util.List;
+
+/**
+ * Provides a mechanism to review and possibly modify the {@link QualifiedRecommendation}s that will
+ * be used to derive a {@link PreDiffPlan}.
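+ *
+ * <p>A minimal sketch of a conforming implementation (hypothetical, for illustration only):
+ *
+ * <pre>{@code
+ * // Returns the recommendations unchanged. A real implementation may alter the
+ * // recommendation/reason of individual entries, but must preserve the same
+ * // (oldEntry, newEntry) tuples in the returned list.
+ * class NoOpRecommendationModifier implements RecommendationModifier {
+ *   public List<QualifiedRecommendation> getModifiedRecommendations(
+ *       File oldFile, File newFile, List<QualifiedRecommendation> originalRecommendations) {
+ *     return originalRecommendations;
+ *   }
+ * }
+ * }</pre>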
+ */
+public interface RecommendationModifier {
+  /**
+   * Given a list of {@link QualifiedRecommendation} objects, returns a list of the same type that
+   * has been arbitrarily adjusted as desired by the implementation. Implementations must return a
+   * list of recommendations that contains the same tuples of (oldEntry, newEntry) but may change
+   * the results of {@link QualifiedRecommendation#getRecommendation()} and {@link
+   * QualifiedRecommendation#getReason()} to any sane values.
+   *
+   * @param oldFile the old file that is being diffed
+   * @param newFile the new file that is being diffed
+   * @param originalRecommendations the original recommendations
+   * @return the updated list of recommendations
+   */
+  public List<QualifiedRecommendation> getModifiedRecommendations(
+      File oldFile, File newFile, List<QualifiedRecommendation> originalRecommendations);
+}
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/RecommendationReason.java b/generator/src/main/java/com/google/archivepatcher/generator/RecommendationReason.java
new file mode 100644
index 0000000..664944b
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/RecommendationReason.java
@@ -0,0 +1,57 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+/**
+ * Reasons for a corresponding {@link Recommendation}.
+ */
+public enum RecommendationReason {
+  /**
+   * The entry in the new file is compressed in a way that cannot be reliably reproduced (or one of
+   * the entries is compressed using something other than deflate, but this is very uncommon).
+   */
+  UNSUITABLE,
+
+  /**
+   * Both the old and new entries are already uncompressed.
+   */
+  BOTH_ENTRIES_UNCOMPRESSED,
+
+  /**
+   * An entry that was uncompressed in the old file is compressed in the new file.
+   */
+  UNCOMPRESSED_CHANGED_TO_COMPRESSED,
+
+  /**
+   * An entry that was compressed in the old file is uncompressed in the new file.
+   */
+  COMPRESSED_CHANGED_TO_UNCOMPRESSED,
+
+  /**
+   * The compressed bytes in the old file do not match the compressed bytes in the new file.
+   */
+  COMPRESSED_BYTES_CHANGED,
+
+  /** The compressed bytes in the old file are identical to the compressed bytes in the new file. */
+  COMPRESSED_BYTES_IDENTICAL,
+
+  /**
+   * A resource constraint prohibits touching the old entry, the new entry, or both. For example,
+   * there may be a limit on the total amount of temp space that will be available for applying a
+   * patch or a limit on the total amount of CPU time that can be expended on recompression when
+   * applying a patch, etc.
+   */
+  RESOURCE_CONSTRAINED;
+}
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/TempFileHolder.java b/generator/src/main/java/com/google/archivepatcher/generator/TempFileHolder.java
new file mode 100644
index 0000000..ff93f30
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/TempFileHolder.java
@@ -0,0 +1,46 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import java.io.Closeable;
+import java.io.File;
+import java.io.IOException;
+
+/**
+ * A closeable container for a temp file that deletes itself on {@link #close()}. This is convenient
+ * for try-with-resources constructs that need to use temp files in scope.
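+ *
+ * <p>Typical usage, sketched:
+ *
+ * <pre>{@code
+ * try (TempFileHolder holder = new TempFileHolder()) {
+ *   // Use holder.file here; the file is deleted when this block exits.
+ * }
+ * }</pre>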
+ */
+public class TempFileHolder implements Closeable {
+  /**
+   * The file that is wrapped by this holder.
+   */
+  public final File file;
+
+  /**
+   * Create a new temp file and wrap it in an instance of this class. The file is automatically
+   * scheduled for deletion on JVM termination, so it is a serious error to rely on this file path
+   * being a durable artifact.
+   * @throws IOException if unable to create the file
+   */
+  public TempFileHolder() throws IOException {
+    file = File.createTempFile("archive_patcher", "tmp");
+    file.deleteOnExit();
+  }
+
+  @Override
+  public void close() throws IOException {
+    file.delete();
+  }
+}
\ No newline at end of file
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/TotalRecompressionLimiter.java b/generator/src/main/java/com/google/archivepatcher/generator/TotalRecompressionLimiter.java
new file mode 100644
index 0000000..3b42da2
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/TotalRecompressionLimiter.java
@@ -0,0 +1,127 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+
+/**
+ * Limits the total amount of recompression to be performed as part of a patch via the {@link
+ * RecommendationModifier} interface.
+ *
+ * <p>This class is useful for helping to establish an upper bound on the amount of work that needs
+ * to be done to apply a patch. For example, if the patch is to be applied on a device that can
+ * recompress at about 100K/sec and the desire is to keep recompression time to 10 seconds or less,
+ * an upper bound of 1000K would be appropriate.
+ *
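+ * <p>As a sketch, the example bound above could be configured like this (figures are
+ * illustrative):
+ *
+ * <pre>{@code
+ * // Permit at most ~1000K of uncompressed bytes to be recompressed at patch-apply time.
+ * RecommendationModifier limiter = new TotalRecompressionLimiter(1000 * 1024);
+ * }</pre>
+ *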
+ * <p>Please note that there are many factors involved in the total patch-apply time including, but
+ * not limited to, things like the I/O speed of the device applying the patch and the time that is
+ * required to apply the delta to the uncompressed content prior to recompressing.
+ *
+ * <p>This class implements the following algorithm:
+ *
+ * <ol>
+ *   <li>Identify all of the {@link QualifiedRecommendation}s that have {@link
+ *       Recommendation#uncompressNewEntry} set to <code>true</code>. These identify all the entries
+ *       that have changed and that require recompression.
+ *   <li>Sort those {@link QualifiedRecommendation}s in order of decreasing uncompressed size.
+ *   <li>Iterate over the list in order. For each entry, if the uncompressed size is less than the
+ *       number of uncompressed bytes remaining before hitting the cap, retain the recommendation;
+ *       else, downgrade it to {@link Recommendation#UNCOMPRESS_NEITHER} with reason {@link
+ *       RecommendationReason#RESOURCE_CONSTRAINED}.
+ *   <li>Return the resulting list of entries, both retained and downgraded. Note that the order of
+ *       this list may not be the same as the input order (i.e., it has been sorted in order of
+ *       decreasing uncompressed size).
+ * </ol>
+ *
+ * This algorithm attempts to preserve the largest changed resources needing recompression, assuming
+ * that these are the most likely to be delta-friendly and therefore represent the best patch size
+ * savings. This may not be true in <em>all cases</em> but is likely in practice.
+ *
+ * <p>Please note that this algorithm does <em>not</em> limit the size of the temporary files needed
+ * to apply a patch. In particular it does <em>not</em> limit the size of the "delta-friendly old
+ * blob" that is generated during the patch-apply step, since that blob may contain an arbitrary
+ * amount of compressed resources that are not considered here. To limit the size of the
+ * delta-friendly old blob, use a {@link DeltaFriendlyOldBlobSizeLimiter}.
+ */
+public class TotalRecompressionLimiter implements RecommendationModifier {
+
+  /** The maximum number of bytes to allow to be recompressed. */
+  private final long maxBytesToRecompress;
+
+  private static final Comparator<QualifiedRecommendation> COMPARATOR =
+      new UncompressedNewEntrySizeComparator();
+
+  /**
+   * Create a new limiter that will restrict the total number of bytes that need to be recompressed
+   * to the specified quantity.
+   *
+   * @param maxBytesToRecompress the maximum number of bytes to allow to be recompressed; must be
+   *     greater than or equal to zero
+   */
+  public TotalRecompressionLimiter(long maxBytesToRecompress) {
+    if (maxBytesToRecompress < 0) {
+      throw new IllegalArgumentException(
+          "maxBytesToRecompress must be non-negative: " + maxBytesToRecompress);
+    }
+    this.maxBytesToRecompress = maxBytesToRecompress;
+  }
+
+  @Override
+  public List<QualifiedRecommendation> getModifiedRecommendations(
+      File oldFile, File newFile, List<QualifiedRecommendation> originalRecommendations) {
+
+    List<QualifiedRecommendation> sorted =
+        new ArrayList<QualifiedRecommendation>(originalRecommendations);
+    Collections.sort(sorted, COMPARATOR);
+    Collections.reverse(sorted);
+
+    List<QualifiedRecommendation> result = new ArrayList<>(sorted.size());
+    long recompressibleBytesRemaining = maxBytesToRecompress;
+    for (QualifiedRecommendation originalRecommendation : sorted) {
+      if (!originalRecommendation.getRecommendation().uncompressNewEntry) {
+        // Keep the original recommendation, no need to track size since it won't be uncompressed.
+        result.add(originalRecommendation);
+      } else {
+        long bytesToRecompress = originalRecommendation.getNewEntry().getUncompressedSize();
+        if (recompressibleBytesRemaining - bytesToRecompress >= 0) {
+          // Keep the original recommendation, but also subtract from the remaining space.
+          result.add(originalRecommendation);
+          recompressibleBytesRemaining -= bytesToRecompress;
+        } else {
+          // Update the recommendation to prevent uncompressing this tuple.
+          result.add(
+              new QualifiedRecommendation(
+                  originalRecommendation.getOldEntry(),
+                  originalRecommendation.getNewEntry(),
+                  Recommendation.UNCOMPRESS_NEITHER,
+                  RecommendationReason.RESOURCE_CONSTRAINED));
+        }
+      }
+    }
+    return result;
+  }
+
+  /** Helper class implementing the sort order described in the class documentation. */
+  private static class UncompressedNewEntrySizeComparator
+      implements Comparator<QualifiedRecommendation> {
+    @Override
+    public int compare(QualifiedRecommendation qr1, QualifiedRecommendation qr2) {
+      return Long.compare(
+          qr1.getNewEntry().getUncompressedSize(), qr2.getNewEntry().getUncompressedSize());
+    }
+  }
+}
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/BsDiff.java b/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/BsDiff.java
new file mode 100644
index 0000000..d425221
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/BsDiff.java
@@ -0,0 +1,151 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator.bsdiff;
+
+import java.io.IOException;
+
+/**
+ * A Java implementation of the "bsdiff" algorithm based on the BSD-2 licensed source code available
+ * here: https://github.com/mendsley/bsdiff.
+ * <p>
+ * A canonical description of the bsdiff algorithm can be found at the following URL:
+ * http://www.daemonology.net/bsdiff/
+ * <p>
+ * Since Java only supports "int" for array indexing, the maximum size of files that this
+ * implementation can handle is 2^31, or 2 gibibytes.
+ */
+class BsDiff {
+
+  /**
+   * Search the specified arrays for a contiguous sequence of identical bytes, starting at the
+   * specified "start" offsets and scanning as far ahead as possible till one or the other of the
+   * arrays ends or a non-matching byte is found. Returns the length of the matching sequence of
+   * bytes, which may be zero.
+   *
+   * @param oldData the old data to scan
+   * @param oldStart the position in the old data at which to start the scan
+   * @param newData the new data to scan
+   * @param newStart the position in the new data at which to start the scan
+   * @return the number of matching bytes in the two arrays starting at the specified indices; zero
+   * if the first byte fails to match
+   */
+  // Visible for testing only
+  static int lengthOfMatch(
+      final RandomAccessObject oldData,
+      final int oldStart,
+      final RandomAccessObject newData,
+      final int newStart)
+      throws IOException {
+    final int max = Math.min((int) oldData.length() - oldStart, (int) newData.length() - newStart);
+    if (max > 0) {
+      // If max is 0, it's sometimes possible for this seek to seek to length + 1 and throw an
+      // exception unnecessarily.
+      oldData.seek(oldStart);
+      newData.seek(newStart);
+      for (int offset = 0; offset < max; offset++) {
+        if (oldData.readByte() != newData.readByte()) {
+          return offset;
+        }
+      }
+    }
+
+    return max;
+  }
+
+  // Visible for testing only
+  static Match searchForMatchBaseCase(
+      final RandomAccessObject groupArray,
+      final RandomAccessObject oldData,
+      final RandomAccessObject newData,
+      final int newStart,
+      final int oldDataRangeStartA,
+      final int oldDataRangeStartB)
+      throws IOException {
+    // We have located the start of a matching range (no further search required) or the size of
+    // the range has shrunk to one byte (no further search possible).
+    groupArray.seekToIntAligned(oldDataRangeStartA);
+    final int groupArrayOldDataRangeStartA = groupArray.readInt();
+    final int lengthOfMatchA =
+        lengthOfMatch(oldData, groupArrayOldDataRangeStartA, newData, newStart);
+    groupArray.seekToIntAligned(oldDataRangeStartB);
+    final int groupArrayOldDataRangeStartB = groupArray.readInt();
+    final int lengthOfMatchB =
+        lengthOfMatch(oldData, groupArrayOldDataRangeStartB, newData, newStart);
+
+    if (lengthOfMatchA > lengthOfMatchB) {
+      return Match.of(groupArrayOldDataRangeStartA, lengthOfMatchA);
+    }
+
+    return Match.of(groupArrayOldDataRangeStartB, lengthOfMatchB);
+  }
+
+  /**
+   * Locates the run of bytes in |oldData| which matches the longest prefix of
+   * newData[newStart ... newData.length - 1].
+   * @param groupArray the suffix-sorted group array for |oldData|
+   * @param oldData the old data to scan
+   * @param newData the new data to scan
+   * @param newStart the position of the first byte in newData to consider
+   * @param oldDataRangeStartA the inclusive lower bound of the search range within |groupArray|
+   * @param oldDataRangeStartB the inclusive upper bound of the search range within |groupArray|
+   * @return a Match containing the length of the matching range, and the position at which the
+   * matching range begins.
+   */
+  // Visible for testing only
+  static Match searchForMatch(
+      final RandomAccessObject groupArray,
+      final RandomAccessObject oldData,
+      final RandomAccessObject newData,
+      final int newStart,
+      final int oldDataRangeStartA,
+      final int oldDataRangeStartB)
+      throws IOException {
+    if (oldDataRangeStartB - oldDataRangeStartA < 2) {
+      return searchForMatchBaseCase(
+          groupArray, oldData, newData, newStart, oldDataRangeStartA, oldDataRangeStartB);
+    }
+
+    // Cut range in half and search again
+    final int rangeLength = oldDataRangeStartB - oldDataRangeStartA;
+    final int pivot = oldDataRangeStartA + (rangeLength / 2);
+    groupArray.seekToIntAligned(pivot);
+    final int groupArrayPivot = groupArray.readInt();
+    if (BsUtil.lexicographicalCompare(
+            oldData,
+            groupArrayPivot,
+            (int) oldData.length() - groupArrayPivot,
+            newData,
+            newStart,
+            (int) newData.length() - newStart)
+        < 0) {
+      return searchForMatch(groupArray, oldData, newData, newStart, pivot, oldDataRangeStartB);
+    }
+    return searchForMatch(groupArray, oldData, newData, newStart, oldDataRangeStartA, pivot);
+  }
+
+  static class Match {
+    final int start;
+    final int length;
+
+    static Match of(int start, int length) {
+      return new Match(start, length);
+    }
+
+    private Match(int start, int length) {
+      this.start = start;
+      this.length = length;
+    }
+  }
+}
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/BsDiffDeltaGenerator.java b/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/BsDiffDeltaGenerator.java
new file mode 100644
index 0000000..1f0c79b
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/BsDiffDeltaGenerator.java
@@ -0,0 +1,37 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator.bsdiff;
+
+import com.google.archivepatcher.generator.DeltaGenerator;
+import java.io.File;
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ * An implementation of {@link DeltaGenerator} that uses {@link BsDiffPatchWriter} to write a
+ * bsdiff patch that represents the delta between given inputs.
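+ *
+ * <p>Usage sketch (the blob file names are assumptions):
+ *
+ * <pre>{@code
+ * DeltaGenerator generator = new BsDiffDeltaGenerator();
+ * try (OutputStream deltaOut = new FileOutputStream("delta.bin")) {
+ *   generator.generateDelta(new File("oldBlob"), new File("newBlob"), deltaOut);
+ * }
+ * }</pre>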
+ */
+public class BsDiffDeltaGenerator implements DeltaGenerator {
+  /**
+   * The minimum match length to use for bsdiff.
+   */
+  private static final int MATCH_LENGTH_BYTES = 16;
+
+  @Override
+  public void generateDelta(File oldBlob, File newBlob, OutputStream deltaOut)
+      throws IOException, InterruptedException {
+    BsDiffPatchWriter.generatePatch(oldBlob, newBlob, deltaOut, MATCH_LENGTH_BYTES);
+  }
+}
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/BsDiffMatcher.java b/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/BsDiffMatcher.java
new file mode 100644
index 0000000..e456823
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/BsDiffMatcher.java
@@ -0,0 +1,167 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator.bsdiff;
+
+import java.io.IOException;
+
+/**
+ * Implementation of the matcher used by BsDiff. Exact matches between newData[a ... a + len - 1]
+ * and oldData[b ... b + len - 1] are valid if:
+ * <ul>
+ * <li>|len| > mMinimumMatchLength</li>
+ * <li>The number of matches between newData[a ... a + len - 1] and
+ *     oldData[previous_b ... previous_b + len - 1] < |len| - mMinimumMatchLength where
+ *     |previous_b| is the |b| value of the previous match if there was one and zero otherwise.</li>
+ * </ul>
+ */
+class BsDiffMatcher implements Matcher {
+  private final RandomAccessObject mOldData;
+  private final RandomAccessObject mNewData;
+
+  /**
+   * Contains order of the sorted suffixes of |oldData|. The element at mGroupArray[i] contains the
+   * position of oldData[i ... oldData.length - 1] in the sorted list of suffixes of |oldData|.
+   */
+  private final RandomAccessObject mGroupArray;
+
+  /**
+   * The index in |oldData| of the first byte of the match. Zero if no matches have been found yet.
+   */
+  private int mOldPos;
+
+  /**
+   * The index in |newData| of the first byte of the match. Zero if no matches have been found yet.
+   * The next match will be searched starting at |mNewPos| + |mMatchLen|.
+   */
+  private int mNewPos;
+
+  /**
+   * Minimum match length in bytes.
+   */
+  private final int mMinimumMatchLength;
+
+  /**
+   * A limit on the sum of all match lengths encountered, used to exit the match extension loop in
+   * next() and prevent O(n^2) behavior.
+   */
+  private final long mTotalMatchLenBudget = 1L << 26;  // 2^26, ~67 million.
+
+  /**
+   * The number of bytes, |n|, which match between newData[mNewPos ... mNewPos + n] and
+   * oldData[mOldPos ... mOldPos + n].
+   */
+  private int mMatchLen;
+
+  /**
+   * Create a standard BsDiffMatcher.
+   * @param minimumMatchLength the minimum "match" (in bytes) for BsDiff to consider between the
+   * oldData and newData. This can have a significant effect on both the generated patch size and
+   * the amount of time and memory required to apply the patch.
+   */
+  BsDiffMatcher(
+      RandomAccessObject oldData,
+      RandomAccessObject newData,
+      RandomAccessObject groupArray,
+      int minimumMatchLength) {
+    mOldData = oldData;
+    mNewData = newData;
+    mGroupArray = groupArray;
+    mOldPos = 0;
+    mMinimumMatchLength = minimumMatchLength;
+  }
+
+  @Override
+  public Matcher.NextMatch next() throws IOException, InterruptedException {
+    RandomAccessObject oldData = mOldData;
+    RandomAccessObject newData = mNewData;
+
+    // The offset between the indices in |oldData| and |newData| of the previous match.
+    int previousOldOffset = mOldPos - mNewPos;
+
+    // Look for a new match starting from the end of the previous match.
+    mNewPos += mMatchLen;
+
+    // The number of matching bytes in the forward extension of the previous match:
+    // oldData[mNewPos + previousOldOffset ... mNewPos + previousOldOffset + mMatchLen - 1]
+    // and newData[mNewPos ... mNewPos + mMatchLen - 1].
+    int numMatches = 0;
+
+    // The size of the range for which |numMatches| has been computed.
+    int matchesCacheSize = 0;
+
+    // Sum over all match lengths encountered, to exit loop if we take too long to compute.
+    long totalMatchLen = 0;
+
+    while (mNewPos < newData.length()) {
+      if (Thread.interrupted()) {
+        throw new InterruptedException();
+      }
+      BsDiff.Match match =
+          BsDiff.searchForMatch(mGroupArray, oldData, newData, mNewPos, 0, (int) oldData.length());
+      mOldPos = match.start;
+      mMatchLen = match.length;
+      totalMatchLen += mMatchLen;
+
+      // Update |numMatches| for the new value of |mMatchLen|.
+      for (; matchesCacheSize < mMatchLen; ++matchesCacheSize) {
+        int oldIndex = mNewPos + previousOldOffset + matchesCacheSize;
+        int newIndex = mNewPos + matchesCacheSize;
+        if (oldIndex < oldData.length()) {
+          oldData.seek(oldIndex);
+          newData.seek(newIndex);
+
+          if (oldData.readByte() == newData.readByte()) {
+            ++numMatches;
+          }
+        }
+      }
+
+      // Also return if we've been trying to extend a large match for a long time.
+      if (mMatchLen > numMatches + mMinimumMatchLength || totalMatchLen >= mTotalMatchLenBudget) {
+        return Matcher.NextMatch.of(true, mOldPos, mNewPos);
+      }
+
+      if (mMatchLen == 0) {
+        ++mNewPos;
+      } else if (mMatchLen == numMatches) {
+        // This is an optimization: it is unlikely that a valid match will be found in the range
+        // [ mNewPos ... mNewPos + mMatchLen - 1 ], especially for large values of |mMatchLen|.
+        mNewPos += numMatches;
+        numMatches = 0;
+        matchesCacheSize = 0;
+      } else {
+        // Update |numMatches| for the value of |mNewPos| in the next iteration of the loop. In the
+        // next iteration of the loop, the new value of |numMatches| will be at least
+        // |numMatches - 1| because
+        // oldData[mNewPos + previousOldOffset + 1 ... mNewPos + previousOldOffset + mMatchLen - 1]
+        // matches newData[mNewPos + 1 ... mNewPos + mMatchLen - 1].
+        if (mNewPos + previousOldOffset < oldData.length()) {
+          oldData.seek(mNewPos + previousOldOffset);
+          newData.seek(mNewPos);
+
+          if (oldData.readByte() == newData.readByte()) {
+            --numMatches;
+          }
+        }
+        ++mNewPos;
+        --matchesCacheSize;
+      }
+    }
+
+    return Matcher.NextMatch.of(false, 0, 0);
+  }
+}
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/BsDiffPatchWriter.java b/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/BsDiffPatchWriter.java
new file mode 100644
index 0000000..43e7898
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/BsDiffPatchWriter.java
@@ -0,0 +1,391 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator.bsdiff;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.RandomAccessFile;
+import java.nio.charset.StandardCharsets;
+
+// TODO(andrewhayden) clean up the various generatePatch(...) methods, there are too many.
+
+/**
+ * A helper class that handles the main BsDiff I/O and patch generation, by calling into the main
+ * algorithm implementation in {@link BsDiff}.
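+ *
+ * <p>Typical usage, sketched with assumed file names:
+ *
+ * <pre>{@code
+ * try (OutputStream out = new BufferedOutputStream(new FileOutputStream("patch.bsdiff"))) {
+ *   // Uses DEFAULT_MINIMUM_MATCH_LENGTH and file-backed (mmap) storage internally.
+ *   BsDiffPatchWriter.generatePatch(new File("old.bin"), new File("new.bin"), out);
+ * }
+ * }</pre>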
+ */
+public class BsDiffPatchWriter {
+
+  static final int DEFAULT_MINIMUM_MATCH_LENGTH = 16;
+
+  /**
+   * Write a patch entry.
+   *
+   * @param newData the new data to read from
+   * @param oldData the old data to read from
+   * @param newPosition the first byte in |newData| to write either raw or as a diff against
+   *     |oldData|.
+   * @param oldPosition the first byte in |oldData| to diff against |newData|. Ignored if
+   *     |diffLength| is zero.
+   * @param diffLength the number of bytes to diff between |newData| and |oldData| starting at
+   *     |newPosition| and |oldPosition| respectively.
+   * @param extraLength the number of bytes from |newData| to write starting at |newPosition +
+   *     diffLength|.
+   * @param oldPositionOffsetForNextEntry the offset between |oldPosition| for the next entry and
+   *     |oldPosition| + |diffLength| for this entry.
+   * @param outputStream the output stream to write the patch entry to.
+   * @throws IOException if unable to read or write data
+   */
+  private static void writeEntry(
+      RandomAccessObject newData,
+      RandomAccessObject oldData,
+      int newPosition,
+      int oldPosition,
+      int diffLength,
+      int extraLength,
+      int oldPositionOffsetForNextEntry,
+      OutputStream outputStream)
+      throws IOException {
+    // Write control data
+    BsUtil.writeFormattedLong(diffLength, outputStream);
+    BsUtil.writeFormattedLong(extraLength, outputStream);
+    BsUtil.writeFormattedLong(oldPositionOffsetForNextEntry, outputStream);
+
+    newData.seek(newPosition);
+    oldData.seek(oldPosition);
+    // Write diff data
+    for (int i = 0; i < diffLength; ++i) {
+      // TODO(hartmanng): test using a small buffer to insulate read() calls (and write() for that
+      // matter).
+      outputStream.write(newData.readUnsignedByte() - oldData.readUnsignedByte());
+    }
+
+    if (extraLength > 0) {
+      // This seek will throw an IOException sometimes, if we try to seek to the byte after
+      // the end of the RandomAccessObject.
+      newData.seek(newPosition + diffLength);
+      // Write extra data
+      for (int i = 0; i < extraLength; ++i) {
+        // TODO(hartmanng): same as above - test buffering readByte().
+        outputStream.write(newData.readByte());
+      }
+    }
+  }
+
+  /**
+   * Generate a BsDiff patch given a Matcher.
+   *
+   * @param oldData the old blob
+   * @param newData the new blob
+   * @param matcher a Matcher to find binary matches between oldData and newData
+   * @param outputStream the outputStream for the new generated patch
+   * @throws IOException if unable to read or write data
+   * @throws InterruptedException if any thread interrupts this thread
+   */
+  // Visible for testing only
+  static void generatePatchWithMatcher(
+      RandomAccessObject oldData,
+      RandomAccessObject newData,
+      Matcher matcher,
+      OutputStream outputStream)
+      throws IOException, InterruptedException {
+    // Compute the differences, writing ctrl as we go
+    int lastNewPosition = 0;
+    int lastOldPosition = 0;
+
+    int newPosition = 0;
+    int oldPosition = 0;
+    while (newPosition < newData.length()) {
+      if (Thread.interrupted()) {
+        throw new InterruptedException();
+      }
+      Matcher.NextMatch nextMatch = matcher.next();
+      if (nextMatch.didFindMatch) {
+        newPosition = nextMatch.newPosition;
+        oldPosition = nextMatch.oldPosition;
+      } else {
+        newPosition = (int) newData.length();
+      }
+
+      // Extend the current match (|newPosition|, |oldPosition|) backward such that 50% of the bytes
+      // match. We have written diff / extra data up till |lastNewPosition| so we cannot extend
+      // further back than |lastNewPosition|.
+      int backwardExtension = 0;
+      if (newPosition < newData.length()) {
+        int score = 0;
+        int bestScore = 0;
+        for (int i = 1; newPosition - i >= lastNewPosition && oldPosition >= i; ++i) {
+          oldData.seek(oldPosition - i);
+          newData.seek(newPosition - i);
+          if (oldData.readByte() == newData.readByte()) {
+            ++score;
+          } else {
+            --score;
+          }
+
+          if (score > bestScore) {
+            bestScore = score;
+            backwardExtension = i;
+          }
+        }
+      }
+
+      // Extend the previous match (|lastNewPosition|, |lastOldPosition|) forward such that 50% of
+      // the bytes match. (|lastNewPosition|, |lastOldPosition|) were extended backward in the
+      // previous iteration of the loop.
+      int forwardExtension = 0;
+      {
+        int score = 0;
+        int bestScore = 0;
+        oldData.seek(lastOldPosition);
+        newData.seek(lastNewPosition);
+        for (int i = 0;
+            lastNewPosition + i < newPosition && lastOldPosition + i < oldData.length();
+            ++i) {
+          if (oldData.readByte() == newData.readByte()) {
+            ++score;
+          } else {
+            --score;
+          }
+          if (score > bestScore) {
+            bestScore = score;
+            forwardExtension = i + 1;
+          }
+        }
+      }
+
+      // Adjust |backwardExtension| and |forwardExtension| such that the extended matches do
+      // not intersect in |newData|. They can intersect in |oldData|.
+      int overlap = (lastNewPosition + forwardExtension) - (newPosition - backwardExtension);
+      if (overlap > 0) {
+        int score = 0;
+        int bestScore = 0;
+        int backwardExtensionDecrement = 0;
+        for (int i = 0; i < overlap; ++i) {
+          newData.seek(lastNewPosition + forwardExtension - overlap + i);
+          oldData.seek(lastOldPosition + forwardExtension - overlap + i);
+          if (newData.readByte() == oldData.readByte()) {
+            ++score;
+          }
+
+          newData.seek(newPosition - backwardExtension + i);
+          oldData.seek(oldPosition - backwardExtension + i);
+          if (newData.readByte() == oldData.readByte()) {
+            --score;
+          }
+          if (score > bestScore) {
+            bestScore = score;
+            backwardExtensionDecrement = i + 1;
+          }
+        }
+        forwardExtension -= overlap - backwardExtensionDecrement;
+        backwardExtension -= backwardExtensionDecrement;
+      }
+
+      // Write an entry with:
+      // - The diff between |newData| and |oldData| for the previous extended match:
+      //   oldData[lastOldPosition ... lastOldPosition + forwardExtension - 1] and
+      //   newData[lastNewPosition ... lastNewPosition + forwardExtension - 1].
+      // - The bytes in |newData| between |lastNewPosition| and |newPosition| which are part of
+      //   neither the previous extended match nor the new extended match:
+      //   newData[lastNewPosition + forwardExtension ... newPosition - backwardExtension - 1]
+
+      int oldPositionOffset = 0;
+      if (newPosition < newData.length()) {
+        // The offset from the byte after the last byte of the previous match in |newData| to the
+        // first byte of the new match in |oldData|.
+        oldPositionOffset =
+            (oldPosition - backwardExtension) - (lastOldPosition + forwardExtension);
+      }
+
+      // The number of bytes in |newData| between |lastNewPosition| and |newPosition| which are part
+      // of neither the previous extended match nor the new extended match.
+      int newNoMatchLength =
+          (newPosition - backwardExtension) - (lastNewPosition + forwardExtension);
+
+      writeEntry(
+          newData,
+          oldData,
+          lastNewPosition,
+          lastOldPosition,
+          forwardExtension,
+          newNoMatchLength,
+          oldPositionOffset,
+          outputStream);
+
+      lastNewPosition = newPosition - backwardExtension;
+      lastOldPosition = oldPosition - backwardExtension;
+    }
+  }
+
+  /**
+   * Generate a diff between the old data and the new, writing to the specified stream. Uses {@link
+   * #DEFAULT_MINIMUM_MATCH_LENGTH} as the match length.
+   *
+   * @param oldData the old data
+   * @param newData the new data
+   * @param outputStream where output should be written
+   * @param randomAccessObjectFactory factory to create auxiliary storage during BsDiff
+   * @throws IOException if unable to read or write data
+   * @throws InterruptedException if any thread interrupts this thread
+   */
+  public static void generatePatch(
+      final RandomAccessObject oldData,
+      final RandomAccessObject newData,
+      final OutputStream outputStream,
+      final RandomAccessObjectFactory randomAccessObjectFactory)
+      throws IOException, InterruptedException {
+    generatePatch(
+        oldData, newData, outputStream, randomAccessObjectFactory, DEFAULT_MINIMUM_MATCH_LENGTH);
+  }
+
+  /**
+   * Generate a diff between the old data and the new, writing to the specified stream. Uses
+   * in-memory byte array storage for ancillary allocations and {@link
+   * #DEFAULT_MINIMUM_MATCH_LENGTH} as the match length.
+   *
+   * @param oldData the old data
+   * @param newData the new data
+   * @param outputStream where output should be written
+   * @throws IOException if unable to read or write data
+   * @throws InterruptedException if any thread interrupts this thread
+   */
+  public static void generatePatch(
+      final byte[] oldData, final byte[] newData, final OutputStream outputStream)
+      throws IOException, InterruptedException {
+    generatePatch(oldData, newData, outputStream, DEFAULT_MINIMUM_MATCH_LENGTH);
+  }
+
+  /**
+   * Generate a diff between the old data and the new, writing to the specified stream. Uses
+   * in-memory byte array storage for ancillary allocations.
+   *
+   * @param oldData the old data
+   * @param newData the new data
+   * @param outputStream where output should be written
+   * @param minimumMatchLength the minimum "match" (in bytes) for BsDiff to consider between the
+   *     oldData and newData. This can have a significant effect on both the generated patch size
+   *     and the amount of time and memory required to apply the patch.
+   * @throws IOException if unable to read or write data
+   * @throws InterruptedException if any thread interrupts this thread
+   */
+  public static void generatePatch(
+      final byte[] oldData,
+      final byte[] newData,
+      final OutputStream outputStream,
+      final int minimumMatchLength)
+      throws IOException, InterruptedException {
+    try (RandomAccessObject oldDataRAO =
+            new RandomAccessObject.RandomAccessByteArrayObject(oldData);
+        RandomAccessObject newDataRAO =
+            new RandomAccessObject.RandomAccessByteArrayObject(newData); ) {
+      generatePatch(
+          oldDataRAO,
+          newDataRAO,
+          outputStream,
+          new RandomAccessObjectFactory.RandomAccessByteArrayObjectFactory(),
+          minimumMatchLength);
+    }
+  }
+
+  /**
+   * Generate a diff between the old data and the new, writing to the specified stream. Uses
+   * file-based storage for ancillary operations and {@link #DEFAULT_MINIMUM_MATCH_LENGTH} as the
+   * match length.
+   *
+   * @param oldData a file containing the old data
+   * @param newData a file containing the new data
+   * @param outputStream where output should be written
+   * @throws IOException if unable to read or write data
+   * @throws InterruptedException if any thread interrupts this thread
+   */
+  public static void generatePatch(
+      final File oldData, final File newData, final OutputStream outputStream)
+      throws IOException, InterruptedException {
+    generatePatch(oldData, newData, outputStream, DEFAULT_MINIMUM_MATCH_LENGTH);
+  }
+
+  /**
+   * Generate a diff between the old data and the new, writing to the specified stream. Uses
+   * file-based storage for ancillary allocations.
+   *
+   * @param oldData a file containing the old data
+   * @param newData a file containing the new data
+   * @param outputStream where output should be written
+   * @param minimumMatchLength the minimum "match" (in bytes) for BsDiff to consider between the
+   *     oldData and newData. This can have a significant effect on both the generated patch size
+   *     and the amount of time and memory required to apply the patch.
+   * @throws IOException if unable to read or write data
+   * @throws InterruptedException if any thread interrupts this thread
+   */
+  public static void generatePatch(
+      final File oldData,
+      final File newData,
+      final OutputStream outputStream,
+      final int minimumMatchLength)
+      throws IOException, InterruptedException {
+    try (RandomAccessFile oldDataRAF = new RandomAccessFile(oldData, "r");
+        RandomAccessFile newDataRAF = new RandomAccessFile(newData, "r");
+        RandomAccessObject oldDataRAO =
+            new RandomAccessObject.RandomAccessMmapObject(oldDataRAF, "r");
+        RandomAccessObject newDataRAO =
+            new RandomAccessObject.RandomAccessMmapObject(newDataRAF, "r"); ) {
+      generatePatch(
+          oldDataRAO,
+          newDataRAO,
+          outputStream,
+          new RandomAccessObjectFactory.RandomAccessMmapObjectFactory("rw"),
+          minimumMatchLength);
+    }
+
+    // Due to a bug in the JVM (http://bugs.java.com/view_bug.do?bug_id=6417205), we need to call
+    // gc() and runFinalization() explicitly to get rid of any MappedByteBuffers we may have used
+    // during patch generation.
+    System.gc();
+    System.runFinalization();
+  }
+
+  /**
+   * Generate a diff between the old data and the new, writing to the specified stream.
+   *
+   * @param oldData the old data
+   * @param newData the new data
+   * @param outputStream where output should be written
+   * @param randomAccessObjectFactory factory to create auxiliary storage during BsDiff
+   * @param minimumMatchLength the minimum "match" (in bytes) for BsDiff to consider between the
+   *     oldData and newData. This can have a significant effect on both the generated patch size
+   *     and the amount of time and memory required to apply the patch.
+   * @throws IOException if unable to read or write data
+   * @throws InterruptedException if any thread interrupts this thread
+   */
+  public static void generatePatch(
+      final RandomAccessObject oldData,
+      final RandomAccessObject newData,
+      final OutputStream outputStream,
+      final RandomAccessObjectFactory randomAccessObjectFactory,
+      final int minimumMatchLength)
+      throws IOException, InterruptedException {
+    // Write header (signature + new file length)
+    outputStream.write("ENDSLEY/BSDIFF43".getBytes(StandardCharsets.US_ASCII));
+    BsUtil.writeFormattedLong(newData.length(), outputStream);
+
+    // Do the suffix search.
+    try (final RandomAccessObject groupArray =
+        new DivSuffixSorter(randomAccessObjectFactory).suffixSort(oldData)) {
+      BsDiffMatcher matcher = new BsDiffMatcher(oldData, newData, groupArray, minimumMatchLength);
+      generatePatchWithMatcher(oldData, newData, matcher, outputStream);
+    }
+  }
+}
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/BsUtil.java b/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/BsUtil.java
new file mode 100644
index 0000000..3631dcc
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/BsUtil.java
@@ -0,0 +1,106 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator.bsdiff;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+/**
+ * Utility functions to be shared between BsDiff and BsPatch.
+ */
+class BsUtil {
+    /**
+     * Mask to determine whether a long written by {@link #writeFormattedLong(long, OutputStream)}
+     * is negative.
+     */
+    private static final long NEGATIVE_MASK = 1L << 63;
+
+    /**
+     * Writes a 64-bit signed integer to the specified {@link OutputStream}. The least significant
+     * byte is written first and the most significant byte is written last.
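+     *
+     * <p>The encoding is sign-and-magnitude: negative values have the most significant bit of the
+     * final byte set. A sketch of the resulting bytes:
+     *
+     * <pre>{@code
+     * ByteArrayOutputStream out = new ByteArrayOutputStream();
+     * BsUtil.writeFormattedLong(1L, out);   // writes 01 00 00 00 00 00 00 00
+     * BsUtil.writeFormattedLong(-1L, out);  // writes 01 00 00 00 00 00 00 80
+     * }</pre>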
+     * @param value the value to write
+     * @param outputStream the stream to write to
+     */
+    static void writeFormattedLong(final long value, OutputStream outputStream)
+      throws IOException {
+        long y = value;
+        if (y < 0) {
+            y = (-y) | NEGATIVE_MASK;
+        }
+
+        for (int i = 0; i < 8; ++i) {
+            outputStream.write((byte) (y & 0xff));
+            y >>>= 8;
+        }
+    }
+
+    /**
+     * Reads a 64-bit signed integer written by {@link #writeFormattedLong(long, OutputStream)} from
+     * the specified {@link InputStream}.
+     * @param inputStream the stream to read from
+     * @return the value that was read
+     */
+    static long readFormattedLong(InputStream inputStream) throws IOException {
+        long result = 0;
+        for (int bitshift = 0; bitshift < 64; bitshift += 8) {
+            result |= ((long) inputStream.read()) << bitshift;
+        }
+
+        if ((result - NEGATIVE_MASK) > 0) {
+            result = (result & ~NEGATIVE_MASK) * -1;
+        }
+        return result;
+    }
+
+  /**
+   * Provides a functional equivalent to C/C++ lexicographical_compare. Warning: this calls {@link
+   * RandomAccessObject#seek(long)}, so the internal state of the data objects will be modified.
+   *
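+   * <p>For example (a sketch): comparing "abc" with "abd" over their full lengths yields a
+   * negative value because {@code 'c' < 'd'} at the first difference; comparing "ab" (length 2)
+   * with "abc" (length 3) yields {@code 2 - 3 = -1}.
+   *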
+   * @param data1 the first data object
+   * @param start1 the index in the first data object at which to start comparing
+   * @param length1 the number of bytes in the first data object eligible for comparison
+   * @param data2 the second data object
+   * @param start2 the index in the second data object at which to start comparing
+   * @param length2 the number of bytes in the second data object eligible for comparison
+   * @return the result of the lexicographical comparison: negative if the first mismatching byte
+   *     has a lower value in the first data object, positive if it has a lower value in the
+   *     second. If the ranges are equal up to the length of the shorter one, the shorter range is
+   *     lexicographically less than the longer one (i.e., the result is length1 - length2).
+   */
+  static int lexicographicalCompare(
+      final RandomAccessObject data1,
+      final int start1,
+      final int length1,
+      final RandomAccessObject data2,
+      final int start2,
+      final int length2)
+      throws IOException {
+    int bytesLeft = Math.min(length1, length2);
+
+    data1.seek(start1);
+    data2.seek(start2);
+    while (bytesLeft-- > 0) {
+      final int i1 = data1.readUnsignedByte();
+      final int i2 = data2.readUnsignedByte();
+
+      if (i1 != i2) {
+        return i1 - i2;
+      }
+    }
+
+    return length1 - length2;
+  }
+}
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/DivSuffixSorter.java b/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/DivSuffixSorter.java
new file mode 100644
index 0000000..35ed2fe
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/DivSuffixSorter.java
@@ -0,0 +1,2086 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Licensed under the MIT License. Text in LICENSE file.
+
+package com.google.archivepatcher.generator.bsdiff;
+
+import java.io.IOException;
+
+/**
+ * Taken from
+ * https://github.com/carrotsearch/jsuffixarrays/blob/master/src/main/java/org/jsuffixarrays/
+ * DivSufSort.java and refactored to support RandomAccessObject instead of just arrays.
+ *
+ * <p>Straightforward reimplementation of the divsufsort algorithm given in: <pre><code>
+ * Yuta Mori, Short description of improved two-stage suffix sorting
+ * algorithm, 2005.
+ * http://homepage3.nifty.com/wpage/software/itssort.txt
+ * </code></pre>
+ *
+ * <p>This implementation is basically a translation of the C version given by Yuta Mori:
+ * <tt>libdivsufsort-2.0.0, http://code.google.com/p/libdivsufsort/</tt>
+ */
+public final class DivSuffixSorter implements SuffixSorter {
+
+  // TODO(admo): Clean up the code, variable names and documentation of this class
+
+  private static final int ALPHABET_SIZE = 256;
+  private static final int BUCKET_A_SIZE = ALPHABET_SIZE;
+  private static final int BUCKET_B_SIZE = ALPHABET_SIZE * ALPHABET_SIZE;
+  private static final int SS_INSERTIONSORT_THRESHOLD = 8;
+  private static final int SS_BLOCKSIZE = 1024;
+  private static final int SS_MISORT_STACKSIZE = 16;
+  private static final int SS_SMERGE_STACKSIZE = 32;
+  private static final int TR_STACKSIZE = 64;
+  private static final int TR_INSERTIONSORT_THRESHOLD = 8;
+
+  private static final int[] SQQ_TABLE = {
+    0, 16, 22, 27, 32, 35, 39, 42, 45, 48, 50, 53, 55, 57, 59, 61, 64, 65, 67, 69, 71, 73, 75, 76,
+    78, 80, 81, 83, 84, 86, 87, 89, 90, 91, 93, 94, 96, 97, 98, 99, 101, 102, 103, 104, 106, 107,
+    108, 109, 110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 128,
+    128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 144, 145,
+    146, 147, 148, 149, 150, 150, 151, 152, 153, 154, 155, 155, 156, 157, 158, 159, 160, 160, 161,
+    162, 163, 163, 164, 165, 166, 167, 167, 168, 169, 170, 170, 171, 172, 173, 173, 174, 175, 176,
+    176, 177, 178, 178, 179, 180, 181, 181, 182, 183, 183, 184, 185, 185, 186, 187, 187, 188, 189,
+    189, 190, 191, 192, 192, 193, 193, 194, 195, 195, 196, 197, 197, 198, 199, 199, 200, 201, 201,
+    202, 203, 203, 204, 204, 205, 206, 206, 207, 208, 208, 209, 209, 210, 211, 211, 212, 212, 213,
+    214, 214, 215, 215, 216, 217, 217, 218, 218, 219, 219, 220, 221, 221, 222, 222, 223, 224, 224,
+    225, 225, 226, 226, 227, 227, 228, 229, 229, 230, 230, 231, 231, 232, 232, 233, 234, 234, 235,
+    235, 236, 236, 237, 237, 238, 238, 239, 240, 240, 241, 241, 242, 242, 243, 243, 244, 244, 245,
+    245, 246, 246, 247, 247, 248, 248, 249, 249, 250, 250, 251, 251, 252, 252, 253, 253, 254, 254,
+    255
+  };
+
+  private static final int[] LG_TABLE = {
+    -1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+  };
+
+  /* fields */
+  private final RandomAccessObjectFactory randomAccessObjectFactory;
+
+  private RandomAccessObject suffixArray;
+  private RandomAccessObject input;
+
+  public DivSuffixSorter(RandomAccessObjectFactory randomAccessObjectFactory) {
+    this.randomAccessObjectFactory = randomAccessObjectFactory;
+  }
+
+  @Override
+  public RandomAccessObject suffixSort(RandomAccessObject input)
+      throws IOException, InterruptedException {
+    if (4 * (input.length() + 1) >= Integer.MAX_VALUE) {
+      throw new IllegalArgumentException("Input too large (" + input.length() + " bytes)");
+    }
+    int length = (int) input.length();
+
+    RandomAccessObject suffixArray = randomAccessObjectFactory.create((length + 1) * 4);
+    suffixArray.seek(0);
+    suffixArray.writeInt(length);
+    this.suffixArray = suffixArray;
+
+    // Deal with small cases separately.
+    if (length == 0) {
+      return suffixArray;
+    } else if (length == 1) {
+      writeSuffixArray(0, 0);
+      return suffixArray;
+    }
+
+    this.input = input;
+    int[] bucketA = new int[BUCKET_A_SIZE];
+    int[] bucketB = new int[BUCKET_B_SIZE];
+    /* Suffixsort. */
+    int m = sortTypeBstar(bucketA, bucketB, length);
+    constructSuffixArray(bucketA, bucketB, length, m);
+    return suffixArray;
+  }
+
+  /**
+   * Constructs the suffix array by using the sorted order of type B* suffixes.
+   */
+  private final void constructSuffixArray(int[] bucketA, int[] bucketB, int n, int m)
+      throws IOException {
+    int i, j, k; // ptr
+    int s, c0, c1, c2;
+    if (0 < m) {
+      /*
+       * Construct the sorted order of type B suffixes by using the sorted order of
+       * type B* suffixes.
+       */
+      for (c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
+        /* Scan the suffix array from right to left. */
+        for (i = bucketB[(c1) * ALPHABET_SIZE + (c1 + 1)], j = bucketA[c1 + 1] - 1, k = 0, c2 = -1;
+            i <= j;
+            --j) {
+          if (0 < (s = readSuffixArray(j))) {
+            writeSuffixArray(j, ~s);
+            c0 = readInput(--s);
+            if ((0 < s) && (readInput(s - 1) > c0)) {
+              s = ~s;
+            }
+            if (c0 != c2) {
+              if (0 <= c2) {
+                bucketB[(c1) * ALPHABET_SIZE + (c2)] = k;
+              }
+              k = bucketB[(c1) * ALPHABET_SIZE + (c2 = c0)];
+            }
+            writeSuffixArray(k--, s);
+          } else {
+            writeSuffixArray(j, ~s);
+          }
+        }
+      }
+    }
+
+    /*
+     * Construct the suffix array by using the sorted order of type B suffixes.
+     */
+    k = bucketA[c2 = readInput(n - 1)];
+    writeSuffixArray(k++, readInput(n - 2) < c2 ? ~(n - 1) : (n - 1));
+    /* Scan the suffix array from left to right. */
+    for (i = 0, j = n; i < j; ++i) {
+      if (0 < (s = readSuffixArray(i))) {
+        c0 = readInput(--s);
+        if ((s == 0) || (readInput(s - 1) < c0)) {
+          s = ~s;
+        }
+        if (c0 != c2) {
+          bucketA[c2] = k;
+          k = bucketA[c2 = c0];
+        }
+        writeSuffixArray(k++, s);
+      } else {
+        writeSuffixArray(i, ~s);
+      }
+    }
+  }
+
+  private final int sortTypeBstar(int[] bucketA, int[] bucketB, int n)
+      throws IOException, InterruptedException {
+    int PAb, ISAb, buf;
+
+    int i, j, k, t, m, bufsize;
+    int c0, c1 = 0;
+
+    /*
+     * Count the number of occurrences of the first one or two characters of each
+     * type A, B and B* suffix. Moreover, store the beginning position of all
+     * type B* suffixes into the array SA.
+     */
+    for (i = n - 1, m = n, c0 = readInput(n - 1); 0 <= i; ) {
+      /* type A suffix. */
+      do {
+        ++bucketA[c1 = c0];
+      } while ((0 <= --i) && ((c0 = readInput(i)) >= c1));
+      if (0 <= i) {
+        /* type B* suffix. */
+        ++bucketB[(c0) * ALPHABET_SIZE + (c1)];
+        writeSuffixArray(--m, i);
+        /* type B suffix. */
+        for (--i, c1 = c0; (0 <= i) && ((c0 = readInput(i)) <= c1); --i, c1 = c0) {
+          ++bucketB[(c1) * ALPHABET_SIZE + (c0)];
+        }
+      }
+    }
+    m = n - m;
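+    // m is now the number of type B* suffixes; their start positions sit in
+    // SA[n - m, n).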
+
+    // Note: a type B* suffix is lexicographically smaller than a type B suffix
+    // that begins with the same first two characters.
+
+    // Calculate the index of start/end point of each bucket.
+    for (c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) {
+      t = i + bucketA[c0];
+      bucketA[c0] = i + j; /* start point */
+      i = t + bucketB[(c0) * ALPHABET_SIZE + (c0)];
+      for (c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) {
+        j += bucketB[(c0) * ALPHABET_SIZE + (c1)];
+        bucketB[(c0) * ALPHABET_SIZE + (c1)] = j; // end point
+        i += bucketB[(c1) * ALPHABET_SIZE + (c0)];
+      }
+    }
+
+    if (0 < m) {
+      // Sort the type B* suffixes by their first two characters.
+      PAb = n - m; // SA
+      ISAb = m; // SA
+      for (i = m - 2; 0 <= i; --i) {
+        t = readSuffixArray(PAb + i);
+        c0 = readInput(t);
+        c1 = readInput(t + 1);
+        writeSuffixArray(--bucketB[(c0) * ALPHABET_SIZE + (c1)], i);
+      }
+      t = readSuffixArray(PAb + m - 1);
+      c0 = readInput(t);
+      c1 = readInput(t + 1);
+      writeSuffixArray(--bucketB[(c0) * ALPHABET_SIZE + (c1)], m - 1);
+
+      // Sort the type B* substrings using sssort.
+
+      buf = m; // SA
+      bufsize = n - (2 * m);
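+      // Workspace layout during this phase: SA[0, m) holds the B* order being
+      // built, SA[m, n - m) is the merge buffer (later reused as ISAb), and
+      // SA[n - m, n) keeps the B* start positions (PAb).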
+
+      for (c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {
+        if (Thread.interrupted()) {
+          throw new InterruptedException();
+        }
+        for (c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {
+          i = bucketB[(c0) * ALPHABET_SIZE + (c1)];
+          if (1 < (j - i)) {
+            ssSort(PAb, i, j, buf, bufsize, 2, n, readSuffixArray(i) == (m - 1));
+          }
+        }
+      }
+
+      // Compute ranks of type B* substrings.
+      for (i = m - 1; 0 <= i; --i) {
+        if (0 <= readSuffixArray(i)) {
+          j = i;
+          do {
+            writeSuffixArray(ISAb + readSuffixArray(i), i);
+          } while ((0 <= --i) && (0 <= readSuffixArray(i)));
+          writeSuffixArray(i + 1, i - j);
+          if (i <= 0) {
+            break;
+          }
+        }
+        j = i;
+        do {
+          writeSuffixArray(ISAb + (writeSuffixArray(i, ~readSuffixArray(i))), j);
+        } while (readSuffixArray(--i) < 0);
+        writeSuffixArray(ISAb + readSuffixArray(i), j);
+      }
+      // Construct the inverse suffix array of type B* suffixes using trsort.
+      trSort(ISAb, m, 1);
+      // Set the sorted order of type B* suffixes.
+      for (i = n - 1, j = m, c0 = readInput(n - 1); 0 <= i; ) {
+        if (Thread.interrupted()) {
+          throw new InterruptedException();
+        }
+        for (--i, c1 = c0; (0 <= i) && ((c0 = readInput(i)) >= c1); --i, c1 = c0) {}
+        if (0 <= i) {
+          t = i;
+          for (--i, c1 = c0; (0 <= i) && ((c0 = readInput(i)) <= c1); --i, c1 = c0) {}
+          writeSuffixArray(readSuffixArray(ISAb + --j), ((t == 0) || (1 < (t - i))) ? t : ~t);
+        }
+      }
+
+      // Calculate the index of start/end point of each bucket.
+      bucketB[(ALPHABET_SIZE - 1) * ALPHABET_SIZE + (ALPHABET_SIZE - 1)] = n; // end point
+      for (c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) {
+        if (Thread.interrupted()) {
+          throw new InterruptedException();
+        }
+        i = bucketA[c0 + 1] - 1;
+        for (c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) {
+          t = i - bucketB[(c1) * ALPHABET_SIZE + (c0)];
+          bucketB[(c1) * ALPHABET_SIZE + (c0)] = i; // end point
+
+          // Move all type B* suffixes to the correct position.
+          for (i = t, j = bucketB[(c0) * ALPHABET_SIZE + (c1)]; j <= k; --i, --k) {
+            writeSuffixArray(i, readSuffixArray(k));
+          }
+        }
+        bucketB[(c0) * ALPHABET_SIZE + (c0 + 1)] = i - bucketB[(c0) * ALPHABET_SIZE + (c0)] + 1;
+        bucketB[(c0) * ALPHABET_SIZE + (c0)] = i; // end point
+      }
+    }
+
+    return m;
+  }
+
+  private final void ssSort(
+      final int PA, int first, int last, int buf, int bufsize, int depth, int n, boolean lastsuffix)
+      throws IOException {
+    int a, b, middle, curbuf; // SA pointer
+
+    int j, k, curbufsize, limit;
+
+    int i;
+
+    if (lastsuffix) {
+      ++first;
+    }
+
+    if ((bufsize < SS_BLOCKSIZE)
+        && (bufsize < (last - first))
+        && (bufsize < (limit = ssIsqrt(last - first)))) {
+      if (SS_BLOCKSIZE < limit) {
+        limit = SS_BLOCKSIZE;
+      }
+      buf = middle = last - limit;
+      bufsize = limit;
+    } else {
+      middle = last;
+      limit = 0;
+    }
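+    // Sort SS_BLOCKSIZE-sized chunks with multikey introsort, merging finished
+    // chunks pairwise: bit j of the chunk counter i tracks which merges are
+    // still pending (binary-counter merging).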
+    for (a = first, i = 0; SS_BLOCKSIZE < (middle - a); a += SS_BLOCKSIZE, ++i) {
+      ssMintroSort(PA, a, a + SS_BLOCKSIZE, depth);
+      curbufsize = last - (a + SS_BLOCKSIZE);
+      curbuf = a + SS_BLOCKSIZE;
+      if (curbufsize <= bufsize) {
+        curbufsize = bufsize;
+        curbuf = buf;
+      }
+      for (b = a, k = SS_BLOCKSIZE, j = i; (j & 1) != 0; b -= k, k <<= 1, j >>= 1) {
+        ssSwapMerge(PA, b - k, b, b + k, curbuf, curbufsize, depth);
+      }
+    }
+    ssMintroSort(PA, a, middle, depth);
+    for (k = SS_BLOCKSIZE; i != 0; k <<= 1, i >>= 1) {
+      if ((i & 1) != 0) {
+        ssSwapMerge(PA, a - k, a, middle, buf, bufsize, depth);
+        a -= k;
+      }
+    }
+    if (limit != 0) {
+      ssMintroSort(PA, middle, last, depth);
+      ssInplaceMerge(PA, first, middle, last, depth);
+    }
+
+    if (lastsuffix) {
+      int p1 = readSuffixArray(PA + readSuffixArray(first - 1));
+      int p11 = n - 2;
+      for (a = first, i = readSuffixArray(first - 1);
+          (a < last)
+              && ((readSuffixArray(a) < 0)
+                  || (0 < ssCompare(p1, p11, PA + readSuffixArray(a), depth)));
+          ++a) {
+        writeSuffixArray(a - 1, readSuffixArray(a));
+      }
+      writeSuffixArray(a - 1, i);
+    }
+  }
+
+  /**
+   * Special version of ssCompare for handling the
+   * <code>ssCompare(T, &amp;(PAi[0]), PA + *a, depth)</code> situation.
+   */
+  private final int ssCompare(int pa, int pb, int p2, int depth) throws IOException {
+    int U1, U2, U1n, U2n; // pointers to T
+
+    for (U1 = depth + pa, U2 = depth + readSuffixArray(p2), U1n = pb + 2,
+            U2n = readSuffixArray(p2 + 1) + 2;
+        (U1 < U1n) && (U2 < U2n) && (readInput(U1) == readInput(U2));
+        ++U1, ++U2) {}
+
+    return U1 < U1n ? (U2 < U2n ? readInput(U1) - readInput(U2) : 1) : (U2 < U2n ? -1 : 0);
+  }
+
+  private final int ssCompare(int p1, int p2, int depth) throws IOException {
+    int U1, U2, U1n, U2n; // pointers to T
+
+    for (U1 = depth + readSuffixArray(p1), U2 = depth + readSuffixArray(p2),
+            U1n = readSuffixArray(p1 + 1) + 2, U2n = readSuffixArray(p2 + 1) + 2;
+        (U1 < U1n) && (U2 < U2n) && (readInput(U1) == readInput(U2));
+        ++U1, ++U2) {}
+
+    return U1 < U1n ? (U2 < U2n ? readInput(U1) - readInput(U2) : 1) : (U2 < U2n ? -1 : 0);
+  }
+
+  private final void ssInplaceMerge(int PA, int first, int middle, int last, int depth)
+      throws IOException {
+    // PA, middle, first, last are pointers to SA
+    int p, a, b; // pointer to SA
+    int len, half;
+    int q, r;
+    int x;
+
+    for (; ; ) {
+      if (readSuffixArray(last - 1) < 0) {
+        x = 1;
+        p = PA + ~readSuffixArray(last - 1);
+      } else {
+        x = 0;
+        p = PA + readSuffixArray(last - 1);
+      }
+      for (a = first, len = middle - first, half = len >> 1, r = -1;
+          0 < len;
+          len = half, half >>= 1) {
+        b = a + half;
+        q =
+            ssCompare(
+                PA + ((0 <= readSuffixArray(b)) ? readSuffixArray(b) : ~readSuffixArray(b)),
+                p,
+                depth);
+        if (q < 0) {
+          a = b + 1;
+          half -= (len & 1) ^ 1;
+        } else {
+          r = q;
+        }
+      }
+      if (a < middle) {
+        if (r == 0) {
+          writeSuffixArray(a, ~readSuffixArray(a));
+        }
+        ssRotate(a, middle, last);
+        last -= middle - a;
+        middle = a;
+        if (first == middle) {
+          break;
+        }
+      }
+      --last;
+      if (x != 0) {
+        while (readSuffixArray(--last) < 0) {
+          // nop
+        }
+      }
+      if (middle == last) {
+        break;
+      }
+    }
+  }
+
+  private final void ssRotate(int first, int middle, int last) throws IOException {
+    // first, middle, last are pointers in SA
+    int a, b, t; // pointers in SA
+    int l, r;
+    l = middle - first;
+    r = last - middle;
+    for (; (0 < l) && (0 < r); ) {
+      if (l == r) {
+        ssBlockSwap(first, middle, l);
+        break;
+      }
+      if (l < r) {
+        a = last - 1;
+        b = middle - 1;
+        t = readSuffixArray(a);
+        do {
+          writeSuffixArray(a--, readSuffixArray(b));
+          writeSuffixArray(b--, readSuffixArray(a));
+          if (b < first) {
+            writeSuffixArray(a, t);
+            last = a;
+            if ((r -= l + 1) <= l) {
+              break;
+            }
+            a -= 1;
+            b = middle - 1;
+            t = readSuffixArray(a);
+          }
+        } while (true);
+      } else {
+        a = first;
+        b = middle;
+        t = readSuffixArray(a);
+        do {
+          writeSuffixArray(a++, readSuffixArray(b));
+          writeSuffixArray(b++, readSuffixArray(a));
+          if (last <= b) {
+            writeSuffixArray(a, t);
+            first = a + 1;
+            if ((l -= r + 1) <= r) {
+              break;
+            }
+            a += 1;
+            b = middle;
+            t = readSuffixArray(a);
+          }
+        } while (true);
+      }
+    }
+  }
+
+  private final void ssBlockSwap(int a, int b, int n) throws IOException {
+    // a, b -- pointer to SA
+    int t;
+    for (; 0 < n; --n, ++a, ++b) {
+      t = readSuffixArray(a);
+      writeSuffixArray(a, readSuffixArray(b));
+      writeSuffixArray(b, t);
+    }
+  }
+
+  private static final int getIDX(int a) {
+    return (0 <= (a)) ? (a) : (~(a));
+  }
+
+  private static final int min(int a, int b) {
+    return a < b ? a : b;
+  }
+
+  /**
+   * Divide-and-conquer based merge.
+   */
+  private final void ssSwapMerge(
+      int PA, int first, int middle, int last, int buf, int bufsize, int depth) throws IOException {
+    // Pa, first, middle, last and buf - pointers in SA array
+
+    final int STACK_SIZE = SS_SMERGE_STACKSIZE;
+    StackElement[] stack = new StackElement[STACK_SIZE];
+    int l, r, lm, rm; // pointers in SA
+    int m, len, half;
+    int ssize;
+    int check, next;
+
+    for (check = 0, ssize = 0; ; ) {
+
+      if ((last - middle) <= bufsize) {
+        if ((first < middle) && (middle < last)) {
+          ssMergeBackward(PA, first, middle, last, buf, depth);
+        }
+        if (((check & 1) != 0)
+            || (((check & 2) != 0)
+                && (ssCompare(
+                        PA + getIDX(readSuffixArray(first - 1)), PA + readSuffixArray(first), depth)
+                    == 0))) {
+          writeSuffixArray(first, ~readSuffixArray(first));
+        }
+        if (((check & 4) != 0)
+            && ((ssCompare(
+                    PA + getIDX(readSuffixArray(last - 1)), PA + readSuffixArray(last), depth)
+                == 0))) {
+          writeSuffixArray(last, ~readSuffixArray(last));
+        }
+
+        if (ssize > 0) {
+          StackElement se = stack[--ssize];
+          first = se.a;
+          middle = se.b;
+          last = se.c;
+          check = se.d;
+        } else {
+          return;
+        }
+        continue;
+      }
+
+      if ((middle - first) <= bufsize) {
+        if (first < middle) {
+          ssMergeForward(PA, first, middle, last, buf, depth);
+        }
+        if (((check & 1) != 0)
+            || (((check & 2) != 0)
+                && (ssCompare(
+                        PA + getIDX(readSuffixArray(first - 1)), PA + readSuffixArray(first), depth)
+                    == 0))) {
+          writeSuffixArray(first, ~readSuffixArray(first));
+        }
+        if (((check & 4) != 0)
+            && ((ssCompare(
+                    PA + getIDX(readSuffixArray(last - 1)), PA + readSuffixArray(last), depth)
+                == 0))) {
+          writeSuffixArray(last, ~readSuffixArray(last));
+        }
+
+        if (ssize > 0) {
+          StackElement se = stack[--ssize];
+          first = se.a;
+          middle = se.b;
+          last = se.c;
+          check = se.d;
+        } else {
+          return;
+        }
+
+        continue;
+      }
+
+      for (m = 0, len = min(middle - first, last - middle), half = len >> 1;
+          0 < len;
+          len = half, half >>= 1) {
+        if (ssCompare(
+                PA + getIDX(readSuffixArray(middle + m + half)),
+                PA + getIDX(readSuffixArray(middle - m - half - 1)),
+                depth)
+            < 0) {
+          m += half + 1;
+          half -= (len & 1) ^ 1;
+        }
+      }
+
+      if (0 < m) {
+        lm = middle - m;
+        rm = middle + m;
+        ssBlockSwap(lm, middle, m);
+        l = r = middle;
+        next = 0;
+        if (rm < last) {
+          if (readSuffixArray(rm) < 0) {
+            writeSuffixArray(rm, ~readSuffixArray(rm));
+            if (first < lm) {
+              for (; readSuffixArray(--l) < 0; ) {}
+              next |= 4;
+            }
+            next |= 1;
+          } else if (first < lm) {
+            for (; readSuffixArray(r) < 0; ++r) {}
+            next |= 2;
+          }
+        }
+
+        if ((l - first) <= (last - r)) {
+          stack[ssize++] = new StackElement(r, rm, last, (next & 3) | (check & 4));
+
+          middle = lm;
+          last = l;
+          check = (check & 3) | (next & 4);
+        } else {
+          if (((next & 2) != 0) && (r == middle)) {
+            next ^= 6;
+          }
+          stack[ssize++] = new StackElement(first, lm, l, (check & 3) | (next & 4));
+
+          first = r;
+          middle = rm;
+          check = (next & 3) | (check & 4);
+        }
+      } else {
+        if (ssCompare(PA + getIDX(readSuffixArray(middle - 1)), PA + readSuffixArray(middle), depth)
+            == 0) {
+          writeSuffixArray(middle, ~readSuffixArray(middle));
+        }
+
+        if (((check & 1) != 0)
+            || (((check & 2) != 0)
+                && (ssCompare(
+                        PA + getIDX(readSuffixArray(first - 1)), PA + readSuffixArray(first), depth)
+                    == 0))) {
+          writeSuffixArray(first, ~readSuffixArray(first));
+        }
+        if (((check & 4) != 0)
+            && ((ssCompare(
+                    PA + getIDX(readSuffixArray(last - 1)), PA + readSuffixArray(last), depth)
+                == 0))) {
+          writeSuffixArray(last, ~readSuffixArray(last));
+        }
+
+        if (ssize > 0) {
+          StackElement se = stack[--ssize];
+          first = se.a;
+          middle = se.b;
+          last = se.c;
+          check = se.d;
+        } else {
+          return;
+        }
+      }
+    }
+  }
+
+  /**
+   * Merge-forward with internal buffer.
+   */
+  private final void ssMergeForward(int PA, int first, int middle, int last, int buf, int depth)
+      throws IOException {
+    // PA, first, middle, last, buf are pointers to SA
+    int a, b, c, bufend; // pointers to SA
+    int t, r;
+
+    bufend = buf + (middle - first) - 1;
+    ssBlockSwap(buf, first, middle - first);
+
+    for (t = readSuffixArray(a = first), b = buf, c = middle; ; ) {
+      r = ssCompare(PA + readSuffixArray(b), PA + readSuffixArray(c), depth);
+      if (r < 0) {
+        do {
+          writeSuffixArray(a++, readSuffixArray(b));
+          if (bufend <= b) {
+            writeSuffixArray(bufend, t);
+            return;
+          }
+          writeSuffixArray(b++, readSuffixArray(a));
+        } while (readSuffixArray(b) < 0);
+      } else if (r > 0) {
+        do {
+          writeSuffixArray(a++, readSuffixArray(c));
+          writeSuffixArray(c++, readSuffixArray(a));
+          if (last <= c) {
+            while (b < bufend) {
+              writeSuffixArray(a++, readSuffixArray(b));
+              writeSuffixArray(b++, readSuffixArray(a));
+            }
+            writeSuffixArray(a, readSuffixArray(b));
+            writeSuffixArray(b, t);
+            return;
+          }
+        } while (readSuffixArray(c) < 0);
+      } else {
+        writeSuffixArray(c, ~readSuffixArray(c));
+        do {
+          writeSuffixArray(a++, readSuffixArray(b));
+          if (bufend <= b) {
+            writeSuffixArray(bufend, t);
+            return;
+          }
+          writeSuffixArray(b++, readSuffixArray(a));
+        } while (readSuffixArray(b) < 0);
+
+        do {
+          writeSuffixArray(a++, readSuffixArray(c));
+          writeSuffixArray(c++, readSuffixArray(a));
+          if (last <= c) {
+            while (b < bufend) {
+              writeSuffixArray(a++, readSuffixArray(b));
+              writeSuffixArray(b++, readSuffixArray(a));
+            }
+            writeSuffixArray(a, readSuffixArray(b));
+            writeSuffixArray(b, t);
+            return;
+          }
+        } while (readSuffixArray(c) < 0);
+      }
+    }
+  }
+
+  /**
+   * Merge-backward with internal buffer.
+   */
+  private final void ssMergeBackward(int PA, int first, int middle, int last, int buf, int depth)
+      throws IOException {
+    // PA, first, middle, last, buf are pointers in SA
+    int p1, p2; // pointers in SA
+    int a, b, c, bufend; // pointers in SA
+    int t, r, x;
+
+    bufend = buf + (last - middle) - 1;
+    ssBlockSwap(buf, middle, last - middle);
+
+    x = 0;
+    if (readSuffixArray(bufend) < 0) {
+      p1 = PA + ~readSuffixArray(bufend);
+      x |= 1;
+    } else {
+      p1 = PA + readSuffixArray(bufend);
+    }
+    if (readSuffixArray(middle - 1) < 0) {
+      p2 = PA + ~readSuffixArray(middle - 1);
+      x |= 2;
+    } else {
+      p2 = PA + readSuffixArray(middle - 1);
+    }
+    for (t = readSuffixArray(a = last - 1), b = bufend, c = middle - 1; ; ) {
+      r = ssCompare(p1, p2, depth);
+      if (0 < r) {
+        if ((x & 1) != 0) {
+          do {
+            writeSuffixArray(a--, readSuffixArray(b));
+            writeSuffixArray(b--, readSuffixArray(a));
+          } while (readSuffixArray(b) < 0);
+          x ^= 1;
+        }
+        writeSuffixArray(a--, readSuffixArray(b));
+        if (b <= buf) {
+          writeSuffixArray(buf, t);
+          break;
+        }
+        writeSuffixArray(b--, readSuffixArray(a));
+        if (readSuffixArray(b) < 0) {
+          p1 = PA + ~readSuffixArray(b);
+          x |= 1;
+        } else {
+          p1 = PA + readSuffixArray(b);
+        }
+      } else if (r < 0) {
+        if ((x & 2) != 0) {
+          do {
+            writeSuffixArray(a--, readSuffixArray(c));
+            writeSuffixArray(c--, readSuffixArray(a));
+          } while (readSuffixArray(c) < 0);
+          x ^= 2;
+        }
+        writeSuffixArray(a--, readSuffixArray(c));
+        writeSuffixArray(c--, readSuffixArray(a));
+        if (c < first) {
+          while (buf < b) {
+            writeSuffixArray(a--, readSuffixArray(b));
+            writeSuffixArray(b--, readSuffixArray(a));
+          }
+          writeSuffixArray(a, readSuffixArray(b));
+          writeSuffixArray(b, t);
+          break;
+        }
+        if (readSuffixArray(c) < 0) {
+          p2 = PA + ~readSuffixArray(c);
+          x |= 2;
+        } else {
+          p2 = PA + readSuffixArray(c);
+        }
+      } else {
+        if ((x & 1) != 0) {
+          do {
+            writeSuffixArray(a--, readSuffixArray(b));
+            writeSuffixArray(b--, readSuffixArray(a));
+          } while (readSuffixArray(b) < 0);
+          x ^= 1;
+        }
+        writeSuffixArray(a--, ~readSuffixArray(b));
+        if (b <= buf) {
+          writeSuffixArray(buf, t);
+          break;
+        }
+        writeSuffixArray(b--, readSuffixArray(a));
+        if ((x & 2) != 0) {
+          do {
+            writeSuffixArray(a--, readSuffixArray(c));
+            writeSuffixArray(c--, readSuffixArray(a));
+          } while (readSuffixArray(c) < 0);
+          x ^= 2;
+        }
+        writeSuffixArray(a--, readSuffixArray(c));
+        writeSuffixArray(c--, readSuffixArray(a));
+        if (c < first) {
+          while (buf < b) {
+            writeSuffixArray(a--, readSuffixArray(b));
+            writeSuffixArray(b--, readSuffixArray(a));
+          }
+          writeSuffixArray(a, readSuffixArray(b));
+          writeSuffixArray(b, t);
+          break;
+        }
+        if (readSuffixArray(b) < 0) {
+          p1 = PA + ~readSuffixArray(b);
+          x |= 1;
+        } else {
+          p1 = PA + readSuffixArray(b);
+        }
+        if (readSuffixArray(c) < 0) {
+          p2 = PA + ~readSuffixArray(c);
+          x |= 2;
+        } else {
+          p2 = PA + readSuffixArray(c);
+        }
+      }
+    }
+  }
+
+  /**
+   * Insertion sort for small groups.
+   */
+  private final void ssInsertionSort(int PA, int first, int last, int depth) throws IOException {
+    // PA, first, last are pointers in SA
+    int i, j; // pointers in SA
+    int t, r;
+
+    for (i = last - 2; first <= i; --i) {
+      for (t = readSuffixArray(i), j = i + 1;
+          0 < (r = ssCompare(PA + t, PA + readSuffixArray(j), depth));
+          ) {
+        do {
+          writeSuffixArray(j - 1, readSuffixArray(j));
+        } while ((++j < last) && (readSuffixArray(j) < 0));
+        if (last <= j) {
+          break;
+        }
+      }
+      if (r == 0) {
+        writeSuffixArray(j, ~readSuffixArray(j));
+      }
+      writeSuffixArray(j - 1, t);
+    }
+  }
+
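+  /**
+   * Fast integer square root: returns approximately floor(sqrt(x)), clamped to
+   * SS_BLOCKSIZE, using lookup tables plus Newton-Raphson refinement.
+   */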
+  private static final int ssIsqrt(int x) {
+    int y, e;
+
+    if (x >= (SS_BLOCKSIZE * SS_BLOCKSIZE)) {
+      return SS_BLOCKSIZE;
+    }
+    e =
+        ((x & 0xffff0000) != 0)
+            ? (((x & 0xff000000) != 0)
+                ? 24 + LG_TABLE[(x >> 24) & 0xff]
+                : 16 + LG_TABLE[(x >> 16) & 0xff])
+            : (((x & 0x0000ff00) != 0) ? 8 + LG_TABLE[(x >> 8) & 0xff] : LG_TABLE[(x >> 0) & 0xff]);
+
+    if (e >= 16) {
+      y = SQQ_TABLE[x >> ((e - 6) - (e & 1))] << ((e >> 1) - 7);
+      if (e >= 24) {
+        y = (y + 1 + x / y) >> 1;
+      }
+      y = (y + 1 + x / y) >> 1;
+    } else if (e >= 8) {
+      y = (SQQ_TABLE[x >> ((e - 6) - (e & 1))] >> (7 - (e >> 1))) + 1;
+    } else {
+      return SQQ_TABLE[x] >> 4;
+    }
+
+    return (x < (y * y)) ? y - 1 : y;
+  }
+
+  /** Multikey introsort for medium-size groups. */
+  private final void ssMintroSort(int PA, int first, int last, int depth) throws IOException {
+    final int STACK_SIZE = SS_MISORT_STACKSIZE;
+    StackElement[] stack = new StackElement[STACK_SIZE];
+    int Td; // T ptr
+    int a, b, c, d, e, f; // SA ptr
+    int s, t;
+    int ssize;
+    int limit;
+    int v, x = 0;
+    for (ssize = 0, limit = ssIlg(last - first); ; ) {
+
+      if ((last - first) <= SS_INSERTIONSORT_THRESHOLD) {
+        if (1 < (last - first)) {
+          ssInsertionSort(PA, first, last, depth);
+        }
+        if (ssize > 0) {
+          StackElement se = stack[--ssize];
+          first = se.a;
+          last = se.b;
+          depth = se.c;
+          limit = se.d;
+        } else {
+          return;
+        }
+
+        continue;
+      }
+
+      Td = depth;
+      if (limit-- == 0) {
+        ssHeapSort(Td, PA, first, last - first);
+      }
+      if (limit < 0) {
+        for (a = first + 1, v = readInput(Td + readSuffixArray(PA + readSuffixArray(first)));
+            a < last;
+            ++a) {
+          if ((x = readInput(Td + readSuffixArray(PA + readSuffixArray(a)))) != v) {
+            if (1 < (a - first)) {
+              break;
+            }
+            v = x;
+            first = a;
+          }
+        }
+
+        if (readInput(Td + readSuffixArray(PA + readSuffixArray(first)) - 1) < v) {
+          first = ssPartition(PA, first, a, depth);
+        }
+        if ((a - first) <= (last - a)) {
+          if (1 < (a - first)) {
+            stack[ssize++] = new StackElement(a, last, depth, -1);
+            last = a;
+            depth += 1;
+            limit = ssIlg(a - first);
+          } else {
+            first = a;
+            limit = -1;
+          }
+        } else {
+          if (1 < (last - a)) {
+            stack[ssize++] = new StackElement(first, a, depth + 1, ssIlg(a - first));
+            first = a;
+            limit = -1;
+          } else {
+            last = a;
+            depth += 1;
+            limit = ssIlg(a - first);
+          }
+        }
+        continue;
+      }
+
+      // choose pivot
+      a = ssPivot(Td, PA, first, last);
+      v = readInput(Td + readSuffixArray(PA + readSuffixArray(a)));
+      swapInSA(first, a);
+
+      // partition
+      for (b = first;
+          (++b < last) && ((x = readInput(Td + readSuffixArray(PA + readSuffixArray(b)))) == v);
+          ) {}
+      if (((a = b) < last) && (x < v)) {
+        for (;
+            (++b < last) && ((x = readInput(Td + readSuffixArray(PA + readSuffixArray(b)))) <= v);
+            ) {
+          if (x == v) {
+            swapInSA(b, a);
+            ++a;
+          }
+        }
+      }
+
+      for (c = last;
+          (b < --c) && ((x = readInput(Td + readSuffixArray(PA + readSuffixArray(c)))) == v);
+          ) {}
+      if ((b < (d = c)) && (x > v)) {
+        for (;
+            (b < --c) && ((x = readInput(Td + readSuffixArray(PA + readSuffixArray(c)))) >= v);
+            ) {
+          if (x == v) {
+            swapInSA(c, d);
+            --d;
+          }
+        }
+      }
+
+      for (; b < c; ) {
+        swapInSA(b, c);
+        for (;
+            (++b < c) && ((x = readInput(Td + readSuffixArray(PA + readSuffixArray(b)))) <= v);
+            ) {
+          if (x == v) {
+            swapInSA(b, a);
+            ++a;
+          }
+        }
+        for (;
+            (b < --c) && ((x = readInput(Td + readSuffixArray(PA + readSuffixArray(c)))) >= v);
+            ) {
+          if (x == v) {
+            swapInSA(c, d);
+            --d;
+          }
+        }
+      }
+
+      if (a <= d) {
+        c = b - 1;
+
+        if ((s = a - first) > (t = b - a)) {
+          s = t;
+        }
+        for (e = first, f = b - s; 0 < s; --s, ++e, ++f) {
+          swapInSA(e, f);
+        }
+        if ((s = d - c) > (t = last - d - 1)) {
+          s = t;
+        }
+        for (e = b, f = last - s; 0 < s; --s, ++e, ++f) {
+          swapInSA(e, f);
+        }
+
+        a = first + (b - a);
+        c = last - (d - c);
+        b =
+            (v <= readInput(Td + readSuffixArray(PA + readSuffixArray(a)) - 1))
+                ? a
+                : ssPartition(PA, a, c, depth);
+
+        if ((a - first) <= (last - c)) {
+          if ((last - c) <= (c - b)) {
+            stack[ssize++] = new StackElement(b, c, depth + 1, ssIlg(c - b));
+            stack[ssize++] = new StackElement(c, last, depth, limit);
+            last = a;
+          } else if ((a - first) <= (c - b)) {
+            stack[ssize++] = new StackElement(c, last, depth, limit);
+            stack[ssize++] = new StackElement(b, c, depth + 1, ssIlg(c - b));
+            last = a;
+          } else {
+            stack[ssize++] = new StackElement(c, last, depth, limit);
+            stack[ssize++] = new StackElement(first, a, depth, limit);
+            first = b;
+            last = c;
+            depth += 1;
+            limit = ssIlg(c - b);
+          }
+        } else {
+          if ((a - first) <= (c - b)) {
+            stack[ssize++] = new StackElement(b, c, depth + 1, ssIlg(c - b));
+            stack[ssize++] = new StackElement(first, a, depth, limit);
+            first = c;
+          } else if ((last - c) <= (c - b)) {
+            stack[ssize++] = new StackElement(first, a, depth, limit);
+            stack[ssize++] = new StackElement(b, c, depth + 1, ssIlg(c - b));
+            first = c;
+          } else {
+            stack[ssize++] = new StackElement(first, a, depth, limit);
+            stack[ssize++] = new StackElement(c, last, depth, limit);
+            first = b;
+            last = c;
+            depth += 1;
+            limit = ssIlg(c - b);
+          }
+        }
+
+      } else {
+        limit += 1;
+        if (readInput(Td + readSuffixArray(PA + readSuffixArray(first)) - 1) < v) {
+          first = ssPartition(PA, first, last, depth);
+          limit = ssIlg(last - first);
+        }
+        depth += 1;
+      }
+    }
+  }
+
+  /**
+   * Returns the pivot element.
+   */
+  private final int ssPivot(int Td, int PA, int first, int last) throws IOException {
+    int middle; // SA pointer
+    int t = last - first;
+    middle = first + t / 2;
+
+    if (t <= 512) {
+      if (t <= 32) {
+        return ssMedian3(Td, PA, first, middle, last - 1);
+      } else {
+        t >>= 2;
+        return ssMedian5(Td, PA, first, first + t, middle, last - 1 - t, last - 1);
+      }
+    }
+    t >>= 3;
+    first = ssMedian3(Td, PA, first, first + t, first + (t << 1));
+    middle = ssMedian3(Td, PA, middle - t, middle, middle + t);
+    last = ssMedian3(Td, PA, last - 1 - (t << 1), last - 1 - t, last - 1);
+    return ssMedian3(Td, PA, first, middle, last);
+  }
+
+  /**
+   * Returns the median of five elements
+   */
+  private final int ssMedian5(int Td, int PA, int v1, int v2, int v3, int v4, int v5)
+      throws IOException {
+    int t;
+    if (readInput(Td + readSuffixArray(PA + readSuffixArray(v2)))
+        > readInput(Td + readSuffixArray(PA + readSuffixArray(v3)))) {
+      t = v2;
+      v2 = v3;
+      v3 = t;
+    }
+    if (readInput(Td + readSuffixArray(PA + readSuffixArray(v4)))
+        > readInput(Td + readSuffixArray(PA + readSuffixArray(v5)))) {
+      t = v4;
+      v4 = v5;
+      v5 = t;
+    }
+    if (readInput(Td + readSuffixArray(PA + readSuffixArray(v2)))
+        > readInput(Td + readSuffixArray(PA + readSuffixArray(v4)))) {
+      t = v2;
+      v2 = v4;
+      v4 = t;
+      t = v3;
+      v3 = v5;
+      v5 = t;
+    }
+    if (readInput(Td + readSuffixArray(PA + readSuffixArray(v1)))
+        > readInput(Td + readSuffixArray(PA + readSuffixArray(v3)))) {
+      t = v1;
+      v1 = v3;
+      v3 = t;
+    }
+    if (readInput(Td + readSuffixArray(PA + readSuffixArray(v1)))
+        > readInput(Td + readSuffixArray(PA + readSuffixArray(v4)))) {
+      t = v1;
+      v1 = v4;
+      v4 = t;
+      t = v3;
+      v3 = v5;
+      v5 = t;
+    }
+    if (readInput(Td + readSuffixArray(PA + readSuffixArray(v3)))
+        > readInput(Td + readSuffixArray(PA + readSuffixArray(v4)))) {
+      return v4;
+    }
+    return v3;
+  }
+
+  /**
+   * Returns the median of three elements.
+   */
+  private final int ssMedian3(int Td, int PA, int v1, int v2, int v3) throws IOException {
+    if (readInput(Td + readSuffixArray(PA + readSuffixArray(v1)))
+        > readInput(Td + readSuffixArray(PA + readSuffixArray(v2)))) {
+      int t = v1;
+      v1 = v2;
+      v2 = t;
+    }
+    if (readInput(Td + readSuffixArray(PA + readSuffixArray(v2)))
+        > readInput(Td + readSuffixArray(PA + readSuffixArray(v3)))) {
+      if (readInput(Td + readSuffixArray(PA + readSuffixArray(v1)))
+          > readInput(Td + readSuffixArray(PA + readSuffixArray(v3)))) {
+        return v1;
+      } else {
+        return v3;
+      }
+    }
+    return v2;
+  }
+
+  /**
+   * Binary partition for substrings.
+   */
+  private final int ssPartition(int PA, int first, int last, int depth) throws IOException {
+    int a, b; // SA pointer
+    int t;
+    for (a = first - 1, b = last; ; ) {
+      for (;
+          (++a < b)
+              && ((readSuffixArray(PA + readSuffixArray(a)) + depth)
+                  >= (readSuffixArray(PA + readSuffixArray(a) + 1) + 1));
+          ) {
+        writeSuffixArray(a, ~readSuffixArray(a));
+      }
+      for (;
+          (a < --b)
+              && ((readSuffixArray(PA + readSuffixArray(b)) + depth)
+                  < (readSuffixArray(PA + readSuffixArray(b) + 1) + 1));
+          ) {}
+      if (b <= a) {
+        break;
+      }
+      t = ~readSuffixArray(b);
+      writeSuffixArray(b, readSuffixArray(a));
+      writeSuffixArray(a, t);
+    }
+    if (first < a) {
+      writeSuffixArray(first, ~readSuffixArray(first));
+    }
+    return a;
+  }
+
+  /**
+   * Simple top-down heapsort.
+   */
+  private final void ssHeapSort(int Td, int PA, int sa, int size) throws IOException {
+    int i, m, t;
+
+    m = size;
+    if ((size % 2) == 0) {
+      m--;
+      if (readInput(Td + readSuffixArray(PA + readSuffixArray(sa + (m / 2))))
+          < readInput(Td + readSuffixArray(PA + readSuffixArray(sa + m)))) {
+        swapInSA(sa + m, sa + (m / 2));
+      }
+    }
+
+    for (i = m / 2 - 1; 0 <= i; --i) {
+      ssFixDown(Td, PA, sa, i, m);
+    }
+    if ((size % 2) == 0) {
+      swapInSA(sa, sa + m);
+      ssFixDown(Td, PA, sa, 0, m);
+    }
+    for (i = m - 1; 0 < i; --i) {
+      t = readSuffixArray(sa);
+      writeSuffixArray(sa, readSuffixArray(sa + i));
+      ssFixDown(Td, PA, sa, 0, i);
+      writeSuffixArray(sa + i, t);
+    }
+  }
+
+  private final void ssFixDown(int Td, int PA, int sa, int i, int size) throws IOException {
+    int j, k;
+    int v;
+    int c, d, e;
+
+    for (v = readSuffixArray(sa + i), c = readInput(Td + readSuffixArray(PA + v));
+        (j = 2 * i + 1) < size;
+        writeSuffixArray(sa + i, readSuffixArray(sa + k)), i = k) {
+      d = readInput(Td + readSuffixArray(PA + readSuffixArray(sa + (k = j++))));
+      if (d < (e = readInput(Td + readSuffixArray(PA + readSuffixArray(sa + j))))) {
+        k = j;
+        d = e;
+      }
+      if (d <= c) {
+        break;
+      }
+    }
+    writeSuffixArray(i + sa, v);
+  }
+
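+  /** Returns floor(log2(n)) for 16-bit n, using the LG_TABLE lookup table. */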
+  private static final int ssIlg(int n) {
+    return ((n & 0xff00) != 0) ? 8 + LG_TABLE[(n >> 8) & 0xff] : LG_TABLE[(n >> 0) & 0xff];
+  }
+
+  private final void swapInSA(int a, int b) throws IOException {
+    int tmp = readSuffixArray(a);
+    writeSuffixArray(a, readSuffixArray(b));
+    writeSuffixArray(b, tmp);
+  }
+
+  /** Tandem repeat sort: refines suffix ranks at doubling comparison depths until every group is fully sorted. */
+  private final void trSort(int ISA, int n, int depth) throws IOException, InterruptedException {
+    TRBudget budget = new TRBudget(trIlg(n) * 2 / 3, n);
+    int ISAd;
+    int first, last; // SA pointers
+    int t, skip, unsorted;
+    for (ISAd = ISA + depth; -n < readSuffixArray(0); ISAd += ISAd - ISA) {
+      if (Thread.interrupted()) {
+        throw new InterruptedException();
+      }
+      first = 0;
+      skip = 0;
+      unsorted = 0;
+      do {
+        if ((t = readSuffixArray(first)) < 0) {
+          first -= t;
+          skip += t;
+        } else {
+          if (skip != 0) {
+            writeSuffixArray(first + skip, skip);
+            skip = 0;
+          }
+          last = readSuffixArray(ISA + t) + 1;
+          if (1 < (last - first)) {
+            budget.count = 0;
+            trIntroSort(ISA, ISAd, first, last, budget);
+            if (budget.count != 0) {
+              unsorted += budget.count;
+            } else {
+              skip = first - last;
+            }
+          } else if ((last - first) == 1) {
+            skip = -1;
+          }
+          first = last;
+        }
+      } while (first < n);
+      if (skip != 0) {
+        writeSuffixArray(first + skip, skip);
+      }
+      if (unsorted == 0) {
+        break;
+      }
+    }
+  }
+
+  private final TRPartitionResult trPartition(
+      int ISAd, int first, int middle, int last, int v) throws IOException {
+    int a, b, c, d, e, f; // ptr
+    int t, s, x = 0;
+
+    for (b = middle - 1;
+        (++b < last) && ((x = readSuffixArray(ISAd + readSuffixArray(b))) == v);
+        ) {}
+    if (((a = b) < last) && (x < v)) {
+      for (; (++b < last) && ((x = readSuffixArray(ISAd + readSuffixArray(b))) <= v); ) {
+        if (x == v) {
+          swapInSA(a, b);
+          ++a;
+        }
+      }
+    }
+    for (c = last; (b < --c) && ((x = readSuffixArray(ISAd + readSuffixArray(c))) == v); ) {}
+    if ((b < (d = c)) && (x > v)) {
+      for (; (b < --c) && ((x = readSuffixArray(ISAd + readSuffixArray(c))) >= v); ) {
+        if (x == v) {
+          swapInSA(c, d);
+          --d;
+        }
+      }
+    }
+    for (; b < c; ) {
+      swapInSA(c, b);
+      for (; (++b < c) && ((x = readSuffixArray(ISAd + readSuffixArray(b))) <= v); ) {
+        if (x == v) {
+          swapInSA(a, b);
+          ++a;
+        }
+      }
+      for (; (b < --c) && ((x = readSuffixArray(ISAd + readSuffixArray(c))) >= v); ) {
+        if (x == v) {
+          swapInSA(c, d);
+          --d;
+        }
+      }
+    }
+
+    if (a <= d) {
+      c = b - 1;
+      if ((s = a - first) > (t = b - a)) {
+        s = t;
+      }
+      for (e = first, f = b - s; 0 < s; --s, ++e, ++f) {
+        swapInSA(e, f);
+      }
+      if ((s = d - c) > (t = last - d - 1)) {
+        s = t;
+      }
+      for (e = b, f = last - s; 0 < s; --s, ++e, ++f) {
+        swapInSA(e, f);
+      }
+      first += (b - a);
+      last -= (d - c);
+    }
+    return new TRPartitionResult(first, last);
+  }
+
+  private final void trIntroSort(int ISA, int ISAd, int first, int last, TRBudget budget)
+      throws IOException {
+    final int STACK_SIZE = TR_STACKSIZE;
+    StackElement[] stack = new StackElement[STACK_SIZE];
+    int a = 0, b = 0, c; // pointers
+    int v, x = 0;
+    int incr = ISAd - ISA;
+    int limit, next;
+    int ssize, trlink = -1;
+    for (ssize = 0, limit = trIlg(last - first); ; ) {
+      if (limit < 0) {
+        if (limit == -1) {
+          /* tandem repeat partition */
+          TRPartitionResult res = trPartition(ISAd - incr, first, first, last, last - 1);
+          a = res.a;
+          b = res.b;
+          /* update ranks */
+          if (a < last) {
+            for (c = first, v = a - 1; c < a; ++c) {
+              writeSuffixArray(ISA + readSuffixArray(c), v);
+            }
+          }
+          if (b < last) {
+            for (c = a, v = b - 1; c < b; ++c) {
+              writeSuffixArray(ISA + readSuffixArray(c), v);
+            }
+          }
+
+          /* push */
+          if (1 < (b - a)) {
+            stack[ssize++] = new StackElement(0, a, b, 0, 0);
+            stack[ssize++] = new StackElement(ISAd - incr, first, last, -2, trlink);
+            trlink = ssize - 2;
+          }
+          if ((a - first) <= (last - b)) {
+            if (1 < (a - first)) {
+              stack[ssize++] = new StackElement(ISAd, b, last, trIlg(last - b), trlink);
+              last = a;
+              limit = trIlg(a - first);
+            } else if (1 < (last - b)) {
+              first = b;
+              limit = trIlg(last - b);
+            } else {
+              if (ssize > 0) {
+                StackElement se = stack[--ssize];
+                ISAd = se.a;
+                first = se.b;
+                last = se.c;
+                limit = se.d;
+                trlink = se.e;
+              } else {
+                return;
+              }
+            }
+          } else {
+            if (1 < (last - b)) {
+              stack[ssize++] = new StackElement(ISAd, first, a, trIlg(a - first), trlink);
+              first = b;
+              limit = trIlg(last - b);
+            } else if (1 < (a - first)) {
+              last = a;
+              limit = trIlg(a - first);
+            } else {
+              if (ssize > 0) {
+                StackElement se = stack[--ssize];
+                ISAd = se.a;
+                first = se.b;
+                last = se.c;
+                limit = se.d;
+                trlink = se.e;
+              } else {
+                return;
+              }
+            }
+          }
+        } else if (limit == -2) {
+          /* tandem repeat copy */
+          StackElement se = stack[--ssize];
+          a = se.b;
+          b = se.c;
+          if (stack[ssize].d == 0) {
+            trCopy(ISA, first, a, b, last, ISAd - ISA);
+          } else {
+            if (0 <= trlink) {
+              stack[trlink].d = -1;
+            }
+            trPartialCopy(ISA, first, a, b, last, ISAd - ISA);
+          }
+          if (ssize > 0) {
+            se = stack[--ssize];
+            ISAd = se.a;
+            first = se.b;
+            last = se.c;
+            limit = se.d;
+            trlink = se.e;
+          } else {
+            return;
+          }
+        } else {
+          /* sorted partition */
+          if (0 <= readSuffixArray(first)) {
+            a = first;
+            do {
+              writeSuffixArray(ISA + readSuffixArray(a), a);
+            } while ((++a < last) && (0 <= readSuffixArray(a)));
+            first = a;
+          }
+          if (first < last) {
+            a = first;
+            do {
+              writeSuffixArray(a, ~readSuffixArray(a));
+            } while (readSuffixArray(++a) < 0);
+            next =
+                (readSuffixArray(ISA + readSuffixArray(a))
+                        != readSuffixArray(ISAd + readSuffixArray(a)))
+                    ? trIlg(a - first + 1)
+                    : -1;
+            if (++a < last) {
+              for (b = first, v = a - 1; b < a; ++b) {
+                writeSuffixArray(ISA + readSuffixArray(b), v);
+              }
+            }
+
+            /* push */
+            if (budget.check(a - first) != 0) {
+              if ((a - first) <= (last - a)) {
+                stack[ssize++] = new StackElement(ISAd, a, last, -3, trlink);
+                ISAd += incr;
+                last = a;
+                limit = next;
+              } else {
+                if (1 < (last - a)) {
+                  stack[ssize++] = new StackElement(ISAd + incr, first, a, next, trlink);
+                  first = a;
+                  limit = -3;
+                } else {
+                  ISAd += incr;
+                  last = a;
+                  limit = next;
+                }
+              }
+            } else {
+              if (0 <= trlink) {
+                stack[trlink].d = -1;
+              }
+              if (1 < (last - a)) {
+                first = a;
+                limit = -3;
+              } else {
+                if (ssize > 0) {
+                  StackElement se = stack[--ssize];
+                  ISAd = se.a;
+                  first = se.b;
+                  last = se.c;
+                  limit = se.d;
+                  trlink = se.e;
+                } else {
+                  return;
+                }
+              }
+            }
+          } else {
+            if (ssize > 0) {
+              StackElement se = stack[--ssize];
+              ISAd = se.a;
+              first = se.b;
+              last = se.c;
+              limit = se.d;
+              trlink = se.e;
+            } else {
+              return;
+            }
+          }
+        }
+        continue;
+      }
+
+      if ((last - first) <= TR_INSERTIONSORT_THRESHOLD) {
+        trInsertionSort(ISAd, first, last);
+        limit = -3;
+        continue;
+      }
+
+      if (limit-- == 0) {
+        trHeapSort(ISAd, first, last - first);
+        for (a = last - 1; first < a; a = b) {
+          for (x = readSuffixArray(ISAd + readSuffixArray(a)), b = a - 1;
+              (first <= b) && (readSuffixArray(ISAd + readSuffixArray(b)) == x);
+              --b) {
+            writeSuffixArray(b, ~readSuffixArray(b));
+          }
+        }
+        limit = -3;
+        continue;
+      }
+      // choose pivot
+      a = trPivot(ISAd, first, last);
+      swapInSA(first, a);
+      v = readSuffixArray(ISAd + readSuffixArray(first));
+
+      // partition
+      TRPartitionResult res = trPartition(ISAd, first, first + 1, last, v);
+      a = res.a;
+      b = res.b;
+
+      if ((last - first) != (b - a)) {
+        next = (readSuffixArray(ISA + readSuffixArray(a)) != v) ? trIlg(b - a) : -1;
+
+        /* update ranks */
+        for (c = first, v = a - 1; c < a; ++c) {
+          writeSuffixArray(ISA + readSuffixArray(c), v);
+        }
+        if (b < last) {
+          for (c = a, v = b - 1; c < b; ++c) {
+            writeSuffixArray(ISA + readSuffixArray(c), v);
+          }
+        }
+
+        /* push */
+        if ((1 < (b - a)) && ((budget.check(b - a) != 0))) {
+          if ((a - first) <= (last - b)) {
+            if ((last - b) <= (b - a)) {
+              if (1 < (a - first)) {
+                stack[ssize++] = new StackElement(ISAd + incr, a, b, next, trlink);
+                stack[ssize++] = new StackElement(ISAd, b, last, limit, trlink);
+                last = a;
+              } else if (1 < (last - b)) {
+                stack[ssize++] = new StackElement(ISAd + incr, a, b, next, trlink);
+                first = b;
+              } else {
+                ISAd += incr;
+                first = a;
+                last = b;
+                limit = next;
+              }
+            } else if ((a - first) <= (b - a)) {
+              if (1 < (a - first)) {
+                stack[ssize++] = new StackElement(ISAd, b, last, limit, trlink);
+                stack[ssize++] = new StackElement(ISAd + incr, a, b, next, trlink);
+                last = a;
+              } else {
+                stack[ssize++] = new StackElement(ISAd, b, last, limit, trlink);
+                ISAd += incr;
+                first = a;
+                last = b;
+                limit = next;
+              }
+            } else {
+              stack[ssize++] = new StackElement(ISAd, b, last, limit, trlink);
+              stack[ssize++] = new StackElement(ISAd, first, a, limit, trlink);
+              ISAd += incr;
+              first = a;
+              last = b;
+              limit = next;
+            }
+          } else {
+            if ((a - first) <= (b - a)) {
+              if (1 < (last - b)) {
+                stack[ssize++] = new StackElement(ISAd + incr, a, b, next, trlink);
+                stack[ssize++] = new StackElement(ISAd, first, a, limit, trlink);
+                first = b;
+              } else if (1 < (a - first)) {
+                stack[ssize++] = new StackElement(ISAd + incr, a, b, next, trlink);
+                last = a;
+              } else {
+                ISAd += incr;
+                first = a;
+                last = b;
+                limit = next;
+              }
+            } else if ((last - b) <= (b - a)) {
+              if (1 < (last - b)) {
+                stack[ssize++] = new StackElement(ISAd, first, a, limit, trlink);
+                stack[ssize++] = new StackElement(ISAd + incr, a, b, next, trlink);
+                first = b;
+              } else {
+                stack[ssize++] = new StackElement(ISAd, first, a, limit, trlink);
+                ISAd += incr;
+                first = a;
+                last = b;
+                limit = next;
+              }
+            } else {
+              stack[ssize++] = new StackElement(ISAd, first, a, limit, trlink);
+              stack[ssize++] = new StackElement(ISAd, b, last, limit, trlink);
+              ISAd += incr;
+              first = a;
+              last = b;
+              limit = next;
+            }
+          }
+        } else {
+          if ((1 < (b - a)) && (0 <= trlink)) {
+            stack[trlink].d = -1;
+          }
+          if ((a - first) <= (last - b)) {
+            if (1 < (a - first)) {
+              stack[ssize++] = new StackElement(ISAd, b, last, limit, trlink);
+              last = a;
+            } else if (1 < (last - b)) {
+              first = b;
+            } else {
+              if (ssize > 0) {
+                StackElement se = stack[--ssize];
+                ISAd = se.a;
+                first = se.b;
+                last = se.c;
+                limit = se.d;
+                trlink = se.e;
+              } else {
+                return;
+              }
+            }
+          } else {
+            if (1 < (last - b)) {
+              stack[ssize++] = new StackElement(ISAd, first, a, limit, trlink);
+              first = b;
+            } else if (1 < (a - first)) {
+              last = a;
+            } else {
+              if (ssize > 0) {
+                StackElement se = stack[--ssize];
+                ISAd = se.a;
+                first = se.b;
+                last = se.c;
+                limit = se.d;
+                trlink = se.e;
+              } else {
+                return;
+              }
+            }
+          }
+        }
+      } else {
+        if (budget.check(last - first) != 0) {
+          limit = trIlg(last - first);
+          ISAd += incr;
+        } else {
+          if (0 <= trlink) {
+            stack[trlink].d = -1;
+          }
+          if (ssize > 0) {
+            StackElement se = stack[--ssize];
+            ISAd = se.a;
+            first = se.b;
+            last = se.c;
+            limit = se.d;
+            trlink = se.e;
+          } else {
+            return;
+          }
+        }
+      }
+    }
+  }
+
+  /**
+   * Returns the pivot element.
+   */
+  private final int trPivot(int ISAd, int first, int last) throws IOException {
+    int middle;
+    int t;
+
+    t = last - first;
+    middle = first + t / 2;
+
+    if (t <= 512) {
+      if (t <= 32) {
+        return trMedian3(ISAd, first, middle, last - 1);
+      } else {
+        t >>= 2;
+        return trMedian5(ISAd, first, first + t, middle, last - 1 - t, last - 1);
+      }
+    }
+    t >>= 3;
+    first = trMedian3(ISAd, first, first + t, first + (t << 1));
+    middle = trMedian3(ISAd, middle - t, middle, middle + t);
+    last = trMedian3(ISAd, last - 1 - (t << 1), last - 1 - t, last - 1);
+    return trMedian3(ISAd, first, middle, last);
+  }
+
+  /**
+   * Returns the median of five elements.
+   */
+  private final int trMedian5(int ISAd, int v1, int v2, int v3, int v4, int v5) throws IOException {
+    int t;
+    if (readSuffixArray(ISAd + readSuffixArray(v2)) > readSuffixArray(ISAd + readSuffixArray(v3))) {
+      t = v2;
+      v2 = v3;
+      v3 = t;
+    }
+    if (readSuffixArray(ISAd + readSuffixArray(v4)) > readSuffixArray(ISAd + readSuffixArray(v5))) {
+      t = v4;
+      v4 = v5;
+      v5 = t;
+    }
+    if (readSuffixArray(ISAd + readSuffixArray(v2)) > readSuffixArray(ISAd + readSuffixArray(v4))) {
+      t = v2;
+      v2 = v4;
+      v4 = t;
+      t = v3;
+      v3 = v5;
+      v5 = t;
+    }
+    if (readSuffixArray(ISAd + readSuffixArray(v1)) > readSuffixArray(ISAd + readSuffixArray(v3))) {
+      t = v1;
+      v1 = v3;
+      v3 = t;
+    }
+    if (readSuffixArray(ISAd + readSuffixArray(v1)) > readSuffixArray(ISAd + readSuffixArray(v4))) {
+      t = v1;
+      v1 = v4;
+      v4 = t;
+      t = v3;
+      v3 = v5;
+      v5 = t;
+    }
+    if (readSuffixArray(ISAd + readSuffixArray(v3)) > readSuffixArray(ISAd + readSuffixArray(v4))) {
+      return v4;
+    }
+    return v3;
+  }
+
+  /**
+   * Returns the median of three elements.
+   */
+  private final int trMedian3(int ISAd, int v1, int v2, int v3) throws IOException {
+    if (readSuffixArray(ISAd + readSuffixArray(v1)) > readSuffixArray(ISAd + readSuffixArray(v2))) {
+      int t = v1;
+      v1 = v2;
+      v2 = t;
+    }
+    if (readSuffixArray(ISAd + readSuffixArray(v2)) > readSuffixArray(ISAd + readSuffixArray(v3))) {
+      if (readSuffixArray(ISAd + readSuffixArray(v1))
+          > readSuffixArray(ISAd + readSuffixArray(v3))) {
+        return v1;
+      } else {
+        return v3;
+      }
+    }
+    return v2;
+  }
+
+  private final void trHeapSort(int ISAd, int sa, int size) throws IOException {
+    int i, m, t;
+
+    m = size;
+    if ((size % 2) == 0) {
+      m--;
+      if (readSuffixArray(ISAd + readSuffixArray(sa + m / 2))
+          < readSuffixArray(ISAd + readSuffixArray(sa + m))) {
+        swapInSA(sa + m, sa + m / 2);
+      }
+    }
+
+    for (i = m / 2 - 1; 0 <= i; --i) {
+      trFixDown(ISAd, sa, i, m);
+    }
+    if ((size % 2) == 0) {
+      swapInSA(sa, sa + m);
+      trFixDown(ISAd, sa, 0, m);
+    }
+    for (i = m - 1; 0 < i; --i) {
+      t = readSuffixArray(sa);
+      writeSuffixArray(sa, readSuffixArray(sa + i));
+      trFixDown(ISAd, sa, 0, i);
+      writeSuffixArray(sa + i, t);
+    }
+  }
+
+  private final void trFixDown(int ISAd, int sa, int i, int size) throws IOException {
+    int j, k;
+    int v;
+    int c, d, e;
+
+    for (v = readSuffixArray(sa + i), c = readSuffixArray(ISAd + v);
+        (j = 2 * i + 1) < size;
+        writeSuffixArray(sa + i, readSuffixArray(sa + k)), i = k) {
+      d = readSuffixArray(ISAd + readSuffixArray(sa + (k = j++)));
+      if (d < (e = readSuffixArray(ISAd + readSuffixArray(sa + j)))) {
+        k = j;
+        d = e;
+      }
+      if (d <= c) {
+        break;
+      }
+    }
+    writeSuffixArray(sa + i, v);
+  }
+
+  private final void trInsertionSort(int ISAd, int first, int last) throws IOException {
+    int a, b; // SA ptr
+    int t, r;
+
+    for (a = first + 1; a < last; ++a) {
+      for (t = readSuffixArray(a), b = a - 1;
+          0 > (r = readSuffixArray(ISAd + t) - readSuffixArray(ISAd + readSuffixArray(b)));
+          ) {
+        do {
+          writeSuffixArray(b + 1, readSuffixArray(b));
+        } while ((first <= --b) && (readSuffixArray(b) < 0));
+        if (b < first) {
+          break;
+        }
+      }
+      if (r == 0) {
+        writeSuffixArray(b, ~readSuffixArray(b));
+      }
+      writeSuffixArray(b + 1, t);
+    }
+  }
+
+  private final void trPartialCopy(int ISA, int first, int a, int b, int last, int depth)
+      throws IOException {
+    int c, d, e; // ptr
+    int s, v;
+    int rank, lastrank, newrank = -1;
+
+    v = b - 1;
+    lastrank = -1;
+    for (c = first, d = a - 1; c <= d; ++c) {
+      if ((0 <= (s = readSuffixArray(c) - depth)) && (readSuffixArray(ISA + s) == v)) {
+        writeSuffixArray(++d, s);
+        rank = readSuffixArray(ISA + s + depth);
+        if (lastrank != rank) {
+          lastrank = rank;
+          newrank = d;
+        }
+        writeSuffixArray(ISA + s, newrank);
+      }
+    }
+
+    lastrank = -1;
+    for (e = d; first <= e; --e) {
+      rank = readSuffixArray(ISA + readSuffixArray(e));
+      if (lastrank != rank) {
+        lastrank = rank;
+        newrank = e;
+      }
+      if (newrank != rank) {
+        writeSuffixArray(ISA + readSuffixArray(e), newrank);
+      }
+    }
+
+    lastrank = -1;
+    for (c = last - 1, e = d + 1, d = b; e < d; --c) {
+      if ((0 <= (s = readSuffixArray(c) - depth)) && (readSuffixArray(ISA + s) == v)) {
+        writeSuffixArray(--d, s);
+        rank = readSuffixArray(ISA + s + depth);
+        if (lastrank != rank) {
+          lastrank = rank;
+          newrank = d;
+        }
+        writeSuffixArray(ISA + s, newrank);
+      }
+    }
+  }
+
+  /**
+   * Sort suffixes of middle partition by using sorted order of suffixes of left and right
+   * partition.
+   */
+  private final void trCopy(int ISA, int first, int a, int b, int last, int depth)
+      throws IOException {
+    int c, d, e; // ptr
+    int s, v;
+
+    v = b - 1;
+    for (c = first, d = a - 1; c <= d; ++c) {
+      s = readSuffixArray(c) - depth;
+      if ((0 <= s) && (readSuffixArray(ISA + s) == v)) {
+        writeSuffixArray(++d, s);
+        writeSuffixArray(ISA + s, d);
+      }
+    }
+    for (c = last - 1, e = d + 1, d = b; e < d; --c) {
+      s = readSuffixArray(c) - depth;
+      if ((0 <= s) && (readSuffixArray(ISA + s) == v)) {
+        writeSuffixArray(--d, s);
+        writeSuffixArray(ISA + s, d);
+      }
+    }
+  }
+
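+  /**
+   * Computes floor(log2(n)) for positive n by finding the highest non-zero byte and looking up
+   * the remaining bits in the 256-entry LG_TABLE defined earlier in this class.
+   */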
+  private static final int trIlg(int n) {
+    return ((n & 0xffff0000) != 0)
+        ? (((n & 0xff000000) != 0)
+            ? 24 + LG_TABLE[(n >> 24) & 0xff]
+            : 16 + LG_TABLE[(n >> 16) & 0xff])
+        : (((n & 0x0000ff00) != 0) ? 8 + LG_TABLE[(n >> 8) & 0xff] : LG_TABLE[(n >> 0) & 0xff]);
+  }
+
+  private static final class StackElement {
+    final int a, b, c, e;
+    int d;
+
+    StackElement(int a, int b, int c, int d, int e) {
+      this.a = a;
+      this.b = b;
+      this.c = c;
+      this.d = d;
+      this.e = e;
+    }
+
+    StackElement(int a, int b, int c, int d) {
+      this(a, b, c, d, 0);
+    }
+  }
+
+  private static final class TRBudget {
+    int chance;
+    int remain;
+    int incval;
+    int count;
+
+    private TRBudget(int chance, int incval) {
+      this.chance = chance;
+      this.remain = incval;
+      this.incval = incval;
+    }
+
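+    // check(size) spends `size` units of the remaining budget. When the budget runs dry, one of
+    // the available "chances" is consumed and the budget is refilled from incval; once no
+    // chances remain, it returns 0 and tallies the unpaid work in `count` (a brief note on the
+    // imported divsufsort code).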
+    private int check(int size) {
+      if (size <= this.remain) {
+        this.remain -= size;
+        return 1;
+      }
+      if (this.chance == 0) {
+        this.count += size;
+        return 0;
+      }
+      this.remain += this.incval - size;
+      this.chance -= 1;
+      return 1;
+    }
+  }
+
+  private static final class TRPartitionResult {
+    final int a;
+    final int b;
+
+    public TRPartitionResult(int a, int b) {
+      this.a = a;
+      this.b = b;
+    }
+  }
+
+  private int readInput(long pos) throws IOException {
+    input.seek(pos);
+    return input.readUnsignedByte();
+  }
+
+  private int readSuffixArray(long pos) throws IOException {
+    /*
+     * This is an ugly hack because the imported code omits the first entry in the suffix array
+     * (which is always the length of the array) and shifts everything by one. So we do the
+     * correction here.
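+     * For example, a logical read at position 0 returns the int stored at aligned index 1.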
+     */
+    suffixArray.seekToIntAligned(pos + 1);
+    return suffixArray.readInt();
+  }
+
+  private int writeSuffixArray(long pos, int write) throws IOException {
+    /*
+     * This is an ugly hack because the imported code omits the first entry in the suffix array
+     * (which is always the length of the array) and shifts everything by one. So we do the
+     * correction here.
+     */
+    suffixArray.seekToIntAligned(pos + 1);
+    suffixArray.writeInt(write);
+    return write;
+  }
+}
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/Matcher.java b/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/Matcher.java
new file mode 100644
index 0000000..2e496df
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/Matcher.java
@@ -0,0 +1,49 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator.bsdiff;
+
+import java.io.IOException;
+
+/**
+ * Helper which iterates through |newData| finding the longest valid exact matches between
+ * |newData| and |oldData|. The interface exists for the sake of testing.
+ */
+interface Matcher {
+  /**
+   * Determine the range for the next match, and store it in member state.
+   * @return a {@link NextMatch} describing the result
+   */
+  NextMatch next() throws IOException, InterruptedException;
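+
+  // Minimal usage sketch (illustrative only; "matcher" and "consume" are hypothetical names):
+  //   Matcher.NextMatch match = matcher.next();
+  //   while (match.didFindMatch) {
+  //     consume(match.oldPosition, match.newPosition);
+  //     match = matcher.next();
+  //   }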
+
+  /**
+   * Contains a boolean which indicates whether a match was found, the old position (if a match was
+   * found), and the new position (if a match was found).
+   */
+  static class NextMatch {
+    final boolean didFindMatch;
+    final int oldPosition;
+    final int newPosition;
+
+    static NextMatch of(boolean didFindMatch, int oldPosition, int newPosition) {
+      return new NextMatch(didFindMatch, oldPosition, newPosition);
+    }
+
+    private NextMatch(boolean didFindMatch, int oldPosition, int newPosition) {
+      this.didFindMatch = didFindMatch;
+      this.oldPosition = oldPosition;
+      this.newPosition = newPosition;
+    }
+  }
+}
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/RandomAccessObject.java b/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/RandomAccessObject.java
new file mode 100644
index 0000000..6a8789a
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/RandomAccessObject.java
@@ -0,0 +1,495 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator.bsdiff;
+
+import java.io.Closeable;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+
+// TODO(andrewhayden): clean up the implementations, we only really need two and they can be in
+// separate files.
+
+/**
+ * Interface offering random access to an underlying data source. This interface exists to allow
+ * BsDiff to use either a RandomAccessFile for disk-based I/O, or a byte[] for fully in-memory
+ * operation.
+ */
+public interface RandomAccessObject extends DataInput, DataOutput, Closeable {
+
+  /**
+   * Returns the length of the file or byte array associated with the RandomAccessObject.
+   *
+   * @return the length of the file or byte array associated with the RandomAccessObject
+   * @throws IOException if unable to determine the length of the file, when backed by a file
+   */
+  public long length() throws IOException;
+
+  /**
+   * Seeks to a specified position, in bytes, into the file or byte array.
+   *
+   * @param pos the position to seek to
+   * @throws IOException if seeking fails
+   */
+  public void seek(long pos) throws IOException;
+
+  /**
+   * Seeks to a specified integer-aligned position in the associated file or byte array. For
+   * example, seekToIntAligned(5) seeks to the beginning of the integer at index 5, in other words
+   * to byte offset 20. In general, seekToIntAligned(n) seeks to byte 4n.
+   *
+   * @param pos the position to seek to
+   * @throws IOException if seeking fails
+   */
+  public void seekToIntAligned(long pos) throws IOException;
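+
+  // Note (illustrative): seekToIntAligned(n) is equivalent to seek(4L * n); e.g.,
+  // seekToIntAligned(3) followed by readInt() reads bytes 12..15 as a single big-endian int.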
+
+  /**
+   * A {@link RandomAccessFile}-based implementation of {@link RandomAccessObject} which just
+   * delegates all operations to the equivalents in {@link RandomAccessFile}. Slower than the
+   * {@link RandomAccessMmapObject} implementation.
+   */
+  public static final class RandomAccessFileObject extends RandomAccessFile
+      implements RandomAccessObject {
+    private final boolean mShouldDeleteFileOnClose;
+    private final File mFile;
+
+    /**
+     * This constructor takes in a temporary file but does not take ownership of it; the file
+     * will not be deleted on {@link #close()}.
+     *
+     * @param tempFile the file backing this object
+     * @param mode the mode to use, e.g. "r" or "w" for read or write
+     * @throws IOException if unable to open the file for the specified mode
+     */
+    public RandomAccessFileObject(final File tempFile, final String mode) throws IOException {
+      this(tempFile, mode, false);
+    }
+
+    /**
+     * This constructor takes in a temporary file. If deleteFileOnClose is true, the constructor
+     * takes ownership of that file, and this file is deleted on close().
+     *
+     * @param tempFile the file backing this object
+     * @param mode the mode to use, e.g. "r" or "w" for read or write
+     * @param deleteFileOnClose if true the constructor takes ownership of that file, and this file
+     *     is deleted on close().
+     * @throws IOException if unable to open the file for the specified mode
+     */
+    // TODO(hartmanng): rethink the handling of these temp files. It's confusing and shouldn't
+    // really be the responsibility of RandomAccessObject.
+    public RandomAccessFileObject(final File tempFile, final String mode, boolean deleteFileOnClose)
+        throws IOException {
+      super(tempFile, mode);
+      mShouldDeleteFileOnClose = deleteFileOnClose;
+      mFile = tempFile;
+      if (mShouldDeleteFileOnClose) {
+        mFile.deleteOnExit();
+      }
+    }
+
+    @Override
+    public void seekToIntAligned(long pos) throws IOException {
+      seek(pos * 4);
+    }
+
+    /**
+     * Close the associated file. Also delete the associated temp file if specified in the
+     * constructor. This should be called on every RandomAccessObject when it is no longer needed.
+     */
+    // TODO(hartmanng): rethink the handling of these temp files. It's confusing and shouldn't
+    // really be the responsibility of RandomAccessObject.
+    @Override
+    public void close() throws IOException {
+      super.close();
+      if (mShouldDeleteFileOnClose) {
+        mFile.delete();
+      }
+    }
+  }
+
+  /**
+   * An array-based implementation of {@link RandomAccessObject} for entirely in-memory operations.
+   */
+  public static class RandomAccessByteArrayObject implements RandomAccessObject {
+    protected ByteBuffer mByteBuffer;
+
+    /**
+     * The passed-in byte array will be treated as big-endian when dealing with ints.
+     *
+     * @param byteArray the byte array to wrap
+     */
+    public RandomAccessByteArrayObject(final byte[] byteArray) {
+      mByteBuffer = ByteBuffer.wrap(byteArray);
+    }
+
+    /**
+     * Allocate a new ByteBuffer of given length. This will be treated as big-endian.
+     *
+     * @param length the length of the buffer to allocate
+     */
+    public RandomAccessByteArrayObject(final int length) {
+      mByteBuffer = ByteBuffer.allocate(length);
+    }
+
+    protected RandomAccessByteArrayObject() {
+      // No-op, this is just used by the extending class RandomAccessMmapObject.
+    }
+
+    @Override
+    public long length() {
+      return mByteBuffer.capacity();
+    }
+
+    @Override
+    public byte readByte() {
+      return mByteBuffer.get();
+    }
+
+    /**
+     * Reads an integer from the underlying data array in big-endian order.
+     */
+    @Override
+    public int readInt() {
+      return mByteBuffer.getInt();
+    }
+
+    public void writeByte(byte b) {
+      mByteBuffer.put(b);
+    }
+
+    @Override
+    public void writeInt(int i) {
+      mByteBuffer.putInt(i);
+    }
+
+    /**
+     * RandomAccessByteArrayObject.seek() only supports addresses up to 2^31-1, because it must
+     * support a byte[] backend (and in Java, arrays are indexable only by int). This means it can
+     * only seek within a 2GiB byte[].
+     */
+    @Override
+    public void seek(long pos) {
+      if (pos > Integer.MAX_VALUE) {
+        throw new IllegalArgumentException(
+            "RandomAccessByteArrayObject can only handle seek() "
+                + "addresses up to Integer.MAX_VALUE.");
+      }
+
+      mByteBuffer.position((int) pos);
+    }
+
+    @Override
+    public void seekToIntAligned(long pos) {
+      seek(pos * 4);
+    }
+
+    @Override
+    public void close() throws IOException {
+      // Nothing necessary.
+    }
+
+    @Override
+    public boolean readBoolean() {
+      return readByte() != 0;
+    }
+
+    @Override
+    public char readChar() {
+      return mByteBuffer.getChar();
+    }
+
+    @Override
+    public double readDouble() {
+      return mByteBuffer.getDouble();
+    }
+
+    @Override
+    public float readFloat() {
+      return mByteBuffer.getFloat();
+    }
+
+    @Override
+    public void readFully(byte[] b) {
+      mByteBuffer.get(b);
+    }
+
+    @Override
+    public void readFully(byte[] b, int off, int len) {
+      mByteBuffer.get(b, off, len);
+    }
+
+    @Override
+    public String readLine() {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public long readLong() {
+      return mByteBuffer.getLong();
+    }
+
+    @Override
+    public short readShort() {
+      return mByteBuffer.getShort();
+    }
+
+    @Override
+    public int readUnsignedByte() {
+      return mByteBuffer.get() & 0xff;
+    }
+
+    @Override
+    public int readUnsignedShort() {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public String readUTF() {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int skipBytes(int n) {
+      mByteBuffer.position(mByteBuffer.position() + n);
+      return n;
+    }
+
+    @Override
+    public void write(byte[] b) {
+      mByteBuffer.put(b);
+    }
+
+    @Override
+    public void write(byte[] b, int off, int len) {
+      mByteBuffer.put(b, off, len);
+    }
+
+    @Override
+    public void write(int b) {
+      writeByte((byte) b);
+    }
+
+    @Override
+    public void writeBoolean(boolean v) {
+      writeByte(v ? (byte) 1 : (byte) 0);
+    }
+
+    @Override
+    public void writeByte(int v) {
+      writeByte((byte) v);
+    }
+
+    @Override
+    public void writeBytes(String s) {
+      for (int x = 0; x < s.length(); x++) {
+        writeByte((byte) s.charAt(x));
+      }
+    }
+
+    @Override
+    public void writeChar(int v) {
+      mByteBuffer.putChar((char) v);
+    }
+
+    @Override
+    public void writeChars(String s) {
+      for (int x = 0; x < s.length(); x++) {
+        writeChar(s.charAt(x));
+      }
+    }
+
+    @Override
+    public void writeDouble(double v) {
+      mByteBuffer.putDouble(v);
+    }
+
+    @Override
+    public void writeFloat(float v) {
+      mByteBuffer.putFloat(v);
+    }
+
+    @Override
+    public void writeLong(long v) {
+      mByteBuffer.putLong(v);
+    }
+
+    @Override
+    public void writeShort(int v) {
+      mByteBuffer.putShort((short) v);
+    }
+
+    @Override
+    public void writeUTF(String s) {
+      throw new UnsupportedOperationException();
+    }
+  }
+
+  /**
+   * A {@link ByteBuffer}-based implementation of {@link RandomAccessObject} that uses
+   * memory-mapped files on disk, but is significantly faster than the
+   * {@link RandomAccessFileObject} implementation.
+   */
+  public static final class RandomAccessMmapObject extends RandomAccessByteArrayObject {
+    private final boolean mShouldDeleteFileOnRelease;
+    private final File mFile;
+    private final FileChannel mFileChannel;
+
+    public RandomAccessMmapObject(final RandomAccessFile randomAccessFile, String mode)
+        throws IOException, IllegalArgumentException {
+      if (randomAccessFile.length() > Integer.MAX_VALUE) {
+        throw new IllegalArgumentException("Only files up to 2GiB in size are supported.");
+      }
+
+      FileChannel.MapMode mapMode;
+      if (mode.equals("r")) {
+        mapMode = FileChannel.MapMode.READ_ONLY;
+      } else {
+        mapMode = FileChannel.MapMode.READ_WRITE;
+      }
+
+      mFileChannel = randomAccessFile.getChannel();
+      mByteBuffer = mFileChannel.map(mapMode, 0, randomAccessFile.length());
+      mByteBuffer.position(0);
+      mShouldDeleteFileOnRelease = false;
+      mFile = null;
+    }
+
+    /**
+     * This constructor creates a temporary file. The file is deleted on close(), so be sure to
+     * call close() when you're done; otherwise stray files will be left behind.
+     *
+     * @param tempFileName the prefix used to create the temporary file backing this object
+     * @param mode the mode to use, e.g. "r" or "w" for read or write
+     * @param length the size of the file to be read or written
+     * @throws IOException if unable to open the file for the specified mode
+     * @throws IllegalArgumentException if the size of the file is too great
+     */
+    // TODO(hartmanng): rethink the handling of these temp files. It's confusing and shouldn't
+    // really be the responsibility of RandomAccessObject.
+    @SuppressWarnings("resource") // RandomAccessFile deliberately left open
+    public RandomAccessMmapObject(final String tempFileName, final String mode, long length)
+        throws IOException, IllegalArgumentException {
+      if (length > Integer.MAX_VALUE) {
+        throw new IllegalArgumentException(
+            "RandomAccessMmapObject only supports file sizes up to " + "Integer.MAX_VALUE.");
+      }
+
+      mFile = File.createTempFile(tempFileName, "temp");
+      mFile.deleteOnExit();
+      mShouldDeleteFileOnRelease = true;
+
+      FileChannel.MapMode mapMode;
+      if (mode.equals("r")) {
+        mapMode = FileChannel.MapMode.READ_ONLY;
+      } else {
+        mapMode = FileChannel.MapMode.READ_WRITE;
+      }
+
+      RandomAccessFile file = null;
+      try {
+        file = new RandomAccessFile(mFile, mode);
+        mFileChannel = file.getChannel();
+        mByteBuffer = mFileChannel.map(mapMode, 0, (int) length);
+        mByteBuffer.position(0);
+      } catch (IOException e) {
+        if (file != null) {
+          try {
+            file.close();
+          } catch (Exception ignored) {
+            // Nothing more can be done
+          }
+        }
+        close();
+        throw new IOException("Unable to open file", e);
+      }
+    }
+
+    /**
+     * This constructor takes in a temporary file, and takes ownership of that file. This file is
+     * deleted on close() OR IF THE CONSTRUCTOR FAILS. The main purpose of this constructor is to
+     * test close() on the passed-in file.
+     *
+     * @param tempFile the file backing this object
+     * @param mode the mode to use, e.g. "r" or "w" for read or write
+     * @throws IOException if unable to open the file for the specified mode
+     * @throws IllegalArgumentException if the size of the file is too great
+     */
+    // TODO(hartmanng): rethink the handling of these temp files. It's confusing and shouldn't
+    // really be the responsibility of RandomAccessObject.
+    @SuppressWarnings("resource") // RandomAccessFile deliberately left open
+    public RandomAccessMmapObject(final File tempFile, final String mode)
+        throws IOException, IllegalArgumentException {
+      if (tempFile.length() > Integer.MAX_VALUE) {
+        throw new IllegalArgumentException("Only files up to 2GiB in size are supported.");
+      }
+
+      mFile = tempFile;
+      mFile.deleteOnExit();
+      mShouldDeleteFileOnRelease = true;
+
+      FileChannel.MapMode mapMode;
+      if (mode.equals("r")) {
+        mapMode = FileChannel.MapMode.READ_ONLY;
+      } else {
+        mapMode = FileChannel.MapMode.READ_WRITE;
+      }
+
+      RandomAccessFile file = null;
+      try {
+        file = new RandomAccessFile(mFile, mode);
+        mFileChannel = file.getChannel();
+        mByteBuffer = mFileChannel.map(mapMode, 0, tempFile.length());
+        mByteBuffer.position(0);
+      } catch (IOException e) {
+        if (file != null) {
+          try {
+            file.close();
+          } catch (Exception ignored) {
+            // Nothing more can be done
+          }
+        }
+        close();
+        throw new IOException("Unable to open file", e);
+      }
+    }
+
+    @Override
+    public void close() throws IOException {
+      if (mFileChannel != null) {
+        mFileChannel.close();
+      }
+
+      // There is a long-standing bug with memory mapped objects in Java that requires the JVM to
+      // finalize the MappedByteBuffer reference before the unmap operation is performed. This leaks
+      // file handles and fills the virtual address space. Worse, on some systems (Windows for one)
+      // the active mmap prevents the temp file from being deleted - even if File.deleteOnExit() is
+      // used. The only safe way to ensure that file handles and actual files are not leaked by this
+      // class is to force an explicit full gc after explicitly nulling the MappedByteBuffer
+      // reference. This has to be done before attempting file deletion.
+      //
+      // See https://github.com/andrewhayden/archive-patcher/issues/5 for more information.
+      mByteBuffer = null;
+      System.gc();
+
+      if (mShouldDeleteFileOnRelease && mFile != null) {
+        mFile.delete();
+      }
+    }
+  }
+}
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/RandomAccessObjectFactory.java b/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/RandomAccessObjectFactory.java
new file mode 100644
index 0000000..24369fa
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/RandomAccessObjectFactory.java
@@ -0,0 +1,109 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator.bsdiff;
+
+import com.google.archivepatcher.generator.bsdiff.RandomAccessObject.RandomAccessByteArrayObject;
+import com.google.archivepatcher.generator.bsdiff.RandomAccessObject.RandomAccessFileObject;
+import com.google.archivepatcher.generator.bsdiff.RandomAccessObject.RandomAccessMmapObject;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+
+/**
+ * A factory for creating instances of {@link RandomAccessObject}. BsDiff needs to store some
+ * ancillary data of size proportional to the binary to be patched. This allows abstraction of the
+ * allocation so that BsDiff can run either entirely in-memory (faster) or with file-backed swap
+ * (handles bigger inputs without consuming inordinate amounts of memory).
+ */
+public interface RandomAccessObjectFactory {
+  public RandomAccessObject create(int size) throws IOException;
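+
+  // Illustrative selection between implementations (IN_MEMORY_THRESHOLD is a hypothetical name;
+  // callers make the real choice):
+  //   RandomAccessObjectFactory factory =
+  //       (size <= IN_MEMORY_THRESHOLD)
+  //           ? new RandomAccessByteArrayObjectFactory()
+  //           : new RandomAccessMmapObjectFactory("rw");
+  //   RandomAccessObject scratch = factory.create(size);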
+
+  /**
+   * A factory that produces {@link RandomAccessFileObject} instances backed by temp files.
+   */
+  // TODO(hartmanng): rethink the handling of these temp files. It's confusing and shouldn't
+  // really be the responsibility of RandomAccessObject.
+  public static final class RandomAccessFileObjectFactory implements RandomAccessObjectFactory {
+    private static final String FILE_NAME_PREFIX = "wavsprafof";
+    private final String mMode;
+
+    /**
+     * Factory for a RandomAccessFileObject.
+     * @param mode the file mode string ("r", "rw", etc.; see documentation for
+     * {@link RandomAccessFile})
+     */
+    public RandomAccessFileObjectFactory(String mode) {
+      mMode = mode;
+    }
+
+    /**
+     * Creates a temp file and returns a {@link RandomAccessFileObject} backed by it. The temp
+     * file does not need to be explicitly managed (deleted) by the caller, as long as the caller
+     * ensures
+     * {@link RandomAccessObject#close()} is called when the object is no longer needed.
+     */
+    // TODO(hartmanng): rethink the handling of these temp files. It's confusing and shouldn't
+    // really be the responsibility of RandomAccessObject.
+    @Override
+    public RandomAccessObject create(int size) throws IOException {
+      return new RandomAccessObject.RandomAccessFileObject(
+          File.createTempFile(FILE_NAME_PREFIX, "temp"), mMode, true);
+    }
+  }
+
+  /**
+   * A factory that produces {@link RandomAccessByteArrayObject} instances backed by memory.
+   */
+  public static final class RandomAccessByteArrayObjectFactory
+      implements RandomAccessObjectFactory {
+    @Override
+    public RandomAccessObject create(int size) {
+      return new RandomAccessObject.RandomAccessByteArrayObject(size);
+    }
+  }
+
+  /**
+   * A factory that produces {@link RandomAccessMmapObject} instances backed by temp files.
+   */
+  // TODO(hartmanng): rethink the handling of these temp files. It's confusing and shouldn't
+  // really be the responsibility of RandomAccessObject.
+  public static final class RandomAccessMmapObjectFactory implements RandomAccessObjectFactory {
+    private static final String FILE_NAME_PREFIX = "wavsprafof";
+    private final String mMode;
+
+    /**
+     * Factory for a RandomAccessMmapObject.
+     * @param mode the file mode string ("r", "rw", etc.; see documentation for
+     * {@link RandomAccessFile})
+     */
+    public RandomAccessMmapObjectFactory(String mode) {
+      mMode = mode;
+    }
+
+    /**
+     * Creates a temp file and returns a {@link RandomAccessMmapObject} backed by it. The temp
+     * file does not need to be explicitly managed (deleted) by the caller, as long as the caller
+     * ensures
+     * {@link RandomAccessObject#close()} is called when the object is no longer needed.
+     */
+    // TODO(hartmanng): rethink the handling of these temp files. It's confusing and shouldn't
+    // really be the responsibility of RandomAccessObject.
+    @Override
+    public RandomAccessObject create(int size) throws IOException {
+      return new RandomAccessObject.RandomAccessMmapObject(FILE_NAME_PREFIX, mMode, size);
+    }
+  }
+}
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/SuffixSorter.java b/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/SuffixSorter.java
new file mode 100644
index 0000000..a6231ce
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/bsdiff/SuffixSorter.java
@@ -0,0 +1,36 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator.bsdiff;
+
+import java.io.IOException;
+
+/**
+ * An algorithm that performs a suffix sort on a given input and returns a suffix array.
+ * See https://en.wikipedia.org/wiki/Suffix_array
+ */
+public interface SuffixSorter {
+
+  /**
+   * Perform a "suffix sort". Note: the returned {@link RandomAccessObject} should be closed by the
+   * caller.
+   *
+   * @param data the data to sort
+   * @return the suffix array, as a {@link RandomAccessObject}
+   * @throws IOException if unable to read data
+   * @throws InterruptedException if any thread interrupts this thread
+   */
+  RandomAccessObject suffixSort(RandomAccessObject data) throws IOException, InterruptedException;
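+
+  // Conceptual example: the suffixes of "banana" in lexicographic order are "a", "ana", "anana",
+  // "banana", "na", "nana", so its suffix array is [5, 3, 1, 0, 4, 2] (the exact layout inside
+  // the returned RandomAccessObject is implementation-defined).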
+}
+
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/similarity/Crc32SimilarityFinder.java b/generator/src/main/java/com/google/archivepatcher/generator/similarity/Crc32SimilarityFinder.java
new file mode 100644
index 0000000..0dc4185
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/similarity/Crc32SimilarityFinder.java
@@ -0,0 +1,66 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator.similarity;
+
+import com.google.archivepatcher.generator.MinimalZipEntry;
+
+import java.io.File;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Detects identical files on the basis of the CRC32 of uncompressed content. All entries that have
+ * the same CRC32 will be identified as similar (and presumably are identical, in the absence of
+ * hash collisions).
+ */
+public class Crc32SimilarityFinder extends SimilarityFinder {
+
+  /**
+   * All entries in the base archive, organized by CRC32.
+   */
+  private final Map<Long, List<MinimalZipEntry>> baseEntriesByCrc32 = new HashMap<>();
+
+  /**
+   * Constructs a new similarity finder with the specified parameters.
+   * @param baseArchive the base archive that contains the entries to be searched
+   * @param baseEntries the entries in the base archive that are eligible to be searched
+   */
+  public Crc32SimilarityFinder(File baseArchive, Collection<MinimalZipEntry> baseEntries) {
+    super(baseArchive, baseEntries);
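+    // Index every base entry by the CRC32 of its uncompressed content so that lookups in
+    // findSimilarFiles(...) are O(1) on average.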
+    for (MinimalZipEntry oldEntry : baseEntries) {
+      long crc32 = oldEntry.getCrc32OfUncompressedData();
+      List<MinimalZipEntry> entriesForCrc32 = baseEntriesByCrc32.get(crc32);
+      if (entriesForCrc32 == null) {
+        entriesForCrc32 = new LinkedList<>();
+        baseEntriesByCrc32.put(crc32, entriesForCrc32);
+      }
+      entriesForCrc32.add(oldEntry);
+    }
+  }
+
+  @Override
+  public List<MinimalZipEntry> findSimilarFiles(File newArchive, MinimalZipEntry newEntry) {
+    List<MinimalZipEntry> matchedEntries =
+        baseEntriesByCrc32.get(newEntry.getCrc32OfUncompressedData());
+    if (matchedEntries == null) {
+      return Collections.emptyList();
+    }
+    return Collections.unmodifiableList(matchedEntries);
+  }
+}
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/similarity/SimilarityFinder.java b/generator/src/main/java/com/google/archivepatcher/generator/similarity/SimilarityFinder.java
new file mode 100644
index 0000000..37a1257
--- /dev/null
+++ b/generator/src/main/java/com/google/archivepatcher/generator/similarity/SimilarityFinder.java
@@ -0,0 +1,59 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator.similarity;
+
+import java.io.File;
+import java.util.Collection;
+import java.util.List;
+
+import com.google.archivepatcher.generator.MinimalZipEntry;
+
+/**
+ * A class that analyzes an archive to find files similar to a specified file.
+ */
+public abstract class SimilarityFinder {
+
+  /**
+   * The base archive that contains the entries to be searched.
+   */
+  protected final File baseArchive;
+
+  /**
+   * The entries in the base archive that are eligible to be searched.
+   */
+  protected final Collection<MinimalZipEntry> baseEntries;
+
+  /**
+   * Create a new instance to check for similarity of arbitrary files against the specified entries
+   * in the specified archive.
+   * @param baseArchive the base archive that contains the entries to be scored against
+   * @param baseEntries the entries in the base archive that are eligible to be scored against.
+   */
+  public SimilarityFinder(File baseArchive, Collection<MinimalZipEntry> baseEntries) {
+    this.baseArchive = baseArchive;
+    this.baseEntries = baseEntries;
+  }
+
+  /**
+   * Searches for files similar to the specified entry in the specified new archive against all of
+   * the available entries in the base archive.
+   * @param newArchive the new archive that contains the new entry
+   * @param newEntry the new entry to compare against the entries in the base archive
+   * @return a {@link List} of {@link MinimalZipEntry} entries (possibly empty but never null) from
+   * the base archive that are similar to the new entry; if the list has more than one entry, the
+   * entries should be in order from most similar to least similar.
+   */
+  public abstract List<MinimalZipEntry> findSimilarFiles(File newArchive, MinimalZipEntry newEntry);
+}
diff --git a/generator/src/test/java/com/google/archivepatcher/generator/ByteArrayHolderTest.java b/generator/src/test/java/com/google/archivepatcher/generator/ByteArrayHolderTest.java
new file mode 100644
index 0000000..9ec338f
--- /dev/null
+++ b/generator/src/test/java/com/google/archivepatcher/generator/ByteArrayHolderTest.java
@@ -0,0 +1,68 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/**
+ * Tests for {@link ByteArrayHolder}.
+ */
+@RunWith(JUnit4.class)
+@SuppressWarnings("javadoc")
+public class ByteArrayHolderTest {
+
+  @Test
+  public void testGetters() {
+    byte[] data = "hello world".getBytes();
+    ByteArrayHolder byteArrayHolder = new ByteArrayHolder(data);
+    Assert.assertSame(data, byteArrayHolder.getData());
+  }
+
+  @Test
+  public void testHashCode() {
+    byte[] data1a = "hello world".getBytes();
+    byte[] data1b = new String("hello world").getBytes();
+    byte[] data2 = "hello another world".getBytes();
+    ByteArrayHolder rawText1a = new ByteArrayHolder(data1a);
+    ByteArrayHolder rawText1b = new ByteArrayHolder(data1b);
+    Assert.assertEquals(rawText1a.hashCode(), rawText1b.hashCode());
+    ByteArrayHolder rawText2 = new ByteArrayHolder(data2);
+    Assert.assertNotEquals(rawText1a.hashCode(), rawText2.hashCode());
+    ByteArrayHolder rawText3 = new ByteArrayHolder(null);
+    Assert.assertNotEquals(rawText1a.hashCode(), rawText3.hashCode());
+    Assert.assertNotEquals(rawText2.hashCode(), rawText3.hashCode());
+  }
+
+  @Test
+  public void testEquals() {
+    byte[] data1a = "hello world".getBytes();
+    byte[] data1b = new String("hello world").getBytes();
+    byte[] data2 = "hello another world".getBytes();
+    ByteArrayHolder rawText1a = new ByteArrayHolder(data1a);
+    Assert.assertEquals(rawText1a, rawText1a);
+    ByteArrayHolder rawText1b = new ByteArrayHolder(data1b);
+    Assert.assertEquals(rawText1a, rawText1b);
+    ByteArrayHolder rawText2 = new ByteArrayHolder(data2);
+    Assert.assertNotEquals(rawText1a, rawText2);
+    ByteArrayHolder rawText3 = new ByteArrayHolder(null);
+    Assert.assertNotEquals(rawText1a, rawText3);
+    Assert.assertNotEquals(rawText3, rawText1a);
+    Assert.assertNotEquals(rawText1a, 42);
+    Assert.assertNotEquals(rawText1a, null);
+  }
+}
diff --git a/generator/src/test/java/com/google/archivepatcher/generator/DefaultDeflateCompressionDivinerTest.java b/generator/src/test/java/com/google/archivepatcher/generator/DefaultDeflateCompressionDivinerTest.java
new file mode 100644
index 0000000..057c790
--- /dev/null
+++ b/generator/src/test/java/com/google/archivepatcher/generator/DefaultDeflateCompressionDivinerTest.java
@@ -0,0 +1,208 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import com.google.archivepatcher.generator.DefaultDeflateCompressionDiviner.DivinationResult;
+import com.google.archivepatcher.shared.DefaultDeflateCompatibilityWindow;
+import com.google.archivepatcher.shared.DeflateCompressor;
+import com.google.archivepatcher.shared.JreDeflateParameters;
+import com.google.archivepatcher.shared.MultiViewInputStreamFactory;
+import com.google.archivepatcher.shared.UnitTestZipArchive;
+import com.google.archivepatcher.shared.UnitTestZipEntry;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.List;
+
+/**
+ * Tests for {@link DefaultDeflateCompressionDiviner}.
+ */
+@RunWith(JUnit4.class)
+@SuppressWarnings("javadoc")
+public class DefaultDeflateCompressionDivinerTest {
+  /**
+   * The object under test.
+   */
+  private DefaultDeflateCompressionDiviner diviner = null;
+
+  /**
+   * Test data written to the file.
+   */
+  private byte[] testData = null;
+
+  @Before
+  public void setup() {
+    testData = new DefaultDeflateCompatibilityWindow().getCorpus();
+    diviner = new DefaultDeflateCompressionDiviner();
+  }
+
+  /**
+   * Deflates the test data using the specified parameters and returns the deflated bytes.
+   * @param parameters the parameters to use for deflating
+   * @return the deflated data
+   */
+  private byte[] deflate(JreDeflateParameters parameters) throws IOException {
+    DeflateCompressor compressor = new DeflateCompressor();
+    compressor.setNowrap(parameters.nowrap);
+    compressor.setStrategy(parameters.strategy);
+    compressor.setCompressionLevel(parameters.level);
+    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+    compressor.compress(new ByteArrayInputStream(testData), buffer);
+    return buffer.toByteArray();
+  }
+
+  private static class ByteArrayInputStreamFactory
+      implements MultiViewInputStreamFactory<ByteArrayInputStream> {
+    private final byte[] data;
+    private final boolean supportMark;
+    private final boolean dieOnClose;
+
+    /**
+     * Create a factory that returns streams on the specified data buffer, optionally supporting
+     * {@link InputStream#mark(int)}.
+     * @param data the data buffer to return streams for
+     * @param supportMark whether or not to support marking
+     * @param dieOnClose whether or not to throw nasty exceptions on close()
+     */
+    public ByteArrayInputStreamFactory(byte[] data, boolean supportMark, boolean dieOnClose) {
+      this.data = data;
+      this.supportMark = supportMark;
+      this.dieOnClose = dieOnClose;
+    }
+
+    @Override
+    public ByteArrayInputStream newStream() throws IOException {
+      return new ByteArrayInputStream(data) {
+        @Override
+        public boolean markSupported() {
+          return supportMark;
+        }
+
+        @Override
+        public void close() throws IOException {
+          if (dieOnClose) {
+            throw new IOException("brainnnnnnnnnnssssss!");
+          }
+          super.close();
+        }
+      };
+    }
+  }
+
+  @Test
+  public void testDivineDeflateParameters_NoMarkInputStreamFactory() throws IOException {
+    final JreDeflateParameters parameters = JreDeflateParameters.of(1, 0, true);
+    final byte[] buffer = deflate(parameters);
+    try {
+      // The factory here will NOT support mark(int), which should cause failure. Also, throw
+      // exceptions on close() to be extra rude.
+      diviner.divineDeflateParameters(new ByteArrayInputStreamFactory(buffer, false, true));
+      Assert.fail("operating without a markable stream");
+    } catch (IllegalArgumentException expected) {
+      // Correct!
+    }
+  }
+
+  @Test
+  public void testDivineDeflateParameters_BadCloseInputStreamFactory() throws IOException {
+    final JreDeflateParameters parameters = JreDeflateParameters.of(1, 0, true);
+    final byte[] buffer = deflate(parameters);
+    // The factory here will produce streams that throw exceptions when close() is called.
+    // These exceptions should be ignored.
+    JreDeflateParameters result =
+        diviner.divineDeflateParameters(new ByteArrayInputStreamFactory(buffer, true, true));
+    Assert.assertEquals(result, parameters);
+  }
+
+  @Test
+  public void testDivineDeflateParameters_JunkData() throws IOException {
+    final byte[] junk = new byte[] {0, 1, 2, 3, 4};
+    Assert.assertNull(
+        diviner.divineDeflateParameters(new ByteArrayInputStreamFactory(junk, true, false)));
+  }
+
+  @Test
+  public void testDivineDeflateParameters_AllValidSettings() throws IOException {
+    for (boolean nowrap : new boolean[] {true, false}) {
+      for (int strategy : new int[] {0, 1, 2}) {
+        for (int level : new int[] {1, 2, 3, 4, 5, 6, 7, 8, 9}) {
+          JreDeflateParameters trueParameters = JreDeflateParameters.of(level, strategy, nowrap);
+          final byte[] buffer = deflate(trueParameters);
+          ByteArrayInputStreamFactory factory =
+              new ByteArrayInputStreamFactory(buffer, true, false);
+          JreDeflateParameters divinedParameters = diviner.divineDeflateParameters(factory);
+          Assert.assertNotNull(divinedParameters);
+          // TODO(andrewhayden): make *CERTAIN* 100% that strategy doesn't matter for level < 4.
+          if (strategy == 1 && level <= 3) {
+            // Strategy 1 produces identical output at levels 1, 2 and 3.
+            Assert.assertEquals(JreDeflateParameters.of(level, 0, nowrap), divinedParameters);
+          } else if (strategy == 2) {
+            // All levels are the same with strategy 2.
+            // TODO: Assert only one test gets done for this, should be the first level always.
+            Assert.assertEquals(nowrap, divinedParameters.nowrap);
+            Assert.assertEquals(strategy, divinedParameters.strategy);
+          } else {
+            Assert.assertEquals(trueParameters, divinedParameters);
+          }
+        } // End of iteration on level
+      } // End of iteration on strategy
+    } // End of iteration on nowrap
+  }
+
+  @Test
+  public void testDivineDeflateParameters_File() throws IOException {
+    File tempFile = File.createTempFile("ddcdt", "tmp");
+    tempFile.deleteOnExit();
+    try {
+      UnitTestZipArchive.saveTestZip(tempFile);
+      List<DivinationResult> results = diviner.divineDeflateParameters(tempFile);
+      Assert.assertEquals(UnitTestZipArchive.allEntriesInFileOrder.size(), results.size());
+      for (int x = 0; x < results.size(); x++) {
+        UnitTestZipEntry expected = UnitTestZipArchive.allEntriesInFileOrder.get(x);
+        DivinationResult actual = results.get(x);
+        Assert.assertEquals(expected.path, actual.minimalZipEntry.getFileName());
+        int expectedLevel = expected.level;
+        if (expectedLevel > 0) {
+          // Compressed entry
+          Assert.assertTrue(actual.minimalZipEntry.isDeflateCompressed());
+          Assert.assertNotNull(actual.divinedParameters);
+          Assert.assertEquals(expectedLevel, actual.divinedParameters.level);
+          Assert.assertEquals(0, actual.divinedParameters.strategy);
+          Assert.assertTrue(actual.divinedParameters.nowrap);
+        } else {
+          // Uncompressed entry
+          Assert.assertFalse(actual.minimalZipEntry.isDeflateCompressed());
+          Assert.assertNull(actual.divinedParameters);
+        }
+      }
+    } finally {
+      try {
+        tempFile.delete();
+      } catch (Exception ignored) {
+        // Nothing
+      }
+    }
+  }
+}
diff --git a/generator/src/test/java/com/google/archivepatcher/generator/DeltaFriendlyOldBlobSizeLimiterTest.java b/generator/src/test/java/com/google/archivepatcher/generator/DeltaFriendlyOldBlobSizeLimiterTest.java
new file mode 100644
index 0000000..0bbacaf
--- /dev/null
+++ b/generator/src/test/java/com/google/archivepatcher/generator/DeltaFriendlyOldBlobSizeLimiterTest.java
@@ -0,0 +1,316 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/** Tests for {@link DeltaFriendlyOldBlobSizeLimiter}. */
+@RunWith(JUnit4.class)
+@SuppressWarnings("javadoc")
+public class DeltaFriendlyOldBlobSizeLimiterTest {
+  private static final int DEFLATE_COMPRESSION_METHOD = 8;
+
+  private static final MinimalZipEntry UNIMPORTANT = makeFakeEntry("/unimportant", 1337, 1337);
+  private static final MinimalZipEntry ENTRY_A_100K =
+      makeFakeEntry("/a/100k", 100 * 1024, 200 * 1024);
+  private static final MinimalZipEntry ENTRY_B_200K =
+      makeFakeEntry("/b/200k", 100 * 1024, 300 * 1024);
+  private static final MinimalZipEntry ENTRY_C_300K =
+      makeFakeEntry("/c/300k", 100 * 1024, 400 * 1024);
+  private static final MinimalZipEntry ENTRY_D_400K =
+      makeFakeEntry("/d/400k", 100 * 1024, 500 * 1024);
+  private static final MinimalZipEntry IGNORED_A = makeFakeEntry("/ignored/a", 1234, 5678);
+  private static final MinimalZipEntry IGNORED_B = makeFakeEntry("/ignored/b", 5678, 9101112);
+  private static final MinimalZipEntry IGNORED_C = makeFakeEntry("/ignored/c", 9101112, 13141516);
+
+  // First four recommendations are all ones where uncompression of the old resource is required.
+  // Note that there is a mix of UNCOMPRESS_OLD and UNCOMPRESS_BOTH, both of which will have the
+  // "old" entry flagged for uncompression (i.e., should be relevant to the filtering logic).
+  private static final QualifiedRecommendation REC_A_100K =
+      new QualifiedRecommendation(
+          ENTRY_A_100K,
+          UNIMPORTANT,
+          Recommendation.UNCOMPRESS_BOTH,
+          RecommendationReason.COMPRESSED_BYTES_CHANGED);
+  private static final QualifiedRecommendation REC_B_200K =
+      new QualifiedRecommendation(
+          ENTRY_B_200K,
+          UNIMPORTANT,
+          Recommendation.UNCOMPRESS_OLD,
+          RecommendationReason.UNCOMPRESSED_CHANGED_TO_COMPRESSED);
+  private static final QualifiedRecommendation REC_C_300K =
+      new QualifiedRecommendation(
+          ENTRY_C_300K,
+          UNIMPORTANT,
+          Recommendation.UNCOMPRESS_BOTH,
+          RecommendationReason.COMPRESSED_BYTES_CHANGED);
+  private static final QualifiedRecommendation REC_D_400K =
+      new QualifiedRecommendation(
+          ENTRY_D_400K,
+          UNIMPORTANT,
+          Recommendation.UNCOMPRESS_BOTH,
+          RecommendationReason.COMPRESSED_CHANGED_TO_UNCOMPRESSED);
+
+  // Remaining recommendations are all ones where uncompression of the old resource is NOT
+  // required. All of them use UNCOMPRESS_NEITHER, so the "old" entry is never flagged for
+  // uncompression (i.e., they must be ignored by the filtering logic).
+  private static final QualifiedRecommendation REC_IGNORED_A_UNCHANGED =
+      new QualifiedRecommendation(
+          IGNORED_A,
+          UNIMPORTANT,
+          Recommendation.UNCOMPRESS_NEITHER,
+          RecommendationReason.COMPRESSED_BYTES_IDENTICAL);
+  private static final QualifiedRecommendation REC_IGNORED_B_BOTH_UNCOMPRESSED =
+      new QualifiedRecommendation(
+          IGNORED_B,
+          UNIMPORTANT,
+          Recommendation.UNCOMPRESS_NEITHER,
+          RecommendationReason.BOTH_ENTRIES_UNCOMPRESSED);
+  private static final QualifiedRecommendation REC_IGNORED_C_UNSUITABLE =
+      new QualifiedRecommendation(
+          IGNORED_C,
+          UNIMPORTANT,
+          Recommendation.UNCOMPRESS_NEITHER,
+          RecommendationReason.UNSUITABLE);
+
+  /** Convenience reference to all the recommendations that should be ignored by filtering. */
+  private static final List<QualifiedRecommendation> ALL_IGNORED_RECS =
+      Collections.unmodifiableList(
+          Arrays.asList(
+              REC_IGNORED_A_UNCHANGED, REC_IGNORED_B_BOTH_UNCOMPRESSED, REC_IGNORED_C_UNSUITABLE));
+
+  /** Convenience reference to all the recommendations that are subject to filtering. */
+  private static final List<QualifiedRecommendation> ALL_RECS =
+      Collections.unmodifiableList(
+          Arrays.asList(
+              REC_IGNORED_A_UNCHANGED,
+              REC_A_100K,
+              REC_IGNORED_B_BOTH_UNCOMPRESSED,
+              REC_D_400K,
+              REC_IGNORED_C_UNSUITABLE,
+              REC_B_200K,
+              REC_C_300K));
+
+  /**
+   * Make a structurally valid but totally bogus {@link MinimalZipEntry} for the purpose of testing
+   * the {@link RecommendationModifier}.
+   *
+   * @param path the path to set on the entry, to help with debugging
+   * @param compressedSize the compressed size of the entry, in bytes
+   * @param uncompressedSize the uncompressed size of the entry, in bytes
+   */
+  private static MinimalZipEntry makeFakeEntry(
+      String path, long compressedSize, long uncompressedSize) {
+    try {
+      return new MinimalZipEntry(
+          DEFLATE_COMPRESSION_METHOD, // == deflate
+          0, // crc32OfUncompressedData (ignored for this test)
+          compressedSize,
+          uncompressedSize,
+          path.getBytes("UTF8"),
+          true, // generalPurposeFlagBit11 (true=UTF8)
+          0 // fileOffsetOfLocalEntry (ignored for this test)
+          );
+    } catch (UnsupportedEncodingException e) {
+      throw new RuntimeException(e); // Impossible on any modern system
+    }
+  }
+
+  @Test
+  public void testNegativeLimit() {
+    try {
+      new DeltaFriendlyOldBlobSizeLimiter(-1);
+      Assert.fail("Set a negative limit");
+    } catch (IllegalArgumentException expected) {
+      // Pass
+    }
+  }
+
+  /**
+   * Asserts that the two collections contain exactly the same elements. This isn't as rigorous as
+   * it should be, but is ok for this test scenario. Checks the contents but not the iteration order
+   * of the collections handed in.
+   */
+  private static <T> void assertEquivalence(Collection<T> c1, Collection<T> c2) {
+    String errorMessage = "Expected " + c1 + " but was " + c2;
+    Assert.assertEquals(errorMessage, c1.size(), c2.size());
+    Assert.assertTrue(errorMessage, c1.containsAll(c2));
+    Assert.assertTrue(errorMessage, c2.containsAll(c1));
+  }
+
+  /**
+   * Given {@link QualifiedRecommendation}s, manufacture equivalents altered in the way that the
+   * {@link DeltaFriendlyOldBlobSizeLimiter} would.
+   *
+   * @param originals the original recommendations
+   * @return the altered recommendations
+   */
+  private static final List<QualifiedRecommendation> suppressed(
+      QualifiedRecommendation... originals) {
+    List<QualifiedRecommendation> result = new ArrayList<>(originals.length);
+    for (QualifiedRecommendation original : originals) {
+      result.add(
+          new QualifiedRecommendation(
+              original.getOldEntry(),
+              original.getNewEntry(),
+              Recommendation.UNCOMPRESS_NEITHER,
+              RecommendationReason.RESOURCE_CONSTRAINED));
+    }
+    return result;
+  }
+
+  private File tempFile = null;
+
+  @Before
+  public void setup() throws IOException {
+    // Make an empty file to test the recommender's limitation logic
+    tempFile = File.createTempFile("DeltaFriendlyOldBlobSizeLimiterTest", "test");
+    tempFile.deleteOnExit();
+  }
+
+  @After
+  public void tearDown() {
+    tempFile.delete();
+  }
+
+  @Test
+  public void testZeroLimit() {
+    DeltaFriendlyOldBlobSizeLimiter limiter = new DeltaFriendlyOldBlobSizeLimiter(0);
+    List<QualifiedRecommendation> expected = new ArrayList<QualifiedRecommendation>();
+    expected.addAll(suppressed(REC_A_100K, REC_B_200K, REC_C_300K, REC_D_400K));
+    expected.addAll(ALL_IGNORED_RECS);
+    assertEquivalence(expected, limiter.getModifiedRecommendations(tempFile, tempFile, ALL_RECS));
+  }
+
+  @Test
+  public void testMaxLimit() {
+    DeltaFriendlyOldBlobSizeLimiter limiter = new DeltaFriendlyOldBlobSizeLimiter(Long.MAX_VALUE);
+    assertEquivalence(ALL_RECS, limiter.getModifiedRecommendations(tempFile, tempFile, ALL_RECS));
+  }
+
+  @Test
+  public void testLimit_ExactlySmallest() {
+    long limit =
+        REC_A_100K.getOldEntry().getUncompressedSize()
+            - REC_A_100K.getOldEntry().getCompressedSize(); // Exactly large enough
+    DeltaFriendlyOldBlobSizeLimiter limiter = new DeltaFriendlyOldBlobSizeLimiter(limit);
+    List<QualifiedRecommendation> expected = new ArrayList<QualifiedRecommendation>();
+    expected.add(REC_A_100K);
+    expected.addAll(suppressed(REC_B_200K, REC_C_300K, REC_D_400K));
+    expected.addAll(ALL_IGNORED_RECS);
+    assertEquivalence(expected, limiter.getModifiedRecommendations(tempFile, tempFile, ALL_RECS));
+  }
+
+  @Test
+  public void testLimit_EdgeUnderSmallest() {
+    long limit =
+        REC_A_100K.getOldEntry().getUncompressedSize()
+            - REC_A_100K.getOldEntry().getCompressedSize()
+            - 1; // 1 byte too small
+    DeltaFriendlyOldBlobSizeLimiter limiter = new DeltaFriendlyOldBlobSizeLimiter(limit);
+    List<QualifiedRecommendation> expected = new ArrayList<QualifiedRecommendation>();
+    expected.addAll(suppressed(REC_A_100K, REC_B_200K, REC_C_300K, REC_D_400K));
+    expected.addAll(ALL_IGNORED_RECS);
+    assertEquivalence(expected, limiter.getModifiedRecommendations(tempFile, tempFile, ALL_RECS));
+  }
+
+  @Test
+  public void testLimit_EdgeOverSmallest() {
+    long limit =
+        REC_A_100K.getOldEntry().getUncompressedSize()
+            - REC_A_100K.getOldEntry().getCompressedSize()
+            + 1; // 1 byte extra room
+    DeltaFriendlyOldBlobSizeLimiter limiter = new DeltaFriendlyOldBlobSizeLimiter(limit);
+    List<QualifiedRecommendation> expected = new ArrayList<QualifiedRecommendation>();
+    expected.add(REC_A_100K);
+    expected.addAll(suppressed(REC_B_200K, REC_C_300K, REC_D_400K));
+    expected.addAll(ALL_IGNORED_RECS);
+    assertEquivalence(expected, limiter.getModifiedRecommendations(tempFile, tempFile, ALL_RECS));
+  }
+
+  @Test
+  public void testLimit_ExactlyLargest() {
+    long limit =
+        REC_D_400K.getOldEntry().getUncompressedSize()
+            - REC_D_400K.getOldEntry().getCompressedSize(); // Exactly large enough
+    DeltaFriendlyOldBlobSizeLimiter limiter = new DeltaFriendlyOldBlobSizeLimiter(limit);
+    List<QualifiedRecommendation> expected = new ArrayList<QualifiedRecommendation>();
+    expected.add(REC_D_400K);
+    expected.addAll(suppressed(REC_A_100K, REC_B_200K, REC_C_300K));
+    expected.addAll(ALL_IGNORED_RECS);
+    assertEquivalence(expected, limiter.getModifiedRecommendations(tempFile, tempFile, ALL_RECS));
+  }
+
+  @Test
+  public void testLimit_EdgeUnderLargest() {
+    long limit =
+        REC_D_400K.getOldEntry().getUncompressedSize()
+            - REC_D_400K.getOldEntry().getCompressedSize()
+            - 1; // 1 byte too small
+    DeltaFriendlyOldBlobSizeLimiter limiter = new DeltaFriendlyOldBlobSizeLimiter(limit);
+    List<QualifiedRecommendation> expected = new ArrayList<QualifiedRecommendation>();
+    expected.add(REC_C_300K);
+    expected.addAll(suppressed(REC_A_100K, REC_B_200K, REC_D_400K));
+    expected.addAll(ALL_IGNORED_RECS);
+    assertEquivalence(expected, limiter.getModifiedRecommendations(tempFile, tempFile, ALL_RECS));
+  }
+
+  @Test
+  public void testLimit_EdgeOverLargest() {
+    long limit =
+        REC_D_400K.getOldEntry().getUncompressedSize()
+            - REC_D_400K.getOldEntry().getCompressedSize()
+            + 1; // 1 byte extra room
+    DeltaFriendlyOldBlobSizeLimiter limiter = new DeltaFriendlyOldBlobSizeLimiter(limit);
+    List<QualifiedRecommendation> expected = new ArrayList<QualifiedRecommendation>();
+    expected.add(REC_D_400K);
+    expected.addAll(suppressed(REC_A_100K, REC_B_200K, REC_C_300K));
+    expected.addAll(ALL_IGNORED_RECS);
+    assertEquivalence(expected, limiter.getModifiedRecommendations(tempFile, tempFile, ALL_RECS));
+  }
+
+  @Test
+  public void testLimit_Complex() {
+    // A more nuanced test. Here the limit is the sum of the byte savings (uncompressed size minus
+    // compressed size) of the largest and the THIRD largest entries. The second largest will fail
+    // because there isn't enough space after adding the first largest, and the fourth largest will
+    // fail because there is not enough space after adding the third largest. Tricky.
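+    // Walking through the greedy (largest-first) selection:
+    //   1. D fits and is kept; the remaining budget equals exactly B's savings.
+    //   2. C does not fit in the remaining budget and is suppressed.
+    //   3. B fits exactly, consuming the rest of the budget.
+    //   4. A does not fit and is suppressed.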
+    long limit =
+        (REC_D_400K.getOldEntry().getUncompressedSize()
+                - REC_D_400K.getOldEntry().getCompressedSize())
+            + (REC_B_200K.getOldEntry().getUncompressedSize()
+                - REC_B_200K.getOldEntry().getCompressedSize());
+    DeltaFriendlyOldBlobSizeLimiter limiter = new DeltaFriendlyOldBlobSizeLimiter(limit);
+    List<QualifiedRecommendation> expected = new ArrayList<QualifiedRecommendation>();
+    expected.add(REC_B_200K);
+    expected.add(REC_D_400K);
+    expected.addAll(suppressed(REC_A_100K, REC_C_300K));
+    expected.addAll(ALL_IGNORED_RECS);
+    assertEquivalence(expected, limiter.getModifiedRecommendations(tempFile, tempFile, ALL_RECS));
+  }
+}
diff --git a/generator/src/test/java/com/google/archivepatcher/generator/FileByFileV1DeltaGeneratorTest.java b/generator/src/test/java/com/google/archivepatcher/generator/FileByFileV1DeltaGeneratorTest.java
new file mode 100644
index 0000000..a43d168
--- /dev/null
+++ b/generator/src/test/java/com/google/archivepatcher/generator/FileByFileV1DeltaGeneratorTest.java
@@ -0,0 +1,51 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import com.google.archivepatcher.shared.UnitTestZipArchive;
+import java.io.ByteArrayOutputStream;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/**
+ * Tests for {@link FileByFileV1DeltaGenerator}. This relies heavily on the correctness of {@link
+ * PatchWriterTest}, which validates the patch writing process itself, {@link PreDiffPlannerTest},
+ * which validates the decision making process for delta-friendly blobs, and {@link
+ * PreDiffExecutorTest}, which validates the ability to create the delta-friendly blobs. The {@link
+ * FileByFileV1DeltaGenerator} <em>itself</em> is relatively simple, combining all of these pieces
+ * of functionality together to create a patch; so the tests here are just ensuring that a patch can
+ * be produced.
+ */
+@RunWith(JUnit4.class)
+@SuppressWarnings("javadoc")
+public class FileByFileV1DeltaGeneratorTest {
+
+  @Test
+  public void testGenerateDelta_BaseCase() throws Exception {
+    // Simple test of generating a patch with no changes.
+    FileByFileV1DeltaGenerator generator = new FileByFileV1DeltaGenerator();
+    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+    try (TempFileHolder oldArchive = new TempFileHolder();
+        TempFileHolder newArchive = new TempFileHolder()) {
+      UnitTestZipArchive.saveTestZip(oldArchive.file);
+      UnitTestZipArchive.saveTestZip(newArchive.file);
+      generator.generateDelta(oldArchive.file, newArchive.file, buffer);
+    }
+    byte[] result = buffer.toByteArray();
+    Assert.assertTrue(result.length > 0);
+  }
+}
diff --git a/generator/src/test/java/com/google/archivepatcher/generator/MatchingOutputStreamTest.java b/generator/src/test/java/com/google/archivepatcher/generator/MatchingOutputStreamTest.java
new file mode 100644
index 0000000..23891ca
--- /dev/null
+++ b/generator/src/test/java/com/google/archivepatcher/generator/MatchingOutputStreamTest.java
@@ -0,0 +1,127 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+
+/**
+ * Tests for {@link MatchingOutputStream}.
+ */
+@RunWith(JUnit4.class)
+@SuppressWarnings("javadoc")
+public class MatchingOutputStreamTest {
+  /**
+   * The data to write to the stream.
+   */
+  private byte[] data;
+
+  /**
+   * Input for matching.
+   */
+  private ByteArrayInputStream inputStream;
+
+  /**
+   * The stream under test.
+   */
+  private MatchingOutputStream outputStream;
+
+  @Before
+  public void setup() {
+    data = new byte[] {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+    inputStream = new ByteArrayInputStream(data);
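+    // A copy buffer (3 bytes) smaller than the data forces matching across multiple chunks.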
+    outputStream = new MatchingOutputStream(inputStream, 3 /* buffer size */);
+  }
+
+  @Test
+  public void testWrite_OneByte() throws IOException {
+    for (int x = 0; x < data.length; x++) {
+      outputStream.write(data[x] & 0xff);
+    }
+  }
+
+  @Test
+  public void testWrite_WholeBuffer() throws IOException {
+    outputStream.write(data);
+  }
+
+  @Test
+  public void testWrite_WholeBuffer_RealisticCopyBuffer() throws IOException {
+    outputStream = new MatchingOutputStream(inputStream, 32768); // realistic copy buffer size
+    outputStream.write(data);
+  }
+
+  @Test
+  public void testWrite_PartialBuffer() throws IOException {
+    for (int x = 0; x < data.length; x++) {
+      outputStream.write(data, x, 1);
+    }
+  }
+
+  @Test
+  public void testExpectEof() throws IOException {
+    outputStream.write(data);
+    outputStream.expectEof();
+  }
+
+  @Test(expected = MismatchException.class)
+  public void testWrite_OneByte_MatchFail() throws IOException {
+    outputStream.write(0);
+    outputStream.write(77);
+  }
+
+  @Test(expected = MismatchException.class)
+  public void testWrite_OneByte_StreamFail() throws IOException {
+    // Write one byte more than the data match stream contains
+    for (int x = 0; x <= data.length; x++) {
+      outputStream.write(x);
+    }
+  }
+
+  @Test(expected = MismatchException.class)
+  public void testWrite_WholeBuffer_Fail() throws IOException {
+    byte[] tweaked = new byte[] {0, 1, 2, 3, 4, 55, 6, 7, 8, 9};
+    outputStream.write(tweaked);
+  }
+
+  @Test(expected = MismatchException.class)
+  public void testWrite_PartialBuffer_Fail() throws IOException {
+    byte[] tweaked = new byte[] {0, 1, 2, 3, 4, 55, 6, 7, 8, 9};
+    outputStream.write(tweaked, 0, 8);
+  }
+
+  @Test(expected = MismatchException.class)
+  public void testExpectEof_Fail() throws IOException {
+    outputStream.write(data, 0, data.length - 1);
+    outputStream.expectEof();
+  }
+
+  @Test(expected = MismatchException.class)
+  public void testWrite_PastEndOfMatchStream() throws IOException {
+    outputStream.write(data);
+    outputStream.write(data);
+  }
+
+  @SuppressWarnings({"resource", "unused"})
+  @Test(expected = IllegalArgumentException.class)
+  public void testConstructor_BadMatchBufferLength() {
+    new MatchingOutputStream(inputStream, 0);
+  }
+}
diff --git a/generator/src/test/java/com/google/archivepatcher/generator/MinimalZipArchiveTest.java b/generator/src/test/java/com/google/archivepatcher/generator/MinimalZipArchiveTest.java
new file mode 100644
index 0000000..62a5363
--- /dev/null
+++ b/generator/src/test/java/com/google/archivepatcher/generator/MinimalZipArchiveTest.java
@@ -0,0 +1,112 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import com.google.archivepatcher.shared.UnitTestZipArchive;
+import com.google.archivepatcher.shared.UnitTestZipEntry;
+
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.List;
+import java.util.zip.CRC32;
+
+/**
+ * Tests for {@link MinimalZipArchive}.
+ */
+@RunWith(JUnit4.class)
+@SuppressWarnings("javadoc")
+public class MinimalZipArchiveTest {
+  private byte[] unitTestZipArchive;
+  private File tempFile;
+
+  @Before
+  public void setup() throws Exception {
+    unitTestZipArchive = UnitTestZipArchive.makeTestZip();
+    tempFile = File.createTempFile("MinimalZipArchiveTest", "zip");
+    tempFile.deleteOnExit();
+    try (FileOutputStream out = new FileOutputStream(tempFile)) {
+      out.write(unitTestZipArchive);
+      out.flush();
+    } catch (IOException e) {
+      tempFile.delete();
+      throw e;
+    }
+  }
+
+  @After
+  public void tearDown() {
+    if (tempFile != null) {
+      try {
+        tempFile.delete();
+      } catch (Exception ignored) {
+        // Nothing
+      }
+    }
+  }
+
+  @Test
+  public void testListEntries() throws IOException {
+    // Ensure all entries are found, and that they are in file order.
+    List<MinimalZipEntry> parsedEntries = MinimalZipArchive.listEntries(tempFile);
+    long lastSeenHeaderOffset = -1;
+    for (int x = 0; x < UnitTestZipArchive.allEntriesInFileOrder.size(); x++) {
+      UnitTestZipEntry expected = UnitTestZipArchive.allEntriesInFileOrder.get(x);
+      MinimalZipEntry actual = parsedEntries.get(x);
+      Assert.assertEquals(expected.path, actual.getFileName());
+      Assert.assertEquals(expected.level == 0 ? 0 : 8, actual.getCompressionMethod());
+      Assert.assertEquals(expected.getCompressedBinaryContent().length, actual.getCompressedSize());
+      Assert.assertEquals(
+          expected.getUncompressedBinaryContent().length, actual.getUncompressedSize());
+      Assert.assertFalse(actual.getGeneralPurposeFlagBit11());
+      CRC32 crc32 = new CRC32();
+      crc32.update(expected.getUncompressedBinaryContent());
+      Assert.assertEquals(crc32.getValue(), actual.getCrc32OfUncompressedData());
+
+      // Offset verification is a little trickier.
+      // 1. Verify that the offsets are strictly increasing.
+      Assert.assertTrue(actual.getFileOffsetOfLocalEntry() > lastSeenHeaderOffset);
+      lastSeenHeaderOffset = actual.getFileOffsetOfLocalEntry();
+
+      // 2. Verify that the local signature header is at the calculated position
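+      // ("PK\3\4" is the little-endian form of the local file header signature 0x04034b50.)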
+      byte[] expectedSignatureBlock = new byte[] {0x50, 0x4b, 0x03, 0x04};
+      for (int index = 0; index < 4; index++) {
+        byte actualByte = unitTestZipArchive[((int) actual.getFileOffsetOfLocalEntry()) + index];
+        Assert.assertEquals(expectedSignatureBlock[index], actualByte);
+      }
+
+      // 3. Verify that the data is at the calculated position
+      byte[] expectedContent = expected.getCompressedBinaryContent();
+      int calculatedDataOffset = (int) actual.getFileOffsetOfCompressedData();
+      for (int index = 0; index < expectedContent.length; index++) {
+        Assert.assertEquals(
+            expectedContent[index], unitTestZipArchive[calculatedDataOffset + index]);
+      }
+    }
+  }
+}
diff --git a/generator/src/test/java/com/google/archivepatcher/generator/MinimalZipEntryTest.java b/generator/src/test/java/com/google/archivepatcher/generator/MinimalZipEntryTest.java
new file mode 100644
index 0000000..d0bde74
--- /dev/null
+++ b/generator/src/test/java/com/google/archivepatcher/generator/MinimalZipEntryTest.java
@@ -0,0 +1,264 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/** Tests for {@link MinimalZipEntry}. */
+@RunWith(JUnit4.class)
+@SuppressWarnings("javadoc")
+public class MinimalZipEntryTest {
+  private static final int COMPRESSION_METHOD = 8; // (meaning deflate)
+  private static final long CRC32 = 123;
+  private static final long COMPRESSED_SIZE = 456;
+  private static final long UNCOMPRESSED_SIZE = 789;
+  private static final byte[] FILE_NAME_BYTES = new byte[] {'f', 'o', 'o', '.', 'b', 'a', 'r'};
+  private static final boolean GENERAL_PURPOSE_BIT_FLAG_11 = true; // (meaning file name is UTF8)
+  private static final long FILE_OFFSET_OF_LOCAL_ENTRY = 1337;
+  private static final long FILE_OFFSET_OF_COMPRESSED_DATA = 2674;
+
+  private MinimalZipEntry defaultEntry;
+  private MinimalZipEntry clonedDefaultEntry;
+  private MinimalZipEntry alteredCompressionMethod;
+  private MinimalZipEntry alteredCrc32;
+  private MinimalZipEntry alteredCompressedSize;
+  private MinimalZipEntry alteredUncompressedSize;
+  private MinimalZipEntry alteredFileNameBytes;
+  private MinimalZipEntry alteredGeneralPurposeBitFlag11;
+  private MinimalZipEntry alteredOffsetOfLocalEntry;
+  private MinimalZipEntry alteredFileOffsetOfCompressedData;
+  private List<MinimalZipEntry> allMutations;
+
+  @Before
+  public void setup() throws Exception {
+    defaultEntry =
+        new MinimalZipEntry(
+            COMPRESSION_METHOD,
+            CRC32,
+            COMPRESSED_SIZE,
+            UNCOMPRESSED_SIZE,
+            FILE_NAME_BYTES,
+            GENERAL_PURPOSE_BIT_FLAG_11,
+            FILE_OFFSET_OF_LOCAL_ENTRY);
+    clonedDefaultEntry =
+        new MinimalZipEntry(
+            COMPRESSION_METHOD,
+            CRC32,
+            COMPRESSED_SIZE,
+            UNCOMPRESSED_SIZE,
+            FILE_NAME_BYTES,
+            GENERAL_PURPOSE_BIT_FLAG_11,
+            FILE_OFFSET_OF_LOCAL_ENTRY);
+    alteredCompressionMethod =
+        new MinimalZipEntry(
+            COMPRESSION_METHOD - 1,
+            CRC32,
+            COMPRESSED_SIZE,
+            UNCOMPRESSED_SIZE,
+            FILE_NAME_BYTES,
+            GENERAL_PURPOSE_BIT_FLAG_11,
+            FILE_OFFSET_OF_LOCAL_ENTRY);
+    alteredCrc32 =
+        new MinimalZipEntry(
+            COMPRESSION_METHOD,
+            CRC32 - 1,
+            COMPRESSED_SIZE,
+            UNCOMPRESSED_SIZE,
+            FILE_NAME_BYTES,
+            GENERAL_PURPOSE_BIT_FLAG_11,
+            FILE_OFFSET_OF_LOCAL_ENTRY);
+    alteredCompressedSize =
+        new MinimalZipEntry(
+            COMPRESSION_METHOD,
+            CRC32,
+            COMPRESSED_SIZE - 1,
+            UNCOMPRESSED_SIZE,
+            FILE_NAME_BYTES,
+            GENERAL_PURPOSE_BIT_FLAG_11,
+            FILE_OFFSET_OF_LOCAL_ENTRY);
+    alteredUncompressedSize =
+        new MinimalZipEntry(
+            COMPRESSION_METHOD,
+            CRC32,
+            COMPRESSED_SIZE,
+            UNCOMPRESSED_SIZE - 1,
+            FILE_NAME_BYTES,
+            GENERAL_PURPOSE_BIT_FLAG_11,
+            FILE_OFFSET_OF_LOCAL_ENTRY);
+    alteredFileNameBytes =
+        new MinimalZipEntry(
+            COMPRESSION_METHOD,
+            CRC32,
+            COMPRESSED_SIZE,
+            UNCOMPRESSED_SIZE,
+            new byte[] {'x'},
+            GENERAL_PURPOSE_BIT_FLAG_11,
+            FILE_OFFSET_OF_LOCAL_ENTRY);
+    alteredGeneralPurposeBitFlag11 =
+        new MinimalZipEntry(
+            COMPRESSION_METHOD,
+            CRC32,
+            COMPRESSED_SIZE,
+            UNCOMPRESSED_SIZE,
+            FILE_NAME_BYTES,
+            !GENERAL_PURPOSE_BIT_FLAG_11,
+            FILE_OFFSET_OF_LOCAL_ENTRY);
+    alteredOffsetOfLocalEntry =
+        new MinimalZipEntry(
+            COMPRESSION_METHOD,
+            CRC32,
+            COMPRESSED_SIZE,
+            UNCOMPRESSED_SIZE,
+            FILE_NAME_BYTES,
+            GENERAL_PURPOSE_BIT_FLAG_11,
+            FILE_OFFSET_OF_LOCAL_ENTRY - 1);
+    alteredFileOffsetOfCompressedData =
+        new MinimalZipEntry(
+            COMPRESSION_METHOD,
+            CRC32,
+            COMPRESSED_SIZE,
+            UNCOMPRESSED_SIZE,
+            FILE_NAME_BYTES,
+            GENERAL_PURPOSE_BIT_FLAG_11,
+            FILE_OFFSET_OF_LOCAL_ENTRY);
+    alteredFileOffsetOfCompressedData.setFileOffsetOfCompressedData(FILE_OFFSET_OF_COMPRESSED_DATA);
+    allMutations =
+        Collections.unmodifiableList(
+            Arrays.asList(
+                alteredCompressionMethod,
+                alteredCrc32,
+                alteredCompressedSize,
+                alteredUncompressedSize,
+                alteredFileNameBytes,
+                alteredGeneralPurposeBitFlag11,
+                alteredOffsetOfLocalEntry,
+                alteredFileOffsetOfCompressedData));
+  }
+
+  @Test
+  public void testGetFileName() throws Exception {
+    // Make a string with some characters from the DOS ANSI art days; these characters have
+    // different binary representations in UTF-8 and Cp437. We use the light, medium, and dark
+    // "shade" characters (0x2591, 0x2592, 0x2593 respectively) for this purpose. Go go ANSI art!
+    // https://en.wikipedia.org/wiki/Code_page_437
+    // https://en.wikipedia.org/wiki/Block_Elements
+    String fileName = "\u2591\u2592\u2593AWESOME\u2593\u2592\u2591";
+    byte[] utf8Bytes = fileName.getBytes("UTF8");
+    byte[] cp437Bytes = fileName.getBytes("Cp437");
+    Assert.assertFalse(Arrays.equals(utf8Bytes, cp437Bytes)); // For test sanity
+
+    MinimalZipEntry utf8Entry =
+        new MinimalZipEntry(
+            COMPRESSION_METHOD,
+            CRC32,
+            COMPRESSED_SIZE,
+            UNCOMPRESSED_SIZE,
+            utf8Bytes,
+            true /* utf8 */,
+            FILE_OFFSET_OF_LOCAL_ENTRY);
+    Assert.assertArrayEquals(utf8Bytes, utf8Entry.getFileNameBytes());
+    String fileNameFromUtf8Bytes = utf8Entry.getFileName();
+    Assert.assertEquals(fileName, fileNameFromUtf8Bytes);
+
+    MinimalZipEntry cp437Entry =
+        new MinimalZipEntry(
+            COMPRESSION_METHOD,
+            CRC32,
+            COMPRESSED_SIZE,
+            UNCOMPRESSED_SIZE,
+            cp437Bytes,
+            false /* cp437 */,
+            FILE_OFFSET_OF_LOCAL_ENTRY);
+    Assert.assertArrayEquals(cp437Bytes, cp437Entry.getFileNameBytes());
+    String fileNameFromCp437Bytes = cp437Entry.getFileName();
+    Assert.assertEquals(fileName, fileNameFromCp437Bytes);
+  }
+
+  @Test
+  public void testIsDeflateCompressed() {
+    // Compression method == 8, and uncompressed size != compressed size
+    Assert.assertTrue(defaultEntry.isDeflateCompressed());
+    // Compression method == 8, but uncompressed size == compressed size (i.e., a stored entry)
+    MinimalZipEntry stored =
+        new MinimalZipEntry(
+            COMPRESSION_METHOD,
+            CRC32,
+            1000,
+            1000,
+            FILE_NAME_BYTES,
+            GENERAL_PURPOSE_BIT_FLAG_11,
+            FILE_OFFSET_OF_LOCAL_ENTRY);
+    Assert.assertFalse(stored.isDeflateCompressed());
+    // Compression method != 8 (obviously not deflate)
+    Assert.assertFalse(alteredCompressionMethod.isDeflateCompressed());
+  }
+
+  @Test
+  @SuppressWarnings("EqualsIncompatibleType") // For ErrorProne
+  public void testEquals() {
+    Assert.assertEquals(defaultEntry, defaultEntry);
+    // clonedDefaultEntry (built in setup() with identical field values) must be equal.
+    Assert.assertEquals(defaultEntry, clonedDefaultEntry);
+    for (MinimalZipEntry mutation : allMutations) {
+      Assert.assertNotEquals(defaultEntry, mutation);
+    }
+    Assert.assertFalse(defaultEntry.equals(null));
+    Assert.assertFalse(defaultEntry.equals("foo"));
+  }
+
+  @Test
+  public void testHashCode() {
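+    // Equal entries must collapse to a single set element; each mutation is a distinct element.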
+    Set<MinimalZipEntry> hashSet = new HashSet<>();
+    hashSet.add(defaultEntry);
+    hashSet.add(clonedDefaultEntry);
+    Assert.assertEquals(1, hashSet.size());
+    hashSet.addAll(allMutations);
+    Assert.assertEquals(1 + allMutations.size(), hashSet.size());
+  }
+
+  @Test
+  public void testGetters() {
+    Assert.assertEquals(COMPRESSED_SIZE, defaultEntry.getCompressedSize());
+    Assert.assertEquals(COMPRESSION_METHOD, defaultEntry.getCompressionMethod());
+    Assert.assertEquals(CRC32, defaultEntry.getCrc32OfUncompressedData());
+    Assert.assertArrayEquals(FILE_NAME_BYTES, defaultEntry.getFileNameBytes());
+    Assert.assertEquals(FILE_OFFSET_OF_LOCAL_ENTRY, defaultEntry.getFileOffsetOfLocalEntry());
+    Assert.assertEquals(GENERAL_PURPOSE_BIT_FLAG_11, defaultEntry.getGeneralPurposeFlagBit11());
+    Assert.assertEquals(UNCOMPRESSED_SIZE, defaultEntry.getUncompressedSize());
+
+    // Special case: only alteredFileOffsetOfCompressedData has this field set.
+    Assert.assertEquals(-1, defaultEntry.getFileOffsetOfCompressedData());
+    Assert.assertEquals(
+        FILE_OFFSET_OF_COMPRESSED_DATA,
+        alteredFileOffsetOfCompressedData.getFileOffsetOfCompressedData());
+  }
+}
diff --git a/generator/src/test/java/com/google/archivepatcher/generator/MinimalZipParserTest.java b/generator/src/test/java/com/google/archivepatcher/generator/MinimalZipParserTest.java
new file mode 100644
index 0000000..2296fea
--- /dev/null
+++ b/generator/src/test/java/com/google/archivepatcher/generator/MinimalZipParserTest.java
@@ -0,0 +1,217 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import com.google.archivepatcher.shared.RandomAccessFileInputStream;
+import com.google.archivepatcher.shared.UnitTestZipArchive;
+import com.google.archivepatcher.shared.UnitTestZipEntry;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+import java.io.ByteArrayInputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.zip.CRC32;
+
+/**
+ * Tests for {@link MinimalZipParser}.
+ */
+@RunWith(JUnit4.class)
+@SuppressWarnings("javadoc")
+public class MinimalZipParserTest {
+  private byte[] unitTestZipArchive;
+
+  @Before
+  public void setup() throws Exception {
+    unitTestZipArchive = UnitTestZipArchive.makeTestZip();
+  }
+
+  private void checkExpectedBytes(byte[] expectedData, int unitTestZipArchiveOffset) {
+    for (int index = 0; index < 4; index++) {
+      byte actualByte = unitTestZipArchive[unitTestZipArchiveOffset + index];
+      Assert.assertEquals(expectedData[index], actualByte);
+    }
+  }
+
+  @Test
+  public void testLocateStartOfEocd_WithArray() {
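+    // The end-of-central-directory record begins with "PK\5\6" (signature 0x06054b50).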
+    int eocdOffset = MinimalZipParser.locateStartOfEocd(unitTestZipArchive);
+    checkExpectedBytes(new byte[] {0x50, 0x4b, 0x05, 0x06}, eocdOffset);
+  }
+
+  @Test
+  public void testLocateStartOfEocd_WithArray_NoEocd() {
+    int eocdOffset = MinimalZipParser.locateStartOfEocd(new byte[32768]);
+    Assert.assertEquals(-1, eocdOffset);
+  }
+
+  @Test
+  public void testLocateStartOfEocd_WithFile() throws IOException {
+    // Create a temp file with some zeroes, the EOCD header, and more zeroes.
+    int bytesBefore = 53754;
+    int bytesAfter = 107;
+    File tempFile = File.createTempFile("MinimalZipParserTest", "zip");
+    tempFile.deleteOnExit();
+    try (FileOutputStream out = new FileOutputStream(tempFile)) {
+      out.write(new byte[bytesBefore]);
+      out.write(new byte[] {0x50, 0x4b, 0x05, 0x06});
+      out.write(new byte[bytesAfter]);
+      out.flush();
+    } catch (IOException e) {
+      tempFile.delete();
+      throw e;
+    }
+
+    // Now expect to find the EOCD at the right place.
+    try (RandomAccessFileInputStream in = new RandomAccessFileInputStream(tempFile)) {
+      long eocdOffset = MinimalZipParser.locateStartOfEocd(in, 32768);
+      Assert.assertEquals(bytesBefore, eocdOffset);
+    }
+  }
+
+  @Test
+  public void testLocateStartOfEocd_WithFile_NoEocd() throws IOException {
+    // Create a temp file with some zeroes and no EOCD header at all
+    File tempFile = File.createTempFile("MinimalZipParserTest", "zip");
+    tempFile.deleteOnExit();
+    try (FileOutputStream out = new FileOutputStream(tempFile)) {
+      out.write(new byte[4000]);
+      out.flush();
+    } catch (IOException e) {
+      tempFile.delete();
+      throw e;
+    }
+
+    // Now expect to find no EOCD.
+    try (RandomAccessFileInputStream in = new RandomAccessFileInputStream(tempFile)) {
+      long eocdOffset = MinimalZipParser.locateStartOfEocd(in, 4000);
+      Assert.assertEquals(-1, eocdOffset);
+    }
+  }
+
+  @Test
+  public void testParseEocd() throws IOException {
+    int eocdOffset = MinimalZipParser.locateStartOfEocd(unitTestZipArchive);
+    ByteArrayInputStream in = new ByteArrayInputStream(unitTestZipArchive);
+    Assert.assertEquals(eocdOffset, in.skip(eocdOffset));
+    MinimalCentralDirectoryMetadata centralDirectoryMetadata = MinimalZipParser.parseEocd(in);
+    Assert.assertNotNull(centralDirectoryMetadata);
+
+    // Check that the central directory's first record is at the calculated offset
+    // ("PK\1\2" is the little-endian form of the central directory signature 0x02014b50.)
+    checkExpectedBytes(
+        new byte[] {0x50, 0x4b, 0x01, 0x02},
+        (int) centralDirectoryMetadata.getOffsetOfCentralDirectory());
+    // Check that the central directory's length is correct, i.e. that the EOCD record follows it.
+    long calculatedEndOfCentralDirectory =
+        centralDirectoryMetadata.getOffsetOfCentralDirectory()
+            + centralDirectoryMetadata.getLengthOfCentralDirectory();
+    checkExpectedBytes(new byte[] {0x50, 0x4b, 0x05, 0x06}, (int) calculatedEndOfCentralDirectory);
+    Assert.assertEquals(
+        UnitTestZipArchive.allEntriesInFileOrder.size(),
+        centralDirectoryMetadata.getNumEntriesInCentralDirectory());
+  }
+
+  @Test
+  public void testParseCentralDirectoryEntry() throws Exception {
+    ByteArrayInputStream in = new ByteArrayInputStream(unitTestZipArchive);
+    in.mark(unitTestZipArchive.length);
+    int eocdOffset = MinimalZipParser.locateStartOfEocd(unitTestZipArchive);
+    Assert.assertEquals(eocdOffset, in.skip(eocdOffset));
+    MinimalCentralDirectoryMetadata metadata = MinimalZipParser.parseEocd(in);
+    in.reset();
+    Assert.assertEquals(
+        metadata.getOffsetOfCentralDirectory(), in.skip(metadata.getOffsetOfCentralDirectory()));
+
+    // Read each entry and verify all fields *except* the value returned by
+    // MinimalZipEntry.getFileOffsetOfCompressedData(), as that has yet to be computed.
+    for (UnitTestZipEntry expectedEntry : UnitTestZipArchive.allEntriesInFileOrder) {
+      MinimalZipEntry parsed = MinimalZipParser.parseCentralDirectoryEntry(in);
+      Assert.assertEquals(expectedEntry.path, parsed.getFileName());
+
+      // Verify that the local signature header is at the calculated position
+      byte[] expectedSignatureBlock = new byte[] {0x50, 0x4b, 0x03, 0x04};
+      for (int index = 0; index < 4; index++) {
+        byte actualByte = unitTestZipArchive[((int) parsed.getFileOffsetOfLocalEntry()) + index];
+        Assert.assertEquals(expectedSignatureBlock[index], actualByte);
+      }
+
+      if (expectedEntry.level > 0) {
+        Assert.assertEquals(8 /* deflate */, parsed.getCompressionMethod());
+      } else {
+        Assert.assertEquals(0 /* store */, parsed.getCompressionMethod());
+      }
+      byte[] uncompressedContent = expectedEntry.getUncompressedBinaryContent();
+      Assert.assertEquals(uncompressedContent.length, parsed.getUncompressedSize());
+      CRC32 crc32 = new CRC32();
+      crc32.update(uncompressedContent);
+      Assert.assertEquals(crc32.getValue(), parsed.getCrc32OfUncompressedData());
+      byte[] compressedContent = expectedEntry.getCompressedBinaryContent();
+      Assert.assertEquals(compressedContent.length, parsed.getCompressedSize());
+    }
+  }
+
+  @Test
+  public void testParseLocalEntryAndGetCompressedDataOffset() throws Exception {
+    ByteArrayInputStream in = new ByteArrayInputStream(unitTestZipArchive);
+    in.mark(unitTestZipArchive.length);
+    int eocdOffset = MinimalZipParser.locateStartOfEocd(unitTestZipArchive);
+    Assert.assertEquals(eocdOffset, in.skip(eocdOffset));
+    MinimalCentralDirectoryMetadata metadata = MinimalZipParser.parseEocd(in);
+    in.reset();
+    Assert.assertEquals(
+        metadata.getOffsetOfCentralDirectory(), in.skip(metadata.getOffsetOfCentralDirectory()));
+
+    // Parse each central directory entry first; the offset of each entry's compressed data is
+    // computed afterwards by parsing the corresponding local entry header.
+    List<MinimalZipEntry> parsedEntries = new ArrayList<MinimalZipEntry>();
+    for (int x = 0; x < UnitTestZipArchive.allEntriesInFileOrder.size(); x++) {
+      parsedEntries.add(MinimalZipParser.parseCentralDirectoryEntry(in));
+    }
+
+    for (int x = 0; x < UnitTestZipArchive.allEntriesInFileOrder.size(); x++) {
+      UnitTestZipEntry expectedEntry = UnitTestZipArchive.allEntriesInFileOrder.get(x);
+      MinimalZipEntry parsedEntry = parsedEntries.get(x);
+      in.reset();
+      Assert.assertEquals(
+          parsedEntry.getFileOffsetOfLocalEntry(),
+          in.skip(parsedEntry.getFileOffsetOfLocalEntry()));
+      long relativeDataOffset = MinimalZipParser.parseLocalEntryAndGetCompressedDataOffset(in);
+      Assert.assertTrue(relativeDataOffset > 0);
+      checkExpectedBytes(
+          expectedEntry.getCompressedBinaryContent(),
+          (int) (parsedEntry.getFileOffsetOfLocalEntry() + relativeDataOffset));
+    }
+  }
+}
diff --git a/generator/src/test/java/com/google/archivepatcher/generator/PatchWriterTest.java b/generator/src/test/java/com/google/archivepatcher/generator/PatchWriterTest.java
new file mode 100644
index 0000000..67652ae
--- /dev/null
+++ b/generator/src/test/java/com/google/archivepatcher/generator/PatchWriterTest.java
@@ -0,0 +1,152 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import com.google.archivepatcher.shared.JreDeflateParameters;
+import com.google.archivepatcher.shared.PatchConstants;
+import com.google.archivepatcher.shared.TypedRange;
+
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Tests for {@link PatchWriter}.
+ */
+@RunWith(JUnit4.class)
+@SuppressWarnings("javadoc")
+public class PatchWriterTest {
+  // This is Integer.MAX_VALUE + 1.
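+  // Using a value beyond the int range exercises the 64-bit offset/length code paths.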
+  private static final long BIG = 2048L * 1024L * 1024L;
+
+  private static final JreDeflateParameters DEFLATE_PARAMS = JreDeflateParameters.of(6, 0, true);
+
+  private static final TypedRange<Void> OLD_DELTA_FRIENDLY_UNCOMPRESS_RANGE =
+      new TypedRange<Void>(BIG, 17L, null);
+
+  private static final TypedRange<JreDeflateParameters> NEW_DELTA_FRIENDLY_UNCOMPRESS_RANGE =
+      new TypedRange<JreDeflateParameters>(BIG - 100L, BIG, DEFLATE_PARAMS);
+
+  private static final TypedRange<JreDeflateParameters> NEW_DELTA_FRIENDLY_RECOMPRESS_RANGE =
+      new TypedRange<JreDeflateParameters>(BIG, BIG, DEFLATE_PARAMS);
+
+  private static final List<TypedRange<Void>> OLD_DELTA_FRIENDLY_UNCOMPRESS_PLAN =
+      Collections.singletonList(OLD_DELTA_FRIENDLY_UNCOMPRESS_RANGE);
+
+  private static final List<TypedRange<JreDeflateParameters>> NEW_DELTA_FRIENDLY_UNCOMPRESS_PLAN =
+      Collections.singletonList(NEW_DELTA_FRIENDLY_UNCOMPRESS_RANGE);
+
+  private static final List<TypedRange<JreDeflateParameters>> NEW_DELTA_FRIENDLY_RECOMPRESS_PLAN =
+      Collections.singletonList(NEW_DELTA_FRIENDLY_RECOMPRESS_RANGE);
+
+  private static final long DELTA_FRIENDLY_OLD_FILE_SIZE = BIG - 75L;
+
+  private static final long DELTA_FRIENDLY_NEW_FILE_SIZE = BIG + 75L;
+
+  private static final PreDiffPlan PLAN =
+      new PreDiffPlan(
+          Collections.<QualifiedRecommendation>emptyList(),
+          OLD_DELTA_FRIENDLY_UNCOMPRESS_PLAN,
+          NEW_DELTA_FRIENDLY_UNCOMPRESS_PLAN,
+          NEW_DELTA_FRIENDLY_RECOMPRESS_PLAN);
+
+  private static final String DELTA_CONTENT = "this is a really cool delta, woo";
+
+  private File deltaFile = null;
+
+  private ByteArrayOutputStream buffer = null;
+
+  @Before
+  public void setup() throws IOException {
+    buffer = new ByteArrayOutputStream();
+    deltaFile = File.createTempFile("patchwritertest", "delta");
+    deltaFile.deleteOnExit();
+    try (FileOutputStream out = new FileOutputStream(deltaFile)) {
+      out.write(DELTA_CONTENT.getBytes());
+      out.flush();
+    }
+  }
+
+  @After
+  public void tearDown() {
+    deltaFile.delete();
+  }
+
+  @Test
+  public void testWriteV1Patch() throws IOException {
+    // ---------------------------------------------------------------------------------------------
+    // CAUTION - DO NOT CHANGE THIS FUNCTION WITHOUT DUE CONSIDERATION FOR BREAKING THE PATCH FORMAT
+    // ---------------------------------------------------------------------------------------------
+    // This test writes a simple patch with all the static data listed above and verifies it.
+    // This code MUST be INDEPENDENT of the real patch parser code, even if it is partially
+    // redundant; this guards against accidental changes to the patch writer that could alter the
+    // format and otherwise escape detection.
+    PatchWriter writer =
+        new PatchWriter(
+            PLAN, DELTA_FRIENDLY_OLD_FILE_SIZE, DELTA_FRIENDLY_NEW_FILE_SIZE, deltaFile);
+    writer.writeV1Patch(buffer);
+    DataInputStream patchIn = new DataInputStream(new ByteArrayInputStream(buffer.toByteArray()));
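+    // DataInputStream reads multi-byte values in big-endian order.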
+    byte[] eightBytes = new byte[8];
+
+    // Start by reading the signature and flags
+    patchIn.readFully(eightBytes);
+    Assert.assertArrayEquals(PatchConstants.IDENTIFIER.getBytes("US-ASCII"), eightBytes);
+    Assert.assertEquals(0, patchIn.readInt()); // Flags, all reserved in v1
+
+    Assert.assertEquals(DELTA_FRIENDLY_OLD_FILE_SIZE, patchIn.readLong());
+
+    // Read the uncompression instructions
+    Assert.assertEquals(1, patchIn.readInt()); // Number of old archive uncompression instructions
+    Assert.assertEquals(OLD_DELTA_FRIENDLY_UNCOMPRESS_RANGE.getOffset(), patchIn.readLong());
+    Assert.assertEquals(OLD_DELTA_FRIENDLY_UNCOMPRESS_RANGE.getLength(), patchIn.readLong());
+
+    // Read the recompression instructions
+    Assert.assertEquals(1, patchIn.readInt()); // Number of new archive recompression instructions
+    Assert.assertEquals(NEW_DELTA_FRIENDLY_RECOMPRESS_RANGE.getOffset(), patchIn.readLong());
+    Assert.assertEquals(NEW_DELTA_FRIENDLY_RECOMPRESS_RANGE.getLength(), patchIn.readLong());
+    // Now the JreDeflateParameters for the record
+    Assert.assertEquals(
+        PatchConstants.CompatibilityWindowId.DEFAULT_DEFLATE.patchValue, patchIn.read());
+    Assert.assertEquals(DEFLATE_PARAMS.level, patchIn.read());
+    Assert.assertEquals(DEFLATE_PARAMS.strategy, patchIn.read());
+    Assert.assertEquals(DEFLATE_PARAMS.nowrap ? 1 : 0, patchIn.read());
+
+    // Delta section. V1 patches have exactly one delta entry and it is always mapped to the entire
+    // file contents of the delta-friendly archives
+    Assert.assertEquals(1, patchIn.readInt()); // Number of difference records
+    Assert.assertEquals(PatchConstants.DeltaFormat.BSDIFF.patchValue, patchIn.read());
+    Assert.assertEquals(0, patchIn.readLong()); // Old delta-friendly range start
+    Assert.assertEquals(DELTA_FRIENDLY_OLD_FILE_SIZE, patchIn.readLong()); // old range length
+    Assert.assertEquals(0, patchIn.readLong()); // New delta-friendly range start
+    Assert.assertEquals(DELTA_FRIENDLY_NEW_FILE_SIZE, patchIn.readLong()); // new range length
+    byte[] expectedDeltaContent = DELTA_CONTENT.getBytes("US-ASCII");
+    Assert.assertEquals(expectedDeltaContent.length, patchIn.readLong());
+    byte[] actualDeltaContent = new byte[expectedDeltaContent.length];
+    patchIn.readFully(actualDeltaContent);
+    Assert.assertArrayEquals(expectedDeltaContent, actualDeltaContent);
+  }
+}
diff --git a/generator/src/test/java/com/google/archivepatcher/generator/PreDiffExecutorTest.java b/generator/src/test/java/com/google/archivepatcher/generator/PreDiffExecutorTest.java
new file mode 100644
index 0000000..e697ffa
--- /dev/null
+++ b/generator/src/test/java/com/google/archivepatcher/generator/PreDiffExecutorTest.java
@@ -0,0 +1,231 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import com.google.archivepatcher.shared.UnitTestZipArchive;
+import com.google.archivepatcher.shared.UnitTestZipEntry;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/**
+ * Tests for {@link PreDiffExecutor}.
+ */
+@RunWith(JUnit4.class)
+@SuppressWarnings("javadoc")
+public class PreDiffExecutorTest {
+  private static final UnitTestZipEntry ENTRY_LEVEL_6 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/for/great/justice", 6, "entry A", null);
+  private static final UnitTestZipEntry ENTRY_LEVEL_9 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/for/great/justice", 9, "entry A", null);
+
+  private List<File> tempFilesCreated;
+  private File deltaFriendlyOldFile;
+  private File deltaFriendlyNewFile;
+
+  @Before
+  public void setup() throws IOException {
+    tempFilesCreated = new LinkedList<File>();
+    deltaFriendlyOldFile = newTempFile();
+    deltaFriendlyNewFile = newTempFile();
+  }
+
+  @After
+  public void tearDown() {
+    for (File file : tempFilesCreated) {
+      try {
+        file.delete();
+      } catch (Exception ignored) {
+        // Nothing
+      }
+    }
+  }
+
+  /**
+   * Stores the specified bytes to disk in a temp file and returns the temp file.
+   * @param data the bytes to store
+   * @return the temp file containing the data
+   * @throws IOException if writing the file fails
+   */
+  private File store(byte[] data) throws IOException {
+    File file = newTempFile();
+    try (FileOutputStream out = new FileOutputStream(file)) {
+      out.write(data);
+      out.flush();
+    }
+    return file;
+  }
+
+  /**
+   * Make a new temp file and schedule it for deletion on exit and during teardown.
+   * @return the file created
+   * @throws IOException if the temp file cannot be created
+   */
+  private File newTempFile() throws IOException {
+    File file = File.createTempFile("pdet", "bin");
+    tempFilesCreated.add(file);
+    file.deleteOnExit();
+    return file;
+  }
+
+  private MinimalZipEntry findEntry(File file, String path) throws IOException {
+    List<MinimalZipEntry> entries = MinimalZipArchive.listEntries(file);
+    for (MinimalZipEntry entry : entries) {
+      if (path.equals(entry.getFileName())) {
+        return entry;
+      }
+    }
+    Assert.fail("path not found: " + path);
+    return null; // Never executed
+  }
+
+  private byte[] readFile(File file) throws IOException {
+    byte[] result = new byte[(int) file.length()];
+    try (FileInputStream fis = new FileInputStream(file);
+        DataInputStream dis = new DataInputStream(fis)) {
+      dis.readFully(result);
+    }
+    return result;
+  }
+
+  private void assertFileEquals(File file1, File file2) throws IOException {
+    Assert.assertEquals(file1.length(), file2.length());
+    byte[] content1 = readFile(file1);
+    byte[] content2 = readFile(file2);
+    Assert.assertArrayEquals(content1, content2);
+  }
+
+  @Test
+  public void testPrepareForDiffing_OneCompressedEntry_Unchanged() throws IOException {
+    byte[] bytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_LEVEL_6));
+    File oldFile = store(bytes);
+    File newFile = store(bytes);
+    PreDiffExecutor executor =
+        new PreDiffExecutor.Builder()
+            .readingOriginalFiles(oldFile, newFile)
+            .writingDeltaFriendlyFiles(deltaFriendlyOldFile, deltaFriendlyNewFile)
+            .build();
+    PreDiffPlan plan = executor.prepareForDiffing();
+    Assert.assertNotNull(plan);
+    // The plan should be to leave everything alone because there is no change.
+    Assert.assertTrue(plan.getOldFileUncompressionPlan().isEmpty());
+    Assert.assertTrue(plan.getNewFileUncompressionPlan().isEmpty());
+    Assert.assertTrue(plan.getDeltaFriendlyNewFileRecompressionPlan().isEmpty());
+    // Because nothing has changed, the delta-friendly files should be exact matches for the
+    // original files.
+    assertFileEquals(oldFile, deltaFriendlyOldFile);
+    assertFileEquals(newFile, deltaFriendlyNewFile);
+  }
+
+  @Test
+  public void testPrepareForDiffing_OneCompressedEntry_Changed() throws IOException {
+    byte[] oldBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_LEVEL_6));
+    File oldFile = store(oldBytes);
+    byte[] newBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_LEVEL_9));
+    File newFile = store(newBytes);
+    PreDiffExecutor executor =
+        new PreDiffExecutor.Builder()
+            .readingOriginalFiles(oldFile, newFile)
+            .writingDeltaFriendlyFiles(deltaFriendlyOldFile, deltaFriendlyNewFile)
+            .build();
+    PreDiffPlan plan = executor.prepareForDiffing();
+    Assert.assertNotNull(plan);
+    // The plan should be to uncompress the data in both the old and new files.
+    Assert.assertEquals(1, plan.getOldFileUncompressionPlan().size());
+    Assert.assertEquals(1, plan.getNewFileUncompressionPlan().size());
+    Assert.assertEquals(1, plan.getDeltaFriendlyNewFileRecompressionPlan().size());
+    // The delta-friendly files should be larger than the originals.
+    Assert.assertTrue(oldFile.length() < deltaFriendlyOldFile.length());
+    Assert.assertTrue(newFile.length() < deltaFriendlyNewFile.length());
+
+    // Nitty-gritty, assert that the file content is exactly what is expected.
+    // 1. Find the entry in the old file.
+    // 2. Create a buffer to hold the expected data.
+    // 3. Copy all the file data that PRECEDES the compressed data into the buffer.
+    // 4. Copy the UNCOMPRESSED data from the unit test object into the buffer.
+    // 5. Copy all the file data that FOLLOWS the compressed data into the buffer.
+    // This should be exactly what is produced. Note that this is not a valid ZIP archive, as the
+    // offsets and lengths in the zip metadata are no longer tied to the actual data. This is
+    // normal and expected, since the delta-friendly file is not actually an archive anymore.
+    { // Scoping block for sanity
+      MinimalZipEntry oldEntry = findEntry(oldFile, ENTRY_LEVEL_6.path);
+      ByteArrayOutputStream expectedDeltaFriendlyOldFileBytes = new ByteArrayOutputStream();
+      expectedDeltaFriendlyOldFileBytes.write(
+          oldBytes, 0, (int) oldEntry.getFileOffsetOfCompressedData());
+      expectedDeltaFriendlyOldFileBytes.write(ENTRY_LEVEL_6.getUncompressedBinaryContent());
+      int oldRemainderOffset =
+          (int) (oldEntry.getFileOffsetOfCompressedData() + oldEntry.getCompressedSize());
+      int oldRemainderLength = oldBytes.length - oldRemainderOffset;
+      expectedDeltaFriendlyOldFileBytes.write(oldBytes, oldRemainderOffset, oldRemainderLength);
+      byte[] expectedOld = expectedDeltaFriendlyOldFileBytes.toByteArray();
+      byte[] actualOld = readFile(deltaFriendlyOldFile);
+      Assert.assertArrayEquals(expectedOld, actualOld);
+    }
+
+    // Now do the same for the new file and new entry
+    { // Scoping block for sanity
+      MinimalZipEntry newEntry = findEntry(newFile, ENTRY_LEVEL_9.path);
+      ByteArrayOutputStream expectedDeltaFriendlyNewFileBytes = new ByteArrayOutputStream();
+      expectedDeltaFriendlyNewFileBytes.write(
+          newBytes, 0, (int) newEntry.getFileOffsetOfCompressedData());
+      expectedDeltaFriendlyNewFileBytes.write(ENTRY_LEVEL_9.getUncompressedBinaryContent());
+      int newRemainderOffset =
+          (int) (newEntry.getFileOffsetOfCompressedData() + newEntry.getCompressedSize());
+      int newRemainderLength = newBytes.length - newRemainderOffset;
+      expectedDeltaFriendlyNewFileBytes.write(newBytes, newRemainderOffset, newRemainderLength);
+      byte[] expectedNew = expectedDeltaFriendlyNewFileBytes.toByteArray();
+      byte[] actualNew = readFile(deltaFriendlyNewFile);
+      Assert.assertArrayEquals(expectedNew, actualNew);
+    }
+  }
+
+  @Test
+  public void testPrepareForDiffing_OneCompressedEntry_Changed_Limited() throws IOException {
+    // Like above, but this time limited by a TotalRecompressionLimiter that will prevent the
+    // uncompression of the resources.
+    byte[] oldBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_LEVEL_6));
+    File oldFile = store(oldBytes);
+    byte[] newBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_LEVEL_9));
+    File newFile = store(newBytes);
+    TotalRecompressionLimiter limiter = new TotalRecompressionLimiter(1); // 1 byte limitation
+    PreDiffExecutor executor =
+        new PreDiffExecutor.Builder()
+            .readingOriginalFiles(oldFile, newFile)
+            .writingDeltaFriendlyFiles(deltaFriendlyOldFile, deltaFriendlyNewFile)
+            .withRecommendationModifier(limiter)
+            .build();
+    PreDiffPlan plan = executor.prepareForDiffing();
+    Assert.assertNotNull(plan);
+    // The plan should be to leave everything alone because of the limiter
+    Assert.assertTrue(plan.getOldFileUncompressionPlan().isEmpty());
+    Assert.assertTrue(plan.getNewFileUncompressionPlan().isEmpty());
+    Assert.assertTrue(plan.getDeltaFriendlyNewFileRecompressionPlan().isEmpty());
+    // Because nothing has changed, the delta-friendly files should be exact matches for the
+    // original files.
+    assertFileEquals(oldFile, deltaFriendlyOldFile);
+    assertFileEquals(newFile, deltaFriendlyNewFile);
+  }
+}
diff --git a/generator/src/test/java/com/google/archivepatcher/generator/PreDiffPlanTest.java b/generator/src/test/java/com/google/archivepatcher/generator/PreDiffPlanTest.java
new file mode 100644
index 0000000..a7e2962
--- /dev/null
+++ b/generator/src/test/java/com/google/archivepatcher/generator/PreDiffPlanTest.java
@@ -0,0 +1,82 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import com.google.archivepatcher.shared.JreDeflateParameters;
+import com.google.archivepatcher.shared.TypedRange;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Tests for {@link PreDiffPlan}.
+ */
+@RunWith(JUnit4.class)
+@SuppressWarnings("javadoc")
+public class PreDiffPlanTest {
+  private static final List<TypedRange<Void>> SORTED_VOID_LIST =
+      Collections.unmodifiableList(
+          Arrays.asList(new TypedRange<Void>(0, 1, null), new TypedRange<Void>(1, 1, null)));
+  private static final List<TypedRange<JreDeflateParameters>> SORTED_DEFLATE_LIST =
+      Collections.unmodifiableList(
+          Arrays.asList(
+              new TypedRange<JreDeflateParameters>(0, 1, JreDeflateParameters.of(1, 0, true)),
+              new TypedRange<JreDeflateParameters>(1, 1, JreDeflateParameters.of(1, 0, true))));
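+  // Both lists above are sorted by offset; PreDiffPlan must reject unsorted (reversed) copies.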
+
+  private <T> List<T> reverse(List<T> list) {
+    List<T> reversed = new ArrayList<T>(list);
+    Collections.reverse(reversed);
+    return reversed;
+  }
+
+  @Test
+  public void testConstructor_OrderOK() {
+    new PreDiffPlan(
+        Collections.<QualifiedRecommendation>emptyList(), SORTED_VOID_LIST, SORTED_DEFLATE_LIST);
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testConstructor_OldFileUncompressionOrderNotOK() {
+    new PreDiffPlan(
+        Collections.<QualifiedRecommendation>emptyList(),
+        reverse(SORTED_VOID_LIST),
+        SORTED_DEFLATE_LIST,
+        SORTED_DEFLATE_LIST);
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testConstructor_NewFileUncompressionOrderNotOK() {
+    new PreDiffPlan(
+        Collections.<QualifiedRecommendation>emptyList(),
+        SORTED_VOID_LIST,
+        reverse(SORTED_DEFLATE_LIST),
+        SORTED_DEFLATE_LIST);
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testConstructor_NewFileRecompressionOrderNotOK() {
+    new PreDiffPlan(
+        Collections.<QualifiedRecommendation>emptyList(),
+        SORTED_VOID_LIST,
+        SORTED_DEFLATE_LIST,
+        reverse(SORTED_DEFLATE_LIST));
+  }
+}
diff --git a/generator/src/test/java/com/google/archivepatcher/generator/PreDiffPlannerTest.java b/generator/src/test/java/com/google/archivepatcher/generator/PreDiffPlannerTest.java
new file mode 100644
index 0000000..9ba39e5
--- /dev/null
+++ b/generator/src/test/java/com/google/archivepatcher/generator/PreDiffPlannerTest.java
@@ -0,0 +1,719 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import com.google.archivepatcher.generator.DefaultDeflateCompressionDiviner.DivinationResult;
+import com.google.archivepatcher.shared.DefaultDeflateCompatibilityWindow;
+import com.google.archivepatcher.shared.JreDeflateParameters;
+import com.google.archivepatcher.shared.RandomAccessFileInputStream;
+import com.google.archivepatcher.shared.TypedRange;
+import com.google.archivepatcher.shared.UnitTestZipArchive;
+import com.google.archivepatcher.shared.UnitTestZipEntry;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.io.UnsupportedEncodingException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/**
+ * Tests for {@link PreDiffPlanner}.
+ */
+@RunWith(JUnit4.class)
+@SuppressWarnings("javadoc")
+public class PreDiffPlannerTest {
+
+  // All the A and B entries consist of a chunk of text followed by a standard corpus of text from
+  // the DefaultDeflateCompatibilityWindow that ensures the tests will be able to discriminate
+  // between compression levels. Without this additional corpus text, multiple compression levels
+  // could match the entry and the unit tests would not be accurate.
+  private static final UnitTestZipEntry ENTRY_A_LEVEL_6 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/path A", 6, "entry A", null);
+  private static final UnitTestZipEntry ENTRY_A_LEVEL_9 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/path A", 9, "entry A", null);
+  private static final UnitTestZipEntry ENTRY_A_STORED =
+      UnitTestZipArchive.makeUnitTestZipEntry("/path A", 0, "entry A", null);
+  private static final UnitTestZipEntry ENTRY_B_LEVEL_6 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/path B", 6, "entry B", null);
+  private static final UnitTestZipEntry ENTRY_B_LEVEL_9 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/path B", 9, "entry B", null);
+
+  /**
+   * Entry C1 is a small entry WITHOUT the standard corpus of text from
+   * {@link DefaultDeflateCompatibilityWindow} appended. It has exactly the same compressed length
+   * as {@link #FIXED_LENGTH_ENTRY_C2_LEVEL_6}, and can be used to test the byte-matching logic in
+   * the code when the compressed lengths are identical.
+   */
+  private static final UnitTestZipEntry FIXED_LENGTH_ENTRY_C1_LEVEL_6 =
+      new UnitTestZipEntry("/path C", 6, "qqqqqqqqqqqqqqqqqqqqqqqqqqqq", null);
+
+  /**
+   * Entry C2 is a small entry WITHOUT the standard corpus of text from
+   * {@link DefaultDeflateCompatibilityWindow} appended. It has exactly the same compressed length
+   * as {@link #FIXED_LENGTH_ENTRY_C1_LEVEL_6}, and can be used to test the byte-matching logic in
+   * the code when the compressed lengths are identical.
+   */
+  private static final UnitTestZipEntry FIXED_LENGTH_ENTRY_C2_LEVEL_6 =
+      new UnitTestZipEntry("/path C", 6, "rrrrrrrrrrrrrrrrrrrrrrrrrrrr", null);
+
+  // The "shadow" entries are exact copies of ENTRY_A_* but have a different path. These are used
+  // for the detection of renames that don't involve modification (i.e., the uncompressed CRC32 is
+  // exactly the same as the ENTRY_A_* entries)
+  private static final UnitTestZipEntry SHADOW_ENTRY_A_LEVEL_1 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/uncompressed data same as A", 1, "entry A", null);
+  private static final UnitTestZipEntry SHADOW_ENTRY_A_LEVEL_6 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/same as A level 6", 6, "entry A", null);
+  private static final UnitTestZipEntry SHADOW_ENTRY_A_LEVEL_9 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/same as A level 9", 9, "entry A", null);
+  private static final UnitTestZipEntry SHADOW_ENTRY_A_STORED =
+      UnitTestZipArchive.makeUnitTestZipEntry("/same as A stored", 0, "entry A", null);
+
+  private List<File> tempFilesCreated;
+  private Map<File, Map<ByteArrayHolder, MinimalZipEntry>> entriesByPathByTempFile;
+
+  @Before
+  public void setup() {
+    tempFilesCreated = new LinkedList<File>();
+    entriesByPathByTempFile = new HashMap<File, Map<ByteArrayHolder, MinimalZipEntry>>();
+  }
+
+  @After
+  public void tearDown() {
+    for (File file : tempFilesCreated) {
+      try {
+        file.delete();
+      } catch (Exception ignored) {
+        // Best-effort cleanup; deleteOnExit() covers anything that cannot be deleted here.
+      }
+    }
+  }
+
+  /**
+   * Stores the specified bytes to disk in a temp file and caches the zip entries of that file for
+   * use in later code.
+   * @param data the bytes to store, expected to be a valid zip file
+   * @return the temp file that was created
+   * @throws IOException if unable to write the file
+   */
+  private File storeAndMapArchive(byte[] data) throws IOException {
+    File file = File.createTempFile("pdpt", "zip");
+    tempFilesCreated.add(file);
+    file.deleteOnExit();
+    try (FileOutputStream out = new FileOutputStream(file)) {
+      out.write(data);
+      out.flush();
+    }
+    Map<ByteArrayHolder, MinimalZipEntry> entriesByPath = new HashMap<>();
+    for (MinimalZipEntry zipEntry : MinimalZipArchive.listEntries(file)) {
+      ByteArrayHolder key = new ByteArrayHolder(zipEntry.getFileNameBytes());
+      entriesByPath.put(key, zipEntry);
+    }
+    entriesByPathByTempFile.put(file, entriesByPath);
+    return file;
+  }
+
+  /**
+   * Finds a unit test entry in the specified temp file.
+   * @param tempFile the archive to search within
+   * @param unitTestEntry the unit test entry to look up
+   * @return the {@link MinimalZipEntry} corresponding to the unit test entry
+   */
+  private MinimalZipEntry findEntry(File tempFile, UnitTestZipEntry unitTestEntry) {
+    Map<ByteArrayHolder, MinimalZipEntry> subMap = entriesByPathByTempFile.get(tempFile);
+    Assert.assertNotNull("temp file not mapped", subMap);
+    ByteArrayHolder key;
+    try {
+      key = new ByteArrayHolder(unitTestEntry.path.getBytes("UTF8"));
+    } catch (UnsupportedEncodingException e) {
+      throw new RuntimeException(e);
+    }
+    return subMap.get(key);
+  }
+
+  /**
+   * Finds the {@link TypedRange} corresponding to the compressed data for the specified unit test
+   * entry in the specified temp file.
+   * @param tempFile the archive to search within
+   * @param unitTestEntry the unit test entry to look up
+   * @return the {@link TypedRange} for the unit test entry's compressed data
+   */
+  private TypedRange<Void> findRangeWithoutParams(File tempFile, UnitTestZipEntry unitTestEntry) {
+    MinimalZipEntry found = findEntry(tempFile, unitTestEntry);
+    Assert.assertNotNull("entry not found in temp file", found);
+    return new TypedRange<Void>(
+        found.getFileOffsetOfCompressedData(), found.getCompressedSize(), null);
+  }
+
+  /**
+   * Finds the {@link TypedRange} corresponding to the compressed data for the specified unit test
+   * entry in the specified temp file, with the expected {@link JreDeflateParameters} attached.
+   * @param tempFile the archive to search within
+   * @param unitTestEntry the unit test entry to look up
+   * @return the {@link TypedRange} for the unit test entry's compressed data, including the
+   *     deflate parameters implied by the entry's compression level
+   */
+  private TypedRange<JreDeflateParameters> findRangeWithParams(
+      File tempFile, UnitTestZipEntry unitTestEntry) {
+    MinimalZipEntry found = findEntry(tempFile, unitTestEntry);
+    Assert.assertNotNull("entry not found in temp file", found);
+    return new TypedRange<JreDeflateParameters>(
+        found.getFileOffsetOfCompressedData(),
+        found.getCompressedSize(),
+        JreDeflateParameters.of(unitTestEntry.level, 0, true));
+  }
+
+  /**
+   * Deliberately introduce an error into the specified entry, making it impossible to divine the
+   * entry's deflate settings because the compressed data is corrupt.
+   * @param tempFile the archive to search within
+   * @param unitTestEntry the unit test entry to deliberately corrupt
+   */
+  private void corruptEntryData(File tempFile, UnitTestZipEntry unitTestEntry) throws IOException {
+    TypedRange<Void> range = findRangeWithoutParams(tempFile, unitTestEntry);
+    Assert.assertTrue("range too short to corrupt with 'junk'", range.getLength() >= 4);
+    try (RandomAccessFile raf = new RandomAccessFile(tempFile, "rw")) {
+      raf.seek(range.getOffset());
+      raf.write("junk".getBytes("UTF8"));
+    }
+  }
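+  // The diviner presumably works by trial recompression and byte-for-byte comparison; once the
+  // leading bytes are overwritten with "junk", no deflate parameter combination can reproduce
+  // the stream, so divination is guaranteed to fail.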
+
+  /**
+   * Deliberately garble the compression method in the specified entry such that it is no longer
+   * deflate.
+   * @param tempFile the archive to search within
+   * @param unitTestEntry the unit test entry to deliberately corrupt
+   */
+  private void corruptCompressionMethod(File tempFile, UnitTestZipEntry unitTestEntry)
+      throws IOException {
+    long centralDirectoryRecordOffset = -1;
+    try (RandomAccessFileInputStream rafis = new RandomAccessFileInputStream(tempFile)) {
+      long startOfEocd = MinimalZipParser.locateStartOfEocd(rafis, 32768);
+      rafis.setRange(startOfEocd, tempFile.length() - startOfEocd);
+      MinimalCentralDirectoryMetadata centralDirectoryMetadata = MinimalZipParser.parseEocd(rafis);
+      int numEntries = centralDirectoryMetadata.getNumEntriesInCentralDirectory();
+      rafis.setRange(
+          centralDirectoryMetadata.getOffsetOfCentralDirectory(),
+          centralDirectoryMetadata.getLengthOfCentralDirectory());
+      for (int x = 0; x < numEntries; x++) {
+        long recordStartOffset = rafis.getPosition();
+        MinimalZipEntry candidate = MinimalZipParser.parseCentralDirectoryEntry(rafis);
+        if (candidate.getFileName().equals(unitTestEntry.path)) {
+          // Located! Record the offset and stop scanning.
+          centralDirectoryRecordOffset = recordStartOffset;
+          break;
+        }
+      }
+    }
+
+    Assert.assertNotEquals("Entry not found", -1L, centralDirectoryRecordOffset);
+    try (RandomAccessFile raf = new RandomAccessFile(tempFile, "rw")) {
+      // compression method is a 2 byte field stored 10 bytes into the record
+      raf.seek(centralDirectoryRecordOffset + 10);
+      raf.write(7);
+      raf.write(7);
+    }
+  }
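+  // For reference, the start of the central directory file header layout from the ZIP
+  // specification (APPNOTE.TXT), which explains the magic offset of 10 used above:
+  //   offset 0:  central file header signature (4 bytes, 0x02014b50)
+  //   offset 4:  version made by (2 bytes)
+  //   offset 6:  version needed to extract (2 bytes)
+  //   offset 8:  general purpose bit flag (2 bytes)
+  //   offset 10: compression method (2 bytes) <- overwritten with 0x0707, not a defined method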
+
+  private PreDiffPlan invokeGeneratePreDiffPlan(
+      File oldFile, File newFile, RecommendationModifier... recommendationModifiers)
+      throws IOException {
+    Map<ByteArrayHolder, MinimalZipEntry> originalOldArchiveZipEntriesByPath =
+        new LinkedHashMap<ByteArrayHolder, MinimalZipEntry>();
+    Map<ByteArrayHolder, MinimalZipEntry> originalNewArchiveZipEntriesByPath =
+        new LinkedHashMap<ByteArrayHolder, MinimalZipEntry>();
+    Map<ByteArrayHolder, JreDeflateParameters> originalNewArchiveJreDeflateParametersByPath =
+        new LinkedHashMap<ByteArrayHolder, JreDeflateParameters>();
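+    // LinkedHashMap is presumably chosen so that iteration preserves central directory order;
+    // the planner must emit its uncompression plans in file order (see the swapping-order test).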
+
+    for (MinimalZipEntry zipEntry : MinimalZipArchive.listEntries(oldFile)) {
+      ByteArrayHolder key = new ByteArrayHolder(zipEntry.getFileNameBytes());
+      originalOldArchiveZipEntriesByPath.put(key, zipEntry);
+    }
+
+    DefaultDeflateCompressionDiviner diviner = new DefaultDeflateCompressionDiviner();
+    for (DivinationResult divinationResult : diviner.divineDeflateParameters(newFile)) {
+      ByteArrayHolder key =
+          new ByteArrayHolder(divinationResult.minimalZipEntry.getFileNameBytes());
+      originalNewArchiveZipEntriesByPath.put(key, divinationResult.minimalZipEntry);
+      originalNewArchiveJreDeflateParametersByPath.put(key, divinationResult.divinedParameters);
+    }
+
+    PreDiffPlanner preDiffPlanner =
+        new PreDiffPlanner(
+            oldFile,
+            originalOldArchiveZipEntriesByPath,
+            newFile,
+            originalNewArchiveZipEntriesByPath,
+            originalNewArchiveJreDeflateParametersByPath,
+            recommendationModifiers);
+    return preDiffPlanner.generatePreDiffPlan();
+  }
+
+  private void checkRecommendation(PreDiffPlan plan, QualifiedRecommendation... expected) {
+    Assert.assertNotNull(plan.getQualifiedRecommendations());
+    Assert.assertEquals(expected.length, plan.getQualifiedRecommendations().size());
+    for (int x = 0; x < expected.length; x++) {
+      QualifiedRecommendation actual = plan.getQualifiedRecommendations().get(x);
+      Assert.assertEquals(
+          expected[x].getOldEntry().getFileName(), actual.getOldEntry().getFileName());
+      Assert.assertEquals(
+          expected[x].getNewEntry().getFileName(), actual.getNewEntry().getFileName());
+      Assert.assertEquals(expected[x].getRecommendation(), actual.getRecommendation());
+      Assert.assertEquals(expected[x].getReason(), actual.getReason());
+    }
+  }
+
+  @Test
+  public void testGeneratePreDiffPlan_OneCompressedEntry_Unchanged() throws IOException {
+    byte[] bytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A_LEVEL_6));
+    File oldFile = storeAndMapArchive(bytes);
+    File newFile = storeAndMapArchive(bytes);
+    PreDiffPlan plan = invokeGeneratePreDiffPlan(oldFile, newFile);
+    Assert.assertNotNull(plan);
+    // The plan should be to leave the entry alone in both the old and new archives (empty plans).
+    Assert.assertTrue(plan.getOldFileUncompressionPlan().isEmpty());
+    Assert.assertTrue(plan.getNewFileUncompressionPlan().isEmpty());
+    checkRecommendation(plan, new QualifiedRecommendation(
+        findEntry(oldFile, ENTRY_A_LEVEL_6),
+        findEntry(newFile, ENTRY_A_LEVEL_6),
+        Recommendation.UNCOMPRESS_NEITHER,
+        RecommendationReason.COMPRESSED_BYTES_IDENTICAL));
+  }
+
+  @Test
+  public void testGeneratePreDiffPlan_OneCompressedEntry_LengthsChanged() throws IOException {
+    // Test detection of compressed entry differences based on length mismatch.
+    byte[] oldBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A_LEVEL_6));
+    byte[] newBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A_LEVEL_9));
+    File oldFile = storeAndMapArchive(oldBytes);
+    File newFile = storeAndMapArchive(newBytes);
+    PreDiffPlan plan = invokeGeneratePreDiffPlan(oldFile, newFile);
+    Assert.assertNotNull(plan);
+    // The plan should be to uncompress the entry in both the old and new archives.
+    Assert.assertEquals(1, plan.getOldFileUncompressionPlan().size());
+    Assert.assertEquals(
+        findRangeWithoutParams(oldFile, ENTRY_A_LEVEL_6),
+        plan.getOldFileUncompressionPlan().get(0));
+    Assert.assertEquals(1, plan.getNewFileUncompressionPlan().size());
+    Assert.assertEquals(
+        findRangeWithParams(newFile, ENTRY_A_LEVEL_9), plan.getNewFileUncompressionPlan().get(0));
+    checkRecommendation(plan, new QualifiedRecommendation(
+        findEntry(oldFile, ENTRY_A_LEVEL_6),
+        findEntry(newFile, ENTRY_A_LEVEL_9),
+        Recommendation.UNCOMPRESS_BOTH,
+        RecommendationReason.COMPRESSED_BYTES_CHANGED));
+  }
+
+  @Test
+  public void testGeneratePreDiffPlan_OneCompressedEntry_BytesChanged() throws IOException {
+    // Test detection of compressed entry differences based on binary content mismatch where the
+    // compressed lengths are exactly the same - i.e., force a byte-by-byte comparison of the
+    // compressed data in the two entries.
+    byte[] oldBytes =
+        UnitTestZipArchive.makeTestZip(Collections.singletonList(FIXED_LENGTH_ENTRY_C1_LEVEL_6));
+    byte[] newBytes =
+        UnitTestZipArchive.makeTestZip(Collections.singletonList(FIXED_LENGTH_ENTRY_C2_LEVEL_6));
+    File oldFile = storeAndMapArchive(oldBytes);
+    File newFile = storeAndMapArchive(newBytes);
+    PreDiffPlan plan = invokeGeneratePreDiffPlan(oldFile, newFile);
+    Assert.assertNotNull(plan);
+    // The plan should be to uncompress the entry in both the old and new archives.
+    Assert.assertEquals(1, plan.getOldFileUncompressionPlan().size());
+    Assert.assertEquals(1, plan.getNewFileUncompressionPlan().size());
+    Assert.assertEquals(
+        findRangeWithoutParams(oldFile, FIXED_LENGTH_ENTRY_C1_LEVEL_6),
+        plan.getOldFileUncompressionPlan().get(0));
+    Assert.assertEquals(
+        findRangeWithParams(newFile, FIXED_LENGTH_ENTRY_C2_LEVEL_6),
+        plan.getNewFileUncompressionPlan().get(0));
+    checkRecommendation(plan, new QualifiedRecommendation(
+        findEntry(oldFile, FIXED_LENGTH_ENTRY_C1_LEVEL_6),
+        findEntry(newFile, FIXED_LENGTH_ENTRY_C2_LEVEL_6),
+        Recommendation.UNCOMPRESS_BOTH,
+        RecommendationReason.COMPRESSED_BYTES_CHANGED));
+  }
+
+  @Test
+  public void testGeneratePreDiffPlan_OneUncompressedEntry() throws IOException {
+    // Test with uncompressed old and new. It doesn't matter whether the bytes are changed or
+    // unchanged in this case.
+    byte[] oldBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A_STORED));
+    byte[] newBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A_STORED));
+    File oldFile = storeAndMapArchive(oldBytes);
+    File newFile = storeAndMapArchive(newBytes);
+    PreDiffPlan plan = invokeGeneratePreDiffPlan(oldFile, newFile);
+    Assert.assertNotNull(plan);
+    // The plan should be to do nothing because both entries are already uncompressed
+    Assert.assertTrue(plan.getOldFileUncompressionPlan().isEmpty());
+    Assert.assertTrue(plan.getNewFileUncompressionPlan().isEmpty());
+    checkRecommendation(plan, new QualifiedRecommendation(
+        findEntry(oldFile, ENTRY_A_STORED),
+        findEntry(newFile, ENTRY_A_STORED),
+        Recommendation.UNCOMPRESS_NEITHER,
+        RecommendationReason.BOTH_ENTRIES_UNCOMPRESSED));
+  }
+
+  @Test
+  public void testGeneratePreDiffPlan_OneEntry_CompressedToUncompressed() throws IOException {
+    // Test the migration of an entry from compressed (old archive) to uncompressed (new archive).
+    byte[] oldBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A_LEVEL_9));
+    byte[] newBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A_STORED));
+    File oldFile = storeAndMapArchive(oldBytes);
+    File newFile = storeAndMapArchive(newBytes);
+    PreDiffPlan plan = invokeGeneratePreDiffPlan(oldFile, newFile);
+    Assert.assertNotNull(plan);
+    // The plan should be to uncompress the entry in the old archive and do nothing in the new
+    // archive (empty plan)
+    Assert.assertEquals(1, plan.getOldFileUncompressionPlan().size());
+    Assert.assertEquals(
+        findRangeWithoutParams(oldFile, ENTRY_A_LEVEL_9),
+        plan.getOldFileUncompressionPlan().get(0));
+    Assert.assertTrue(plan.getNewFileUncompressionPlan().isEmpty());
+    checkRecommendation(plan, new QualifiedRecommendation(
+        findEntry(oldFile, ENTRY_A_LEVEL_9),
+        findEntry(newFile, ENTRY_A_STORED),
+        Recommendation.UNCOMPRESS_OLD,
+        RecommendationReason.COMPRESSED_CHANGED_TO_UNCOMPRESSED));
+  }
+
+  @Test
+  public void testGeneratePreDiffPlan_OneEntry_UncompressedToCompressed() throws IOException {
+    // Test the migration of an entry from uncompressed (old archive) to compressed (new archive).
+    byte[] oldBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A_STORED));
+    byte[] newBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A_LEVEL_6));
+    File oldFile = storeAndMapArchive(oldBytes);
+    File newFile = storeAndMapArchive(newBytes);
+    PreDiffPlan plan = invokeGeneratePreDiffPlan(oldFile, newFile);
+    Assert.assertNotNull(plan);
+    // The plan should be to do nothing in the old archive (empty plan) and uncompress the entry in
+    // the new archive
+    Assert.assertTrue(plan.getOldFileUncompressionPlan().isEmpty());
+    Assert.assertEquals(1, plan.getNewFileUncompressionPlan().size());
+    Assert.assertEquals(
+        findRangeWithParams(newFile, ENTRY_A_LEVEL_6), plan.getNewFileUncompressionPlan().get(0));
+    checkRecommendation(plan, new QualifiedRecommendation(
+        findEntry(oldFile, ENTRY_A_STORED),
+        findEntry(newFile, ENTRY_A_LEVEL_6),
+        Recommendation.UNCOMPRESS_NEW,
+        RecommendationReason.UNCOMPRESSED_CHANGED_TO_COMPRESSED));
+  }
+
+  @Test
+  public void testGeneratePreDiffPlan_OneEntry_UncompressedToUndivinable() throws IOException {
+    // Test the migration of an entry from uncompressed (old archive) to compressed (new archive),
+    // but make the new entry un-divinable and therefore un-recompressible.
+    byte[] oldBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A_STORED));
+    byte[] newBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A_LEVEL_6));
+    File oldFile = storeAndMapArchive(oldBytes);
+    File newFile = storeAndMapArchive(newBytes);
+    // Deliberately break the entry in the new file so that it will not be divinable
+    corruptEntryData(newFile, ENTRY_A_LEVEL_6);
+    PreDiffPlan plan = invokeGeneratePreDiffPlan(oldFile, newFile);
+    Assert.assertNotNull(plan);
+    // The plan WOULD be to do nothing in the old archive (empty plan) and uncompress the entry in
+    // the new archive, but because the new entry is un-divinable it cannot be recompressed and so
+    // the plan for the new archive should be empty as well.
+    Assert.assertTrue(plan.getOldFileUncompressionPlan().isEmpty());
+    Assert.assertTrue(plan.getNewFileUncompressionPlan().isEmpty());
+    checkRecommendation(plan, new QualifiedRecommendation(
+        findEntry(oldFile, ENTRY_A_STORED),
+        findEntry(newFile, ENTRY_A_LEVEL_6),
+        Recommendation.UNCOMPRESS_NEITHER,
+        RecommendationReason.UNSUITABLE));
+  }
+
+  @Test
+  public void testGeneratePreDiffPlan_OneEntry_OldUncompressed_NewNonDeflate() throws IOException {
+    // Test the case where the entry is compressed with something other than deflate in the new
+    // archive; it is thus not reproducible, not divinable, and therefore cannot be uncompressed.
+    byte[] oldBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A_STORED));
+    byte[] newBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A_LEVEL_9));
+    File oldFile = storeAndMapArchive(oldBytes);
+    File newFile = storeAndMapArchive(newBytes);
+    corruptCompressionMethod(newFile, ENTRY_A_LEVEL_9);
+    PreDiffPlan plan = invokeGeneratePreDiffPlan(oldFile, newFile);
+    Assert.assertNotNull(plan);
+    // The plan should be to do nothing (empty plans) because the entry in the old archive is
+    // already uncompressed and the entry in the new archive is not compressed with deflate (i.e.,
+    // cannot be recompressed, so it cannot be touched).
+    Assert.assertTrue(plan.getOldFileUncompressionPlan().isEmpty());
+    Assert.assertTrue(plan.getNewFileUncompressionPlan().isEmpty());
+    checkRecommendation(plan, new QualifiedRecommendation(
+        findEntry(oldFile, ENTRY_A_STORED),
+        findEntry(newFile, ENTRY_A_LEVEL_9),
+        Recommendation.UNCOMPRESS_NEITHER,
+        RecommendationReason.UNSUITABLE));
+  }
+
+  @Test
+  public void testGeneratePreDiffPlan_OneEntry_OldNonDeflate_NewUncompressed() throws IOException {
+    // Test the case where the entry is compressed with something other than deflate in the old
+    // archive; it can't be uncompressed, so there's no point in modifying the new entry either.
+    byte[] oldBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A_LEVEL_9));
+    byte[] newBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A_STORED));
+    File oldFile = storeAndMapArchive(oldBytes);
+    File newFile = storeAndMapArchive(newBytes);
+    corruptCompressionMethod(oldFile, ENTRY_A_LEVEL_9);
+    PreDiffPlan plan = invokeGeneratePreDiffPlan(oldFile, newFile);
+    Assert.assertNotNull(plan);
+    // The plan should be to do nothing (empty plans) because the entry in the old archive is
+    // not compressed with deflate, so there is no point in trying to do anything at all.
+    Assert.assertTrue(plan.getOldFileUncompressionPlan().isEmpty());
+    Assert.assertTrue(plan.getNewFileUncompressionPlan().isEmpty());
+    checkRecommendation(plan, new QualifiedRecommendation(
+        findEntry(oldFile, ENTRY_A_LEVEL_9),
+        findEntry(newFile, ENTRY_A_STORED),
+        Recommendation.UNCOMPRESS_NEITHER,
+        RecommendationReason.UNSUITABLE));
+  }
+
+  @Test
+  public void testGeneratePreDiffPlan_OneEntry_BothNonDeflate() throws IOException {
+    // Test the case where the entry is compressed with something other than deflate; it is thus
+    // not reproducible, not divinable, and therefore cannot be uncompressed.
+    byte[] oldBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A_LEVEL_6));
+    byte[] newBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A_LEVEL_9));
+    File oldFile = storeAndMapArchive(oldBytes);
+    File newFile = storeAndMapArchive(newBytes);
+    corruptCompressionMethod(oldFile, ENTRY_A_LEVEL_6);
+    corruptCompressionMethod(newFile, ENTRY_A_LEVEL_9);
+    PreDiffPlan plan = invokeGeneratePreDiffPlan(oldFile, newFile);
+    Assert.assertNotNull(plan);
+    // The plan should be to do nothing (empty plans) because the entries are not compressed with
+    // deflate
+    Assert.assertTrue(plan.getOldFileUncompressionPlan().isEmpty());
+    Assert.assertTrue(plan.getNewFileUncompressionPlan().isEmpty());
+    checkRecommendation(plan, new QualifiedRecommendation(
+        findEntry(oldFile, ENTRY_A_LEVEL_6),
+        findEntry(newFile, ENTRY_A_LEVEL_9),
+        Recommendation.UNCOMPRESS_NEITHER,
+        RecommendationReason.UNSUITABLE));
+  }
+
+  @Test
+  public void testGeneratePreDiffPlan_TwoDifferentEntries_DifferentPaths() throws IOException {
+    // Test the case where both the file paths and the content differ, i.e., each entry is
+    // exclusive to its own archive.
+    byte[] oldBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A_LEVEL_6));
+    byte[] newBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_B_LEVEL_6));
+    File oldFile = storeAndMapArchive(oldBytes);
+    File newFile = storeAndMapArchive(newBytes);
+    PreDiffPlan plan = invokeGeneratePreDiffPlan(oldFile, newFile);
+    Assert.assertNotNull(plan);
+    // The plan should be to do nothing (empty plans) because entry A is only in the old archive and
+    // entry B is only in the new archive, so there is nothing to diff.
+    Assert.assertTrue(plan.getOldFileUncompressionPlan().isEmpty());
+    Assert.assertTrue(plan.getNewFileUncompressionPlan().isEmpty());
+    Assert.assertTrue(plan.getQualifiedRecommendations().isEmpty());
+  }
+
+  @Test
+  public void testGeneratePreDiffPlan_TwoEntriesEachArchive_SwappingOrder() throws IOException {
+    // Test the case where two entries in each archive have both changed, AND they have changed
+    // places in the file. The plan is supposed to be in file order, so that streaming is possible;
+    // check that it is so.
+    byte[] oldBytes =
+        UnitTestZipArchive.makeTestZip(Arrays.asList(ENTRY_A_LEVEL_6, ENTRY_B_LEVEL_6));
+    byte[] newBytes =
+        UnitTestZipArchive.makeTestZip(Arrays.asList(ENTRY_B_LEVEL_9, ENTRY_A_LEVEL_9));
+    File oldFile = storeAndMapArchive(oldBytes);
+    File newFile = storeAndMapArchive(newBytes);
+    PreDiffPlan plan = invokeGeneratePreDiffPlan(oldFile, newFile);
+    Assert.assertNotNull(plan);
+    // The plan should be to uncompress both entries, but the order is important: both plans must
+    // be in file order so that the archives can be processed as streams.
+    Assert.assertEquals(2, plan.getOldFileUncompressionPlan().size());
+    Assert.assertEquals(2, plan.getNewFileUncompressionPlan().size());
+    Assert.assertEquals(
+        findRangeWithoutParams(oldFile, ENTRY_A_LEVEL_6),
+        plan.getOldFileUncompressionPlan().get(0));
+    Assert.assertEquals(
+        findRangeWithoutParams(oldFile, ENTRY_B_LEVEL_6),
+        plan.getOldFileUncompressionPlan().get(1));
+    Assert.assertEquals(
+        findRangeWithParams(newFile, ENTRY_B_LEVEL_9), plan.getNewFileUncompressionPlan().get(0));
+    Assert.assertEquals(
+        findRangeWithParams(newFile, ENTRY_A_LEVEL_9), plan.getNewFileUncompressionPlan().get(1));
+  }
+
+  @Test
+  public void testGeneratePreDiffPlan_SimpleRename_Unchanged() throws IOException {
+    // Test the case where file paths are different but the uncompressed content is the same.
+    // The compression method used for both entries is identical, as are the compressed bytes.
+    byte[] oldBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A_LEVEL_6));
+    byte[] newBytes =
+        UnitTestZipArchive.makeTestZip(Collections.singletonList(SHADOW_ENTRY_A_LEVEL_6));
+    File oldFile = storeAndMapArchive(oldBytes);
+    File newFile = storeAndMapArchive(newBytes);
+    PreDiffPlan plan = invokeGeneratePreDiffPlan(oldFile, newFile);
+    Assert.assertNotNull(plan);
+    // The plan should be to do nothing (empty plans) because the bytes are identical in both files
+    // so the entries should remain compressed. However, unlike the case where there was no match,
+    // there is now a qualified recommendation in the returned list.
+    Assert.assertTrue(plan.getOldFileUncompressionPlan().isEmpty());
+    Assert.assertTrue(plan.getNewFileUncompressionPlan().isEmpty());
+    checkRecommendation(
+        plan,
+        new QualifiedRecommendation(
+            findEntry(oldFile, ENTRY_A_LEVEL_6),
+            findEntry(newFile, SHADOW_ENTRY_A_LEVEL_6),
+            Recommendation.UNCOMPRESS_NEITHER,
+            RecommendationReason.COMPRESSED_BYTES_IDENTICAL));
+  }
+
+  @Test
+  public void testGeneratePreDiffPlan_SimpleRename_CompressionLevelChanged() throws IOException {
+    // Test the case where file paths are different but the uncompressed content is the same.
+    // The compression level used for each entry is different but the CRC32 is still the same, so
+    // unlike the plan with identical entries, this time the plan should be to uncompress both
+    // entries, allowing a super-efficient delta.
+    byte[] oldBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A_LEVEL_6));
+    byte[] newBytes =
+        UnitTestZipArchive.makeTestZip(Collections.singletonList(SHADOW_ENTRY_A_LEVEL_9));
+    File oldFile = storeAndMapArchive(oldBytes);
+    File newFile = storeAndMapArchive(newBytes);
+    PreDiffPlan plan = invokeGeneratePreDiffPlan(oldFile, newFile);
+    Assert.assertNotNull(plan);
+    // The plan should be to uncompress both entries so that a super-efficient delta can be done.
+    Assert.assertEquals(1, plan.getOldFileUncompressionPlan().size());
+    Assert.assertEquals(
+        findRangeWithoutParams(oldFile, ENTRY_A_LEVEL_6),
+        plan.getOldFileUncompressionPlan().get(0));
+    Assert.assertEquals(1, plan.getNewFileUncompressionPlan().size());
+    Assert.assertEquals(
+        findRangeWithParams(newFile, SHADOW_ENTRY_A_LEVEL_9),
+        plan.getNewFileUncompressionPlan().get(0));
+    checkRecommendation(
+        plan,
+        new QualifiedRecommendation(
+            findEntry(oldFile, ENTRY_A_LEVEL_6),
+            findEntry(newFile, SHADOW_ENTRY_A_LEVEL_9),
+            Recommendation.UNCOMPRESS_BOTH,
+            RecommendationReason.COMPRESSED_BYTES_CHANGED));
+  }
+
+  @Test
+  public void testGeneratePreDiffPlan_ClonedAndCompressionLevelChanged() throws IOException {
+    // Test the case where an entry exists in both old and new APK with identical uncompressed
+    // content but different compressed content ***AND*** additionally a new copy exists in the new
+    // archive, also with identical uncompressed content and different compressed content, i.e.:
+    //
+    // OLD APK:                                NEW APK:
+    // ------------------------------------    -----------------------------------------------
+    // foo.xml (compressed level 6)            foo.xml (compressed level 9, content unchanged)
+    //                                         bar.xml (copy of foo.xml, compressed level 1)
+    //
+    // This test ensures that in such cases the foo.xml from the old apk is only enqueued for
+    // uncompression ONE TIME.
+    byte[] oldBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A_LEVEL_6));
+    byte[] newBytes =
+        UnitTestZipArchive.makeTestZip(
+            Arrays.asList(SHADOW_ENTRY_A_LEVEL_1, SHADOW_ENTRY_A_LEVEL_9));
+    File oldFile = storeAndMapArchive(oldBytes);
+    File newFile = storeAndMapArchive(newBytes);
+    PreDiffPlan plan = invokeGeneratePreDiffPlan(oldFile, newFile);
+    Assert.assertNotNull(plan);
+    // The plan should be to uncompress both entries so that a super-efficient delta can be done.
+    // Critically there should only be ONE command for the old file uncompression step!
+    Assert.assertEquals(1, plan.getOldFileUncompressionPlan().size());
+    Assert.assertEquals(
+        findRangeWithoutParams(oldFile, ENTRY_A_LEVEL_6),
+        plan.getOldFileUncompressionPlan().get(0));
+    Assert.assertEquals(2, plan.getNewFileUncompressionPlan().size());
+    Assert.assertEquals(
+        findRangeWithParams(newFile, SHADOW_ENTRY_A_LEVEL_1),
+        plan.getNewFileUncompressionPlan().get(0));
+    Assert.assertEquals(
+        findRangeWithParams(newFile, SHADOW_ENTRY_A_LEVEL_9),
+        plan.getNewFileUncompressionPlan().get(1));
+    checkRecommendation(
+        plan,
+        new QualifiedRecommendation(
+            findEntry(oldFile, ENTRY_A_LEVEL_6),
+            findEntry(newFile, SHADOW_ENTRY_A_LEVEL_1),
+            Recommendation.UNCOMPRESS_BOTH,
+            RecommendationReason.COMPRESSED_BYTES_CHANGED),
+        new QualifiedRecommendation(
+            findEntry(oldFile, ENTRY_A_LEVEL_6),
+            findEntry(newFile, SHADOW_ENTRY_A_LEVEL_9),
+            Recommendation.UNCOMPRESS_BOTH,
+            RecommendationReason.COMPRESSED_BYTES_CHANGED));
+  }
+
+  @Test
+  public void testGeneratePreDiffPlan_SimpleRename_CompressedToUncompressed() throws IOException {
+    // Test the case where file paths are different but the uncompressed content is the same.
+    // The compression method is changed from compressed to uncompressed but the rename should still
+    // be detected and the plan should be to uncompress the old entry only.
+    byte[] oldBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A_LEVEL_6));
+    byte[] newBytes =
+        UnitTestZipArchive.makeTestZip(Collections.singletonList(SHADOW_ENTRY_A_STORED));
+    File oldFile = storeAndMapArchive(oldBytes);
+    File newFile = storeAndMapArchive(newBytes);
+    PreDiffPlan plan = invokeGeneratePreDiffPlan(oldFile, newFile);
+    Assert.assertNotNull(plan);
+    // The plan should be to uncompress the old entry so that a super-efficient delta can be done.
+    // The new entry isn't touched because it is already uncompressed.
+    Assert.assertEquals(1, plan.getOldFileUncompressionPlan().size());
+    Assert.assertEquals(
+        findRangeWithoutParams(oldFile, ENTRY_A_LEVEL_6),
+        plan.getOldFileUncompressionPlan().get(0));
+    Assert.assertTrue(plan.getNewFileUncompressionPlan().isEmpty());
+    checkRecommendation(
+        plan,
+        new QualifiedRecommendation(
+            findEntry(oldFile, ENTRY_A_LEVEL_6),
+            findEntry(newFile, SHADOW_ENTRY_A_STORED),
+            Recommendation.UNCOMPRESS_OLD,
+            RecommendationReason.COMPRESSED_CHANGED_TO_UNCOMPRESSED));
+  }
+
+  @Test
+  public void testGeneratePreDiffPlan_SimpleRename_UncompressedToCompressed() throws IOException {
+    // Test the case where file paths are different but the uncompressed content is the same.
+    // The compression method is changed from uncompressed to compressed but the rename should still
+    // be detected and the plan should be to uncompress the new entry only.
+    byte[] oldBytes = UnitTestZipArchive.makeTestZip(Collections.singletonList(ENTRY_A_STORED));
+    byte[] newBytes =
+        UnitTestZipArchive.makeTestZip(Collections.singletonList(SHADOW_ENTRY_A_LEVEL_6));
+    File oldFile = storeAndMapArchive(oldBytes);
+    File newFile = storeAndMapArchive(newBytes);
+    PreDiffPlan plan = invokeGeneratePreDiffPlan(oldFile, newFile);
+    Assert.assertNotNull(plan);
+    // The plan should be to uncompress the new entry so that a super-efficient delta can be done.
+    // The old entry isn't touched because it is already uncompressed.
+    Assert.assertTrue(plan.getOldFileUncompressionPlan().isEmpty());
+    Assert.assertEquals(1, plan.getNewFileUncompressionPlan().size());
+    Assert.assertEquals(
+        findRangeWithParams(newFile, SHADOW_ENTRY_A_LEVEL_6),
+        plan.getNewFileUncompressionPlan().get(0));
+    checkRecommendation(
+        plan,
+        new QualifiedRecommendation(
+            findEntry(oldFile, ENTRY_A_STORED),
+            findEntry(newFile, SHADOW_ENTRY_A_LEVEL_6),
+            Recommendation.UNCOMPRESS_NEW,
+            RecommendationReason.UNCOMPRESSED_CHANGED_TO_COMPRESSED));
+  }
+
+}
diff --git a/generator/src/test/java/com/google/archivepatcher/generator/QualifiedRecommendationTest.java b/generator/src/test/java/com/google/archivepatcher/generator/QualifiedRecommendationTest.java
new file mode 100644
index 0000000..0848fb4
--- /dev/null
+++ b/generator/src/test/java/com/google/archivepatcher/generator/QualifiedRecommendationTest.java
@@ -0,0 +1,106 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/** Tests for {@link QualifiedRecommendation}. */
+@RunWith(JUnit4.class)
+@SuppressWarnings("javadoc")
+public class QualifiedRecommendationTest {
+  private static final byte[] FILENAME1 = {'f', 'o', 'o'};
+  private static final byte[] FILENAME2 = {'b', 'a', 'r'};
+  private static final MinimalZipEntry ENTRY1 = new MinimalZipEntry(0, 1, 2, 3, FILENAME1, true, 0);
+  private static final MinimalZipEntry ENTRY2 = new MinimalZipEntry(1, 2, 3, 4, FILENAME2, true, 0);
+
+  private static final QualifiedRecommendation DEFAULT_QUALIFIED_RECOMMENDATION =
+      new QualifiedRecommendation(
+          ENTRY1,
+          ENTRY2,
+          Recommendation.UNCOMPRESS_BOTH,
+          RecommendationReason.COMPRESSED_BYTES_CHANGED);
+  private static final QualifiedRecommendation CLONED_DEFAULT_QUALIFIED_RECOMMENDATION =
+      new QualifiedRecommendation(
+          ENTRY1,
+          ENTRY2,
+          Recommendation.UNCOMPRESS_BOTH,
+          RecommendationReason.COMPRESSED_BYTES_CHANGED);
+  private static final QualifiedRecommendation ALTERED_ENTRY1 =
+      new QualifiedRecommendation(
+          ENTRY2,
+          ENTRY2,
+          Recommendation.UNCOMPRESS_BOTH,
+          RecommendationReason.COMPRESSED_BYTES_CHANGED);
+  private static final QualifiedRecommendation ALTERED_ENTRY2 =
+      new QualifiedRecommendation(
+          ENTRY1,
+          ENTRY1,
+          Recommendation.UNCOMPRESS_BOTH,
+          RecommendationReason.COMPRESSED_BYTES_CHANGED);
+  private static final QualifiedRecommendation ALTERED_RECOMMENDATION =
+      new QualifiedRecommendation(
+          ENTRY1,
+          ENTRY2,
+          Recommendation.UNCOMPRESS_NEITHER,
+          RecommendationReason.COMPRESSED_BYTES_CHANGED);
+  private static final QualifiedRecommendation ALTERED_REASON =
+      new QualifiedRecommendation(
+          ENTRY1, ENTRY2, Recommendation.UNCOMPRESS_BOTH, RecommendationReason.UNSUITABLE);
+  private static final List<QualifiedRecommendation> ALL_MUTATIONS =
+      Collections.unmodifiableList(
+          Arrays.asList(ALTERED_ENTRY1, ALTERED_ENTRY2, ALTERED_RECOMMENDATION, ALTERED_REASON));
+
+  @Test
+  @SuppressWarnings("EqualsIncompatibleType") // For ErrorProne
+  public void testEquals() {
+    Assert.assertEquals(DEFAULT_QUALIFIED_RECOMMENDATION, DEFAULT_QUALIFIED_RECOMMENDATION);
+    Assert.assertEquals(DEFAULT_QUALIFIED_RECOMMENDATION, CLONED_DEFAULT_QUALIFIED_RECOMMENDATION);
+    Assert.assertNotSame(DEFAULT_QUALIFIED_RECOMMENDATION, CLONED_DEFAULT_QUALIFIED_RECOMMENDATION);
+    for (QualifiedRecommendation mutation : ALL_MUTATIONS) {
+      Assert.assertNotEquals(DEFAULT_QUALIFIED_RECOMMENDATION, mutation);
+    }
+    Assert.assertFalse(DEFAULT_QUALIFIED_RECOMMENDATION.equals(null));
+    Assert.assertFalse(DEFAULT_QUALIFIED_RECOMMENDATION.equals("foo"));
+  }
+
+  @Test
+  public void testHashCode() {
+    Set<QualifiedRecommendation> hashSet = new HashSet<>();
+    hashSet.add(DEFAULT_QUALIFIED_RECOMMENDATION);
+    hashSet.add(CLONED_DEFAULT_QUALIFIED_RECOMMENDATION);
+    Assert.assertEquals(1, hashSet.size());
+    hashSet.addAll(ALL_MUTATIONS);
+    Assert.assertEquals(1 + ALL_MUTATIONS.size(), hashSet.size());
+  }
+
+  @Test
+  public void testGetters() {
+    Assert.assertEquals(ENTRY1, DEFAULT_QUALIFIED_RECOMMENDATION.getOldEntry());
+    Assert.assertEquals(ENTRY2, DEFAULT_QUALIFIED_RECOMMENDATION.getNewEntry());
+    Assert.assertEquals(
+        Recommendation.UNCOMPRESS_BOTH, DEFAULT_QUALIFIED_RECOMMENDATION.getRecommendation());
+    Assert.assertEquals(
+        RecommendationReason.COMPRESSED_BYTES_CHANGED,
+        DEFAULT_QUALIFIED_RECOMMENDATION.getReason());
+  }
+}
diff --git a/generator/src/test/java/com/google/archivepatcher/generator/TempFileHolderTest.java b/generator/src/test/java/com/google/archivepatcher/generator/TempFileHolderTest.java
new file mode 100644
index 0000000..f91c44e
--- /dev/null
+++ b/generator/src/test/java/com/google/archivepatcher/generator/TempFileHolderTest.java
@@ -0,0 +1,42 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+import java.io.File;
+import java.io.IOException;
+
+/**
+ * Tests for {@link TempFileHolder}.
+ */
+@RunWith(JUnit4.class)
+@SuppressWarnings("javadoc")
+public class TempFileHolderTest {
+  @Test
+  public void testConstructAndClose() throws IOException {
+    // Tests that a temp file can be created and that it is deleted upon close().
+    File allocated = null;
+    try (TempFileHolder holder = new TempFileHolder()) {
+      Assert.assertNotNull(holder.file);
+      Assert.assertTrue(holder.file.exists());
+      allocated = holder.file;
+    }
+    Assert.assertFalse(allocated.exists());
+  }
+}
diff --git a/generator/src/test/java/com/google/archivepatcher/generator/TotalRecompressionLimiterTest.java b/generator/src/test/java/com/google/archivepatcher/generator/TotalRecompressionLimiterTest.java
new file mode 100644
index 0000000..4e5d5f0
--- /dev/null
+++ b/generator/src/test/java/com/google/archivepatcher/generator/TotalRecompressionLimiterTest.java
@@ -0,0 +1,291 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator;
+
+import java.io.File;
+import java.io.UnsupportedEncodingException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/** Tests for {@link TotalRecompressionLimiter}. */
+@RunWith(JUnit4.class)
+@SuppressWarnings("javadoc")
+public class TotalRecompressionLimiterTest {
+
+  // These tests rely on the limiter never reading the archives themselves (it only needs the
+  // uncompressed sizes from the recommendations), so null placeholders suffice.
+  private static final File OLD_FILE = null;
+  private static final File NEW_FILE = null;
+
+  private static final MinimalZipEntry UNIMPORTANT = makeFakeEntry("/unimportant", 1337);
+  private static final MinimalZipEntry ENTRY_A_100K = makeFakeEntry("/a/100k", 100 * 1024);
+  private static final MinimalZipEntry ENTRY_B_200K = makeFakeEntry("/b/200k", 200 * 1024);
+  private static final MinimalZipEntry ENTRY_C_300K = makeFakeEntry("/c/300k", 300 * 1024);
+  private static final MinimalZipEntry ENTRY_D_400K = makeFakeEntry("/d/400k", 400 * 1024);
+  private static final MinimalZipEntry IGNORED_A = makeFakeEntry("/ignored/a", 1234);
+  private static final MinimalZipEntry IGNORED_B = makeFakeEntry("/ignored/b", 5678);
+  private static final MinimalZipEntry IGNORED_C = makeFakeEntry("/ignored/c", 9101112);
+  private static final MinimalZipEntry IGNORED_D = makeFakeEntry("/ignored/d", 13141516);
+
+  // First four recommendations are all ones where recompression is required. Note that there is a
+  // mix of UNCOMPRESS_NEW and UNCOMPRESS_BOTH, both of which will have the "new" entry flagged for
+  // recompression (i.e., should be relevant to the filtering logic).
+  private static final QualifiedRecommendation REC_A_100K =
+      new QualifiedRecommendation(
+          UNIMPORTANT,
+          ENTRY_A_100K,
+          Recommendation.UNCOMPRESS_BOTH,
+          RecommendationReason.COMPRESSED_BYTES_CHANGED);
+  private static final QualifiedRecommendation REC_B_200K =
+      new QualifiedRecommendation(
+          UNIMPORTANT,
+          ENTRY_B_200K,
+          Recommendation.UNCOMPRESS_NEW,
+          RecommendationReason.UNCOMPRESSED_CHANGED_TO_COMPRESSED);
+  private static final QualifiedRecommendation REC_C_300K =
+      new QualifiedRecommendation(
+          UNIMPORTANT,
+          ENTRY_C_300K,
+          Recommendation.UNCOMPRESS_BOTH,
+          RecommendationReason.COMPRESSED_BYTES_CHANGED);
+  private static final QualifiedRecommendation REC_D_400K =
+      new QualifiedRecommendation(
+          UNIMPORTANT,
+          ENTRY_D_400K,
+          Recommendation.UNCOMPRESS_BOTH,
+          RecommendationReason.COMPRESSED_BYTES_CHANGED);
+
+  // Remaining recommendations are all ones where recompression is NOT required. Note the mixture
+  // of UNCOMPRESS_NEITHER and UNCOMPRESS_OLD, neither of which will have the "new" entry flagged
+  // for recompression (i.e., these must be ignored by the filtering logic).
+  private static final QualifiedRecommendation REC_IGNORED_A_UNCHANGED =
+      new QualifiedRecommendation(
+          UNIMPORTANT,
+          IGNORED_A,
+          Recommendation.UNCOMPRESS_NEITHER,
+          RecommendationReason.COMPRESSED_BYTES_IDENTICAL);
+  private static final QualifiedRecommendation REC_IGNORED_B_BOTH_UNCOMPRESSED =
+      new QualifiedRecommendation(
+          UNIMPORTANT,
+          IGNORED_B,
+          Recommendation.UNCOMPRESS_NEITHER,
+          RecommendationReason.BOTH_ENTRIES_UNCOMPRESSED);
+  private static final QualifiedRecommendation REC_IGNORED_C_UNSUITABLE =
+      new QualifiedRecommendation(
+          UNIMPORTANT,
+          IGNORED_C,
+          Recommendation.UNCOMPRESS_NEITHER,
+          RecommendationReason.UNSUITABLE);
+  private static final QualifiedRecommendation REC_IGNORED_D_CHANGED_TO_UNCOMPRESSED =
+      new QualifiedRecommendation(
+          UNIMPORTANT,
+          IGNORED_D,
+          Recommendation.UNCOMPRESS_OLD,
+          RecommendationReason.COMPRESSED_CHANGED_TO_UNCOMPRESSED);
+
+  /** Convenience reference to all the recommendations that should be ignored by filtering. */
+  private static final List<QualifiedRecommendation> ALL_IGNORED_RECS =
+      Collections.unmodifiableList(
+          Arrays.asList(
+              REC_IGNORED_A_UNCHANGED,
+              REC_IGNORED_B_BOTH_UNCOMPRESSED,
+              REC_IGNORED_C_UNSUITABLE,
+              REC_IGNORED_D_CHANGED_TO_UNCOMPRESSED));
+
+  /** Convenience reference to all the recommendations that are subject to filtering. */
+  private static final List<QualifiedRecommendation> ALL_RECS =
+      Collections.unmodifiableList(
+          Arrays.asList(
+              REC_IGNORED_A_UNCHANGED,
+              REC_A_100K,
+              REC_IGNORED_B_BOTH_UNCOMPRESSED,
+              REC_D_400K,
+              REC_IGNORED_C_UNSUITABLE,
+              REC_B_200K,
+              REC_IGNORED_D_CHANGED_TO_UNCOMPRESSED,
+              REC_C_300K));
+
+  /**
+   * Given {@link QualifiedRecommendation}s, manufacture equivalents altered in the way that the
+   * {@link TotalRecompressionLimiter} would.
+   *
+   * @param originals the original recommendations
+   * @return the altered recommendations
+   */
+  private static List<QualifiedRecommendation> suppressed(
+      QualifiedRecommendation... originals) {
+    List<QualifiedRecommendation> result = new ArrayList<>(originals.length);
+    for (QualifiedRecommendation original : originals) {
+      result.add(
+          new QualifiedRecommendation(
+              original.getOldEntry(),
+              original.getNewEntry(),
+              Recommendation.UNCOMPRESS_NEITHER,
+              RecommendationReason.RESOURCE_CONSTRAINED));
+    }
+    return result;
+  }
+
+  /**
+   * Make a structurally valid but totally bogus {@link MinimalZipEntry} for the purpose of testing
+   * the {@link RecommendationModifier}.
+   *
+   * @param path the path to set on the entry, to help with debugging
+   * @param uncompressedSize the uncompressed size of the entry, in bytes
+   * @return the entry
+   */
+  private static MinimalZipEntry makeFakeEntry(String path, long uncompressedSize) {
+    try {
+      return new MinimalZipEntry(
+          8, // == deflate
+          0, // crc32OfUncompressedData (ignored for this test)
+          0, // compressedSize (ignored for this test)
+          uncompressedSize,
+          path.getBytes("UTF8"),
+          true, // generalPurposeFlagBit11 (true=UTF8)
+          0 // fileOffsetOfLocalEntry (ignored for this test)
+          );
+    } catch (UnsupportedEncodingException e) {
+      throw new RuntimeException(e); // Impossible on any modern system
+    }
+  }
+
+  @Test
+  public void testNegativeLimit() {
+    try {
+      new TotalRecompressionLimiter(-1);
+      Assert.fail("Set a negative limit");
+    } catch (IllegalArgumentException expected) {
+      // Pass
+    }
+  }
+
+  /**
+   * Asserts that the two collections contain exactly the same elements, regardless of iteration
+   * order. This isn't a true multiset comparison (e.g., [x, x, y] and [x, y, y] would pass), but
+   * it is sufficient here because the recommendations being compared are distinct.
+   *
+   * @param c1 the first collection
+   * @param c2 the second collection
+   */
+  private static <T> void assertEquivalence(Collection<T> c1, Collection<T> c2) {
+    Assert.assertEquals(c1.size(), c2.size());
+    Assert.assertTrue(c1.containsAll(c2));
+    Assert.assertTrue(c2.containsAll(c1));
+  }
+
+  @Test
+  public void testZeroLimit() {
+    TotalRecompressionLimiter limiter = new TotalRecompressionLimiter(0);
+    List<QualifiedRecommendation> expected = new ArrayList<QualifiedRecommendation>();
+    expected.addAll(suppressed(REC_A_100K, REC_B_200K, REC_C_300K, REC_D_400K));
+    expected.addAll(ALL_IGNORED_RECS);
+    assertEquivalence(expected, limiter.getModifiedRecommendations(OLD_FILE, NEW_FILE, ALL_RECS));
+  }
+
+  @Test
+  public void testMaxLimit() {
+    TotalRecompressionLimiter limiter = new TotalRecompressionLimiter(Long.MAX_VALUE);
+    assertEquivalence(ALL_RECS, limiter.getModifiedRecommendations(OLD_FILE, NEW_FILE, ALL_RECS));
+  }
+
+  @Test
+  public void testLimit_ExactlySmallest() {
+    long limit = REC_A_100K.getNewEntry().getUncompressedSize(); // Exactly large enough
+    TotalRecompressionLimiter limiter = new TotalRecompressionLimiter(limit);
+    List<QualifiedRecommendation> expected = new ArrayList<QualifiedRecommendation>();
+    expected.add(REC_A_100K);
+    expected.addAll(suppressed(REC_B_200K, REC_C_300K, REC_D_400K));
+    expected.addAll(ALL_IGNORED_RECS);
+    assertEquivalence(expected, limiter.getModifiedRecommendations(OLD_FILE, NEW_FILE, ALL_RECS));
+  }
+
+  @Test
+  public void testLimit_EdgeUnderSmallest() {
+    long limit = REC_A_100K.getNewEntry().getUncompressedSize() - 1; // 1 byte too small
+    TotalRecompressionLimiter limiter = new TotalRecompressionLimiter(limit);
+    List<QualifiedRecommendation> expected = new ArrayList<QualifiedRecommendation>();
+    expected.addAll(suppressed(REC_A_100K, REC_B_200K, REC_C_300K, REC_D_400K));
+    expected.addAll(ALL_IGNORED_RECS);
+    assertEquivalence(expected, limiter.getModifiedRecommendations(OLD_FILE, NEW_FILE, ALL_RECS));
+  }
+
+  @Test
+  public void testLimit_EdgeOverSmallest() {
+    long limit = REC_A_100K.getNewEntry().getUncompressedSize() + 1; // 1 byte extra room
+    TotalRecompressionLimiter limiter = new TotalRecompressionLimiter(limit);
+    List<QualifiedRecommendation> expected = new ArrayList<QualifiedRecommendation>();
+    expected.add(REC_A_100K);
+    expected.addAll(suppressed(REC_B_200K, REC_C_300K, REC_D_400K));
+    expected.addAll(ALL_IGNORED_RECS);
+    assertEquivalence(expected, limiter.getModifiedRecommendations(OLD_FILE, NEW_FILE, ALL_RECS));
+  }
+
+  @Test
+  public void testLimit_ExactlyLargest() {
+    long limit = REC_D_400K.getNewEntry().getUncompressedSize(); // Exactly large enough
+    TotalRecompressionLimiter limiter = new TotalRecompressionLimiter(limit);
+    List<QualifiedRecommendation> expected = new ArrayList<QualifiedRecommendation>();
+    expected.add(REC_D_400K);
+    expected.addAll(suppressed(REC_A_100K, REC_B_200K, REC_C_300K));
+    expected.addAll(ALL_IGNORED_RECS);
+    assertEquivalence(expected, limiter.getModifiedRecommendations(OLD_FILE, NEW_FILE, ALL_RECS));
+  }
+
+  @Test
+  public void testLimit_EdgeUnderLargest() {
+    long limit = REC_D_400K.getNewEntry().getUncompressedSize() - 1; // 1 byte too small
+    TotalRecompressionLimiter limiter = new TotalRecompressionLimiter(limit);
+    List<QualifiedRecommendation> expected = new ArrayList<QualifiedRecommendation>();
+    expected.add(REC_C_300K);
+    expected.addAll(suppressed(REC_A_100K, REC_B_200K, REC_D_400K));
+    expected.addAll(ALL_IGNORED_RECS);
+    assertEquivalence(expected, limiter.getModifiedRecommendations(OLD_FILE, NEW_FILE, ALL_RECS));
+  }
+
+  @Test
+  public void testLimit_EdgeOverLargest() {
+    long limit = REC_D_400K.getNewEntry().getUncompressedSize() + 1; // 1 byte extra room
+    TotalRecompressionLimiter limiter = new TotalRecompressionLimiter(limit);
+    List<QualifiedRecommendation> expected = new ArrayList<QualifiedRecommendation>();
+    expected.add(REC_D_400K);
+    expected.addAll(suppressed(REC_A_100K, REC_B_200K, REC_C_300K));
+    expected.addAll(ALL_IGNORED_RECS);
+    assertEquivalence(expected, limiter.getModifiedRecommendations(OLD_FILE, NEW_FILE, ALL_RECS));
+  }
+
+  @Test
+  public void testLimit_Complex() {
+    // A more nuanced test. Here we set up a limit of 600k - big enough to get the largest and the
+    // THIRD largest files. The second largest will fail because there isn't enough space after
+    // adding the first largest, and the fourth largest will fail because there is not enough space
+    // after adding the third largest. Tricky.
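+    // Worked arithmetic, assuming the limiter considers entries from largest to smallest:
+    //   limit = 400K + 200K = 600K
+    //   D (400K): accepted, 400K used
+    //   C (300K): rejected, 400K + 300K > 600K
+    //   B (200K): accepted, 400K + 200K = 600K
+    //   A (100K): rejected, 600K + 100K > 600K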
+    long limit =
+        REC_D_400K.getNewEntry().getUncompressedSize()
+            + REC_B_200K.getNewEntry().getUncompressedSize();
+    TotalRecompressionLimiter limiter = new TotalRecompressionLimiter(limit);
+    List<QualifiedRecommendation> expected = new ArrayList<QualifiedRecommendation>();
+    expected.add(REC_B_200K);
+    expected.add(REC_D_400K);
+    expected.addAll(suppressed(REC_A_100K, REC_C_300K));
+    expected.addAll(ALL_IGNORED_RECS);
+    assertEquivalence(expected, limiter.getModifiedRecommendations(OLD_FILE, NEW_FILE, ALL_RECS));
+  }
+}
diff --git a/generator/src/test/java/com/google/archivepatcher/generator/bsdiff/BsDiffTest.java b/generator/src/test/java/com/google/archivepatcher/generator/bsdiff/BsDiffTest.java
new file mode 100644
index 0000000..2a7d7ae
--- /dev/null
+++ b/generator/src/test/java/com/google/archivepatcher/generator/bsdiff/BsDiffTest.java
@@ -0,0 +1,524 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator.bsdiff;
+
+import com.google.archivepatcher.generator.bsdiff.Matcher.NextMatch;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
+import java.util.Arrays;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+@RunWith(JUnit4.class)
+public class BsDiffTest {
+
+  @Test
+  public void lengthOfMatchTest() throws IOException {
+    String s1 =
+        "this is a string that starts the same and has some sameness in the middle, but "
+            + "ends differently";
+    String s2 =
+        "this is a string that starts the samish and has some sameness in the middle, but "
+            + "then ends didlyiefferently";
+    byte[] s1b = s1.getBytes(Charset.forName("US-ASCII"));
+    byte[] s2b = s2.getBytes(Charset.forName("US-ASCII"));
+    RandomAccessObject s1ro = new RandomAccessObject.RandomAccessByteArrayObject(s1b);
+    RandomAccessObject s2ro = new RandomAccessObject.RandomAccessByteArrayObject(s2b);
+
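+    // lengthOfMatch returns the length of the common prefix of s1[i..] and s2[j..]; for example,
+    // the two strings above share the 36-byte prefix "this is a string that starts the sam".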
+    Assert.assertEquals(36, BsDiff.lengthOfMatch(s1ro, 0, s2ro, 0));
+    Assert.assertEquals(0, BsDiff.lengthOfMatch(s1ro, 5, s2ro, 0));
+    Assert.assertEquals(31, BsDiff.lengthOfMatch(s1ro, 5, s2ro, 5));
+    Assert.assertEquals(42, BsDiff.lengthOfMatch(s1ro, 37, s2ro, 39));
+    Assert.assertEquals(0, BsDiff.lengthOfMatch(s1ro, 38, s2ro, 39));
+    Assert.assertEquals(32, BsDiff.lengthOfMatch(s1ro, 47, s2ro, 49));
+    Assert.assertEquals(2, BsDiff.lengthOfMatch(s1ro, 90, s2ro, 83));
+  }
+
+  @Test
+  public void searchForMatchBaseCaseShortGroupArrayTest() throws IOException {
+    final String s1 = "asdf;1234;this should match;5678";
+    final String s2 = "hkjl.9999.00vbn,``'=-this should match.9900-mmmnmn,,,.x??'";
+    final byte[] s1b = s1.getBytes(Charset.forName("US-ASCII"));
+    final byte[] s2b = s2.getBytes(Charset.forName("US-ASCII"));
+    final RandomAccessObject s1ro = new RandomAccessObject.RandomAccessByteArrayObject(s1b);
+    final RandomAccessObject s2ro = new RandomAccessObject.RandomAccessByteArrayObject(s2b);
+    final RandomAccessObject groupArrayRO =
+        intArrayToRandomAccessObject(BsDiffTestData.SHORT_GROUP_ARRAY);
+
+    BsDiff.Match ret = BsDiff.searchForMatchBaseCase(groupArrayRO, s1ro, s2ro, 0, 0, 12);
+    Assert.assertEquals(0, ret.length);
+    Assert.assertEquals(12, ret.start);
+
+    ret = BsDiff.searchForMatchBaseCase(groupArrayRO, s1ro, s2ro, 0, 9, 10);
+    Assert.assertEquals(0, ret.length);
+    Assert.assertEquals(10, ret.start);
+  }
+
+  @Test
+  public void searchForMatchBaseCaseLongGroupArrayTest() throws IOException {
+    final RandomAccessObject groupArray2RO =
+        intArrayToRandomAccessObject(BsDiffTestData.LONG_GROUP_ARRAY_100);
+
+    final int scan = 1;
+    BsDiff.Match ret =
+        BsDiff.searchForMatchBaseCase(
+            groupArray2RO,
+            BsDiffTestData.LONG_DATA_99_RO,
+            BsDiffTestData.LONG_DATA_104_NEW_RO,
+            scan,
+            0,
+            BsDiffTestData.LONG_DATA_99.length);
+    Assert.assertEquals(0, ret.length);
+    Assert.assertEquals(10, ret.start);
+
+    ret =
+        BsDiff.searchForMatchBaseCase(
+            groupArray2RO,
+            BsDiffTestData.LONG_DATA_99_RO,
+            BsDiffTestData.LONG_DATA_104_NEW_RO,
+            scan,
+            64,
+            65);
+    Assert.assertEquals(0, ret.length);
+    Assert.assertEquals(52, ret.start);
+
+    ret =
+        BsDiff.searchForMatchBaseCase(
+            groupArray2RO,
+            BsDiffTestData.LONG_DATA_99_RO,
+            BsDiffTestData.LONG_DATA_104_NEW_RO,
+            scan,
+            1,
+            2);
+    Assert.assertEquals(0, ret.length);
+    Assert.assertEquals(46, ret.start);
+  }
+
+  @Test
+  public void searchForMatchBaseCaseVeryLongGroupArrayTest() throws IOException {
+    final RandomAccessObject groupArray3RO =
+        intArrayToRandomAccessObject(BsDiffTestData.LONGER_GROUP_ARRAY_350);
+
+    final int scan = 1;
+    BsDiff.Match ret =
+        BsDiff.searchForMatchBaseCase(
+            groupArray3RO,
+            BsDiffTestData.LONGER_DATA_349_RO,
+            BsDiffTestData.LONGER_DATA_354_NEW_RO,
+            scan,
+            0,
+            BsDiffTestData.LONGER_DATA_349.length);
+    Assert.assertEquals(0, ret.length);
+    Assert.assertEquals(246, ret.start);
+
+    ret =
+        BsDiff.searchForMatchBaseCase(
+            groupArray3RO,
+            BsDiffTestData.LONGER_DATA_349_RO,
+            BsDiffTestData.LONGER_DATA_354_NEW_RO,
+            scan,
+            219,
+            220);
+    Assert.assertEquals(0, ret.length);
+    Assert.assertEquals(251, ret.start);
+  }
+
+  @Test
+  public void searchForMatchShortGroupArrayTest() throws IOException {
+    final String s1 = "asdf;1234;this should match;5678";
+    final String s2 = "hkjl.9999.00vbn,``'=-this should match.9900-mmmnmn,,,.x??'";
+    final byte[] s1b = s1.getBytes(Charset.forName("US-ASCII"));
+    final byte[] s2b = s2.getBytes(Charset.forName("US-ASCII"));
+    final RandomAccessObject s1ro = new RandomAccessObject.RandomAccessByteArrayObject(s1b);
+    final RandomAccessObject s2ro = new RandomAccessObject.RandomAccessByteArrayObject(s2b);
+    final RandomAccessObject groupArrayRO =
+        intArrayToRandomAccessObject(BsDiffTestData.SHORT_GROUP_ARRAY);
+
+    BsDiff.Match ret = BsDiff.searchForMatch(groupArrayRO, s1ro, s2ro, 0, 0, 12);
+    Assert.assertEquals(0, ret.length);
+    Assert.assertEquals(10, ret.start);
+  }
+
+  @Test
+  public void searchForMatchLongGroupArrayTest() throws IOException {
+    final RandomAccessObject groupArray2RO =
+        intArrayToRandomAccessObject(BsDiffTestData.LONG_GROUP_ARRAY_100);
+
+    final int scan = 1;
+    BsDiff.Match ret =
+        BsDiff.searchForMatch(
+            groupArray2RO,
+            BsDiffTestData.LONG_DATA_99_RO,
+            BsDiffTestData.LONG_DATA_104_NEW_RO,
+            scan,
+            0,
+            BsDiffTestData.LONG_DATA_99.length);
+    Assert.assertEquals(0, ret.length);
+    Assert.assertEquals(52, ret.start);
+  }
+
+  @Test
+  public void searchForMatchVeryLongGroupArrayTest() throws IOException {
+    final RandomAccessObject groupArray3RO =
+        intArrayToRandomAccessObject(BsDiffTestData.LONGER_GROUP_ARRAY_350);
+
+    final int scan = 1;
+    BsDiff.Match ret =
+        BsDiff.searchForMatch(
+            groupArray3RO,
+            BsDiffTestData.LONGER_DATA_349_RO,
+            BsDiffTestData.LONGER_DATA_354_NEW_RO,
+            scan,
+            0,
+            BsDiffTestData.LONGER_DATA_349.length);
+    Assert.assertEquals(0, ret.length);
+    Assert.assertEquals(251, ret.start);
+  }
+
+  @Test
+  public void searchForMatch() throws Exception {
+    String[] testCases = {
+      "a",
+      "aa",
+      "az",
+      "za",
+      "aaaaa",
+      "CACAO",
+      "banana",
+      "tobeornottobe",
+      "the quick brown fox jumps over the lazy dog.",
+      "elephantelephantelephantelephantelephant",
+      "011010011001011010010110011010010",
+    };
+    for (String testCase : testCases) {
+      int size = testCase.length();
+      byte[] bytes = testCase.getBytes(StandardCharsets.US_ASCII);
+      RandomAccessObject input = new RandomAccessObject.RandomAccessByteArrayObject(bytes);
+      RandomAccessObject suffixArray =
+          new DivSuffixSorter(new RandomAccessObjectFactory.RandomAccessByteArrayObjectFactory())
+              .suffixSort(input);
+
+      // Test exact matches for every non-empty substring.
+      for (int lo = 0; lo < size; ++lo) {
+        for (int hi = lo + 1; hi <= size; ++hi) {
+          byte[] query = Arrays.copyOfRange(bytes, lo, hi);
+          int querySize = query.length;
+          Assert.assertEquals(querySize, hi - lo);
+          RandomAccessObject queryBuf = new RandomAccessObject.RandomAccessByteArrayObject(query);
+
+          BsDiff.Match match = BsDiff.searchForMatch(suffixArray, input, queryBuf, 0, 0, size);
+
+          Assert.assertEquals(querySize, match.length);
+          Assert.assertTrue(match.start >= 0);
+          Assert.assertTrue(match.start <= size - match.length);
+          byte[] suffix = Arrays.copyOfRange(bytes, match.start, match.start + match.length);
+          Assert.assertArrayEquals(query, suffix);
+        }
+      }
+    }
+  }
+
+  @Test
+  public void generatePatchTest() throws Exception {
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    byte[] oldData = readTestData("BsDiffInternalTestOld.txt");
+    byte[] newData = readTestData("BsDiffInternalTestNew.txt");
+    byte[] expectedPatch = readTestData("BsDiffInternalTestPatchExpected.patch");
+
+    BsDiffPatchWriter.generatePatch(oldData, newData, out);
+
+    byte[] actualPatch = out.toByteArray();
+    Assert.assertEquals(expectedPatch.length, actualPatch.length);
+    Assert.assertArrayEquals(expectedPatch, actualPatch);
+  }
+
+  @Test
+  public void generatePatchOnRealCompiledBinaryTest() throws Exception {
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    byte[] oldData = readTestData("minimalBlobA.bin");
+    byte[] newData = readTestData("minimalBlobB.bin");
+    byte[] expectedPatch = readTestData("minimalBlobPatch.patch");
+
+    BsDiffPatchWriter.generatePatch(oldData, newData, out);
+
+    byte[] actualPatch = out.toByteArray();
+    Assert.assertEquals(expectedPatch.length, actualPatch.length);
+    Assert.assertArrayEquals(expectedPatch, actualPatch);
+  }
+
+  /**
+   * Tests patch generation using {@link NaiveMatcher}, a naive implementation of BsDiff.Matcher
+   * under which exact matches between newData[a ... a + len - 1] and oldData[b ... b + len - 1]
+   * are valid if |len| >= 3.
+   */
+  @Test
+  public void generatePatchWithMatcherTest() throws Exception {
+    {
+      // Test that all of the characters are diffed if two strings are identical even if there
+      // is no "valid match" because the strings are too short.
+      CtrlEntry[] expectedCtrlEntries = {new CtrlEntry(2, 0, 0)};
+      Assert.assertTrue(generatePatchAndCheckCtrlEntries("aa", "aa", expectedCtrlEntries));
+    }
+
+    {
+      // Test that all of the characters are diffed if two strings are identical and are long
+      // enough to be considered a "valid match".
+      CtrlEntry[] expectedCtrlEntries = {new CtrlEntry(0, 0, 0), new CtrlEntry(3, 0, 0)};
+      Assert.assertTrue(generatePatchAndCheckCtrlEntries("aaa", "aaa", expectedCtrlEntries));
+    }
+
+    {
+      // Test that none of the characters are diffed if the strings do not match.
+      CtrlEntry[] expectedCtrlEntries = {new CtrlEntry(0, 2, 0)};
+      Assert.assertTrue(generatePatchAndCheckCtrlEntries("aa", "bb", expectedCtrlEntries));
+    }
+
+    {
+      // Test that characters are diffed if the beginnings of the strings match even if the match
+      // is not long enough to be considered valid.
+      CtrlEntry[] expectedCtrlEntries = {new CtrlEntry(2, 6, 3), new CtrlEntry(3, 0, 0)};
+      Assert.assertTrue(
+          generatePatchAndCheckCtrlEntries("aazzzbbb", "aaayyyyybbb", expectedCtrlEntries));
+    }
+
+    {
+      // Test that none of the characters are diffed if the beginnings of the strings do not
+      // match and the available match is not long enough to be considered valid.
+      CtrlEntry[] expectedCtrlEntries = {new CtrlEntry(0, 3, 0)};
+      Assert.assertTrue(generatePatchAndCheckCtrlEntries("zzzbb", "abb", expectedCtrlEntries));
+    }
+
+    {
+      // Test that all of the characters are either diffed or are included in the extra
+      // string when Matcher's match is extended.
+      CtrlEntry[] expectedCtrlEntries = { // extended match | extra string
+        new CtrlEntry(0, 1, 2), // n/a            | #
+        new CtrlEntry(6, 3, 1), // 012345         | %^&
+        new CtrlEntry(13, 0, 0) // abcdefghijklm  | n/a
+      };
+      Assert.assertTrue(
+          generatePatchAndCheckCtrlEntries(
+              "@@012345@ab@de@ghijklm", "#012$45%^&abcdefghijklm", expectedCtrlEntries));
+    }
+
+    {
+      // Test that things work when the (n + 1)th match in the old string is before the nth match
+      // in the old string.
+      CtrlEntry[] expectedCtrlEntries = { // extended match | extra string
+        new CtrlEntry(0, 1, 16), // n/a            | #
+        new CtrlEntry(6, 3, -21), // 012345         | %^&
+        new CtrlEntry(13, 0, 0) // abcdefghijklm  | n/a
+      };
+      Assert.assertTrue(
+          generatePatchAndCheckCtrlEntries(
+              "@ab@de@ghijklm@@012345", "#012$45%^&abcdefghijklm", expectedCtrlEntries));
+    }
+
+    {
+      // Test the behavior when the (n + 1)th match's backward extension overlaps with the nth
+      // match's forward extension.
+      // "567" can be forward extended to "567@9n1x3s56"
+      // "exus6" can be backward extended to "9n1x3s56exus6"
+      CtrlEntry[] expectedCtrlEntries = {
+        new CtrlEntry(0, 0, 5), new CtrlEntry(4, 0, 17), new CtrlEntry(13, 0, 0),
+      };
+      Assert.assertTrue(
+          generatePatchAndCheckCtrlEntries(
+              "012345678901234567890nexus9nexus5nexus6", "567@9n1x3s56exus6", expectedCtrlEntries));
+    }
+
+    {
+      // Test that a match is not backward extended past the previous match.
+      // "bbb" cannot be backward extended to "bb@bb@bbaaabbb" because "aaa" is a valid match.
+      CtrlEntry[] expectedCtrlEntries = {
+        new CtrlEntry(0, 8, 0), new CtrlEntry(3, 0, 3), new CtrlEntry(3, 0, 0),
+      };
+      Assert.assertTrue(
+          generatePatchAndCheckCtrlEntries(
+              "aaazzzbbbbbbbb", "bb@bb@bbaaabbb", expectedCtrlEntries));
+    }
+
+    {
+      // Test that a match is not forward extended past the next match.
+      // "aaa" cannot be forward extended to "aaabbbaa@aa@aa" because "bbb" is a valid match.
+      CtrlEntry[] expectedCtrlEntries = {
+        new CtrlEntry(0, 0, 0), new CtrlEntry(3, 0, 11), new CtrlEntry(3, 8, 0),
+      };
+      Assert.assertTrue(
+          generatePatchAndCheckCtrlEntries(
+              "aaaaaaaaaaazzzbbb", "aaabbbaa@aa@aa", expectedCtrlEntries));
+    }
+
+    {
+      // Test that a match can be extended to take up the entire string.
+      CtrlEntry[] expectedCtrlEntries = {
+        new CtrlEntry(0, 0, 0), new CtrlEntry(9, 0, 0),
+      };
+      Assert.assertTrue(
+          generatePatchAndCheckCtrlEntries("abcdefghi", "ab@def@hi", expectedCtrlEntries));
+    }
+  }
+
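+  /** Copies |array| into an int-aligned RandomAccessObject so it can stand in for a group array. */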
+  private RandomAccessObject intArrayToRandomAccessObject(final int[] array) throws IOException {
+    RandomAccessObject ret =
+        new RandomAccessObject.RandomAccessByteArrayObject(new byte[array.length * 4]);
+    ret.seekToIntAligned(0);
+
+    for (int element : array) {
+      ret.writeInt(element);
+    }
+
+    return ret;
+  }
+
+  // Some systems force all text files to end in a newline, which screws up this test.
+  private static byte[] stripNewlineIfNecessary(byte[] b) {
+    if (b[b.length - 1] != (byte) '\n') {
+      return b;
+    }
+
+    byte[] ret = new byte[b.length - 1];
+    System.arraycopy(b, 0, ret, 0, ret.length);
+    return ret;
+  }
+
+  private byte[] readTestData(String fileName) throws IOException {
+    ByteArrayOutputStream result = new ByteArrayOutputStream();
+    try (InputStream in = getClass().getResourceAsStream("testdata/" + fileName)) {
+      byte[] buffer = new byte[32768];
+      int numRead;
+      while ((numRead = in.read(buffer)) >= 0) {
+        result.write(buffer, 0, numRead);
+      }
+    }
+    return stripNewlineIfNecessary(result.toByteArray());
+  }
+
+  private static class NaiveMatcher implements Matcher {
+    private final byte[] mOldData;
+    private final byte[] mNewData;
+    private int mOldPos;
+    private int mNewPos;
+    private int mMatchLen;
+
+    NaiveMatcher(byte[] oldData, byte[] newData) {
+      mOldData = oldData;
+      mNewData = newData;
+      mOldPos = 0;
+      mNewPos = 0;
+      mMatchLen = 0;
+    }
+
+    @Override
+    public NextMatch next() {
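+      // Resume scanning just past the previous match and return the next position in the new
+      // data that matches at least 3 consecutive bytes somewhere in the old data.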
+      mNewPos += mMatchLen;
+      for (; mNewPos < mNewData.length; ++mNewPos) {
+        BsDiff.Match longestMatch = findLongestMatchInOld(mNewPos);
+        mOldPos = longestMatch.start;
+        mMatchLen = longestMatch.length;
+        if (mMatchLen >= 3) {
+          return NextMatch.of(true, mOldPos, mNewPos);
+        }
+      }
+
+      return NextMatch.of(false, 0, 0);
+    }
+
+    /**
+     * Finds the longest match between mNewData[newStartIndex ... mNewData.length - 1] and
+     * |mOldData|.
+     */
+    private BsDiff.Match findLongestMatchInOld(int newStartIndex) {
+      int bestMatchIndex = 0;
+      int bestMatchLength = 0;
+      for (int i = 0; i < mOldData.length; ++i) {
+        int matchLength = 0;
+        for (int newIndex = newStartIndex, oldIndex = i;
+            newIndex < mNewData.length && oldIndex < mOldData.length;
+            ++newIndex, ++oldIndex) {
+          if (mOldData[oldIndex] != mNewData[newIndex]) {
+            break;
+          }
+          ++matchLength;
+        }
+
+        if (matchLength > bestMatchLength) {
+          bestMatchIndex = i;
+          bestMatchLength = matchLength;
+        }
+      }
+
+      return BsDiff.Match.of(bestMatchIndex, bestMatchLength);
+    }
+  }
+
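+  /**
+   * One bsdiff control triple: a count of diffed bytes, a count of extra (verbatim) bytes, and a
+   * relative offset into the old data applied before the next entry.
+   */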
+  private static class CtrlEntry {
+    public int diffLength;
+    public int extraLength;
+    public int oldOffset;
+
+    public CtrlEntry(int diffLength, int extraLength, int oldOffset) {
+      this.diffLength = diffLength;
+      this.extraLength = extraLength;
+      this.oldOffset = oldOffset;
+    }
+  }
+
+  /**
+   * Generates a patch from the differences between |oldData| and |newData| and checks that the
+   * patch's control data matches |expected|. For the sake of simplicity, assumes that chars are
+   * always 1 byte.
+   *
+   * @param oldData the old data, as a string of 1-byte chars
+   * @param newData the new data, as a string of 1-byte chars
+   * @param expected The expected control entries in the generated patch
+   * @return whether the actual control entries in the generated patch match the expected
+   *     ones
+   */
+  private boolean generatePatchAndCheckCtrlEntries(
+      String oldData, String newData, CtrlEntry[] expected) throws Exception {
+    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
+    byte[] oldBytes = oldData.getBytes(Charset.forName("US-ASCII"));
+    byte[] newBytes = newData.getBytes(Charset.forName("US-ASCII"));
+    RandomAccessObject oldBytesRo = new RandomAccessObject.RandomAccessByteArrayObject(oldBytes);
+    RandomAccessObject newBytesRo = new RandomAccessObject.RandomAccessByteArrayObject(newBytes);
+    BsDiffPatchWriter.generatePatchWithMatcher(
+        oldBytesRo, newBytesRo, new NaiveMatcher(oldBytes, newBytes), outputStream);
+
+    ByteArrayInputStream patchInputStream = new ByteArrayInputStream(outputStream.toByteArray());
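+    // Each control entry is three formatted longs (8 bytes each, 24 in total), followed by that
+    // entry's diff bytes and extra bytes.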
+    for (CtrlEntry element : expected) {
+      if (patchInputStream.available() < 24
+          || BsUtil.readFormattedLong(patchInputStream) != element.diffLength
+          || BsUtil.readFormattedLong(patchInputStream) != element.extraLength
+          || BsUtil.readFormattedLong(patchInputStream) != element.oldOffset) {
+        return false;
+      }
+
+      patchInputStream.skip(element.diffLength + element.extraLength);
+    }
+
+    if (patchInputStream.available() > 0) {
+      return false;
+    }
+
+    return true;
+  }
+}
diff --git a/generator/src/test/java/com/google/archivepatcher/generator/bsdiff/BsDiffTestData.java b/generator/src/test/java/com/google/archivepatcher/generator/bsdiff/BsDiffTestData.java
new file mode 100644
index 0000000..245f7a1
--- /dev/null
+++ b/generator/src/test/java/com/google/archivepatcher/generator/bsdiff/BsDiffTestData.java
@@ -0,0 +1,310 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator.bsdiff;
+
+import java.nio.charset.Charset;
+
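+/**
+ * Precomputed inputs and expected outputs (group arrays, inverse arrays, and sample data) shared
+ * by the bsdiff tests.
+ */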
+class BsDiffTestData {
+  public static final int[] SHORT_GROUP_ARRAY =
+      new int[] {
+        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+        25, 26, 27, 28, 29, 30, 31
+      };
+
+  public static final int[] LONG_GROUP_ARRAY_100 =
+      new int[] {
+        68, 14, 46, 66, 36, 59, 47, 3, 27, 0, 43, 61, 53, 22, 75, 72, 59, 1, 27, 42, 8, 19, 23, 78,
+        25, 49, 69, 56, 11, 67, 46, 23, 8, 34, 30, 77, 74, 74, 47, 45, 71, 35, 59, 49, 0, 70, 73,
+        10, 60, 64, 71, 69, 67, 38, 6, 0, 67, 6, 17, 49, 27, 0, 20, 18, 31, 52, 76, 23, 64, 13, 80,
+        67, 80, 24, 63, 61, 17, 44, 35, 14, 11, 63, 47, 54, 2, 68, 2, 79, 28, 18, 7, 4, 32, 77, 82,
+        24, 31, 72, 65, 10
+      };
+
+  public static final int[] LONG_INVERSE_ARRAY_100 =
+      new int[] {
+        0, 3, 33, 33, 50, 71, 39, 53, 11, 44, 14, 29, 28, 25, 4, 0, 28, 7, 28, 84, 80, 47, 7, 47,
+        28, 34, 43, 16, 80, 70, 79, 65, 64, 69, 35, 56, 76, 25, 73, 58, 56, 2, 13, 3, 78, 37, 83,
+        70, 71, 13, 68, 16, 70, 17, 1, 64, 40, 80, 73, 44, 71, 76, 63, 55, 7, 51, 79, 59, 51, 27,
+        36, 75, 51, 1, 16, 83, 45, 12, 6, 19, 54, 49, 12, 54, 16, 65, 37, 61, 40, 46, 37, 47, 17,
+        43, 2, 50, 32, 1, 16, 25
+      };
+
+  public static final int[] LONGER_GROUP_ARRAY_350 =
+      new int[] {
+        239, 252, 85, 126, 72, 15, 203, 97, 136, 3, 252, 27, 197, 56, 306, 94, 196, 26, 121, 240,
+        281, 244, 192, 152, 106, 244, 153, 288, 227, 27, 116, 124, 193, 103, 77, 283, 266, 185, 30,
+        107, 175, 116, 20, 22, 9, 87, 230, 165, 296, 261, 69, 68, 250, 249, 261, 296, 278, 21, 40,
+        277, 49, 160, 103, 302, 128, 6, 172, 306, 127, 185, 84, 306, 45, 251, 191, 37, 146, 214,
+        108, 28, 87, 24, 263, 73, 203, 27, 267, 233, 106, 286, 243, 16, 216, 58, 215, 64, 64, 198,
+        254, 260, 25, 160, 171, 48, 229, 228, 292, 4, 73, 235, 166, 49, 261, 71, 261, 169, 59, 185,
+        267, 191, 87, 88, 227, 120, 301, 154, 226, 144, 126, 109, 9, 283, 194, 198, 239, 271, 110,
+        105, 161, 47, 223, 283, 59, 215, 118, 15, 270, 8, 20, 114, 56, 59, 248, 3, 108, 96, 79, 231,
+        49, 202, 260, 229, 41, 54, 165, 144, 0, 227, 78, 80, 38, 102, 305, 206, 86, 190, 244, 61,
+        285, 54, 154, 214, 303, 212, 282, 203, 288, 279, 75, 307, 190, 77, 210, 55, 197, 16, 86, 5,
+        134, 274, 63, 10, 28, 152, 136, 270, 28, 79, 297, 88, 223, 178, 12, 261, 204, 198, 97, 286,
+        156, 299, 251, 57, 276, 120, 31, 289, 296, 227, 27, 170, 80, 236, 50, 262, 302, 76, 122,
+        225, 265, 102, 114, 13, 55, 120, 98, 280, 1, 17, 243, 3, 77, 8, 220, 268, 93, 226, 38, 255,
+        200, 94, 186, 49, 14, 248, 32, 89, 298, 83, 282, 301, 175, 2, 35, 247, 45, 268, 5, 284, 235,
+        249, 30, 93, 245, 157, 132, 19, 231, 255, 8, 193, 67, 97, 97, 155, 96, 245, 94, 195, 263,
+        93, 23, 32, 276, 306, 113, 215, 257, 293, 191, 135, 16, 257, 197, 71, 210, 227, 60, 6, 220,
+        138, 307, 9, 109, 206, 123, 84, 58, 2, 147, 154, 101, 35, 132, 130, 223, 85, 42, 248, 26,
+        38, 164, 114, 280, 245, 221, 158, 154, 215, 80, 246
+      };
+
+  public static final int[] LONGER_INVERSE_ARRAY_350 =
+      new int[] {
+        38, 57, 109, 42, 15, 53, 29, 11, 41, 187, 158, 258, 116, 232, 261, 294, 156, 190, 26, 205,
+        77, 233, 142, 47, 60, 227, 273, 119, 157, 174, 202, 80, 120, 250, 59, 123, 206, 236, 277,
+        12, 13, 126, 217, 153, 18, 140, 30, 8, 94, 179, 101, 148, 52, 165, 52, 278, 152, 152, 287,
+        209, 74, 200, 257, 28, 78, 234, 214, 110, 185, 163, 224, 213, 91, 62, 174, 250, 41, 85, 156,
+        178, 6, 294, 94, 237, 32, 134, 63, 179, 266, 133, 209, 96, 39, 141, 184, 209, 48, 208, 110,
+        148, 271, 196, 291, 120, 26, 44, 107, 126, 2, 177, 205, 219, 84, 33, 169, 175, 34, 88, 80,
+        166, 231, 118, 128, 245, 106, 264, 92, 153, 57, 16, 173, 128, 263, 108, 252, 298, 162, 298,
+        176, 16, 171, 22, 121, 81, 265, 162, 248, 108, 30, 2, 302, 46, 278, 181, 130, 71, 300, 137,
+        48, 181, 220, 119, 132, 270, 241, 202, 223, 91, 22, 162, 225, 38, 199, 64, 12, 69, 10, 55,
+        295, 67, 100, 205, 125, 269, 117, 13, 307, 51, 111, 236, 30, 280, 7, 52, 56, 154, 144, 127,
+        180, 176, 111, 118, 217, 299, 129, 175, 184, 303, 92, 162, 67, 259, 239, 250, 239, 259, 262,
+        251, 29, 120, 182, 179, 42, 273, 176, 43, 138, 214, 281, 168, 24, 60, 238, 6, 64, 216, 255,
+        296, 273, 296, 263, 92, 181, 30, 82, 262, 14, 80, 288, 240, 220, 168, 23, 45, 260, 260, 18,
+        249, 241, 23, 124, 31, 5, 286, 15, 298, 138, 173, 70, 212, 236, 285, 166, 247, 76, 38, 107,
+        293, 190, 229, 183, 282, 168, 170, 198, 291, 249, 207, 306, 62, 229, 260, 212, 289, 22, 298,
+        200, 306, 72, 289, 198, 90, 25, 126, 301, 245, 20, 47, 180, 216, 77, 86, 231, 307, 152, 297,
+        84, 144, 13, 233, 33, 79, 183, 177, 160, 257, 195, 250, 247, 37, 138, 49, 46, 43, 294, 245,
+        85, 214, 190, 45, 75, 114, 143, 100, 136, 263, 130, 63, 237, 45
+      };
+
+  public static final int[] SPLIT_BASE_CASE_INVERSE_TEST_ARRAY =
+      new int[] {
+        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+        25, 26, 27, 28, 29, 30, 31
+      };
+
+  public static final int[] SPLIT_BASE_CASE_TEST_GA_CONTROL =
+      new int[] {
+        -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+        -1, -1, -1, -1, -1, -1, -1, -1, -1
+      };
+
+  public static final int[] SPLIT_BASE_CASE_TEST_IA_CONTROL =
+      new int[] {
+        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+        25, 26, 27, 28, 29, 30, 31
+      };
+
+  public static final int[] SPLIT_BASE_CASE_TEST_GROUP_ARRAY_2 = new int[] {0, 1, 2, 3, 3, 1, 0, 2};
+  public static final int[] SPLIT_BASE_CASE_TEST_INVERSE_ARRAY_2 =
+      new int[] {3, 2, 0, 3, -1, 0, 2, 3};
+  public static final int[] SPLIT_BASE_CASE_TEST_GA_CONTROL_2 =
+      new int[] {0, 1, 2, 3, 3, -1, -1, -1};
+  public static final int[] SPLIT_BASE_CASE_TEST_IA_CONTROL_2 = new int[] {7, 5, 6, 4, -1, 0, 2, 3};
+
+  public static final int[] SPLIT_TEST_GA_CONTROL =
+      new int[] {
+        68, 14, 46, 66, 36, -1, 65, 46, 35, 35, 6, 6, -1, 56, 14, 0, 0, 0, 0, 74, 74, 69, 69, -1, 2,
+        2, 76, 43, 19, -1, 71, 71, 61, 61, 8, 8, 10, 4, 10, -1, -1, 17, 17, 82, 78, 80, 80, 18, 18,
+        -1, -1, -1, 64, 60, 64, 27, 27, 27, 63, 63, -1, 72, 72, 52, 75, -1, 31, 31, 59, 59, 59, -1,
+        -1, 47, 47, 24, 47, 24, -1, 77, 77, 23, 23, 23, -1, -1, -1, -1, 49, 49, 49, -1, -1, 67, 67,
+        38, 67, 67, 11, 11
+      };
+
+  public static final int[] SPLIT_TEST_IA_CONTROL =
+      new int[] {
+        18, 49, 25, 40, 38, 71, 11, 5, 38, 44, 38, 99, 28, 50, 14, 0, 28, 42, 48, 28, 92, 47, 91,
+        83, 77, 85, 43, 57, 87, 70, 86, 67, 65, 69, 23, 9, 76, 25, 97, 58, 56, 2, 84, 28, 78, 29, 7,
+        77, 71, 90, 68, 16, 64, 60, 72, 64, 14, 80, 73, 70, 54, 33, 63, 59, 54, 7, 79, 97, 39, 22,
+        12, 31, 62, 51, 22, 64, 28, 80, 44, 71, 46, 49, 44, 54, 16, 65, 37, 61, 40, 46, 37, 47, 17,
+        43, 2, 50, 32, 1, 16, 25
+      };
+
+  public static final int[] SPLIT_TEST_GA_CONTROL_2 =
+      new int[] {
+        239, 252, 85, 126, 72, 15, 203, 97, 136, 3, 252, 27, 197, 56, 306, 94, 196, 71, 71, 38, 38,
+        38, 30, 30, 165, 165, -1, 255, 255, 120, 120, 120, 130, 35, 35, 247, -1, 285, 132, 132, 243,
+        243, 250, -1, -1, 9, 9, 9, 54, 54, -1, -1, -1, -1, -1, -1, -1, 96, 96, 244, 244, 244, 14,
+        298, 45, 45, -1, 87, 87, 87, 185, 185, 185, -1, -1, -1, 235, 235, 64, 64, 280, 280, 289, 77,
+        77, 77, 225, 164, -1, -1, -1, -1, 301, 301, 109, 109, 134, 55, 55, 307, 103, 103, 307, -1,
+        302, 302, 108, 108, -1, 63, 158, 73, 73, -1, -1, 97, 97, 97, 267, 267, 138, 124, 58, 58, 89,
+        -1, 26, 26, -1, 257, 257, -1, 94, 94, 191, 191, 191, 32, 32, 3, 3, 175, 175, -1, 152, 152,
+        210, 210, 80, 80, 251, 80, 251, 113, 122, -1, -1, -1, -1, 84, 84, -1, 305, 47, 48, -1, -1,
+        -1, -1, -1, 200, 127, 136, 160, 160, -1, 110, 263, 263, 220, 220, -1, -1, 20, 20, 106, 106,
+        190, 215, 215, 215, 215, 190, 40, 78, 212, 21, 156, -1, 288, 288, 123, 276, 276, -1, -1, 28,
+        28, 28, 5, 5, -1, 16, 16, 16, 6, 6, 226, 226, -1, -1, -1, -1, 59, 59, 59, 283, 283, 283, 88,
+        88, 49, 49, 49, 49, -1, 248, 248, 248, -1, -1, 144, 144, 296, 296, 296, -1, 254, 101, -1,
+        -1, 197, 197, 206, 206, 203, 203, 147, 12, -1, 114, 114, 114, -1, 27, 27, 27, -1, 86, 86,
+        260, 260, -1, 193, 193, 102, 102, 262, -1, -1, -1, -1, -1, 8, 8, 8, 270, 270, 261, 261, 261,
+        261, 261, 223, 223, 223, -1, 249, 249, 245, 245, 245, -1, 204, 24, 227, 227, 227, 227, 227,
+        2, 2, -1, 282, 282, -1, 231, 231, 79, 79, -1, -1, -1, 116, 116, 154, 154, 154, 154, 229,
+        229, 93, 93, 93, 214, 214, -1, -1, 306, 306, 306, 128, 286, 286, 268, 268, 198, 198, 198, -1
+      };
+
+  public static final int[] SPLIT_TEST_IA_CONTROL_2 =
+      new int[] {
+        205, 169, 311, 140, 239, 210, 216, 11, 285, 47, 246, 258, 258, 131, 63, 75, 214, 235, 26,
+        168, 184, 197, 73, 158, 304, 74, 127, 266, 208, 174, 25, 26, 138, 250, 59, 35, 206, 50, 21,
+        12, 195, 114, 161, 153, 18, 65, 30, 164, 164, 234, 279, 148, 52, 165, 49, 98, 282, 272, 124,
+        225, 175, 278, 257, 110, 81, 234, 214, 55, 103, 167, 224, 18, 91, 112, 174, 51, 157, 85,
+        195, 319, 152, 294, 94, 54, 160, 240, 269, 69, 230, 124, 209, 96, 39, 333, 133, 209, 58,
+        119, 128, 148, 271, 248, 277, 102, 26, 181, 186, 52, 107, 95, 178, 219, 84, 154, 262, 175,
+        324, 88, 165, 166, 32, 125, 154, 203, 121, 264, 349, 174, 343, 16, 32, 128, 39, 108, 98,
+        321, 174, 298, 121, 16, 171, 22, 121, 81, 242, 162, 90, 258, 30, 2, 302, 46, 145, 156, 328,
+        220, 197, 222, 110, 181, 174, 221, 132, 270, 87, 25, 89, 91, 22, 211, 88, 113, 263, 64, 12,
+        142, 10, 55, 66, 67, 100, 205, 125, 269, 117, 72, 166, 51, 111, 236, 192, 136, 143, 274,
+        322, 155, 144, 252, 348, 176, 174, 118, 312, 256, 304, 175, 254, 303, 92, 162, 147, 259,
+        195, 250, 335, 192, 56, 251, 29, 120, 180, 43, 42, 295, 176, 87, 218, 309, 336, 330, 337,
+        317, 238, 250, 64, 77, 315, 296, 273, 320, 296, 92, 181, 42, 61, 301, 198, 35, 238, 298, 42,
+        152, 23, 45, 248, 28, 18, 130, 241, 23, 271, 292, 277, 178, 15, 91, 53, 119, 345, 212, 287,
+        219, 166, 247, 182, 38, 203, 302, 267, 259, 81, 280, 314, 228, 204, 39, 343, 207, 200, 85,
+        229, 260, 108, 44, 22, 298, 245, 36, 63, 249, 198, 93, 105, 281, 301, 164, 340, 102, 180,
+        216, 77, 86, 231, 307, 152, 297, 84, 144, 13, 233, 33, 79, 183, 177, 160, 257, 195, 250,
+        247, 37, 138, 49, 46, 43, 294, 245, 85, 214, 190, 45, 75, 114, 143, 100, 136, 263, 130, 63,
+        237, 45
+      };
+
+  public static final String LONG_DATA_99_S =
+      "Ad\"u%uuFm0BWP)-2)S-p7qI{zw@Jt|-E$Yxetqiv'I>EH|`|Y"
+          + "A72g|.fr]F^\\(tnGKjTyO{7{e@Odm,tW}e+rRgRvkA(TNF&a>r";
+
+  public static final String LONGER_DATA_349_S =
+      "TL?I\"a)tAARPRLH)ZNuYRBZ>+A2lO;D<SL;9EPhz?y7W>#U"
+          + "<`bDa80CNDT)AvVZRq3' Gcn6*o3U_)`E={[q;,)T/5Ntk,>K=v<Q4?o&Q!m2Za;Dt5'hMTZ?.\\ri#'Qu*>3"
+          + "bo9pO}-?Ar/;\\9epjz&`Y_i{FZw'@HTfLI\\3(kOi^6{_9TC_m8C^zAuV'2hg%[AC@op(/=V+PvPNh<s^vqa"
+          + "b@KWddY+425eS_j(jGX<!ZxzL$042\"$PnU<cZ|Us#%4R_5s)&: vS*:sk#tGC=V'W|^\"O&nY+5og8[oTf!$"
+          + "Dm@Hu3M+bD?6knRa,>k_(-O4P^_kOk}<D_Jxo^s+(9R\\y.uv::ng";
+
+  public static final byte[] LONG_DATA_99 = LONG_DATA_99_S.getBytes(Charset.forName("US-ASCII"));
+  public static final byte[] LONGER_DATA_349 =
+      LONGER_DATA_349_S.getBytes(Charset.forName("US-ASCII"));
+
+  public static final RandomAccessObject LONG_DATA_99_RO =
+      new RandomAccessObject.RandomAccessByteArrayObject(LONG_DATA_99);
+  public static final RandomAccessObject LONGER_DATA_349_RO =
+      new RandomAccessObject.RandomAccessByteArrayObject(LONGER_DATA_349);
+
+  public static final int[] QUICK_SUFFIX_SORT_INIT_TEST_GA_CONTROL =
+      new int[] {
+        -1, -1, -1, -1, -1, -1, 61, 91, 13, 16, -1, -1, 14, 18, 30, -1, -1, 15, 51, 20, 50, 71, 42,
+        97, 26, 74, 0, 49, 90, -1, 31, 43, 7, 58, 94, -1, -1, 22, 41, -1, -1, -1, 69, 75, -1, 85,
+        87, -1, 67, 92, 11, 80, 33, 48, -1, -1, -1, -1, -1, 1, 76, 35, 73, 82, -1, 52, 86, -1, -1,
+        -1, 8, 77, -1, -1, 21, 37, 56, 84, 98, 28, 36, 62, 79, 3, 5, 6, 39, 88, -1, -1, -1, -1, 23,
+        70, 72, 29, 45, 47, 53, -1
+      };
+
+  public static final int[] QUICK_SUFFIX_SORT_INIT_TEST_IA_CONTROL =
+      new int[] {
+        28, 60, 1, 85, 3, 85, 85, 34, 71, 16, 29, 51, 44, 9, 14, 18, 9, 47, 14, 73, 21, 75, 38, 94,
+        91, 88, 25, 39, 82, 98, 14, 31, 2, 53, 89, 63, 82, 75, 67, 87, 5, 38, 23, 31, 36, 98, 57,
+        98, 53, 28, 21, 18, 66, 98, 15, 64, 78, 55, 34, 56, 54, 7, 82, 72, 35, 40, 68, 49, 90, 43,
+        94, 21, 94, 63, 25, 43, 60, 71, 11, 82, 51, 99, 63, 10, 78, 46, 66, 46, 87, 69, 28, 7, 49,
+        41, 34, 4, 58, 23, 78, 0
+      };
+
+  public static final int[] QUICK_SUFFIX_SORT_INIT_TEST_GA_CONTROL_2 =
+      new int[] {
+        -1, 67, 264, 105, 234, 295, 4, 243, 281, 45, 124, 254, 271, 239, 244, 296, 191, 255, 103,
+        149, 262, 283, 66, 114, 125, 158, 187, 277, 167, 198, 229, 317, 337, 6, 15, 58, 77, 86, 261,
+        72, 128, 267, 24, 202, 221, 286, 304, 336, 85, 93, 313, 137, 318, 120, 342, 88, 141, 199,
+        53, 240, 26, 107, 188, 223, 242, 65, 74, 130, 166, 302, 100, 222, 241, 256, 320, 89, 113,
+        224, 259, 287, 71, 172, 308, -1, 52, 180, 290, 35, 133, 144, 175, 338, 263, 268, 345, 346,
+        29, 34, 84, 110, 142, 31, 47, 98, 208, 233, 248, 328, 80, 96, 200, 275, 23, 44, 94, 129,
+        314, 2, 40, 101, 119, 138, 307, 159, 195, 215, 299, 8, 9, 25, 59, 139, 184, 193, -1, 54,
+        177, 181, 194, 274, 30, 50, 56, 111, 297, 306, 329, 36, 79, -1, 68, 231, 273, 14, 160, 300,
+        3, 164, -1, 95, 216, 1, 13, 33, 163, 238, 116, 303, 17, 55, 90, 206, 28, 135, 169, 282, 319,
+        325, 11, 37, 203, 205, 245, 321, 99, 104, 126, 10, 12, 20, 63, 257, 311, 339, 32, 226, 266,
+        0, 57, 87, 117, 161, 176, 293, 46, 75, 247, 252, 61, 186, 201, 276, 43, 217, 278, -1, 19,
+        151, 220, 285, 16, 22, 62, 108, 118, 156, 235, 250, 82, 192, 291, 121, 143, 165, 340, 171,
+        182, 210, 280, 322, 334, 76, 152, 174, 178, 227, 258, 316, 323, 330, 48, 78, 150, 5, 51,
+        109, 213, 312, 49, 131, 214, 305, 69, 249, 218, 219, 145, 225, 162, 294, 190, 289, 348, 38,
+        115, 189, 207, 123, 153, 170, 147, 228, 230, 92, 168, 270, 309, 315, 324, 326, -1, 106, 179,
+        298, 70, 246, 284, 310, 347, 73, 102, 132, 196, 288, 292, 333, 134, 146, 197, 64, 83, 212,
+        122, 140, 209, 253, 260, 269, 335, 7, 91, 112, 272, 18, 127, 185, 301, 343, 60, 97, 204,
+        211, 265, 344, -1, 236, 332, 41, 341, 39, 148, 183, 237, 81, 154, 173, 251, 279, 136, 327
+      };
+
+  public static final int[] QUICK_SUFFIX_SORT_INIT_TEST_IA_CONTROL_2 =
+      new int[] {
+        203, 165, 122, 157, 8, 257, 38, 322, 133, 133, 193, 183, 193, 165, 155, 38, 227, 171, 327,
+        219, 193, 134, 227, 116, 47, 133, 64, 290, 177, 100, 146, 107, 196, 165, 100, 91, 148, 183,
+        276, 342, 122, 338, 83, 214, 116, 12, 207, 107, 252, 261, 146, 257, 86, 59, 139, 171, 146,
+        203, 38, 133, 333, 211, 227, 193, 311, 69, 27, 2, 152, 263, 298, 82, 41, 305, 69, 207, 249,
+        38, 252, 148, 111, 345, 230, 311, 100, 50, 38, 203, 57, 79, 171, 322, 289, 50, 116, 160,
+        111, 333, 107, 186, 74, 122, 305, 21, 186, 5, 293, 64, 227, 257, 100, 146, 322, 79, 27, 276,
+        167, 203, 227, 122, 54, 234, 313, 279, 12, 27, 186, 327, 41, 116, 69, 261, 305, 91, 308,
+        177, 349, 52, 122, 133, 313, 57, 100, 234, 91, 267, 308, 282, 342, 21, 252, 219, 249, 279,
+        345, 149, 227, 334, 27, 126, 155, 203, 269, 165, 157, 234, 69, 32, 289, 177, 279, 240, 82,
+        345, 249, 91, 203, 139, 249, 293, 86, 139, 240, 342, 133, 327, 211, 27, 64, 276, 272, 17,
+        230, 133, 139, 126, 305, 308, 32, 57, 111, 211, 47, 183, 333, 183, 171, 276, 107, 318, 240,
+        333, 311, 257, 261, 126, 160, 214, 265, 265, 219, 47, 74, 64, 79, 267, 196, 249, 282, 32,
+        282, 152, 215, 107, 5, 227, 336, 342, 165, 15, 59, 74, 64, 8, 15, 183, 298, 207, 107, 263,
+        227, 347, 207, 318, 12, 17, 74, 193, 249, 79, 318, 38, 21, 95, 2, 333, 196, 41, 95, 318,
+        289, 12, 322, 152, 139, 111, 211, 27, 214, 347, 240, 8, 177, 21, 298, 219, 47, 79, 305, 272,
+        86, 230, 305, 203, 269, 5, 15, 146, 293, 126, 155, 327, 69, 167, 47, 261, 146, 122, 82, 289,
+        298, 193, 257, 50, 116, 289, 249, 32, 52, 177, 74, 183, 240, 249, 289, 177, 289, 349, 107,
+        146, 249, 158, 336, 305, 240, 318, 47, 32, 91, 193, 234, 338, 54, 327, 333, 95, 95, 298,
+        272, 0
+      };
+
+  public static final int[] QUICK_SUFFIX_SORT_TEST_GA_CONTROL =
+      new int[] {
+        99, 2, 32, 4, 95, 40, 91, 61, 13, 16, 83, 78, 14, 30, 18, 54, 9, 15, 51, 50, 20, 71, 42, 97,
+        26, 74, 90, 49, 0, 10, 31, 43, 94, 58, 7, 64, 44, 41, 22, 27, 65, 93, 75, 69, 12, 85, 87,
+        17, 92, 67, 11, 80, 48, 33, 60, 57, 59, 46, 96, 1, 76, 82, 73, 35, 55, 86, 52, 38, 66, 89,
+        77, 8, 63, 19, 21, 37, 98, 84, 56, 79, 62, 36, 28, 3, 6, 5, 39, 88, 25, 34, 68, 24, 70, 72,
+        23, 29, 53, 47, 45, 81
+      };
+
+  public static final int[] QUICK_SUFFIX_SORT_TEST_IA_CONTROL =
+      new int[] {
+        349, 67, 264, 295, 234, 105, 243, 281, 4, 254, 124, 45, 271, 239, 296, 244, 255, 191, 262,
+        103, 149, 283, 66, 187, 158, 125, 277, 114, 317, 198, 337, 229, 167, 261, 58, 86, 15, 77, 6,
+        267, 128, 72, 336, 221, 286, 24, 202, 304, 85, 93, 313, 137, 318, 120, 342, 88, 141, 199,
+        240, 53, 242, 223, 107, 188, 26, 65, 166, 302, 74, 130, 241, 222, 100, 320, 256, 113, 89,
+        224, 287, 259, 71, 308, 172, 42, 52, 180, 290, 35, 338, 175, 144, 133, 263, 345, 346, 268,
+        84, 34, 29, 110, 142, 233, 328, 98, 31, 47, 248, 208, 275, 200, 96, 80, 44, 23, 129, 94,
+        314, 119, 307, 138, 2, 101, 40, 159, 299, 215, 195, 25, 8, 193, 9, 139, 184, 59, 21, 274,
+        194, 54, 181, 177, 30, 306, 56, 329, 50, 297, 111, 79, 36, 155, 273, 231, 68, 14, 160, 300,
+        3, 164, 331, 95, 216, 238, 33, 1, 13, 163, 303, 116, 55, 206, 90, 17, 282, 319, 28, 169,
+        325, 135, 205, 11, 321, 37, 245, 203, 104, 99, 126, 20, 12, 10, 339, 257, 311, 63, 266, 32,
+        226, 57, 87, 176, 0, 117, 293, 161, 46, 247, 75, 252, 186, 276, 201, 61, 43, 217, 278, 232,
+        220, 285, 19, 151, 22, 118, 16, 62, 108, 156, 235, 250, 192, 291, 82, 165, 143, 121, 340,
+        280, 171, 322, 334, 210, 182, 316, 76, 258, 174, 330, 152, 227, 323, 178, 78, 150, 48, 5,
+        312, 51, 109, 213, 214, 305, 49, 131, 249, 69, 219, 218, 225, 145, 294, 162, 348, 190, 289,
+        207, 115, 189, 38, 123, 170, 153, 228, 230, 147, 270, 92, 168, 324, 315, 309, 326, 27, 106,
+        179, 298, 70, 310, 246, 284, 347, 102, 73, 132, 292, 333, 288, 196, 197, 134, 146, 64, 83,
+        212, 140, 122, 253, 260, 335, 209, 269, 112, 7, 272, 91, 127, 301, 185, 18, 343, 344, 97,
+        204, 265, 60, 211, 157, 332, 236, 341, 41, 148, 39, 183, 237, 154, 81, 173, 251, 279, 136,
+        327
+      };
+
+  public static final String LONG_DATA_104_NEW_S =
+      "Ad\"u%uuFm0B___-2)S-p7qI{zw@Jt|-E$Yxetqi==v2h"
+          + "3oH|`|YA72g|.fr]F^\\(tnGKjTys{7{e@Odm,tW}e+rRgRvkA(TNF&a532>8";
+
+  public static final String LONGER_DATA_354_NEW_S =
+      "TL?I\"a)tAARPRLH)ZNuYRsdf8yu032D<SL;9EPh2nz"
+          + "?y7W>#U<`bDa80CNDT)AvVZRq3' Gcn6*o3U_)`E={[q;,)T/5Ntk,>K=v<Q4?o&Q!m2Za;Dt5'hMTZ?.\\ri"
+          + "#'Qu*>3bKo9pO}-?Ar/;\\9epjz&`Y_i{FZw'@HTfLI\\3(kOi^6{_9TC_m8C^zAuV'2hg%[AC@op(/=V+PvP"
+          + "Nh<s^vqab@KWddY+425eS_j.jGX<!ZxzL$042\"$PnU<cZ|Us#%4R_5s)&: vS*:sk#tGC=V'W|^\"O&nY+5o"
+          + "g8[oT2h80otugs7s9+bD?6knRa,>k_(-O4P^_kOk}<D_Jxo^s+(9R\\y.uv::ng";
+
+  public static final byte[] LONG_DATA_104_NEW =
+      LONG_DATA_104_NEW_S.getBytes(Charset.forName("US-ASCII"));
+  public static final byte[] LONGER_DATA_354_NEW =
+      LONGER_DATA_354_NEW_S.getBytes(Charset.forName("US-ASCII"));
+
+  public static final RandomAccessObject LONG_DATA_104_NEW_RO =
+      new RandomAccessObject.RandomAccessByteArrayObject(LONG_DATA_104_NEW);
+  public static final RandomAccessObject LONGER_DATA_354_NEW_RO =
+      new RandomAccessObject.RandomAccessByteArrayObject(LONGER_DATA_354_NEW);
+}
diff --git a/generator/src/test/java/com/google/archivepatcher/generator/bsdiff/BsUtilTest.java b/generator/src/test/java/com/google/archivepatcher/generator/bsdiff/BsUtilTest.java
new file mode 100644
index 0000000..6929d99
--- /dev/null
+++ b/generator/src/test/java/com/google/archivepatcher/generator/bsdiff/BsUtilTest.java
@@ -0,0 +1,130 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator.bsdiff;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.nio.charset.Charset;
+
+@RunWith(JUnit4.class)
+public class BsUtilTest {
+  @Test
+  public void writeFormattedLongTest() throws IOException {
+    ByteArrayOutputStream outputStream = new ByteArrayOutputStream(16);
+    BsUtil.writeFormattedLong(0x12345678, outputStream);
+    BsUtil.writeFormattedLong(0x0eadbeef, outputStream);
+    byte[] actual = outputStream.toByteArray();
+
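+    // The formatted-long encoding is 8 bytes per value, least-significant byte first, so
+    // 0x12345678 should appear as 78 56 34 12 00 00 00 00.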
+    byte[] expected = {
+      (byte) 0x78,
+      (byte) 0x56,
+      (byte) 0x34,
+      (byte) 0x12,
+      (byte) 0,
+      (byte) 0,
+      (byte) 0,
+      (byte) 0,
+      (byte) 0xef,
+      (byte) 0xbe,
+      (byte) 0xad,
+      (byte) 0x0e,
+      (byte) 0,
+      (byte) 0,
+      (byte) 0,
+      (byte) 0
+    };
+    Assert.assertArrayEquals(expected, actual);
+  }
+
+  @Test
+  public void readFormattedLongTest() throws IOException {
+    byte[] data = {
+      (byte) 0x78,
+      (byte) 0x56,
+      (byte) 0x34,
+      (byte) 0x12,
+      (byte) 0,
+      (byte) 0,
+      (byte) 0,
+      (byte) 0,
+      (byte) 0xef,
+      (byte) 0xbe,
+      (byte) 0xad,
+      (byte) 0x0e,
+      (byte) 0,
+      (byte) 0,
+      (byte) 0,
+      (byte) 0
+    };
+    ByteArrayInputStream inputStream = new ByteArrayInputStream(data);
+
+    Assert.assertEquals(0x12345678, BsUtil.readFormattedLong(inputStream));
+    Assert.assertEquals(0x0eadbeef, BsUtil.readFormattedLong(inputStream));
+  }
+
+  private long writeThenReadFormattedLong(long value) throws IOException {
+    ByteArrayOutputStream outputStream = new ByteArrayOutputStream(8);
+    BsUtil.writeFormattedLong(value, outputStream);
+    byte[] outputBytes = outputStream.toByteArray();
+    ByteArrayInputStream inputStream = new ByteArrayInputStream(outputBytes);
+    return BsUtil.readFormattedLong(inputStream);
+  }
+
+  @Test
+  public void writeThenReadFormattedLongTest() throws IOException {
+    Assert.assertEquals(-1, writeThenReadFormattedLong(-1));
+    Assert.assertEquals(0x7fffffff, writeThenReadFormattedLong(0x7fffffff));
+    Assert.assertEquals(0, writeThenReadFormattedLong(0));
+    Assert.assertEquals(Long.MAX_VALUE, writeThenReadFormattedLong(Long.MAX_VALUE));
+    Assert.assertEquals(Long.MIN_VALUE, writeThenReadFormattedLong(Long.MIN_VALUE));
+  }
+
+  @Test
+  public void lexicographicalCompareTest() throws IOException {
+    String s1 = "this is a string";
+    String s2 = "that was a string";
+    byte[] s1b = s1.getBytes(Charset.forName("US-ASCII"));
+    byte[] s2b = s2.getBytes(Charset.forName("US-ASCII"));
+    RandomAccessObject s1ro = new RandomAccessObject.RandomAccessByteArrayObject(s1b);
+    RandomAccessObject s2ro = new RandomAccessObject.RandomAccessByteArrayObject(s2b);
+
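+    // At offset 0 the inputs first differ at 'i' vs 'a' ("this" vs "that"), so s1 sorts after s2;
+    // from offset 5 onward, "is a string" sorts before "was a string".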
+    int r = BsUtil.lexicographicalCompare(s1ro, 0, s1b.length, s2ro, 0, s2b.length);
+    Assert.assertTrue(r > 0);
+
+    r = BsUtil.lexicographicalCompare(s1ro, 5, s1b.length - 5, s2ro, 5, s2b.length - 5);
+    Assert.assertTrue(r < 0);
+
+    r = BsUtil.lexicographicalCompare(s1ro, 7, s1b.length - 7, s2ro, 8, s2b.length - 7);
+    Assert.assertTrue(r < 0);
+
+    r = BsUtil.lexicographicalCompare(s1ro, 7, s1b.length - 8, s2ro, 8, s2b.length - 8);
+    Assert.assertTrue(r < 0);
+
+    r = BsUtil.lexicographicalCompare(s1ro, 0, 2, s2ro, 0, 2);
+    Assert.assertEquals(0, r);
+
+    r = BsUtil.lexicographicalCompare(s1ro, 0, 1, s2ro, 0, 2);
+    Assert.assertTrue(r < 0);
+
+    r = BsUtil.lexicographicalCompare(s1ro, 0, 2, s2ro, 0, 1);
+    Assert.assertTrue(r > 0);
+  }
+}
diff --git a/generator/src/test/java/com/google/archivepatcher/generator/bsdiff/DivSuffixSorterTest.java b/generator/src/test/java/com/google/archivepatcher/generator/bsdiff/DivSuffixSorterTest.java
new file mode 100644
index 0000000..ed347e4
--- /dev/null
+++ b/generator/src/test/java/com/google/archivepatcher/generator/bsdiff/DivSuffixSorterTest.java
@@ -0,0 +1,36 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator.bsdiff;
+
+import org.junit.Before;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+@RunWith(JUnit4.class)
+public class DivSuffixSorterTest extends SuffixSorterTestBase {
+
+  DivSuffixSorter divSuffixSorter;
+
+  @Before
+  public void setup() {
+    divSuffixSorter =
+        new DivSuffixSorter(new RandomAccessObjectFactory.RandomAccessByteArrayObjectFactory());
+  }
+
+  @Override
+  public SuffixSorter getSuffixSorter() {
+    return divSuffixSorter;
+  }
+}
diff --git a/generator/src/test/java/com/google/archivepatcher/generator/bsdiff/RandomAccessObjectTest.java b/generator/src/test/java/com/google/archivepatcher/generator/bsdiff/RandomAccessObjectTest.java
new file mode 100644
index 0000000..6136d5a
--- /dev/null
+++ b/generator/src/test/java/com/google/archivepatcher/generator/bsdiff/RandomAccessObjectTest.java
@@ -0,0 +1,626 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator.bsdiff;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+import java.io.ByteArrayInputStream;
+import java.io.EOFException;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.RandomAccessFile;
+import java.nio.BufferOverflowException;
+import java.nio.BufferUnderflowException;
+
+@RunWith(JUnit4.class)
+public class RandomAccessObjectTest {
+  private static final byte[] BLOB = new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13};
+
+  @Test
+  public void fileLengthTest() throws IOException {
+    File tmpFile = storeInTempFile(new ByteArrayInputStream(BLOB));
+
+    try (RandomAccessObject obj = new RandomAccessObject.RandomAccessFileObject(tmpFile, "r")) {
+      Assert.assertEquals(13, obj.length());
+    } finally {
+      tmpFile.delete();
+    }
+  }
+
+  @Test
+  public void byteArrayLengthTest() throws IOException {
+    try (RandomAccessObject obj = new RandomAccessObject.RandomAccessByteArrayObject(BLOB)) {
+      Assert.assertEquals(13, obj.length());
+    }
+  }
+
+  @Test
+  public void mmapLengthTest() throws IOException {
+    File tmpFile = storeInTempFile(new ByteArrayInputStream(BLOB));
+
+    try (RandomAccessObject obj =
+        new RandomAccessObject.RandomAccessMmapObject(new RandomAccessFile(tmpFile, "r"), "r")) {
+      Assert.assertEquals(13, obj.length());
+    } finally {
+      tmpFile.delete();
+    }
+  }
+
+  @Test
+  public void fileReadByteTest() throws IOException {
+    File tmpFile = storeInTempFile(new ByteArrayInputStream(BLOB));
+
+    try (RandomAccessObject obj = new RandomAccessObject.RandomAccessFileObject(tmpFile, "r")) {
+      for (int x = 0; x < BLOB.length; x++) {
+        Assert.assertEquals(x + 1, obj.readByte());
+      }
+
+      try {
+        obj.readByte();
+        Assert.fail("Should've thrown an IOException");
+      } catch (IOException expected) {
+      }
+    } finally {
+      tmpFile.delete();
+    }
+  }
+
+  @Test
+  public void byteArrayReadByteTest() throws IOException {
+    // Mix positives and negatives to test sign preservation in readByte()
+    byte[] bytes = new byte[] {-128, -127, -126, -1, 0, 1, 125, 126, 127};
+    try (RandomAccessObject obj = new RandomAccessObject.RandomAccessByteArrayObject(bytes)) {
+      for (int x = 0; x < bytes.length; x++) {
+        Assert.assertEquals(bytes[x], obj.readByte());
+      }
+
+      try {
+        obj.readByte();
+        Assert.fail("Should've thrown an IOException");
+      } catch (BufferUnderflowException expected) {
+      }
+    }
+  }
+
+  @Test
+  public void byteArrayReadUnsignedByteTest() throws IOException {
+    // Test values above 127 to test unsigned-ness of readUnsignedByte()
+    int[] ints = new int[] {255, 254, 253};
+    byte[] bytes = new byte[] {(byte) 0xff, (byte) 0xfe, (byte) 0xfd};
+    try (RandomAccessObject obj = new RandomAccessObject.RandomAccessByteArrayObject(bytes)) {
+      for (int x = 0; x < bytes.length; x++) {
+        Assert.assertEquals(ints[x], obj.readUnsignedByte());
+      }
+
+      try {
+        obj.readUnsignedByte();
+        Assert.fail("Should've thrown an IOException");
+      } catch (BufferUnderflowException expected) {
+      }
+    }
+  }
+
+  @Test
+  public void mmapReadByteTest() throws IOException {
+    File tmpFile = storeInTempFile(new ByteArrayInputStream(BLOB));
+
+    try (RandomAccessObject obj =
+        new RandomAccessObject.RandomAccessMmapObject(new RandomAccessFile(tmpFile, "r"), "r")) {
+      for (int x = 0; x < BLOB.length; x++) {
+        Assert.assertEquals(x + 1, obj.readByte());
+      }
+
+      try {
+        obj.readByte();
+        Assert.fail("Should've thrown an BufferUnderflowException");
+      } catch (BufferUnderflowException expected) {
+      }
+    } finally {
+      tmpFile.delete();
+    }
+  }
+
+  @Test
+  public void fileWriteByteTest() throws IOException {
+    File tmpFile = storeInTempFile(new ByteArrayInputStream(BLOB));
+
+    try (RandomAccessObject obj = new RandomAccessObject.RandomAccessFileObject(tmpFile, "rw")) {
+      for (int x = 0; x < BLOB.length; x++) {
+        obj.writeByte((byte) (5 - x));
+      }
+
+      // Writing a byte past the end of a file should be ok - this just extends the file.
+      obj.writeByte((byte) 243);
+
+      // As per RandomAccessFile documentation, the reported length should update after writing off
+      // the end of a file.
+      Assert.assertEquals(BLOB.length + 1, obj.length());
+
+      obj.seek(0);
+      for (int x = 0; x < BLOB.length; x++) {
+        Assert.assertEquals(5 - x, obj.readByte());
+      }
+
+      // Note that because of signed bytes, if cast to an int, this would actually resolve to -13.
+      Assert.assertEquals((byte) 243, obj.readByte());
+
+      try {
+        obj.readByte();
+        Assert.fail("Should've thrown an IOException");
+      } catch (IOException expected) {
+      }
+    } finally {
+      tmpFile.delete();
+    }
+  }
+
+  @Test
+  public void fileWriteByteToEmptyFileTest() throws IOException {
+    File tmpFile = File.createTempFile("RandomAccessObjectTest", "temp");
+
+    try (RandomAccessObject obj = new RandomAccessObject.RandomAccessFileObject(tmpFile, "rw")) {
+      for (int x = 0; x < BLOB.length; x++) {
+        obj.writeByte((byte) (5 - x));
+      }
+
+      obj.seek(0);
+      for (int x = 0; x < BLOB.length; x++) {
+        Assert.assertEquals(5 - x, obj.readByte());
+      }
+
+      Assert.assertEquals(BLOB.length, obj.length());
+    } finally {
+      tmpFile.delete();
+    }
+  }
+
+  @Test
+  public void byteArrayWriteByteTest() throws IOException {
+    final int len = 13;
+    try (RandomAccessObject obj =
+        new RandomAccessObject.RandomAccessByteArrayObject(new byte[len])) {
+      for (int x = 0; x < len; x++) {
+        obj.writeByte((byte) (5 - x));
+      }
+
+      try {
+        // Writing a byte past the end of an array is not ok.
+        obj.writeByte((byte) 243);
+        Assert.fail("Should've thrown a BufferOverflowException");
+      } catch (BufferOverflowException expected) {
+      }
+
+      obj.seek(0);
+      for (int x = 0; x < len; x++) {
+        Assert.assertEquals(5 - x, obj.readByte());
+      }
+
+      try {
+        obj.readByte();
+        Assert.fail("Should've thrown a BufferUnderflowException");
+      } catch (BufferUnderflowException expected) {
+      }
+    }
+  }
+
+  @Test
+  public void mmapWriteByteTest() throws IOException {
+    File tmpFile = storeInTempFile(new ByteArrayInputStream(BLOB));
+
+    try (RandomAccessObject obj =
+        new RandomAccessObject.RandomAccessMmapObject(new RandomAccessFile(tmpFile, "rw"), "rw")) {
+      for (int x = 0; x < BLOB.length; x++) {
+        obj.writeByte((byte) (5 - x));
+      }
+
+      try {
+        // Writing a byte past the end of an mmap is not ok.
+        obj.writeByte((byte) 243);
+        Assert.fail("Should've thrown an BufferOverflowException");
+      } catch (BufferOverflowException expected) {
+      }
+
+      Assert.assertEquals(BLOB.length, obj.length());
+
+      obj.seek(0);
+      for (int x = 0; x < BLOB.length; x++) {
+        Assert.assertEquals(5 - x, obj.readByte());
+      }
+
+      try {
+        obj.readByte();
+        Assert.fail("Should've thrown an BufferUnderflowException");
+      } catch (BufferUnderflowException expected) {
+      }
+    } finally {
+      tmpFile.delete();
+    }
+  }
+
+  @Test
+  public void mmapWriteByteToEmptyFileTest() throws IOException {
+    File tmpFile = File.createTempFile("RandomAccessObjectTest", "temp");
+
+    try (RandomAccessObject obj =
+        new RandomAccessObject.RandomAccessMmapObject(new RandomAccessFile(tmpFile, "rw"), "rw")) {
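+      // An mmap is fixed at the length of the underlying file at construction time, so
+      // mapping an empty file yields a zero-length buffer that rejects writes, reads, and
+      // seeks past the end.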
+      for (int x = 0; x < BLOB.length; x++) {
+        try {
+          // Writing a byte past the end of an mmap is not ok.
+          obj.writeByte((byte) (5 - x));
+          Assert.fail("Should've thrown an BufferOverflowException");
+        } catch (BufferOverflowException expected) {
+        }
+      }
+
+      try {
+        obj.seek(BLOB.length);
+        Assert.fail("Should've thrown an IllegalArgumentException");
+      } catch (IllegalArgumentException expected) {
+      }
+
+      for (int x = 0; x < BLOB.length; x++) {
+        try {
+          obj.readByte();
+          Assert.fail("Should've thrown an BufferUnderflowException");
+        } catch (BufferUnderflowException expected) {
+        }
+      }
+
+      Assert.assertEquals(0, obj.length());
+    } finally {
+      tmpFile.delete();
+    }
+  }
+
+  @Test
+  public void fileSeekTest() throws IOException {
+    File tmpFile = storeInTempFile(new ByteArrayInputStream(BLOB));
+
+    try {
+      RandomAccessObject obj = new RandomAccessObject.RandomAccessFileObject(tmpFile, "rw");
+      seekTest(obj);
+
+      try {
+        obj.seek(-1);
+        Assert.fail("Should've thrown an IOException");
+      } catch (IOException expected) {
+      }
+
+      // This should not throw an exception.
+      obj.seek(BLOB.length);
+    } finally {
+      tmpFile.delete();
+    }
+  }
+
+  @Test
+  public void byteArraySeekTest() throws IOException {
+    byte[] data = new byte[BLOB.length];
+    System.arraycopy(BLOB, 0, data, 0, BLOB.length);
+    RandomAccessObject obj = new RandomAccessObject.RandomAccessByteArrayObject(data);
+    seekTest(obj);
+
+    try {
+      obj.seek(-1);
+      Assert.fail("Should've thrown an IllegalArgumentException");
+    } catch (IllegalArgumentException expected) {
+    }
+
+    // Should not fail.
+    obj.seek(BLOB.length);
+
+    // Only fails once you try to read past the end.
+    try {
+      obj.readByte();
+      Assert.fail("Should've thrown a BufferUnderflowException");
+    } catch (BufferUnderflowException expected) {
+    }
+  }
+
+  @Test
+  public void mmapSeekTest() throws IOException {
+    File tmpFile = storeInTempFile(new ByteArrayInputStream(BLOB));
+
+    try {
+      RandomAccessObject obj =
+          new RandomAccessObject.RandomAccessMmapObject(new RandomAccessFile(tmpFile, "rw"), "rw");
+      seekTest(obj);
+
+      try {
+        obj.seek(-1);
+        Assert.fail("Should've thrown an IllegalArgumentException");
+      } catch (IllegalArgumentException expected) {
+      }
+
+      // Should not fail.
+      obj.seek(BLOB.length);
+
+      // Only fails once you try to read past the end.
+      try {
+        obj.readByte();
+        Assert.fail("Should've thrown a BufferUnderflowException");
+      } catch (BufferUnderflowException expected) {
+      }
+    } finally {
+      tmpFile.delete();
+    }
+  }
+
+  @Test
+  public void fileReadIntTest() throws IOException {
+    File tmpFile = storeInTempFile(new ByteArrayInputStream(BLOB));
+
+    try {
+      RandomAccessObject obj = new RandomAccessObject.RandomAccessFileObject(tmpFile, "r");
+      readIntTest(obj);
+      try {
+        obj.readInt();
+        Assert.fail("Should've thrown a EOFException");
+      } catch (EOFException expected) {
+      }
+    } finally {
+      tmpFile.delete();
+    }
+  }
+
+  @Test
+  public void byteArrayReadIntTest() throws IOException {
+    RandomAccessObject obj = new RandomAccessObject.RandomAccessByteArrayObject(BLOB);
+    readIntTest(obj);
+    try {
+      obj.readInt();
+      Assert.fail("Should've thrown a BufferUnderflowException");
+    } catch (BufferUnderflowException expected) {
+    }
+  }
+
+  @Test
+  public void mmapReadIntTest() throws IOException {
+    File tmpFile = storeInTempFile(new ByteArrayInputStream(BLOB));
+
+    try {
+      RandomAccessObject obj =
+          new RandomAccessObject.RandomAccessMmapObject(new RandomAccessFile(tmpFile, "r"), "r");
+      readIntTest(obj);
+
+      try {
+        obj.readInt();
+        Assert.fail("Should've thrown an BufferUnderflowException");
+      } catch (BufferUnderflowException expected) {
+      }
+    } finally {
+      tmpFile.delete();
+    }
+  }
+
+  @Test
+  public void fileWriteIntTest() throws IOException {
+    File tmpFile = storeInTempFile(new ByteArrayInputStream(BLOB));
+
+    try (RandomAccessObject obj = new RandomAccessObject.RandomAccessFileObject(tmpFile, "rw")) {
+      for (int x = 0; x < BLOB.length / 4; x++) {
+        obj.writeInt(500 + x);
+      }
+
+      obj.seekToIntAligned(0);
+      for (int x = 0; x < BLOB.length / 4; x++) {
+        Assert.assertEquals(500 + x, obj.readInt());
+      }
+    } finally {
+      tmpFile.delete();
+    }
+  }
+
+  @Test
+  public void byteArrayWriteIntTest() throws IOException {
+    final int len = 13;
+    try (RandomAccessObject obj =
+        new RandomAccessObject.RandomAccessByteArrayObject(new byte[len])) {
+      for (int x = 0; x < len / 4; x++) {
+        obj.writeInt(500 + x);
+      }
+
+      obj.seek(0);
+      for (int x = 0; x < len / 4; x++) {
+        Assert.assertEquals(500 + x, obj.readInt());
+      }
+    }
+  }
+
+  @Test
+  public void mmapWriteIntTest() throws IOException {
+    File tmpFile = storeInTempFile(new ByteArrayInputStream(BLOB));
+
+    try (RandomAccessObject obj =
+        new RandomAccessObject.RandomAccessMmapObject(new RandomAccessFile(tmpFile, "rw"), "rw")) {
+      for (int x = 0; x < BLOB.length / 4; x++) {
+        obj.writeInt(500 + x);
+      }
+
+      obj.seekToIntAligned(0);
+      for (int x = 0; x < BLOB.length / 4; x++) {
+        Assert.assertEquals(500 + x, obj.readInt());
+      }
+    } finally {
+      tmpFile.delete();
+    }
+  }
+
+  @Test
+  public void fileSeekToIntAlignedTest() throws IOException {
+    File tmpFile = storeInTempFile(new ByteArrayInputStream(BLOB));
+
+    try {
+      RandomAccessObject obj = new RandomAccessObject.RandomAccessFileObject(tmpFile, "rw");
+      seekToIntAlignedTest(obj);
+    } finally {
+      tmpFile.delete();
+    }
+  }
+
+  @Test
+  public void byteArraySeekToIntAlignedTest() throws IOException {
+    byte[] data = new byte[BLOB.length];
+    System.arraycopy(BLOB, 0, data, 0, BLOB.length);
+    RandomAccessObject obj = new RandomAccessObject.RandomAccessByteArrayObject(data);
+    seekToIntAlignedTest(obj);
+  }
+
+  @Test
+  public void mmapSeekToIntAlignedTest() throws IOException {
+    File tmpFile = storeInTempFile(new ByteArrayInputStream(BLOB));
+
+    try {
+      RandomAccessObject obj =
+          new RandomAccessObject.RandomAccessMmapObject(new RandomAccessFile(tmpFile, "rw"), "rw");
+      seekToIntAlignedTest(obj);
+    } finally {
+      tmpFile.delete();
+    }
+  }
+
+  @Test
+  public void fileCloseTest() throws IOException {
+    File tmpFile = storeInTempFile(new ByteArrayInputStream(BLOB));
+
+    try {
+      RandomAccessObject obj = new RandomAccessObject.RandomAccessFileObject(tmpFile, "r", true);
+      obj.close();
+      Assert.assertFalse(tmpFile.exists());
+      tmpFile = null;
+    } finally {
+      if (tmpFile != null) {
+        tmpFile.delete();
+      }
+    }
+
+    tmpFile = storeInTempFile(new ByteArrayInputStream(BLOB));
+
+    try {
+      RandomAccessObject obj = new RandomAccessObject.RandomAccessFileObject(tmpFile, "r");
+      obj.close();
+      Assert.assertTrue(tmpFile.exists());
+    } finally {
+      tmpFile.delete();
+    }
+  }
+
+  @Test
+  public void mmapCloseTest() throws IOException {
+    File tmpFile = storeInTempFile(new ByteArrayInputStream(BLOB));
+
+    try {
+      try (RandomAccessObject obj = new RandomAccessObject.RandomAccessMmapObject(tmpFile, "r")) {}
+      Assert.assertFalse(tmpFile.exists());
+      tmpFile = null;
+    } finally {
+      if (tmpFile != null) {
+        tmpFile.delete();
+      }
+    }
+
+    tmpFile = storeInTempFile(new ByteArrayInputStream(BLOB));
+
+    try {
+      try (RandomAccessObject obj =
+          new RandomAccessObject.RandomAccessMmapObject(new RandomAccessFile(tmpFile, "r"), "r")) {}
+      Assert.assertTrue(tmpFile.exists());
+    } finally {
+      tmpFile.delete();
+    }
+  }
+
+  private void seekTest(final RandomAccessObject obj) throws IOException {
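+    // BLOB holds ascending bytes starting at 1, so after seek(n) the next byte read is
+    // n + 1. The write at offset 5 is verified both by a targeted read-back and by a full
+    // sequential scan.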
+    obj.seek(7);
+    Assert.assertEquals(8, obj.readByte());
+    obj.seek(3);
+    Assert.assertEquals(4, obj.readByte());
+    obj.seek(9);
+    Assert.assertEquals(10, obj.readByte());
+    obj.seek(5);
+    obj.writeByte((byte) 23);
+    obj.seek(5);
+    Assert.assertEquals(23, obj.readByte());
+    obj.seek(4);
+    Assert.assertEquals(5, obj.readByte());
+
+    obj.seek(0);
+    for (int x = 0; x < BLOB.length; x++) {
+      if (x == 5) {
+        Assert.assertEquals(23, obj.readByte());
+      } else {
+        Assert.assertEquals(x + 1, obj.readByte());
+      }
+    }
+  }
+
+  private void readIntTest(final RandomAccessObject obj) throws IOException {
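+    // readInt() assembles four bytes in big-endian order; since BLOB holds ascending bytes
+    // starting at 1, the first three ints are 0x01020304, 0x05060708 and 0x090A0B0C.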
+    Assert.assertEquals(0x01020304, obj.readInt());
+    Assert.assertEquals(0x05060708, obj.readInt());
+    Assert.assertEquals(0x090A0B0C, obj.readInt());
+  }
+
+  private void seekToIntAlignedTest(final RandomAccessObject obj) throws IOException {
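+    // seekToIntAligned(n) positions the object at byte offset n * 4, which is why a byte
+    // read immediately afterwards yields BLOB[n * 4] = n * 4 + 1.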
+    obj.seekToIntAligned(3);
+    Assert.assertEquals(3 * 4 + 1, obj.readByte());
+
+    obj.seekToIntAligned(2);
+    Assert.assertEquals(2 * 4 + 1, obj.readByte());
+    Assert.assertEquals(0x0A0B0C0D, obj.readInt());
+
+    obj.seekToIntAligned(0);
+    Assert.assertEquals(1, obj.readByte());
+
+    obj.seekToIntAligned(1);
+    Assert.assertEquals(5, obj.readByte());
+    Assert.assertEquals(0x06070809, obj.readInt());
+
+    obj.seekToIntAligned(2);
+    obj.writeInt(0x26391bd2);
+
+    obj.seekToIntAligned(0);
+    Assert.assertEquals(0x01020304, obj.readInt());
+    Assert.assertEquals(0x05060708, obj.readInt());
+    Assert.assertEquals(0x26391bd2, obj.readInt());
+  }
+
+  private File storeInTempFile(InputStream content) throws IOException {
+    File tmpFile = null;
+    try {
+      tmpFile = File.createTempFile("RandomAccessObjectTest", "temp");
+      tmpFile.deleteOnExit();
+      try (FileOutputStream out = new FileOutputStream(tmpFile)) {
+        byte[] buffer = new byte[32768];
+        int numRead = 0;
+        while ((numRead = content.read(buffer)) >= 0) {
+          out.write(buffer, 0, numRead);
+        }
+        out.flush();
+      }
+      return tmpFile;
+    } catch (IOException e) {
+      if (tmpFile != null) {
+        // Attempt immediate cleanup.
+        tmpFile.delete();
+      }
+      throw e;
+    }
+  }
+}
diff --git a/generator/src/test/java/com/google/archivepatcher/generator/bsdiff/SuffixSorterTestBase.java b/generator/src/test/java/com/google/archivepatcher/generator/bsdiff/SuffixSorterTestBase.java
new file mode 100644
index 0000000..cb72d8f
--- /dev/null
+++ b/generator/src/test/java/com/google/archivepatcher/generator/bsdiff/SuffixSorterTestBase.java
@@ -0,0 +1,163 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.generator.bsdiff;
+
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.Random;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Base class providing tests common to all suffix sorter implementations.
+ */
+public abstract class SuffixSorterTestBase {
+
+  public abstract SuffixSorter getSuffixSorter();
+
+  @Test
+  public void suffixSortEmptyDataTest() throws Exception {
+    checkSuffixSort(new int[] {0}, new byte[] {});
+  }
+
+  @Test
+  public void suffixSortShortDataTest() throws Exception {
+    checkSuffixSort(new int[] {1, 0}, new byte[] {23});
+    checkSuffixSort(new int[] {2, 1, 0}, new byte[] {23, 20});
+    checkSuffixSort(new int[] {2, 0, 1}, new byte[] {0, 127});
+    checkSuffixSort(new int[] {2, 1, 0}, new byte[] {42, 42});
+  }
+
+  private void checkSuffixSort(int[] expectedSuffixArray, byte[] inputBytes) throws Exception {
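+    // The expected arrays carry inputBytes.length + 1 entries: one suffix per offset plus
+    // the empty suffix (offset == length), which sorts before everything else.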
+    RandomAccessObject input = new RandomAccessObject.RandomAccessByteArrayObject(inputBytes);
+    RandomAccessObject groupArray = getSuffixSorter().suffixSort(input);
+
+    assertSorted(groupArray, input);
+    Assert.assertArrayEquals(expectedSuffixArray, randomAccessObjectToIntArray(groupArray));
+  }
+
+  @Test
+  public void suffixSortLongDataTest() throws Exception {
+    RandomAccessObject groupArrayRO = getSuffixSorter().suffixSort(BsDiffTestData.LONG_DATA_99_RO);
+
+    assertSorted(groupArrayRO, BsDiffTestData.LONG_DATA_99_RO);
+
+    Assert.assertArrayEquals(
+        BsDiffTestData.QUICK_SUFFIX_SORT_TEST_GA_CONTROL,
+        randomAccessObjectToIntArray(groupArrayRO));
+  }
+
+  @Test
+  public void suffixSortVeryLongDataTest() throws Exception {
+    RandomAccessObject groupArray2RO =
+        getSuffixSorter().suffixSort(BsDiffTestData.LONGER_DATA_349_RO);
+
+    assertSorted(groupArray2RO, BsDiffTestData.LONGER_DATA_349_RO);
+
+    Assert.assertArrayEquals(
+        BsDiffTestData.QUICK_SUFFIX_SORT_TEST_IA_CONTROL,
+        randomAccessObjectToIntArray(groupArray2RO));
+  }
+
+  @Test
+  public void testRandom() throws Exception {
+    Random rand = new Random(1123458);
+    for (int i = 1; i <= 10; i++) {
+      RandomAccessObject input = generateRandom(rand, i * 10000);
+      RandomAccessObject suffixArray = getSuffixSorter().suffixSort(input);
+
+      assertSorted(suffixArray, input);
+    }
+  }
+
+  private static RandomAccessObject generateRandom(Random rand, int length) {
+    byte[] bytes = new byte[length];
+    rand.nextBytes(bytes);
+    return new RandomAccessObject.RandomAccessByteArrayObject(bytes);
+  }
+
+  protected static RandomAccessObject intArrayToRandomAccessObject(final int[] array)
+      throws Exception {
+    RandomAccessObject ret =
+        new RandomAccessObject.RandomAccessByteArrayObject(new byte[array.length * 4]);
+    ret.seekToIntAligned(0);
+
+    for (int element : array) {
+      ret.writeInt(element);
+    }
+
+    return ret;
+  }
+
+  protected static boolean intArrayEqualsRandomAccessObject(
+      int[] array, RandomAccessObject randomAccessObject) throws Exception {
+    randomAccessObject.seekToIntAligned(0);
+
+    for (int element : array) {
+      if (element != randomAccessObject.readInt()) {
+        return false;
+      }
+    }
+
+    return true;
+  }
+
+  protected static int[] randomAccessObjectToIntArray(RandomAccessObject randomAccessObject)
+      throws Exception {
+    int[] ret = new int[(int) (randomAccessObject.length() / 4)];
+    randomAccessObject.seekToIntAligned(0);
+
+    for (int i = 0; i < ret.length; i++) {
+      ret[i] = randomAccessObject.readInt();
+    }
+
+    return ret;
+  }
+
+  private static boolean checkSuffixLessThanOrEqual(
+      RandomAccessObject input, int index1, int index2) throws Exception {
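+    // Lexicographic comparison on unsigned byte values: the suffix at index1 is less than
+    // or equal to the suffix at index2 when the first differing byte is smaller, or when
+    // the index1 suffix is exhausted first (a prefix sorts before its extensions).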
+    while (true) {
+      if (index1 == input.length()) {
+        return true;
+      }
+      input.seek(index1);
+      int unsignedByte1 = input.readUnsignedByte();
+      input.seek(index2);
+      int unsignedByte2 = input.readUnsignedByte();
+      if (unsignedByte1 < unsignedByte2) {
+        return true;
+      }
+      if (unsignedByte1 > unsignedByte2) {
+        return false;
+      }
+      index1++;
+      index2++;
+    }
+  }
+
+  private static void assertSorted(RandomAccessObject suffixArray, RandomAccessObject input)
+      throws Exception {
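+    // A suffix array is sorted exactly when every adjacent pair of entries is in
+    // non-decreasing suffix order, so pairwise comparison over the whole array suffices.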
+    for (int i = 0; i < input.length(); i++) {
+      suffixArray.seekToIntAligned(i);
+      int index1 = suffixArray.readInt();
+      suffixArray.seekToIntAligned(i + 1);
+      int index2 = suffixArray.readInt();
+      if (!checkSuffixLessThanOrEqual(input, index1, index2)) {
+        fail();
+      }
+    }
+  }
+}
diff --git a/generator/src/test/resources/com/google/archivepatcher/generator/bsdiff/testdata/BsDiffInternalTestNew.txt b/generator/src/test/resources/com/google/archivepatcher/generator/bsdiff/testdata/BsDiffInternalTestNew.txt
new file mode 100644
index 0000000..0f5c371
--- /dev/null
+++ b/generator/src/test/resources/com/google/archivepatcher/generator/bsdiff/testdata/BsDiffInternalTestNew.txt
@@ -0,0 +1 @@
+TL?I"a)tAARPRLH)ZNuYRsdf8yu032D<SL;9EPh2nz?y7W>#U<`bDa80CNDT)AvVZRq3' Gcn6*o3U_)`E={[q;,)T/5Ntk,>K=v<Q4?o&Q!m2Za;Dt5'hMTZ?.\ri#'Qu*>3bKo9pO}-?Ar/;\9epjz&`Y_i{FZw'@HTfLI\3(kOi^6{_9TC_m8C^zAuV'2hg%[AC@op(/=V+PvPNh<s^vqab@KWddY+425eS_j.jGX<!ZxzL$042"$PnU<cZ|Us#%4R_5s)&: vS*:sk#tGC=V'W|^"O&nY+5og8[oT2h80otugs7s9+bD?6knRa,>k_(-O4P^_kOk}<D_Jxo^s+(9R\y.uv::ng
diff --git a/generator/src/test/resources/com/google/archivepatcher/generator/bsdiff/testdata/BsDiffInternalTestOld.txt b/generator/src/test/resources/com/google/archivepatcher/generator/bsdiff/testdata/BsDiffInternalTestOld.txt
new file mode 100644
index 0000000..68445f3
--- /dev/null
+++ b/generator/src/test/resources/com/google/archivepatcher/generator/bsdiff/testdata/BsDiffInternalTestOld.txt
@@ -0,0 +1 @@
+TL?I"a)tAARPRLH)ZNuYRBZ>+A2lO;D<SL;9EPhz?y7W>#U<`bDa80CNDT)AvVZRq3' Gcn6*o3U_)`E={[q;,)T/5Ntk,>K=v<Q4?o&Q!m2Za;Dt5'hMTZ?.\ri#'Qu*>3bo9pO}-?Ar/;\9epjz&`Y_i{FZw'@HTfLI\3(kOi^6{_9TC_m8C^zAuV'2hg%[AC@op(/=V+PvPNh<s^vqab@KWddY+425eS_j(jGX<!ZxzL$042"$PnU<cZ|Us#%4R_5s)&: vS*:sk#tGC=V'W|^"O&nY+5og8[oTf!$Dm@Hu3M+bD?6knRa,>k_(-O4P^_kOk}<D_Jxo^s+(9R\y.uv::ng
diff --git a/applier/src/test/java/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_internal_patch_a_to_b.bin b/generator/src/test/resources/com/google/archivepatcher/generator/bsdiff/testdata/BsDiffInternalTestPatchExpected.patch
similarity index 100%
copy from applier/src/test/java/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_internal_patch_a_to_b.bin
copy to generator/src/test/resources/com/google/archivepatcher/generator/bsdiff/testdata/BsDiffInternalTestPatchExpected.patch
Binary files differ
diff --git a/applier/src/test/java/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_minimal_blob_a.bin b/generator/src/test/resources/com/google/archivepatcher/generator/bsdiff/testdata/minimalBlobA.bin
similarity index 100%
copy from applier/src/test/java/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_minimal_blob_a.bin
copy to generator/src/test/resources/com/google/archivepatcher/generator/bsdiff/testdata/minimalBlobA.bin
Binary files differ
diff --git a/applier/src/test/java/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_minimal_blob_b.bin b/generator/src/test/resources/com/google/archivepatcher/generator/bsdiff/testdata/minimalBlobB.bin
similarity index 100%
copy from applier/src/test/java/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_minimal_blob_b.bin
copy to generator/src/test/resources/com/google/archivepatcher/generator/bsdiff/testdata/minimalBlobB.bin
Binary files differ
diff --git a/applier/src/test/java/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_minimal_patch_a_to_b.bin b/generator/src/test/resources/com/google/archivepatcher/generator/bsdiff/testdata/minimalBlobPatch.patch
similarity index 100%
copy from applier/src/test/java/com/google/archivepatcher/applier/bsdiff/testdata/bsdifftest_minimal_patch_a_to_b.bin
copy to generator/src/test/resources/com/google/archivepatcher/generator/bsdiff/testdata/minimalBlobPatch.patch
Binary files differ
diff --git a/gradle.properties b/gradle.properties
new file mode 100644
index 0000000..1d3591c
--- /dev/null
+++ b/gradle.properties
@@ -0,0 +1,18 @@
+# Project-wide Gradle settings.
+
+# IDE (e.g. Android Studio) users:
+# Gradle settings configured through the IDE *will override*
+# any settings specified in this file.
+
+# For more details on how to configure your build environment visit
+# http://www.gradle.org/docs/current/userguide/build_environment.html
+
+# Specifies the JVM arguments used for the daemon process.
+# The setting is particularly useful for tweaking memory settings.
+# Default value: -Xmx1024m -XX:MaxPermSize=256m
+# org.gradle.jvmargs=-Xmx2048m -XX:MaxPermSize=512m -XX:+HeapDumpOnOutOfMemoryError -Dfile.encoding=UTF-8
+
+# When configured, Gradle will run in incubating parallel mode.
+# This option should only be used with decoupled projects. More details, visit
+# http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects
+# org.gradle.parallel=true
\ No newline at end of file
diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar
new file mode 100644
index 0000000..747bb13
--- /dev/null
+++ b/gradle/wrapper/gradle-wrapper.jar
Binary files differ
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties
new file mode 100644
index 0000000..747e6d9
--- /dev/null
+++ b/gradle/wrapper/gradle-wrapper.properties
@@ -0,0 +1,6 @@
+#Tue Sep 06 11:53:20 BST 2016
+distributionBase=GRADLE_USER_HOME
+distributionPath=wrapper/dists
+zipStoreBase=GRADLE_USER_HOME
+zipStorePath=wrapper/dists
+distributionUrl=https\://services.gradle.org/distributions/gradle-2.14.1-all.zip
diff --git a/gradlew b/gradlew
new file mode 100755
index 0000000..9d82f78
--- /dev/null
+++ b/gradlew
@@ -0,0 +1,160 @@
+#!/usr/bin/env bash
+
+##############################################################################
+##
+##  Gradle start up script for UN*X
+##
+##############################################################################
+
+# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+DEFAULT_JVM_OPTS=""
+
+APP_NAME="Gradle"
+APP_BASE_NAME=`basename "$0"`
+
+# Use the maximum available, or set MAX_FD != -1 to use that value.
+MAX_FD="maximum"
+
+warn ( ) {
+    echo "$*"
+}
+
+die ( ) {
+    echo
+    echo "$*"
+    echo
+    exit 1
+}
+
+# OS specific support (must be 'true' or 'false').
+cygwin=false
+msys=false
+darwin=false
+case "`uname`" in
+  CYGWIN* )
+    cygwin=true
+    ;;
+  Darwin* )
+    darwin=true
+    ;;
+  MINGW* )
+    msys=true
+    ;;
+esac
+
+# Attempt to set APP_HOME
+# Resolve links: $0 may be a link
+PRG="$0"
+# Need this for relative symlinks.
+while [ -h "$PRG" ] ; do
+    ls=`ls -ld "$PRG"`
+    link=`expr "$ls" : '.*-> \(.*\)$'`
+    if expr "$link" : '/.*' > /dev/null; then
+        PRG="$link"
+    else
+        PRG=`dirname "$PRG"`"/$link"
+    fi
+done
+SAVED="`pwd`"
+cd "`dirname \"$PRG\"`/" >/dev/null
+APP_HOME="`pwd -P`"
+cd "$SAVED" >/dev/null
+
+CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
+
+# Determine the Java command to use to start the JVM.
+if [ -n "$JAVA_HOME" ] ; then
+    if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
+        # IBM's JDK on AIX uses strange locations for the executables
+        JAVACMD="$JAVA_HOME/jre/sh/java"
+    else
+        JAVACMD="$JAVA_HOME/bin/java"
+    fi
+    if [ ! -x "$JAVACMD" ] ; then
+        die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
+
+Please set the JAVA_HOME variable in your environment to match the
+location of your Java installation."
+    fi
+else
+    JAVACMD="java"
+    which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+
+Please set the JAVA_HOME variable in your environment to match the
+location of your Java installation."
+fi
+
+# Increase the maximum file descriptors if we can.
+if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then
+    MAX_FD_LIMIT=`ulimit -H -n`
+    if [ $? -eq 0 ] ; then
+        if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
+            MAX_FD="$MAX_FD_LIMIT"
+        fi
+        ulimit -n $MAX_FD
+        if [ $? -ne 0 ] ; then
+            warn "Could not set maximum file descriptor limit: $MAX_FD"
+        fi
+    else
+        warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
+    fi
+fi
+
+# For Darwin, add options to specify how the application appears in the dock
+if $darwin; then
+    GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
+fi
+
+# For Cygwin, switch paths to Windows format before running java
+if $cygwin ; then
+    APP_HOME=`cygpath --path --mixed "$APP_HOME"`
+    CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
+    JAVACMD=`cygpath --unix "$JAVACMD"`
+
+    # We build the pattern for arguments to be converted via cygpath
+    ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
+    SEP=""
+    for dir in $ROOTDIRSRAW ; do
+        ROOTDIRS="$ROOTDIRS$SEP$dir"
+        SEP="|"
+    done
+    OURCYGPATTERN="(^($ROOTDIRS))"
+    # Add a user-defined pattern to the cygpath arguments
+    if [ "$GRADLE_CYGPATTERN" != "" ] ; then
+        OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
+    fi
+    # Now convert the arguments - kludge to limit ourselves to /bin/sh
+    i=0
+    for arg in "$@" ; do
+        CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
+        CHECK2=`echo "$arg"|egrep -c "^-"`                                 ### Determine if an option
+
+        if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then                    ### Added a condition
+            eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
+        else
+            eval `echo args$i`="\"$arg\""
+        fi
+        i=$((i+1))
+    done
+    case $i in
+        (0) set -- ;;
+        (1) set -- "$args0" ;;
+        (2) set -- "$args0" "$args1" ;;
+        (3) set -- "$args0" "$args1" "$args2" ;;
+        (4) set -- "$args0" "$args1" "$args2" "$args3" ;;
+        (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
+        (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
+        (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
+        (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
+        (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
+    esac
+fi
+
+# Split up the JVM_OPTS and GRADLE_OPTS values into an array, following the shell quoting and substitution rules
+function splitJvmOpts() {
+    JVM_OPTS=("$@")
+}
+eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS
+JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME"
+
+exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@"
diff --git a/gradlew.bat b/gradlew.bat
new file mode 100644
index 0000000..8a0b282
--- /dev/null
+++ b/gradlew.bat
@@ -0,0 +1,90 @@
+@if "%DEBUG%" == "" @echo off
+@rem ##########################################################################
+@rem
+@rem  Gradle startup script for Windows
+@rem
+@rem ##########################################################################
+
+@rem Set local scope for the variables with windows NT shell
+if "%OS%"=="Windows_NT" setlocal
+
+@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+set DEFAULT_JVM_OPTS=
+
+set DIRNAME=%~dp0
+if "%DIRNAME%" == "" set DIRNAME=.
+set APP_BASE_NAME=%~n0
+set APP_HOME=%DIRNAME%
+
+@rem Find java.exe
+if defined JAVA_HOME goto findJavaFromJavaHome
+
+set JAVA_EXE=java.exe
+%JAVA_EXE% -version >NUL 2>&1
+if "%ERRORLEVEL%" == "0" goto init
+
+echo.
+echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:findJavaFromJavaHome
+set JAVA_HOME=%JAVA_HOME:"=%
+set JAVA_EXE=%JAVA_HOME%/bin/java.exe
+
+if exist "%JAVA_EXE%" goto init
+
+echo.
+echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:init
+@rem Get command-line arguments, handling Windows variants
+
+if not "%OS%" == "Windows_NT" goto win9xME_args
+if "%@eval[2+2]" == "4" goto 4NT_args
+
+:win9xME_args
+@rem Slurp the command line arguments.
+set CMD_LINE_ARGS=
+set _SKIP=2
+
+:win9xME_args_slurp
+if "x%~1" == "x" goto execute
+
+set CMD_LINE_ARGS=%*
+goto execute
+
+:4NT_args
+@rem Get arguments from the 4NT Shell from JP Software
+set CMD_LINE_ARGS=%$
+
+:execute
+@rem Setup the command line
+
+set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
+
+@rem Execute Gradle
+"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
+
+:end
+@rem End local scope for the variables with windows NT shell
+if "%ERRORLEVEL%"=="0" goto mainEnd
+
+:fail
+rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
+rem the _cmd.exe /c_ return code!
+if  not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
+exit /b 1
+
+:mainEnd
+if "%OS%"=="Windows_NT" endlocal
+
+:omega
diff --git a/integrationtest/build.gradle b/integrationtest/build.gradle
new file mode 100644
index 0000000..bbe778f
--- /dev/null
+++ b/integrationtest/build.gradle
@@ -0,0 +1,12 @@
+// integrationtest module
+apply plugin: 'java'
+
+dependencies {
+    compile project(':shared')
+    compile project(':applier')
+    compile project(':generator')
+
+    testCompile 'junit:junit:4.12'
+    testCompile project(':sharedtest')
+}
+// EOF
diff --git a/integrationtest/src/test/java/com/google/archivepatcher/integrationtest/FileByFileV1IntegrationTest.java b/integrationtest/src/test/java/com/google/archivepatcher/integrationtest/FileByFileV1IntegrationTest.java
new file mode 100644
index 0000000..27eade7
--- /dev/null
+++ b/integrationtest/src/test/java/com/google/archivepatcher/integrationtest/FileByFileV1IntegrationTest.java
@@ -0,0 +1,197 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.integrationtest;
+
+import com.google.archivepatcher.applier.FileByFileV1DeltaApplier;
+import com.google.archivepatcher.generator.FileByFileV1DeltaGenerator;
+import com.google.archivepatcher.shared.UnitTestZipArchive;
+import com.google.archivepatcher.shared.UnitTestZipEntry;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/**
+ * High-level integration tests that fully exercise the code without any mocking or subclassing.
+ */
+@RunWith(JUnit4.class)
+@SuppressWarnings("javadoc")
+public class FileByFileV1IntegrationTest {
+
+  // Inputs to the patching system
+  private File tempDir = null;
+  private File oldFile = null;
+  private File newFile = null;
+
+  // A test archive. The contents are as follows:
+  // PATH       OLD_FORMAT          NEW_FORMAT          NOTES
+  // ---------------------------------------------------------------------------
+  // /entry1    compressed (L6)     compressed (L6)     Unchanged, compressed
+  // /entry2    compressed (L6)     compressed (L9)     Only compressed bytes changed
+  // /entry3    compressed (L6)     compressed (L6)     Compressed and uncompressed bytes changed
+  // /entry4    uncompressed        compressed (L6)     Transition from uncompressed to compressed
+  // /entry5    compressed (L6)     uncompressed        Transition from compressed to uncompressed
+  // /entry6    uncompressed        uncompressed        Unchanged, uncompressed
+  // /entry7    uncompressed        uncompressed        Uncompressed and bytes changed
+  // /entry8    uncompressed        compressed (L6)     Like /entry4 but also with changed bytes
+  // /entry9    compressed (L6)     uncompressed        Like /entry5 but also with changed bytes
+  // /entry10*  compressed (L6)     compressed (L6)     Renamed from /entry10A to /entry10B
+  // /entry11*  uncompressed        uncompressed        Renamed from /entry11A to /entry11B
+  // /entry12*  compressed (L6)     compressed (L6)     Like /entry10 but also with changed bytes
+  // /entry13*  uncompressed        uncompressed        Like /entry11 but also with changed bytes
+  private static final UnitTestZipEntry OLD_ENTRY1 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry1", 6, "entry 1", null);
+  private static final UnitTestZipEntry NEW_ENTRY1 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry1", 6, "entry 1", null);
+  private static final UnitTestZipEntry OLD_ENTRY2 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry2", 6, "entry 2", null);
+  private static final UnitTestZipEntry NEW_ENTRY2 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry2", 9, "entry 2", null);
+  private static final UnitTestZipEntry OLD_ENTRY3 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry3", 6, "entry 3A", null);
+  private static final UnitTestZipEntry NEW_ENTRY3 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry3", 6, "entry 3B", null);
+  private static final UnitTestZipEntry OLD_ENTRY4 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry4", 0, "entry 4", null);
+  private static final UnitTestZipEntry NEW_ENTRY4 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry4", 6, "entry 4", null);
+  private static final UnitTestZipEntry OLD_ENTRY5 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry5", 6, "entry 5", null);
+  private static final UnitTestZipEntry NEW_ENTRY5 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry5", 0, "entry 5", null);
+  private static final UnitTestZipEntry OLD_ENTRY6 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry6", 0, "entry 6", null);
+  private static final UnitTestZipEntry NEW_ENTRY6 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry6", 0, "entry 6", null);
+  private static final UnitTestZipEntry OLD_ENTRY7 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry7", 0, "entry 7A", null);
+  private static final UnitTestZipEntry NEW_ENTRY7 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry7", 0, "entry 7B", null);
+  private static final UnitTestZipEntry OLD_ENTRY8 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry8", 0, "entry 8A", null);
+  private static final UnitTestZipEntry NEW_ENTRY8 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry8", 6, "entry 8B", null);
+  private static final UnitTestZipEntry OLD_ENTRY9 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry9", 6, "entry 9A", null);
+  private static final UnitTestZipEntry NEW_ENTRY9 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry9", 0, "entry 9B", null);
+  private static final UnitTestZipEntry OLD_ENTRY10 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry10A", 6, "entry 10", null);
+  private static final UnitTestZipEntry NEW_ENTRY10 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry10B", 6, "entry 10", null);
+  private static final UnitTestZipEntry OLD_ENTRY11 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry11A", 0, "entry 11", null);
+  private static final UnitTestZipEntry NEW_ENTRY11 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry11B", 0, "entry 11", null);
+  private static final UnitTestZipEntry OLD_ENTRY12 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry12A", 6, "entry 12A", null);
+  private static final UnitTestZipEntry NEW_ENTRY12 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry12B", 6, "entry 12B", null);
+  private static final UnitTestZipEntry OLD_ENTRY13 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry13A", 0, "entry 13A", null);
+  private static final UnitTestZipEntry NEW_ENTRY13 =
+      UnitTestZipArchive.makeUnitTestZipEntry("/entry13B", 0, "entry 13B", null);
+
+  @Before
+  public void setUp() throws IOException {
+    oldFile = File.createTempFile("fbf_test", "old");
+    oldFile.deleteOnExit();
+    newFile = File.createTempFile("fbf_test", "new");
+    newFile.deleteOnExit();
+    tempDir = oldFile.getParentFile();
+  }
+
+  @After
+  public void tearDown() {
+    oldFile.delete();
+    newFile.delete();
+  }
+
+  private static void writeFile(File file, byte[] content) throws IOException {
+    try (FileOutputStream out = new FileOutputStream(file)) {
+      out.write(content);
+      out.flush();
+    }
+  }
+
+  /**
+   * High-level integration test that covers the most common kinds of operations expected to be
+   * found in the real world.
+   */
+  @Test
+  public void testPatchAndApply() throws Exception {
+    // Write the old archive to disk.
+    byte[] oldArchiveBytes = UnitTestZipArchive.makeTestZip(Arrays.asList(
+        OLD_ENTRY1,
+        OLD_ENTRY2,
+        OLD_ENTRY3,
+        OLD_ENTRY4,
+        OLD_ENTRY5,
+        OLD_ENTRY6,
+        OLD_ENTRY7,
+        OLD_ENTRY8,
+        OLD_ENTRY9,
+        OLD_ENTRY10,
+        OLD_ENTRY11,
+        OLD_ENTRY12,
+        OLD_ENTRY13));
+    writeFile(oldFile, oldArchiveBytes);
+
+    // Write the new archive to disk. Reverse the entry order to fully exercise the
+    // reordering logic; otherwise some entry offsets might coincide with the old ones by
+    // chance.
+    List<UnitTestZipEntry> newEntries = Arrays.asList(
+        NEW_ENTRY1,
+        NEW_ENTRY2,
+        NEW_ENTRY3,
+        NEW_ENTRY4,
+        NEW_ENTRY5,
+        NEW_ENTRY6,
+        NEW_ENTRY7,
+        NEW_ENTRY8,
+        NEW_ENTRY9,
+        NEW_ENTRY10,
+        NEW_ENTRY11,
+        NEW_ENTRY12,
+        NEW_ENTRY13);
+    Collections.reverse(newEntries);
+    byte[] newArchiveBytes = UnitTestZipArchive.makeTestZip(newEntries);
+    writeFile(newFile, newArchiveBytes);
+
+    // Generate the patch.
+    ByteArrayOutputStream patchBuffer = new ByteArrayOutputStream();
+    FileByFileV1DeltaGenerator generator = new FileByFileV1DeltaGenerator();
+    generator.generateDelta(oldFile, newFile, patchBuffer);
+
+    // Apply the patch.
+    FileByFileV1DeltaApplier applier = new FileByFileV1DeltaApplier(tempDir);
+    ByteArrayInputStream patchIn = new ByteArrayInputStream(patchBuffer.toByteArray());
+    ByteArrayOutputStream newOut = new ByteArrayOutputStream();
+    applier.applyDelta(oldFile, patchIn, newOut);
+
+    // Finally, expect that the result of applying the patch is exactly the same as the new archive
+    // that was written to disk.
+    Assert.assertArrayEquals(newArchiveBytes, newOut.toByteArray());
+  }
+}
diff --git a/sample/src/main/java/com/google/archivepatcher/sample/SamplePatchApplier.java b/sample/src/main/java/com/google/archivepatcher/sample/SamplePatchApplier.java
new file mode 100644
index 0000000..a6be559
--- /dev/null
+++ b/sample/src/main/java/com/google/archivepatcher/sample/SamplePatchApplier.java
@@ -0,0 +1,43 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.sample;
+
+import com.google.archivepatcher.applier.FileByFileV1DeltaApplier;
+import com.google.archivepatcher.shared.DefaultDeflateCompatibilityWindow;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.util.zip.Inflater;
+import java.util.zip.InflaterInputStream;
+
+/** Apply a patch; args are old file path, patch file path, and new file path. */
+public class SamplePatchApplier {
+  public static void main(String... args) throws Exception {
+    if (!new DefaultDeflateCompatibilityWindow().isCompatible()) {
+      System.err.println("zlib not compatible on this system");
+      System.exit(-1);
+    }
+    File oldFile = new File(args[0]); // must be a zip archive
+    Inflater uncompressor = new Inflater(true);
+    try (FileInputStream compressedPatchIn = new FileInputStream(args[1]);
+        InflaterInputStream patchIn =
+            new InflaterInputStream(compressedPatchIn, uncompressor, 32768);
+        FileOutputStream newFileOut = new FileOutputStream(args[2])) {
+      new FileByFileV1DeltaApplier().applyDelta(oldFile, patchIn, newFileOut);
+    } finally {
+      uncompressor.end();
+    }
+  }
+}
diff --git a/sample/src/main/java/com/google/archivepatcher/sample/SamplePatchGenerator.java b/sample/src/main/java/com/google/archivepatcher/sample/SamplePatchGenerator.java
new file mode 100644
index 0000000..12f9f90
--- /dev/null
+++ b/sample/src/main/java/com/google/archivepatcher/sample/SamplePatchGenerator.java
@@ -0,0 +1,44 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.sample;
+
+import com.google.archivepatcher.generator.FileByFileV1DeltaGenerator;
+import com.google.archivepatcher.shared.DefaultDeflateCompatibilityWindow;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.util.zip.Deflater;
+import java.util.zip.DeflaterOutputStream;
+
+/** Generate a patch; args are old file path, new file path, and patch file path. */
+public class SamplePatchGenerator {
+  public static void main(String... args) throws Exception {
+    if (!new DefaultDeflateCompatibilityWindow().isCompatible()) {
+      System.err.println("zlib not compatible on this system");
+      System.exit(-1);
+    }
+    File oldFile = new File(args[0]); // must be a zip archive
+    File newFile = new File(args[1]); // must be a zip archive
+    Deflater compressor = new Deflater(9, true); // to compress the patch
+    try (FileOutputStream patchOut = new FileOutputStream(args[2]);
+        DeflaterOutputStream compressedPatchOut =
+            new DeflaterOutputStream(patchOut, compressor, 32768)) {
+      new FileByFileV1DeltaGenerator().generateDelta(oldFile, newFile, compressedPatchOut);
+      compressedPatchOut.finish();
+      compressedPatchOut.flush();
+    } finally {
+      compressor.end();
+    }
+  }
+}
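+
+// Example end-to-end use of the two samples, with hypothetical file names:
+//   java -cp <classpath> com.google.archivepatcher.sample.SamplePatchGenerator old.zip new.zip patch.bin
+//   java -cp <classpath> com.google.archivepatcher.sample.SamplePatchApplier old.zip patch.bin new.zip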
diff --git a/settings.gradle b/settings.gradle
new file mode 100644
index 0000000..d34f004
--- /dev/null
+++ b/settings.gradle
@@ -0,0 +1,7 @@
+include ':sharedtest'
+include ':shared'
+include ':applier'
+include ':generator'
+include ':explainer'
+include ':tools'
+include ':integrationtest'
diff --git a/shared/build.gradle b/shared/build.gradle
new file mode 100644
index 0000000..b6c7973
--- /dev/null
+++ b/shared/build.gradle
@@ -0,0 +1,9 @@
+// shared module
+
+apply plugin: 'java'
+
+dependencies {
+    testCompile 'junit:junit:4.12'
+    testCompile project(':sharedtest')
+}
+// EOF
diff --git a/shared/src/main/java/com/google/archivepatcher/shared/DeltaFriendlyFile.java b/shared/src/main/java/com/google/archivepatcher/shared/DeltaFriendlyFile.java
index 2e4f91e..25f3329 100644
--- a/shared/src/main/java/com/google/archivepatcher/shared/DeltaFriendlyFile.java
+++ b/shared/src/main/java/com/google/archivepatcher/shared/DeltaFriendlyFile.java
@@ -31,15 +31,17 @@
   public static final int DEFAULT_COPY_BUFFER_SIZE = 32768;
 
   /**
-   * Invoke {@link #generateDeltaFriendlyFile(List, File, OutputStream, boolean, int)} with
-   * <code>generateInverse</code> set to <code>true</code> and a copy buffer size of
-   * {@link #DEFAULT_COPY_BUFFER_SIZE}.
+   * Invoke {@link #generateDeltaFriendlyFile(List, File, OutputStream, boolean, int)} with <code>
+   * generateInverse</code> set to <code>true</code> and a copy buffer size of {@link
+   * #DEFAULT_COPY_BUFFER_SIZE}.
+   *
+   * @param <T> the type of the data associated with the ranges
    * @param rangesToUncompress the ranges to be uncompressed during transformation to a
-   * delta-friendly form
+   *     delta-friendly form
    * @param file the file to read from
    * @param deltaFriendlyOut a stream to write the delta-friendly file to
    * @return the ranges in the delta-friendly file that correspond to the ranges in the original
-   * file, with identical metadata and in the same order
+   *     file, with identical metadata and in the same order
    * @throws IOException if anything goes wrong
    */
   public static <T> List<TypedRange<T>> generateDeltaFriendlyFile(
@@ -51,21 +53,23 @@
 
   /**
    * Generate one delta-friendly file and (optionally) return the ranges necessary to invert the
-   * transform, in file order. There is a 1:1 correspondence between the ranges in the input
-   * list and the returned list, but the offsets and lengths will be different (the input list
-   * represents compressed data, the output list represents uncompressed data). The ability to
-   * suppress generation of the inverse range and to specify the size of the copy buffer are
-   * provided for clients that desire a minimal memory footprint.
+   * transform, in file order. There is a 1:1 correspondence between the ranges in the input list
+   * and the returned list, but the offsets and lengths will be different (the input list represents
+   * compressed data, the output list represents uncompressed data). The ability to suppress
+   * generation of the inverse range and to specify the size of the copy buffer are provided for
+   * clients that desire a minimal memory footprint.
+   *
+   * @param <T> the type of the data associated with the ranges
    * @param rangesToUncompress the ranges to be uncompressed during transformation to a
-   * delta-friendly form
+   *     delta-friendly form
    * @param file the file to read from
    * @param deltaFriendlyOut a stream to write the delta-friendly file to
    * @param generateInverse if <code>true</code>, generate and return a list of inverse ranges in
-   * file order; otherwise, do all the normal work but return null instead of the inverse ranges
+   *     file order; otherwise, do all the normal work but return null instead of the inverse ranges
    * @param copyBufferSize the size of the buffer to use for copying bytes between streams
    * @return if <code>generateInverse</code> was true, returns the ranges in the delta-friendly file
-   * that correspond to the ranges in the original file, with identical metadata and in the same
-   * order; otherwise, return null
+   *     that correspond to the ranges in the original file, with identical metadata and in the same
+   *     order; otherwise, return null
    * @throws IOException if anything goes wrong
    */
   public static <T> List<TypedRange<T>> generateDeltaFriendlyFile(
diff --git a/sharedtest/build.gradle b/sharedtest/build.gradle
new file mode 100644
index 0000000..de25f1d
--- /dev/null
+++ b/sharedtest/build.gradle
@@ -0,0 +1,9 @@
+// sharedtest module
+
+apply plugin: 'java'
+
+dependencies {
+    compile 'junit:junit:4.12'
+    compile project(':shared')
+}
+// EOF
diff --git a/shared/src/test/java/com/google/archivepatcher/shared/UnitTestZipArchive.java b/sharedtest/src/main/java/com/google/archivepatcher/shared/UnitTestZipArchive.java
similarity index 100%
rename from shared/src/test/java/com/google/archivepatcher/shared/UnitTestZipArchive.java
rename to sharedtest/src/main/java/com/google/archivepatcher/shared/UnitTestZipArchive.java
diff --git a/shared/src/test/java/com/google/archivepatcher/shared/UnitTestZipEntry.java b/sharedtest/src/main/java/com/google/archivepatcher/shared/UnitTestZipEntry.java
similarity index 97%
rename from shared/src/test/java/com/google/archivepatcher/shared/UnitTestZipEntry.java
rename to sharedtest/src/main/java/com/google/archivepatcher/shared/UnitTestZipEntry.java
index 1b31d9e..4a5d046 100644
--- a/shared/src/test/java/com/google/archivepatcher/shared/UnitTestZipEntry.java
+++ b/sharedtest/src/main/java/com/google/archivepatcher/shared/UnitTestZipEntry.java
@@ -61,8 +61,10 @@
 
   /**
    * Creates a new entry.
+   *
    * @param path the path under which the data is located in the archive
    * @param level the compression level of the entry
+   * @param nowrap the wrapping mode (true for raw deflate with no zlib wrapper, false to wrap the data in the zlib format)
    * @param content the binary content of the entry, as an ASCII string
    * @param comment optional comment, as an ASCII string
    */
diff --git a/tools/build.gradle b/tools/build.gradle
new file mode 100644
index 0000000..8220424
--- /dev/null
+++ b/tools/build.gradle
@@ -0,0 +1,22 @@
+// tools module
+
+apply plugin: 'java'
+
+def mainClassName = 'com.google.archivepatcher.tools.FileByFileTool'
+jar {
+    manifest {
+        attributes "Main-Class": mainClassName
+    }
+
+    from {
+        configurations.compile.collect { it.isDirectory() ? it : zipTree(it) }
+    }
+}
+
+dependencies {
+    compile project(':applier')
+    compile project(':explainer')
+    compile project(':generator')
+    compile project(':shared')
+}
+// EOF
diff --git a/tools/src/main/java/com/google/archivepatcher/tools/AbstractTool.java b/tools/src/main/java/com/google/archivepatcher/tools/AbstractTool.java
new file mode 100644
index 0000000..30913b1
--- /dev/null
+++ b/tools/src/main/java/com/google/archivepatcher/tools/AbstractTool.java
@@ -0,0 +1,70 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.tools;
+
+import java.io.File;
+import java.util.Iterator;
+
+/**
+ * Simple base class for tools, providing minimal standalone functionality without third-party
+ * argument-parser dependencies.
+ */
+public abstract class AbstractTool {
+
+  /**
+   * Pop an argument from the argument iterator, or exit with a usage message describing the
+   * type of argument that was expected.
+   * @param iterator the iterator to take an element from if available
+   * @param expectedType description for the thing that was supposed to be in the iterator, for
+   * error messages
+   * @return the element retrieved from the iterator
+   */
+  protected String popOrDie(Iterator<String> iterator, String expectedType) {
+    if (!iterator.hasNext()) {
+      exitWithUsage("missing argument for " + expectedType);
+    }
+    return iterator.next();
+  }
+
+  /**
+   * Find and return a readable file if it exists; exit with a usage message if it does not.
+   * @param path the path to check and get a {@link File} for
+   * @param description what the file represents, for error messages
+   * @return a {@link File} representing the path, which exists and is readable
+   */
+  protected File getRequiredFileOrDie(String path, String description) {
+    File result = new File(path);
+    if (!result.exists() || !result.canRead()) {
+      exitWithUsage(description + " does not exist or cannot be read: " + path);
+    }
+    return result;
+  }
+
+  /**
+   * Terminate the program with an error message and usage instructions.
+   * @param message the error message to give to the user prior to the usage instructions
+   */
+  protected void exitWithUsage(String message) {
+    System.err.println("Error: " + message);
+    System.err.println(getUsage());
+    System.exit(1);
+  }
+
+  /**
+   * Returns a string describing the usage for this tool.
+   * @return the string
+   */
+  protected abstract String getUsage();
+}
diff --git a/tools/src/main/java/com/google/archivepatcher/tools/FileByFileTool.java b/tools/src/main/java/com/google/archivepatcher/tools/FileByFileTool.java
new file mode 100644
index 0000000..5c12c2e
--- /dev/null
+++ b/tools/src/main/java/com/google/archivepatcher/tools/FileByFileTool.java
@@ -0,0 +1,232 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.tools;
+
+import com.google.archivepatcher.applier.FileByFileV1DeltaApplier;
+import com.google.archivepatcher.generator.DeltaFriendlyOldBlobSizeLimiter;
+import com.google.archivepatcher.generator.FileByFileV1DeltaGenerator;
+import com.google.archivepatcher.generator.RecommendationModifier;
+import com.google.archivepatcher.generator.TotalRecompressionLimiter;
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * Simple command-line tool for generating and applying patches.
+ */
+public class FileByFileTool extends AbstractTool {
+
+  /** Usage instructions for the command line. */
+  private static final String USAGE =
+      "java -cp <classpath> com.google.archivepatcher.tools.FileByFileTool <options>\n"
+          + "\nOptions:\n"
+          + "  --generate      generate a patch\n"
+          + "  --apply         apply a patch\n"
+          + "  --old           the old file\n"
+          + "  --new           the new file\n"
+          + "  --patch         the patch file\n"
+          + "  --trl           optionally, the total bytes of recompression to allow (see below)\n"
+          + "  --dfobsl        optionally, a limit on the total size of the delta-friendly old blob (see below)\n"
+          + "\nTotal Recompression Limit (trl):\n"
+          + "  When generating a patch, a limit can be specified on the total number of bytes to\n"
+          + "  allow to be recompressed during the patch apply process. This can be for a variety\n"
+          + "  of reasons, with the most obvious being to limit the amount of effort that has to\n"
+          + "  be expended applying the patch on the target platform. To properly explain a\n"
+          + "  patch that had such a limitation, it is necessary to specify the same limitation\n"
+          + "  here. This argument is illegal for --apply, since it only applies to --generate.\n"
+          + "\nDelta Friendly Old Blob Size Limit (dfobsl):\n"
+          + "  When generating a patch, a limit can be specified on the total size of the delta-\n"
+          + "  friendly old blob. This implicitly limits the size of the temporary file that\n"
+          + "  needs to be created when applying the patch. The size limit is \"soft\" in that \n"
+          + "  the delta-friendly old blob needs to at least contain the original data that was\n"
+          + "  within it; but the limit specified here will constrain any attempt to uncompress\n"
+          + "  the content. If the limit is less than or equal to the size of the old file, no\n"
+          + "  uncompression will be performed at all. Otherwise, the old file can expand into\n"
+          + "  delta-friendly old blob until the size reaches this limit.\n"
+          + "\nExamples:\n"
+          + "  To generate a patch from OLD to NEW, saving the patch in PATCH:\n"
+          + "    java -cp <classpath> com.google.archivepatcher.tools.FileByFileTool --generate \\\n"
+          + "      --old OLD --new NEW --patch PATCH\n"
+          + "  To generate a patch from OLD to NEW, limiting to 1,000,000 recompress bytes:\n"
+          + "    java -cp <classpath> com.google.archivepatcher.tools.FileByFileTool --generate \\\n"
+          + "      --old OLD --new NEW --trl 1000000 --patch PATCH\n"
+          + "  To apply a patch PATCH to OLD, saving the result in NEW:\n"
+          + "    java -cp <classpath> com.google.archivepatcher.tools.FileByFileTool --apply \\\n"
+          + "      --old OLD --patch PATCH --new NEW";
+
+  /** Modes of operation. */
+  private enum Mode {
+    /** Generate a patch. */
+    GENERATE,
+
+    /** Apply a patch. */
+    APPLY
+  }
+
+  /**
+   * Runs the tool. See usage instructions for more information.
+   *
+   * @param args command line arguments
+   * @throws IOException if anything goes wrong
+   * @throws InterruptedException if any thread has interrupted the current thread
+   */
+  public static void main(String... args) throws IOException, InterruptedException {
+    new FileByFileTool().run(args);
+  }
+
+  /**
+   * Run the tool.
+   *
+   * @param args command line arguments
+   * @throws IOException if anything goes wrong
+   * @throws InterruptedException if any thread has interrupted the current thread
+   */
+  public void run(String... args) throws IOException, InterruptedException {
+    String oldPath = null;
+    String newPath = null;
+    String patchPath = null;
+    Long totalRecompressionLimit = null;
+    Long deltaFriendlyOldBlobSizeLimit = null;
+    Mode mode = null;
+    Iterator<String> argIterator = new LinkedList<String>(Arrays.asList(args)).iterator();
+    while (argIterator.hasNext()) {
+      String arg = argIterator.next();
+      if ("--old".equals(arg)) {
+        oldPath = popOrDie(argIterator, "--old");
+      } else if ("--new".equals(arg)) {
+        newPath = popOrDie(argIterator, "--new");
+      } else if ("--patch".equals(arg)) {
+        patchPath = popOrDie(argIterator, "--patch");
+      } else if ("--generate".equals(arg)) {
+        mode = Mode.GENERATE;
+      } else if ("--apply".equals(arg)) {
+        mode = Mode.APPLY;
+      } else if ("--trl".equals(arg)) {
+        totalRecompressionLimit = Long.parseLong(popOrDie(argIterator, "--trl"));
+        if (totalRecompressionLimit < 0) {
+          exitWithUsage("--trl cannot be negative: " + totalRecompressionLimit);
+        }
+      } else if ("--dfobsl".equals(arg)) {
+        deltaFriendlyOldBlobSizeLimit = Long.parseLong(popOrDie(argIterator, "--dfobsl"));
+        if (deltaFriendlyOldBlobSizeLimit < 0) {
+          exitWithUsage("--dfobsl cannot be negative: " + deltaFriendlyOldBlobSizeLimit);
+        }
+      } else {
+        exitWithUsage("unknown argument: " + arg);
+      }
+    }
+    if (oldPath == null || newPath == null || patchPath == null || mode == null) {
+      exitWithUsage("missing required argument(s)");
+    }
+    if (mode == Mode.APPLY && totalRecompressionLimit != null) {
+      exitWithUsage("--trl can only be used with --generate");
+    }
+    if (mode == Mode.APPLY && deltaFriendlyOldBlobSizeLimit != null) {
+      exitWithUsage("--dfobsl can only be used with --generate");
+    }
+    File oldFile = getRequiredFileOrDie(oldPath, "old file");
+    if (mode == Mode.GENERATE) {
+      File newFile = getRequiredFileOrDie(newPath, "new file");
+      generatePatch(
+          oldFile,
+          newFile,
+          new File(patchPath),
+          totalRecompressionLimit,
+          deltaFriendlyOldBlobSizeLimit);
+    } else { // mode == Mode.APPLY
+      File patchFile = getRequiredFileOrDie(patchPath, "patch file");
+      applyPatch(oldFile, patchFile, new File(newPath));
+    }
+  }
+
+  /**
+   * Generate a specified patch to transform the specified old file to the specified new file.
+   *
+   * @param oldFile the old file (will be read)
+   * @param newFile the new file (will be read)
+   * @param patchFile the patch file (will be written)
+   * @param totalRecompressionLimit optional limit for total number of bytes of recompression to
+   *     allow in the resulting patch
+   * @param deltaFriendlyOldBlobSizeLimit optional limit for the size of the delta-friendly old
+   *     blob, which implies a limit on the temporary space needed to apply the generated patch
+   * @throws IOException if anything goes wrong
+   * @throws InterruptedException if any thread has interrupted the current thread
+   */
+  public static void generatePatch(
+      File oldFile,
+      File newFile,
+      File patchFile,
+      Long totalRecompressionLimit,
+      Long deltaFriendlyOldBlobSizeLimit)
+      throws IOException, InterruptedException {
+    List<RecommendationModifier> recommendationModifiers = new ArrayList<RecommendationModifier>();
+    if (totalRecompressionLimit != null) {
+      recommendationModifiers.add(new TotalRecompressionLimiter(totalRecompressionLimit));
+    }
+    if (deltaFriendlyOldBlobSizeLimit != null) {
+      recommendationModifiers.add(
+          new DeltaFriendlyOldBlobSizeLimiter(deltaFriendlyOldBlobSizeLimit));
+    }
+    FileByFileV1DeltaGenerator generator =
+        new FileByFileV1DeltaGenerator(
+            recommendationModifiers.toArray(new RecommendationModifier[] {}));
+    try (FileOutputStream patchOut = new FileOutputStream(patchFile);
+        BufferedOutputStream bufferedPatchOut = new BufferedOutputStream(patchOut)) {
+      generator.generateDelta(oldFile, newFile, bufferedPatchOut);
+      bufferedPatchOut.flush();
+    }
+  }
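+
+  // Illustrative programmatic use (a sketch; file names are hypothetical):
+  //
+  //   // Generate a patch, capping recompression work at 1,000,000 bytes and
+  //   // leaving the delta-friendly old blob size unlimited:
+  //   FileByFileTool.generatePatch(
+  //       new File("old.zip"),
+  //       new File("new.zip"),
+  //       new File("patch.fbf"),
+  //       1000000L,
+  //       null);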
+
+  /**
+   * Apply a specified patch to the specified old file, creating the specified new file.
+   * @param oldFile the old file (will be read)
+   * @param patchFile the patch file (will be read)
+   * @param newFile the new file (will be written)
+   * @throws IOException if anything goes wrong
+   */
+  public static void applyPatch(File oldFile, File patchFile, File newFile) throws IOException {
+    // Locate the system's default temporary directory by creating, then
+    // immediately deleting, a throwaway temp file; the applier uses this
+    // directory for its intermediate files.
+    File tempFile = File.createTempFile("fbftool", "tmp");
+    File tempDir = tempFile.getParentFile();
+    tempFile.delete();
+    FileByFileV1DeltaApplier applier = new FileByFileV1DeltaApplier(tempDir);
+    try (FileInputStream patchIn = new FileInputStream(patchFile);
+        BufferedInputStream bufferedPatchIn = new BufferedInputStream(patchIn);
+        FileOutputStream newOut = new FileOutputStream(newFile);
+        BufferedOutputStream bufferedNewOut = new BufferedOutputStream(newOut)) {
+      applier.applyDelta(oldFile, bufferedPatchIn, bufferedNewOut);
+      bufferedNewOut.flush();
+    }
+  }
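+
+  // Illustrative programmatic use (a sketch; file names are hypothetical):
+  //
+  //   FileByFileTool.applyPatch(
+  //       new File("old.zip"), new File("patch.fbf"), new File("new.zip"));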
+
+  @Override
+  protected String getUsage() {
+    return USAGE;
+  }
+}
diff --git a/tools/src/main/java/com/google/archivepatcher/tools/PatchExplainerTool.java b/tools/src/main/java/com/google/archivepatcher/tools/PatchExplainerTool.java
new file mode 100644
index 0000000..a5589c5
--- /dev/null
+++ b/tools/src/main/java/com/google/archivepatcher/tools/PatchExplainerTool.java
@@ -0,0 +1,246 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.tools;
+
+import com.google.archivepatcher.explainer.EntryExplanation;
+import com.google.archivepatcher.explainer.PatchExplainer;
+import com.google.archivepatcher.explainer.PatchExplanation;
+import com.google.archivepatcher.generator.DeltaFriendlyOldBlobSizeLimiter;
+import com.google.archivepatcher.generator.RecommendationModifier;
+import com.google.archivepatcher.generator.RecommendationReason;
+import com.google.archivepatcher.generator.TotalRecompressionLimiter;
+import com.google.archivepatcher.generator.bsdiff.BsDiffDeltaGenerator;
+import com.google.archivepatcher.shared.DeflateCompressor;
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.UnsupportedEncodingException;
+import java.text.NumberFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * Simple command-line tool for explaining patches.
+ */
+public class PatchExplainerTool extends AbstractTool {
+
+  /** Usage instructions for the command line. */
+  private static final String USAGE =
+      "java -cp <classpath> com.google.archivepatcher.tools.PatchExplainerTool <options>\n"
+          + "\nOptions:\n"
+          + "  --old           the old file\n"
+          + "  --new           the new file\n"
+          + "  --trl           optionally, the total bytes of recompression to allow (see below)\n"
+          + "  --dfobsl        optionally, a limit on the total size of the delta-friendly old blob (see below)\n"
+          + "  --json          output JSON results instead of plain text\n"
+          + "\nTotal Recompression Limit (trl):\n"
+          + "  When generating a patch, a limit can be specified on the total number of bytes to\n"
+          + "  allow to be recompressed during the patch apply process. This can be for a variety\n"
+          + "  of reasons, with the most obvious being to limit the amount of effort that has to\n"
+          + "  be expended applying the patch on the target platform. To properly explain a\n"
+          + "  patch that had such a limitation, it is necessary to specify the same limitation\n"
+          + "  here.\n"
+          + "\nDelta Friendly Old Blob Size Limit (dfobsl):\n"
+          + "  When generating a patch, a limit can be specified on the total size of the delta-\n"
+          + "  friendly old blob. This implicitly limits the size of the temporary file that\n"
+          + "  needs to be created when applying the patch. The size limit is \"soft\" in that \n"
+          + "  the delta-friendly old blob needs to at least contain the original data that was\n"
+          + "  within it; but the limit specified here will constrain any attempt to uncompress\n"
+          + "  the content. If the limit is less than or equal to the size of the old file, no\n"
+          + "  uncompression will be performed at all. Otherwise, the old file can expand into\n"
+          + "  delta-friendly old blob until the size reaches this limit.\n"
+          + "\nExamples:\n"
+          + "  To explain a patch from OLD to NEW, dumping plain human-readable text output:\n"
+          + "    java -cp <classpath> com.google.archivepatcher.tools.PatchExplainerTool \\\n"
+          + "      --old OLD --new NEW\n"
+          + "  To explain a patch from OLD to NEW, dumping JSON-formatted output:\n"
+          + "    java -cp <classpath> com.google.archivepatcher.tools.PatchExplainerTool \\\n"
+          + "      --old OLD --new NEW --json\n"
+          + "  To explain a patch from OLD to NEW, limiting to 1,000,000 recompress bytes:\n"
+          + "    java -cp <classpath> com.google.archivepatcher.tools.PatchExplainerTool \\\n"
+          + "      --old OLD --new NEW --trl 1000000\n";
+
+  /**
+   * Runs the tool. See usage instructions for more information.
+   *
+   * @param args command line arguments
+   * @throws IOException if anything goes wrong
+   * @throws InterruptedException if the thread is interrupted
+   */
+  public static void main(String... args) throws IOException, InterruptedException {
+    new PatchExplainerTool().run(args);
+  }
+
+  /**
+   * Used for pretty-printing sizes and counts.
+   */
+  private final NumberFormat format = NumberFormat.getNumberInstance();
+
+  /**
+   * Runs the tool. See usage instructions for more information.
+   *
+   * @param args command line arguments
+   * @throws IOException if anything goes wrong
+   * @throws InterruptedException if the thread is interrupted
+   */
+  public void run(String... args) throws IOException, InterruptedException {
+    String oldPath = null;
+    String newPath = null;
+    Long totalRecompressionLimit = null;
+    Long deltaFriendlyOldBlobSizeLimit = null;
+    boolean outputJson = false;
+    Iterator<String> argIterator = new LinkedList<String>(Arrays.asList(args)).iterator();
+    while (argIterator.hasNext()) {
+      String arg = argIterator.next();
+      if ("--old".equals(arg)) {
+        oldPath = popOrDie(argIterator, "--old");
+      } else if ("--new".equals(arg)) {
+        newPath = popOrDie(argIterator, "--new");
+      } else if ("--json".equals(arg)) {
+        outputJson = true;
+      } else if ("--trl".equals(arg)) {
+        totalRecompressionLimit = Long.parseLong(popOrDie(argIterator, "--trl"));
+        if (totalRecompressionLimit < 0) {
+          exitWithUsage("--trl cannot be negative: " + totalRecompressionLimit);
+        }
+      } else if ("--dfobsl".equals(arg)) {
+        deltaFriendlyOldBlobSizeLimit = Long.parseLong(popOrDie(argIterator, "--dfobsl"));
+        if (deltaFriendlyOldBlobSizeLimit < 0) {
+          exitWithUsage("--dfobsl cannot be negative: " + deltaFriendlyOldBlobSizeLimit);
+        }
+      } else {
+        exitWithUsage("unknown argument: " + arg);
+      }
+    }
+    if (oldPath == null || newPath == null) {
+      exitWithUsage("missing required argument(s)");
+    }
+    File oldFile = getRequiredFileOrDie(oldPath, "old file");
+    File newFile = getRequiredFileOrDie(newPath, "new file");
+    DeflateCompressor compressor = new DeflateCompressor();
+    compressor.setCaching(true);
+    compressor.setCompressionLevel(9);
+    // Pass the tuned compressor (not a fresh instance) so that the caching
+    // and level-9 settings actually take effect.
+    PatchExplainer explainer =
+        new PatchExplainer(compressor, new BsDiffDeltaGenerator());
+    List<RecommendationModifier> recommendationModifiers = new ArrayList<RecommendationModifier>();
+    if (totalRecompressionLimit != null) {
+      recommendationModifiers.add(new TotalRecompressionLimiter(totalRecompressionLimit));
+    }
+    if (deltaFriendlyOldBlobSizeLimit != null) {
+      recommendationModifiers.add(
+          new DeltaFriendlyOldBlobSizeLimiter(deltaFriendlyOldBlobSizeLimit));
+    }
+    PatchExplanation patchExplanation =
+        new PatchExplanation(
+            explainer.explainPatch(
+                oldFile,
+                newFile,
+                recommendationModifiers.toArray(new RecommendationModifier[] {})));
+    if (outputJson) {
+      // Flush explicitly: a PrintWriter wrapping System.out buffers its
+      // output and does not flush automatically when the JVM exits.
+      PrintWriter jsonWriter = new PrintWriter(System.out);
+      patchExplanation.writeJson(jsonWriter);
+      jsonWriter.flush();
+    } else {
+      dumpPlainText(patchExplanation);
+    }
+  }
+
+  private void dumpPlainText(PatchExplanation patchExplanation) {
+    dumpPlainText(patchExplanation.getExplainedAsNew());
+    dumpPlainText(patchExplanation.getExplainedAsChanged());
+    dumpPlainText(patchExplanation.getExplainedAsUnchangedOrFree());
+    System.out.println("----------");
+    System.out.println(
+        "Num unchanged files: " + patchExplanation.getExplainedAsUnchangedOrFree().size());
+    System.out.println(
+        "Num changed files:   "
+            + patchExplanation.getExplainedAsChanged().size()
+            + " (estimated patch size "
+            + format.format(patchExplanation.getEstimatedChangedSize())
+            + " bytes)");
+    System.out.println(
+        "Num new files:       "
+            + patchExplanation.getExplainedAsNew().size()
+            + " (estimated patch size "
+            + format.format(patchExplanation.getEstimatedNewSize())
+            + " bytes)");
+    System.out.println(
+        "Num files changed but forced to stay compressed by the total recompression limit: "
+            + patchExplanation.getExplainedAsResourceConstrained().size()
+            + " (estimated patch size "
+            + format.format(patchExplanation.getEstimatedResourceConstrainedSize())
+            + " bytes)");
+    long estimatedTotalSize =
+        patchExplanation.getEstimatedChangedSize()
+            + patchExplanation.getEstimatedNewSize()
+            + patchExplanation.getEstimatedResourceConstrainedSize();
+    System.out.println(
+        "Estimated total patch size: " + format.format(estimatedTotalSize) + " bytes");
+  }
+
+  private void dumpPlainText(List<EntryExplanation> explanations) {
+    for (EntryExplanation entryExplanation : explanations) {
+      String text = toPlainText(entryExplanation);
+      if (text != null) {
+        System.out.println(text);
+      }
+    }
+  }
+
+  /**
+   * Returns the path from an {@link EntryExplanation} as a UTF-8 string.
+   * @param explanation the {@link EntryExplanation} to extract the path from
+   * @return as described
+   */
+  private static String path(EntryExplanation explanation) {
+    try {
+      return new String(explanation.getPath().getData(), "UTF-8");
+    } catch (UnsupportedEncodingException e) {
+      throw new RuntimeException("System doesn't support UTF-8", e);
+    }
+  }
+
+  private static String toPlainText(EntryExplanation explanation) {
+    String path = path(explanation);
+    if (explanation.isNew()) {
+      return "New file '"
+          + path
+          + "', approximate size of data in patch: "
+          + explanation.getCompressedSizeInPatch()
+          + " bytes";
+    }
+    if (explanation.getCompressedSizeInPatch() > 0) {
+      String metadata = "";
+      if (explanation.getReasonIncludedIfNotNew() == RecommendationReason.RESOURCE_CONSTRAINED) {
+        metadata = " (forced to stay compressed by a limit)";
+      }
+      return "Changed file '"
+          + path
+          + "'"
+          + metadata
+          + ", approximate size of data in patch: "
+          + explanation.getCompressedSizeInPatch()
+          + " bytes";
+    } else {
+      return "Unchanged or zero-delta-cost file '" + path + "'";
+    }
+  }
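+
+  // Illustrative plain-text output (paths and sizes hypothetical):
+  //   New file 'assets/logo.png', approximate size of data in patch: 4096 bytes
+  //   Changed file 'classes.dex' (forced to stay compressed by a limit),
+  //       approximate size of data in patch: 123456 bytes
+  //   Unchanged or zero-delta-cost file 'res/values/strings.xml'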
+
+  @Override
+  protected String getUsage() {
+    return USAGE;
+  }
+}