Upgrade archive-patcher to v1.1

This version includes a few bug fixes on top of v1.0.

Bug: 129498355
Test: atest CtsLibcoreTestCases:libcore.java.util.zip.DeflateRegressionTest
Change-Id: I17dcd7ceaf067fbcb87e870d0940c261e0bdb4cb
diff --git a/METADATA b/METADATA
index 8d38c44..9523363 100644
--- a/METADATA
+++ b/METADATA
@@ -7,13 +7,13 @@
   }
   url {
     type: ARCHIVE
-    value: "https://github.com/andrewhayden/archive-patcher/archive/1.0.zip"
+    value: "https://github.com/andrewhayden/archive-patcher/archive/v1.1.zip"
   }
-  version: "1.0"
+  version: "v1.1"
   license_type: RECIPROCAL
   last_upgrade_date {
-    year: 2018
-    month: 8
-    day: 24
+    year: 2019
+    month: 3
+    day: 25
   }
 }
diff --git a/README.md b/README.md
index e7763ee..9dd0b84 100644
--- a/README.md
+++ b/README.md
@@ -392,3 +392,6 @@
 * N. Jesper Larsson and Kunihiko Sadakane - for their paper "[Faster Suffix Sorting](http://www.larsson.dogma.net/ssrev-tr.pdf)", basis of the quick suffix sort algorithm
 * PKWARE, Inc. - creators and stewards of the [zip specification](https://support.pkware.com/display/PKZIP/APPNOTE)
 * Yuta Mori - for the C implementation of the "div suffix sort" ([libdivsufsort](http://code.google.com/p/libdivsufsort/))
+
+# Contact
+archive-patcher-contacts@google.com
diff --git a/applier/src/main/java/com/google/archivepatcher/applier/bsdiff/BsPatch.java b/applier/src/main/java/com/google/archivepatcher/applier/bsdiff/BsPatch.java
index a7b2803..cfa8ded 100644
--- a/applier/src/main/java/com/google/archivepatcher/applier/bsdiff/BsPatch.java
+++ b/applier/src/main/java/com/google/archivepatcher/applier/bsdiff/BsPatch.java
@@ -15,13 +15,14 @@
 package com.google.archivepatcher.applier.bsdiff;
 
 import com.google.archivepatcher.applier.PatchFormatException;
-
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.io.RandomAccessFile;
+import java.util.logging.Level;
+import java.util.logging.Logger;
 
 /**
  * A Java implementation of the "bspatch" algorithm based on the BSD-2 licensed source code
@@ -29,9 +30,10 @@
  * size of 2GB for all binaries involved (old, new and patch binaries).
  */
 public class BsPatch {
-  /**
-   * Standard header found at the start of every patch.
-   */
+  /** If true, output verbose debugging information. */
+  private static final boolean VERBOSE = false;
+  
+  /** Standard header found at the start of every patch. */
   private static final String SIGNATURE = "ENDSLEY/BSDIFF43";
 
   /**
@@ -56,6 +58,9 @@
    */
   private static final int OUTPUT_STREAM_BUFFER_SIZE = 16 * 1024;
 
+  /** An instance of Java logger for use with the {@code VERBOSE} mode. */
+  private static final Logger logger = Logger.getLogger(BsPatch.class.getName());
+
   /**
    * Applies a patch from |patchData| to the data in |oldData|, writing the result to |newData|.
    *
@@ -68,22 +73,39 @@
   public static void applyPatch(
       RandomAccessFile oldData, OutputStream newData, InputStream patchData)
       throws PatchFormatException, IOException {
+    applyPatch(oldData, newData, patchData, null);
+  }
+
+  /**
+   * Applies a patch from |patchData| to the data in |oldData|, writing the result to |newData|
+   * while verifying that the expected new size is obtained.
+   *
+   * @param oldData data to which the patch should be applied
+   * @param newData stream to write the new artifact to
+   * @param patchData stream to read patch instructions from
+   * @param expectedNewSize the expected number of bytes in |newData| when patching completes. Can
+   *     be null, in which case no size check is performed.
+   * @throws PatchFormatException if the patch stream is invalid
+   * @throws IOException if unable to read or write any of the data
+   */
+  public static void applyPatch(
+      RandomAccessFile oldData, OutputStream newData, InputStream patchData, Long expectedNewSize)
+      throws PatchFormatException, IOException {
     patchData = new BufferedInputStream(patchData, PATCH_STREAM_BUFFER_SIZE);
     newData = new BufferedOutputStream(newData, OUTPUT_STREAM_BUFFER_SIZE);
     try {
-      applyPatchInternal(oldData, newData, patchData);
+      applyPatchInternal(oldData, newData, patchData, expectedNewSize);
     } finally {
       newData.flush();
     }
   }
 
-  /**
-   * Does the work of the public applyPatch method.
-   */
+  /** Does the work of the public applyPatch method. */
   private static void applyPatchInternal(
       final RandomAccessFile oldData,
       final OutputStream newData,
-      final InputStream patchData)
+      final InputStream patchData,
+      final Long expectedNewSize)
       throws PatchFormatException, IOException {
     final byte[] signatureBuffer = new byte[SIGNATURE.length()];
     try {
@@ -106,6 +128,9 @@
     if (newSize < 0 || newSize > Integer.MAX_VALUE) {
       throw new PatchFormatException("bad newSize");
     }
+    if (expectedNewSize != null && expectedNewSize != newSize) {
+      throw new PatchFormatException("expectedNewSize != newSize");
+    }
 
     // These buffers are used for performing transformations and copies. They are not stateful.
     final byte[] buffer1 = new byte[PATCH_BUFFER_SIZE];
@@ -114,6 +139,7 @@
     // Offsets into |oldData| and |newData|.
     long oldDataOffset = 0; // strobes |oldData| in order specified by the patch file
     long newDataBytesWritten = 0; // monotonically increases from 0 .. |expectedNewSize|
+    int numDirectives = 0; // only used for debugging output
 
     while (newDataBytesWritten < newSize) {
       // Read "control data" for the operation. There are three values here:
@@ -133,6 +159,24 @@
       //    be accumulated into |oldDataOffset| while |copySegmentLength| must NOT be.
       final long offsetToNextInput = readBsdiffLong(patchData);
 
+      if (VERBOSE) {
+        numDirectives++;
+        logger.log(
+            Level.FINE,
+            "Patch directive "
+                + numDirectives
+                + ": oldDataOffset="
+                + oldDataOffset
+                + ", newDataBytesWritten="
+                + newDataBytesWritten
+                + ", diffSegmentLength="
+                + diffSegmentLength
+                + ", copySegmentLength="
+                + copySegmentLength
+                + ", offsetToNextInput="
+                + offsetToNextInput);
+      }
+
       // Sanity-checks
       if (diffSegmentLength < 0 || diffSegmentLength > Integer.MAX_VALUE) {
         throw new PatchFormatException("bad diffSegmentLength");
diff --git a/applier/src/main/java/com/google/archivepatcher/applier/gdiff/Gdiff.java b/applier/src/main/java/com/google/archivepatcher/applier/gdiff/Gdiff.java
new file mode 100644
index 0000000..4315e90
--- /dev/null
+++ b/applier/src/main/java/com/google/archivepatcher/applier/gdiff/Gdiff.java
@@ -0,0 +1,233 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.applier.gdiff;
+
+import com.google.archivepatcher.applier.PatchFormatException;
+import java.io.BufferedInputStream;
+import java.io.DataInputStream;
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.RandomAccessFile;
+
+/** Clean implementation of http://www.w3.org/TR/NOTE-gdiff-19970901 */
+public class Gdiff {
+
+  /** Magic bytes at start of file */
+  private static final int GDIFF_FILE_MAGIC = 0xD1FFD1FF;
+  /** Version code at start of file */
+  private static final int GDIFF_FILE_VERSION = 4;
+
+  /** The end of the patch */
+  private static final int EOF = 0;
+  /** Codes 1..246 represent inline streams of 1..246 bytes */
+  // private static final int DATA_MIN = 1;
+  // private static final int DATA_MAX = 246;
+  /** Copy inline data. The next two bytes are the number of bytes to copy */
+  private static final int DATA_USHORT = 247;
+  /** Copy inline data. The next four bytes are the number of bytes to copy */
+  private static final int DATA_INT = 248;
+  /**
+   * The copy commands are defined as follows: The first argument is the offset in the original
+   * file, and the second argument is the number of bytes to copy to the new file.
+   */
+  private static final int COPY_USHORT_UBYTE = 249;
+
+  private static final int COPY_USHORT_USHORT = 250;
+  private static final int COPY_USHORT_INT = 251;
+  private static final int COPY_INT_UBYTE = 252;
+  private static final int COPY_INT_USHORT = 253;
+  private static final int COPY_INT_INT = 254;
+  private static final int COPY_LONG_INT = 255;
+
+  /**
+   * We're patching APKs which are big. We might as well use a big buffer. TODO: 64k? This would
+   * allow us to do the USHORT copies in a single pass.
+   */
+  private static final int COPY_BUFFER_SIZE = 16 * 1024;
+
+  /**
+   * The patch is typically compressed and the input stream is decompressing on-the-fly. A small
+   * buffer greatly improves efficiency on complicated patches with lots of short directives. See
+   * b/21109650 for more information.
+   */
+  private static final int PATCH_STREAM_BUFFER_SIZE = 4 * 1024;
+
+  /**
+   * Apply a patch to a file.
+   *
+   * @param inputFile base file
+   * @param patchFile patch file
+   * @param output output stream to write the file to
+   * @param expectedOutputSize expected size of the output.
+   * @throws IOException on file I/O as well as when patch under/over run happens.
+   */
+  public static long patch(
+      RandomAccessFile inputFile,
+      InputStream patchFile,
+      OutputStream output,
+      long expectedOutputSize)
+      throws IOException {
+    byte[] buffer = new byte[COPY_BUFFER_SIZE];
+    long outputSize = 0;
+
+    // Wrap patchfile with a small buffer to cushion the 1,2,4,8 byte reads
+    patchFile = new BufferedInputStream(patchFile, PATCH_STREAM_BUFFER_SIZE);
+    // Use DataInputStream to help with big-endian data
+    DataInputStream patchDataStream = new DataInputStream(patchFile);
+    // Confirm first 4 bytes are magic signature.
+    int magic = patchDataStream.readInt();
+    if (magic != GDIFF_FILE_MAGIC) {
+      throw new PatchFormatException("Unexpected magic=" + String.format("%x", magic));
+    }
+    // Confirm patch format version
+    int version = patchDataStream.read();
+    if (version != GDIFF_FILE_VERSION) {
+      throw new PatchFormatException("Unexpected version=" + version);
+    }
+
+    try {
+      // Start copying
+      while (true) {
+        int copyLength;
+        long copyOffset;
+        long maxCopyLength = expectedOutputSize - outputSize;
+        int command = patchDataStream.read();
+        switch (command) {
+          case -1:
+            throw new IOException("Patch file overrun");
+          case EOF:
+            return outputSize;
+          case DATA_USHORT:
+            copyLength = patchDataStream.readUnsignedShort();
+            copyFromPatch(buffer, patchDataStream, output, copyLength, maxCopyLength);
+            break;
+          case DATA_INT:
+            copyLength = patchDataStream.readInt();
+            copyFromPatch(buffer, patchDataStream, output, copyLength, maxCopyLength);
+            break;
+          case COPY_USHORT_UBYTE:
+            copyOffset = patchDataStream.readUnsignedShort();
+            copyLength = patchDataStream.read();
+            if (copyLength == -1) {
+              throw new IOException("Unexpected end of patch");
+            }
+            copyFromOriginal(buffer, inputFile, output, copyOffset, copyLength, maxCopyLength);
+            break;
+          case COPY_USHORT_USHORT:
+            copyOffset = patchDataStream.readUnsignedShort();
+            copyLength = patchDataStream.readUnsignedShort();
+            copyFromOriginal(buffer, inputFile, output, copyOffset, copyLength, maxCopyLength);
+            break;
+          case COPY_USHORT_INT:
+            copyOffset = patchDataStream.readUnsignedShort();
+            copyLength = patchDataStream.readInt();
+            copyFromOriginal(buffer, inputFile, output, copyOffset, copyLength, maxCopyLength);
+            break;
+          case COPY_INT_UBYTE:
+            copyOffset = patchDataStream.readInt();
+            copyLength = patchDataStream.read();
+            if (copyLength == -1) {
+              throw new IOException("Unexpected end of patch");
+            }
+            copyFromOriginal(buffer, inputFile, output, copyOffset, copyLength, maxCopyLength);
+            break;
+          case COPY_INT_USHORT:
+            copyOffset = patchDataStream.readInt();
+            copyLength = patchDataStream.readUnsignedShort();
+            copyFromOriginal(buffer, inputFile, output, copyOffset, copyLength, maxCopyLength);
+            break;
+          case COPY_INT_INT:
+            copyOffset = patchDataStream.readInt();
+            copyLength = patchDataStream.readInt();
+            copyFromOriginal(buffer, inputFile, output, copyOffset, copyLength, maxCopyLength);
+            break;
+          case COPY_LONG_INT:
+            copyOffset = patchDataStream.readLong();
+            copyLength = patchDataStream.readInt();
+            copyFromOriginal(buffer, inputFile, output, copyOffset, copyLength, maxCopyLength);
+            break;
+          default:
+            // The only possible bytes remaining are DATA_MIN through DATA_MAX,
+            // barring any programming error.
+            copyLength = command;
+            copyFromPatch(buffer, patchDataStream, output, copyLength, maxCopyLength);
+            break;
+        }
+        outputSize += copyLength;
+      }
+    } finally {
+      output.flush();
+    }
+  }
+
+  /** Copy a series of inline bytes from the patch file to the output file */
+  private static void copyFromPatch(
+      byte[] buffer,
+      DataInputStream patchDataStream,
+      OutputStream output,
+      int copyLength,
+      long maxCopyLength)
+      throws IOException {
+    if (copyLength < 0) {
+      throw new IOException("copyLength negative");
+    }
+    if (copyLength > maxCopyLength) {
+      throw new IOException("Output length overrun");
+    }
+    try {
+      while (copyLength > 0) {
+        int spanLength = (copyLength < COPY_BUFFER_SIZE) ? copyLength : COPY_BUFFER_SIZE;
+        patchDataStream.readFully(buffer, 0, spanLength);
+        output.write(buffer, 0, spanLength);
+        copyLength -= spanLength;
+      }
+    } catch (EOFException e) {
+      throw new IOException("patch underrun");
+    }
+  }
+
+  /** Copy a series of bytes from the input (original) file to the output file */
+  private static void copyFromOriginal(
+      byte[] buffer,
+      RandomAccessFile inputFile,
+      OutputStream output,
+      long inputOffset,
+      int copyLength,
+      long maxCopyLength)
+      throws IOException {
+    if (copyLength < 0) {
+      throw new IOException("copyLength negative");
+    }
+    if (inputOffset < 0) {
+      throw new IOException("inputOffset negative");
+    }
+    if (copyLength > maxCopyLength) {
+      throw new IOException("Output length overrun");
+    }
+    try {
+      inputFile.seek(inputOffset);
+      while (copyLength > 0) {
+        int spanLength = (copyLength < COPY_BUFFER_SIZE) ? copyLength : COPY_BUFFER_SIZE;
+        inputFile.readFully(buffer, 0, spanLength);
+        output.write(buffer, 0, spanLength);
+        copyLength -= spanLength;
+      }
+    } catch (EOFException e) {
+      throw new IOException("patch underrun", e);
+    }
+  }
+}
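
Illustrative sketch of applying a GDIFF stream with the new class; it reuses the worked example
from the GDIFF note (also exercised by GdiffTest below), and the temp-file handling is hypothetical:

import com.google.archivepatcher.applier.gdiff.Gdiff;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.RandomAccessFile;

class GdiffUsageSketch {
  static byte[] run() throws Exception {
    byte[] oldBytes = "ABCDEFG".getBytes("US-ASCII");
    byte[] patch = {
      (byte) 0xd1, (byte) 0xff, (byte) 0xd1, (byte) 0xff, 4, // magic + version
      (byte) 249, 0, 0, 2,                                   // COPY_USHORT_UBYTE offset=0, len=2
      2, 'X', 'Y',                                           // inline data, 2 bytes
      (byte) 249, 0, 2, 2,                                   // COPY_USHORT_UBYTE offset=2, len=2
      (byte) 249, 0, 1, 4,                                   // COPY_USHORT_UBYTE offset=1, len=4
      0                                                      // EOF
    };
    File oldFile = File.createTempFile("gdiff-sketch", null);
    try (FileOutputStream out = new FileOutputStream(oldFile)) {
      out.write(oldBytes);
    }
    ByteArrayOutputStream newData = new ByteArrayOutputStream();
    try (RandomAccessFile oldData = new RandomAccessFile(oldFile, "r")) {
      Gdiff.patch(oldData, new ByteArrayInputStream(patch), newData, 10);
    }
    return newData.toByteArray(); // "ABXYCDBCDE"
  }
}
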
diff --git a/applier/src/test/java/com/google/archivepatcher/applier/bsdiff/BsPatchTest.java b/applier/src/test/java/com/google/archivepatcher/applier/bsdiff/BsPatchTest.java
index ddb1c87..9c97692 100644
--- a/applier/src/test/java/com/google/archivepatcher/applier/bsdiff/BsPatchTest.java
+++ b/applier/src/test/java/com/google/archivepatcher/applier/bsdiff/BsPatchTest.java
@@ -15,14 +15,6 @@
 package com.google.archivepatcher.applier.bsdiff;
 
 import com.google.archivepatcher.applier.PatchFormatException;
-
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.JUnit4;
-
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.File;
@@ -31,6 +23,12 @@
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.io.RandomAccessFile;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
 
 /**
  * Tests for {@link BsPatch}.
@@ -178,6 +176,29 @@
       Assert.assertEquals("bad signature", actual);
     }
   }
+  
+  @Test
+  public void testApplyPatch_NewLengthMismatch() throws Exception {
+    createEmptyOldFile(10);
+    InputStream patchIn =
+        makePatch(
+            SIGNATURE,
+            10, // newLength (does not match the expectedNewSize passed below)
+            10, // diffSegmentLength
+            0, // copySegmentLength
+            0, // offsetToNextInput
+            new byte[10] // addends
+            );
+    ByteArrayOutputStream newData = new ByteArrayOutputStream();
+    try {
+      BsPatch.applyPatch(new RandomAccessFile(oldFile, "r"), newData, patchIn, (long) 10 + 1);
+      Assert.fail("Read patch with mismatched newLength");
+    } catch (PatchFormatException expected) {
+      // No way to mock the internal logic, so resort to testing exception string for coverage
+      String actual = expected.getMessage();
+      Assert.assertEquals("expectedNewSize != newSize", actual);
+    }
+  }
 
   @Test
   public void testApplyPatch_NewLengthNegative() throws Exception {
@@ -410,22 +431,8 @@
   @Test
   public void testReadBsdiffLong() throws Exception {
     byte[] data = {
-      (byte) 0x78,
-      (byte) 0x56,
-      (byte) 0x34,
-      (byte) 0x12,
-      (byte) 0,
-      (byte) 0,
-      (byte) 0,
-      (byte) 0,
-      (byte) 0xef,
-      (byte) 0xbe,
-      (byte) 0xad,
-      (byte) 0x0e,
-      (byte) 0,
-      (byte) 0,
-      (byte) 0,
-      (byte) 0
+      (byte) 0x78, (byte) 0x56, (byte) 0x34, (byte) 0x12, (byte) 0, (byte) 0, (byte) 0, (byte) 0,
+      (byte) 0xef, (byte) 0xbe, (byte) 0xad, (byte) 0x0e, (byte) 0, (byte) 0, (byte) 0, (byte) 0
     };
     ByteArrayInputStream inputStream = new ByteArrayInputStream(data);
     long actual = BsPatch.readBsdiffLong(inputStream);
@@ -441,14 +448,8 @@
         BsPatch.readBsdiffLong(
             new ByteArrayInputStream(
                 new byte[] {
-                  (byte) 0x00,
-                  (byte) 0x00,
-                  (byte) 0x00,
-                  (byte) 0x00,
-                  (byte) 0x00,
-                  (byte) 0x00,
-                  (byte) 0x00,
-                  (byte) 0x00
+                  (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x00,
+                  (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x00
                 }));
     Assert.assertEquals(expected, actual);
   }
@@ -460,14 +461,8 @@
         BsPatch.readBsdiffLong(
             new ByteArrayInputStream(
                 new byte[] {
-                  (byte) 0xff,
-                  (byte) 0xff,
-                  (byte) 0xff,
-                  (byte) 0x7f,
-                  (byte) 0x00,
-                  (byte) 0x00,
-                  (byte) 0x00,
-                  (byte) 0x00
+                  (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0x7f,
+                  (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x00
                 }));
     Assert.assertEquals(expected, actual);
   }
@@ -479,14 +474,8 @@
         BsPatch.readBsdiffLong(
             new ByteArrayInputStream(
                 new byte[] {
-                  (byte) 0x00,
-                  (byte) 0x00,
-                  (byte) 0x00,
-                  (byte) 0x80,
-                  (byte) 0x00,
-                  (byte) 0x00,
-                  (byte) 0x00,
-                  (byte) 0x80
+                  (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x80,
+                  (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x80
                 }));
     Assert.assertEquals(expected, actual);
   }
@@ -498,14 +487,8 @@
         BsPatch.readBsdiffLong(
             new ByteArrayInputStream(
                 new byte[] {
-                  (byte) 0xff,
-                  (byte) 0xff,
-                  (byte) 0xff,
-                  (byte) 0xff,
-                  (byte) 0xff,
-                  (byte) 0xff,
-                  (byte) 0xff,
-                  (byte) 0x7f
+                  (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff,
+                  (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0x7f
                 }));
     Assert.assertEquals(expected, actual);
   }
@@ -519,14 +502,8 @@
         BsPatch.readBsdiffLong(
             new ByteArrayInputStream(
                 new byte[] {
-                  (byte) 0xff,
-                  (byte) 0xff,
-                  (byte) 0xff,
-                  (byte) 0xff,
-                  (byte) 0xff,
-                  (byte) 0xff,
-                  (byte) 0xff,
-                  (byte) 0xff
+                  (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff,
+                  (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff
                 }));
     Assert.assertEquals(expected, actual);
   }
@@ -538,14 +515,8 @@
       BsPatch.readBsdiffLong(
           new ByteArrayInputStream(
               new byte[] {
-                (byte) 0x00,
-                (byte) 0x00,
-                (byte) 0x00,
-                (byte) 0x00,
-                (byte) 0x00,
-                (byte) 0x00,
-                (byte) 0x00,
-                (byte) 0x80
+                (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x00,
+                (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x80
               }));
       Assert.fail("Tolerated negative zero");
     } catch (PatchFormatException expected) {
@@ -555,7 +526,7 @@
 
   @Test
   public void testReadFully() throws IOException {
-    final byte[] input = "this is a sample string to read".getBytes();
+    final byte[] input = "this is a sample string to read".getBytes("UTF-8");
     final ByteArrayInputStream inputStream = new ByteArrayInputStream(input);
     final byte[] dst = new byte[50];
 
diff --git a/applier/src/test/java/com/google/archivepatcher/applier/gdiff/GdiffTest.java b/applier/src/test/java/com/google/archivepatcher/applier/gdiff/GdiffTest.java
new file mode 100644
index 0000000..1907637
--- /dev/null
+++ b/applier/src/test/java/com/google/archivepatcher/applier/gdiff/GdiffTest.java
@@ -0,0 +1,334 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.applier.gdiff;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.Random;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/**
+ * A fairly comprehensive set of tests for sunny and rainy day conditions for GDiff operations.
+ * The intention is that a bad patch will *always* throw an IOException, either via explicit test
+ * or by known safety checks (e.g. reading from a nonexistent part of the input file.)
+ */
+@RunWith(JUnit4.class)
+public class GdiffTest {
+  /**
+   * Very lightweight smoke test using example in http://www.w3.org/TR/NOTE-gdiff-19970901
+   */
+  @Test
+  public void testExample() throws IOException {
+    byte[] oldBytes = new byte[] {
+        (byte) 'A', (byte) 'B', (byte) 'C', (byte) 'D', (byte) 'E', (byte) 'F', (byte) 'G' };
+    byte[] newBytes = new byte[] {
+        (byte) 'A', (byte) 'B', (byte) 'X', (byte) 'Y', (byte) 'C', (byte) 'D',
+        (byte) 'B', (byte) 'C', (byte) 'D', (byte) 'E' };
+    byte[] patch = new byte[] {
+        (byte) 0xd1, (byte) 0xff, (byte) 0xd1, (byte) 0xff, (byte) 4,  // magic+version
+        (byte) 249, 0, 0, 2,                                           // COPY_USHORT_UBYTE 0, 2
+        (byte) 2, (byte) 'X', (byte) 'Y',                              // DATA_2
+        (byte) 249, 0, 2, 2,                                           // COPY_USHORT_UBYTE 2, 2
+        (byte) 249, 0, 1, 4,                                           // COPY_USHORT_UBYTE 1, 4
+        0 };                                                           // EOF
+
+    // Create "input file".
+    File inputFile = File.createTempFile("testExample", null);
+    FileOutputStream writeInputFile = new FileOutputStream(inputFile);
+    writeInputFile.write(oldBytes);
+    writeInputFile.close();
+    RandomAccessFile readInputFile = new RandomAccessFile(inputFile, "r");
+
+    // Create "patch" file - this is just a stream
+    ByteArrayInputStream patchStream = new ByteArrayInputStream(patch);
+
+    // Create "output" file - this is just a stream
+    ByteArrayOutputStream outputStream = new ByteArrayOutputStream(newBytes.length);
+
+    long outputLength = Gdiff.patch(readInputFile, patchStream, outputStream, newBytes.length);
+    Assert.assertEquals(newBytes.length, outputLength);
+    Assert.assertArrayEquals(newBytes, outputStream.toByteArray());
+  }
+
+  /**
+   * A test to artificially (and minimally) exercise each opcode except for the full range of
+   * data opcodes.
+   *
+   *          01234567890123456
+   * Input:   ABCDEFGHIJ
+   * Output:  XYZABBCCDDEEFFGGH
+   */
+  @Test
+  public void testOpcodes() throws IOException {
+    byte[] oldBytes = new byte[] {
+        (byte) 'A', (byte) 'B', (byte) 'C', (byte) 'D', (byte) 'E', (byte) 'F', (byte) 'G',
+        (byte) 'H', (byte) 'I', (byte) 'J' };
+    byte[] newBytes = new byte[] {
+        (byte) 'X', (byte) 'Y', (byte) 'Z',
+        (byte) 'A', (byte) 'B', (byte) 'B', (byte) 'C', (byte) 'C', (byte) 'D', (byte) 'D',
+        (byte) 'E', (byte) 'E', (byte) 'F', (byte) 'F', (byte) 'G', (byte) 'G', (byte) 'H' };
+    byte[] patch = new byte[] {
+        (byte) 0xd1, (byte) 0xff, (byte) 0xd1, (byte) 0xff, (byte) 4,    // magic+version
+        (byte) 1, (byte) 'X',                                            // DATA_MIN (DATA_1)
+        (byte) 247, 0, 1, (byte) 'Y',                                    // DATA_USHORT
+        (byte) 248, 0, 0, 0, 1, (byte) 'Z',                              // DATA_INT
+        (byte) 249, 0, 0, 2,                                             // COPY_USHORT_UBYTE
+        (byte) 250, 0, 1, 0, 2,                                          // COPY_USHORT_USHORT
+        (byte) 251, 0, 2, 0, 0, 0, 2,                                    // COPY_USHORT_INT
+        (byte) 252, 0, 0, 0, 3, 2,                                       // COPY_INT_UBYTE
+        (byte) 253, 0, 0, 0, 4, 0, 2,                                    // COPY_INT_USHORT
+        (byte) 254, 0, 0, 0, 5, 0, 0, 0, 2,                              // COPY_INT_INT
+        (byte) 255, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 2,                  // COPY_LONG_INT
+        0 };                                                             // EOF
+
+    // Create "input file".
+    File inputFile = File.createTempFile("testExample", null);
+    FileOutputStream writeInputFile = new FileOutputStream(inputFile);
+    writeInputFile.write(oldBytes);
+    writeInputFile.close();
+    RandomAccessFile readInputFile = new RandomAccessFile(inputFile, "r");
+
+    // Create "patch" file - this is just a stream
+    ByteArrayInputStream patchStream = new ByteArrayInputStream(patch);
+
+    // Create "output" file - this is just a stream
+    ByteArrayOutputStream outputStream = new ByteArrayOutputStream(newBytes.length);
+
+    long outputLength = Gdiff.patch(readInputFile, patchStream, outputStream, newBytes.length);
+    Assert.assertEquals(newBytes.length, outputLength);
+    Assert.assertArrayEquals(newBytes, outputStream.toByteArray());
+  }
+
+  /**
+   * A test to exercise all of the data inline opcodes (commands 1..246)
+   */
+  @Test
+  public void testInlineDataCommands() throws IOException {
+    // We never read "input" so create an single, empty one.
+    File inputFile = File.createTempFile("testExample", null);
+    FileOutputStream writeInputFile = new FileOutputStream(inputFile);
+    writeInputFile.close();
+    RandomAccessFile readInputFile = new RandomAccessFile(inputFile, "r");
+
+    // First 5 bytes for copying into each patch
+    byte[] magicAndVersion = new byte[] {
+        (byte) 0xd1, (byte) 0xff, (byte) 0xd1, (byte) 0xff, (byte) 4 };
+
+    // Use a pseudo random source to fill each data stream differently
+    Random random = new Random();
+
+    for (int spanLength = 1; spanLength <= 246; spanLength++) {
+      // Generate data array (save for later to compare against output)
+      byte[] data = new byte[spanLength];
+      random.setSeed(spanLength);
+      random.nextBytes(data);
+
+      // The patch will consist of the following data:
+      //  magic+version
+      //  command "n"
+      //  "n" bytes
+      //  EOF
+      int patchLength = 5 + 1 + spanLength + 1;
+      byte[] patch = new byte[patchLength];
+      System.arraycopy(magicAndVersion, 0, patch, 0, magicAndVersion.length);
+      patch[5] = (byte) spanLength;
+      System.arraycopy(data, 0, patch, 6, spanLength);
+      patch[6 + spanLength] = 0;  // EOF
+
+      // Create "patch" file - this is just a stream
+      ByteArrayInputStream patchStream = new ByteArrayInputStream(patch);
+
+      // Create "output" file - this is just a stream
+      ByteArrayOutputStream outputStream = new ByteArrayOutputStream(data.length);
+
+      // Run the patch and check the output file
+      long outputLength = Gdiff.patch(readInputFile, patchStream, outputStream, data.length);
+      Assert.assertEquals(spanLength, outputLength);
+      Assert.assertArrayEquals(data, outputStream.toByteArray());
+    }
+  }
+
+  /**
+   * Tests for patch underrun (the patch terminates earlier than expected in each case).
+   *
+   * Cases:   1. Short magic bytes
+   *          2. No version
+   *          3. After an opcode (note - opcode arg underruns covered by testReaders_underrun())
+   *          4. During inline data
+   *          5. Missing EOF
+   */
+  @Test
+  public void testPatchUnderrun() throws IOException {
+    byte[] oldBytes = new byte[] {
+        (byte) 'A', (byte) 'B', (byte) 'C', (byte) 'D', (byte) 'E', (byte) 'F', (byte) 'G' };
+    byte[] patch = new byte[] {
+        (byte) 0xd1, (byte) 0xff, (byte) 0xd1, (byte) 0xff, (byte) 4,        // magic+version
+        (byte) 249, 0, 0, 2,                // COPY_USHORT_UBYTE 0, 2
+        (byte) 2, (byte) 'X', (byte) 'Y',   // DATA_2
+        (byte) 249, 0, 2, 2,                // COPY_USHORT_UBYTE 2, 2
+        (byte) 249, 0, 1, 4,                // COPY_USHORT_UBYTE 1, 4
+        0 };                                // EOF
+
+    checkExpectedIOException(oldBytes, -1, patch, 3, -1);       // Short magic bytes
+    checkExpectedIOException(oldBytes, -1, patch, 4, -1);       // No version
+    checkExpectedIOException(oldBytes, -1, patch, 6, -1);       // After an opcode
+    checkExpectedIOException(oldBytes, -1, patch, 11, -1);      // During inline data
+    checkExpectedIOException(oldBytes, -1, patch, 20, -1);      // Missing EOF
+  }
+
+  /**
+   * Tests for input underrun (the input terminates earlier than expected in each case).
+   * Reuses the "all opcodes" patch for maximum coverage.
+   */
+  @Test
+  public void testInputUnderrun() throws IOException {
+    byte[] oldBytes = new byte[] {
+        (byte) 'A', (byte) 'B', (byte) 'C', (byte) 'D', (byte) 'E', (byte) 'F', (byte) 'G',
+        (byte) 'H' };
+    byte[] patch = new byte[] {
+        (byte) 0xd1, (byte) 0xff, (byte) 0xd1, (byte) 0xff, (byte) 4,    // magic+version
+        (byte) 1, (byte) 'X',                                            // DATA_MIN (DATA_1)
+        (byte) 247, 0, 1, (byte) 'Y',                                    // DATA_USHORT
+        (byte) 248, 0, 0, 0, 1, (byte) 'Z',                              // DATA_INT
+        (byte) 249, 0, 0, 2,                                             // COPY_USHORT_UBYTE
+        (byte) 250, 0, 1, 0, 2,                                          // COPY_USHORT_USHORT
+        (byte) 251, 0, 2, 0, 0, 0, 2,                                    // COPY_USHORT_INT
+        (byte) 252, 0, 0, 0, 3, 2,                                       // COPY_INT_UBYTE
+        (byte) 253, 0, 0, 0, 4, 0, 2,                                    // COPY_INT_USHORT
+        (byte) 254, 0, 0, 0, 5, 0, 0, 0, 2,                              // COPY_INT_INT
+        (byte) 255, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 2,                  // COPY_LONG_INT
+        0 };                                                             // EOF
+
+    // The oldBytes array above is sufficient to satisfy the patch, but this loop terminates
+    // one byte short of using the entire input array.
+    for (int inputLimit = 0; inputLimit < oldBytes.length; inputLimit++) {
+      checkExpectedIOException(oldBytes, inputLimit, patch, -1, -1);
+    }
+  }
+
+  /**
+   * Tests for overrun limiter in output file.
+   * Reuses the "all opcodes" patch for maximum coverage.
+   */
+  @Test
+  public void testOutputOverrun() throws IOException {
+    byte[] oldBytes = new byte[] {
+        (byte) 'A', (byte) 'B', (byte) 'C', (byte) 'D', (byte) 'E', (byte) 'F', (byte) 'G',
+        (byte) 'H', (byte) 'I', (byte) 'J' };
+    byte[] newBytes = new byte[] {
+        (byte) 'X', (byte) 'Y', (byte) 'Z',
+        (byte) 'A', (byte) 'B', (byte) 'B', (byte) 'C', (byte) 'C', (byte) 'D', (byte) 'D',
+        (byte) 'E', (byte) 'E', (byte) 'F', (byte) 'F', (byte) 'G', (byte) 'G', (byte) 'H' };
+    byte[] patch = new byte[] {
+        (byte) 0xd1, (byte) 0xff, (byte) 0xd1, (byte) 0xff, (byte) 4,    // magic+version
+        (byte) 1, (byte) 'X',                                            // DATA_MIN (DATA_1)
+        (byte) 247, 0, 1, (byte) 'Y',                                    // DATA_USHORT
+        (byte) 248, 0, 0, 0, 1, (byte) 'Z',                              // DATA_INT
+        (byte) 249, 0, 0, 2,                                             // COPY_USHORT_UBYTE
+        (byte) 250, 0, 1, 0, 2,                                          // COPY_USHORT_USHORT
+        (byte) 251, 0, 2, 0, 0, 0, 2,                                    // COPY_USHORT_INT
+        (byte) 252, 0, 0, 0, 3, 2,                                       // COPY_INT_UBYTE
+        (byte) 253, 0, 0, 0, 4, 0, 2,                                    // COPY_INT_USHORT
+        (byte) 254, 0, 0, 0, 5, 0, 0, 0, 2,                              // COPY_INT_INT
+        (byte) 255, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 2,                  // COPY_LONG_INT
+        0 };                                                             // EOF
+
+    for (int newBytesLimit = 0; newBytesLimit < newBytes.length; newBytesLimit++) {
+      checkExpectedIOException(oldBytes, -1, patch, -1, newBytesLimit);
+    }
+  }
+
+  /**
+   * Test for negative values in patch command arguments.  Since the checks for this are
+   * in two specific methods, copyFromPatch() and copyFromOriginal(), I'm not going to exercise
+   * the full set of opcodes here - just the full set of argument widths.  Note that we don't
+   * test "ubyte" or "ushort" values here, as they are unsigned.
+   */
+  @Test
+  public void testNegativeArgumentValues() throws IOException {
+    byte[] oldBytes = new byte[] {
+        (byte) 'A', (byte) 'B', (byte) 'C', (byte) 'D', (byte) 'E', (byte) 'F', (byte) 'G',
+        (byte) 'H', (byte) 'I', (byte) 'J' };
+
+    byte[] negativeDataInt = new byte[] {
+        (byte) 0xd1, (byte) 0xff, (byte) 0xd1, (byte) 0xff, (byte) 4,        // magic+version
+        (byte) 248, (byte) 255, (byte) 255, (byte) 255, (byte) 255,          // DATA_INT
+        (byte) 'Y',
+        0 };                                                                 // EOF
+    byte[] negativeCopyLengthInt = new byte[] {
+        (byte) 0xd1, (byte) 0xff, (byte) 0xd1, (byte) 0xff, (byte) 4,        // magic+version
+        (byte) 251, 0, 0, (byte) 255, (byte) 255, (byte) 255, (byte) 255,    // COPY_USHORT_INT
+        0 };                                                                 // EOF
+    byte[] negativeCopyOffsetInt = new byte[] {
+        (byte) 0xd1, (byte) 0xff, (byte) 0xd1, (byte) 0xff, (byte) 4,        // magic+version
+        (byte) 252, (byte) 255, (byte) 255, (byte) 255, (byte) 255, 0,       // COPY_INT_UBYTE
+        0 };                                                                 // EOF
+    byte[] negativeCopyOffsetLong = new byte[] {
+        (byte) 0xd1, (byte) 0xff, (byte) 0xd1, (byte) 0xff, (byte) 4,        // magic+version
+        (byte) 255, (byte) 255, (byte) 255, (byte) 255, (byte) 255,          // COPY_LONG_INT
+        (byte) 255, (byte) 255, (byte) 255, (byte) 255, 0, 0, 0 };           // EOF
+
+    // All of these should throw exceptions due to negative numbers in the arguments
+    checkExpectedIOException(oldBytes, -1, negativeDataInt, -1, -1);
+    checkExpectedIOException(oldBytes, -1, negativeCopyLengthInt, -1, -1);
+    checkExpectedIOException(oldBytes, -1, negativeCopyOffsetInt, -1, -1);
+    checkExpectedIOException(oldBytes, -1, negativeCopyOffsetLong, -1, -1);
+  }
+
+  /**
+   * Helper for calling patch(), expects to throw an error.
+   *
+   * @param inputBytes the bytes representing the input (original) file
+   * @param inputLimit if -1, use entire input array.  Otherwise, shorten input to this length.
+   * @param patchBytes byte array containing patch
+   * @param patchLimit if -1, use entire patch array.  Otherwise, shorten patch to this length.
+   * @param outputLimit if -1, expect a "very large" output.  Otherwise, limit the output to this length.
+   */
+  private void checkExpectedIOException(byte[] inputBytes, int inputLimit,
+      byte[] patchBytes, int patchLimit, int outputLimit) throws IOException {
+    if (inputLimit == -1) {
+      inputLimit = inputBytes.length;
+    }
+    // Create "input file".
+    File inputFile = File.createTempFile("testExample", null);
+    FileOutputStream writeInputFile = new FileOutputStream(inputFile);
+    writeInputFile.write(inputBytes, 0, inputLimit);
+    writeInputFile.close();
+    RandomAccessFile readInputFile = new RandomAccessFile(inputFile, "r");
+
+    if (patchLimit == -1) {
+      patchLimit = patchBytes.length;
+    }
+    ByteArrayInputStream patchStream = new ByteArrayInputStream(patchBytes, 0, patchLimit);
+
+    if (outputLimit == -1) {
+      outputLimit = (inputBytes.length * 2) + (patchBytes.length * 2);    // 100% arbitrary
+    }
+    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
+    try {
+      Gdiff.patch(readInputFile, patchStream, outputStream, outputLimit);
+      Assert.fail("Expected IOException");
+    } catch (IOException expected) {
+    }
+    Assert.assertTrue(outputStream.size() <= outputLimit);
+  }
+}
diff --git a/explainer/src/test/java/com/google/archivepatcher/explainer/PatchExplainerTest.java b/explainer/src/test/java/com/google/archivepatcher/explainer/PatchExplainerTest.java
index 1a5a379..5307541 100644
--- a/explainer/src/test/java/com/google/archivepatcher/explainer/PatchExplainerTest.java
+++ b/explainer/src/test/java/com/google/archivepatcher/explainer/PatchExplainerTest.java
@@ -359,7 +359,7 @@
         new EntryExplanation(
             path(ENTRY_A1_LEVEL_6),
             false,
-            RecommendationReason.UNSUITABLE,
+            RecommendationReason.DEFLATE_UNSUITABLE,
             FakeCompressor.OUTPUT.length());
     checkExplanation(explanations, expected);
   }
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/DefaultDeflateCompressionDiviner.java b/generator/src/main/java/com/google/archivepatcher/generator/DefaultDeflateCompressionDiviner.java
index 636504d..d0c7a8b 100644
--- a/generator/src/main/java/com/google/archivepatcher/generator/DefaultDeflateCompressionDiviner.java
+++ b/generator/src/main/java/com/google/archivepatcher/generator/DefaultDeflateCompressionDiviner.java
@@ -14,10 +14,10 @@
 
 package com.google.archivepatcher.generator;
 
+import com.google.archivepatcher.shared.ByteArrayInputStreamFactory;
 import com.google.archivepatcher.shared.DefaultDeflateCompatibilityWindow;
 import com.google.archivepatcher.shared.JreDeflateParameters;
 import com.google.archivepatcher.shared.MultiViewInputStreamFactory;
-import com.google.archivepatcher.shared.RandomAccessFileInputStream;
 import com.google.archivepatcher.shared.RandomAccessFileInputStreamFactory;
 import java.io.File;
 import java.io.IOException;
@@ -41,15 +41,13 @@
  */
 public class DefaultDeflateCompressionDiviner {
 
-  /**
-   * The levels to try for each strategy, in the order to attempt them.
-   */
-  private final Map<Integer, List<Integer>> levelsByStrategy = getLevelsByStrategy();
+  /** The levels to try for each strategy, in the order to attempt them. */
+  private static final Map<Integer, List<Integer>> LEVELS_BY_STRATEGY = getLevelsByStrategy();
 
   /**
    * A simple struct that contains a {@link MinimalZipEntry} describing a specific entry from a zip
    * archive along with an optional accompanying {@link JreDeflateParameters} describing the
-   * original compression settings that were used to generate the compressed data in that entry.
+   * original compression settings that were used to generate the compressed data in that entry.
    */
   public static class DivinationResult {
     /**
@@ -91,17 +89,30 @@
    * @see DivinationResult 
    */
   public List<DivinationResult> divineDeflateParameters(File archiveFile) throws IOException {
-    List<DivinationResult> results = new ArrayList<DivinationResult>();
+    List<DivinationResult> results = new ArrayList<>();
     for (MinimalZipEntry minimalZipEntry : MinimalZipArchive.listEntries(archiveFile)) {
       JreDeflateParameters divinedParameters = null;
       if (minimalZipEntry.isDeflateCompressed()) {
-        // TODO(andrewhayden): Reuse streams to avoid churning file descriptors
-        RandomAccessFileInputStreamFactory rafisFactory =
+        // TODO(pasc): Reuse streams to avoid churning file descriptors
+        MultiViewInputStreamFactory isFactory =
             new RandomAccessFileInputStreamFactory(
                 archiveFile,
                 minimalZipEntry.getFileOffsetOfCompressedData(),
                 minimalZipEntry.getCompressedSize());
-        divinedParameters = divineDeflateParameters(rafisFactory);
+
+        // Keep small entries in memory to avoid unnecessary file I/O.
+        if (minimalZipEntry.getCompressedSize() < (100 * 1024)) {
+          try (InputStream is = isFactory.newStream()) {
+            byte[] compressedBytes = new byte[(int) minimalZipEntry.getCompressedSize()];
+            is.read(compressedBytes);
+            divinedParameters =
+                divineDeflateParameters(new ByteArrayInputStreamFactory(compressedBytes));
+          } catch (Exception ignore) {
+            divinedParameters = null;
+          }
+        } else {
+          divinedParameters = divineDeflateParameters(isFactory);
+        }
       }
       results.add(new DivinationResult(minimalZipEntry, divinedParameters));
     }
@@ -121,124 +132,103 @@
    *
    * @return such a mapping
    */
-  protected Map<Integer, List<Integer>> getLevelsByStrategy() {
-    final Map<Integer, List<Integer>> levelsByStrategy = new HashMap<Integer, List<Integer>>();
+  private static Map<Integer, List<Integer>> getLevelsByStrategy() {
+    final Map<Integer, List<Integer>> levelsByStrategy = new HashMap<>();
     // The best order for the levels is simply the order of popularity in the world, which is
     // expected to be default (6), maximum compression (9), and fastest (1).
     // The rest of the levels are rarely encountered and their order is mostly irrelevant.
     levelsByStrategy.put(0, Collections.unmodifiableList(Arrays.asList(6, 9, 1, 4, 2, 3, 5, 7, 8)));
     levelsByStrategy.put(1, Collections.unmodifiableList(Arrays.asList(6, 9, 4, 5, 7, 8)));
+    // Strategy 2 does not have the concept of levels, so vacuously call it 1.
     levelsByStrategy.put(2, Collections.singletonList(1));
     return Collections.unmodifiableMap(levelsByStrategy);
   }
 
   /**
    * Determines the original {@link JreDeflateParameters} that were used to compress a given piece
-   * of deflated data.
+   * of deflated data.
+   *
    * @param compressedDataInputStreamFactory a {@link MultiViewInputStreamFactory} that can provide
-   * multiple independent {@link InputStream} instances for the compressed data; the streams
-   * produced must support {@link InputStream#mark(int)} and it is recommended that
-   * {@link RandomAccessFileInputStream} instances be provided for efficiency if a backing file is
-   * available. The stream will be reset for each recompression attempt that is required.
-   * @return the parameters that can be used to replicate the compressed data in the
-   * {@link DefaultDeflateCompatibilityWindow}, if any; otherwise <code>null</code>. Note that
-   * <code>null</code> is also returned in the case of <em>corrupt</em> zip data since, by
-   * definition, it cannot be replicated via any combination of normal deflate parameters.
-   * @throws IOException if there is a problem reading the data, i.e. if the file contents are
-   * changed while reading
+   *     multiple independent {@link InputStream} instances for the compressed data.
+   * @return the parameters that can be used to replicate the compressed data in the {@link
+   *     DefaultDeflateCompatibilityWindow}, if any; otherwise <code>null</code>. Note that <code>
+   *     null</code> is also returned in the case of <em>corrupt</em> zip data since, by definition,
+   *     it cannot be replicated via any combination of normal deflate parameters.
+   * @throws IOException if there is a problem reading the data, i.e. if the file contents are
+   *     changed while reading
    */
   public JreDeflateParameters divineDeflateParameters(
-      MultiViewInputStreamFactory<?> compressedDataInputStreamFactory) throws IOException {
-    InputStream compressedDataIn = compressedDataInputStreamFactory.newStream();
-    if (!compressedDataIn.markSupported()) {
-      try {
-        compressedDataIn.close();
-      } catch (Exception ignored) {
-        // Nothing to do.
-      }
-      throw new IllegalArgumentException("input stream must support mark(int)");
-    }
+      MultiViewInputStreamFactory compressedDataInputStreamFactory) throws IOException {
+    byte[] copyBuffer = new byte[32 * 1024];
+    // Iterate over all relevant combinations of nowrap, strategy and level.
+    for (boolean nowrap : new boolean[] {true, false}) {
+      Inflater inflater = new Inflater(nowrap);
+      Deflater deflater = new Deflater(0, nowrap);
 
-    // Record the input stream position to return to it each time a prediction is needed.
-    compressedDataIn.mark(0); // The argument to mark is ignored and irrelevant
-
-    // Make a copy of the stream for matching bytes of compressed input
-    InputStream matchingCompressedDataIn = compressedDataInputStreamFactory.newStream();
-    matchingCompressedDataIn.mark(0); // The argument to mark is ignored and irrelevant
-
-    byte[] copyBuffer = new byte[32768];
-    try {
-      // Iterate over all relevant combinations of nowrap, strategy and level.
-      for (boolean nowrap : new boolean[] {true, false}) {
-        Inflater inflater = new Inflater(nowrap);
-        Deflater deflater = new Deflater(0, nowrap);
-        for (int strategy : new int[] {0, 1, 2}) {
-          deflater.setStrategy(strategy);
-          // Strategy 2 does not have the concept of levels, so vacuously call it 1.
-          List<Integer> levelsToSearch = levelsByStrategy.get(strategy);
-          for (int levelIndex = 0; levelIndex < levelsToSearch.size(); levelIndex++) {
-            int level = levelsToSearch.get(levelIndex);
-            deflater.setLevel(level);
-            inflater.reset();
-            deflater.reset();
-            compressedDataIn.reset();
-            matchingCompressedDataIn.reset();
-            try {
-              if (matches(
-                  compressedDataIn, inflater, deflater, matchingCompressedDataIn, copyBuffer)) {
-                return JreDeflateParameters.of(level, strategy, nowrap);
-              }
-            } catch (ZipException e) {
-              // Parse error in input. The only possibilities are corruption or the wrong nowrap.
-              // Skip all remaining levels and strategies.
-              levelIndex = levelsToSearch.size();
-              strategy = 2;
+      strategy_loop:
+      for (int strategy : new int[] {0, 1, 2}) {
+        deflater.setStrategy(strategy);
+        for (int level : LEVELS_BY_STRATEGY.get(strategy)) {
+          deflater.setLevel(level);
+          inflater.reset();
+          deflater.reset();
+          try {
+            if (matches(inflater, deflater, compressedDataInputStreamFactory, copyBuffer)) {
+              end(inflater, deflater);
+              return JreDeflateParameters.of(level, strategy, nowrap);
             }
-          } // end of iteration on level
-        } // end of iteration on strategy
-      } // end of iteration on nowrap
-    } finally {
-      try {
-        compressedDataIn.close();
-      } catch (Exception ignored) {
-        // Don't care.
+          } catch (ZipException e) {
+            // Parse error in input. The only possibilities are corruption or the wrong nowrap.
+            // Skip all remaining levels and strategies.
+            break strategy_loop;
+          }
+        }
       }
-      try {
-        matchingCompressedDataIn.close();
-      } catch (Exception ignored) {
-        // Don't care.
-      }
+      end(inflater, deflater);
     }
     return null;
   }
 
   /**
-   * Check if the specified deflater will produce the same compressed data as the byte stream in
-   * compressedDataIn and returns true if so.
-   * @param compressedDataIn the stream of compressed data to read and reproduce
+   * Closes the (de)compressor and discards any unprocessed input. This method should be called when
+   * the (de)compressor is no longer being used. Once this method is called, the behavior of the
+   * Inflater/Deflater is undefined.
+   *
+   * @see Inflater#end
+   * @see Deflater#end
+   */
+  private static void end(Inflater inflater, Deflater deflater) {
+    inflater.end();
+    deflater.end();
+  }
+
+  /**
+   * Checks whether the specified deflater will produce the same compressed data as the byte
+   * stream.
+   *
    * @param inflater the inflater for uncompressing the stream
    * @param deflater the deflater for recompressing the output of the inflater
-   * @param matchingStreamInput an independent but identical view of the bytes in compressedDataIn
    * @param copyBuffer buffer to use for copying bytes between the inflater and the deflater
    * @return true if the specified deflater reproduces the bytes in compressedDataIn, otherwise
-   * false
+   *     false
    * @throws IOException if anything goes wrong; in particular, {@link ZipException} is thrown if
-   * there is a problem parsing compressedDataIn
+   *     there is a problem parsing compressedDataIn
    */
   private boolean matches(
-      InputStream compressedDataIn,
       Inflater inflater,
       Deflater deflater,
-      InputStream matchingStreamInput,
+      MultiViewInputStreamFactory compressedDataInputStreamFactory,
       byte[] copyBuffer)
       throws IOException {
-    MatchingOutputStream matcher = new MatchingOutputStream(matchingStreamInput, 32768);
-    // This stream will deliberately be left open because closing it would close the
-    // underlying compressedDataIn stream, which is not desired.
-    InflaterInputStream inflaterIn = new InflaterInputStream(compressedDataIn, inflater, 32768);
-    DeflaterOutputStream out = new DeflaterOutputStream(matcher, deflater, 32768);
-    int numRead = 0;
-    try {
+
+    try (MatchingOutputStream matcher =
+            new MatchingOutputStream(
+                compressedDataInputStreamFactory.newStream(), copyBuffer.length);
+        InflaterInputStream inflaterIn =
+            new InflaterInputStream(
+                compressedDataInputStreamFactory.newStream(), inflater, copyBuffer.length);
+        DeflaterOutputStream out = new DeflaterOutputStream(matcher, deflater, copyBuffer.length)) {
+      int numRead;
       while ((numRead = inflaterIn.read(copyBuffer)) >= 0) {
         out.write(copyBuffer, 0, numRead);
       }
@@ -247,19 +237,13 @@
       out.finish();
       out.flush();
       matcher.expectEof();
-      // At this point the data in the compressed output stream was a perfect match for the
-      // data in the compressed input stream; the answer has been found.
+      // At this point the data in the compressed output stream was a perfect match for the
+      // data in the compressed input stream; the answer has been found.
       return true;
     } catch (MismatchException e) {
       // Fast-fail case when the compressed output stream doesn't match the compressed input
       // stream. These are not the parameters you're looking for!
-    } finally {
-      try {
-        out.close();
-      } catch (Exception ignored) {
-        // Don't care.
-      }
+      return false;
     }
-    return false;
   }
 }
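
Rough sketch of exercising the streamlined diviner on bytes compressed with known settings. It
assumes the ByteArrayInputStreamFactory byte[] constructor used above; note that for small inputs
several levels can produce identical output, so the first match in the preferred order is returned:

import com.google.archivepatcher.generator.DefaultDeflateCompressionDiviner;
import com.google.archivepatcher.shared.ByteArrayInputStreamFactory;
import com.google.archivepatcher.shared.JreDeflateParameters;
import java.io.ByteArrayOutputStream;
import java.util.zip.Deflater;
import java.util.zip.DeflaterOutputStream;

class DivinerUsageSketch {
  static JreDeflateParameters divine(byte[] plainBytes) throws Exception {
    // Compress with concrete settings: level 9, default strategy (0), nowrap.
    Deflater deflater = new Deflater(9, /* nowrap= */ true);
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    try (DeflaterOutputStream out = new DeflaterOutputStream(compressed, deflater)) {
      out.write(plainBytes);
    }
    deflater.end();
    // Brute-forces nowrap, strategy and level until recompression matches the input bytes;
    // returns the first matching combination, or null if nothing reproduces them.
    return new DefaultDeflateCompressionDiviner()
        .divineDeflateParameters(new ByteArrayInputStreamFactory(compressed.toByteArray()));
  }
}
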
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/FileByFileV1DeltaGenerator.java b/generator/src/main/java/com/google/archivepatcher/generator/FileByFileV1DeltaGenerator.java
index 8c81761..c704811 100644
--- a/generator/src/main/java/com/google/archivepatcher/generator/FileByFileV1DeltaGenerator.java
+++ b/generator/src/main/java/com/google/archivepatcher/generator/FileByFileV1DeltaGenerator.java
@@ -93,6 +93,33 @@
     }
   }
 
+  /**
+   * Generate a V1 patch pre diffing plan.
+   *
+   * @param oldFile the original old file to read (will not be modified)
+   * @param newFile the original new file to read (will not be modified)
+   * @return the plan
+   * @throws IOException if unable to complete the operation due to an I/O error
+   * @throws InterruptedException if any thread has interrupted the current thread
+   */
+  public PreDiffPlan generatePreDiffPlan(File oldFile, File newFile)
+      throws IOException, InterruptedException {
+    try (TempFileHolder deltaFriendlyOldFile = new TempFileHolder();
+        TempFileHolder deltaFriendlyNewFile = new TempFileHolder()) {
+      PreDiffExecutor.Builder builder =
+          new PreDiffExecutor.Builder()
+              .readingOriginalFiles(oldFile, newFile)
+              .writingDeltaFriendlyFiles(deltaFriendlyOldFile.file, deltaFriendlyNewFile.file);
+      for (RecommendationModifier modifier : recommendationModifiers) {
+        builder.withRecommendationModifier(modifier);
+      }
+
+      PreDiffExecutor executor = builder.build();
+
+      return executor.prepareForDiffing();
+    }
+  }
+
   // Visible for testing only
   protected DeltaGenerator getDeltaGenerator() {
     return new BsDiffDeltaGenerator();
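
Hypothetical caller sketch for the new generatePreDiffPlan entry point; the no-argument generator
constructor and the file handling are assumptions for illustration:

import com.google.archivepatcher.generator.FileByFileV1DeltaGenerator;
import com.google.archivepatcher.generator.PreDiffPlan;
import java.io.File;

class PreDiffPlanSketch {
  static PreDiffPlan planOnly(File oldArchive, File newArchive) throws Exception {
    // Computes the pre-diffing plan (which entries to uncompress and the divined deflate
    // parameters) without generating an actual patch; temp files are cleaned up internally.
    return new FileByFileV1DeltaGenerator().generatePreDiffPlan(oldArchive, newArchive);
  }
}
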
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/MatchingOutputStream.java b/generator/src/main/java/com/google/archivepatcher/generator/MatchingOutputStream.java
index f1369d2..f3c5363 100644
--- a/generator/src/main/java/com/google/archivepatcher/generator/MatchingOutputStream.java
+++ b/generator/src/main/java/com/google/archivepatcher/generator/MatchingOutputStream.java
@@ -86,6 +86,11 @@
     }
   }
 
+  @Override
+  public void close() throws IOException {
+    expectedBytesStream.close();
+  }
+
   /**
    * Expects the end-of-file to be reached in the associated {@link InputStream}.
    * @throws IOException if the end-of-file has not yet been reached in the associated
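
The override matters because OutputStream.close() is a no-op by default, so a MatchingOutputStream declared in a try-with-resources block (as in the diviner change above) would otherwise leave its expected-bytes stream open. A tiny illustrative sketch of the pattern; PseudoMatcher is not the library class:

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;

    class PseudoMatcher extends OutputStream {
      private final InputStream expected;

      PseudoMatcher(InputStream expected) {
        this.expected = expected;
      }

      @Override
      public void write(int b) throws IOException {
        // Compare each written byte against the next expected byte.
        if (expected.read() != (b & 0xff)) {
          throw new IOException("mismatch");
        }
      }

      @Override
      public void close() throws IOException {
        // OutputStream.close() is a no-op by default; without this override the
        // expected-bytes stream would never be released by try-with-resources.
        expected.close();
      }
    }
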
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/PreDiffPlanner.java b/generator/src/main/java/com/google/archivepatcher/generator/PreDiffPlanner.java
index 6b2d1ee..7b037ea 100644
--- a/generator/src/main/java/com/google/archivepatcher/generator/PreDiffPlanner.java
+++ b/generator/src/main/java/com/google/archivepatcher/generator/PreDiffPlanner.java
@@ -199,6 +199,16 @@
       throws IOException {
 
     // Reject anything that is unsuitable for uncompressed diffing.
+    // Reason singled out in order to monitor unsupported versions of zlib.
+    if (unsuitableDeflate(newEntry)) {
+      return new QualifiedRecommendation(
+          oldEntry,
+          newEntry,
+          Recommendation.UNCOMPRESS_NEITHER,
+          RecommendationReason.DEFLATE_UNSUITABLE);
+    }
+
+    // Reject anything that is unsuitable for uncompressed diffing.
     if (unsuitable(oldEntry, newEntry)) {
       return new QualifiedRecommendation(
           oldEntry,
@@ -257,7 +267,8 @@
   /**
    * Returns true if the entries are unsuitable for doing an uncompressed diff. This method returns
    * true if either of the entries is compressed in an unsupported way (a non-deflate compression
-   * algorithm) or if the new entry is compressed in a supported but unreproducible way.
+   * algorithm).
+   *
    * @param oldEntry the entry in the old archive
    * @param newEntry the entry in the new archive
    * @return true if unsuitable
@@ -272,6 +283,18 @@
       // The new entry is compressed in a way that is not supported. Same result as above.
       return true;
     }
+    return false;
+  }
+
+  /**
+   * Returns true if the entries are unsuitable for doing an uncompressed diff as a result of the
+   * new entry being compressed via deflate, with undivinable parameters. This could be the result
+   * of an unsupported version of zlib being used.
+   *
+   * @param newEntry the entry in the new archive
+   * @return true if unsuitable
+   */
+  private boolean unsuitableDeflate(MinimalZipEntry newEntry) {
     JreDeflateParameters newJreDeflateParameters =
         newArchiveJreDeflateParametersByPath.get(new ByteArrayHolder(newEntry.getFileNameBytes()));
     if (newEntry.isDeflateCompressed() && newJreDeflateParameters == null) {
@@ -279,6 +302,7 @@
       // new entry cannot be recompressed, so leave both old and new alone.
       return true;
     }
+
     return false;
   }
 
@@ -339,13 +363,16 @@
     }
     byte[] buffer = new byte[4096];
     int numRead = 0;
-    try (RandomAccessFileInputStream oldRafis =
-            new RandomAccessFileInputStream(
-                oldFile, oldEntry.getFileOffsetOfCompressedData(), oldEntry.getCompressedSize());
-        RandomAccessFileInputStream newRafis =
+    try (RandomAccessFileInputStream newRafis =
             new RandomAccessFileInputStream(
                 newFile, newEntry.getFileOffsetOfCompressedData(), newEntry.getCompressedSize());
-        MatchingOutputStream matcher = new MatchingOutputStream(oldRafis, 4096)) {
+        MatchingOutputStream matcher =
+            new MatchingOutputStream(
+                new RandomAccessFileInputStream(
+                    oldFile,
+                    oldEntry.getFileOffsetOfCompressedData(),
+                    oldEntry.getCompressedSize()),
+                4096)) {
       while ((numRead = newRafis.read(buffer)) >= 0) {
         try {
           matcher.write(buffer, 0, numRead);
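
For a concrete feel of the condition the new reason captures: when the diviner cannot find JRE deflate settings that reproduce an entry's bytes, it returns null, and the planner now reports that as DEFLATE_UNSUITABLE rather than the generic UNSUITABLE. A small sketch mirroring the diviner test further down in this change; the no-argument construction of the diviner is an assumption:

    import com.google.archivepatcher.generator.DefaultDeflateCompressionDiviner;
    import com.google.archivepatcher.shared.ByteArrayInputStreamFactory;
    import com.google.archivepatcher.shared.JreDeflateParameters;

    public class DivinationProbe {
      public static void main(String[] args) throws Exception {
        // Bytes that no supported JRE/zlib configuration produced, as in the diviner junk-data test.
        byte[] unreproducible = new byte[] {0, 1, 2, 3, 4};
        JreDeflateParameters params =
            new DefaultDeflateCompressionDiviner()
                .divineDeflateParameters(new ByteArrayInputStreamFactory(unreproducible));
        // A null result is the condition unsuitableDeflate() reports as DEFLATE_UNSUITABLE.
        System.out.println(params == null ? "undivinable -> DEFLATE_UNSUITABLE" : params.toString());
      }
    }
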
diff --git a/generator/src/main/java/com/google/archivepatcher/generator/RecommendationReason.java b/generator/src/main/java/com/google/archivepatcher/generator/RecommendationReason.java
index 664944b..129428b 100644
--- a/generator/src/main/java/com/google/archivepatcher/generator/RecommendationReason.java
+++ b/generator/src/main/java/com/google/archivepatcher/generator/RecommendationReason.java
@@ -19,6 +19,11 @@
  */
 public enum RecommendationReason {
   /**
+   * The entry in the new file is compressed using deflate in a way that cannot be reliably
+   * reproduced. This could be caused by using an unsupported version of zlib.
+   */
+  DEFLATE_UNSUITABLE,
+  /**
    * The entry in the new file is compressed in a way that cannot be reliably reproduced (or one of
    * the entries is compressed using something other than deflate, but this is very uncommon).
    */
diff --git a/generator/src/test/java/com/google/archivepatcher/generator/DefaultDeflateCompressionDivinerTest.java b/generator/src/test/java/com/google/archivepatcher/generator/DefaultDeflateCompressionDivinerTest.java
index 057c790..c712a2b 100644
--- a/generator/src/test/java/com/google/archivepatcher/generator/DefaultDeflateCompressionDivinerTest.java
+++ b/generator/src/test/java/com/google/archivepatcher/generator/DefaultDeflateCompressionDivinerTest.java
@@ -15,26 +15,23 @@
 package com.google.archivepatcher.generator;
 
 import com.google.archivepatcher.generator.DefaultDeflateCompressionDiviner.DivinationResult;
+import com.google.archivepatcher.shared.ByteArrayInputStreamFactory;
 import com.google.archivepatcher.shared.DefaultDeflateCompatibilityWindow;
 import com.google.archivepatcher.shared.DeflateCompressor;
 import com.google.archivepatcher.shared.JreDeflateParameters;
-import com.google.archivepatcher.shared.MultiViewInputStreamFactory;
 import com.google.archivepatcher.shared.UnitTestZipArchive;
 import com.google.archivepatcher.shared.UnitTestZipEntry;
-
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.util.List;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.JUnit4;
 
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.List;
-
 /**
  * Tests for {@link DefaultDeflateCompressionDiviner}.
  */
@@ -47,7 +44,7 @@
   private DefaultDeflateCompressionDiviner diviner = null;
 
   /**
-   * Test data written to the file.
+   * Test data written to the file.
    */
   private byte[] testData = null;
 
@@ -58,10 +55,10 @@
   }
 
   /**
-   * Deflates the test data using the specified parameters, storing them in a temp file and
+   * Deflates the test data using the specified parameters, storing them in a temp file and
    * returns the temp file created.
    * @param parameters the parameters to use for deflating
-   * @return the temp file with the data
+   * @return the temp file with the data
    */
   private byte[] deflate(JreDeflateParameters parameters) throws IOException {
     DeflateCompressor compressor = new DeflateCompressor();
@@ -73,74 +70,10 @@
     return buffer.toByteArray();
   }
 
-  private static class ByteArrayInputStreamFactory
-      implements MultiViewInputStreamFactory<ByteArrayInputStream> {
-    private final byte[] data;
-    private final boolean supportMark;
-    private final boolean dieOnClose;
-
-    /**
-     * Create a factory the returns streams on the specified data buffer, optionally supporting
-     * {@link InputStream#mark(int)}.
-     * @param data the data buffer to return streams for
-     * @param supportMark whether or not to support marking
-     * @param dieOnClose whether or not to throw nasty exceptions on close()
-     */
-    public ByteArrayInputStreamFactory(byte[] data, boolean supportMark, boolean dieOnClose) {
-      this.data = data;
-      this.supportMark = supportMark;
-      this.dieOnClose = dieOnClose;
-    }
-
-    @Override
-    public ByteArrayInputStream newStream() throws IOException {
-      return new ByteArrayInputStream(data) {
-        @Override
-        public boolean markSupported() {
-          return supportMark;
-        }
-
-        @Override
-        public void close() throws IOException {
-          if (dieOnClose) {
-            throw new IOException("brainnnnnnnnnnssssss!");
-          }
-          super.close();
-        }
-      };
-    }
-  }
-
-  @Test
-  public void testDivineDeflateParameters_NoMarkInputStreamFactory() throws IOException {
-    final JreDeflateParameters parameters = JreDeflateParameters.of(1, 0, true);
-    final byte[] buffer = deflate(parameters);
-    try {
-      // The factory here will NOT support mark(int), which should cause failure. Also, throw
-      // exceptions on close() to be extra rude.
-      diviner.divineDeflateParameters(new ByteArrayInputStreamFactory(buffer, false, true));
-      Assert.fail("operating without a markable stream");
-    } catch (IllegalArgumentException expected) {
-      // Correct!
-    }
-  }
-
-  @Test
-  public void testDivineDeflateParameters_BadCloseInputStreamFactory() throws IOException {
-    final JreDeflateParameters parameters = JreDeflateParameters.of(1, 0, true);
-    final byte[] buffer = deflate(parameters);
-    // The factory here will produce streams that throw exceptions when close() is called.
-    // These exceptions should be ignored.
-    JreDeflateParameters result =
-        diviner.divineDeflateParameters(new ByteArrayInputStreamFactory(buffer, true, true));
-    Assert.assertEquals(result, parameters);
-  }
-
   @Test
   public void testDivineDeflateParameters_JunkData() throws IOException {
     final byte[] junk = new byte[] {0, 1, 2, 3, 4};
-    Assert.assertNull(
-        diviner.divineDeflateParameters(new ByteArrayInputStreamFactory(junk, true, false)));
+    Assert.assertNull(diviner.divineDeflateParameters(new ByteArrayInputStreamFactory(junk)));
   }
 
   @Test
@@ -150,14 +83,15 @@
         for (int level : new int[] {1, 2, 3, 4, 5, 6, 7, 8, 9}) {
           JreDeflateParameters trueParameters = JreDeflateParameters.of(level, strategy, nowrap);
           final byte[] buffer = deflate(trueParameters);
-          ByteArrayInputStreamFactory factory =
-              new ByteArrayInputStreamFactory(buffer, true, false);
-          JreDeflateParameters divinedParameters = diviner.divineDeflateParameters(factory);
+          JreDeflateParameters divinedParameters =
+              diviner.divineDeflateParameters(new ByteArrayInputStreamFactory(buffer));
           Assert.assertNotNull(divinedParameters);
           // TODO(andrewhayden) make *CERTAIN 100%( that strategy doesn't matter for level < 4.
           if (strategy == 1 && level <= 3) {
             // Strategy 1 produces identical output at levels 1, 2 and 3.
-            Assert.assertEquals(JreDeflateParameters.of(level, 0, nowrap), divinedParameters);
+            Assert.assertEquals(
+                /*expected=*/ JreDeflateParameters.of(level, 0, nowrap),
+                /*actual=*/ divinedParameters);
           } else if (strategy == 2) {
             // All levels are the same with strategy 2.
             // TODO: Assert only one test gets done for this, should be the first level always.
diff --git a/generator/src/test/java/com/google/archivepatcher/generator/PreDiffPlannerTest.java b/generator/src/test/java/com/google/archivepatcher/generator/PreDiffPlannerTest.java
index 9ba39e5..38b4c6b 100644
--- a/generator/src/test/java/com/google/archivepatcher/generator/PreDiffPlannerTest.java
+++ b/generator/src/test/java/com/google/archivepatcher/generator/PreDiffPlannerTest.java
@@ -435,11 +435,13 @@
     // the plan for the new archive should be empty as well.
     Assert.assertTrue(plan.getOldFileUncompressionPlan().isEmpty());
     Assert.assertTrue(plan.getNewFileUncompressionPlan().isEmpty());
-    checkRecommendation(plan, new QualifiedRecommendation(
-        findEntry(oldFile, ENTRY_A_STORED),
-        findEntry(newFile, ENTRY_A_LEVEL_6),
-        Recommendation.UNCOMPRESS_NEITHER,
-        RecommendationReason.UNSUITABLE));
+    checkRecommendation(
+        plan,
+        new QualifiedRecommendation(
+            findEntry(oldFile, ENTRY_A_STORED),
+            findEntry(newFile, ENTRY_A_LEVEL_6),
+            Recommendation.UNCOMPRESS_NEITHER,
+            RecommendationReason.DEFLATE_UNSUITABLE));
   }
 
   @Test
diff --git a/generator/src/test/java/com/google/archivepatcher/generator/bsdiff/BsDiffTest.java b/generator/src/test/java/com/google/archivepatcher/generator/bsdiff/BsDiffTest.java
index 2a7d7ae..16d74af 100644
--- a/generator/src/test/java/com/google/archivepatcher/generator/bsdiff/BsDiffTest.java
+++ b/generator/src/test/java/com/google/archivepatcher/generator/bsdiff/BsDiffTest.java
@@ -15,17 +15,13 @@
 package com.google.archivepatcher.generator.bsdiff;
 
 import com.google.archivepatcher.generator.bsdiff.Matcher.NextMatch;
-import java.nio.charset.StandardCharsets;
-import java.util.Arrays;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.JUnit4;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
+import java.util.Arrays;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -254,6 +250,7 @@
     Assert.assertArrayEquals(actualPatch, expectedPatch);
   }
 
+  @Test
   public void generatePatchOnRealCompiledBinaryTest() throws Exception {
     ByteArrayOutputStream out = new ByteArrayOutputStream();
     byte[] oldData = readTestData("minimalBlobA.bin");
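
The added annotation is the whole fix here: under JUnit 4, a public method without @Test is never discovered by the runner, so generatePatchOnRealCompiledBinaryTest had been silently skipped. An illustrative snippet (not part of the patch):

    import org.junit.Assert;
    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.JUnit4;

    @RunWith(JUnit4.class)
    public class AnnotationMattersTest {
      @Test
      public void annotated() {
        Assert.assertTrue(true); // discovered and executed by the runner
      }

      public void notAnnotated() {
        // Never executed: JUnit 4 only runs public void no-arg methods carrying @Test.
        Assert.fail("unreachable under the JUnit 4 runner");
      }
    }
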
diff --git a/shared/src/main/java/com/google/archivepatcher/shared/ByteArrayInputStreamFactory.java b/shared/src/main/java/com/google/archivepatcher/shared/ByteArrayInputStreamFactory.java
new file mode 100644
index 0000000..0df3c1c
--- /dev/null
+++ b/shared/src/main/java/com/google/archivepatcher/shared/ByteArrayInputStreamFactory.java
@@ -0,0 +1,36 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.archivepatcher.shared;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+
+/**
+ * A {@link MultiViewInputStreamFactory} which creates {@link ByteArrayInputStream}s based on the
+ * given {@code byte[]} in {@link #ByteArrayInputStreamFactory(byte[])}.
+ */
+public class ByteArrayInputStreamFactory implements MultiViewInputStreamFactory {
+
+  private final byte[] bytes;
+
+  public ByteArrayInputStreamFactory(byte[] bytes) {
+    this.bytes = bytes;
+  }
+
+  @Override
+  public ByteArrayInputStream newStream() throws IOException {
+    return new ByteArrayInputStream(bytes);
+  }
+}
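
Usage sketch for the new factory (a hypothetical example, not part of the patch): each call to newStream() returns an independent view over the same bytes, which is what the diviner relies on when it re-reads the compressed data for every candidate parameter set.

    import com.google.archivepatcher.shared.ByteArrayInputStreamFactory;
    import java.io.ByteArrayInputStream;

    public class MultiViewDemo {
      public static void main(String[] args) throws Exception {
        ByteArrayInputStreamFactory factory =
            new ByteArrayInputStreamFactory(new byte[] {10, 20, 30});
        ByteArrayInputStream first = factory.newStream();
        ByteArrayInputStream second = factory.newStream();
        System.out.println(first.read());  // 10
        System.out.println(first.read());  // 20
        System.out.println(second.read()); // 10 again: each view reads independently
      }
    }
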
diff --git a/shared/src/main/java/com/google/archivepatcher/shared/MultiViewInputStreamFactory.java b/shared/src/main/java/com/google/archivepatcher/shared/MultiViewInputStreamFactory.java
index f25dcc5..9b861b3 100644
--- a/shared/src/main/java/com/google/archivepatcher/shared/MultiViewInputStreamFactory.java
+++ b/shared/src/main/java/com/google/archivepatcher/shared/MultiViewInputStreamFactory.java
@@ -18,17 +18,17 @@
 import java.io.InputStream;
 
 /**
- * A factory that produces multiple independent but identical byte streams exposed via the
- * {@link InputStream} class.
- * @param <T> the type of {@link InputStream} that is produced
+ * A factory that produces multiple independent but identical byte streams exposed via the {@link
+ * InputStream} class.
  */
-public interface MultiViewInputStreamFactory<T extends InputStream> {
+public interface MultiViewInputStreamFactory {
   /**
    * Create and return a new {@link InputStream}. The returned stream is guaranteed to independently
    * produce the same byte sequence as any other stream obtained via a call to this method on the
    * same instance of this object.
+   *
    * @return the stream
    * @throws IOException if something goes wrong
    */
-  public T newStream() throws IOException;
+  public InputStream newStream() throws IOException;
 }
diff --git a/shared/src/main/java/com/google/archivepatcher/shared/RandomAccessFileInputStreamFactory.java b/shared/src/main/java/com/google/archivepatcher/shared/RandomAccessFileInputStreamFactory.java
index c153be8..0b5c89f 100644
--- a/shared/src/main/java/com/google/archivepatcher/shared/RandomAccessFileInputStreamFactory.java
+++ b/shared/src/main/java/com/google/archivepatcher/shared/RandomAccessFileInputStreamFactory.java
@@ -18,11 +18,10 @@
 import java.io.IOException;
 
 /**
- * An implementation of {@link MultiViewInputStreamFactory} that produces instances of
- * {@link RandomAccessFileInputStream}.
+ * An implementation of {@link MultiViewInputStreamFactory} that produces instances of {@link
+ * RandomAccessFileInputStream}.
  */
-public class RandomAccessFileInputStreamFactory
-    implements MultiViewInputStreamFactory<RandomAccessFileInputStream> {
+public class RandomAccessFileInputStreamFactory implements MultiViewInputStreamFactory {
 
   /**
    * Argument for {@link RandomAccessFileInputStream#RandomAccessFileInputStream(File, long, long)}.
diff --git a/shared/src/test/java/com/google/archivepatcher/shared/DefaultDeflateCompatibilityWindowTest.java b/shared/src/test/java/com/google/archivepatcher/shared/DefaultDeflateCompatibilityWindowTest.java
index ee764a3..ce77734 100644
--- a/shared/src/test/java/com/google/archivepatcher/shared/DefaultDeflateCompatibilityWindowTest.java
+++ b/shared/src/test/java/com/google/archivepatcher/shared/DefaultDeflateCompatibilityWindowTest.java
@@ -16,7 +16,6 @@
 
 import java.util.HashMap;
 import java.util.Map;
-
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -85,7 +84,7 @@
     // Manually scan for presence of the strategy-2 values, only set for compression level 1....
     Assert.assertTrue(mappings.containsKey(JreDeflateParameters.of(1, 2, true)));
     Assert.assertTrue(mappings.containsKey(JreDeflateParameters.of(1, 2, false)));
-    Assert.assertEquals(mappings.size(), 38);
+    Assert.assertEquals(38, mappings.size());
   }
 
   @Test
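
The swap above is purely about JUnit's argument contract, assertEquals(expected, actual): the assertion passes or fails the same either way, but with the arguments reversed a failure message would report the expected and actual values backwards. A trivial illustration; actualSize is a stand-in for mappings.size():

    import org.junit.Assert;

    public class ArgumentOrder {
      public static void main(String[] args) {
        int actualSize = 38; // stand-in for mappings.size()
        // With (expected, actual) order, a future failure reads "expected:<38> but was:<N>"
        // rather than the reversed, misleading message.
        Assert.assertEquals(38, actualSize);
      }
    }
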