Merge "Tell Soong about HOST_CROSS_*"
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index b64b53b..0267321 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -340,6 +340,7 @@
 LOCAL_CLANG_64:=
 LOCAL_INIT_RC_32:=
 LOCAL_INIT_RC_64:=
+LOCAL_JAVA_LANGUAGE_VERSION:=
 
 # Trim MAKEFILE_LIST so that $(call my-dir) doesn't need to
 # iterate over thousands of entries every time.
diff --git a/core/combo/javac.mk b/core/combo/javac.mk
index 70dae13..7f66ea8 100644
--- a/core/combo/javac.mk
+++ b/core/combo/javac.mk
@@ -14,7 +14,7 @@
 ANDROID_COMPILE_WITH_JACK := true
 endif
 
-common_jdk_flags := -source 1.7 -target 1.7 -Xmaxerrs 9999999
+common_jdk_flags := -Xmaxerrs 9999999
 
 # Use the indexer wrapper to index the codebase instead of the javac compiler
 ifeq ($(ALTERNATE_JAVAC),)
diff --git a/core/java_common.mk b/core/java_common.mk
index aee3193..34c67dc 100644
--- a/core/java_common.mk
+++ b/core/java_common.mk
@@ -1,6 +1,14 @@
 # Common to host and target Java modules.
 
 ###########################################################
+## Java version
+###########################################################
+ifeq (,$(LOCAL_JAVA_LANGUAGE_VERSION))
+  LOCAL_JAVA_LANGUAGE_VERSION := 1.7
+endif
+LOCAL_JAVACFLAGS += -source $(LOCAL_JAVA_LANGUAGE_VERSION) -target $(LOCAL_JAVA_LANGUAGE_VERSION)
+
+###########################################################
 ## .proto files: Compile proto files to .java
 ###########################################################
 proto_sources := $(filter %.proto,$(LOCAL_SRC_FILES))
@@ -265,6 +273,8 @@
 ifdef LOCAL_JACK_ENABLED
 ifdef need_compile_java
 
+LOCAL_JACK_FLAGS += -D jack.java.source.version=$(LOCAL_JAVA_LANGUAGE_VERSION)
+
 full_static_jack_libs := \
     $(foreach lib,$(LOCAL_STATIC_JAVA_LIBRARIES), \
       $(call intermediates-dir-for, \
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index 1d338ee..25d2743 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -16,7 +16,9 @@
 
 from collections import deque, OrderedDict
 from hashlib import sha1
+import array
 import common
+import functools
 import heapq
 import itertools
 import multiprocessing
@@ -24,6 +26,7 @@
 import re
 import subprocess
 import threading
+import time
 import tempfile
 
 from rangelib import RangeSet
@@ -204,6 +207,23 @@
             " to " + str(self.tgt_ranges) + ">")
 
 
+@functools.total_ordering
+class HeapItem(object):
+  def __init__(self, item):
+    self.item = item
+    # Negate the score since Python's heapq is a min-heap and we want
+    # the maximum score.
+    self.score = -item.score
+  def clear(self):
+    self.item = None
+  def __bool__(self):
+    return self.item is not None
+  # Python 2 looks for __nonzero__ rather than __bool__.
+  __nonzero__ = __bool__
+  def __eq__(self, other):
+    return self.score == other.score
+  def __le__(self, other):
+    return self.score <= other.score
+
+
 # BlockImageDiff works on two image objects.  An image object is
 # anything that provides the following attributes:
 #
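
The HeapItem wrapper above works around two heapq limitations: heapq only
implements a min-heap (hence the negated score), and it has no decrease-key
operation, so a stale entry is invalidated in place and a fresh wrapper is
pushed instead ("lazy deletion"). A minimal standalone sketch of the pattern;
the Item payload and its scores are invented for illustration:

    import functools
    import heapq

    @functools.total_ordering
    class HeapItem(object):
        # The same wrapper as in the hunk above: a cleared wrapper tests False.
        def __init__(self, item):
            self.item = item
            self.score = -item.score   # negate: heapq is a min-heap
        def clear(self):
            self.item = None           # lazy-deletion marker
        def __bool__(self):
            return self.item is not None
        __nonzero__ = __bool__         # Python 2 spelling of __bool__
        def __eq__(self, other):
            return self.score == other.score
        def __le__(self, other):
            return self.score <= other.score

    class Item(object):                # hypothetical payload with a score
        def __init__(self, name, score):
            self.name, self.score = name, score

    a, b, c = Item("a", 3), Item("b", 7), Item("c", 5)
    wrappers = dict((x, HeapItem(x)) for x in (a, b, c))
    heap = list(wrappers.values())
    heapq.heapify(heap)

    # "Decrease-key" on b: invalidate its old wrapper, push a fresh one.
    wrappers[b].clear()
    b.score = 1
    heapq.heappush(heap, HeapItem(b))

    while heap:                        # pop until a live wrapper surfaces
        top = heapq.heappop(heap)
        if top:
            print(top.item.name)       # prints "c", the current maximum
            break
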
@@ -251,6 +271,7 @@
     self.transfers = []
     self.src_basenames = {}
     self.src_numpatterns = {}
+    self._max_stashed_size = 0
 
     assert version in (1, 2, 3, 4)
 
@@ -268,6 +289,10 @@
     self.AssertPartition(src.care_map, src.file_map.values())
     self.AssertPartition(tgt.care_map, tgt.file_map.values())
 
+  @property
+  def max_stashed_size(self):
+    return self._max_stashed_size
+
   def Compute(self, prefix):
     # When looking for a source file to use as the diff input for a
     # target file, we try:
@@ -538,17 +563,17 @@
         f.write(i)
 
     if self.version >= 2:
-      max_stashed_size = max_stashed_blocks * self.tgt.blocksize
+      self._max_stashed_size = max_stashed_blocks * self.tgt.blocksize
       OPTIONS = common.OPTIONS
       if OPTIONS.cache_size is not None:
         max_allowed = OPTIONS.cache_size * OPTIONS.stash_threshold
         print("max stashed blocks: %d  (%d bytes), "
               "limit: %d bytes (%.2f%%)\n" % (
-              max_stashed_blocks, max_stashed_size, max_allowed,
-              max_stashed_size * 100.0 / max_allowed))
+              max_stashed_blocks, self._max_stashed_size, max_allowed,
+              self._max_stashed_size * 100.0 / max_allowed))
       else:
         print("max stashed blocks: %d  (%d bytes), limit: <unknown>\n" % (
-              max_stashed_blocks, max_stashed_size))
+              max_stashed_blocks, self._max_stashed_size))
 
   def ReviseStashSize(self):
     print("Revising stash size...")
@@ -734,7 +759,7 @@
     # - we write every block we care about exactly once.
 
     # Start with no blocks having been touched yet.
-    touched = RangeSet()
+    touched = array.array("B", b"\0" * self.tgt.total_blocks)
 
     # Imagine processing the transfers in order.
     for xf in self.transfers:
@@ -745,14 +770,22 @@
         for _, sr in xf.use_stash:
           x = x.subtract(sr)
 
-      assert not touched.overlaps(x)
-      # Check that the output blocks for this transfer haven't yet been touched.
-      assert not touched.overlaps(xf.tgt_ranges)
-      # Touch all the blocks written by this transfer.
-      touched = touched.union(xf.tgt_ranges)
+      # Check that the source blocks this transfer reads (less any
+      # stashed ranges) haven't been written yet.
+      for s, e in x:
+        for i in range(s, e):
+          assert touched[i] == 0
+
+      # Check that the output blocks for this transfer haven't yet
+      # been touched, and touch all the blocks written by this
+      # transfer.
+      for s, e in xf.tgt_ranges:
+        for i in range(s, e):
+          assert touched[i] == 0
+          touched[i] = 1
 
     # Check that we've written every target block.
-    assert touched == self.tgt.care_map
+    for s, e in self.tgt.care_map:
+      for i in range(s, e):
+        assert touched[i] == 1
 
   def ImproveVertexSequence(self):
     print("Improving vertex order...")
@@ -889,6 +922,7 @@
     for xf in self.transfers:
       xf.incoming = xf.goes_after.copy()
       xf.outgoing = xf.goes_before.copy()
+      xf.score = sum(xf.outgoing.values()) - sum(xf.incoming.values())
 
     # We use an OrderedDict instead of just a set so that the output
     # is repeatable; otherwise it would depend on the hash values of
@@ -899,52 +933,67 @@
     s1 = deque()  # the left side of the sequence, built from left to right
     s2 = deque()  # the right side of the sequence, built from right to left
 
-    while G:
+    heap = []
+    for xf in self.transfers:
+      xf.heap_item = HeapItem(xf)
+      heap.append(xf.heap_item)
+    heapq.heapify(heap)
 
+    sinks = set(u for u in G if not u.outgoing)
+    sources = set(u for u in G if not u.incoming)
+
+    def adjust_score(iu, delta):
+      iu.score += delta
+      iu.heap_item.clear()
+      iu.heap_item = HeapItem(iu)
+      heapq.heappush(heap, iu.heap_item)
+
+    while G:
       # Put all sinks at the end of the sequence.
-      while True:
-        sinks = [u for u in G if not u.outgoing]
-        if not sinks:
-          break
+      while sinks:
+        new_sinks = set()
         for u in sinks:
+          if u not in G: continue
           s2.appendleft(u)
           del G[u]
           for iu in u.incoming:
-            del iu.outgoing[u]
+            adjust_score(iu, -iu.outgoing.pop(u))
+            if not iu.outgoing: new_sinks.add(iu)
+        sinks = new_sinks
 
       # Put all the sources at the beginning of the sequence.
-      while True:
-        sources = [u for u in G if not u.incoming]
-        if not sources:
-          break
+      while sources:
+        new_sources = set()
         for u in sources:
+          if u not in G: continue
           s1.append(u)
           del G[u]
           for iu in u.outgoing:
-            del iu.incoming[u]
+            adjust_score(iu, +iu.incoming.pop(u))
+            if not iu.incoming: new_sources.add(iu)
+        sources = new_sources
 
-      if not G:
-        break
+      if not G: break
 
       # Find the "best" vertex to put next.  "Best" is the one that
       # maximizes the net difference in source blocks saved we get by
       # pretending it's a source rather than a sink.
 
-      max_d = None
-      best_u = None
-      for u in G:
-        d = sum(u.outgoing.values()) - sum(u.incoming.values())
-        if best_u is None or d > max_d:
-          max_d = d
-          best_u = u
+      while True:
+        u = heapq.heappop(heap)
+        if u and u.item in G:
+          u = u.item
+          break
 
-      u = best_u
       s1.append(u)
       del G[u]
       for iu in u.outgoing:
-        del iu.incoming[u]
+        adjust_score(iu, +iu.incoming.pop(u))
+        if not iu.incoming: sources.add(iu)
+
       for iu in u.incoming:
-        del iu.outgoing[u]
+        adjust_score(iu, -iu.outgoing.pop(u))
+        if not iu.outgoing: sinks.add(iu)
 
     # Now record the sequence in the 'order' field of each transfer,
     # and by rearranging self.transfers to be in the chosen sequence.
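
The score maintained above is the net benefit of emitting a vertex as a source:
the source blocks saved by its outgoing edges minus those saved by its incoming
ones. A toy illustration of the initial scores and of the delta adjust_score
applies when a neighbor is removed (names and edge weights are invented):

    class Xfer(object):
        def __init__(self, name):
            self.name = name
            self.incoming = {}   # vertex -> source blocks saved by that edge
            self.outgoing = {}

    a, b, c = Xfer("a"), Xfer("b"), Xfer("c")
    a.outgoing[b] = 4; b.incoming[a] = 4   # "a before b" saves 4 blocks
    c.outgoing[b] = 2; b.incoming[c] = 2   # "c before b" saves 2 blocks

    for xf in (a, b, c):
        xf.score = sum(xf.outgoing.values()) - sum(xf.incoming.values())
    print("%d %d %d" % (a.score, b.score, c.score))  # 4 -6 2: emit "a" first

    # Emitting "a" as a source removes the a->b edge; b no longer pays for
    # it, so its score rises by that edge's weight -- adjust_score's delta:
    b.score += b.incoming.pop(a)
    print("%d" % b.score)                            # -2
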
@@ -960,10 +1009,38 @@
 
   def GenerateDigraph(self):
     print("Generating digraph...")
+
+    # Each item of source_ranges will be:
+    #   - None, if that block is not used as a source,
+    #   - a transfer, if one transfer uses it as a source, or
+    #   - a set of transfers.
+    source_ranges = []
+    for b in self.transfers:
+      for s, e in b.src_ranges:
+        if e > len(source_ranges):
+          source_ranges.extend([None] * (e-len(source_ranges)))
+        for i in range(s, e):
+          if source_ranges[i] is None:
+            source_ranges[i] = b
+          else:
+            if not isinstance(source_ranges[i], set):
+              source_ranges[i] = set([source_ranges[i]])
+            source_ranges[i].add(b)
+
     for a in self.transfers:
-      for b in self.transfers:
-        if a is b:
-          continue
+      intersections = set()
+      for s, e in a.tgt_ranges:
+        for i in range(s, e):
+          if i >= len(source_ranges): break
+          b = source_ranges[i]
+          if b is not None:
+            if isinstance(b, set):
+              intersections.update(b)
+            else:
+              intersections.add(b)
+
+      for b in intersections:
+        if a is b: continue
 
         # If the blocks written by A are read by B, then B needs to go before A.
         i = a.tgt_ranges.intersect(b.src_ranges)
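
Building source_ranges replaces the old all-pairs scan, quadratic in the number
of transfers, with one pass that maps each block to the transfer(s) reading it;
collecting the transfers that conflict with a target range then costs one
lookup per block. The indexing step on toy data (transfer names and ranges are
invented):

    transfers = [("A", [(0, 4)]), ("B", [(2, 6)]), ("C", [(10, 12)])]

    source_ranges = []
    for name, ranges in transfers:
        for s, e in ranges:
            if e > len(source_ranges):
                source_ranges.extend([None] * (e - len(source_ranges)))
            for i in range(s, e):
                if source_ranges[i] is None:
                    source_ranges[i] = name          # sole reader so far
                else:
                    if not isinstance(source_ranges[i], set):
                        source_ranges[i] = set([source_ranges[i]])
                    source_ranges[i].add(name)       # multiple readers

    print(sorted(source_ranges[3]))   # ['A', 'B']: block 3 has two readers
    print(source_ranges[10])          # 'C': a single reader stays unwrapped
    print(source_ranges[8])           # None: nobody reads block 8
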
@@ -1092,6 +1169,7 @@
     """Assert that all the RangeSets in 'seq' form a partition of the
     'total' RangeSet (ie, they are nonintersecting and their union
     equals 'total')."""
+
     so_far = RangeSet()
     for i in seq:
       assert not so_far.overlaps(i)
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 95aeb62..403c67d 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -30,7 +30,6 @@
 import zipfile
 
 import blockimgdiff
-import rangelib
 
 from hashlib import sha1 as sha1
 
@@ -1255,6 +1254,7 @@
     OPTIONS.tempfiles.append(tmpdir)
     self.path = os.path.join(tmpdir, partition)
     b.Compute(self.path)
+    self._required_cache = b.max_stashed_size
 
     if src is None:
       _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
@@ -1262,6 +1262,10 @@
       _, self.device = GetTypeAndDevice("/" + partition,
                                         OPTIONS.source_info_dict)
 
+  @property
+  def required_cache(self):
+    return self._required_cache
+
   def WriteScript(self, script, output_zip, progress=None):
     if not self.src:
       # write the output unconditionally
diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py
index d923cc8..f57360a 100644
--- a/tools/releasetools/edify_generator.py
+++ b/tools/releasetools/edify_generator.py
@@ -23,6 +23,7 @@
   def __init__(self, version, info, fstab=None):
     self.script = []
     self.mounts = set()
+    self._required_cache = 0
     self.version = version
     self.info = info
     if fstab is None:
@@ -38,6 +39,11 @@
     x.mounts = self.mounts
     return x
 
+  @property
+  def required_cache(self):
+    """Return the minimum cache size to apply the update."""
+    return self._required_cache
+
   @staticmethod
   def WordWrap(cmd, linelen=80):
     """'cmd' should be a function call with null characters after each
@@ -171,6 +177,7 @@
   def CacheFreeSpaceCheck(self, amount):
     """Check that there's at least 'amount' space that can be made
     available on /cache."""
+    self._required_cache = max(self._required_cache, amount)
     self.script.append(('apply_patch_space(%d) || abort("Not enough free space '
                         'on /cache to apply patches.");') % (amount,))
 
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index ed300a7..bea33a3 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -542,6 +542,8 @@
   has_recovery_patch = HasRecoveryPatch(input_zip)
   block_based = OPTIONS.block_based and has_recovery_patch
 
+  metadata["ota-type"] = "BLOCK" if block_based else "FILE"
+
   if not OPTIONS.omit_prereq:
     ts = GetBuildProp("ro.build.date.utc", OPTIONS.info_dict)
     ts_text = GetBuildProp("ro.build.date", OPTIONS.info_dict)
@@ -697,6 +699,8 @@
 endif;
 """ % bcb_dev)
   script.AddToZip(input_zip, output_zip, input_path=OPTIONS.updater_binary)
+
+  metadata["ota-required-cache"] = str(script.required_cache)
   WriteMetadata(metadata, output_zip)
 
 
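
With the new keys in place, the metadata file records both the package kind and
its cache demand. A sketch of plausible, not verbatim, contents of
META-INF/com/android/metadata for a block-based full OTA:

    metadata = {
        "post-build": "...",             # left elided, as in a real package
        "post-timestamp": "1456531200",  # hypothetical build timestamp
        "ota-type": "BLOCK",             # BLOCK, FILE, or AB per package kind
        "ota-required-cache": "0",       # no free-space checks in a full OTA
    }
    # WriteMetadata() in this file joins sorted key=value lines; mimicked here:
    for k in sorted(metadata):
        print("%s=%s" % (k, metadata[k]))
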
@@ -773,6 +777,7 @@
                                    OPTIONS.source_info_dict),
       "post-timestamp": GetBuildProp("ro.build.date.utc",
                                      OPTIONS.target_info_dict),
+      "ota-type": "BLOCK",
   }
 
   device_specific = common.DeviceSpecificParams(
@@ -815,7 +820,7 @@
   # Check first block of system partition for remount R/W only if
   # disk type is ext4
   system_partition = OPTIONS.source_info_dict["fstab"]["/system"]
-  check_first_block = system_partition.fs_type=="ext4"
+  check_first_block = system_partition.fs_type == "ext4"
   system_diff = common.BlockDifference("system", system_tgt, system_src,
                                        check_first_block,
                                        version=blockimgdiff_version)
@@ -831,7 +836,7 @@
     # Check first block of vendor partition for remount R/W only if
     # disk type is ext4
     vendor_partition = OPTIONS.source_info_dict["fstab"]["/vendor"]
-    check_first_block = vendor_partition.fs_type=="ext4"
+    check_first_block = vendor_partition.fs_type == "ext4"
     vendor_diff = common.BlockDifference("vendor", vendor_tgt, vendor_src,
                                          check_first_block,
                                          version=blockimgdiff_version)
@@ -910,6 +915,13 @@
           GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict),
           GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict))
 
+  # Check the required cache size (i.e. stashed blocks).
+  size = []
+  if system_diff:
+    size.append(system_diff.required_cache)
+  if vendor_diff:
+    size.append(vendor_diff.required_cache)
+
   if updating_boot:
     boot_type, boot_device = common.GetTypeAndDevice(
         "/boot", OPTIONS.source_info_dict)
@@ -930,6 +942,10 @@
                         (boot_type, boot_device,
                          source_boot.size, source_boot.sha1,
                          target_boot.size, target_boot.sha1))
+      size.append(target_boot.size)
+
+  if size:
+    script.CacheFreeSpaceCheck(max(size))
 
   device_specific.IncrementalOTA_VerifyEnd()
 
@@ -1003,6 +1019,7 @@
 
   script.SetProgress(1)
   script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary)
+  metadata["ota-required-cache"] = str(script.required_cache)
   WriteMetadata(metadata, output_zip)
 
 
@@ -1076,6 +1093,7 @@
 
   script.SetProgress(1.0)
   script.AddToZip(input_zip, output_zip, input_path=OPTIONS.updater_binary)
+  metadata["ota-required-cache"] = str(script.required_cache)
   WriteMetadata(metadata, output_zip)
 
 
@@ -1119,6 +1137,8 @@
       "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict,
                                    OPTIONS.info_dict),
       "post-timestamp": GetBuildProp("ro.build.date.utc", OPTIONS.info_dict),
+      "ota-required-cache": "0",
+      "ota-type": "AB",
   }
 
   if source_file is not None:
@@ -1374,6 +1394,7 @@
                                    OPTIONS.source_info_dict),
       "post-timestamp": GetBuildProp("ro.build.date.utc",
                                      OPTIONS.target_info_dict),
+      "ota-type": "FILE",
   }
 
   device_specific = common.DeviceSpecificParams(
@@ -1487,6 +1508,13 @@
   if vendor_diff:
     so_far += vendor_diff.EmitVerification(script)
 
+  size = []
+  if system_diff.patch_list:
+    size.append(system_diff.largest_source_size)
+  if vendor_diff and vendor_diff.patch_list:
+    size.append(vendor_diff.largest_source_size)
+
   if updating_boot:
     d = common.Difference(target_boot, source_boot)
     _, _, d = d.ComputePatch()
@@ -1503,14 +1531,9 @@
                        source_boot.size, source_boot.sha1,
                        target_boot.size, target_boot.sha1))
     so_far += source_boot.size
+    size.append(target_boot.size)
 
-  size = []
-  if system_diff.patch_list:
-    size.append(system_diff.largest_source_size)
-  if vendor_diff:
-    if vendor_diff.patch_list:
-      size.append(vendor_diff.largest_source_size)
-  if size or updating_recovery or updating_boot:
+  if size:
     script.CacheFreeSpaceCheck(max(size))
 
   device_specific.IncrementalOTA_VerifyEnd()
@@ -1723,6 +1746,7 @@
     vendor_diff.EmitExplicitTargetVerification(script)
   script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary)
 
+  metadata["ota-required-cache"] = str(script.required_cache)
   WriteMetadata(metadata, output_zip)
 
 
diff --git a/tools/signapk/src/com/android/signapk/SignApk.java b/tools/signapk/src/com/android/signapk/SignApk.java
index 8f40220..4d6744a 100644
--- a/tools/signapk/src/com/android/signapk/SignApk.java
+++ b/tools/signapk/src/com/android/signapk/SignApk.java
@@ -722,9 +722,6 @@
 
                 int hash = getDigestAlgorithm(publicKey, minSdkVersion);
 
-                // Assume the certificate is valid for at least an hour.
-                long timestamp = publicKey.getNotBefore().getTime() + 3600L * 1000;
-
                 Manifest manifest = addDigestsToManifest(inputJar, hash);
                 copyFiles(manifest, inputJar, outputJar, timestamp, 0);
                 addOtacert(outputJar, publicKeyFile, timestamp, manifest, hash);