DO NOT MERGE - Merge pi-dev@5234907 into stage-aosp-master

Bug: 120848293
Change-Id: Ie622c515b59d59e9c73763dbbae6b56959f16f0e
diff --git a/Android.bp b/Android.bp
index 5f06ece..ec57ba7 100644
--- a/Android.bp
+++ b/Android.bp
@@ -40,6 +40,7 @@
         "src/dsp/cost.c",
         "src/dsp/cost_mips32.c",
         "src/dsp/cost_mips_dsp_r2.c",
+        "src/dsp/cost_neon.c",
         "src/dsp/cost_sse2.c",
         "src/dsp/cpu.c",
         "src/dsp/enc.c",
diff --git a/OWNERS b/OWNERS
new file mode 100644
index 0000000..be4e3c0
--- /dev/null
+++ b/OWNERS
@@ -0,0 +1,7 @@
+# Default code reviewers picked from top 3 or more developers.
+# Please update this list if you find better candidates.
+jzern@google.com
+djsollen@google.com
+pascal.massimino@gmail.com
+# skal@google.com
+# libwebp-{en,de}code is used by external/{skia,skqp}
diff --git a/README b/README
index 4fa15b3..502a4c1 100644
--- a/README
+++ b/README
@@ -4,7 +4,7 @@
           \__\__/\____/\_____/__/ ____  ___
                 / _/ /    \    \ /  _ \/ _/
                /  \_/   / /   \ \   __/  \__
-               \____/____/\_____/_____/____/v1.0.1
+               \____/____/\_____/_____/____/v1.0.2
 
 Description:
 ============
@@ -136,6 +136,8 @@
 
 or through your favorite interface (like ccmake or cmake-qt-gui).
 
+Use option -DWEBP_UNICODE=ON for Unicode support on Windows (with chcp 65001).
+
 Finally, once installed, you can also use WebP in your CMake project by doing:
 
 find_package(WebP)
diff --git a/README.android b/README.android
index 9b1e839..57c508b 100644
--- a/README.android
+++ b/README.android
@@ -1,5 +1,5 @@
 URL: https://chromium.googlesource.com/webm/libwebp
-Version: v1.0.1
+Version: v1.0.2
 License: Google BSD like
 
 Local modifications:
diff --git a/README.version b/README.version
index 66740c8..467dfaf 100644
--- a/README.version
+++ b/README.version
@@ -1,3 +1,3 @@
-URL: https://storage.googleapis.com/downloads.webmproject.org/releases/webp/libwebp-1.0.1.tar.gz
-Version: 1.0.1
+URL: https://storage.googleapis.com/downloads.webmproject.org/releases/webp/libwebp-1.0.2.tar.gz
+Version: 1.0.2
 BugComponent: 20174
diff --git a/include/webp/decode.h b/include/webp/decode.h
index 95d31e7..ae8bfe8 100644
--- a/include/webp/decode.h
+++ b/include/webp/decode.h
@@ -42,6 +42,12 @@
 // This function will also validate the header, returning true on success,
 // false otherwise. '*width' and '*height' are only valid on successful return.
 // Pointers 'width' and 'height' can be passed NULL if deemed irrelevant.
+// Note: The following chunk sequences (before the raw VP8/VP8L data) are
+// considered valid by this function:
+// RIFF + VP8(L)
+// RIFF + VP8X + (optional chunks) + VP8(L)
+// ALPH + VP8 <-- Not a valid WebP format: only allowed for internal purposes.
+// VP8(L)     <-- Not a valid WebP format: only allowed for internal purposes.
 WEBP_EXTERN int WebPGetInfo(const uint8_t* data, size_t data_size,
                             int* width, int* height);
 
@@ -425,6 +431,12 @@
 // Returns VP8_STATUS_OK when the features are successfully retrieved. Returns
 // VP8_STATUS_NOT_ENOUGH_DATA when more data is needed to retrieve the
 // features from headers. Returns error in other cases.
+// Note: The following chunk sequences (before the raw VP8/VP8L data) are
+// considered valid by this function:
+// RIFF + VP8(L)
+// RIFF + VP8X + (optional chunks) + VP8(L)
+// ALPH + VP8 <-- Not a valid WebP format: only allowed for internal purposes.
+// VP8(L)     <-- Not a valid WebP format: only allowed for internal purposes.
 static WEBP_INLINE VP8StatusCode WebPGetFeatures(
     const uint8_t* data, size_t data_size,
     WebPBitstreamFeatures* features) {
diff --git a/src/dec/vp8i_dec.h b/src/dec/vp8i_dec.h
index e5e89df..2d7900a 100644
--- a/src/dec/vp8i_dec.h
+++ b/src/dec/vp8i_dec.h
@@ -32,7 +32,7 @@
 // version numbers
 #define DEC_MAJ_VERSION 1
 #define DEC_MIN_VERSION 0
-#define DEC_REV_VERSION 1
+#define DEC_REV_VERSION 2
 
 // YUV-cache parameters. Cache is 32-bytes wide (= one cacheline).
 // Constraints are: We need to store one 16x16 block of luma samples (y),
diff --git a/src/demux/demux.c b/src/demux/demux.c
index a69c65b..d8f7a40 100644
--- a/src/demux/demux.c
+++ b/src/demux/demux.c
@@ -25,7 +25,7 @@
 
 #define DMUX_MAJ_VERSION 1
 #define DMUX_MIN_VERSION 0
-#define DMUX_REV_VERSION 1
+#define DMUX_REV_VERSION 2
 
 typedef struct {
   size_t start_;        // start location of the data
diff --git a/src/dsp/cost.c b/src/dsp/cost.c
index 634ccc2..cc681cd 100644
--- a/src/dsp/cost.c
+++ b/src/dsp/cost.c
@@ -377,6 +377,7 @@
 extern void VP8EncDspCostInitMIPS32(void);
 extern void VP8EncDspCostInitMIPSdspR2(void);
 extern void VP8EncDspCostInitSSE2(void);
+extern void VP8EncDspCostInitNEON(void);
 
 WEBP_DSP_INIT_FUNC(VP8EncDspCostInit) {
   VP8GetResidualCost = GetResidualCost_C;
@@ -399,6 +400,11 @@
       VP8EncDspCostInitSSE2();
     }
 #endif
+#if defined(WEBP_USE_NEON)
+    if (VP8GetCPUInfo(kNEON)) {
+      VP8EncDspCostInitNEON();
+    }
+#endif
   }
 }
 
diff --git a/src/dsp/cost_neon.c b/src/dsp/cost_neon.c
new file mode 100644
index 0000000..8cc8ce5
--- /dev/null
+++ b/src/dsp/cost_neon.c
@@ -0,0 +1,122 @@
+// Copyright 2018 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// ARM NEON version of cost functions
+
+#include "src/dsp/dsp.h"
+
+#if defined(WEBP_USE_NEON)
+
+#include "src/dsp/neon.h"
+#include "src/enc/cost_enc.h"
+
+static const uint8_t position[16] = { 1, 2,  3,  4,  5,  6,  7,  8,
+                                      9, 10, 11, 12, 13, 14, 15, 16 };
+
+static void SetResidualCoeffs_NEON(const int16_t* const coeffs,
+                                   VP8Residual* const res) {
+  const int16x8_t minus_one = vdupq_n_s16(-1);
+  const int16x8_t coeffs_0 = vld1q_s16(coeffs);
+  const int16x8_t coeffs_1 = vld1q_s16(coeffs + 8);
+  const uint16x8_t eob_0 = vtstq_s16(coeffs_0, minus_one);
+  const uint16x8_t eob_1 = vtstq_s16(coeffs_1, minus_one);
+  const uint8x16_t eob = vcombine_u8(vqmovn_u16(eob_0), vqmovn_u16(eob_1));
+  const uint8x16_t masked = vandq_u8(eob, vld1q_u8(position));
+
+#ifdef __aarch64__
+  res->last = vmaxvq_u8(masked) - 1;
+#else
+  const uint8x8_t eob_8x8 = vmax_u8(vget_low_u8(masked), vget_high_u8(masked));
+  const uint16x8_t eob_16x8 = vmovl_u8(eob_8x8);
+  const uint16x4_t eob_16x4 =
+      vmax_u16(vget_low_u16(eob_16x8), vget_high_u16(eob_16x8));
+  const uint32x4_t eob_32x4 = vmovl_u16(eob_16x4);
+  uint32x2_t eob_32x2 =
+      vmax_u32(vget_low_u32(eob_32x4), vget_high_u32(eob_32x4));
+  eob_32x2 = vpmax_u32(eob_32x2, eob_32x2);
+
+  vst1_lane_s32(&res->last, vreinterpret_s32_u32(eob_32x2), 0);
+  --res->last;
+#endif  // __aarch64__
+
+  res->coeffs = coeffs;
+}
+
+static int GetResidualCost_NEON(int ctx0, const VP8Residual* const res) {
+  uint8_t levels[16], ctxs[16];
+  uint16_t abs_levels[16];
+  int n = res->first;
+  // should be prob[VP8EncBands[n]], but it's equivalent for n=0 or 1
+  const int p0 = res->prob[n][ctx0][0];
+  CostArrayPtr const costs = res->costs;
+  const uint16_t* t = costs[n][ctx0];
+  // bit_cost(1, p0) is already incorporated in t[] tables, but only if ctx != 0
+  // (as required by the syntax). For ctx0 == 0, we need to add it here or it'll
+  // be missing during the loop.
+  int cost = (ctx0 == 0) ? VP8BitCost(1, p0) : 0;
+
+  if (res->last < 0) {
+    return VP8BitCost(0, p0);
+  }
+
+  {   // precompute clamped levels and contexts, packed to 8b.
+    const uint8x16_t kCst2 = vdupq_n_u8(2);
+    const uint8x16_t kCst67 = vdupq_n_u8(MAX_VARIABLE_LEVEL);
+    const int16x8_t c0 = vld1q_s16(res->coeffs);
+    const int16x8_t c1 = vld1q_s16(res->coeffs + 8);
+    const uint16x8_t E0 = vreinterpretq_u16_s16(vabsq_s16(c0));
+    const uint16x8_t E1 = vreinterpretq_u16_s16(vabsq_s16(c1));
+    const uint8x16_t F = vcombine_u8(vqmovn_u16(E0), vqmovn_u16(E1));
+    const uint8x16_t G = vminq_u8(F, kCst2);   // context = 0,1,2
+    const uint8x16_t H = vminq_u8(F, kCst67);  // clamp_level in [0..67]
+
+    vst1q_u8(ctxs, G);
+    vst1q_u8(levels, H);
+
+    vst1q_u16(abs_levels, E0);
+    vst1q_u16(abs_levels + 8, E1);
+  }
+  for (; n < res->last; ++n) {
+    const int ctx = ctxs[n];
+    const int level = levels[n];
+    const int flevel = abs_levels[n];   // full level
+    cost += VP8LevelFixedCosts[flevel] + t[level];  // simplified VP8LevelCost()
+    t = costs[n + 1][ctx];
+  }
+  // Last coefficient is always non-zero
+  {
+    const int level = levels[n];
+    const int flevel = abs_levels[n];
+    assert(flevel != 0);
+    cost += VP8LevelFixedCosts[flevel] + t[level];
+    if (n < 15) {
+      const int b = VP8EncBands[n + 1];
+      const int ctx = ctxs[n];
+      const int last_p0 = res->prob[b][ctx][0];
+      cost += VP8BitCost(0, last_p0);
+    }
+  }
+  return cost;
+}
+
+//------------------------------------------------------------------------------
+// Entry point
+
+extern void VP8EncDspCostInitNEON(void);
+
+WEBP_TSAN_IGNORE_FUNCTION void VP8EncDspCostInitNEON(void) {
+  VP8SetResidualCoeffs = SetResidualCoeffs_NEON;
+  VP8GetResidualCost = GetResidualCost_NEON;
+}
+
+#else  // !WEBP_USE_NEON
+
+WEBP_DSP_INIT_STUB(VP8EncDspCostInitNEON)
+
+#endif  // WEBP_USE_NEON
diff --git a/src/dsp/quant.h b/src/dsp/quant.h
new file mode 100644
index 0000000..5ba6f9c
--- /dev/null
+++ b/src/dsp/quant.h
@@ -0,0 +1,70 @@
+// Copyright 2018 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+
+#ifndef WEBP_DSP_QUANT_H_
+#define WEBP_DSP_QUANT_H_
+
+#include "src/dsp/dsp.h"
+#include "src/webp/types.h"
+
+#if defined(WEBP_USE_NEON) && !defined(WEBP_ANDROID_NEON) && \
+    !defined(WEBP_HAVE_NEON_RTCD)
+#include <arm_neon.h>
+
+#define IsFlat IsFlat_NEON
+
+static uint32x2_t horizontal_add_uint32x4(const uint32x4_t a) {
+  const uint64x2_t b = vpaddlq_u32(a);
+  return vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
+                  vreinterpret_u32_u64(vget_high_u64(b)));
+}
+
+static WEBP_INLINE int IsFlat(const int16_t* levels, int num_blocks,
+                              int thresh) {
+  const int16x8_t tst_ones = vdupq_n_s16(-1);
+  uint32x4_t sum = vdupq_n_u32(0);
+
+  for (int i = 0; i < num_blocks; ++i) {
+    // Set DC to zero.
+    const int16x8_t a_0 = vsetq_lane_s16(0, vld1q_s16(levels), 0);
+    const int16x8_t a_1 = vld1q_s16(levels + 8);
+
+    const uint16x8_t b_0 = vshrq_n_u16(vtstq_s16(a_0, tst_ones), 15);
+    const uint16x8_t b_1 = vshrq_n_u16(vtstq_s16(a_1, tst_ones), 15);
+
+    sum = vpadalq_u16(sum, b_0);
+    sum = vpadalq_u16(sum, b_1);
+
+    levels += 16;
+  }
+  return thresh >= (int32_t)vget_lane_u32(horizontal_add_uint32x4(sum), 0);
+}
+
+#else
+
+#define IsFlat IsFlat_C
+
+static WEBP_INLINE int IsFlat(const int16_t* levels, int num_blocks,
+                              int thresh) {
+  int score = 0;
+  while (num_blocks-- > 0) {      // TODO(skal): refine positional scoring?
+    int i;
+    for (i = 1; i < 16; ++i) {    // omit DC, we're only interested in AC
+      score += (levels[i] != 0);
+      if (score > thresh) return 0;
+    }
+    levels += 16;
+  }
+  return 1;
+}
+
+#endif  // defined(WEBP_USE_NEON) && !defined(WEBP_ANDROID_NEON) &&
+        // !defined(WEBP_HAVE_NEON_RTCD)
+
+#endif  // WEBP_DSP_QUANT_H_
diff --git a/src/enc/histogram_enc.c b/src/enc/histogram_enc.c
index 4e49e0a..8ac6fa8 100644
--- a/src/enc/histogram_enc.c
+++ b/src/enc/histogram_enc.c
@@ -165,7 +165,7 @@
 void VP8LHistogramSetClear(VP8LHistogramSet* const set) {
   int i;
   const int cache_bits = set->histograms[0]->palette_code_bits_;
-  const int size = set->size;
+  const int size = set->max_size;
   const size_t total_size = HistogramSetTotalSize(size, cache_bits);
   uint8_t* memory = (uint8_t*)set;
 
@@ -180,6 +180,20 @@
   }
 }
 
+// Removes the histogram 'i' from 'set' by setting it to NULL.
+static void HistogramSetRemoveHistogram(VP8LHistogramSet* const set, int i,
+                                        int* const num_used) {
+  assert(set->histograms[i] != NULL);
+  set->histograms[i] = NULL;
+  --*num_used;
+  // If we remove the last valid one, shrink until the next valid one.
+  if (i == set->size - 1) {
+    while (set->size >= 1 && set->histograms[set->size - 1] == NULL) {
+      --set->size;
+    }
+  }
+}
+
 // -----------------------------------------------------------------------------
 
 void VP8LHistogramAddSinglePixOrCopy(VP8LHistogram* const histo,
@@ -447,7 +461,9 @@
 static double HistogramAddThresh(const VP8LHistogram* const a,
                                  const VP8LHistogram* const b,
                                  double cost_threshold) {
-  double cost = -a->bit_cost_;
+  double cost;
+  assert(a != NULL && b != NULL);
+  cost = -a->bit_cost_;
   GetCombinedHistogramEntropy(a, b, cost_threshold, &cost);
   return cost;
 }
@@ -561,14 +577,17 @@
 }
 
 // Copies the histograms and computes its bit_cost.
-static void HistogramCopyAndAnalyze(
-    VP8LHistogramSet* const orig_histo, VP8LHistogramSet* const image_histo) {
-  int i;
-  const int histo_size = orig_histo->size;
+static const uint16_t kInvalidHistogramSymbol = (uint16_t)(-1);
+static void HistogramCopyAndAnalyze(VP8LHistogramSet* const orig_histo,
+                                    VP8LHistogramSet* const image_histo,
+                                    int* const num_used,
+                                    uint16_t* const histogram_symbols) {
+  int i, cluster_id;
+  int num_used_orig = *num_used;
   VP8LHistogram** const orig_histograms = orig_histo->histograms;
   VP8LHistogram** const histograms = image_histo->histograms;
-  image_histo->size = 0;
-  for (i = 0; i < histo_size; ++i) {
+  assert(image_histo->max_size == orig_histo->max_size);
+  for (cluster_id = 0, i = 0; i < orig_histo->max_size; ++i) {
     VP8LHistogram* const histo = orig_histograms[i];
     UpdateHistogramCost(histo);
 
@@ -576,10 +595,19 @@
     // with no information (when they are skipped because of LZ77).
     if (!histo->is_used_[0] && !histo->is_used_[1] && !histo->is_used_[2]
         && !histo->is_used_[3] && !histo->is_used_[4]) {
-      continue;
+      // The first histogram is always used. If a histogram is empty, we set
+      // its id to be the same as the previous one: this will improve
+      // compressibility for later LZ77.
+      assert(i > 0);
+      HistogramSetRemoveHistogram(image_histo, i, num_used);
+      HistogramSetRemoveHistogram(orig_histo, i, &num_used_orig);
+      histogram_symbols[i] = kInvalidHistogramSymbol;
+    } else {
+      // Copy histograms from orig_histo[] to image_histo[].
+      HistogramCopy(histo, histograms[i]);
+      histogram_symbols[i] = cluster_id++;
+      assert(cluster_id <= image_histo->max_size);
     }
-    // Copy histograms from orig_histo[] to image_histo[].
-    HistogramCopy(histo, histograms[image_histo->size++]);
   }
 }
 
@@ -596,29 +624,33 @@
 
   // Analyze the dominant (literal, red and blue) entropy costs.
   for (i = 0; i < histo_size; ++i) {
+    if (histograms[i] == NULL) continue;
     UpdateDominantCostRange(histograms[i], &cost_range);
   }
 
   // bin-hash histograms on three of the dominant (literal, red and blue)
   // symbol costs and store the resulting bin_id for each histogram.
   for (i = 0; i < histo_size; ++i) {
+    // bin_map[i] is not set to a special value as its use will later be guarded
+    // by another check (histograms[i] == NULL).
+    if (histograms[i] == NULL) continue;
     bin_map[i] = GetHistoBinIndex(histograms[i], &cost_range, low_effort);
   }
 }
 
-// Compact image_histo[] by merging some histograms with same bin_id together if
-// it's advantageous.
+// Merges some histograms with the same bin_id together if it's advantageous.
+// Sets the remaining histograms to NULL.
 static void HistogramCombineEntropyBin(VP8LHistogramSet* const image_histo,
+                                       int *num_used,
+                                       const uint16_t* const clusters,
+                                       uint16_t* const cluster_mappings,
                                        VP8LHistogram* cur_combo,
                                        const uint16_t* const bin_map,
-                                       int bin_map_size, int num_bins,
+                                       int num_bins,
                                        double combine_cost_factor,
                                        int low_effort) {
   VP8LHistogram** const histograms = image_histo->histograms;
   int idx;
-  // Work in-place: processed histograms are put at the beginning of
-  // image_histo[]. At the end, we just have to truncate the array.
-  int size = 0;
   struct {
     int16_t first;    // position of the histogram that accumulates all
                       // histograms with the same bin_id
@@ -631,16 +663,19 @@
     bin_info[idx].num_combine_failures = 0;
   }
 
-  for (idx = 0; idx < bin_map_size; ++idx) {
-    const int bin_id = bin_map[idx];
-    const int first = bin_info[bin_id].first;
-    assert(size <= idx);
+  // By default, a cluster matches itself.
+  for (idx = 0; idx < *num_used; ++idx) cluster_mappings[idx] = idx;
+  for (idx = 0; idx < image_histo->size; ++idx) {
+    int bin_id, first;
+    if (histograms[idx] == NULL) continue;
+    bin_id = bin_map[idx];
+    first = bin_info[bin_id].first;
     if (first == -1) {
-      // just move histogram #idx to its final position
-      histograms[size] = histograms[idx];
-      bin_info[bin_id].first = size++;
+      bin_info[bin_id].first = idx;
     } else if (low_effort) {
       HistogramAdd(histograms[idx], histograms[first], histograms[first]);
+      HistogramSetRemoveHistogram(image_histo, idx, num_used);
+      cluster_mappings[clusters[idx]] = clusters[first];
     } else {
       // try to merge #idx into #first (both share the same bin_id)
       const double bit_cost = histograms[idx]->bit_cost_;
@@ -663,19 +698,18 @@
             bin_info[bin_id].num_combine_failures >= max_combine_failures) {
           // move the (better) merged histogram to its final slot
           HistogramSwap(&cur_combo, &histograms[first]);
+          HistogramSetRemoveHistogram(image_histo, idx, num_used);
+          cluster_mappings[clusters[idx]] = clusters[first];
         } else {
-          histograms[size++] = histograms[idx];
           ++bin_info[bin_id].num_combine_failures;
         }
-      } else {
-        histograms[size++] = histograms[idx];
       }
     }
   }
-  image_histo->size = size;
   if (low_effort) {
     // for low_effort case, update the final cost when everything is merged
-    for (idx = 0; idx < size; ++idx) {
+    for (idx = 0; idx < image_histo->size; ++idx) {
+      if (histograms[idx] == NULL) continue;
       UpdateHistogramCost(histograms[idx]);
     }
   }
@@ -706,16 +740,9 @@
   int max_size;
 } HistoQueue;
 
-static int HistoQueueInit(HistoQueue* const histo_queue, const int max_index) {
+static int HistoQueueInit(HistoQueue* const histo_queue, const int max_size) {
   histo_queue->size = 0;
-  // max_index^2 for the queue size is safe. If you look at
-  // HistogramCombineGreedy, and imagine that UpdateQueueFront always pushes
-  // data to the queue, you insert at most:
-  // - max_index*(max_index-1)/2 (the first two for loops)
-  // - max_index - 1 in the last for loop at the first iteration of the while
-  //   loop, max_index - 2 at the second iteration ... therefore
-  //   max_index*(max_index-1)/2 overall too
-  histo_queue->max_size = max_index * max_index;
+  histo_queue->max_size = max_size;
   // We allocate max_size + 1 because the last element at index "size" is
   // used as temporary data (and it could be up to max_size).
   histo_queue->queue = (HistogramPair*)WebPSafeMalloc(
@@ -778,6 +805,8 @@
   const VP8LHistogram* h2;
   HistogramPair pair;
 
+  // Stop here if the queue is full.
+  if (histo_queue->size == histo_queue->max_size) return 0.;
   assert(threshold <= 0.);
   if (idx1 > idx2) {
     const int tmp = idx2;
@@ -794,8 +823,6 @@
   // Do not even consider the pair if it does not improve the entropy.
   if (pair.cost_diff >= threshold) return 0.;
 
-  // We cannot add more elements than the capacity.
-  assert(histo_queue->size < histo_queue->max_size);
   histo_queue->queue[histo_queue->size++] = pair;
   HistoQueueUpdateHead(histo_queue, &histo_queue->queue[histo_queue->size - 1]);
 
@@ -806,42 +833,43 @@
 
 // Combines histograms by continuously choosing the one with the highest cost
 // reduction.
-static int HistogramCombineGreedy(VP8LHistogramSet* const image_histo) {
+static int HistogramCombineGreedy(VP8LHistogramSet* const image_histo,
+                                  int* const num_used) {
   int ok = 0;
-  int image_histo_size = image_histo->size;
+  const int image_histo_size = image_histo->size;
   int i, j;
   VP8LHistogram** const histograms = image_histo->histograms;
-  // Indexes of remaining histograms.
-  int* const clusters =
-      (int*)WebPSafeMalloc(image_histo_size, sizeof(*clusters));
   // Priority queue of histogram pairs.
   HistoQueue histo_queue;
 
-  if (!HistoQueueInit(&histo_queue, image_histo_size) || clusters == NULL) {
+  // image_histo_size^2 for the queue size is safe. If you look at
+  // HistogramCombineGreedy, and imagine that UpdateQueueFront always pushes
+  // data to the queue, you insert at most:
+  // - image_histo_size*(image_histo_size-1)/2 (the first two for loops)
+  // - image_histo_size - 1 in the last for loop at the first iteration of
+  //   the while loop, image_histo_size - 2 at the second iteration ...
+  //   therefore image_histo_size*(image_histo_size-1)/2 overall too
+  if (!HistoQueueInit(&histo_queue, image_histo_size * image_histo_size)) {
     goto End;
   }
 
   for (i = 0; i < image_histo_size; ++i) {
-    // Initialize clusters indexes.
-    clusters[i] = i;
+    if (image_histo->histograms[i] == NULL) continue;
     for (j = i + 1; j < image_histo_size; ++j) {
-      // Initialize positions array.
+      // Initialize queue.
+      if (image_histo->histograms[j] == NULL) continue;
       HistoQueuePush(&histo_queue, histograms, i, j, 0.);
     }
   }
 
-  while (image_histo_size > 1 && histo_queue.size > 0) {
+  while (histo_queue.size > 0) {
     const int idx1 = histo_queue.queue[0].idx1;
     const int idx2 = histo_queue.queue[0].idx2;
     HistogramAdd(histograms[idx2], histograms[idx1], histograms[idx1]);
     histograms[idx1]->bit_cost_ = histo_queue.queue[0].cost_combo;
+
     // Remove merged histogram.
-    for (i = 0; i + 1 < image_histo_size; ++i) {
-      if (clusters[i] >= idx2) {
-        clusters[i] = clusters[i + 1];
-      }
-    }
-    --image_histo_size;
+    HistogramSetRemoveHistogram(image_histo, idx2, num_used);
 
     // Remove pairs intersecting the just combined best pair.
     for (i = 0; i < histo_queue.size;) {
@@ -856,24 +884,15 @@
     }
 
     // Push new pairs formed with combined histogram to the queue.
-    for (i = 0; i < image_histo_size; ++i) {
-      if (clusters[i] != idx1) {
-        HistoQueuePush(&histo_queue, histograms, idx1, clusters[i], 0.);
-      }
-    }
-  }
-  // Move remaining histograms to the beginning of the array.
-  for (i = 0; i < image_histo_size; ++i) {
-    if (i != clusters[i]) {  // swap the two histograms
-      HistogramSwap(&histograms[i], &histograms[clusters[i]]);
+    for (i = 0; i < image_histo->size; ++i) {
+      if (i == idx1 || image_histo->histograms[i] == NULL) continue;
+      HistoQueuePush(&histo_queue, image_histo->histograms, idx1, i, 0.);
     }
   }
 
-  image_histo->size = image_histo_size;
   ok = 1;
 
  End:
-  WebPSafeFree(clusters);
   HistoQueueClear(&histo_queue);
   return ok;
 }
@@ -881,47 +900,69 @@
 // Perform histogram aggregation using a stochastic approach.
 // 'do_greedy' is set to 1 if a greedy approach needs to be performed
 // afterwards, 0 otherwise.
+static int PairComparison(const void* idx1, const void* idx2) {
+  // To be used with bsearch: <0 when *idx1<*idx2, >0 if >, 0 when ==.
+  return (*(int*) idx1 - *(int*) idx2);
+}
 static int HistogramCombineStochastic(VP8LHistogramSet* const image_histo,
-                                      int min_cluster_size,
+                                      int* const num_used, int min_cluster_size,
                                       int* const do_greedy) {
-  int iter;
+  int j, iter;
   uint32_t seed = 1;
   int tries_with_no_success = 0;
-  int image_histo_size = image_histo->size;
-  const int outer_iters = image_histo_size;
+  const int outer_iters = *num_used;
   const int num_tries_no_success = outer_iters / 2;
   VP8LHistogram** const histograms = image_histo->histograms;
-  // Priority queue of histogram pairs. Its size of "kCostHeapSizeSqrt"^2
+  // Priority queue of histogram pairs. Its size of 'kHistoQueueSize'
   // impacts the quality of the compression and the speed: the smaller the
   // faster but the worse for the compression.
   HistoQueue histo_queue;
-  const int kHistoQueueSizeSqrt = 3;
+  const int kHistoQueueSize = 9;
   int ok = 0;
+  // Mapping from an index in image_histo with no NULL histogram to the
+  // full-blown image_histo.
+  int* mappings;
 
-  if (!HistoQueueInit(&histo_queue, kHistoQueueSizeSqrt)) {
+  if (*num_used < min_cluster_size) {
+    *do_greedy = 1;
+    return 1;
+  }
+
+  mappings = (int*) WebPSafeMalloc(*num_used, sizeof(*mappings));
+  if (mappings == NULL || !HistoQueueInit(&histo_queue, kHistoQueueSize)) {
     goto End;
   }
+  // Fill the initial mapping.
+  for (j = 0, iter = 0; iter < image_histo->size; ++iter) {
+    if (histograms[iter] == NULL) continue;
+    mappings[j++] = iter;
+  }
+  assert(j == *num_used);
+
   // Collapse similar histograms in 'image_histo'.
-  ++min_cluster_size;
-  for (iter = 0; iter < outer_iters && image_histo_size >= min_cluster_size &&
-                 ++tries_with_no_success < num_tries_no_success;
+  for (iter = 0;
+       iter < outer_iters && *num_used >= min_cluster_size &&
+           ++tries_with_no_success < num_tries_no_success;
        ++iter) {
+    int* mapping_index;
     double best_cost =
         (histo_queue.size == 0) ? 0. : histo_queue.queue[0].cost_diff;
     int best_idx1 = -1, best_idx2 = 1;
-    int j;
-    const uint32_t rand_range = (image_histo_size - 1) * image_histo_size;
-    // image_histo_size / 2 was chosen empirically. Less means faster but worse
+    const uint32_t rand_range = (*num_used - 1) * (*num_used);
+    // (*num_used) / 2 was chosen empirically. Less means faster but worse
     // compression.
-    const int num_tries = image_histo_size / 2;
+    const int num_tries = (*num_used) / 2;
 
-    for (j = 0; j < num_tries; ++j) {
+    // Pick random samples.
+    for (j = 0; *num_used >= 2 && j < num_tries; ++j) {
       double curr_cost;
       // Choose two different histograms at random and try to combine them.
       const uint32_t tmp = MyRand(&seed) % rand_range;
-      const uint32_t idx1 = tmp / (image_histo_size - 1);
-      uint32_t idx2 = tmp % (image_histo_size - 1);
+      uint32_t idx1 = tmp / (*num_used - 1);
+      uint32_t idx2 = tmp % (*num_used - 1);
       if (idx2 >= idx1) ++idx2;
+      idx1 = mappings[idx1];
+      idx2 = mappings[idx2];
 
       // Calculate cost reduction on combination.
       curr_cost =
@@ -934,18 +975,21 @@
     }
     if (histo_queue.size == 0) continue;
 
-    // Merge the two best histograms.
+    // Get the best histograms.
     best_idx1 = histo_queue.queue[0].idx1;
     best_idx2 = histo_queue.queue[0].idx2;
     assert(best_idx1 < best_idx2);
-    HistogramAddEval(histograms[best_idx1], histograms[best_idx2],
-                     histograms[best_idx1], 0);
-    // Swap the best_idx2 histogram with the last one (which is now unused).
-    --image_histo_size;
-    if (best_idx2 != image_histo_size) {
-      HistogramSwap(&histograms[image_histo_size], &histograms[best_idx2]);
-    }
-    histograms[image_histo_size] = NULL;
+    // Pop best_idx2 from mappings.
+    mapping_index = (int*) bsearch(&best_idx2, mappings, *num_used,
+                                   sizeof(best_idx2), &PairComparison);
+    assert(mapping_index != NULL);
+    memmove(mapping_index, mapping_index + 1, sizeof(*mapping_index) *
+        ((*num_used) - (mapping_index - mappings) - 1));
+    // Merge the histograms and remove best_idx2 from the queue.
+    HistogramAdd(histograms[best_idx2], histograms[best_idx1],
+                 histograms[best_idx1]);
+    histograms[best_idx1]->bit_cost_ = histo_queue.queue[0].cost_combo;
+    HistogramSetRemoveHistogram(image_histo, best_idx2, num_used);
     // Parse the queue and update each pair that deals with best_idx1,
     // best_idx2 or image_histo_size.
     for (j = 0; j < histo_queue.size;) {
@@ -968,12 +1012,6 @@
         p->idx2 = best_idx1;
         do_eval = 1;
       }
-      if (p->idx2 == image_histo_size) {
-        // No need to re-evaluate here as it does not involve a pair
-        // containing best_idx1 or best_idx2.
-        p->idx2 = best_idx2;
-      }
-      assert(p->idx2 < image_histo_size);
       // Make sure the index order is respected.
       if (p->idx1 > p->idx2) {
         const int tmp = p->idx2;
@@ -991,15 +1029,14 @@
       HistoQueueUpdateHead(&histo_queue, p);
       ++j;
     }
-
     tries_with_no_success = 0;
   }
-  image_histo->size = image_histo_size;
-  *do_greedy = (image_histo->size <= min_cluster_size);
+  *do_greedy = (*num_used <= min_cluster_size);
   ok = 1;
 
 End:
   HistoQueueClear(&histo_queue);
+  WebPSafeFree(mappings);
   return ok;
 }
 
@@ -1007,23 +1044,29 @@
 // Histogram refinement
 
 // Find the best 'out' histogram for each of the 'in' histograms.
+// At call-time, 'out' contains the histograms of the clusters.
 // Note: we assume that out[]->bit_cost_ is already up-to-date.
 static void HistogramRemap(const VP8LHistogramSet* const in,
-                           const VP8LHistogramSet* const out,
+                           VP8LHistogramSet* const out,
                            uint16_t* const symbols) {
   int i;
   VP8LHistogram** const in_histo = in->histograms;
   VP8LHistogram** const out_histo = out->histograms;
-  const int in_size = in->size;
+  const int in_size = out->max_size;
   const int out_size = out->size;
   if (out_size > 1) {
     for (i = 0; i < in_size; ++i) {
       int best_out = 0;
       double best_bits = MAX_COST;
       int k;
+      if (in_histo[i] == NULL) {
+        // Arbitrarily set to the previous value if unused to help future LZ77.
+        symbols[i] = symbols[i - 1];
+        continue;
+      }
       for (k = 0; k < out_size; ++k) {
-        const double cur_bits =
-            HistogramAddThresh(out_histo[k], in_histo[i], best_bits);
+        double cur_bits;
+        cur_bits = HistogramAddThresh(out_histo[k], in_histo[i], best_bits);
         if (k == 0 || cur_bits < best_bits) {
           best_bits = cur_bits;
           best_out = k;
@@ -1039,12 +1082,13 @@
   }
 
   // Recompute each out based on raw and symbols.
-  for (i = 0; i < out_size; ++i) {
-    HistogramClear(out_histo[i]);
-  }
+  VP8LHistogramSetClear(out);
+  out->size = out_size;
 
   for (i = 0; i < in_size; ++i) {
-    const int idx = symbols[i];
+    int idx;
+    if (in_histo[i] == NULL) continue;
+    idx = symbols[i];
     HistogramAdd(in_histo[i], out_histo[idx], out_histo[idx]);
   }
 }
@@ -1060,6 +1104,70 @@
   return combine_cost_factor;
 }
 
+// Given a HistogramSet 'set', the mapping of clusters 'cluster_mappings' and
+// the current assignment of the cells in 'symbols', merge the clusters and
+// assign the smallest possible cluster values.
+static void OptimizeHistogramSymbols(const VP8LHistogramSet* const set,
+                                     uint16_t* const cluster_mappings,
+                                     int num_clusters,
+                                     uint16_t* const cluster_mappings_tmp,
+                                     uint16_t* const symbols) {
+  int i, cluster_max;
+  int do_continue = 1;
+  // First, assign the lowest cluster to each pixel.
+  while (do_continue) {
+    do_continue = 0;
+    for (i = 0; i < num_clusters; ++i) {
+      int k;
+      k = cluster_mappings[i];
+      while (k != cluster_mappings[k]) {
+        cluster_mappings[k] = cluster_mappings[cluster_mappings[k]];
+        k = cluster_mappings[k];
+      }
+      if (k != cluster_mappings[i]) {
+        do_continue = 1;
+        cluster_mappings[i] = k;
+      }
+    }
+  }
+  // Create a mapping from a cluster id to its minimal version.
+  cluster_max = 0;
+  memset(cluster_mappings_tmp, 0,
+         set->max_size * sizeof(*cluster_mappings_tmp));
+  assert(cluster_mappings[0] == 0);
+  // Re-map the ids.
+  for (i = 0; i < set->max_size; ++i) {
+    int cluster;
+    if (symbols[i] == kInvalidHistogramSymbol) continue;
+    cluster = cluster_mappings[symbols[i]];
+    assert(symbols[i] < num_clusters);
+    if (cluster > 0 && cluster_mappings_tmp[cluster] == 0) {
+      ++cluster_max;
+      cluster_mappings_tmp[cluster] = cluster_max;
+    }
+    symbols[i] = cluster_mappings_tmp[cluster];
+  }
+
+  // Make sure all cluster values are used.
+  cluster_max = 0;
+  for (i = 0; i < set->max_size; ++i) {
+    if (symbols[i] == kInvalidHistogramSymbol) continue;
+    if (symbols[i] <= cluster_max) continue;
+    ++cluster_max;
+    assert(symbols[i] == cluster_max);
+  }
+}
+
+static void RemoveEmptyHistograms(VP8LHistogramSet* const image_histo) {
+  uint32_t size;
+  int i;
+  for (i = 0, size = 0; i < image_histo->size; ++i) {
+    if (image_histo->histograms[i] == NULL) continue;
+    image_histo->histograms[size++] = image_histo->histograms[i];
+  }
+  image_histo->size = size;
+}
+
 int VP8LGetHistoImageSymbols(int xsize, int ysize,
                              const VP8LBackwardRefs* const refs,
                              int quality, int low_effort,
@@ -1078,27 +1186,36 @@
   // maximum quality q==100 (to preserve the compression gains at that level).
   const int entropy_combine_num_bins = low_effort ? NUM_PARTITIONS : BIN_SIZE;
   int entropy_combine;
-
-  if (orig_histo == NULL) goto Error;
+  uint16_t* const map_tmp =
+      WebPSafeMalloc(2 * image_histo_raw_size, sizeof(map_tmp));
+  uint16_t* const cluster_mappings = map_tmp + image_histo_raw_size;
+  int num_used = image_histo_raw_size;
+  if (orig_histo == NULL || map_tmp == NULL) goto Error;
 
   // Construct the histograms from backward references.
   HistogramBuild(xsize, histo_bits, refs, orig_histo);
   // Copies the histograms and computes its bit_cost.
-  HistogramCopyAndAnalyze(orig_histo, image_histo);
+  // histogram_symbols is optimized
+  HistogramCopyAndAnalyze(orig_histo, image_histo, &num_used,
+                          histogram_symbols);
+
   entropy_combine =
-      (image_histo->size > entropy_combine_num_bins * 2) && (quality < 100);
+      (num_used > entropy_combine_num_bins * 2) && (quality < 100);
+
   if (entropy_combine) {
-    const int bin_map_size = image_histo->size;
-    // Reuse histogram_symbols storage. By definition, it's guaranteed to be ok.
-    uint16_t* const bin_map = histogram_symbols;
+    uint16_t* const bin_map = map_tmp;
     const double combine_cost_factor =
         GetCombineCostFactor(image_histo_raw_size, quality);
+    const uint32_t num_clusters = num_used;
 
     HistogramAnalyzeEntropyBin(image_histo, bin_map, low_effort);
     // Collapse histograms with similar entropy.
-    HistogramCombineEntropyBin(image_histo, tmp_histo, bin_map, bin_map_size,
+    HistogramCombineEntropyBin(image_histo, &num_used, histogram_symbols,
+                               cluster_mappings, tmp_histo, bin_map,
                                entropy_combine_num_bins, combine_cost_factor,
                                low_effort);
+    OptimizeHistogramSymbols(image_histo, cluster_mappings, num_clusters,
+                             map_tmp, histogram_symbols);
   }
 
   // Don't combine the histograms using stochastic and greedy heuristics for
@@ -1108,21 +1225,26 @@
     // cubic ramp between 1 and MAX_HISTO_GREEDY:
     const int threshold_size = (int)(1 + (x * x * x) * (MAX_HISTO_GREEDY - 1));
     int do_greedy;
-    if (!HistogramCombineStochastic(image_histo, threshold_size, &do_greedy)) {
+    if (!HistogramCombineStochastic(image_histo, &num_used, threshold_size,
+                                    &do_greedy)) {
       goto Error;
     }
-    if (do_greedy && !HistogramCombineGreedy(image_histo)) {
-      goto Error;
+    if (do_greedy) {
+      RemoveEmptyHistograms(image_histo);
+      if (!HistogramCombineGreedy(image_histo, &num_used)) {
+        goto Error;
+      }
     }
   }
 
-  // TODO(vrabaud): Optimize HistogramRemap for low-effort compression mode.
   // Find the optimal map from original histograms to the final ones.
+  RemoveEmptyHistograms(image_histo);
   HistogramRemap(orig_histo, image_histo, histogram_symbols);
 
   ok = 1;
 
  Error:
   VP8LFreeHistogramSet(orig_histo);
+  WebPSafeFree(map_tmp);
   return ok;
 }
diff --git a/src/enc/predictor_enc.c b/src/enc/predictor_enc.c
index f3715f5..802e896 100644
--- a/src/enc/predictor_enc.c
+++ b/src/enc/predictor_enc.c
@@ -177,12 +177,15 @@
   }
 }
 
+static WEBP_INLINE uint8_t NearLosslessDiff(uint8_t a, uint8_t b) {
+  return (uint8_t)((((int)(a) - (int)(b))) & 0xff);
+}
+
 // Quantize every component of the difference between the actual pixel value and
 // its prediction to a multiple of a quantization (a power of 2, not larger than
 // max_quantization which is a power of 2, smaller than max_diff). Take care if
 // value and predict have undergone subtract green, which means that red and
 // blue are represented as offsets from green.
-#define NEAR_LOSSLESS_DIFF(a, b) (uint8_t)((((int)(a) - (int)(b))) & 0xff)
 static uint32_t NearLossless(uint32_t value, uint32_t predict,
                              int max_quantization, int max_diff,
                              int used_subtract_green) {
@@ -199,7 +202,7 @@
   }
   if ((value >> 24) == 0 || (value >> 24) == 0xff) {
     // Preserve transparency of fully transparent or fully opaque pixels.
-    a = NEAR_LOSSLESS_DIFF(value >> 24, predict >> 24);
+    a = NearLosslessDiff(value >> 24, predict >> 24);
   } else {
     a = NearLosslessComponent(value >> 24, predict >> 24, 0xff, quantization);
   }
@@ -212,16 +215,15 @@
     // The amount by which green has been adjusted during quantization. It is
     // subtracted from red and blue for compensation, to avoid accumulating two
     // quantization errors in them.
-    green_diff = NEAR_LOSSLESS_DIFF(new_green, value >> 8);
+    green_diff = NearLosslessDiff(new_green, value >> 8);
   }
-  r = NearLosslessComponent(NEAR_LOSSLESS_DIFF(value >> 16, green_diff),
+  r = NearLosslessComponent(NearLosslessDiff(value >> 16, green_diff),
                             (predict >> 16) & 0xff, 0xff - new_green,
                             quantization);
-  b = NearLosslessComponent(NEAR_LOSSLESS_DIFF(value, green_diff),
+  b = NearLosslessComponent(NearLosslessDiff(value, green_diff),
                             predict & 0xff, 0xff - new_green, quantization);
   return ((uint32_t)a << 24) | ((uint32_t)r << 16) | ((uint32_t)g << 8) | b;
 }
-#undef NEAR_LOSSLESS_DIFF
 #endif  // (WEBP_NEAR_LOSSLESS == 1)
 
 // Stores the difference between the pixel and its prediction in "out".
diff --git a/src/enc/quant_enc.c b/src/enc/quant_enc.c
index 35bfaf2..03c682e 100644
--- a/src/enc/quant_enc.c
+++ b/src/enc/quant_enc.c
@@ -15,6 +15,7 @@
 #include <math.h>
 #include <stdlib.h>  // for abs()
 
+#include "src/dsp/quant.h"
 #include "src/enc/vp8i_enc.h"
 #include "src/enc/cost_enc.h"
 
@@ -977,19 +978,6 @@
   SwapPtr(&it->yuv_out_, &it->yuv_out2_);
 }
 
-static score_t IsFlat(const int16_t* levels, int num_blocks, score_t thresh) {
-  score_t score = 0;
-  while (num_blocks-- > 0) {      // TODO(skal): refine positional scoring?
-    int i;
-    for (i = 1; i < 16; ++i) {    // omit DC, we're only interested in AC
-      score += (levels[i] != 0);
-      if (score > thresh) return 0;
-    }
-    levels += 16;
-  }
-  return 1;
-}
-
 static void PickBestIntra16(VP8EncIterator* const it, VP8ModeScore* rd) {
   const int kNumBlocks = 16;
   VP8SegmentInfo* const dqm = &it->enc_->dqm_[it->mb_->segment_];
diff --git a/src/enc/vp8i_enc.h b/src/enc/vp8i_enc.h
index 92439fe..3a1967d 100644
--- a/src/enc/vp8i_enc.h
+++ b/src/enc/vp8i_enc.h
@@ -32,7 +32,7 @@
 // version numbers
 #define ENC_MAJ_VERSION 1
 #define ENC_MIN_VERSION 0
-#define ENC_REV_VERSION 1
+#define ENC_REV_VERSION 2
 
 enum { MAX_LF_LEVELS = 64,       // Maximum loop filter level
        MAX_VARIABLE_LEVEL = 67,  // last (inclusive) level with variable cost
diff --git a/src/enc/vp8l_enc.c b/src/enc/vp8l_enc.c
index 2713edc..2efd403 100644
--- a/src/enc/vp8l_enc.c
+++ b/src/enc/vp8l_enc.c
@@ -462,6 +462,7 @@
   for (i = 0; i < histogram_image_size; ++i) {
     const VP8LHistogram* const histo = histogram_image->histograms[i];
     HuffmanTreeCode* const codes = &huffman_codes[5 * i];
+    assert(histo != NULL);
     for (k = 0; k < 5; ++k) {
       const int num_symbols =
           (k == 0) ? VP8LHistogramNumCodes(histo->palette_code_bits_) :
diff --git a/src/mux/muxi.h b/src/mux/muxi.h
index df9f74c..3e9d8c4 100644
--- a/src/mux/muxi.h
+++ b/src/mux/muxi.h
@@ -29,7 +29,7 @@
 
 #define MUX_MAJ_VERSION 1
 #define MUX_MIN_VERSION 0
-#define MUX_REV_VERSION 1
+#define MUX_REV_VERSION 2
 
 // Chunk object.
 typedef struct WebPChunk WebPChunk;
diff --git a/src/utils/bit_writer_utils.c b/src/utils/bit_writer_utils.c
index f4f476c..7f83b4c 100644
--- a/src/utils/bit_writer_utils.c
+++ b/src/utils/bit_writer_utils.c
@@ -248,6 +248,7 @@
   dst->bits_ = src->bits_;
   dst->used_ = src->used_;
   dst->error_ = src->error_;
+  dst->cur_ = dst->buf_ + current_size;
   return 1;
 }
 
diff --git a/src/utils/utils.h b/src/utils/utils.h
index da97b5d..c7620f9 100644
--- a/src/utils/utils.h
+++ b/src/utils/utils.h
@@ -107,19 +107,6 @@
   PutLE16(data + 2, (int)(val >> 16));
 }
 
-// Returns 31 ^ clz(n) = log2(n). This is the default C-implementation, either
-// based on table or not. Can be used as fallback if clz() is not available.
-#define WEBP_NEED_LOG_TABLE_8BIT
-extern const uint8_t WebPLogTable8bit[256];
-static WEBP_INLINE int WebPLog2FloorC(uint32_t n) {
-  int log_value = 0;
-  while (n >= 256) {
-    log_value += 8;
-    n >>= 8;
-  }
-  return log_value + WebPLogTable8bit[n];
-}
-
 // Returns (int)floor(log2(n)). n must be > 0.
 // use GNU builtins where available.
 #if defined(__GNUC__) && \
@@ -138,6 +125,19 @@
   return first_set_bit;
 }
 #else   // default: use the C-version.
+// Returns 31 ^ clz(n) = log2(n). This is the default C-implementation, either
+// based on table or not. Can be used as fallback if clz() is not available.
+#define WEBP_NEED_LOG_TABLE_8BIT
+extern const uint8_t WebPLogTable8bit[256];
+static WEBP_INLINE int WebPLog2FloorC(uint32_t n) {
+  int log_value = 0;
+  while (n >= 256) {
+    log_value += 8;
+    n >>= 8;
+  }
+  return log_value + WebPLogTable8bit[n];
+}
+
 static WEBP_INLINE int BitsLog2Floor(uint32_t n) { return WebPLog2FloorC(n); }
 #endif
 
diff --git a/src/webp/decode.h b/src/webp/decode.h
index 95d31e7..ae8bfe8 100644
--- a/src/webp/decode.h
+++ b/src/webp/decode.h
@@ -42,6 +42,12 @@
 // This function will also validate the header, returning true on success,
 // false otherwise. '*width' and '*height' are only valid on successful return.
 // Pointers 'width' and 'height' can be passed NULL if deemed irrelevant.
+// Note: The following chunk sequences (before the raw VP8/VP8L data) are
+// considered valid by this function:
+// RIFF + VP8(L)
+// RIFF + VP8X + (optional chunks) + VP8(L)
+// ALPH + VP8 <-- Not a valid WebP format: only allowed for internal purposes.
+// VP8(L)     <-- Not a valid WebP format: only allowed for internal purposes.
 WEBP_EXTERN int WebPGetInfo(const uint8_t* data, size_t data_size,
                             int* width, int* height);
 
@@ -425,6 +431,12 @@
 // Returns VP8_STATUS_OK when the features are successfully retrieved. Returns
 // VP8_STATUS_NOT_ENOUGH_DATA when more data is needed to retrieve the
 // features from headers. Returns error in other cases.
+// Note: The following chunk sequences (before the raw VP8/VP8L data) are
+// considered valid by this function:
+// RIFF + VP8(L)
+// RIFF + VP8X + (optional chunks) + VP8(L)
+// ALPH + VP8 <-- Not a valid WebP format: only allowed for internal purposes.
+// VP8(L)     <-- Not a valid WebP format: only allowed for internal purposes.
 static WEBP_INLINE VP8StatusCode WebPGetFeatures(
     const uint8_t* data, size_t data_size,
     WebPBitstreamFeatures* features) {