Add BlobCache.

Change-Id: Ie4c6fcb9fe5b858779030a0da7b639cb99de74c8
diff --git a/new3d/src/com/android/gallery3d/data/BlobCache.java b/new3d/src/com/android/gallery3d/data/BlobCache.java
new file mode 100644
index 0000000..38a8985
--- /dev/null
+++ b/new3d/src/com/android/gallery3d/data/BlobCache.java
@@ -0,0 +1,612 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This is an on-disk cache which maps a 64-bit key to a byte array.
+//
+// It consists of three files: one index file and two data files. One of the
+// data files is "active", and the other is "inactive". New entries are
+// appended into the active region until it reaches the size limit. At that
+// point the active file and the inactive file are swapped, and the new active
+// file is truncated to empty (and the index for that file is also cleared).
+// The index is a hash table with linear probing. When the load factor of the
+// active region reaches 0.5, the same swap happens as when the size limit is
+// reached.
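+//
+// As an illustrative example (numbers borrowed from the unit tests, not a
+// required configuration): with maxEntries = 100 and maxBytes = 1000, the
+// regions swap once the active region holds 50 entries, or when appending
+// the next blob would push the active data file past 1000 bytes.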
+//
+// The index file format: (all numbers are stored in little-endian)
+// [0]  Magic number: 0xB3273030
+// [4]  MaxEntries: Max number of hash entries per region.
+// [8]  MaxBytes: Max number of data bytes per region (including header).
+// [12] ActiveRegion: The active growing region: 0 or 1.
+// [16] ActiveEntries: The number of hash entries used in the active region.
+// [20] ActiveBytes: The number of data bytes used in the active region.
+// [24] Reserved (should be 0).
+// [28] Checksum of [0..28).
+// [32] Hash entries for region 0. The size is X = (12 * MaxEntries bytes).
+// [32 + X] Hash entries for region 1. The size is also X.
+//
+// Each hash entry is 12 bytes: 8 bytes key and 4 bytes offset into the data
+// file. The offset is 0 when the slot is free. Note that 0 is a valid value
+// for key. The keys are used directly as indices into the hash table, so
+// they should be suitably distributed.
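+//
+// For a key k, the initial probe slot is k mod MaxEntries (adjusted to be
+// non-negative), and slot i of a region lives at byte offset
+// regionHashStart + i * 12 in the index file, mirroring the arithmetic in
+// lookupInternal() below.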
+//
+// Each data file stores data for one region. It starts with the magic
+// number 0xBD248510, followed by the concatenated blobs.
+//
+// The blob format:
+// [0]  Key of this blob
+// [8]  Checksum of this blob
+// [12] Offset of this blob
+// [16] Length of this blob (not including header)
+// [20] Blob
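+//
+// For example, the first blob in a region starts right after the 4-byte data
+// file magic, so its Offset field is 4; the next blob starts at offset
+// 4 + 20 + (length of the first blob).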
+//
+// Below is the interface for BlobCache. Instances of this class do not
+// support concurrent use by multiple threads.
+//
+// public BlobCache(String path, int maxEntries, int maxBytes, boolean reset) throws IOException;
+// public void insert(long key, byte[] data) throws IOException;
+// public byte[] lookup(long key) throws IOException;
+// public boolean lookup(LookupRequest req) throws IOException;
+// public void close();
+// public void syncIndex();
+// public void syncAll();
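+//
+// A minimal usage sketch (illustrative only; the path and size limits are
+// arbitrary):
+//
+//   BlobCache cache = new BlobCache("/path/to/cache", 100, 1000, false);
+//   cache.insert(42L, new byte[] {1, 2, 3});
+//   byte[] blob = cache.lookup(42L);  // null if the key is not found
+//   cache.close();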
+//
+package com.android.gallery3d.data;
+
+import android.util.Log;
+
+import java.io.Closeable;
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteOrder;
+import java.nio.MappedByteBuffer;
+import java.nio.channels.FileChannel;
+import java.util.zip.Adler32;
+
+public class BlobCache {
+    private static final String TAG = "BlobCache";
+
+    private static final int MAGIC_INDEX_FILE = 0xB3273030;
+    private static final int MAGIC_DATA_FILE = 0xBD248510;
+
+    // index header offset
+    private static final int IH_MAGIC = 0;
+    private static final int IH_MAX_ENTRIES = 4;
+    private static final int IH_MAX_BYTES = 8;
+    private static final int IH_ACTIVE_REGION = 12;
+    private static final int IH_ACTIVE_ENTRIES = 16;
+    private static final int IH_ACTIVE_BYTES = 20;
+    private static final int IH_CHECKSUM = 28;
+    private static final int INDEX_HEADER_SIZE = 32;
+
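+    // The data file header consists of only the 4-byte magic number.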
+    private static final int DATA_HEADER_SIZE = 4;
+
+    // blob header offset
+    private static final int BH_KEY = 0;
+    private static final int BH_CHECKSUM = 8;
+    private static final int BH_OFFSET = 12;
+    private static final int BH_LENGTH = 16;
+    private static final int BLOB_HEADER_SIZE = 20;
+
+    private RandomAccessFile mIndexFile;
+    private RandomAccessFile mDataFile0;
+    private RandomAccessFile mDataFile1;
+    private FileChannel mIndexChannel;
+    private MappedByteBuffer mIndexBuffer;
+
+    private int mMaxEntries;
+    private int mMaxBytes;
+    private int mActiveRegion;
+    private int mActiveEntries;
+    private int mActiveBytes;
+
+    private RandomAccessFile mActiveDataFile;
+    private RandomAccessFile mInactiveDataFile;
+    private int mActiveHashStart;
+    private int mInactiveHashStart;
+    private byte[] mIndexHeader = new byte[INDEX_HEADER_SIZE];
+    private byte[] mBlobHeader = new byte[BLOB_HEADER_SIZE];
+    private Adler32 mAdler32 = new Adler32();
+
+    // Creates the cache. Three files will be created:
+    // path + ".idx", path + ".0", and path + ".1"
+    // The ".0" file and the ".1" file each stores data for a region. Each of
+    // them can grow to the size specified by maxBytes. The maxEntries parameter
+    // specifies the maximum number of entries each region can have. If the
+    // "reset" parameter is true, the cache will be cleared before use.
+    public BlobCache(String path, int maxEntries, int maxBytes, boolean reset)
+            throws IOException {
+        mIndexFile = new RandomAccessFile(path + ".idx", "rw");
+        mDataFile0 = new RandomAccessFile(path + ".0", "rw");
+        mDataFile1 = new RandomAccessFile(path + ".1", "rw");
+
+        if (!reset && loadIndex()) {
+            return;
+        }
+
+        resetCache(maxEntries, maxBytes);
+
+        if (!loadIndex()) {
+            closeAll();
+            throw new IOException("unable to load index");
+        }
+    }
+
+    // Closes the cache and releases all resources. No other method should
+    // be called after this one.
+    public void close() {
+        syncAll();
+        closeAll();
+    }
+
+    private void closeAll() {
+        closeSilently(mIndexChannel);
+        closeSilently(mIndexFile);
+        closeSilently(mDataFile0);
+        closeSilently(mDataFile1);
+    }
+
+    // Returns true if the index is loaded successfully. After this method is
+    // called, mIndexHeader and the index header in the file should be kept in
+    // sync.
+    private boolean loadIndex() {
+        try {
+            mIndexFile.seek(0);
+            mDataFile0.seek(0);
+            mDataFile1.seek(0);
+
+            byte[] buf = mIndexHeader;
+            if (mIndexFile.read(buf) != INDEX_HEADER_SIZE) {
+                Log.w(TAG, "cannot read header");
+                return false;
+            }
+
+            if (readInt(buf, IH_MAGIC) != MAGIC_INDEX_FILE) {
+                Log.w(TAG, "cannot read header magic");
+                return false;
+            }
+
+            mMaxEntries = readInt(buf, IH_MAX_ENTRIES);
+            mMaxBytes = readInt(buf, IH_MAX_BYTES);
+            mActiveRegion = readInt(buf, IH_ACTIVE_REGION);
+            mActiveEntries = readInt(buf, IH_ACTIVE_ENTRIES);
+            mActiveBytes = readInt(buf, IH_ACTIVE_BYTES);
+
+            int sum = readInt(buf, IH_CHECKSUM);
+            if (checkSum(buf, 0, IH_CHECKSUM) != sum) {
+                Log.w(TAG, "header checksum does not match");
+                return false;
+            }
+
+            // Sanity check
+            if (mMaxEntries <= 0) {
+                Log.w(TAG, "invalid max entries");
+                return false;
+            }
+            if (mMaxBytes <= 0) {
+                Log.w(TAG, "invalid max bytes");
+                return false;
+            }
+            if (mActiveRegion != 0 && mActiveRegion != 1) {
+                Log.w(TAG, "invalid active region");
+                return false;
+            }
+            if (mActiveEntries < 0 || mActiveEntries > mMaxEntries) {
+                Log.w(TAG, "invalid active entries");
+                return false;
+            }
+            if (mActiveBytes < DATA_HEADER_SIZE || mActiveBytes > mMaxBytes) {
+                Log.w(TAG, "invalid active bytes");
+                return false;
+            }
+
+            // Make sure data file has magic
+            byte[] magic = new byte[4];
+            if (mDataFile0.read(magic) != 4) {
+                Log.w(TAG, "cannot read data file magic");
+                return false;
+            }
+            if (readInt(magic, 0) != MAGIC_DATA_FILE) {
+                Log.w(TAG, "invalid data file magic");
+                return false;
+            }
+            if (mDataFile1.read(magic) != 4) {
+                Log.w(TAG, "cannot read data file magic");
+                return false;
+            }
+            if (readInt(magic, 0) != MAGIC_DATA_FILE) {
+                Log.w(TAG, "invalid data file magic");
+                return false;
+            }
+
+            // Map index file to memory
+            mIndexChannel = mIndexFile.getChannel();
+            mIndexBuffer = mIndexChannel.map(FileChannel.MapMode.READ_WRITE,
+                    0, mIndexFile.length());
+            mIndexBuffer.order(ByteOrder.LITTLE_ENDIAN);
+
+            setActiveVariables();
+            return true;
+        } catch (IOException ex) {
+            Log.e(TAG, "loadIndex failed.", ex);
+            return false;
+        }
+    }
+
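+    // Sets up the active/inactive data file references and the hash region
+    // offsets according to mActiveRegion.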
+    private void setActiveVariables() throws IOException {
+        mActiveDataFile = (mActiveRegion == 0) ? mDataFile0 : mDataFile1;
+        mInactiveDataFile = (mActiveRegion == 1) ? mDataFile0 : mDataFile1;
+        mActiveDataFile.setLength(mActiveBytes);
+        mActiveDataFile.seek(mActiveBytes);
+
+        mActiveHashStart = INDEX_HEADER_SIZE;
+        mInactiveHashStart = INDEX_HEADER_SIZE;
+
+        if (mActiveRegion == 0) {
+            mInactiveHashStart += mMaxEntries * 12;
+        } else {
+            mActiveHashStart += mMaxEntries * 12;
+        }
+    }
+
+    private void resetCache(int maxEntries, int maxBytes) throws IOException {
+        mIndexFile.setLength(0);  // truncate to zero the index
+        mIndexFile.setLength(INDEX_HEADER_SIZE + maxEntries * 12 * 2);
+        byte[] buf = mIndexHeader;
+        writeInt(buf, IH_MAGIC, MAGIC_INDEX_FILE);
+        writeInt(buf, IH_MAX_ENTRIES, maxEntries);
+        writeInt(buf, IH_MAX_BYTES, maxBytes);
+        writeInt(buf, IH_ACTIVE_REGION, 0);
+        writeInt(buf, IH_ACTIVE_ENTRIES, 0);
+        writeInt(buf, IH_ACTIVE_BYTES, DATA_HEADER_SIZE);
+        writeInt(buf, IH_CHECKSUM, checkSum(buf, 0, IH_CHECKSUM));
+        mIndexFile.write(buf);
+
+        mDataFile0.setLength(0);
+        mDataFile1.setLength(0);
+        writeInt(buf, 0, MAGIC_DATA_FILE);
+        mDataFile0.write(buf, 0, 4);
+        mDataFile1.write(buf, 0, 4);
+    }
+
+    // Flip the active region and the inactive region.
+    private void flipRegion() throws IOException {
+        mActiveRegion = 1 - mActiveRegion;
+        mActiveEntries = 0;
+        mActiveBytes = DATA_HEADER_SIZE;
+
+        writeInt(mIndexHeader, IH_ACTIVE_REGION, mActiveRegion);
+        writeInt(mIndexHeader, IH_ACTIVE_ENTRIES, mActiveEntries);
+        writeInt(mIndexHeader, IH_ACTIVE_BYTES, mActiveBytes);
+        updateIndexHeader();
+
+        setActiveVariables();
+        clearHash(mActiveHashStart);
+        syncIndex();
+    }
+
+    // Sync mIndexHeader to the index file.
+    private void updateIndexHeader() {
+        writeInt(mIndexHeader, IH_CHECKSUM,
+                checkSum(mIndexHeader, 0, IH_CHECKSUM));
+        mIndexBuffer.position(0);
+        mIndexBuffer.put(mIndexHeader);
+    }
+
+    // Clear the hash table starting from the specified offset.
+    private void clearHash(int hashStart) {
+        byte[] zero = new byte[1024];
+        mIndexBuffer.position(hashStart);
+        for (int count = mMaxEntries * 12; count > 0;) {
+            int todo = Math.min(count, 1024);
+            mIndexBuffer.put(zero, 0, todo);
+            count -= todo;
+        }
+    }
+
+    // Inserts a (key, data) pair into the cache.
+    public void insert(long key, byte[] data) throws IOException {
+        if (DATA_HEADER_SIZE + BLOB_HEADER_SIZE + data.length > mMaxBytes) {
+            throw new RuntimeException("blob is too large!");
+        }
+
+        if (mActiveBytes + BLOB_HEADER_SIZE + data.length > mMaxBytes
+                || mActiveEntries * 2 >= mMaxEntries) {
+            flipRegion();
+        }
+
+        if (!lookupInternal(key, mActiveHashStart)) {
+            // If we don't have an existing entry with the same key, increase
+            // the entry count.
+            mActiveEntries++;
+            writeInt(mIndexHeader, IH_ACTIVE_ENTRIES, mActiveEntries);
+        }
+
+        insertInternal(key, data, data.length);
+        updateIndexHeader();
+    }
+
+    // Appends the data to the active file and updates the hash entry. The
+    // proper hash entry (suitable for insertion or replacement) must be
+    // pointed to by mSlotOffset.
+    private void insertInternal(long key, byte[] data, int length)
+            throws IOException {
+        byte[] header = mBlobHeader;
+        int sum = checkSum(data);
+        writeLong(header, BH_KEY, key);
+        writeInt(header, BH_CHECKSUM, sum);
+        writeInt(header, BH_OFFSET, mActiveBytes);
+        writeInt(header, BH_LENGTH, length);
+        mActiveDataFile.write(header);
+        mActiveDataFile.write(data, 0, length);
+
+        mIndexBuffer.putLong(mSlotOffset, key);
+        mIndexBuffer.putInt(mSlotOffset + 8, mActiveBytes);
+        mActiveBytes += BLOB_HEADER_SIZE + length;
+        writeInt(mIndexHeader, IH_ACTIVE_BYTES, mActiveBytes);
+    }
+
+    public static class LookupRequest {
+        public long key;        // input: the key to find
+        public byte[] buffer;   // input/output: the buffer to store the blob
+        public int length;      // output: the length of the blob
+    }
+
+    // This method is for one-off lookups. For repeated lookups, use the
+    // version that accepts a LookupRequest to avoid repeated memory
+    // allocation.
+    private LookupRequest mLookupRequest = new LookupRequest();
+    public byte[] lookup(long key) throws IOException {
+        mLookupRequest.key = key;
+        mLookupRequest.buffer = null;
+        if (lookup(mLookupRequest)) {
+            return mLookupRequest.buffer;
+        } else {
+            return null;
+        }
+    }
+
+    // Returns true if the associated blob for the given key is available.
+    // The blob is stored in the buffer pointed to by req.buffer, and its
+    // length is stored in req.length.
+    //
+    // The caller can supply a non-null buffer in req.buffer, and this method
+    // will try to use that buffer. If that buffer is not large enough, this
+    // method will allocate a new buffer and assign it to req.buffer.
+    //
+    // This method tries not to throw IOException even if the data file is
+    // corrupted, but it can still throw IOException if things get strange.
+    public boolean lookup(LookupRequest req) throws IOException {
+        // Look up in the active region first.
+        if (lookupInternal(req.key, mActiveHashStart)) {
+            if (getBlob(mActiveDataFile, mFileOffset, req)) {
+                return true;
+            }
+        }
+
+        // We want to copy the data from the inactive file to the active file
+        // if it's available. So we keep the offset of the hash entry so we can
+        // avoid looking it up again.
+        int insertOffset = mSlotOffset;
+
+        // Look up in the inactive region.
+        if (lookupInternal(req.key, mInactiveHashStart)) {
+            if (getBlob(mInactiveDataFile, mFileOffset, req)) {
+                // If we don't have enough space to insert this blob into
+                // the active file, just return it.
+                if (mActiveBytes + BLOB_HEADER_SIZE + req.length > mMaxBytes
+                    || mActiveEntries * 2 >= mMaxEntries) {
+                    return true;
+                }
+                // Otherwise copy it over.
+                mSlotOffset = insertOffset;
+                try {
+                    insertInternal(req.key, req.buffer, req.length);
+                    mActiveEntries++;
+                    writeInt(mIndexHeader, IH_ACTIVE_ENTRIES, mActiveEntries);
+                    updateIndexHeader();
+                } catch (Throwable t) {
+                    Log.e(TAG, "cannot copy over");
+                }
+                return true;
+            }
+        }
+
+        return false;
+    }
+
+    // Copies the blob for the specified offset in the specified file to
+    // req.buffer. If req.buffer is null or too small, allocate a buffer and
+    // assign it to req.buffer.
+    // Returns false if the blob is not available (either the index file is
+    // not in sync with the data file, or one of them is corrupted). The length
+    // of the blob is stored in the req.length variable.
+    private boolean getBlob(RandomAccessFile file, int offset,
+            LookupRequest req) throws IOException {
+        byte[] header = mBlobHeader;
+        long oldPosition = file.getFilePointer();
+        try {
+            file.seek(offset);
+            if (file.read(header) != BLOB_HEADER_SIZE) {
+                Log.w(TAG, "cannot read blob header");
+                return false;
+            }
+            long blobKey = readLong(header, BH_KEY);
+            if (blobKey != req.key) {
+                Log.w(TAG, "blob key does not match: " + blobKey);
+                return false;
+            }
+            int sum = readInt(header, BH_CHECKSUM);
+            int blobOffset = readInt(header, BH_OFFSET);
+            if (blobOffset != offset) {
+                Log.w(TAG, "blob offset does not match: " + blobOffset);
+                return false;
+            }
+            int length = readInt(header, BH_LENGTH);
+            if (length < 0 || length > mMaxBytes - offset - BLOB_HEADER_SIZE) {
+                Log.w(TAG, "invalid blob length: " + length);
+                return false;
+            }
+            if (req.buffer == null || req.buffer.length < length) {
+                req.buffer = new byte[length];
+            }
+
+            byte[] blob = req.buffer;
+            req.length = length;
+
+            if (file.read(blob, 0, length) != length) {
+                Log.w(TAG, "cannot read blob data");
+                return false;
+            }
+            if (checkSum(blob, 0, length) != sum) {
+                Log.w(TAG, "blob checksum does not match: " + sum);
+                return false;
+            }
+            return true;
+        } catch (Throwable t)  {
+            Log.e(TAG, "getBlob failed.", t);
+            return false;
+        } finally {
+            file.seek(oldPosition);
+        }
+    }
+
+    // Tries to look up a key in the specified hash region.
+    // Returns true if the lookup is successful.
+    // The slot offset in the index file is saved in mSlotOffset. If the lookup
+    // is successful, it's the slot found. Otherwise it's the slot suitable for
+    // insertion.
+    // If the lookup is successful, the file offset is also saved in
+    // mFileOffset.
+    private int mSlotOffset;
+    private int mFileOffset;
+    private boolean lookupInternal(long key, int hashStart) throws IOException {
+        int slot = (int) (key % mMaxEntries);
+        if (slot < 0) slot += mMaxEntries;
+        int slotBegin = slot;
+        while (true) {
+            int offset = hashStart + slot * 12;
+            long candidateKey = mIndexBuffer.getLong(offset);
+            int candidateOffset = mIndexBuffer.getInt(offset + 8);
+            if (candidateOffset == 0) {
+                mSlotOffset = offset;
+                return false;
+            } else if (candidateKey == key) {
+                mSlotOffset = offset;
+                mFileOffset = candidateOffset;
+                return true;
+            } else {
+                if (++slot >= mMaxEntries) {
+                    slot = 0;
+                }
+                if (slot == slotBegin) {
+                    Log.w(TAG, "corrupted index: clear the slot.");
+                    mIndexBuffer.putInt(hashStart + slot * 12 + 8, 0);
+                }
+            }
+        }
+    }
+
+    public void syncIndex() {
+        try {
+            mIndexBuffer.force();
+        } catch (Throwable t) {
+            Log.w(TAG, "sync index failed", t);
+        }
+    }
+
+    public void syncAll() {
+        syncIndex();
+        try {
+            mDataFile0.getFD().sync();
+        } catch (Throwable t) {
+            Log.w(TAG, "sync data file 0 failed", t);
+        }
+        try {
+            mDataFile1.getFD().sync();
+        } catch (Throwable t) {
+            Log.w(TAG, "sync data file 1 failed", t);
+        }
+    }
+
+    // This is for testing only.
+    //
+    // Returns the active count (mActiveEntries). This also verifies that
+    // the active count matches what's inside the hash region.
+    int getActiveCount() {
+        int count = 0;
+        for (int i = 0; i < mMaxEntries; i++) {
+            int offset = mActiveHashStart + i * 12;
+            int candidateOffset = mIndexBuffer.getInt(offset + 8);
+            if (candidateOffset != 0) ++count;
+        }
+        if (count == mActiveEntries) {
+            return count;
+        } else {
+            Log.e(TAG, "wrong active count: " + mActiveEntries + " vs " + count);
+            return -1;  // signal failure.
+        }
+    }
+
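+    // Computes the Adler-32 checksum of the entire data array.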
+    int checkSum(byte[] data) {
+        mAdler32.reset();
+        mAdler32.update(data);
+        return (int) mAdler32.getValue();
+    }
+
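+    // Computes the Adler-32 checksum of nbytes of data starting at offset.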
+    int checkSum(byte[] data, int offset, int nbytes) {
+        mAdler32.reset();
+        mAdler32.update(data, offset, nbytes);
+        return (int) mAdler32.getValue();
+    }
+
+    static void closeSilently(Closeable c) {
+        if (c == null) return;
+        try {
+            c.close();
+        } catch (Throwable t) {
+            // do nothing
+        }
+    }
+
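+    // Reads a 4-byte little-endian integer from buf starting at offset.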
+    static int readInt(byte[] buf, int offset) {
+        return (buf[offset] & 0xff)
+                | ((buf[offset + 1] & 0xff) << 8)
+                | ((buf[offset + 2] & 0xff) << 16)
+                | ((buf[offset + 3] & 0xff) << 24);
+    }
+
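+    // Reads an 8-byte little-endian long from buf starting at offset.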
+    static long readLong(byte[] buf, int offset) {
+        long result = buf[offset + 7] & 0xff;
+        for (int i = 6; i >= 0; i--) {
+            result = (result << 8) | (buf[offset + i] & 0xff);
+        }
+        return result;
+    }
+
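+    // Writes a 4-byte little-endian integer to buf starting at offset.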
+    static void writeInt(byte[] buf, int offset, int value) {
+        for (int i = 0; i < 4; i++) {
+            buf[offset + i] = (byte) (value & 0xff);
+            value >>= 8;
+        }
+    }
+
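+    // Writes an 8-byte little-endian long to buf starting at offset.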
+    static void writeLong(byte[] buf, int offset, long value) {
+        for (int i = 0; i < 8; i++) {
+            buf[offset + i] = (byte) (value & 0xff);
+            value >>= 8;
+        }
+    }
+}
diff --git a/new3d/tests/src/com/android/gallery3d/data/BlobCacheTest.java b/new3d/tests/src/com/android/gallery3d/data/BlobCacheTest.java
new file mode 100644
index 0000000..6321fb9
--- /dev/null
+++ b/new3d/tests/src/com/android/gallery3d/data/BlobCacheTest.java
@@ -0,0 +1,736 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.gallery3d.data;
+
+import android.test.AndroidTestCase;
+import android.test.suitebuilder.annotation.SmallTest;
+import android.test.suitebuilder.annotation.MediumTest;
+import android.test.suitebuilder.annotation.LargeTest;
+import android.util.Log;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.Random;
+
+public class BlobCacheTest extends AndroidTestCase {
+    private static final String TAG = "BlobCacheTest";
+
+    @SmallTest
+    public void testReadIntLong() {
+        byte[] buf = new byte[9];
+        assertEquals(0, BlobCache.readInt(buf, 0));
+        assertEquals(0, BlobCache.readLong(buf, 0));
+        buf[0] = 1;
+        assertEquals(1, BlobCache.readInt(buf, 0));
+        assertEquals(1, BlobCache.readLong(buf, 0));
+        buf[3] = 0x7f;
+        assertEquals(0x7f000001, BlobCache.readInt(buf, 0));
+        assertEquals(0x7f000001, BlobCache.readLong(buf, 0));
+        assertEquals(0x007f0000, BlobCache.readInt(buf, 1));
+        assertEquals(0x007f0000, BlobCache.readLong(buf, 1));
+        buf[3] = (byte) 0x80;
+        buf[7] = (byte) 0xA0;
+        buf[0] = 0;
+        assertEquals(0x80000000, BlobCache.readInt(buf, 0));
+        assertEquals(0xA000000080000000L, BlobCache.readLong(buf, 0));
+        for (int i = 0; i < 8; i++) {
+            buf[i] = (byte) (0x11 * (i+8));
+        }
+        assertEquals(0xbbaa9988, BlobCache.readInt(buf, 0));
+        assertEquals(0xffeeddccbbaa9988L, BlobCache.readLong(buf, 0));
+        buf[8] = 0x33;
+        assertEquals(0x33ffeeddccbbaa99L, BlobCache.readLong(buf, 1));
+    }
+
+    @SmallTest
+    public void testWriteIntLong() {
+        byte[] buf = new byte[8];
+        BlobCache.writeInt(buf, 0, 0x12345678);
+        assertEquals(0x78, buf[0]);
+        assertEquals(0x56, buf[1]);
+        assertEquals(0x34, buf[2]);
+        assertEquals(0x12, buf[3]);
+        assertEquals(0x00, buf[4]);
+        BlobCache.writeLong(buf, 0, 0xffeeddccbbaa9988L);
+        for (int i = 0; i < 8; i++) {
+            assertEquals((byte) (0x11 * (i+8)), buf[i]);
+        }
+    }
+
+    @MediumTest
+    public void testChecksum() throws IOException {
+        BlobCache bc = new BlobCache(TEST_FILE_NAME, MAX_ENTRIES, MAX_BYTES, true);
+        byte[] buf = new byte[0];
+        assertEquals(0x1, bc.checkSum(buf));
+        buf = new byte[1];
+        assertEquals(0x10001, bc.checkSum(buf));
+        buf[0] = 0x47;
+        assertEquals(0x480048, bc.checkSum(buf));
+        buf = new byte[3];
+        buf[0] = 0x10;
+        buf[1] = 0x30;
+        buf[2] = 0x01;
+        assertEquals(0x940042, bc.checkSum(buf));
+        assertEquals(0x310031, bc.checkSum(buf, 1, 1));
+        assertEquals(0x1, bc.checkSum(buf, 1, 0));
+        assertEquals(0x630032, bc.checkSum(buf, 1, 2));
+        buf = new byte[1024];
+        for (int i = 0; i < buf.length; i++) {
+            buf[i] = (byte)(i*i);
+        }
+        assertEquals(0x3574a610, bc.checkSum(buf));
+        bc.close();
+    }
+
+    private static final int HEADER_SIZE = 32;
+    private static final int DATA_HEADER_SIZE = 4;
+    private static final int BLOB_HEADER_SIZE = 20;
+
+    private static final String TEST_FILE_NAME = "/sdcard/btest";
+    private static final int MAX_ENTRIES = 100;
+    private static final int MAX_BYTES = 1000;
+    private static final int INDEX_SIZE = HEADER_SIZE + MAX_ENTRIES * 12 * 2;
+    private static final long KEY_0 = 0x1122334455667788L;
+    private static final long KEY_1 = 0x1122334455667789L;
+    private static final long KEY_2 = 0x112233445566778AL;
+    private static byte[] DATA_0 = new byte[10];
+    private static byte[] DATA_1 = new byte[10];
+
+    @MediumTest
+    public void testBasic() throws IOException {
+        String name = TEST_FILE_NAME;
+        BlobCache bc;
+        File idxFile = new File(name + ".idx");
+        File data0File = new File(name + ".0");
+        File data1File = new File(name + ".1");
+
+        // Create a brand new cache.
+        bc = new BlobCache(name, MAX_ENTRIES, MAX_BYTES, true);
+        bc.close();
+
+        // Make sure the initial state is correct.
+        assertTrue(idxFile.exists());
+        assertTrue(data0File.exists());
+        assertTrue(data1File.exists());
+        assertEquals(INDEX_SIZE, idxFile.length());
+        assertEquals(DATA_HEADER_SIZE, data0File.length());
+        assertEquals(DATA_HEADER_SIZE, data1File.length());
+        assertEquals(0, bc.getActiveCount());
+
+        // Re-open it.
+        bc = new BlobCache(name, MAX_ENTRIES, MAX_BYTES, false);
+        assertNull(bc.lookup(KEY_0));
+
+        // insert one blob
+        genData(DATA_0, 1);
+        bc.insert(KEY_0, DATA_0);
+        assertSameData(DATA_0, bc.lookup(KEY_0));
+        assertEquals(1, bc.getActiveCount());
+        bc.close();
+
+        // Make sure the file size is right.
+        assertEquals(INDEX_SIZE, idxFile.length());
+        assertEquals(DATA_HEADER_SIZE + BLOB_HEADER_SIZE + DATA_0.length,
+                data0File.length());
+        assertEquals(DATA_HEADER_SIZE, data1File.length());
+
+        // Re-open it and make sure we can get the old data
+        bc = new BlobCache(name, MAX_ENTRIES, MAX_BYTES, false);
+        assertSameData(DATA_0, bc.lookup(KEY_0));
+
+        // insert with the same key (but using a different blob)
+        genData(DATA_0, 2);
+        bc.insert(KEY_0, DATA_0);
+        assertSameData(DATA_0, bc.lookup(KEY_0));
+        assertEquals(1, bc.getActiveCount());
+        bc.close();
+
+        // Make sure the file size is right.
+        assertEquals(INDEX_SIZE, idxFile.length());
+        assertEquals(DATA_HEADER_SIZE + 2 * (BLOB_HEADER_SIZE + DATA_0.length),
+                data0File.length());
+        assertEquals(DATA_HEADER_SIZE, data1File.length());
+
+        // Re-open it and make sure we can get the old data
+        bc = new BlobCache(name, MAX_ENTRIES, MAX_BYTES, false);
+        assertSameData(DATA_0, bc.lookup(KEY_0));
+
+        // insert another key and make sure we can get both keys.
+        assertNull(bc.lookup(KEY_1));
+        genData(DATA_1, 3);
+        bc.insert(KEY_1, DATA_1);
+        assertSameData(DATA_0, bc.lookup(KEY_0));
+        assertSameData(DATA_1, bc.lookup(KEY_1));
+        assertEquals(2, bc.getActiveCount());
+        bc.close();
+
+        // Make sure the file size is right.
+        assertEquals(INDEX_SIZE, idxFile.length());
+        assertEquals(DATA_HEADER_SIZE + 3 * (BLOB_HEADER_SIZE + DATA_0.length),
+                data0File.length());
+        assertEquals(DATA_HEADER_SIZE, data1File.length());
+
+        // Re-open it and make sure we can get the old data
+        bc = new BlobCache(name, MAX_ENTRIES, MAX_BYTES, false);
+        assertSameData(DATA_0, bc.lookup(KEY_0));
+        assertSameData(DATA_1, bc.lookup(KEY_1));
+        assertEquals(2, bc.getActiveCount());
+        bc.close();
+    }
+
+    @MediumTest
+    public void testNegativeKey() throws IOException {
+        BlobCache bc = new BlobCache(TEST_FILE_NAME, MAX_ENTRIES, MAX_BYTES, true);
+
+        // insert one blob
+        genData(DATA_0, 1);
+        bc.insert(-123, DATA_0);
+        assertSameData(DATA_0, bc.lookup(-123));
+        bc.close();
+    }
+
+    @MediumTest
+    public void testEmptyBlob() throws IOException {
+        BlobCache bc = new BlobCache(TEST_FILE_NAME, MAX_ENTRIES, MAX_BYTES, true);
+
+        byte[] data = new byte[0];
+        bc.insert(123, data);
+        assertSameData(data, bc.lookup(123));
+        bc.close();
+    }
+
+    @MediumTest
+    public void testLookupRequest() throws IOException {
+        BlobCache bc = new BlobCache(TEST_FILE_NAME, MAX_ENTRIES, MAX_BYTES, true);
+
+        // insert one blob
+        genData(DATA_0, 1);
+        bc.insert(1, DATA_0);
+        assertSameData(DATA_0, bc.lookup(1));
+
+        // the same size buffer
+        byte[] buf = new byte[DATA_0.length];
+        BlobCache.LookupRequest req = new BlobCache.LookupRequest();
+        req.key = 1;
+        req.buffer = buf;
+        assertTrue(bc.lookup(req));
+        assertEquals(1, req.key);
+        assertSame(buf, req.buffer);
+        assertEquals(DATA_0.length, req.length);
+
+        // larger buffer
+        buf = new byte[DATA_0.length + 22];
+        req = new BlobCache.LookupRequest();
+        req.key = 1;
+        req.buffer = buf;
+        assertTrue(bc.lookup(req));
+        assertEquals(1, req.key);
+        assertSame(buf, req.buffer);
+        assertEquals(DATA_0.length, req.length);
+
+        // smaller buffer
+        buf = new byte[DATA_0.length - 1];
+        req = new BlobCache.LookupRequest();
+        req.key = 1;
+        req.buffer = buf;
+        assertTrue(bc.lookup(req));
+        assertEquals(1, req.key);
+        assertNotSame(buf, req.buffer);
+        assertEquals(DATA_0.length, req.length);
+        assertSameData(DATA_0, req.buffer, DATA_0.length);
+
+        // null buffer
+        req = new BlobCache.LookupRequest();
+        req.key = 1;
+        req.buffer = null;
+        assertTrue(bc.lookup(req));
+        assertEquals(1, req.key);
+        assertNotNull(req.buffer);
+        assertEquals(DATA_0.length, req.length);
+        assertSameData(DATA_0, req.buffer, DATA_0.length);
+
+        bc.close();
+    }
+
+    @MediumTest
+    public void testKeyCollision() throws IOException {
+        BlobCache bc = new BlobCache(TEST_FILE_NAME, MAX_ENTRIES, MAX_BYTES, true);
+
+        for (int i = 0; i < MAX_ENTRIES / 2; i++) {
+            genData(DATA_0, i);
+            long key = KEY_1 + i * MAX_ENTRIES;
+            bc.insert(key, DATA_0);
+        }
+
+        for (int i = 0; i < MAX_ENTRIES / 2; i++) {
+            genData(DATA_0, i);
+            long key = KEY_1 + i * MAX_ENTRIES;
+            assertSameData(DATA_0, bc.lookup(key));
+        }
+        bc.close();
+    }
+
+    @MediumTest
+    public void testRegionFlip() throws IOException {
+        String name = TEST_FILE_NAME;
+        BlobCache bc;
+        File idxFile = new File(name + ".idx");
+        File data0File = new File(name + ".0");
+        File data1File = new File(name + ".1");
+
+        // Create a brand new cache.
+        bc = new BlobCache(name, MAX_ENTRIES, MAX_BYTES, true);
+
+        // This is the number of blobs that fit into one region.
+        int maxFit = (MAX_BYTES - DATA_HEADER_SIZE) /
+                (BLOB_HEADER_SIZE + DATA_0.length);
+
+        for (int k = 0; k < maxFit; k++) {
+            genData(DATA_0, k);
+            bc.insert(k, DATA_0);
+        }
+        assertEquals(maxFit, bc.getActiveCount());
+
+        // Make sure the file size is right.
+        assertEquals(INDEX_SIZE, idxFile.length());
+        assertEquals(DATA_HEADER_SIZE + maxFit * (BLOB_HEADER_SIZE + DATA_0.length),
+                data0File.length());
+        assertEquals(DATA_HEADER_SIZE, data1File.length());
+
+        // Now insert another one and let it flip.
+        genData(DATA_0, 777);
+        bc.insert(KEY_1, DATA_0);
+        assertEquals(1, bc.getActiveCount());
+
+        assertEquals(INDEX_SIZE, idxFile.length());
+        assertEquals(DATA_HEADER_SIZE + maxFit * (BLOB_HEADER_SIZE + DATA_0.length),
+                data0File.length());
+        assertEquals(DATA_HEADER_SIZE + 1 * (BLOB_HEADER_SIZE + DATA_0.length),
+                data1File.length());
+
+        // Make sure we can find the new data
+        assertSameData(DATA_0, bc.lookup(KEY_1));
+
+        // Now find an old blob
+        int old = maxFit / 2;
+        genData(DATA_0, old);
+        assertSameData(DATA_0, bc.lookup(old));
+        assertEquals(2, bc.getActiveCount());
+
+        // The blob we just looked up was copied over to the active region.
+        assertEquals(INDEX_SIZE, idxFile.length());
+        assertEquals(DATA_HEADER_SIZE + maxFit * (BLOB_HEADER_SIZE + DATA_0.length),
+                data0File.length());
+        assertEquals(DATA_HEADER_SIZE + 2 * (BLOB_HEADER_SIZE + DATA_0.length),
+                data1File.length());
+
+        // Now copy everything over (except we should have no space for the last one)
+        assertTrue(old < maxFit - 1);
+        for (int k = 0; k < maxFit; k++) {
+            genData(DATA_0, k);
+            assertSameData(DATA_0, bc.lookup(k));
+        }
+        assertEquals(maxFit, bc.getActiveCount());
+
+        // Now both files should be full.
+        assertEquals(INDEX_SIZE, idxFile.length());
+        assertEquals(DATA_HEADER_SIZE + maxFit * (BLOB_HEADER_SIZE + DATA_0.length),
+                data0File.length());
+        assertEquals(DATA_HEADER_SIZE + maxFit * (BLOB_HEADER_SIZE + DATA_0.length),
+                data1File.length());
+
+        // Now insert one to make it flip.
+        genData(DATA_0, 888);
+        bc.insert(KEY_2, DATA_0);
+        assertEquals(1, bc.getActiveCount());
+
+        // Check the size after the second flip.
+        assertEquals(INDEX_SIZE, idxFile.length());
+        assertEquals(DATA_HEADER_SIZE + 1 * (BLOB_HEADER_SIZE + DATA_0.length),
+                data0File.length());
+        assertEquals(DATA_HEADER_SIZE + maxFit * (BLOB_HEADER_SIZE + DATA_0.length),
+                data1File.length());
+
+        // Now the last key should be gone.
+        assertNull(bc.lookup(maxFit - 1));
+
+        // But others should remain
+        for (int k = 0; k < maxFit - 1; k++) {
+            genData(DATA_0, k);
+            assertSameData(DATA_0, bc.lookup(k));
+        }
+
+        assertEquals(maxFit, bc.getActiveCount());
+        genData(DATA_0, 777);
+        assertSameData(DATA_0, bc.lookup(KEY_1));
+        genData(DATA_0, 888);
+        assertSameData(DATA_0, bc.lookup(KEY_2));
+        assertEquals(maxFit, bc.getActiveCount());
+
+        // Now both files should be full.
+        assertEquals(INDEX_SIZE, idxFile.length());
+        assertEquals(DATA_HEADER_SIZE + maxFit * (BLOB_HEADER_SIZE + DATA_0.length),
+                data0File.length());
+        assertEquals(DATA_HEADER_SIZE + maxFit * (BLOB_HEADER_SIZE + DATA_0.length),
+                data1File.length());
+
+        bc.close();
+    }
+
+    @MediumTest
+    public void testEntryLimit() throws IOException {
+        String name = TEST_FILE_NAME;
+        BlobCache bc;
+        File idxFile = new File(name + ".idx");
+        File data0File = new File(name + ".0");
+        File data1File = new File(name + ".1");
+        int maxEntries = 10;
+        int maxFit = maxEntries / 2;
+        int indexSize = HEADER_SIZE + maxEntries * 12 * 2;
+
+        // Create a brand new cache with a small entry limit.
+        bc = new BlobCache(name, maxEntries, MAX_BYTES, true);
+
+        // Fill to just before flipping
+        for (int i = 0; i < maxFit; i++) {
+            genData(DATA_0, i);
+            bc.insert(i, DATA_0);
+        }
+        assertEquals(maxFit, bc.getActiveCount());
+
+        // Check the file size.
+        assertEquals(indexSize, idxFile.length());
+        assertEquals(DATA_HEADER_SIZE + maxFit * (BLOB_HEADER_SIZE + DATA_0.length),
+                data0File.length());
+        assertEquals(DATA_HEADER_SIZE, data1File.length());
+
+        // Insert one and make it flip
+        genData(DATA_0, 777);
+        bc.insert(777, DATA_0);
+        assertEquals(1, bc.getActiveCount());
+
+        // Check the file size.
+        assertEquals(indexSize, idxFile.length());
+        assertEquals(DATA_HEADER_SIZE + maxFit * (BLOB_HEADER_SIZE + DATA_0.length),
+                data0File.length());
+        assertEquals(DATA_HEADER_SIZE + 1 * (BLOB_HEADER_SIZE + DATA_0.length),
+                data1File.length());
+        bc.close();
+    }
+
+    @LargeTest
+    public void testDataIntegrity() throws IOException {
+        String name = TEST_FILE_NAME;
+        File idxFile = new File(name + ".idx");
+        File data0File = new File(name + ".0");
+        File data1File = new File(name + ".1");
+        RandomAccessFile f;
+
+        Log.v(TAG, "It should be readable if the content is not changed.");
+        prepareNewCache();
+        f = new RandomAccessFile(data0File, "rw");
+        f.seek(1);
+        byte b = f.readByte();
+        f.seek(1);
+        f.write(b);
+        f.close();
+        assertReadable();
+
+        Log.v(TAG, "Change the data file magic field");
+        prepareNewCache();
+        f = new RandomAccessFile(data0File, "rw");
+        f.seek(1);
+        f.write(0xFF);
+        f.close();
+        assertUnreadable();
+
+        prepareNewCache();
+        f = new RandomAccessFile(data1File, "rw");
+        f.write(0xFF);
+        f.close();
+        assertUnreadable();
+
+        Log.v(TAG, "Change the blob key");
+        prepareNewCache();
+        f = new RandomAccessFile(data0File, "rw");
+        f.seek(4);
+        f.write(0x00);
+        f.close();
+        assertUnreadable();
+
+        Log.v(TAG, "Change the blob checksum");
+        prepareNewCache();
+        f = new RandomAccessFile(data0File, "rw");
+        f.seek(4 + 8);
+        f.write(0x00);
+        f.close();
+        assertUnreadable();
+
+        Log.v(TAG, "Change the blob offset");
+        prepareNewCache();
+        f = new RandomAccessFile(data0File, "rw");
+        f.seek(4 + 12);
+        f.write(0x20);
+        f.close();
+        assertUnreadable();
+
+        Log.v(TAG, "Change the blob length: some other value");
+        prepareNewCache();
+        f = new RandomAccessFile(data0File, "rw");
+        f.seek(4 + 16);
+        f.write(0x20);
+        f.close();
+        assertUnreadable();
+
+        Log.v(TAG, "Change the blob length: -1");
+        prepareNewCache();
+        f = new RandomAccessFile(data0File, "rw");
+        f.seek(4 + 16);
+        f.writeInt(0xFFFFFFFF);
+        f.close();
+        assertUnreadable();
+
+        Log.v(TAG, "Change the blob length: big value");
+        prepareNewCache();
+        f = new RandomAccessFile(data0File, "rw");
+        f.seek(4 + 16);
+        f.writeInt(0xFFFFFF00);
+        f.close();
+        assertUnreadable();
+
+        Log.v(TAG, "Change the blob content");
+        prepareNewCache();
+        f = new RandomAccessFile(data0File, "rw");
+        f.seek(4 + 20);
+        f.write(0x01);
+        f.close();
+        assertUnreadable();
+
+        Log.v(TAG, "Change the index magic");
+        prepareNewCache();
+        f = new RandomAccessFile(idxFile, "rw");
+        f.seek(1);
+        f.write(0x00);
+        f.close();
+        assertUnreadable();
+
+        Log.v(TAG, "Change the active region");
+        prepareNewCache();
+        f = new RandomAccessFile(idxFile, "rw");
+        f.seek(12);
+        f.write(0x01);
+        f.close();
+        assertUnreadable();
+
+        Log.v(TAG, "Change the reserved data");
+        prepareNewCache();
+        f = new RandomAccessFile(idxFile, "rw");
+        f.seek(24);
+        f.write(0x01);
+        f.close();
+        assertUnreadable();
+
+        Log.v(TAG, "Change the checksum");
+        prepareNewCache();
+        f = new RandomAccessFile(idxFile, "rw");
+        f.seek(29);
+        f.write(0x00);
+        f.close();
+        assertUnreadable();
+
+        Log.v(TAG, "Change the key");
+        prepareNewCache();
+        f = new RandomAccessFile(idxFile, "rw");
+        f.seek(32 + 12 * (KEY_1 % MAX_ENTRIES));
+        f.write(0x00);
+        f.close();
+        assertUnreadable();
+
+        Log.v(TAG, "Change the offset");
+        prepareNewCache();
+        f = new RandomAccessFile(idxFile, "rw");
+        f.seek(32 + 12 * (KEY_1 % MAX_ENTRIES) + 8);
+        f.write(0x05);
+        f.close();
+        assertUnreadable();
+
+        Log.v(TAG, "Change the offset");
+        prepareNewCache();
+        f = new RandomAccessFile(idxFile, "rw");
+        f.seek(32 + 12 * (KEY_1 % MAX_ENTRIES) + 8 + 3);
+        f.write(0xFF);
+        f.close();
+        assertUnreadable();
+
+        Log.v(TAG, "Garbage index");
+        prepareNewCache();
+        f = new RandomAccessFile(idxFile, "rw");
+        int n = (int) idxFile.length();
+        f.seek(32);
+        byte[] garbage = new byte[1024];
+        for (int i = 0; i < garbage.length; i++) {
+            garbage[i] = (byte) 0x80;
+        }
+        int i = 32;
+        while (i < n) {
+            int todo = Math.min(garbage.length, n - i);
+            f.write(garbage, 0, todo);
+            i += todo;
+        }
+        f.close();
+        assertUnreadable();
+    }
+
+    // Create a brand new cache and put one entry into it.
+    private void prepareNewCache() throws IOException {
+        BlobCache bc = new BlobCache(TEST_FILE_NAME, MAX_ENTRIES, MAX_BYTES, true);
+        genData(DATA_0, 777);
+        bc.insert(KEY_1, DATA_0);
+        bc.close();
+    }
+
+    private void assertReadable() throws IOException {
+        BlobCache bc = new BlobCache(TEST_FILE_NAME, MAX_ENTRIES, MAX_BYTES, false);
+        genData(DATA_0, 777);
+        assertSameData(DATA_0, bc.lookup(KEY_1));
+        bc.close();
+    }
+
+    private void assertUnreadable() throws IOException {
+        BlobCache bc = new BlobCache(TEST_FILE_NAME, MAX_ENTRIES, MAX_BYTES, false);
+        genData(DATA_0, 777);
+        assertNull(bc.lookup(KEY_1));
+        bc.close();
+    }
+
+    @LargeTest
+    public void testRandomSize() throws IOException {
+        BlobCache bc = new BlobCache(TEST_FILE_NAME, MAX_ENTRIES, MAX_BYTES, true);
+
+        // Random size test
+        Random rand = new Random(0);
+        for (int i = 0; i < 100; i++) {
+            byte[] data = new byte[rand.nextInt(MAX_BYTES*12/10)];
+            try {
+                bc.insert(rand.nextLong(), data);
+                if (data.length > MAX_BYTES - DATA_HEADER_SIZE - BLOB_HEADER_SIZE) fail();
+            } catch (RuntimeException ex) {
+                if (data.length <= MAX_BYTES - DATA_HEADER_SIZE - BLOB_HEADER_SIZE) fail();
+            }
+        }
+
+        bc.close();
+    }
+
+    @LargeTest
+    public void testBandwidth() throws IOException {
+        BlobCache bc = new BlobCache(TEST_FILE_NAME, 1000, 10000000, true);
+
+        // Write
+        int count = 0;
+        byte[] data = new byte[20000];
+        long t0 = System.nanoTime();
+        for (int i = 0; i < 1000; i++) {
+            bc.insert(i, data);
+            count += data.length;
+        }
+        bc.syncAll();
+        float delta = (System.nanoTime() - t0) * 1e-3f;
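+        // delta is in microseconds, so count / delta is bytes per microsecond (~MB/s).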
+        Log.v(TAG, "write bandwidth = " + (count / delta) + " M/s");
+
+        // Copy over
+        BlobCache.LookupRequest req = new BlobCache.LookupRequest();
+        count = 0;
+        t0 = System.nanoTime();
+        for (int i = 0; i < 1000; i++) {
+            req.key = i;
+            req.buffer = data;
+            if (bc.lookup(req)) {
+                count += req.length;
+            }
+        }
+        bc.syncAll();
+        delta = (System.nanoTime() - t0) * 1e-3f;
+        Log.v(TAG, "copy over bandwidth = " + (count / delta) + " M/s");
+
+        // Read
+        count = 0;
+        t0 = System.nanoTime();
+        for (int i = 0; i < 1000; i++) {
+            req.key = i;
+            req.buffer = data;
+            if (bc.lookup(req)) {
+                count += req.length;
+            }
+        }
+        bc.syncAll();
+        delta = (System.nanoTime() - t0) * 1e-3f;
+        Log.v(TAG, "read bandwidth = " + (count / delta) + " M/s");
+
+        bc.close();
+    }
+
+    @LargeTest
+    public void testSmallSize() throws IOException {
+        BlobCache bc = new BlobCache(TEST_FILE_NAME, MAX_ENTRIES, 40, true);
+
+        // Small size test
+        Random rand = new Random(0);
+        for (int i = 0; i < 100; i++) {
+            byte[] data = new byte[rand.nextInt(3)];
+            bc.insert(rand.nextLong(), data);
+        }
+
+        bc.close();
+    }
+
+    @LargeTest
+    public void testManyEntries() throws IOException {
+        BlobCache bc = new BlobCache(TEST_FILE_NAME, 1, MAX_BYTES, true);
+
+        // Many entries test
+        Random rand = new Random(0);
+        for (int i = 0; i < 100; i++) {
+            byte[] data = new byte[rand.nextInt(10)];
+            bc.insert(rand.nextLong(), data);
+        }
+
+        bc.close();
+    }
+
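+    // Fills data with a deterministic pattern derived from seed.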
+    private void genData(byte[] data, int seed) {
+        for (int i = 0; i < data.length; i++) {
+            data[i] = (byte) (seed * i);
+        }
+    }
+
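+    // Asserts that data1 and data2 hold the same bytes (or are both null).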
+    private void assertSameData(byte[] data1, byte[] data2) {
+        if (data1 == null && data2 == null) return;
+        if (data1 == null || data2 == null) fail();
+        if (data1.length != data2.length) fail();
+        for (int i = 0; i < data1.length; i++) {
+            if (data1[i] != data2[i]) fail();
+        }
+    }
+
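+    // Asserts that the first n bytes of data1 and data2 are the same.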
+    private void assertSameData(byte[] data1, byte[] data2, int n) {
+        if (data1 == null || data2 == null) fail();
+        for (int i = 0; i < n; i++) {
+            if (data1[i] != data2[i]) fail();
+        }
+    }
+}