Merge patch from benswick@ to make Murmur3 compile as plain C code.
git-svn-id: http://smhasher.googlecode.com/svn/trunk@150 77a7d1d3-4c08-bdc2-d393-d5859734b01a
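The renames are required because plain C has no function overloading: the 32- and 64-bit variants of getblock and fmix must carry distinct names. Likewise, C does not accept function-style casts such as uint64_t(x), so the tail-mixing casts become C-style (uint64_t)x. A minimal sketch of the overloading constraint (illustrative only, not part of the patch):

    #include <stdint.h>

    /* In C++ both helpers could be spelled getblock(); a C compiler
       rejects that as conflicting declarations, so each word width
       gets its own suffixed name. */
    static uint32_t getblock32 ( const uint32_t * p, int i ) { return p[i]; }
    static uint64_t getblock64 ( const uint64_t * p, int i ) { return p[i]; }
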
diff --git a/MurmurHash3.cpp b/MurmurHash3.cpp
index 5a55b7a..aa7982d 100644
--- a/MurmurHash3.cpp
+++ b/MurmurHash3.cpp
@@ -52,12 +52,12 @@
 // Block read - if your platform needs to do endian-swapping or can only
 // handle aligned reads, do the conversion here
 
-FORCE_INLINE uint32_t getblock ( const uint32_t * p, int i )
+FORCE_INLINE uint32_t getblock32 ( const uint32_t * p, int i )
 {
   return p[i];
 }
 
-FORCE_INLINE uint64_t getblock ( const uint64_t * p, int i )
+FORCE_INLINE uint64_t getblock64 ( const uint64_t * p, int i )
 {
   return p[i];
 }
@@ -65,7 +65,7 @@
 //-----------------------------------------------------------------------------
 // Finalization mix - force all bits of a hash block to avalanche
 
-FORCE_INLINE uint32_t fmix ( uint32_t h )
+FORCE_INLINE uint32_t fmix32 ( uint32_t h )
 {
   h ^= h >> 16;
   h *= 0x85ebca6b;
@@ -78,7 +78,7 @@
 
 //----------
 
-FORCE_INLINE uint64_t fmix ( uint64_t k )
+FORCE_INLINE uint64_t fmix64 ( uint64_t k )
 {
   k ^= k >> 33;
   k *= BIG_CONSTANT(0xff51afd7ed558ccd);
@@ -109,7 +109,7 @@
 
   for(int i = -nblocks; i; i++)
   {
-    uint32_t k1 = getblock(blocks,i);
+    uint32_t k1 = getblock32(blocks,i);
 
     k1 *= c1;
     k1 = ROTL32(k1,15);
@@ -140,7 +140,7 @@
 
   h1 ^= len;
 
-  h1 = fmix(h1);
+  h1 = fmix32(h1);
 
   *(uint32_t*)out = h1;
 } 
@@ -170,10 +170,10 @@
 
   for(int i = -nblocks; i; i++)
   {
-    uint32_t k1 = getblock(blocks,i*4+0);
-    uint32_t k2 = getblock(blocks,i*4+1);
-    uint32_t k3 = getblock(blocks,i*4+2);
-    uint32_t k4 = getblock(blocks,i*4+3);
+    uint32_t k1 = getblock32(blocks,i*4+0);
+    uint32_t k2 = getblock32(blocks,i*4+1);
+    uint32_t k3 = getblock32(blocks,i*4+2);
+    uint32_t k4 = getblock32(blocks,i*4+3);
 
     k1 *= c1; k1  = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
 
@@ -236,10 +236,10 @@
   h1 += h2; h1 += h3; h1 += h4;
   h2 += h1; h3 += h1; h4 += h1;
 
-  h1 = fmix(h1);
-  h2 = fmix(h2);
-  h3 = fmix(h3);
-  h4 = fmix(h4);
+  h1 = fmix32(h1);
+  h2 = fmix32(h2);
+  h3 = fmix32(h3);
+  h4 = fmix32(h4);
 
   h1 += h2; h1 += h3; h1 += h4;
   h2 += h1; h3 += h1; h4 += h1;
@@ -271,8 +271,8 @@
 
   for(int i = 0; i < nblocks; i++)
   {
-    uint64_t k1 = getblock(blocks,i*2+0);
-    uint64_t k2 = getblock(blocks,i*2+1);
+    uint64_t k1 = getblock64(blocks,i*2+0);
+    uint64_t k2 = getblock64(blocks,i*2+1);
 
     k1 *= c1; k1  = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
 
@@ -293,23 +293,23 @@
 
   switch(len & 15)
   {
-  case 15: k2 ^= uint64_t(tail[14]) << 48;
-  case 14: k2 ^= uint64_t(tail[13]) << 40;
-  case 13: k2 ^= uint64_t(tail[12]) << 32;
-  case 12: k2 ^= uint64_t(tail[11]) << 24;
-  case 11: k2 ^= uint64_t(tail[10]) << 16;
-  case 10: k2 ^= uint64_t(tail[ 9]) << 8;
-  case  9: k2 ^= uint64_t(tail[ 8]) << 0;
+  case 15: k2 ^= ((uint64_t)tail[14]) << 48;
+  case 14: k2 ^= ((uint64_t)tail[13]) << 40;
+  case 13: k2 ^= ((uint64_t)tail[12]) << 32;
+  case 12: k2 ^= ((uint64_t)tail[11]) << 24;
+  case 11: k2 ^= ((uint64_t)tail[10]) << 16;
+  case 10: k2 ^= ((uint64_t)tail[ 9]) << 8;
+  case  9: k2 ^= ((uint64_t)tail[ 8]) << 0;
            k2 *= c2; k2  = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
 
-  case  8: k1 ^= uint64_t(tail[ 7]) << 56;
-  case  7: k1 ^= uint64_t(tail[ 6]) << 48;
-  case  6: k1 ^= uint64_t(tail[ 5]) << 40;
-  case  5: k1 ^= uint64_t(tail[ 4]) << 32;
-  case  4: k1 ^= uint64_t(tail[ 3]) << 24;
-  case  3: k1 ^= uint64_t(tail[ 2]) << 16;
-  case  2: k1 ^= uint64_t(tail[ 1]) << 8;
-  case  1: k1 ^= uint64_t(tail[ 0]) << 0;
+  case  8: k1 ^= ((uint64_t)tail[ 7]) << 56;
+  case  7: k1 ^= ((uint64_t)tail[ 6]) << 48;
+  case  6: k1 ^= ((uint64_t)tail[ 5]) << 40;
+  case  5: k1 ^= ((uint64_t)tail[ 4]) << 32;
+  case  4: k1 ^= ((uint64_t)tail[ 3]) << 24;
+  case  3: k1 ^= ((uint64_t)tail[ 2]) << 16;
+  case  2: k1 ^= ((uint64_t)tail[ 1]) << 8;
+  case  1: k1 ^= ((uint64_t)tail[ 0]) << 0;
            k1 *= c1; k1  = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
   };
 
@@ -321,8 +321,8 @@
   h1 += h2;
   h2 += h1;
 
-  h1 = fmix(h1);
-  h2 = fmix(h2);
+  h1 = fmix64(h1);
+  h2 = fmix64(h2);
 
   h1 += h2;
   h2 += h1;
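
With the width-suffixed helpers and C-style casts in place, the translation unit builds as plain C as well as C++. A hypothetical smoke test (the extern declaration matches MurmurHash3.h; the test program itself is not part of the patch):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Declaration as in MurmurHash3.h. */
    void MurmurHash3_x64_128 ( const void * key, int len,
                               uint32_t seed, void * out );

    int main ( void )
    {
      const char * key = "hello";
      uint64_t out[2];

      /* Hash a short string with an arbitrary seed and print the
         128-bit result as two 64-bit halves. */
      MurmurHash3_x64_128(key, (int)strlen(key), 42, out);
      printf("%016llx %016llx\n",
             (unsigned long long)out[0],
             (unsigned long long)out[1]);
      return 0;
    }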