Improve sad3x16 SSE2 function

vp9_sad3x16_sse2() is called heavily in the decoder, where the
unaligned reads consume a lot of CPU cycles. When CONFIG_SUBPELREFMV
is off, the unaligned offset is 1. In that case we can adjust src_ptr
to be 4-byte aligned and then do aligned reads, which reduces the
read time significantly. Tests on a 1080p clip showed over 2% decoder
performance gain with CONFIG_SUBPELREFMV off.
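
As a minimal sketch of the idea (not the patched loop itself; the
helper name and the single-row handling here are hypothetical):

  #include <emmintrin.h>  /* SSE2 */
  #include <stdint.h>

  /* SAD over 3 bytes of one row, assuming src_ptr is either 4-byte
   * aligned or exactly 1 byte past a 4-byte boundary. */
  static unsigned int sad3_row(const unsigned char *src_ptr,
                               const unsigned char *ref_ptr) {
    const int offset = (uintptr_t)src_ptr & 3;
    __m128i s, r;

    if (offset == 1) {
      /* Back up to the boundary so the 32-bit load is aligned; the
       * wanted bytes now sit in bytes 1..3 of the loaded word. */
      s = _mm_cvtsi32_si128(*(const int *)(src_ptr - 1));
      /* Zero the stray low byte so it does not affect the SAD. */
      s = _mm_and_si128(s, _mm_set_epi32(0, 0, 0, (int)0xffffff00));
    } else {
      /* Already aligned: load 4 bytes and drop the extra high byte. */
      s = _mm_cvtsi32_si128(*(const int *)src_ptr);
      s = _mm_and_si128(s, _mm_set_epi32(0, 0, 0, 0x00ffffff));
    }

    /* Build the reference bytes without assuming ref alignment. */
    r = _mm_cvtsi32_si128(ref_ptr[0] | (ref_ptr[1] << 8) |
                          (ref_ptr[2] << 16));
    if (offset == 1)
      r = _mm_slli_epi32(r, 8);  /* line up with the shifted src bytes */

    return (unsigned int)_mm_cvtsi128_si32(_mm_sad_epu8(s, r));
  }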

Change-Id: I953afe3ac5406107933ef49d0b695eafba9a6507
diff --git a/vp9/common/findnearmv.c b/vp9/common/findnearmv.c
index 768abf2..a551db8 100644
--- a/vp9/common/findnearmv.c
+++ b/vp9/common/findnearmv.c
@@ -45,15 +45,13 @@
 unsigned int vp9_sad3x16_c(const unsigned char *src_ptr,
                            int  src_stride,
                            const unsigned char *ref_ptr,
-                           int  ref_stride,
-                           int max_sad) {
+                           int  ref_stride) {
   return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 3, 16);
 }
 unsigned int vp9_sad16x3_c(const unsigned char *src_ptr,
                            int  src_stride,
                            const unsigned char *ref_ptr,
-                           int  ref_stride,
-                           int max_sad) {
+                           int  ref_stride) {
   return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 16, 3);
 }
 
@@ -230,23 +228,23 @@
     score = 0;
     if (xd->up_available) {
       score += vp9_sad16x3(above_src, xd->dst.y_stride,
-                           above_ref + offset, ref_y_stride, INT_MAX);
+                           above_ref + offset, ref_y_stride);
 #if CONFIG_SUPERBLOCKS
       if (xd->mode_info_context->mbmi.encoded_as_sb) {
         score += vp9_sad16x3(above_src + 16, xd->dst.y_stride,
-                             above_ref + offset + 16, ref_y_stride, INT_MAX);
+                             above_ref + offset + 16, ref_y_stride);
       }
 #endif
     }
     if (xd->left_available) {
       score += vp9_sad3x16(left_src, xd->dst.y_stride,
-                           left_ref + offset, ref_y_stride, INT_MAX);
+                           left_ref + offset, ref_y_stride);
 #if CONFIG_SUPERBLOCKS
       if (xd->mode_info_context->mbmi.encoded_as_sb) {
         score += vp9_sad3x16(left_src + xd->dst.y_stride * 16,
                              xd->dst.y_stride,
                              left_ref + offset + ref_y_stride * 16,
-                             ref_y_stride, INT_MAX);
+                             ref_y_stride);
       }
 #endif
     }
diff --git a/vp9/common/rtcd_defs.sh b/vp9/common/rtcd_defs.sh
index bbef1ec..ea134a8 100644
--- a/vp9/common/rtcd_defs.sh
+++ b/vp9/common/rtcd_defs.sh
@@ -221,10 +221,10 @@
 #
 # sad 16x3, 3x16
 #
-prototype unsigned int vp9_sad16x3 "const unsigned char *src_ptr, int  src_stride, const unsigned char *ref_ptr, int ref_stride, int max_sad"
+prototype unsigned int vp9_sad16x3 "const unsigned char *src_ptr, int  src_stride, const unsigned char *ref_ptr, int ref_stride"
 specialize vp9_sad16x3 sse2
 
-prototype unsigned int vp9_sad3x16 "const unsigned char *src_ptr, int  src_stride, const unsigned char *ref_ptr, int ref_stride, int max_sad"
+prototype unsigned int vp9_sad3x16 "const unsigned char *src_ptr, int  src_stride, const unsigned char *ref_ptr, int ref_stride"
 specialize vp9_sad3x16 sse2
 
 #
diff --git a/vp9/common/x86/sadmxn_x86.c b/vp9/common/x86/sadmxn_x86.c
index 77cd372..0b783cc 100644
--- a/vp9/common/x86/sadmxn_x86.c
+++ b/vp9/common/x86/sadmxn_x86.c
@@ -11,21 +11,18 @@
 #include <emmintrin.h>  // SSE2
 #include "./vpx_config.h"
 #include "./vp9_rtcd.h"
-
+#include "vpx/vpx_integer.h"
 
 #if HAVE_SSE2
 unsigned int vp9_sad16x3_sse2(
   const unsigned char *src_ptr,
   int  src_stride,
   const unsigned char *ref_ptr,
-  int  ref_stride,
-  int max_sad) {
+  int  ref_stride) {
   __m128i s0, s1, s2;
   __m128i r0, r1, r2;
   __m128i sad;
 
-  (void)max_sad;
-
   s0 = _mm_loadu_si128((const __m128i *)(src_ptr + 0 * src_stride));
   s1 = _mm_loadu_si128((const __m128i *)(src_ptr + 1 * src_stride));
   s2 = _mm_loadu_si128((const __m128i *)(src_ptr + 2 * src_stride));
@@ -46,12 +43,25 @@
   const unsigned char *src_ptr,
   int  src_stride,
   const unsigned char *ref_ptr,
-  int  ref_stride,
-  int max_sad) {
+  int  ref_stride) {
   int r;
   __m128i s0, s1, s2, s3;
   __m128i r0, r1, r2, r3;
-  __m128i sad = _mm_set1_epi16(0);
+  __m128i sad = _mm_setzero_si128();
+  __m128i mask;
+  const int offset = (uintptr_t)src_ptr & 3;
+
+  /* In current use case, the offset is 1 if CONFIG_SUBPELREFMV is off.
+   * Here, for offset=1, we adjust src_ptr to be 4-byte aligned. Then, movd
+   * takes much less time.
+   */
+  if (offset == 1)
+    src_ptr -= 1;
+
+  /* mask = 0xffffffffffff0000ffffffffffff0000 */
+  mask = _mm_cmpeq_epi32(sad, sad);
+  mask = _mm_slli_epi64(mask, 16);
+
   for (r = 0; r < 16; r += 4) {
     s0 = _mm_cvtsi32_si128 (*(const int *)(src_ptr + 0 * src_stride));
     s1 = _mm_cvtsi32_si128 (*(const int *)(src_ptr + 1 * src_stride));
@@ -69,8 +79,11 @@
     s0 = _mm_unpacklo_epi64(s0, s2);
     r0 = _mm_unpacklo_epi64(r0, r2);
 
-    // throw out byte 3
-    s0 = _mm_slli_epi64(s0, 16);
+    // throw out extra byte
+    if (offset == 1)
+      s0 = _mm_and_si128(s0, mask);
+    else
+      s0 = _mm_slli_epi64(s0, 16);
     r0 = _mm_slli_epi64(r0, 16);
 
     sad = _mm_add_epi16(sad, _mm_sad_epu8(s0, r0));
@@ -84,5 +97,3 @@
 }
 
 #endif
-
-