Remove legacy loop filter code

This code is legacy from VP9 and is no longer used: it covers the
VP9-style bit-mask lookup tables, build_masks(), build_y_mask(),
av1_setup_mask(), and the LOOP_FILTER_MASK struct.

Change-Id: I1d6138c1ff59e911400c2e34ad95599742564aa9
diff --git a/av1/common/av1_loopfilter.c b/av1/common/av1_loopfilter.c
index 29061a2..fb4bd93 100644
--- a/av1/common/av1_loopfilter.c
+++ b/av1/common/av1_loopfilter.c
@@ -33,232 +33,6 @@
 
 typedef enum EDGE_DIR { VERT_EDGE = 0, HORZ_EDGE = 1, NUM_EDGE_DIRS } EDGE_DIR;
 
-// 64 bit masks for left transform size. Each 1 represents a position where
-// we should apply a loop filter across the left border of an 8x8 block
-// boundary.
-//
-// In the case of TX_16X16->  ( in low order byte first we end up with
-// a mask that looks like this
-//
-//    10101010
-//    10101010
-//    10101010
-//    10101010
-//    10101010
-//    10101010
-//    10101010
-//    10101010
-//
-// A loopfilter should be applied to every other 8x8 horizontally.
-static const uint64_t left_64x64_txform_mask[TX_SIZES] = {
-  0xffffffffffffffffULL,  // TX_4X4
-  0xffffffffffffffffULL,  // TX_8x8
-  0x5555555555555555ULL,  // TX_16x16
-  0x1111111111111111ULL,  // TX_32x32
-  0x0101010101010101ULL,  // TX_64x64
-};
-
-// 64 bit masks for above transform size. Each 1 represents a position where
-// we should apply a loop filter across the top border of an 8x8 block
-// boundary.
-//
-// In the case of TX_32x32 ->  ( in low order byte first we end up with
-// a mask that looks like this
-//
-//    11111111
-//    00000000
-//    00000000
-//    00000000
-//    11111111
-//    00000000
-//    00000000
-//    00000000
-//
-// A loopfilter should be applied to every other 4 the row vertically.
-static const uint64_t above_64x64_txform_mask[TX_SIZES] = {
-  0xffffffffffffffffULL,  // TX_4X4
-  0xffffffffffffffffULL,  // TX_8x8
-  0x00ff00ff00ff00ffULL,  // TX_16x16
-  0x000000ff000000ffULL,  // TX_32x32
-  0x00000000000000ffULL,  // TX_64x64
-};
-
-// 64 bit masks for prediction sizes (left). Each 1 represents a position
-// where left border of an 8x8 block. These are aligned to the right most
-// appropriate bit, and then shifted into place.
-//
-// In the case of TX_16x32 ->  ( low order byte first ) we end up with
-// a mask that looks like this :
-//
-//  10000000
-//  10000000
-//  10000000
-//  10000000
-//  00000000
-//  00000000
-//  00000000
-//  00000000
-static const uint64_t left_prediction_mask[BLOCK_SIZES_ALL] = {
-  0x0000000000000001ULL,  // BLOCK_4X4,
-  0x0000000000000001ULL,  // BLOCK_4X8,
-  0x0000000000000001ULL,  // BLOCK_8X4,
-  0x0000000000000001ULL,  // BLOCK_8X8,
-  0x0000000000000101ULL,  // BLOCK_8X16,
-  0x0000000000000001ULL,  // BLOCK_16X8,
-  0x0000000000000101ULL,  // BLOCK_16X16,
-  0x0000000001010101ULL,  // BLOCK_16X32,
-  0x0000000000000101ULL,  // BLOCK_32X16,
-  0x0000000001010101ULL,  // BLOCK_32X32,
-  0x0101010101010101ULL,  // BLOCK_32X64,
-  0x0000000001010101ULL,  // BLOCK_64X32,
-  0x0101010101010101ULL,  // BLOCK_64X64,
-  0x0000000000000101ULL,  // BLOCK_4X16,
-  0x0000000000000001ULL,  // BLOCK_16X4,
-  0x0000000001010101ULL,  // BLOCK_8X32,
-  0x0000000000000001ULL,  // BLOCK_32X8,
-  0x0101010101010101ULL,  // BLOCK_16X64,
-  0x0000000000000101ULL,  // BLOCK_64X16
-};
-
-// 64 bit mask to shift and set for each prediction size.
-static const uint64_t above_prediction_mask[BLOCK_SIZES_ALL] = {
-  0x0000000000000001ULL,  // BLOCK_4X4
-  0x0000000000000001ULL,  // BLOCK_4X8
-  0x0000000000000001ULL,  // BLOCK_8X4
-  0x0000000000000001ULL,  // BLOCK_8X8
-  0x0000000000000001ULL,  // BLOCK_8X16,
-  0x0000000000000003ULL,  // BLOCK_16X8
-  0x0000000000000003ULL,  // BLOCK_16X16
-  0x0000000000000003ULL,  // BLOCK_16X32,
-  0x000000000000000fULL,  // BLOCK_32X16,
-  0x000000000000000fULL,  // BLOCK_32X32,
-  0x000000000000000fULL,  // BLOCK_32X64,
-  0x00000000000000ffULL,  // BLOCK_64X32,
-  0x00000000000000ffULL,  // BLOCK_64X64,
-  0x0000000000000001ULL,  // BLOCK_4X16,
-  0x0000000000000003ULL,  // BLOCK_16X4,
-  0x0000000000000001ULL,  // BLOCK_8X32,
-  0x000000000000000fULL,  // BLOCK_32X8,
-  0x0000000000000003ULL,  // BLOCK_16X64,
-  0x00000000000000ffULL,  // BLOCK_64X16
-};
-// 64 bit mask to shift and set for each prediction size. A bit is set for
-// each 8x8 block that would be in the top left most block of the given block
-// size in the 64x64 block.
-static const uint64_t size_mask[BLOCK_SIZES_ALL] = {
-  0x0000000000000001ULL,  // BLOCK_4X4
-  0x0000000000000001ULL,  // BLOCK_4X8
-  0x0000000000000001ULL,  // BLOCK_8X4
-  0x0000000000000001ULL,  // BLOCK_8X8
-  0x0000000000000101ULL,  // BLOCK_8X16,
-  0x0000000000000003ULL,  // BLOCK_16X8
-  0x0000000000000303ULL,  // BLOCK_16X16
-  0x0000000003030303ULL,  // BLOCK_16X32,
-  0x0000000000000f0fULL,  // BLOCK_32X16,
-  0x000000000f0f0f0fULL,  // BLOCK_32X32,
-  0x0f0f0f0f0f0f0f0fULL,  // BLOCK_32X64,
-  0x00000000ffffffffULL,  // BLOCK_64X32,
-  0xffffffffffffffffULL,  // BLOCK_64X64,
-  0x0000000000000101ULL,  // BLOCK_4X16,
-  0x0000000000000003ULL,  // BLOCK_16X4,
-  0x0000000001010101ULL,  // BLOCK_8X32,
-  0x000000000000000fULL,  // BLOCK_32X8,
-  0x0303030303030303ULL,  // BLOCK_16X64,
-  0x000000000000ffffULL,  // BLOCK_64X16
-};
-
-// These are used for masking the left and above 32x32 borders.
-static const uint64_t left_border = 0x1111111111111111ULL;
-static const uint64_t above_border = 0x000000ff000000ffULL;
-
-// 16 bit masks for uv transform sizes.
-static const uint16_t left_64x64_txform_mask_uv[TX_SIZES] = {
-  0xffff,  // TX_4X4
-  0xffff,  // TX_8x8
-  0x5555,  // TX_16x16
-  0x1111,  // TX_32x32
-  0x0101,  // TX_64x64, never used
-};
-
-static const uint16_t above_64x64_txform_mask_uv[TX_SIZES] = {
-  0xffff,  // TX_4X4
-  0xffff,  // TX_8x8
-  0x0f0f,  // TX_16x16
-  0x000f,  // TX_32x32
-  0x0003,  // TX_64x64, never used
-};
-
-// 16 bit left mask to shift and set for each uv prediction size.
-static const uint16_t left_prediction_mask_uv[BLOCK_SIZES_ALL] = {
-  0x0001,  // BLOCK_4X4,
-  0x0001,  // BLOCK_4X8,
-  0x0001,  // BLOCK_8X4,
-  0x0001,  // BLOCK_8X8,
-  0x0001,  // BLOCK_8X16,
-  0x0001,  // BLOCK_16X8,
-  0x0001,  // BLOCK_16X16,
-  0x0011,  // BLOCK_16X32,
-  0x0001,  // BLOCK_32X16,
-  0x0011,  // BLOCK_32X32,
-  0x1111,  // BLOCK_32X64
-  0x0011,  // BLOCK_64X32,
-  0x1111,  // BLOCK_64X64,
-  0x0001,  // BLOCK_4X16,
-  0x0001,  // BLOCK_16X4,
-  0x0011,  // BLOCK_8X32,
-  0x0001,  // BLOCK_32X8,
-  0x1111,  // BLOCK_16X64,
-  0x0001,  // BLOCK_64X16,
-};
-
-// 16 bit above mask to shift and set for uv each prediction size.
-static const uint16_t above_prediction_mask_uv[BLOCK_SIZES_ALL] = {
-  0x0001,  // BLOCK_4X4
-  0x0001,  // BLOCK_4X8
-  0x0001,  // BLOCK_8X4
-  0x0001,  // BLOCK_8X8
-  0x0001,  // BLOCK_8X16,
-  0x0001,  // BLOCK_16X8
-  0x0001,  // BLOCK_16X16
-  0x0001,  // BLOCK_16X32,
-  0x0003,  // BLOCK_32X16,
-  0x0003,  // BLOCK_32X32,
-  0x0003,  // BLOCK_32X64,
-  0x000f,  // BLOCK_64X32,
-  0x000f,  // BLOCK_64X64,
-  0x0001,  // BLOCK_4X16,
-  0x0001,  // BLOCK_16X4,
-  0x0001,  // BLOCK_8X32,
-  0x0003,  // BLOCK_32X8,
-  0x0001,  // BLOCK_16X64,
-  0x000f,  // BLOCK_64X16
-};
-
-// 64 bit mask to shift and set for each uv prediction size
-static const uint16_t size_mask_uv[BLOCK_SIZES_ALL] = {
-  0x0001,  // BLOCK_4X4
-  0x0001,  // BLOCK_4X8
-  0x0001,  // BLOCK_8X4
-  0x0001,  // BLOCK_8X8
-  0x0001,  // BLOCK_8X16,
-  0x0001,  // BLOCK_16X8
-  0x0001,  // BLOCK_16X16
-  0x0011,  // BLOCK_16X32,
-  0x0003,  // BLOCK_32X16,
-  0x0033,  // BLOCK_32X32,
-  0x3333,  // BLOCK_32X64,
-  0x00ff,  // BLOCK_64X32,
-  0xffff,  // BLOCK_64X64,
-  0x0001,  // BLOCK_4X16,
-  0x0001,  // BLOCK_16X4,
-  0x0011,  // BLOCK_8X32,
-  0x0003,  // BLOCK_32X8,
-  0x1111,  // BLOCK_16X64,
-  0x000f,  // BLOCK_64X16
-};
-static const uint16_t left_border_uv = 0x1111;
-static const uint16_t above_border_uv = 0x000f;
-
 static const int mode_lf_lut[] = {
   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  // INTRA_MODES
   1, 1, 0, 1,                             // INTER_MODES (GLOBALMV == 0)
@@ -1118,397 +892,6 @@
 }
 #endif  // LOOP_FILTER_BITMASK
 
-// This function ors into the current lfm structure, where to do loop
-// filters for the specific mi we are looking at. It uses information
-// including the block_size_type (32x16, 32x32, etc.), the transform size,
-// whether there were any coefficients encoded, and the loop filter strength
-// block we are currently looking at. Shift is used to position the
-// 1's we produce.
-// TODO(JBB) Need another function for different resolution color..
-static void build_masks(AV1_COMMON *const cm,
-                        const loop_filter_info_n *const lfi_n,
-                        const MB_MODE_INFO *mbmi, const int shift_y,
-                        const int shift_uv, LOOP_FILTER_MASK *lfm) {
-  const BLOCK_SIZE block_size = mbmi->sb_type;
-  // TODO(debargha): Check if masks can be setup correctly when
-  // rectangular transfroms are used with the EXT_TX expt.
-  const TX_SIZE tx_size_y = txsize_sqr_map[mbmi->tx_size];
-  const TX_SIZE tx_size_y_left = txsize_horz_map[mbmi->tx_size];
-  const TX_SIZE tx_size_y_above = txsize_vert_map[mbmi->tx_size];
-  const TX_SIZE tx_size_uv_actual = av1_get_uv_tx_size(mbmi, 1, 1);
-  const TX_SIZE tx_size_uv = txsize_sqr_map[tx_size_uv_actual];
-  const TX_SIZE tx_size_uv_left = txsize_horz_map[tx_size_uv_actual];
-  const TX_SIZE tx_size_uv_above = txsize_vert_map[tx_size_uv_actual];
-  const int filter_level = get_filter_level(cm, lfi_n, 0, 0, mbmi);
-  uint64_t *const left_y = &lfm->left_y[tx_size_y_left];
-  uint64_t *const above_y = &lfm->above_y[tx_size_y_above];
-  uint64_t *const int_4x4_y = &lfm->int_4x4_y;
-  uint16_t *const left_uv = &lfm->left_uv[tx_size_uv_left];
-  uint16_t *const above_uv = &lfm->above_uv[tx_size_uv_above];
-  uint16_t *const int_4x4_uv = &lfm->left_int_4x4_uv;
-  int i;
-
-  // If filter level is 0 we don't loop filter.
-  if (!filter_level) {
-    return;
-  } else {
-    const int w = num_8x8_blocks_wide_lookup[block_size];
-    const int h = num_8x8_blocks_high_lookup[block_size];
-    const int row = (shift_y >> MAX_MIB_SIZE_LOG2);
-    const int col = shift_y - (row << MAX_MIB_SIZE_LOG2);
-
-    for (i = 0; i < h; i++) memset(&lfm->lfl_y[row + i][col], filter_level, w);
-  }
-
-  // These set 1 in the current block size for the block size edges.
-  // For instance if the block size is 32x16, we'll set:
-  //    above =   1111
-  //              0000
-  //    and
-  //    left  =   1000
-  //          =   1000
-  // NOTE : In this example the low bit is left most ( 1000 ) is stored as
-  //        1,  not 8...
-  //
-  // U and V set things on a 16 bit scale.
-  //
-  *above_y |= above_prediction_mask[block_size] << shift_y;
-  *above_uv |= above_prediction_mask_uv[block_size] << shift_uv;
-  *left_y |= left_prediction_mask[block_size] << shift_y;
-  *left_uv |= left_prediction_mask_uv[block_size] << shift_uv;
-
-  // If the block has no coefficients and is not intra we skip applying
-  // the loop filter on block edges.
-  if (mbmi->skip && is_inter_block(mbmi)) return;
-
-  // Here we are adding a mask for the transform size. The transform
-  // size mask is set to be correct for a 64x64 prediction block size. We
-  // mask to match the size of the block we are working on and then shift it
-  // into place..
-  *above_y |= (size_mask[block_size] & above_64x64_txform_mask[tx_size_y_above])
-              << shift_y;
-  *above_uv |=
-      (size_mask_uv[block_size] & above_64x64_txform_mask_uv[tx_size_uv_above])
-      << shift_uv;
-
-  *left_y |= (size_mask[block_size] & left_64x64_txform_mask[tx_size_y_left])
-             << shift_y;
-  *left_uv |=
-      (size_mask_uv[block_size] & left_64x64_txform_mask_uv[tx_size_uv_left])
-      << shift_uv;
-
-  // Here we are trying to determine what to do with the internal 4x4 block
-  // boundaries.  These differ from the 4x4 boundaries on the outside edge of
-  // an 8x8 in that the internal ones can be skipped and don't depend on
-  // the prediction block size.
-  if (tx_size_y == TX_4X4)
-    *int_4x4_y |= (size_mask[block_size] & 0xffffffffffffffffULL) << shift_y;
-
-  if (tx_size_uv == TX_4X4)
-    *int_4x4_uv |= (size_mask_uv[block_size] & 0xffff) << shift_uv;
-}
-
-// This function does the same thing as the one above with the exception that
-// it only affects the y masks. It exists because for blocks < 16x16 in size,
-// we only update u and v masks on the first block.
-static void build_y_mask(AV1_COMMON *const cm,
-                         const loop_filter_info_n *const lfi_n,
-                         const MB_MODE_INFO *mbmi, const int shift_y,
-                         LOOP_FILTER_MASK *lfm) {
-  const TX_SIZE tx_size_y = txsize_sqr_map[mbmi->tx_size];
-  const TX_SIZE tx_size_y_left = txsize_horz_map[mbmi->tx_size];
-  const TX_SIZE tx_size_y_above = txsize_vert_map[mbmi->tx_size];
-  const BLOCK_SIZE block_size = mbmi->sb_type;
-  const int filter_level = get_filter_level(cm, lfi_n, 0, 0, mbmi);
-  uint64_t *const left_y = &lfm->left_y[tx_size_y_left];
-  uint64_t *const above_y = &lfm->above_y[tx_size_y_above];
-  uint64_t *const int_4x4_y = &lfm->int_4x4_y;
-  int i;
-
-  if (!filter_level) {
-    return;
-  } else {
-    const int w = num_8x8_blocks_wide_lookup[block_size];
-    const int h = num_8x8_blocks_high_lookup[block_size];
-    const int row = (shift_y >> MAX_MIB_SIZE_LOG2);
-    const int col = shift_y - (row << MAX_MIB_SIZE_LOG2);
-
-    for (i = 0; i < h; i++) memset(&lfm->lfl_y[row + i][col], filter_level, w);
-  }
-
-  *above_y |= above_prediction_mask[block_size] << shift_y;
-  *left_y |= left_prediction_mask[block_size] << shift_y;
-
-  if (mbmi->skip && is_inter_block(mbmi)) return;
-
-  *above_y |= (size_mask[block_size] & above_64x64_txform_mask[tx_size_y_above])
-              << shift_y;
-
-  *left_y |= (size_mask[block_size] & left_64x64_txform_mask[tx_size_y_left])
-             << shift_y;
-
-  if (tx_size_y == TX_4X4)
-    *int_4x4_y |= (size_mask[block_size] & 0xffffffffffffffffULL) << shift_y;
-}
-
-// This function sets up the bit masks for the entire 64x64 region represented
-// by mi_row, mi_col.
-// TODO(JBB): This function only works for yv12.
-void av1_setup_mask(AV1_COMMON *const cm, int mi_row, int mi_col,
-                    MB_MODE_INFO **mi, int mode_info_stride,
-                    LOOP_FILTER_MASK *lfm) {
-  assert(0 && "Not yet updated");
-  int idx_32, idx_16, idx_8;
-  const loop_filter_info_n *const lfi_n = &cm->lf_info;
-  MB_MODE_INFO **mip = mi;
-  MB_MODE_INFO **mip2 = mi;
-
-  // These are offsets to the next mi in the 64x64 block. It is what gets
-  // added to the mi ptr as we go through each loop. It helps us to avoid
-  // setting up special row and column counters for each index. The last step
-  // brings us out back to the starting position.
-  const int offset_32[] = { 4, (mode_info_stride << 2) - 4, 4,
-                            -(mode_info_stride << 2) - 4 };
-  const int offset_16[] = { 2, (mode_info_stride << 1) - 2, 2,
-                            -(mode_info_stride << 1) - 2 };
-  const int offset[] = { 1, mode_info_stride - 1, 1, -mode_info_stride - 1 };
-
-  // Following variables represent shifts to position the current block
-  // mask over the appropriate block. A shift of 36 to the left will move
-  // the bits for the final 32 by 32 block in the 64x64 up 4 rows and left
-  // 4 rows to the appropriate spot.
-  const int shift_32_y[] = { 0, 4, 32, 36 };
-  const int shift_16_y[] = { 0, 2, 16, 18 };
-  const int shift_8_y[] = { 0, 1, 8, 9 };
-  const int shift_32_uv[] = { 0, 2, 8, 10 };
-  const int shift_16_uv[] = { 0, 1, 4, 5 };
-  int i;
-  const int max_rows = AOMMIN(cm->mi_rows - mi_row, MAX_MIB_SIZE);
-  const int max_cols = AOMMIN(cm->mi_cols - mi_col, MAX_MIB_SIZE);
-
-  av1_zero(*lfm);
-  assert(mip[0] != NULL);
-
-  // TODO(jimbankoski): Try moving most of the following code into decode
-  // loop and storing lfm in the mbmi structure so that we don't have to go
-  // through the recursive loop structure multiple times.
-  switch (mip[0]->sb_type) {
-    case BLOCK_64X64: build_masks(cm, lfi_n, mip[0], 0, 0, lfm); break;
-    case BLOCK_64X32:
-      build_masks(cm, lfi_n, mip[0], 0, 0, lfm);
-      mip2 = mip + mode_info_stride * 4;
-      if (4 >= max_rows) break;
-      build_masks(cm, lfi_n, mip2[0], 32, 8, lfm);
-      break;
-    case BLOCK_32X64:
-      build_masks(cm, lfi_n, mip[0], 0, 0, lfm);
-      mip2 = mip + 4;
-      if (4 >= max_cols) break;
-      build_masks(cm, lfi_n, mip2[0], 4, 2, lfm);
-      break;
-    default:
-      for (idx_32 = 0; idx_32 < 4; mip += offset_32[idx_32], ++idx_32) {
-        const int shift_y_32 = shift_32_y[idx_32];
-        const int shift_uv_32 = shift_32_uv[idx_32];
-        const int mi_32_col_offset = ((idx_32 & 1) << 2);
-        const int mi_32_row_offset = ((idx_32 >> 1) << 2);
-        if (mi_32_col_offset >= max_cols || mi_32_row_offset >= max_rows)
-          continue;
-        switch (mip[0]->sb_type) {
-          case BLOCK_32X32:
-            build_masks(cm, lfi_n, mip[0], shift_y_32, shift_uv_32, lfm);
-            break;
-          case BLOCK_32X16:
-            build_masks(cm, lfi_n, mip[0], shift_y_32, shift_uv_32, lfm);
-            if (mi_32_row_offset + 2 >= max_rows) continue;
-            mip2 = mip + mode_info_stride * 2;
-            build_masks(cm, lfi_n, mip2[0], shift_y_32 + 16, shift_uv_32 + 4,
-                        lfm);
-            break;
-          case BLOCK_16X32:
-            build_masks(cm, lfi_n, mip[0], shift_y_32, shift_uv_32, lfm);
-            if (mi_32_col_offset + 2 >= max_cols) continue;
-            mip2 = mip + 2;
-            build_masks(cm, lfi_n, mip2[0], shift_y_32 + 2, shift_uv_32 + 1,
-                        lfm);
-            break;
-          default:
-            for (idx_16 = 0; idx_16 < 4; mip += offset_16[idx_16], ++idx_16) {
-              const int shift_y_32_16 = shift_y_32 + shift_16_y[idx_16];
-              const int shift_uv_32_16 = shift_uv_32 + shift_16_uv[idx_16];
-              const int mi_16_col_offset =
-                  mi_32_col_offset + ((idx_16 & 1) << 1);
-              const int mi_16_row_offset =
-                  mi_32_row_offset + ((idx_16 >> 1) << 1);
-
-              if (mi_16_col_offset >= max_cols || mi_16_row_offset >= max_rows)
-                continue;
-
-              switch (mip[0]->sb_type) {
-                case BLOCK_16X16:
-                  build_masks(cm, lfi_n, mip[0], shift_y_32_16, shift_uv_32_16,
-                              lfm);
-                  break;
-                case BLOCK_16X8:
-                  build_masks(cm, lfi_n, mip[0], shift_y_32_16, shift_uv_32_16,
-                              lfm);
-                  if (mi_16_row_offset + 1 >= max_rows) continue;
-                  mip2 = mip + mode_info_stride;
-                  build_y_mask(cm, lfi_n, mip2[0], shift_y_32_16 + 8, lfm);
-                  break;
-                case BLOCK_8X16:
-                  build_masks(cm, lfi_n, mip[0], shift_y_32_16, shift_uv_32_16,
-                              lfm);
-                  if (mi_16_col_offset + 1 >= max_cols) continue;
-                  mip2 = mip + 1;
-                  build_y_mask(cm, lfi_n, mip2[0], shift_y_32_16 + 1, lfm);
-                  break;
-                default: {
-                  const int shift_y_32_16_8_zero = shift_y_32_16 + shift_8_y[0];
-                  build_masks(cm, lfi_n, mip[0], shift_y_32_16_8_zero,
-                              shift_uv_32_16, lfm);
-                  mip += offset[0];
-                  for (idx_8 = 1; idx_8 < 4; mip += offset[idx_8], ++idx_8) {
-                    const int shift_y_32_16_8 =
-                        shift_y_32_16 + shift_8_y[idx_8];
-                    const int mi_8_col_offset =
-                        mi_16_col_offset + ((idx_8 & 1));
-                    const int mi_8_row_offset =
-                        mi_16_row_offset + ((idx_8 >> 1));
-
-                    if (mi_8_col_offset >= max_cols ||
-                        mi_8_row_offset >= max_rows)
-                      continue;
-                    build_y_mask(cm, lfi_n, mip[0], shift_y_32_16_8, lfm);
-                  }
-                  break;
-                }
-              }
-            }
-            break;
-        }
-      }
-      break;
-  }
-  // The largest loopfilter we have is 16x16 so we use the 16x16 mask
-  // for 32x32 transforms also.
-  lfm->left_y[TX_16X16] |= lfm->left_y[TX_32X32];
-  lfm->above_y[TX_16X16] |= lfm->above_y[TX_32X32];
-  lfm->left_uv[TX_16X16] |= lfm->left_uv[TX_32X32];
-  lfm->above_uv[TX_16X16] |= lfm->above_uv[TX_32X32];
-
-  // We do at least 8 tap filter on every 32x32 even if the transform size
-  // is 4x4. So if the 4x4 is set on a border pixel add it to the 8x8 and
-  // remove it from the 4x4.
-  lfm->left_y[TX_8X8] |= lfm->left_y[TX_4X4] & left_border;
-  lfm->left_y[TX_4X4] &= ~left_border;
-  lfm->above_y[TX_8X8] |= lfm->above_y[TX_4X4] & above_border;
-  lfm->above_y[TX_4X4] &= ~above_border;
-  lfm->left_uv[TX_8X8] |= lfm->left_uv[TX_4X4] & left_border_uv;
-  lfm->left_uv[TX_4X4] &= ~left_border_uv;
-  lfm->above_uv[TX_8X8] |= lfm->above_uv[TX_4X4] & above_border_uv;
-  lfm->above_uv[TX_4X4] &= ~above_border_uv;
-
-  // We do some special edge handling.
-  if (mi_row + MAX_MIB_SIZE > cm->mi_rows) {
-    const uint64_t rows = cm->mi_rows - mi_row;
-
-    // Each pixel inside the border gets a 1,
-    const uint64_t mask_y = (((uint64_t)1 << (rows << MAX_MIB_SIZE_LOG2)) - 1);
-    const uint16_t mask_uv =
-        (((uint16_t)1 << (((rows + 1) >> 1) << (MAX_MIB_SIZE_LOG2 - 1))) - 1);
-
-    // Remove values completely outside our border.
-    for (i = 0; i < TX_32X32; i++) {
-      lfm->left_y[i] &= mask_y;
-      lfm->above_y[i] &= mask_y;
-      lfm->left_uv[i] &= mask_uv;
-      lfm->above_uv[i] &= mask_uv;
-    }
-    lfm->int_4x4_y &= mask_y;
-    lfm->above_int_4x4_uv = lfm->left_int_4x4_uv & mask_uv;
-
-    // We don't apply a wide loop filter on the last uv block row. If set
-    // apply the shorter one instead.
-    if (rows == 1) {
-      lfm->above_uv[TX_8X8] |= lfm->above_uv[TX_16X16];
-      lfm->above_uv[TX_16X16] = 0;
-    }
-    if (rows == 5) {
-      lfm->above_uv[TX_8X8] |= lfm->above_uv[TX_16X16] & 0xff00;
-      lfm->above_uv[TX_16X16] &= ~(lfm->above_uv[TX_16X16] & 0xff00);
-    }
-  } else {
-    lfm->above_int_4x4_uv = lfm->left_int_4x4_uv;
-  }
-
-  if (mi_col + MAX_MIB_SIZE > cm->mi_cols) {
-    const uint64_t columns = cm->mi_cols - mi_col;
-
-    // Each pixel inside the border gets a 1, the multiply copies the border
-    // to where we need it.
-    const uint64_t mask_y = (((1 << columns) - 1)) * 0x0101010101010101ULL;
-    const uint16_t mask_uv = ((1 << ((columns + 1) >> 1)) - 1) * 0x1111;
-
-    // Internal edges are not applied on the last column of the image so
-    // we mask 1 more for the internal edges
-    const uint16_t mask_uv_int = ((1 << (columns >> 1)) - 1) * 0x1111;
-
-    // Remove the bits outside the image edge.
-    for (i = 0; i < TX_32X32; i++) {
-      lfm->left_y[i] &= mask_y;
-      lfm->above_y[i] &= mask_y;
-      lfm->left_uv[i] &= mask_uv;
-      lfm->above_uv[i] &= mask_uv;
-    }
-    lfm->int_4x4_y &= mask_y;
-    lfm->left_int_4x4_uv &= mask_uv_int;
-
-    // We don't apply a wide loop filter on the last uv column. If set
-    // apply the shorter one instead.
-    if (columns == 1) {
-      lfm->left_uv[TX_8X8] |= lfm->left_uv[TX_16X16];
-      lfm->left_uv[TX_16X16] = 0;
-    }
-    if (columns == 5) {
-      lfm->left_uv[TX_8X8] |= (lfm->left_uv[TX_16X16] & 0xcccc);
-      lfm->left_uv[TX_16X16] &= ~(lfm->left_uv[TX_16X16] & 0xcccc);
-    }
-  }
-  // We don't apply a loop filter on the first column in the image, mask that
-  // out.
-  if (mi_col == 0) {
-    for (i = 0; i < TX_32X32; i++) {
-      lfm->left_y[i] &= 0xfefefefefefefefeULL;
-      lfm->left_uv[i] &= 0xeeee;
-    }
-  }
-
-  // Assert if we try to apply 2 different loop filters at the same position.
-  assert(!(lfm->left_y[TX_16X16] & lfm->left_y[TX_8X8]));
-  assert(!(lfm->left_y[TX_16X16] & lfm->left_y[TX_4X4]));
-  assert(!(lfm->left_y[TX_8X8] & lfm->left_y[TX_4X4]));
-  assert(!(lfm->int_4x4_y & lfm->left_y[TX_16X16]));
-  assert(!(lfm->left_uv[TX_16X16] & lfm->left_uv[TX_8X8]));
-  assert(!(lfm->left_uv[TX_16X16] & lfm->left_uv[TX_4X4]));
-  assert(!(lfm->left_uv[TX_8X8] & lfm->left_uv[TX_4X4]));
-  assert(!(lfm->left_int_4x4_uv & lfm->left_uv[TX_16X16]));
-  assert(!(lfm->above_y[TX_16X16] & lfm->above_y[TX_8X8]));
-  assert(!(lfm->above_y[TX_16X16] & lfm->above_y[TX_4X4]));
-  assert(!(lfm->above_y[TX_8X8] & lfm->above_y[TX_4X4]));
-  assert(!(lfm->int_4x4_y & lfm->above_y[TX_16X16]));
-  assert(!(lfm->above_uv[TX_16X16] & lfm->above_uv[TX_8X8]));
-  assert(!(lfm->above_uv[TX_16X16] & lfm->above_uv[TX_4X4]));
-  assert(!(lfm->above_uv[TX_8X8] & lfm->above_uv[TX_4X4]));
-  assert(!(lfm->above_int_4x4_uv & lfm->above_uv[TX_16X16]));
-}
-
-typedef struct {
-  unsigned int m16x16;
-  unsigned int m8x8;
-  unsigned int m4x4;
-} FilterMasks;
-
 static TX_SIZE get_transform_size(const MACROBLOCKD *const xd,
                                   const MB_MODE_INFO *const mbmi,
                                   const EDGE_DIR edge_dir, const int mi_row,
diff --git a/av1/common/av1_loopfilter.h b/av1/common/av1_loopfilter.h
index 136b27e..712a3da 100644
--- a/av1/common/av1_loopfilter.h
+++ b/av1/common/av1_loopfilter.h
@@ -154,38 +154,11 @@
   uint8_t lvl[MAX_MB_PLANE][MAX_SEGMENTS][2][REF_FRAMES][MAX_MODE_LF_DELTAS];
 } loop_filter_info_n;
 
-// This structure holds bit masks for all 8x8 blocks in a 64x64 region.
-// Each 1 bit represents a position in which we want to apply the loop filter.
-// Left_ entries refer to whether we apply a filter on the border to the
-// left of the block.   Above_ entries refer to whether or not to apply a
-// filter on the above border.   Int_ entries refer to whether or not to
-// apply borders on the 4x4 edges within the 8x8 block that each bit
-// represents.
-// Since each transform is accompanied by a potentially different type of
-// loop filter there is a different entry in the array for each transform size.
-typedef struct {
-  uint64_t left_y[TX_SIZES];
-  uint64_t above_y[TX_SIZES];
-  uint64_t int_4x4_y;
-  uint16_t left_uv[TX_SIZES];
-  uint16_t above_uv[TX_SIZES];
-  uint16_t left_int_4x4_uv;
-  uint16_t above_int_4x4_uv;
-  uint8_t lfl_y[MAX_MIB_SIZE][MAX_MIB_SIZE];
-  uint8_t lfl_uv[MAX_MIB_SIZE / 2][MAX_MIB_SIZE / 2];
-} LOOP_FILTER_MASK;
-
 /* assorted loopfilter functions which get used elsewhere */
 struct AV1Common;
 struct macroblockd;
 struct AV1LfSyncData;
 
-// This function sets up the bit masks for the entire 64x64 region represented
-// by mi_row, mi_col.
-void av1_setup_mask(struct AV1Common *const cm, int mi_row, int mi_col,
-                    MB_MODE_INFO **mi_8x8, int mode_info_stride,
-                    LOOP_FILTER_MASK *lfm);
-
 void av1_loop_filter_init(struct AV1Common *cm);
 
 void av1_loop_filter_frame(YV12_BUFFER_CONFIG *frame, struct AV1Common *cm,