Merge remote-tracking branch 'aosp/upstream-master' into aosp

* aosp/upstream-master:
  resize.f2fs: add option for large_nat_bitmap feature
  dump.f2fs: print more info of inode layout
  f2fs-tools: support data compression
  fsck.f2fs: Enable user-space cache
  libf2fs_io: Add user-space cache
  f2fs-tools: handle /sys is not mounted
  fsck.f2fs: add --{no-}kernel-check to bypass kernel version diff or not
  f2fs_io: add set_fsverity
  fsck.f2fs: fix typo
  sg_write_buffer: fix sg_write_buffer build outside the source dir
  f2fs-tools: reuse same pointer, exit on error without clean-up

Change-Id: I7dcef0d525f93d5871b44678a1489a2d1fc03943
Signed-off-by: Jaegeuk Kim <jaegeuk@google.com>
diff --git a/METADATA b/METADATA
index f3133c3..a094b66 100644
--- a/METADATA
+++ b/METADATA
@@ -10,8 +10,8 @@
   }
   version: "v1.13.0"
   last_upgrade_date {
-    year: 2019
-    month: 10
-    day: 18
+    year: 2020
+    month: 02
+    day: 06
   }
 }
diff --git a/fsck/dump.c b/fsck/dump.c
index a6a1635..8481a58 100644
--- a/fsck/dump.c
+++ b/fsck/dump.c
@@ -253,15 +253,22 @@
 	struct node_info ni;
 	struct f2fs_node *node_blk;
 	u32 skip = 0;
-	u32 i, idx;
+	u32 i, idx = 0;
+
+	get_node_info(sbi, nid, &ni);
+
+	node_blk = calloc(BLOCK_SZ, 1);
+	ASSERT(node_blk);
+
+	dev_read_block(node_blk, ni.blk_addr);
 
 	switch (ntype) {
 	case TYPE_DIRECT_NODE:
-		skip = idx = ADDRS_PER_BLOCK;
+		skip = idx = ADDRS_PER_BLOCK(&node_blk->i);
 		break;
 	case TYPE_INDIRECT_NODE:
 		idx = NIDS_PER_BLOCK;
-		skip = idx * ADDRS_PER_BLOCK;
+		skip = idx * ADDRS_PER_BLOCK(&node_blk->i);
 		break;
 	case TYPE_DOUBLE_INDIRECT_NODE:
 		skip = 0;
@@ -274,13 +281,6 @@
 		return;
 	}
 
-	get_node_info(sbi, nid, &ni);
-
-	node_blk = calloc(BLOCK_SZ, 1);
-	ASSERT(node_blk);
-
-	dev_read_block(node_blk, ni.blk_addr);
-
 	for (i = 0; i < idx; i++, (*ofs)++) {
 		switch (ntype) {
 		case TYPE_DIRECT_NODE:
@@ -545,7 +545,8 @@
 		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
 		bidx = node_ofs - 5 - dec;
 	}
-	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(&node_blk->i);
+	return bidx * ADDRS_PER_BLOCK(&node_blk->i) +
+				ADDRS_PER_INODE(&node_blk->i);
 }
 
 static void dump_data_offset(u32 blk_addr, int ofs_in_node)
diff --git a/fsck/fsck.c b/fsck/fsck.c
index 507437d..4d8aff0 100644
--- a/fsck/fsck.c
+++ b/fsck/fsck.c
@@ -859,6 +859,12 @@
 		/* check extent info */
 		check_extent_info(&child, blkaddr, 0);
 
+		if (blkaddr == COMPRESS_ADDR) {
+			fsck->chk.valid_blk_cnt++;
+			*blk_cnt = *blk_cnt + 1;
+			continue;
+		}
+
 		if (blkaddr != 0) {
 			ret = fsck_chk_data_blk(sbi,
 					IS_CASEFOLDED(&node_blk->i),
@@ -911,11 +917,12 @@
 			}
 skip:
 			if (ntype == TYPE_DIRECT_NODE)
-				child.pgofs += ADDRS_PER_BLOCK;
+				child.pgofs += ADDRS_PER_BLOCK(&node_blk->i);
 			else if (ntype == TYPE_INDIRECT_NODE)
-				child.pgofs += ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
+				child.pgofs += ADDRS_PER_BLOCK(&node_blk->i) *
+								NIDS_PER_BLOCK;
 			else
-				child.pgofs += ADDRS_PER_BLOCK *
+				child.pgofs += ADDRS_PER_BLOCK(&node_blk->i) *
 						NIDS_PER_BLOCK * NIDS_PER_BLOCK;
 		}
 
@@ -1088,13 +1095,18 @@
 	child->p_ino = nid;
 	child->pp_ino = le32_to_cpu(inode->i_pino);
 
-	for (idx = 0; idx < ADDRS_PER_BLOCK; idx++, child->pgofs++) {
+	for (idx = 0; idx < ADDRS_PER_BLOCK(inode); idx++, child->pgofs++) {
 		block_t blkaddr = le32_to_cpu(node_blk->dn.addr[idx]);
 
 		check_extent_info(child, blkaddr, 0);
 
 		if (blkaddr == 0x0)
 			continue;
+		if (blkaddr == COMPRESS_ADDR) {
+			F2FS_FSCK(sbi)->chk.valid_blk_cnt++;
+			*blk_cnt = *blk_cnt + 1;
+			continue;
+		}
 		ret = fsck_chk_data_blk(sbi, IS_CASEFOLDED(inode),
 			blkaddr, child,
 			le64_to_cpu(inode->i_blocks) == *blk_cnt, ftype,
@@ -1141,7 +1153,7 @@
 				FIX_MSG("Set indirect node 0x%x -> 0", i);
 			}
 skip:
-			child->pgofs += ADDRS_PER_BLOCK;
+			child->pgofs += ADDRS_PER_BLOCK(&node_blk->i);
 		}
 	}
 
@@ -1183,7 +1195,8 @@
 				FIX_MSG("Set double indirect node 0x%x -> 0", i);
 			}
 skip:
-			child->pgofs += ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
+			child->pgofs += ADDRS_PER_BLOCK(&node_blk->i) *
+							NIDS_PER_BLOCK;
 		}
 	}
 
@@ -2515,7 +2528,7 @@
 	fsck->chk.valid_blk_cnt--;
 	f2fs_clear_main_bitmap(sbi, ni.blk_addr);
 
-	for (i = 0; i < ADDRS_PER_BLOCK; i++) {
+	for (i = 0; i < ADDRS_PER_BLOCK(&node->i); i++) {
 		addr = le32_to_cpu(node->dn.addr[i]);
 		if (!addr)
 			continue;
@@ -3024,7 +3037,7 @@
 		c.bug_on = 1;
 	}
 
-	printf("[FSCK] valid_node_count matcing with CP (de lookup)  ");
+	printf("[FSCK] valid_node_count matching with CP (de lookup) ");
 	if (sbi->total_valid_node_count == fsck->chk.valid_node_cnt) {
 		printf(" [Ok..] [0x%x]\n", fsck->chk.valid_node_cnt);
 	} else {
@@ -3033,7 +3046,7 @@
 		c.bug_on = 1;
 	}
 
-	printf("[FSCK] valid_node_count matcing with CP (nat lookup) ");
+	printf("[FSCK] valid_node_count matching with CP (nat lookup)");
 	if (sbi->total_valid_node_count == fsck->chk.valid_nat_entry_cnt) {
 		printf(" [Ok..] [0x%x]\n", fsck->chk.valid_nat_entry_cnt);
 	} else {
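For context on the COMPRESS_ADDR handling added above: in the f2fs compression design (assumed here from the kernel-side layout; this patch does not spell it out), a compressed cluster keeps a marker value in its first address slot, which is why fsck counts that slot toward valid_blk_cnt but never passes it to fsck_chk_data_blk(). An illustrative sketch for a 4-block cluster, with made-up addresses:

/*
 *   addr[k + 0] = COMPRESS_ADDR   marker slot: counted, but skipped as data
 *   addr[k + 1] = 0x4f21          compressed data block
 *   addr[k + 2] = 0x4f22          compressed data block
 *   addr[k + 3] = NULL_ADDR       slot saved by compression
 */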
diff --git a/fsck/main.c b/fsck/main.c
index 9a7d499..a6fd970 100644
--- a/fsck/main.c
+++ b/fsck/main.c
@@ -10,6 +10,9 @@
  *   Liu Shuoran <liushuoran@huawei.com>
  *   Jaegeuk Kim <jaegeuk@kernel.org>
  *  : add sload.f2fs
+ * Copyright (c) 2019 Google Inc.
+ *   Robin Hsu <robinhsu@google.com>
+ *  : add cache layer
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -20,6 +23,7 @@
 #include <ctype.h>
 #include <time.h>
 #include <getopt.h>
+#include <stdbool.h>
 #include "quotaio.h"
 
 struct f2fs_fsck gfsck;
@@ -54,7 +58,12 @@
 	MSG(0, "\nUsage: fsck.f2fs [options] device\n");
 	MSG(0, "[options]:\n");
 	MSG(0, "  -a check/fix potential corruption, reported by f2fs\n");
-	MSG(0, "  -C encoding[:flag1,flag2] Set options for enabling casefolding\n");
+	MSG(0, "  -c <num-cache-entry>  set number of cache entries"
+			" (default 0)\n");
+	MSG(0, "  -m <max-hash-collision>  set max cache hash collision"
+			" (default 16)\n");
+	MSG(0, "  -C encoding[:flag1,flag2] Set options for enabling"
+			" casefolding\n");
 	MSG(0, "  -d debug level [default:0]\n");
 	MSG(0, "  -f check/fix entire partition\n");
 	MSG(0, "  -g add default options\n");
@@ -66,6 +75,9 @@
 	MSG(0, "  -y fix all the time\n");
 	MSG(0, "  -V print the version number and exit\n");
 	MSG(0, "  --dry-run do not really fix corruptions\n");
+	MSG(0, "  --no-kernel-check skips detecting kernel change\n");
+	MSG(0, "  --kernel-check checks kernel change\n");
+	MSG(0, "  --debug-cache to debug cache when -c is used\n");
 	exit(1);
 }
 
@@ -104,6 +116,7 @@
 	MSG(0, "\nUsage: resize.f2fs [options] device\n");
 	MSG(0, "[options]:\n");
 	MSG(0, "  -d debug level [default:0]\n");
+	MSG(0, "  -i extended node bitmap, node ratio is 20%% by default\n");
 	MSG(0, "  -s safe resize (Does not resize metadata)");
 	MSG(0, "  -t target sectors [default: device size]\n");
 	MSG(0, "  -V print the version number and exit\n");
@@ -187,15 +200,20 @@
 	}
 
 	if (!strcmp("fsck.f2fs", prog)) {
-		const char *option_string = ":aC:d:fg:O:p:q:StyV";
+		const char *option_string = ":aC:c:m:d:fg:O:p:q:StyV";
 		int opt = 0, val;
 		char *token;
 		struct option long_opt[] = {
 			{"dry-run", no_argument, 0, 1},
+			{"no-kernel-check", no_argument, 0, 2},
+			{"kernel-check", no_argument, 0, 3},
+			{"debug-cache", no_argument, 0, 4},
 			{0, 0, 0, 0}
 		};
 
 		c.func = FSCK;
+		c.cache_config.max_hash_collision = 16;
+		c.cache_config.dbg_en = false;
 		while ((option = getopt_long(argc, argv, option_string,
 						long_opt, &opt)) != EOF) {
 			switch (option) {
@@ -203,10 +221,28 @@
 				c.dry_run = 1;
 				MSG(0, "Info: Dry run\n");
 				break;
+			case 2:
+				c.no_kernel_check = 1;
+				MSG(0, "Info: No Kernel Check\n");
+				break;
+			case 3:
+				c.no_kernel_check = 0;
+				MSG(0, "Info: Do Kernel Check\n");
+				break;
+			case 4:
+				c.cache_config.dbg_en = true;
+				break;
 			case 'a':
 				c.auto_fix = 1;
 				MSG(0, "Info: Fix the reported corruption.\n");
 				break;
+			case 'c':
+				c.cache_config.num_cache_entry = atoi(optarg);
+				break;
+			case 'm':
+				c.cache_config.max_hash_collision =
+						atoi(optarg);
+				break;
 			case 'g':
 				if (!strcmp(optarg, "android"))
 					c.defset = CONF_ANDROID;
@@ -449,7 +485,7 @@
 				break;
 		}
 	} else if (!strcmp("resize.f2fs", prog)) {
-		const char *option_string = "d:st:V";
+		const char *option_string = "d:st:iV";
 
 		c.func = RESIZE;
 		while ((option = getopt(argc, argv, option_string)) != EOF) {
@@ -476,6 +512,9 @@
 					ret = sscanf(optarg, "%"PRIx64"",
 							&c.target_sectors);
 				break;
+			case 'i':
+				c.large_nat_bitmap = 1;
+				break;
 			case 'V':
 				show_version(prog);
 				exit(0);
@@ -773,11 +812,14 @@
 	f2fs_parse_options(argc, argv);
 
 	if (c.func != DUMP && f2fs_devs_are_umounted() < 0) {
-		if (errno == EBUSY)
-			return -1;
+		if (errno == EBUSY) {
+			ret = -1;
+			goto quick_err;
+		}
 		if (!c.ro || c.func == DEFRAG) {
 			MSG(0, "\tError: Not available on mounted device!\n");
-			return -1;
+			ret = -1;
+			goto quick_err;
 		}
 
 		/* allow ro-mounted partition */
@@ -791,8 +833,10 @@
 	}
 
 	/* Get device */
-	if (f2fs_get_device_info() < 0)
-		return -1;
+	if (f2fs_get_device_info() < 0) {
+		ret = -1;
+		goto quick_err;
+	}
 
 fsck_again:
 	memset(&gfsck, 0, sizeof(gfsck));
@@ -883,5 +927,7 @@
 		free(sbi->ckpt);
 	if (sbi->raw_super)
 		free(sbi->raw_super);
+quick_err:
+	f2fs_release_sparse_resource();
 	return ret;
 }
diff --git a/fsck/mount.c b/fsck/mount.c
index 882f1ea..e4ba048 100644
--- a/fsck/mount.c
+++ b/fsck/mount.c
@@ -228,18 +228,26 @@
 			DISP_u64(inode, i_crtime);
 			DISP_u32(inode, i_crtime_nsec);
 		}
+		if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
+			DISP_u64(inode, i_compr_blocks);
+			DISP_u32(inode, i_compress_algrithm);
+			DISP_u32(inode, i_log_cluster_size);
+			DISP_u32(inode, i_padding);
+		}
 	}
 
-	DISP_u32(inode, i_addr[ofs]);		/* Pointers to data blocks */
-	DISP_u32(inode, i_addr[ofs + 1]);	/* Pointers to data blocks */
-	DISP_u32(inode, i_addr[ofs + 2]);	/* Pointers to data blocks */
-	DISP_u32(inode, i_addr[ofs + 3]);	/* Pointers to data blocks */
+	for (i = ofs; i < ADDRS_PER_INODE(inode); i++) {
+		block_t blkaddr = le32_to_cpu(inode->i_addr[i]);
+		char *flag = "";
 
-	for (i = ofs + 3; i < ADDRS_PER_INODE(inode); i++) {
-		if (inode->i_addr[i] == 0x0)
-			break;
-		printf("i_addr[0x%x] points data block\t\t[0x%4x]\n",
-				i, le32_to_cpu(inode->i_addr[i]));
+		if (blkaddr == 0x0)
+			continue;
+		if (blkaddr == COMPRESS_ADDR)
+			flag = "cluster flag";
+		else if (blkaddr == NEW_ADDR)
+			flag = "reserved flag";
+		printf("i_addr[0x%x] %-16s\t\t[0x%8x : %u]\n", i, flag,
+				blkaddr, blkaddr);
 	}
 
 	DISP_u32(inode, i_nid[0]);	/* direct */
@@ -272,7 +280,7 @@
 		DBG(verbose,
 			"Node ID [0x%x:%u] is direct node or indirect node.\n",
 								nid, nid);
-		for (i = 0; i <= 10; i++)
+		for (i = 0; i < DEF_ADDRS_PER_BLOCK; i++)
 			MSG(verbose, "[%d]\t\t\t[0x%8x : %d]\n",
 						i, dump_blk[i], dump_blk[i]);
 	}
@@ -470,6 +478,9 @@
 	if (f & cpu_to_le32(F2FS_FEATURE_CASEFOLD)) {
 		MSG(0, "%s", " casefold");
 	}
+	if (f & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
+		MSG(0, "%s", " compression");
+	}
 	MSG(0, "\n");
 	MSG(0, "Info: superblock encrypt level = %d, salt = ",
 					sb->encryption_level);
@@ -480,7 +491,8 @@
 
 static inline bool is_valid_data_blkaddr(block_t blkaddr)
 {
-	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
+	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
+				blkaddr == COMPRESS_ADDR)
 		return 0;
 	return 1;
 }
@@ -872,7 +884,8 @@
 		MSG(0, "Info: MKFS version\n  \"%s\"\n", c.init_version);
 		MSG(0, "Info: FSCK version\n  from \"%s\"\n    to \"%s\"\n",
 					c.sb_version, c.version);
-		if (memcmp(c.sb_version, c.version, VERSION_LEN)) {
+		if (!c.no_kernel_check &&
+				memcmp(c.sb_version, c.version, VERSION_LEN)) {
 			memcpy(sbi->raw_super->version,
 						c.version, VERSION_LEN);
 			update_superblock(sbi->raw_super, SB_MASK(sb_addr));
@@ -3214,7 +3227,7 @@
 
 	/* step 3: recover data indices */
 	start = start_bidx_of_node(ofs_of_node(node_blk), node_blk);
-	end = start + ADDRS_PER_PAGE(node_blk);
+	end = start + ADDRS_PER_PAGE(sbi, node_blk, NULL);
 
 	for (; start < end; start++, ofs_in_node++) {
 		blkaddr = datablock_addr(node_blk, ofs_in_node);
diff --git a/fsck/node.c b/fsck/node.c
index d76e3bc..229a99c 100644
--- a/fsck/node.c
+++ b/fsck/node.c
@@ -117,9 +117,9 @@
 				int offset[4], unsigned int noffset[4])
 {
 	const long direct_index = ADDRS_PER_INODE(&node->i);
-	const long direct_blks = ADDRS_PER_BLOCK;
+	const long direct_blks = ADDRS_PER_BLOCK(&node->i);
 	const long dptrs_per_blk = NIDS_PER_BLOCK;
-	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
+	const long indirect_blks = ADDRS_PER_BLOCK(&node->i) * NIDS_PER_BLOCK;
 	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
 	int n = 0;
 	int level = 0;
diff --git a/fsck/node.h b/fsck/node.h
index d927a09..6bce1fb 100644
--- a/fsck/node.h
+++ b/fsck/node.h
@@ -18,14 +18,36 @@
 
 #include "fsck.h"
 
-#define ADDRS_PER_PAGE(page) \
-	(IS_INODE(page) ? ADDRS_PER_INODE(&page->i) : ADDRS_PER_BLOCK)
-
 static inline int IS_INODE(struct f2fs_node *node)
 {
 	return ((node)->footer.nid == (node)->footer.ino);
 }
 
+static inline unsigned int ADDRS_PER_PAGE(struct f2fs_sb_info *sbi,
+		struct f2fs_node *node_blk, struct f2fs_node *inode_blk)
+{
+	nid_t ino = le32_to_cpu(node_blk->footer.ino);
+	unsigned int nblocks;
+
+	if (IS_INODE(node_blk))
+		return ADDRS_PER_INODE(&node_blk->i);
+
+	if (!inode_blk) {
+		struct node_info ni;
+
+		inode_blk = calloc(BLOCK_SZ, 2);
+		ASSERT(inode_blk);
+
+		get_node_info(sbi, ino, &ni);
+		ASSERT(dev_read_block(inode_blk, ni.blk_addr) >= 0);
+		nblocks = ADDRS_PER_BLOCK(&inode_blk->i);
+		free(inode_blk);
+	} else {
+		nblocks = ADDRS_PER_BLOCK(&inode_blk->i);
+	}
+	return nblocks;
+}
+
 static inline __le32 *blkaddr_in_inode(struct f2fs_node *node)
 {
 	return node->i.i_addr + get_extra_isize(node);
diff --git a/fsck/resize.c b/fsck/resize.c
index fc563f2..46b1cfb 100644
--- a/fsck/resize.c
+++ b/fsck/resize.c
@@ -512,6 +512,9 @@
 
 	/* update nat_bits flag */
 	flags = update_nat_bits_flags(new_sb, cp, get_cp(ckpt_flags));
+	if (c.large_nat_bitmap)
+		flags |= CP_LARGE_NAT_BITMAP_FLAG;
+
 	if (flags & CP_COMPACT_SUM_FLAG)
 		flags &= ~CP_COMPACT_SUM_FLAG;
 	if (flags & CP_LARGE_NAT_BITMAP_FLAG)
diff --git a/fsck/segment.c b/fsck/segment.c
index 17c42b7..b7cf245 100644
--- a/fsck/segment.c
+++ b/fsck/segment.c
@@ -170,7 +170,8 @@
 				free(index_node);
 			index_node = (dn.node_blk == dn.inode_blk) ?
 							NULL : dn.node_blk;
-			remained_blkentries = ADDRS_PER_PAGE(dn.node_blk);
+			remained_blkentries = ADDRS_PER_PAGE(sbi,
+						dn.node_blk, dn.inode_blk);
 		}
 		ASSERT(remained_blkentries > 0);
 
@@ -248,7 +249,8 @@
 				free(index_node);
 			index_node = (dn.node_blk == dn.inode_blk) ?
 							NULL : dn.node_blk;
-			remained_blkentries = ADDRS_PER_PAGE(dn.node_blk);
+			remained_blkentries = ADDRS_PER_PAGE(sbi,
+						dn.node_blk, dn.inode_blk);
 		}
 		ASSERT(remained_blkentries > 0);
 
diff --git a/fsck/xattr.c b/fsck/xattr.c
index d51a08a..d5350e3 100644
--- a/fsck/xattr.c
+++ b/fsck/xattr.c
@@ -98,7 +98,8 @@
 		xattr_node = calloc(BLOCK_SZ, 1);
 		ASSERT(xattr_node);
 		ret = dev_read_block(xattr_node, ni.blk_addr);
-		ASSERT(ret >= 0);
+		if (ret < 0)
+			goto free_xattr_node;
 	}
 
 	/* write to xattr node block */
@@ -107,10 +108,10 @@
 			PAGE_SIZE - sizeof(struct node_footer));
 
 	ret = dev_write_block(xattr_node, blkaddr);
-	ASSERT(ret >= 0);
 
-	if (xnid)
-		free(xattr_node);
+free_xattr_node:
+	free(xattr_node);
+	ASSERT(ret >= 0);
 }
 
 int f2fs_setxattr(struct f2fs_sb_info *sbi, nid_t ino, int index, const char *name,
diff --git a/include/f2fs_fs.h b/include/f2fs_fs.h
index a36927b..af31bc5 100644
--- a/include/f2fs_fs.h
+++ b/include/f2fs_fs.h
@@ -3,6 +3,8 @@
  *
  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com/
+ * Copyright (c) 2019 Google Inc.
+ *             http://www.google.com/
  *
  * Dual licensed under the GPL or LGPL version 2 licenses.
  *
@@ -332,6 +334,16 @@
 	size_t zone_blocks;
 };
 
+typedef struct {
+	/* Value 0 means no cache, minimum 1024 */
+	long num_cache_entry;
+
+	/* Value 0 means always overwrite (no collision allowed); maximum 16 */
+	unsigned max_hash_collision;
+
+	bool dbg_en;
+} dev_cache_config_t;
+
 struct f2fs_configuration {
 	u_int32_t reserved_segments;
 	u_int32_t new_reserved_segments;
@@ -375,6 +387,7 @@
 	int func;
 	void *private;
 	int dry_run;
+	int no_kernel_check;
 	int fix_on;
 	int force;
 	int defset;
@@ -422,6 +435,9 @@
 
 	/* precomputed fs UUID checksum for seeding other checksums */
 	u_int32_t chksum_seed;
+
+	/* cache parameters */
+	dev_cache_config_t cache_config;
 };
 
 #ifdef CONFIG_64BIT
@@ -558,6 +574,7 @@
 
 #define NULL_ADDR		0x0U
 #define NEW_ADDR		-1U
+#define COMPRESS_ADDR		-2U
 
 #define F2FS_ROOT_INO(sbi)	(sbi->root_ino_num)
 #define F2FS_NODE_INO(sbi)	(sbi->node_ino_num)
@@ -598,6 +615,7 @@
 #define F2FS_FEATURE_VERITY		0x0400	/* reserved */
 #define F2FS_FEATURE_SB_CHKSUM		0x0800
 #define F2FS_FEATURE_CASEFOLD		0x1000
+ #define F2FS_FEATURE_COMPRESSION	0x2000
 
 #define MAX_VOLUME_NAME		512
 
@@ -751,7 +769,8 @@
 #define CUR_ADDRS_PER_INODE(inode)	(DEF_ADDRS_PER_INODE - \
 					__get_extra_isize(inode))
 #define ADDRS_PER_INODE(i)	addrs_per_inode(i)
-#define ADDRS_PER_BLOCK         1018	/* Address Pointers in a Direct Block */
+#define DEF_ADDRS_PER_BLOCK	1018	/* Address Pointers in a Direct Block */
+#define ADDRS_PER_BLOCK(i)	addrs_per_block(i)
 #define NIDS_PER_BLOCK          1018	/* Node IDs in an Indirect Block */
 
 #define	NODE_DIR1_BLOCK		(DEF_ADDRS_PER_INODE + 1)
@@ -811,6 +830,10 @@
 #define F2FS_CASEFOLD_FL	0x40000000 /* Casefolded file */
 #define IS_CASEFOLDED(dir)     ((dir)->i_flags & F2FS_CASEFOLD_FL)
 
+/*
+ * inode flags
+ */
+#define F2FS_COMPR_FL		0x00000004 /* Compress file */
 struct f2fs_inode {
 	__le16 i_mode;			/* file mode */
 	__u8 i_advise;			/* file hints */
@@ -851,6 +874,10 @@
 			__le32 i_inode_checksum;/* inode meta checksum */
 			__le64 i_crtime;	/* creation time */
 			__le32 i_crtime_nsec;	/* creation time in nano scale */
+			__le64 i_compr_blocks;	/* # of compressed blocks */
+			__u8 i_compress_algrithm;	/* compress algorithm */
+			__u8 i_log_cluster_size;	/* log of cluster size */
+			__le16 i_padding;		/* padding */
 			__le32 i_extra_end[0];	/* for attribute size calculation */
 		} __attribute__((packed));
 		__le32 i_addr[DEF_ADDRS_PER_INODE];	/* Pointers to data blocks */
@@ -861,7 +888,7 @@
 
 
 struct direct_node {
-	__le32 addr[ADDRS_PER_BLOCK];	/* array of data block address */
+	__le32 addr[DEF_ADDRS_PER_BLOCK];	/* array of data block address */
 } __attribute__((packed));
 
 struct indirect_node {
@@ -1160,6 +1187,7 @@
 extern int utf16_to_utf8(char *, const u_int16_t *, size_t, size_t);
 extern int log_base_2(u_int32_t);
 extern unsigned int addrs_per_inode(struct f2fs_inode *);
+extern unsigned int addrs_per_block(struct f2fs_inode *);
 extern __u32 f2fs_inode_chksum(struct f2fs_node *);
 extern __u32 f2fs_checkpoint_chksum(struct f2fs_checkpoint *);
 extern int write_inode(struct f2fs_node *, u64);
@@ -1185,9 +1213,13 @@
 extern unsigned int calc_extra_isize(void);
 extern int get_device_info(int);
 extern int f2fs_init_sparse_file(void);
+extern void f2fs_release_sparse_resource(void);
 extern int f2fs_finalize_device(void);
 extern int f2fs_fsync_device(void);
 
+extern void dcache_init(void);
+extern void dcache_release(void);
+
 extern int dev_read(void *, __u64, size_t);
 #ifdef POSIX_FADV_WILLNEED
 extern int dev_readahead(__u64, size_t);
@@ -1397,6 +1429,7 @@
 	{ "verity",			F2FS_FEATURE_VERITY },	/* reserved */ \
 	{ "sb_checksum",		F2FS_FEATURE_SB_CHKSUM },	\
 	{ "casefold",			F2FS_FEATURE_CASEFOLD },	\
+	{ "compression",		F2FS_FEATURE_COMPRESSION },	\
 	{ NULL,				0x0},				\
 };
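The new dev_cache_config_t and the dcache_init()/dcache_release() declarations above are wired up from fsck.f2fs's option parsing earlier in this patch. The sketch below is a hedged example of configuring the cache directly from a caller; the numeric values are placeholders, not recommendations:

/* Hedged sketch, not part of this patch; values are assumed. */
#include <stdbool.h>
#include "f2fs_fs.h"

static void example_enable_dcache(void)
{
	c.cache_config.num_cache_entry = 4096;	/* 0 disables the cache; small nonzero values are raised to 1024 */
	c.cache_config.max_hash_collision = 16;	/* probes tried before evicting the least used slot */
	c.cache_config.dbg_en = true;		/* print hit/miss statistics when the cache is released */

	dcache_init();	/* optional here: dev_read()/dev_write() also initialize lazily */
}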
 
diff --git a/lib/libf2fs.c b/lib/libf2fs.c
index 83a578a..d527d68 100644
--- a/lib/libf2fs.c
+++ b/lib/libf2fs.c
@@ -499,9 +499,21 @@
 	return __f2fs_dentry_hash(name, len);
 }
 
+#define ALIGN_DOWN(addrs, size)		(((addrs) / (size)) * (size))
 unsigned int addrs_per_inode(struct f2fs_inode *i)
 {
-	return CUR_ADDRS_PER_INODE(i) - get_inline_xattr_addrs(i);
+	unsigned int addrs = CUR_ADDRS_PER_INODE(i) - get_inline_xattr_addrs(i);
+
+	if (!(le32_to_cpu(i->i_flags) & F2FS_COMPR_FL))
+		return addrs;
+	return ALIGN_DOWN(addrs, 1 << i->i_log_cluster_size);
+}
+
+unsigned int addrs_per_block(struct f2fs_inode *i)
+{
+	if (!(le32_to_cpu(i->i_flags) & F2FS_COMPR_FL))
+		return DEF_ADDRS_PER_BLOCK;
+	return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, 1 << i->i_log_cluster_size);
 }
 
 /*
@@ -655,6 +667,9 @@
 	c.wanted_sector_size = -1;
 #ifndef WITH_ANDROID
 	c.preserve_limits = 1;
+	c.no_kernel_check = 1;
+#else
+	c.no_kernel_check = 0;
 #endif
 
 	for (i = 0; i < MAX_DEVICES; i++) {
@@ -1243,6 +1258,8 @@
 	if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
 		size = offsetof(struct f2fs_inode, i_crtime);
 	if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME))
+		size = offsetof(struct f2fs_inode, i_compr_blocks);
+	if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION))
 		size = offsetof(struct f2fs_inode, i_extra_end);
 
 	return size - F2FS_EXTRA_ISIZE_OFFSET;
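A worked example of the ALIGN_DOWN() rounding introduced above (standalone and illustrative: 1018 is DEF_ADDRS_PER_BLOCK, while 918 is an assumed figure standing in for an inode's usable slot count after extra attrs and inline xattrs):

#include <stdio.h>

/* Same formula as the ALIGN_DOWN() macro above. */
#define EXAMPLE_ALIGN_DOWN(addrs, size)	(((addrs) / (size)) * (size))

int main(void)
{
	unsigned int cluster = 1 << 2;	/* i_log_cluster_size == 2 -> 4-block clusters */

	/* Direct node: 1018 -> 1016 slots once F2FS_COMPR_FL is set. */
	printf("addrs_per_block: %u\n", EXAMPLE_ALIGN_DOWN(1018, cluster));
	/* Inode with (assumed) 918 usable slots: 918 -> 916. */
	printf("addrs_per_inode: %u\n", EXAMPLE_ALIGN_DOWN(918, cluster));
	return 0;
}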
diff --git a/lib/libf2fs_io.c b/lib/libf2fs_io.c
index 4d0ea0d..1f597a9 100644
--- a/lib/libf2fs_io.c
+++ b/lib/libf2fs_io.c
@@ -3,6 +3,8 @@
  *
  * Copyright (c) 2013 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com/
+ * Copyright (c) 2019 Google Inc.
+ *             http://www.google.com/
  *
  * Dual licensed under the GPL or LGPL version 2 licenses.
  */
@@ -27,7 +29,10 @@
 #include <linux/hdreg.h>
 #endif
 
-#include <f2fs_fs.h>
+#include <stdbool.h>
+#include <assert.h>
+#include <inttypes.h>
+#include "f2fs_fs.h"
 
 struct f2fs_configuration c;
 
@@ -64,6 +69,318 @@
 }
 #endif
 
+/* ---------- dev_cache, Least Used First (LUF) policy  ------------------- */
+/*
+ * The least used block is the first victim to be replaced when the max
+ * hash collision count is exceeded
+ */
+static bool *dcache_valid; /* is the cached block valid? */
+static off64_t  *dcache_blk; /* which block it cached */
+static uint64_t *dcache_lastused; /* last used ticks for cache entries */
+static char *dcache_buf; /* cached block data */
+static uint64_t dcache_usetick; /* current use tick */
+
+static uint64_t dcache_raccess;
+static uint64_t dcache_rhit;
+static uint64_t dcache_rmiss;
+static uint64_t dcache_rreplace;
+
+static bool dcache_exit_registered = false;
+
+/*
+ *  Shadow config:
+ *
+ *  Active set of the configurations.
+ *  The global configuration 'c.cache_config' will be transferred here
+ *  when dcache_init() is called
+ */
+static dev_cache_config_t dcache_config = {0, 16, 1};
+static bool dcache_initialized = false;
+
+#define MIN_NUM_CACHE_ENTRY  1024L
+#define MAX_MAX_HASH_COLLISION  16
+
+static long dcache_relocate_offset0[] = {
+	20, -20, 40, -40, 80, -80, 160, -160,
+	320, -320, 640, -640, 1280, -1280, 2560, -2560,
+};
+static int dcache_relocate_offset[16];
+
+static void dcache_print_statistics(void)
+{
+	long i;
+	long useCnt;
+
+	/* Number of used cache entries */
+	useCnt = 0;
+	for (i = 0; i < dcache_config.num_cache_entry; i++)
+		if (dcache_valid[i])
+			++useCnt;
+
+	/*
+	 *  c: number of cache entries
+	 *  u: used entries
+	 *  RA: number of read access blocks
+	 *  CH: cache hit
+	 *  CM: cache miss
+	 *  Repl: read cache replaced
+	 */
+	printf ("\nc, u, RA, CH, CM, Repl=\n");
+	printf ("%ld %ld %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
+			dcache_config.num_cache_entry,
+			useCnt,
+			dcache_raccess,
+			dcache_rhit,
+			dcache_rmiss,
+			dcache_rreplace);
+}
+
+void dcache_release(void)
+{
+	if (!dcache_initialized)
+		return;
+
+	dcache_initialized = false;
+
+	if (c.cache_config.dbg_en)
+		dcache_print_statistics();
+
+	if (dcache_blk != NULL)
+		free(dcache_blk);
+	if (dcache_lastused != NULL)
+		free(dcache_lastused);
+	if (dcache_buf != NULL)
+		free(dcache_buf);
+	if (dcache_valid != NULL)
+		free(dcache_valid);
+	dcache_config.num_cache_entry = 0;
+	dcache_blk = NULL;
+	dcache_lastused = NULL;
+	dcache_buf = NULL;
+	dcache_valid = NULL;
+}
+
+// return 0 for success, error code for failure.
+static int dcache_alloc_all(long n)
+{
+	if (n <= 0)
+		return -1;
+	if ((dcache_blk = (off64_t *) malloc(sizeof(off64_t) * n)) == NULL
+		|| (dcache_lastused = (uint64_t *)
+				malloc(sizeof(uint64_t) * n)) == NULL
+		|| (dcache_buf = (char *) malloc (F2FS_BLKSIZE * n)) == NULL
+		|| (dcache_valid = (bool *) malloc(sizeof(bool) * n)) == NULL)
+	{
+		dcache_release();
+		return -1;
+	}
+	dcache_config.num_cache_entry = n;
+	return 0;
+}
+
+static void dcache_relocate_init(void)
+{
+	int i;
+	int n0 = (sizeof(dcache_relocate_offset0)
+			/ sizeof(dcache_relocate_offset0[0]));
+	int n = (sizeof(dcache_relocate_offset)
+			/ sizeof(dcache_relocate_offset[0]));
+
+	ASSERT(n == n0);
+	for (i = 0; i < n && i < dcache_config.max_hash_collision; i++) {
+		if (labs(dcache_relocate_offset0[i])
+				> dcache_config.num_cache_entry / 2) {
+			dcache_config.max_hash_collision = i;
+			break;
+		}
+		dcache_relocate_offset[i] =
+				dcache_config.num_cache_entry
+				+ dcache_relocate_offset0[i];
+	}
+}
+
+void dcache_init(void)
+{
+	long n;
+
+	if (c.cache_config.num_cache_entry <= 0)
+		return;
+
+	/* release previous cache init, if any */
+	dcache_release();
+
+	dcache_blk = NULL;
+	dcache_lastused = NULL;
+	dcache_buf = NULL;
+	dcache_valid = NULL;
+
+	dcache_config = c.cache_config;
+
+	n = max(MIN_NUM_CACHE_ENTRY, dcache_config.num_cache_entry);
+
+	/* halve alloc size until it succeeds, or the minimum cache size is reached */
+	while (dcache_alloc_all(n) != 0 && n !=  MIN_NUM_CACHE_ENTRY)
+		n = max(MIN_NUM_CACHE_ENTRY, n/2);
+
+	/* must be the last: data dependent on num_cache_entry */
+	dcache_relocate_init();
+	dcache_initialized = true;
+
+	if (!dcache_exit_registered) {
+		dcache_exit_registered = true;
+		atexit(dcache_release); /* auto release */
+	}
+
+	dcache_raccess = 0;
+	dcache_rhit = 0;
+	dcache_rmiss = 0;
+	dcache_rreplace = 0;
+}
+
+static inline char *dcache_addr(long entry)
+{
+	return dcache_buf + F2FS_BLKSIZE * entry;
+}
+
+/* relocate on (n+1)-th collision */
+static inline long dcache_relocate(long entry, int n)
+{
+	assert(dcache_config.num_cache_entry != 0);
+	return (entry + dcache_relocate_offset[n]) %
+			dcache_config.num_cache_entry;
+}
+
+static long dcache_find(off64_t blk)
+{
+	register long n = dcache_config.num_cache_entry;
+	register unsigned m = dcache_config.max_hash_collision;
+	long entry, least_used, target;
+	unsigned try;
+
+	assert(n > 0);
+	target = least_used = entry = blk % n; /* simple modulo hash */
+
+	for (try = 0; try < m; try++) {
+		if (!dcache_valid[target] || dcache_blk[target] == blk)
+			return target;  /* found target or empty cache slot */
+		if (dcache_lastused[target] < dcache_lastused[least_used])
+			least_used = target;
+		target = dcache_relocate(entry, try); /* next target */
+	}
+	return least_used;  /* max search reached, return least used slot */
+}
+
+/* Physical read into cache */
+static int dcache_io_read(int fd, long entry, off64_t offset, off64_t blk)
+{
+	if (lseek64(fd, offset, SEEK_SET) < 0) {
+		MSG(0, "\n lseek64 fail.\n");
+		return -1;
+	}
+	if (read(fd, dcache_buf + entry * F2FS_BLKSIZE, F2FS_BLKSIZE) < 0) {
+		MSG(0, "\n read() fail.\n");
+		return -1;
+	}
+	dcache_lastused[entry] = ++dcache_usetick;
+	dcache_valid[entry] = true;
+	dcache_blk[entry] = blk;
+	return 0;
+}
+
+/*
+ *  - Note: Read/Write are not symmetric:
+ *       For read, we have to go block by block, due to the cache nature:
+ *           some blocks may be cached while others are not.
+ *       For write, since we always write through, we can join all writes into
+ *       one and issue it once at the caller.  This function updates the cache
+ *       for a write but does not do the physical write; that is the caller's job.
+ *  - Note: Read and write are handled together because they share the same
+ *          logic for finding the relevant cache entries.
+ *  - Return values:
+ *       0: success
+ *       1: cache not available (uninitialized)
+ *      -1: error
+ */
+static int dcache_update_rw(int fd, void *buf, off64_t offset,
+		size_t byte_count, bool is_write)
+{
+	off64_t blk;
+	int addr_in_blk;
+	off64_t start;
+
+	if (!dcache_initialized)
+		dcache_init(); /* auto initialize */
+
+	if (!dcache_initialized)
+		return 1; /* not available */
+
+	blk = offset / F2FS_BLKSIZE;
+	addr_in_blk = offset % F2FS_BLKSIZE;
+	start = blk * F2FS_BLKSIZE;
+
+	while (byte_count != 0) {
+		size_t cur_size = min(byte_count,
+				(size_t)(F2FS_BLKSIZE - addr_in_blk));
+		long entry = dcache_find(blk);
+
+		if (!is_write)
+			++dcache_raccess;
+
+		if (dcache_valid[entry] && dcache_blk[entry] == blk) {
+			/* cache hit */
+			if (is_write)  /* write: update cache */
+				memcpy(dcache_addr(entry) + addr_in_blk,
+					buf, cur_size);
+			else
+				++dcache_rhit;
+		} else {
+			/* cache miss */
+			if (!is_write) {
+				int err;
+				++dcache_rmiss;
+				if (dcache_valid[entry])
+					++dcache_rreplace;
+				/* read: physical I/O read into cache */
+				err = dcache_io_read(fd, entry, start, blk);
+				if (err)
+					return err;
+			}
+		}
+
+		/* read: copy data from cache */
+		/* write: nothing to do, since we don't do physical write. */
+		if (!is_write)
+			memcpy(buf, dcache_addr(entry) + addr_in_blk,
+				cur_size);
+
+		/* next block */
+		++blk;
+		buf += cur_size;
+		start += F2FS_BLKSIZE;
+		byte_count -= cur_size;
+		addr_in_blk = 0;
+	}
+	return 0;
+}
+
+/*
+ * dcache_update_cache() only updates the cache; it does no physical I/O.
+ * Thus, even on success, the caller still needs a normal non-cache write.
+ *
+ * return value: 1: cache not available
+ *               0: success, -1: I/O error
+ */
+int dcache_update_cache(int fd, void *buf, off64_t offset, size_t count)
+{
+	return dcache_update_rw(fd, buf, offset, count, true);
+}
+
+/* handles read into cache + read into buffer  */
+int dcache_read(int fd, void *buf, off64_t offset, size_t count)
+{
+	return dcache_update_rw(fd, buf, offset, count, false);
+}
+
 /*
  * IO interfaces
  */
@@ -185,6 +502,7 @@
 int dev_read(void *buf, __u64 offset, size_t len)
 {
 	int fd;
+	int err;
 
 	if (c.sparse_mode)
 		return sparse_read_blk(offset / F2FS_BLKSIZE,
@@ -194,6 +512,11 @@
 	if (fd < 0)
 		return fd;
 
+	/* err = 1: cache not available, fall back to non-cache R/W */
+	/* err = 0: success, err=-1: I/O error */
+	err = dcache_read(fd, buf, (off64_t)offset, len);
+	if (err <= 0)
+		return err;
 	if (lseek64(fd, (off64_t)offset, SEEK_SET) < 0)
 		return -1;
 	if (read(fd, buf, len) < 0)
@@ -233,6 +556,12 @@
 	if (fd < 0)
 		return fd;
 
+	/*
+	 * dcache_update_cache() only updates the cache; it does no I/O.
+	 * Even on success, a normal non-cache write is still needed afterwards.
+	 */
+	if (dcache_update_cache(fd, buf, (off64_t)offset, len) < 0)
+		return -1;
 	if (lseek64(fd, (off64_t)offset, SEEK_SET) < 0)
 		return -1;
 	if (write(fd, buf, len) < 0)
@@ -311,6 +640,8 @@
 #ifdef WITH_ANDROID
 	if (c.func == MKFS) {
 		f2fs_sparse_file = sparse_file_new(F2FS_BLKSIZE, c.device_size);
+		if (!f2fs_sparse_file)
+			return -1;
 	} else {
 		f2fs_sparse_file = sparse_file_import(c.devices[0].fd,
 							true, false);
@@ -346,6 +677,26 @@
 #endif
 }
 
+void f2fs_release_sparse_resource(void)
+{
+#ifdef WITH_ANDROID
+	int j;
+
+	if (c.sparse_mode) {
+		if (f2fs_sparse_file != NULL) {
+			sparse_file_destroy(f2fs_sparse_file);
+			f2fs_sparse_file = NULL;
+		}
+		for (j = 0; j < blocks_count; j++)
+			free(blocks[j]);
+		free(blocks);
+		blocks = NULL;
+		free(zeroed_block);
+		zeroed_block = NULL;
+	}
+#endif
+}
+
 #define MAX_CHUNK_SIZE		(1 * 1024 * 1024 * 1024ULL)
 #define MAX_CHUNK_COUNT		(MAX_CHUNK_SIZE / F2FS_BLKSIZE)
 int f2fs_finalize_device(void)
@@ -412,14 +763,7 @@
 		sparse_file_write(f2fs_sparse_file, c.devices[0].fd,
 				/*gzip*/0, /*sparse*/1, /*crc*/0);
 
-		sparse_file_destroy(f2fs_sparse_file);
-		for (j = 0; j < blocks_count; j++)
-			free(blocks[j]);
-		free(blocks);
-		blocks = NULL;
-		free(zeroed_block);
-		zeroed_block = NULL;
-		f2fs_sparse_file = NULL;
+		f2fs_release_sparse_resource();
 	}
 #endif
 	/*
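The probe sequence walked by dcache_find() above is bounded by max_hash_collision and follows the fixed dcache_relocate_offset0[] pattern around the primary hash slot. This standalone sketch (assumed cache size, arbitrary block number, first eight offsets only) prints the slots that would be tried before the least used one is evicted:

#include <stdio.h>

int main(void)
{
	const long n = 4096;	/* assumed num_cache_entry */
	const long off[] = { 20, -20, 40, -40, 80, -80, 160, -160 };
	long blk = 123456789;	/* arbitrary block number */
	long entry = blk % n;	/* primary slot: simple modulo hash */
	unsigned int i;

	printf("probe 0: slot %ld\n", entry);
	for (i = 0; i < sizeof(off) / sizeof(off[0]); i++)
		/* same wrap-around dcache_relocate() applies: (entry + n + off) % n */
		printf("probe %u: slot %ld\n", i + 1, (entry + n + off[i]) % n);
	return 0;
}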
diff --git a/lib/libf2fs_zoned.c b/lib/libf2fs_zoned.c
index e58d3ce..efc687c 100644
--- a/lib/libf2fs_zoned.c
+++ b/lib/libf2fs_zoned.c
@@ -97,8 +97,9 @@
 	/* Check that this is a zoned block device */
 	res = get_sysfs_path(dev, "queue/zoned", str, sizeof(str));
 	if (res != 0) {
-		MSG(0, "\tError: Failed to get device sysfs path\n");
-		return -1;
+		MSG(0, "\tInfo: can't find /sys, assuming normal block device\n");
+		dev->zoned_model = F2FS_ZONED_NONE;
+		return 0;
 	}
 
 	file = fopen(str, "r");
diff --git a/mkfs/f2fs_format.c b/mkfs/f2fs_format.c
index 9402619..0e9e7a9 100644
--- a/mkfs/f2fs_format.c
+++ b/mkfs/f2fs_format.c
@@ -1150,6 +1150,12 @@
 		raw_node->i.i_crtime_nsec = 0;
 	}
 
+	if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
+		raw_node->i.i_compress_algrithm = 0;
+		raw_node->i.i_log_cluster_size = 0;
+		raw_node->i.i_padding = 0;
+	}
+
 	data_blk_nor = get_sb(main_blkaddr) +
 		c.cur_seg[CURSEG_HOT_DATA] * c.blks_per_seg;
 	raw_node->i.i_addr[get_extra_isize(raw_node)] = cpu_to_le32(data_blk_nor);
@@ -1500,6 +1506,12 @@
 		raw_node->i.i_crtime_nsec = 0;
 	}
 
+	if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
+		raw_node->i.i_compress_algrithm = 0;
+		raw_node->i.i_log_cluster_size = 0;
+		raw_node->i.i_padding = 0;
+	}
+
 	data_blk_nor = f2fs_add_default_dentry_lpf();
 	if (data_blk_nor == 0) {
 		MSG(1, "\tError: Failed to add default dentries for lost+found!!!\n");
diff --git a/mkfs/f2fs_format_main.c b/mkfs/f2fs_format_main.c
index ba233c6..d68fc65 100644
--- a/mkfs/f2fs_format_main.c
+++ b/mkfs/f2fs_format_main.c
@@ -241,6 +241,11 @@
 				"enabled with extra attr feature\n");
 			exit(1);
 		}
+		if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
+			MSG(0, "\tInfo: compression feature should always be "
+				"enabled with extra attr feature\n");
+			exit(1);
+		}
 	}
 
 	if (optind >= argc) {
@@ -361,7 +366,7 @@
 	}
 
 	if (f2fs_get_device_info() < 0)
-		return -1;
+		goto err_format;
 
 	/*
 	 * Some options are mandatory for host-managed
@@ -369,26 +374,25 @@
 	 */
 	if (c.zoned_model == F2FS_ZONED_HM && !c.zoned_mode) {
 		MSG(0, "\tError: zoned block device feature is required\n");
-		return -1;
+		goto err_format;
 	}
 
 	if (c.zoned_mode && !c.trim) {
 		MSG(0, "\tError: Trim is required for zoned block devices\n");
-		return -1;
-	}
-
-	if (c.sparse_mode) {
-		if (f2fs_init_sparse_file())
-			return -1;
+		goto err_format;
 	}
 
 	if (f2fs_format_device() < 0)
-		return -1;
+		goto err_format;
 
 	if (f2fs_finalize_device() < 0)
-		return -1;
+		goto err_format;
 
 	MSG(0, "Info: format successful\n");
 
 	return 0;
+
+err_format:
+	f2fs_release_sparse_resource();
+	return -1;
 }
diff --git a/tools/f2fs_io/f2fs_io.c b/tools/f2fs_io/f2fs_io.c
index df2ea21..68e27ed 100644
--- a/tools/f2fs_io/f2fs_io.c
+++ b/tools/f2fs_io/f2fs_io.c
@@ -130,6 +130,33 @@
 	}
 }
 
+#define set_verity_desc "Set fs-verity"
+#define set_verity_help					\
+"f2fs_io set_verity [file]\n\n"				\
+"Set fsverity bit given a file\n"			\
+
+static void do_set_verity(int argc, char **argv, const struct cmd_desc *cmd)
+{
+	int ret, fd;
+
+	if (argc != 2) {
+		fputs("Excess arguments\n\n", stderr);
+		fputs(cmd->cmd_help, stderr);
+		exit(1);
+	}
+
+	fd = open(argv[1], O_RDWR);
+	if (fd < 0) {
+		perror(argv[1]);
+		exit(1);
+	}
+
+	ret = ioctl(fd, FS_IOC_ENABLE_VERITY);
+	if (ret < 0) {
+		perror("FS_IOC_ENABLE_VERITY");
+		exit(1);
+	}
+
+	printf("Set fsverity bit to %s\n", argv[1]);
+	exit(0);
+}
+
 #define getflags_desc "getflags ioctl"
 #define getflags_help						\
 "f2fs_io getflags [file]\n\n"					\
@@ -678,6 +705,7 @@
 static void do_help(int argc, char **argv, const struct cmd_desc *cmd);
 const struct cmd_desc cmd_list[] = {
 	_CMD(help),
+	CMD(set_verity),
 	CMD(getflags),
 	CMD(setflags),
 	CMD(shutdown),
diff --git a/tools/f2fs_io/f2fs_io.h b/tools/f2fs_io/f2fs_io.h
index 5768c1b..3b2892c 100644
--- a/tools/f2fs_io/f2fs_io.h
+++ b/tools/f2fs_io/f2fs_io.h
@@ -73,6 +73,8 @@
 #define F2FS_IOC_GET_ENCRYPTION_POLICY	FS_IOC_GET_ENCRYPTION_POLICY
 #define F2FS_IOC_GET_ENCRYPTION_PWSALT	FS_IOC_GET_ENCRYPTION_PWSALT
 
+#define FS_IOC_ENABLE_VERITY		_IO('f', 133)
+
 /*
  * Inode flags
  */
diff --git a/tools/sg_write_buffer/Makefile.am b/tools/sg_write_buffer/Makefile.am
index 922c328..19c438d 100644
--- a/tools/sg_write_buffer/Makefile.am
+++ b/tools/sg_write_buffer/Makefile.am
@@ -1,7 +1,7 @@
 ## Makefile.am
 
 if LINUX
-AM_CPPFLAGS = -I./include
+AM_CPPFLAGS = -I$(srcdir)/include
 AM_CFLAGS = -Wall
 sbin_PROGRAMS = sg_write_buffer
 sg_write_buffer_SOURCES = sg_write_buffer.c \