scsi: ufs: introduce Host Performance Booster

driver: v1.03
FW: v1.35
FW_rev: 0610

HPB increases random read performance by up to 43% over a 64GB random range.
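HPB caches the device's logical-to-physical map entries in host memory
and, on a cache hit, issues READ(16) carrying the cached physical
address in the CDB, so the device can skip its internal map lookup on
small random reads.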

Bug: 63264275
Bug: 70044903
Change-Id: I7595840f5ce12725c41b8d8ce985a14a1ed45aa6
Signed-off-by: Yongmyung Lee <ymhungry.lee@samsung.com>
Signed-off-by: Jinyoung Choi <j-young.choi@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@google.com>
diff --git a/Documentation/ABI/testing/sysfs-devices-ufshpb b/Documentation/ABI/testing/sysfs-devices-ufshpb
new file mode 100644
index 0000000..0e76cee
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-ufshpb
@@ -0,0 +1,77 @@
+What:		/sys/devices/platform/soc/<....>ufshc/ufshpb_lu#num/HPBVersion
+Date:		April, 2018
+Contact:	"Jaegeuk Kim" <jaegeuk@google.com>
+Description:
+		Show HPB version info.
+
+What:		/sys/devices/platform/soc/<....>ufshc/ufshpb_lu#num/add_evict_count
+Date:		April, 2018
+Contact:	"Jaegeuk Kim" <jaegeuk@google.com>
+Description:
+		Show the count of HPB entry evictions.
+
+What:		/sys/devices/platform/soc/<....>ufshc/ufshpb_lu#num/hit_count
+Date:		April, 2018
+Contact:	"Jaegeuk Kim" <jaegeuk@google.com>
+Description:
+		Show the count of HPB entry hits.
+
+What:		/sys/devices/platform/soc/<....>ufshc/ufshpb_lu#num/map_loading
+Date:		April, 2018
+Contact:	"Jaegeuk Kim" <jaegeuk@google.com>
+Description:
+		Request HPB map loading.
+
+What:		/sys/devices/platform/soc/<....>ufshc/ufshpb_lu#num/read_16_disable
+Date:		April, 2018
+Contact:	"Jaegeuk Kim" <jaegeuk@google.com>
+Description:
+		Disable READ16 for HPB communication.
+
+What:		/sys/devices/platform/soc/<....>ufshc/ufshpb_lu#num/active_block_status
+Date:		April, 2018
+Contact:	"Jaegeuk Kim" <jaegeuk@google.com>
+Description:
+		Show all the active blocks.
+
+What:		/sys/devices/platform/soc/<....>ufshc/ufshpb_lu#num/count_reset
+Date:		April, 2018
+Contact:	"Jaegeuk Kim" <jaegeuk@google.com>
+Description:
+		Reset all the tracking counts.
+
+What:		/sys/devices/platform/soc/<....>ufshc/ufshpb_lu#num/is_active_group
+Date:		April, 2018
+Contact:	"Jaegeuk Kim" <jaegeuk@google.com>
+Description:
+		Show active group info.
+
+What:		/sys/devices/platform/soc/<....>ufshc/ufshpb_lu#num/map_req_count
+Date:		April, 2018
+Contact:	"Jaegeuk Kim" <jaegeuk@google.com>
+Description:
+		Show the count of HPB map entry requests.
+
+What:		/sys/devices/platform/soc/<....>ufshc/ufshpb_lu#num/active_list
+Date:		April, 2018
+Contact:	"Jaegeuk Kim" <jaegeuk@google.com>
+Description:
+		Show active block list.
+
+What:		/sys/devices/platform/soc/<....>ufshc/ufshpb_lu#num/get_info_from_lba
+Date:		April, 2018
+Contact:	"Jaegeuk Kim" <jaegeuk@google.com>
+Description:
+		Show HPB entry info for a given LBA.
+
+What:		/sys/devices/platform/soc/<....>ufshc/ufshpb_lu#num/map_cmd_disable
+Date:		April, 2018
+Contact:	"Jaegeuk Kim" <jaegeuk@google.com>
+Description:
+		Disable HPB map command.
+
+What:		/sys/devices/platform/soc/<....>ufshc/ufshpb_lu#num/miss_count
+Date:		April, 2018
+Contact:	"Jaegeuk Kim" <jaegeuk@google.com>
+Description:
+		Show the count of HPB entry misses.
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 1b283b2..02a9ab1 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -3,6 +3,8 @@
 #
 # This code is based on drivers/scsi/ufs/Kconfig
 # Copyright (C) 2011-2013 Samsung India Software Operations
+# Copyright (c) 2017-2018 Samsung Electronics Co., Ltd.
+# Copyright (C) 2018, Google, Inc.
 #
 # Authors:
 #	Santosh Yaraganavi <santosh.sy@samsung.com>
@@ -126,3 +128,10 @@
 
 	  Select this if you want above mentioned debug information captured.
 	  If unsure, say N.
+
+config SCSI_UFSHCD_HPB_ACTIVATE
+	int "Activate HPB Host-aware Performance Booster"
+	depends on SCSI_UFSHCD
+	default 1
+	help
+	  Activate or deactivate the UFSHPB v1.3.5 test driver.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index ce98c09..e8e3043 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -1,7 +1,10 @@
 # UFSHCD makefile
 obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
 obj-$(CONFIG_SCSI_UFS_QCOM_ICE) += ufs-qcom-ice.o
-obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o ufs_quirks.o
+
+obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
+ufshcd-core-objs := ufshcd.o ufs_quirks.o ufshpb.o
+
 obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
 obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
 obj-$(CONFIG_SCSI_UFS_TEST) += ufs_test.o
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index de29930..3fd96aa 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -3,6 +3,7 @@
  *
  * This code is based on drivers/scsi/ufs/ufs.h
  * Copyright (C) 2011-2013 Samsung India Software Operations
+ * Copyright (c) 2017-2018 Samsung Electronics Co., Ltd.
  *
  * Authors:
  *	Santosh Yaraganavi <santosh.sy@samsung.com>
@@ -176,6 +177,9 @@
 	UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT	= 0x18,
 	UNIT_DESC_PARAM_CTX_CAPABILITIES	= 0x20,
 	UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1	= 0x22,
+	UNIT_DESC_HPB_LU_MAX_ACTIVE_REGIONS	= 0x23,
+	UNIT_DESC_HPB_LU_PIN_REGION_START_OFFSET = 0x25,
+	UNIT_DESC_HPB_LU_NUM_PIN_REGIONS	= 0x27,
 };
 
 /* Device descriptor parameters offsets in bytes*/
@@ -207,7 +211,18 @@
 	DEVICE_DESC_PARAM_UD_LEN		= 0x1B,
 	DEVICE_DESC_PARAM_RTT_CAP		= 0x1C,
 	DEVICE_DESC_PARAM_FRQ_RTC		= 0x1D,
+	DEVICE_DESC_PARAM_FEAT_SUP		= 0x1F,
+	DEVICE_DESC_PARAM_HPB_VER		= 0x40,
 };
+
+enum geometry_desc_param {
+	GEOMETRY_DESC_SEGMENT_SIZE			= 0x0D,
+	GEOMETRY_DESC_HPB_REGION_SIZE			= 0x48,
+	GEOMETRY_DESC_HPB_NUMBER_LU			= 0x49,
+	GEOMETRY_DESC_HPB_SUBREGION_SIZE		= 0x4A,
+	GEOMETRY_DESC_HPB_DEVICE_MAX_ACTIVE_REGIONS	= 0x4B,
+};
+
 /*
  * Logical Unit Write Protect
  * 00h: LU not write protected
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index c534933..f8726c6 100755
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -4,6 +4,8 @@
  * This code is based on drivers/scsi/ufs/ufshcd.c
  * Copyright (C) 2011-2013 Samsung India Software Operations
  * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018 Samsung Electronics Co., Ltd.
+ * Copyright (C) 2018, Google, Inc.
  *
  * Authors:
  *	Santosh Yaraganavi <santosh.sy@samsung.com>
@@ -3037,9 +3039,15 @@
 	switch (lrbp->command_type) {
 	case UTP_CMD_TYPE_SCSI:
 		if (likely(lrbp->cmd)) {
+			if (hba->ufshpb_state == HPB_PRESENT &&
+					hba->issue_ioctl)
+				lrbp->lun = 0x7F;
 			ret = ufshcd_prepare_req_desc_hdr(hba, lrbp,
 				&upiu_flags, lrbp->cmd->sc_data_direction);
 			ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
+			if (hba->ufshpb_state == HPB_PRESENT &&
+					!hba->issue_ioctl)
+				ufshpb_prep_fn(hba, lrbp);
 		} else {
 			ret = -EINVAL;
 		}
@@ -5588,6 +5596,8 @@
 	sdev->autosuspend_delay = UFSHCD_AUTO_SUSPEND_DELAY_MS;
 	sdev->use_rpm_auto = 1;
 
+	if (sdev->lun < UFS_UPIU_MAX_GENERAL_LUN)
+		hba->sdev_ufs_lu[sdev->lun] = sdev;
 	return 0;
 }
 
@@ -5743,6 +5753,9 @@
 				if (schedule_work(&hba->eeh_work))
 					pm_runtime_get_noresume(hba->dev);
 			}
+			if (hba->ufshpb_state == HPB_PRESENT &&
+					scsi_status == SAM_STAT_GOOD)
+				ufshpb_rsp_upiu(hba, lrbp);
 			break;
 		case UPIU_TRANSACTION_REJECT_UPIU:
 			/* TODO: handle Reject UPIU Response */
@@ -7127,12 +7140,16 @@
 		}
 	}
 	spin_lock_irqsave(host->host_lock, flags);
+	if (hba->ufshpb_state == HPB_PRESENT)
+		hba->ufshpb_state = HPB_RESET;
 	ufshcd_transfer_req_compl(hba);
 	spin_unlock_irqrestore(host->host_lock, flags);
 
 out:
 	hba->req_abort_count = 0;
 	if (!err) {
+		schedule_delayed_work(&hba->ufshpb_init_work,
+					msecs_to_jiffies(10));
 		err = SUCCESS;
 	} else {
 		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
@@ -8161,6 +8178,9 @@
 			hba->clk_scaling.is_allowed = true;
 		}
 
+		schedule_delayed_work(&hba->ufshpb_init_work,
+						msecs_to_jiffies(0));
+
 		scsi_scan_host(hba->host);
 		pm_runtime_put_sync(hba->dev);
 	}
@@ -8334,6 +8354,120 @@
 	ufshcd_extcon_register(hba);
 }
 
+static int ufshcd_query_desc_for_ufshpb(struct ufs_hba *hba, int lun,
+		struct ufs_ioctl_query_data *ioctl_data, void __user *buffer)
+{
+	unsigned char *kernel_buf;
+	int kernel_buf_len;
+	int opcode, selector;
+	int err = 0;
+	int index = 0;
+	int length = 0;
+
+	opcode = UPIU_QUERY_OPCODE_LOW(ioctl_data->opcode);
+	selector = 1;
+
+	if (ioctl_data->idn == QUERY_DESC_IDN_STRING)
+		kernel_buf_len = IOCTL_DEV_CTX_MAX_SIZE;
+	else
+		kernel_buf_len = QUERY_DESC_MAX_SIZE;
+
+	kernel_buf = kzalloc(kernel_buf_len, GFP_KERNEL);
+	if (!kernel_buf) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	switch (opcode) {
+	case UPIU_QUERY_OPCODE_WRITE_DESC:
+		if (kernel_buf_len < ioctl_data->buf_size ||
+					!ioctl_data->buf_size) {
+			err = -EINVAL;
+			goto out_release_mem;
+		}
+		/* support configuration change only */
+		if (ioctl_data->idn != QUERY_DESC_IDN_CONFIGURATION ||
+				ioctl_data->buf_size != UFSHPB_CONFIG_LEN) {
+			err = -ENOTSUPP;
+			goto out_release_mem;
+		}
+		err = copy_from_user(kernel_buf, buffer +
+				sizeof(struct ufs_ioctl_query_data),
+				ioctl_data->buf_size);
+		if (!err)
+			err = ufshpb_control_validation(hba,
+				(struct ufshpb_config_desc *)kernel_buf);
+		if (err)
+			goto out_release_mem;
+		break;
+
+	case UPIU_QUERY_OPCODE_READ_DESC:
+		switch (ioctl_data->idn) {
+		case QUERY_DESC_IDN_UNIT:
+			if (!ufs_is_valid_unit_desc_lun(lun)) {
+				err = -EINVAL;
+				dev_err(hba->dev,
+					"%s: No unit descriptor for lun 0x%x\n",
+						__func__, lun);
+				goto out_release_mem;
+			}
+			index = lun;
+			break;
+		case QUERY_DESC_IDN_STRING:
+			if (!ufs_is_valid_unit_desc_lun(lun)) {
+				err = -EINVAL;
+				dev_err(hba->dev,
+					"No unit descriptor for lun 0x%x\n",
+					lun);
+				goto out_release_mem;
+			}
+			err = ufshpb_issue_req_dev_ctx(hba->ufshpb_lup[lun],
+						kernel_buf,
+						ioctl_data->buf_size);
+			if (err < 0)
+				goto out_release_mem;
+			goto copy_buffer;
+
+		case QUERY_DESC_IDN_DEVICE:
+		case QUERY_DESC_IDN_GEOMETRY:
+		case QUERY_DESC_IDN_CONFIGURATION:
+			break;
+
+		default:
+			err = -EINVAL;
+			dev_err(hba->dev, "invalid idn %d\n", ioctl_data->idn);
+			goto out_release_mem;
+		}
+		break;
+	default:
+		err = -EINVAL;
+		dev_err(hba->dev, "invalid opcode %d\n", opcode);
+		goto out_release_mem;
+	}
+
+	length = ioctl_data->buf_size;
+	err = ufshcd_query_descriptor(hba, opcode, ioctl_data->idn, index,
+			selector, kernel_buf, &length);
+	if (err)
+		goto out_release_mem;
+copy_buffer:
+	if (opcode == UPIU_QUERY_OPCODE_READ_DESC) {
+		err = copy_to_user(buffer, ioctl_data,
+				sizeof(struct ufs_ioctl_query_data));
+		if (err)
+			dev_err(hba->dev, "Failed copying back to user.\n");
+		err = copy_to_user(buffer + sizeof(struct ufs_ioctl_query_data),
+				kernel_buf, ioctl_data->buf_size);
+		if (err)
+			dev_err(hba->dev,
+				"Failed copying back to user : rsp_buffer.\n");
+	}
+out_release_mem:
+	kfree(kernel_buf);
+out:
+	return err;
+}
+
 /**
  * ufshcd_query_ioctl - perform user read queries
  * @hba: per-adapter instance
@@ -8374,6 +8508,14 @@
 		goto out_release_mem;
 	}
 
+	if (UPIU_QUERY_OPCODE_HIGH(ioctl_data->opcode) ==
+					UPIU_QUERY_OPCODE_HIGH_HPB) {
+		err = ufshcd_query_desc_for_ufshpb(hba, lun,
+					ioctl_data, buffer);
+		kfree(ioctl_data);
+		goto out;
+	}
+
 	/* verify legal parameters & send query */
 	switch (ioctl_data->opcode) {
 	case UPIU_QUERY_OPCODE_READ_DESC:
@@ -10421,6 +10563,7 @@
  */
 void ufshcd_remove(struct ufs_hba *hba)
 {
+	ufshpb_release(hba, HPB_NEED_INIT);
 	scsi_remove_host(hba->host);
 	/* disable interrupts */
 	ufshcd_disable_intr(hba, hba->intr_mask);
@@ -11168,6 +11311,9 @@
 	 */
 	ufshcd_set_ufs_dev_active(hba);
 
+	/* initialize hpb structures */
+	ufshcd_init_hpb(hba);
+
 	ufshcd_cmd_log_init(hba);
 
 	async_schedule(ufshcd_async_scan, hba);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index a2e50d0..7e4a821 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -4,6 +4,8 @@
  * This code is based on drivers/scsi/ufs/ufshcd.h
  * Copyright (C) 2011-2013 Samsung India Software Operations
  * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018 Samsung Electronics Co., Ltd.
+ * Copyright (C) 2018, Google, Inc.
  *
  * Authors:
  *	Santosh Yaraganavi <santosh.sy@samsung.com>
@@ -72,6 +74,7 @@
 #include <linux/fault-inject.h>
 #include "ufs.h"
 #include "ufshci.h"
+#include "ufshpb.h"
 
 #define UFSHCD "ufshcd"
 #define UFSHCD_DRIVER_VERSION "0.3"
@@ -1067,6 +1070,16 @@
 	u64 slowio_us;
 	u64 slowio_cnt;
 
+	/* HPB support */
+	u32 ufshpb_feat;
+	int ufshpb_state;
+	int ufshpb_max_regions;
+	struct delayed_work ufshpb_init_work;
+	bool issue_ioctl;
+	struct ufshpb_lu *ufshpb_lup[UFS_UPIU_MAX_GENERAL_LUN];
+	struct scsi_device *sdev_ufs_lu[UFS_UPIU_MAX_GENERAL_LUN];
+	struct work_struct ufshpb_eh_work;
+
 	struct ufs_manual_gc manual_gc;
 };
 
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
new file mode 100644
index 0000000..1116bfe
--- /dev/null
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -0,0 +1,2820 @@
+/*
+ * Universal Flash Storage Host Performance Booster
+ *
+ * Copyright (C) 2017-2018 Samsung Electronics Co., Ltd.
+ * Copyright (C) 2018, Google, Inc.
+ *
+ * Authors:
+ *	Yongmyung Lee <ymhungry.lee@samsung.com>
+ *	Jinyoung Choi <j-young.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ *
+ * The Linux Foundation chooses to take subject only to the GPLv2
+ * license terms, and distributes only under these terms.
+ */
+
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi.h>
+#include <linux/sysfs.h>
+#include <linux/blktrace_api.h>
+
+#include "../../../block/blk.h"
+
+#include "ufs.h"
+#include "ufshcd.h"
+#include "ufshpb.h"
+
+/*
+ * UFSHPB DEBUG
+ */
+#define hpb_dbg(hba, msg, args...)					\
+	do {								\
+		if (hba)						\
+			dev_dbg(hba->dev, msg, ##args);			\
+	} while (0)
+
+#define hpb_trace(hpb, args...)						\
+	do {								\
+		if (hpb->hba->sdev_ufs_lu[hpb->lun] &&			\
+			hpb->hba->sdev_ufs_lu[hpb->lun]->request_queue) \
+			blk_add_trace_msg(				\
+			hpb->hba->sdev_ufs_lu[hpb->lun]->request_queue,	\
+				##args);				\
+	} while (0)
+
+/*
+ * debug variables
+ */
+static int alloc_mctx;
+
+/*
+ * define global constants
+ */
+static int sects_per_blk_shift;
+static int bits_per_dword_shift;
+static int bits_per_dword_mask;
+static int bits_per_byte_shift;
+
+static int ufshpb_create_sysfs(struct ufs_hba *hba, struct ufshpb_lu *hpb);
+static void ufshpb_error_handler(struct work_struct *work);
+static void ufshpb_evict_region(struct ufshpb_lu *hpb,
+						struct ufshpb_region *cb);
+
+static inline void ufshpb_get_bit_offset(
+		struct ufshpb_lu *hpb, int subregion_offset,
+		int *dword, int *offset)
+{
+	*dword = subregion_offset >> bits_per_dword_shift;
+	*offset = subregion_offset & bits_per_dword_mask;
+}
+
+/* called with hpb_lock (irq) */
+static bool ufshpb_ppn_dirty_check(struct ufshpb_lu *hpb,
+		struct ufshpb_subregion *cp, int subregion_offset)
+{
+	bool is_dirty;
+	unsigned int bit_dword, bit_offset;
+
+	ufshpb_get_bit_offset(hpb, subregion_offset,
+			&bit_dword, &bit_offset);
+
+	if (!cp->mctx->ppn_dirty)
+		return false;
+
+	is_dirty = cp->mctx->ppn_dirty[bit_dword] &
+		(1 << bit_offset) ? true : false;
+
+	return is_dirty;
+}
+
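+/*
+ * Build the HPB READ(16) CDB: bytes 2..5 keep the LBA from the original
+ * READ_10, bytes 6..13 carry the cached PPN (big-endian), byte 14 is the
+ * group number (0x11) and byte 15 the transfer length, fixed to one
+ * block by this FW.
+ */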
+static void ufshpb_ppn_prep(struct ufshpb_lu *hpb,
+		struct ufshcd_lrb *lrbp, unsigned long long ppn)
+{
+	unsigned char cmd[16] = { 0 };
+
+	cmd[0] = READ_16;
+	cmd[2] = lrbp->cmd->cmnd[2];
+	cmd[3] = lrbp->cmd->cmnd[3];
+	cmd[4] = lrbp->cmd->cmnd[4];
+	cmd[5] = lrbp->cmd->cmnd[5];
+	cmd[6] = GET_BYTE_7(ppn);
+	cmd[7] = GET_BYTE_6(ppn);
+	cmd[8] = GET_BYTE_5(ppn);
+	cmd[9] = GET_BYTE_4(ppn);
+	cmd[10] = GET_BYTE_3(ppn);
+	cmd[11] = GET_BYTE_2(ppn);
+	cmd[12] = GET_BYTE_1(ppn);
+	cmd[13] = GET_BYTE_0(ppn);
+	cmd[14] = 0x11;		/* Group Number */
+	cmd[15] = 0x01;		/* Transfer_len = 0x01 (FW defined) */
+
+	memcpy(lrbp->cmd->cmnd, cmd, MAX_CDB_SIZE);
+	memcpy(lrbp->ucd_req_ptr->sc.cdb, cmd, MAX_CDB_SIZE);
+}
+
+/* called with hpb_lock (irq) */
+static inline void ufshpb_set_dirty_bits(struct ufshpb_lu *hpb,
+		struct ufshpb_region *cb, struct ufshpb_subregion *cp,
+		int dword, int offset, unsigned int count)
+{
+	const unsigned long mask = ((1UL << count) - 1) & 0xffffffff;
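+	/* e.g. count = 3, offset = 5: mask = 0x7, so bits 5..7 are marked dirty */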
+
+	if (cb->region_state == HPBREGION_INACTIVE)
+		return;
+
+	cp->mctx->ppn_dirty[dword] |= (mask << offset);
+}
+
+static void ufshpb_set_dirty(struct ufshpb_lu *hpb,
+			struct ufshcd_lrb *lrbp, int region,
+			int subregion, int subregion_offset)
+{
+	struct ufshpb_region *cb;
+	struct ufshpb_subregion *cp;
+	int count;
+	int bit_count, bit_dword, bit_offset;
+
+	count = blk_rq_sectors(lrbp->cmd->request) >> sects_per_blk_shift;
+	ufshpb_get_bit_offset(hpb, subregion_offset,
+			&bit_dword, &bit_offset);
+
+	do {
+		bit_count = min(count, BITS_PER_DWORD - bit_offset);
+
+		cb = hpb->region_tbl + region;
+		cp = cb->subregion_tbl + subregion;
+
+		ufshpb_set_dirty_bits(hpb, cb, cp,
+				bit_dword, bit_offset, bit_count);
+
+		bit_offset = 0;
+		bit_dword++;
+
+		if (bit_dword == hpb->dwords_per_subregion) {
+			bit_dword = 0;
+			subregion++;
+
+			if (subregion == hpb->subregions_per_region) {
+				subregion = 0;
+				region++;
+			}
+		}
+
+		count -= bit_count;
+	} while (count);
+}
+
+static inline bool ufshpb_is_read_lrbp(struct ufshcd_lrb *lrbp)
+{
+	if (lrbp->cmd->cmnd[0] == READ_10 || lrbp->cmd->cmnd[0] == READ_16)
+		return true;
+
+	return false;
+}
+
+static inline bool ufshpb_is_encrypted_lrbp(struct ufshcd_lrb *lrbp)
+{
+	return (lrbp->utr_descriptor_ptr->header.dword_0 & UTRD_CRYPTO_ENABLE);
+}
+
+static inline bool ufshpb_is_write_discard_lrbp(struct ufshcd_lrb *lrbp)
+{
+	return lrbp->cmd->cmnd[0] == WRITE_10 ||
+			lrbp->cmd->cmnd[0] == WRITE_16 ||
+			lrbp->cmd->cmnd[0] == UNMAP;
+}
+
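+/*
+ * Decompose an LPN into (region, subregion, in-subregion offset) using
+ * the shift/mask constants computed at init time.  For example, with
+ * 4096 entries per region and 512 per subregion (hypothetical sizes),
+ * lpn 5000 maps to region 1, subregion 1, offset 392.
+ */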
+static inline void ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb,
+		unsigned int lpn, int *region, int *subregion, int *offset)
+{
+	int region_offset;
+
+	*region = lpn >> hpb->entries_per_region_shift;
+	region_offset = lpn & hpb->entries_per_region_mask;
+	*subregion = region_offset >> hpb->entries_per_subregion_shift;
+	*offset = region_offset & hpb->entries_per_subregion_mask;
+}
+
+static unsigned long long ufshpb_get_ppn(struct ufshpb_map_ctx *mctx, int pos)
+{
+	unsigned long long *ppn_table;
+	int index, offset;
+
+	index = pos / HPB_ENTREIS_PER_OS_PAGE;
+	offset = pos % HPB_ENTREIS_PER_OS_PAGE;
+
+	ppn_table = page_address(mctx->m_page[index]);
+	return ppn_table[offset];
+}
+
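+/*
+ * Fast-path hook invoked while the SCSI command UPIU is being built:
+ * - WRITE/UNMAP requests only mark the covered map entries dirty;
+ * - a single-block READ whose subregion is active and clean has its
+ *   CDB rewritten into HPB READ(16) with the cached PPN;
+ * - everything else falls through and is issued unchanged.
+ */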
+void ufshpb_prep_fn(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+	struct ufshpb_lu *hpb;
+	struct ufshpb_region *cb;
+	struct ufshpb_subregion *cp;
+	unsigned int lpn;
+	unsigned long long ppn = 0;
+	int region, subregion, subregion_offset;
+	const struct request *rq = lrbp->cmd->request;
+	unsigned long long rq_pos = blk_rq_pos(rq);
+	unsigned int rq_sectors = blk_rq_sectors(rq);
+
+	/* a well-known LU (WLUN) cannot be an HPB LU */
+	if (lrbp->lun >= UFS_UPIU_MAX_GENERAL_LUN)
+		return;
+
+	hpb = hba->ufshpb_lup[lrbp->lun];
+	if (!hpb || !hpb->lu_hpb_enable) {
+		if (ufshpb_is_read_lrbp(lrbp))
+			goto read_10;
+		return;
+	}
+
+	if (hpb->force_disable) {
+		if (ufshpb_is_read_lrbp(lrbp))
+			goto read_10;
+		return;
+	}
+
+	/*
+	 * TODO: check if ICE is not supported or not.
+	 *
+	 * if (ufshpb_is_read_lrbp(lrbp) && ufshpb_is_encrypted_lrbp(lrbp))
+	 *	goto read_10;
+	 */
+
+	lpn = rq_pos / SECTORS_PER_BLOCK;
+	ufshpb_get_pos_from_lpn(hpb, lpn, &region,
+			&subregion, &subregion_offset);
+	cb = hpb->region_tbl + region;
+
+	if (ufshpb_is_write_discard_lrbp(lrbp)) {
+		spin_lock_bh(&hpb->hpb_lock);
+
+		if (cb->region_state == HPBREGION_INACTIVE) {
+			spin_unlock_bh(&hpb->hpb_lock);
+			return;
+		}
+		ufshpb_set_dirty(hpb, lrbp, region, subregion,
+						subregion_offset);
+		spin_unlock_bh(&hpb->hpb_lock);
+		return;
+	}
+
+	if (!ufshpb_is_read_lrbp(lrbp))
+		return;
+
+	if (rq_sectors != SECTORS_PER_BLOCK) {
+		hpb_trace(hpb, "%llu + %u READ_10 many_blocks %d - %d",
+				rq_pos, rq_sectors, region, subregion);
+		return;
+	}
+
+	cp = cb->subregion_tbl + subregion;
+
+	spin_lock_bh(&hpb->hpb_lock);
+	if (cb->region_state == HPBREGION_INACTIVE ||
+			cp->subregion_state != HPBSUBREGION_CLEAN) {
+		if (cb->region_state == HPBREGION_INACTIVE) {
+			atomic64_inc(&hpb->region_miss);
+			hpb_trace(hpb, "%llu + %u READ_10 RG_INACT %d - %d",
+					rq_pos, rq_sectors, region, subregion);
+		} else if (cp->subregion_state == HPBSUBREGION_DIRTY
+				|| cp->subregion_state == HPBSUBREGION_ISSUED) {
+			atomic64_inc(&hpb->subregion_miss);
+			hpb_trace(hpb, "%llu + %u READ_10 SRG_D %d - %d",
+					rq_pos, rq_sectors, region, subregion);
+		} else {
+			hpb_trace(hpb, "%llu + %u READ_10 ( %d %d ) %d - %d",
+					rq_pos, rq_sectors,
+				cb->region_state, cp->subregion_state,
+				region, subregion);
+		}
+		spin_unlock_bh(&hpb->hpb_lock);
+		return;
+	}
+
+	if (ufshpb_ppn_dirty_check(hpb, cp, subregion_offset)) {
+		atomic64_inc(&hpb->entry_dirty_miss);
+		hpb_trace(hpb, "%llu + %u READ_10 E_D %d - %d",
+				rq_pos, rq_sectors, region, subregion);
+		spin_unlock_bh(&hpb->hpb_lock);
+		return;
+	}
+
+	ppn = ufshpb_get_ppn(cp->mctx, subregion_offset);
+	spin_unlock_bh(&hpb->hpb_lock);
+
+	ufshpb_ppn_prep(hpb, lrbp, ppn);
+	hpb_trace(hpb, "%llu + %u READ_16 %d - %d",
+			rq_pos, rq_sectors, region, subregion);
+	atomic64_inc(&hpb->hit);
+	return;
+read_10:
+	if (!hpb || !lrbp)
+		return;
+	hpb_trace(hpb, "%llu + %u READ_10", rq_pos, rq_sectors);
+	atomic64_inc(&hpb->miss);
+}
+
+static int ufshpb_clean_dirty_bitmap(
+		struct ufshpb_lu *hpb, struct ufshpb_subregion *cp)
+{
+	struct ufshpb_region *cb;
+
+	cb = hpb->region_tbl + cp->region;
+
+	/* if mctx is NULL, the active block has already been evicted */
+	if (cb->region_state == HPBREGION_INACTIVE || !cp->mctx) {
+		hpb_dbg(hpb->hba, "%d - %d evicted\n",
+				cp->region, cp->subregion);
+		return -EINVAL;
+	}
+
+	memset(cp->mctx->ppn_dirty, 0x00,
+			hpb->entries_per_subregion >> bits_per_byte_shift);
+	return 0;
+}
+
+static void ufshpb_clean_active_subregion(
+		struct ufshpb_lu *hpb, struct ufshpb_subregion *cp)
+{
+	struct ufshpb_region *cb;
+
+	cb = hpb->region_tbl + cp->region;
+
+	/* if mctx is NULL, the active block has already been evicted */
+	if (cb->region_state == HPBREGION_INACTIVE || !cp->mctx) {
+		hpb_dbg(hpb->hba, "%d - %d evicted\n",
+				cp->region, cp->subregion);
+		return;
+	}
+	cp->subregion_state = HPBSUBREGION_CLEAN;
+}
+
+static void ufshpb_error_active_subregion(
+		struct ufshpb_lu *hpb, struct ufshpb_subregion *cp)
+{
+	struct ufshpb_region *cb;
+
+	cb = hpb->region_tbl + cp->region;
+
+	/* if mctx is NULL, the active block has already been evicted */
+	if (cb->region_state == HPBREGION_INACTIVE || !cp->mctx) {
+		dev_err(HPB_DEV(hpb),
+			"%d - %d evicted\n", cp->region, cp->subregion);
+		return;
+	}
+	cp->subregion_state = HPBSUBREGION_DIRTY;
+}
+
+static void ufshpb_map_compl_process(struct ufshpb_lu *hpb,
+		struct ufshpb_map_req *map_req)
+{
+	unsigned long long debug_ppn_0, debug_ppn_65535;
+
+	map_req->RSP_end = ktime_to_ns(ktime_get());
+
+	debug_ppn_0 = ufshpb_get_ppn(map_req->mctx, 0);
+	debug_ppn_65535 = ufshpb_get_ppn(map_req->mctx, 65535);
+
+	hpb_trace(hpb, "Noti: C RB %d - %d",
+			map_req->region, map_req->subregion);
+	hpb_dbg(hpb->hba, "UFSHPB COMPL READ BUFFER %d - %d ( %llx ~ %llx )\n",
+			map_req->region, map_req->subregion,
+			debug_ppn_0, debug_ppn_65535);
+	hpb_dbg(hpb->hba, "start=%llu, issue=%llu, endio=%llu, endio=%llu\n",
+			map_req->RSP_tasklet_enter1 - map_req->RSP_start,
+			map_req->RSP_issue - map_req->RSP_tasklet_enter1,
+			map_req->RSP_endio - map_req->RSP_issue,
+			map_req->RSP_end - map_req->RSP_endio);
+
+	spin_lock(&hpb->hpb_lock);
+	ufshpb_clean_active_subregion(hpb,
+			hpb->region_tbl[map_req->region].subregion_tbl +
+							map_req->subregion);
+	spin_unlock(&hpb->hpb_lock);
+}
+
+/*
+ * Caller must hold rsp_list_lock before calling this function.
+ */
+static struct ufshpb_rsp_info *ufshpb_get_req_info(struct ufshpb_lu *hpb)
+{
+	struct ufshpb_rsp_info *rsp_info =
+		list_first_entry_or_null(&hpb->lh_rsp_info_free,
+				struct ufshpb_rsp_info,
+				list_rsp_info);
+	if (!rsp_info) {
+		hpb_dbg(hpb->hba, "there is no rsp_info");
+		return NULL;
+	}
+	list_del(&rsp_info->list_rsp_info);
+	memset(rsp_info, 0x00, sizeof(struct ufshpb_rsp_info));
+
+	INIT_LIST_HEAD(&rsp_info->list_rsp_info);
+
+	return rsp_info;
+}
+
+static void ufshpb_map_req_compl_fn(struct request *req, int error)
+{
+	struct ufshpb_map_req *map_req =
+		(struct ufshpb_map_req *)req->end_io_data;
+	struct ufs_hba *hba;
+	struct ufshpb_lu *hpb;
+	struct scsi_sense_hdr sshdr;
+	struct ufshpb_region *cb;
+	struct ufshpb_rsp_info *rsp_info;
+	unsigned long flags;
+
+	hpb = map_req->hpb;
+	hba = hpb->hba;
+	cb = hpb->region_tbl + map_req->region;
+	map_req->RSP_endio = ktime_to_ns(ktime_get());
+
+	if (hba->ufshpb_state != HPB_PRESENT)
+		goto free_map_req;
+
+	if (!error) {
+		ufshpb_map_compl_process(hpb, map_req);
+		goto free_map_req;
+	}
+
+	dev_err(HPB_DEV(hpb),
+		"error number %d ( %d - %d )\n",
+		error, map_req->region, map_req->subregion);
+	scsi_normalize_sense(map_req->sense,
+				SCSI_SENSE_BUFFERSIZE, &sshdr);
+	dev_err(HPB_DEV(hpb),
+		"code %x sense_key %x asc %x ascq %x\n",
+				sshdr.response_code,
+				sshdr.sense_key, sshdr.asc, sshdr.ascq);
+	dev_err(HPB_DEV(hpb),
+		"byte4 %x byte5 %x byte6 %x additional_len %x\n",
+				sshdr.byte4, sshdr.byte5,
+				sshdr.byte6, sshdr.additional_length);
+	atomic64_inc(&hpb->rb_fail);
+
+	if (sshdr.sense_key == ILLEGAL_REQUEST) {
+		spin_lock(&hpb->hpb_lock);
+		if (cb->region_state == HPBREGION_PINNED) {
+			if (sshdr.asc == 0x06 && sshdr.ascq == 0x01) {
+				dev_err(HPB_DEV(hpb), "retry pinned rb %d - %d",
+						map_req->region,
+						map_req->subregion);
+				INIT_LIST_HEAD(&map_req->list_map_req);
+				list_add_tail(&map_req->list_map_req,
+							&hpb->lh_map_req_retry);
+				spin_unlock(&hpb->hpb_lock);
+
+				schedule_delayed_work(&hpb->ufshpb_retry_work,
+							msecs_to_jiffies(5000));
+				return;
+			}
+
+			hpb_dbg(hpb->hba, "pinned rb %d - %d(dirty)",
+					map_req->region, map_req->subregion);
+			ufshpb_error_active_subregion(hpb,
+					cb->subregion_tbl + map_req->subregion);
+			spin_unlock(&hpb->hpb_lock);
+		} else {
+			spin_unlock(&hpb->hpb_lock);
+
+			spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+			rsp_info = ufshpb_get_req_info(hpb);
+			spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+			if (!rsp_info) {
+				dev_warn(hba->dev,
+					"%s:%d No rsp_info\n",
+					__func__, __LINE__);
+				goto free_map_req;
+			}
+
+			rsp_info->type = HPB_RSP_REQ_REGION_UPDATE;
+			rsp_info->RSP_start = ktime_to_ns(ktime_get());
+			rsp_info->active_cnt = 0;
+			rsp_info->inactive_cnt = 1;
+			rsp_info->inactive_list.region[0] = map_req->region;
+			hpb_dbg(hpb->hba,
+				"Non-pinned rb %d is added to rsp_info_list",
+				map_req->region);
+
+			spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+			list_add_tail(&rsp_info->list_rsp_info,
+							&hpb->lh_rsp_info);
+			spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+
+			tasklet_schedule(&hpb->ufshpb_tasklet);
+		}
+	}
+free_map_req:
+	spin_lock(&hpb->hpb_lock);
+	INIT_LIST_HEAD(&map_req->list_map_req);
+	list_add_tail(&map_req->list_map_req, &hpb->lh_map_req_free);
+	spin_unlock(&hpb->hpb_lock);
+}
+
+static int ufshpb_execute_req_dev_ctx(struct ufshpb_lu *hpb,
+				unsigned char *cmd, void *buf, int length)
+{
+	unsigned long flags;
+	struct scsi_sense_hdr sshdr;
+	struct scsi_device *sdp;
+	struct ufs_hba *hba = hpb->hba;
+	int ret = 0;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	sdp = hba->sdev_ufs_lu[hpb->lun];
+	if (sdp) {
+		ret = scsi_device_get(sdp);
+		if (!ret && !scsi_device_online(sdp)) {
+			ret = -ENODEV;
+			scsi_device_put(sdp);
+		} else if (!ret) {
+			hba->issue_ioctl = true;
+		}
+	} else {
+		ret = -ENODEV;
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	if (ret)
+		return ret;
+
+	ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE,
+				buf, length, &sshdr,
+				msecs_to_jiffies(30000), 3, NULL, 0);
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->issue_ioctl = false;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	scsi_device_put(sdp);
+	return ret;
+}
+
+static inline void ufshpb_set_read_dev_ctx_cmd(unsigned char *cmd, int lba,
+					       int length)
+{
+	cmd[0] = READ_10;
+	cmd[1] = 0x02;
+	cmd[2] = GET_BYTE_3(lba);
+	cmd[3] = GET_BYTE_2(lba);
+	cmd[4] = GET_BYTE_1(lba);
+	cmd[5] = GET_BYTE_0(lba);
+	cmd[6] = GET_BYTE_2(length);
+	cmd[7] = GET_BYTE_1(length);
+	cmd[8] = GET_BYTE_0(length);
+}
+
+int ufshpb_issue_req_dev_ctx(struct ufshpb_lu *hpb, unsigned char *buf,
+			      int buf_length)
+{
+	unsigned char cmd[10] = { 0 };
+	int cmd_len = buf_length >> OS_PAGE_SHIFT;
+	int ret = 0;
+
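+	/* LBA 0x48504230 spells "HPB0", tagging the device-context read */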
+	ufshpb_set_read_dev_ctx_cmd(cmd, 0x48504230, cmd_len);
+
+	ret = ufshpb_execute_req_dev_ctx(hpb, cmd, buf, buf_length);
+	if (ret < 0)
+		hpb_dbg(hpb->hba, "failed with err %d\n", ret);
+	return ret;
+}
+
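+/*
+ * HPB READ BUFFER CDB layout: byte 1 is fixed to 0x01, bytes 2..3 carry
+ * the region, bytes 4..5 the subregion, and bytes 6..8 the allocation
+ * length (one subregion worth of map entries).
+ */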
+static inline void ufshpb_set_read_buf_cmd(unsigned char *cmd,
+		int region, int subregion, int subregion_mem_size)
+{
+	cmd[0] = UFSHPB_READ_BUFFER;
+	cmd[1] = 0x01;
+	cmd[2] = GET_BYTE_1(region);
+	cmd[3] = GET_BYTE_0(region);
+	cmd[4] = GET_BYTE_1(subregion);
+	cmd[5] = GET_BYTE_0(subregion);
+	cmd[6] = GET_BYTE_2(subregion_mem_size);
+	cmd[7] = GET_BYTE_1(subregion_mem_size);
+	cmd[8] = GET_BYTE_0(subregion_mem_size);
+	cmd[9] = 0x00;
+}
+
+static void ufshpb_bio_init(struct bio *bio, struct bio_vec *table,
+		int max_vecs)
+{
+	bio_init(bio);
+
+	bio->bi_io_vec = table;
+	bio->bi_max_vecs = max_vecs;
+}
+
+static int ufshpb_add_bio_page(struct ufshpb_lu *hpb,
+		struct request_queue *q, struct bio *bio, struct bio_vec *bvec,
+		struct ufshpb_map_ctx *mctx)
+{
+	struct page *page = NULL;
+	int i, ret;
+
+	ufshpb_bio_init(bio, bvec, hpb->mpages_per_subregion);
+
+	for (i = 0; i < hpb->mpages_per_subregion; i++) {
+		/* virt_to_page(p + (OS_PAGE_SIZE * i)); */
+		page = mctx->m_page[i];
+		if (!page)
+			return -ENOMEM;
+
+		ret = bio_add_pc_page(q, bio, page, hpb->mpage_bytes, 0);
+		if (ret != hpb->mpage_bytes) {
+			dev_err(HPB_DEV(hpb), "error ret %d\n", ret);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static inline void ufshpb_issue_map_req(struct request_queue *q,
+		struct request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	list_add(&req->queuelist, &q->queue_head);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static int ufshpb_map_req_issue(struct ufshpb_lu *hpb,
+		struct request_queue *q, struct ufshpb_map_req *map_req)
+{
+	struct request *req;
+	struct bio *bio;
+	unsigned char cmd[16] = { 0 };
+	int ret;
+
+	bio = &map_req->bio;
+	ret = ufshpb_add_bio_page(hpb, q, bio, map_req->bvec, map_req->mctx);
+	if (ret) {
+		hpb_dbg(hpb->hba, "ufshpb_add_bio_page_error %d\n", ret);
+		return ret;
+	}
+
+	ufshpb_set_read_buf_cmd(cmd, map_req->region, map_req->subregion,
+				hpb->subregion_mem_size);
+
+	req = &map_req->req;
+	blk_rq_init(q, req);
+	blk_rq_append_bio(req, bio);
+
+	req->cmd_len = COMMAND_SIZE(cmd[0]);
+	memcpy(req->cmd, cmd, req->cmd_len);
+
+	req->cmd_type = REQ_TYPE_BLOCK_PC;
+	req->cmd_flags = READ | REQ_SOFTBARRIER | REQ_QUIET | REQ_PREEMPT;
+	req->timeout = msecs_to_jiffies(30000);
+	req->end_io = ufshpb_map_req_compl_fn;
+	req->end_io_data = (void *)map_req;
+	req->sense = map_req->sense;
+	req->sense_len = 0;
+
+	hpb_dbg(hpb->hba, "issue map_request: %d - %d\n",
+			map_req->region, map_req->subregion);
+
+	/* ufshpb_issue_map_req() takes queue_lock itself */
+	ufshpb_issue_map_req(q, req);
+	map_req->RSP_issue = ktime_to_ns(ktime_get());
+
+	atomic64_inc(&hpb->map_req_cnt);
+
+	return 0;
+}
+
+static int ufshpb_set_map_req(struct ufshpb_lu *hpb,
+		int region, int subregion, struct ufshpb_map_ctx *mctx,
+		struct ufshpb_rsp_info *rsp_info)
+{
+	struct ufshpb_map_req *map_req;
+
+	spin_lock(&hpb->hpb_lock);
+	map_req = list_first_entry_or_null(&hpb->lh_map_req_free,
+					    struct ufshpb_map_req,
+					    list_map_req);
+	if (!map_req) {
+		hpb_dbg(hpb->hba, "There is no map_req\n");
+		spin_unlock(&hpb->hpb_lock);
+		return -ENOMEM;
+	}
+	list_del(&map_req->list_map_req);
+	memset(map_req, 0x00, sizeof(struct ufshpb_map_req));
+
+	spin_unlock(&hpb->hpb_lock);
+
+	map_req->hpb = hpb;
+	map_req->region = region;
+	map_req->subregion = subregion;
+	map_req->mctx = mctx;
+	map_req->lun = hpb->lun;
+	map_req->RSP_start = rsp_info->RSP_start;
+	map_req->RSP_tasklet_enter1 = rsp_info->RSP_tasklet_enter;
+
+	return ufshpb_map_req_issue(hpb,
+		hpb->hba->sdev_ufs_lu[hpb->lun]->request_queue, map_req);
+}
+
+static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb)
+{
+	struct ufshpb_map_ctx *mctx;
+
+	mctx = list_first_entry_or_null(&hpb->lh_map_ctx,
+			struct ufshpb_map_ctx, list_table);
+	if (mctx) {
+		list_del_init(&mctx->list_table);
+		hpb->debug_free_table--;
+		return mctx;
+	}
+	return ERR_PTR(-ENOMEM);
+}
+
+static inline void ufshpb_add_lru_info(struct victim_select_info *lru_info,
+				       struct ufshpb_region *cb)
+{
+	cb->region_state = HPBREGION_ACTIVE;
+	list_add_tail(&cb->list_region, &lru_info->lru);
+	atomic64_inc(&lru_info->active_cnt);
+}
+
+static inline int ufshpb_add_region(struct ufshpb_lu *hpb,
+					struct ufshpb_region *cb)
+{
+	struct victim_select_info *lru_info;
+	int subregion;
+	int err = 0;
+
+	lru_info = &hpb->lru_info;
+
+	hpb_dbg(hpb->hba,
+		"\x1b[44m\x1b[32m E->active region: %d \x1b[0m\n", cb->region);
+	hpb_trace(hpb, "Noti: ACT RG: %d", cb->region);
+
+	for (subregion = 0; subregion < cb->subregion_count; subregion++) {
+		struct ufshpb_subregion *cp;
+
+		cp = cb->subregion_tbl + subregion;
+		cp->mctx = ufshpb_get_map_ctx(hpb);
+		if (IS_ERR(cp->mctx)) {
+			err = PTR_ERR(cp->mctx);
+			goto out;
+		}
+		cp->subregion_state = HPBSUBREGION_DIRTY;
+	}
+	ufshpb_add_lru_info(lru_info, cb);
+
+	atomic64_inc(&hpb->region_add);
+out:
+	if (err)
+		hpb_dbg(hpb->hba,
+			"get mctx failed. err %d subregion %d free_table %d\n",
+			err, subregion, hpb->debug_free_table);
+	return err;
+}
+
+static inline void ufshpb_put_map_ctx(
+		struct ufshpb_lu *hpb, struct ufshpb_map_ctx *mctx)
+{
+	list_add(&mctx->list_table, &hpb->lh_map_ctx);
+	hpb->debug_free_table++;
+}
+
+static inline void ufshpb_purge_active_page(struct ufshpb_lu *hpb,
+		struct ufshpb_subregion *cp, int state)
+{
+	if (state == HPBSUBREGION_UNUSED) {
+		ufshpb_put_map_ctx(hpb, cp->mctx);
+		cp->mctx = NULL;
+	}
+	cp->subregion_state = state;
+}
+
+static inline void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info,
+					   struct ufshpb_region *cb)
+{
+	list_del_init(&cb->list_region);
+	cb->region_state = HPBREGION_INACTIVE;
+	cb->hit_count = 0;
+	atomic64_dec(&lru_info->active_cnt);
+}
+
+static inline void ufshpb_evict_region(struct ufshpb_lu *hpb,
+				     struct ufshpb_region *cb)
+{
+	struct victim_select_info *lru_info;
+	struct ufshpb_subregion *cp;
+	int subregion;
+
+	lru_info = &hpb->lru_info;
+
+	hpb_dbg(hpb->hba, "\x1b[41m\x1b[33m C->EVICT region: %d \x1b[0m\n",
+		  cb->region);
+	hpb_trace(hpb, "Noti: EVIC RG: %d", cb->region);
+
+	ufshpb_cleanup_lru_info(lru_info, cb);
+	atomic64_inc(&hpb->region_evict);
+	for (subregion = 0; subregion < cb->subregion_count; subregion++) {
+		cp = cb->subregion_tbl + subregion;
+
+		ufshpb_purge_active_page(hpb, cp, HPBSUBREGION_UNUSED);
+	}
+}
+
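+/*
+ * Region replacement policy: a hit moves the region to the list tail
+ * (LFU additionally bumps hit_count); the victim is the list head for
+ * LRU, or the region with the smallest hit_count for LFU.
+ */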
+static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
+				       struct ufshpb_region *cb)
+{
+	switch (lru_info->selection_type) {
+	case LRU:
+		list_move_tail(&cb->list_region, &lru_info->lru);
+		break;
+	case LFU:
+		if (cb->hit_count != 0xffffffff)
+			cb->hit_count++;
+
+		list_move_tail(&cb->list_region, &lru_info->lru);
+		break;
+	default:
+		break;
+	}
+}
+
+static struct ufshpb_region *ufshpb_victim_lru_info(
+				struct victim_select_info *lru_info)
+{
+	struct ufshpb_region *cb;
+	struct ufshpb_region *victim_cb = NULL;
+	u32 hit_count = 0xffffffff;
+
+	switch (lru_info->selection_type) {
+	case LRU:
+		victim_cb = list_first_entry(&lru_info->lru,
+				struct ufshpb_region, list_region);
+		break;
+	case LFU:
+		list_for_each_entry(cb, &lru_info->lru, list_region) {
+			if (hit_count > cb->hit_count) {
+				hit_count = cb->hit_count;
+				victim_cb = cb;
+			}
+		}
+		break;
+	default:
+		break;
+	}
+	return victim_cb;
+}
+
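+/*
+ * Apply a device recommendation in two passes: first evict every region
+ * on the inactive list (a pinned region here is a critical error), then
+ * activate the regions on the active list, evicting an LRU/LFU victim
+ * when max_lru_active_cnt has been reached.
+ */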
+static int ufshpb_evict_load_region(struct ufshpb_lu *hpb,
+				struct ufshpb_rsp_info *rsp_info)
+{
+	struct ufshpb_region *cb;
+	struct ufshpb_region *victim_cb;
+	struct victim_select_info *lru_info;
+	int region, ret, iter;
+
+	lru_info = &hpb->lru_info;
+	hpb_dbg(hpb->hba, "active_cnt :%lld\n",
+			(long long)atomic64_read(&lru_info->active_cnt));
+
+	for (iter = 0; iter < rsp_info->inactive_cnt; iter++) {
+		region = rsp_info->inactive_list.region[iter];
+		cb = hpb->region_tbl + region;
+
+		if (cb->region_state == HPBREGION_PINNED) {
+			/*
+			 * A pinned active-block must never drop out.
+			 * If it does, treat it as a critical error and
+			 * run ufshpb_eh_work.
+			 */
+			dev_warn(hpb->hba->dev,
+				 "UFSHPB pinned active-block drop-out error\n");
+			return -ENOMEM;
+		}
+
+		if (list_empty(&cb->list_region))
+			continue;
+
+		spin_lock(&hpb->hpb_lock);
+		ufshpb_evict_region(hpb, cb);
+		spin_unlock(&hpb->hpb_lock);
+	}
+
+	for (iter = 0; iter < rsp_info->active_cnt; iter++) {
+		region = rsp_info->active_list.region[iter];
+		cb = hpb->region_tbl + region;
+
+		/*
+		 * if the region was already added to the lru list, just
+		 * refresh its lru information, because the region already
+		 * has a map ctx (!list_empty(&cb->list_region) implies the
+		 * region state is active).
+		 */
+		if (!list_empty(&cb->list_region)) {
+			ufshpb_hit_lru_info(lru_info, cb);
+			continue;
+		}
+
+		spin_lock(&hpb->hpb_lock);
+		if (cb->region_state != HPBREGION_INACTIVE) {
+			spin_unlock(&hpb->hpb_lock);
+			continue;
+		}
+
+		if (atomic64_read(&lru_info->active_cnt) ==
+				lru_info->max_lru_active_cnt) {
+
+			victim_cb = ufshpb_victim_lru_info(lru_info);
+
+			if (!victim_cb) {
+				dev_warn(hpb->hba->dev,
+						"UFSHPB victim_cb is NULL\n");
+				goto unlock_error;
+			}
+
+			hpb_trace(hpb, "Noti: VT RG %d", victim_cb->region);
+			hpb_dbg(hpb->hba, "max lru case. victim : %d\n",
+					victim_cb->region);
+
+			ufshpb_evict_region(hpb, victim_cb);
+		}
+
+		ret = ufshpb_add_region(hpb, cb);
+		if (ret) {
+			dev_warn(hpb->hba->dev,
+					"UFSHPB memory allocation failed\n");
+			goto unlock_error;
+		}
+		spin_unlock(&hpb->hpb_lock);
+	}
+	return 0;
+
+unlock_error:
+	spin_unlock(&hpb->hpb_lock);
+	return -ENOMEM;
+}
+
+static inline struct ufshpb_rsp_field *ufshpb_get_hpb_rsp(
+		struct ufshcd_lrb *lrbp)
+{
+	return (struct ufshpb_rsp_field *)&lrbp->ucd_rsp_ptr->sr.sense_data_len;
+}
+
+static void ufshpb_rsp_map_cmd_req(struct ufshpb_lu *hpb,
+		struct ufshpb_rsp_info *rsp_info)
+{
+	struct ufshpb_region *cb;
+	struct ufshpb_subregion *cp;
+	int region, subregion;
+	int iter;
+	int ret;
+
+	/*
+	 * Before issuing the READ BUFFER command for an active block,
+	 * prepare the memory from the memory pool.
+	 */
+	ret = ufshpb_evict_load_region(hpb, rsp_info);
+	if (ret) {
+		hpb_dbg(hpb->hba, "region evict/load failed. ret %d\n", ret);
+		goto wakeup_ee_worker;
+	}
+
+	for (iter = 0; iter < rsp_info->active_cnt; iter++) {
+		region = rsp_info->active_list.region[iter];
+		subregion = rsp_info->active_list.subregion[iter];
+		cb = hpb->region_tbl + region;
+
+		if (region >= hpb->regions_per_lu ||
+					subregion >= cb->subregion_count) {
+			hpb_dbg(hpb->hba,
+				"ufshpb issue-map %d - %d range error\n",
+				region, subregion);
+			goto wakeup_ee_worker;
+		}
+
+		cp = cb->subregion_tbl + subregion;
+
+		/*
+		 * if subregion_state is already HPBSUBREGION_ISSUED, the
+		 * active page has already been added to the list, so just
+		 * skip this subregion.
+		 */
+		spin_lock(&hpb->hpb_lock);
+		if (cp->subregion_state == HPBSUBREGION_ISSUED) {
+			spin_unlock(&hpb->hpb_lock);
+			continue;
+		}
+
+		cp->subregion_state = HPBSUBREGION_ISSUED;
+
+		ret = ufshpb_clean_dirty_bitmap(hpb, cp);
+
+		spin_unlock(&hpb->hpb_lock);
+
+		if (ret)
+			continue;
+
+		if (hpb->force_map_req_disable ||
+				!hpb->hba->sdev_ufs_lu[hpb->lun] ||
+				!hpb->hba->sdev_ufs_lu[hpb->lun]->request_queue)
+			return;
+
+		ret = ufshpb_set_map_req(hpb, region, subregion,
+						cp->mctx, rsp_info);
+		if (ret) {
+			hpb_dbg(hpb->hba, "ufshpb_set_map_req error %d\n", ret);
+			goto wakeup_ee_worker;
+		}
+	}
+	return;
+
+wakeup_ee_worker:
+	hpb->hba->ufshpb_state = HPB_FAILED;
+	schedule_work(&hpb->hba->ufshpb_eh_work);
+}
+
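+/*
+ * The device piggy-backs HPB recommendations in the sense-data area of
+ * the response UPIU.  This handler validates that field, copies the
+ * active/inactive region lists into an rsp_info taken from the free
+ * pool, and defers the actual map work to ufshpb_tasklet_fn.
+ */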
+/* called from the UFS ISR completion path */
+void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+	struct ufshpb_lu *hpb;
+	struct ufshpb_rsp_field *rsp_field;
+	struct ufshpb_rsp_info *rsp_info;
+	struct ufshpb_region *region;
+	int data_seg_len, num, blk_idx;
+
+	data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
+		& MASK_RSP_UPIU_DATA_SEG_LEN;
+	if (!data_seg_len) {
+		bool do_tasklet = false;
+
+		if (lrbp->lun >= UFS_UPIU_MAX_GENERAL_LUN)
+			return;
+
+		hpb = hba->ufshpb_lup[lrbp->lun];
+		if (!hpb)
+			return;
+
+		spin_lock(&hpb->rsp_list_lock);
+		do_tasklet = !list_empty(&hpb->lh_rsp_info);
+		spin_unlock(&hpb->rsp_list_lock);
+
+		if (do_tasklet)
+			tasklet_schedule(&hpb->ufshpb_tasklet);
+		return;
+	}
+
+	rsp_field = ufshpb_get_hpb_rsp(lrbp);
+
+	if ((BE_BYTE(rsp_field->sense_data_len, 0) != DEV_SENSE_SEG_LEN) ||
+			rsp_field->desc_type != DEV_DES_TYPE ||
+			rsp_field->additional_len != DEV_ADDITIONAL_LEN ||
+			rsp_field->hpb_type == HPB_RSP_NONE ||
+			rsp_field->active_region_cnt > MAX_ACTIVE_NUM ||
+			rsp_field->inactive_region_cnt > MAX_INACTIVE_NUM)
+		return;
+
+	if (lrbp->lun >= UFS_UPIU_MAX_GENERAL_LUN) {
+		dev_warn(hba->dev, "lun is not general = %d", lrbp->lun);
+		return;
+	}
+
+	hpb = hba->ufshpb_lup[lrbp->lun];
+	if (!hpb) {
+		dev_warn(hba->dev,
+			"%s:%d UFS-LU%d is not UFSHPB LU\n", __func__,
+			__LINE__, lrbp->lun);
+		return;
+	}
+
+	hpb_dbg(hpb->hba,
+		"HPB-Info Noti: %d LUN: %d Seg-Len %d, Req_type = %d\n",
+		rsp_field->hpb_type, lrbp->lun,
+		be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
+		MASK_RSP_UPIU_DATA_SEG_LEN, rsp_field->reserved);
+
+	atomic64_inc(&hpb->rb_noti_cnt);
+
+	if (!hpb->lu_hpb_enable) {
+		dev_warn(hba->dev, "UFSHPB(%s) LU(%d) not HPB-LU\n",
+				__func__, lrbp->lun);
+		return;
+	}
+
+	spin_lock(&hpb->rsp_list_lock);
+	rsp_info = ufshpb_get_req_info(hpb);
+	spin_unlock(&hpb->rsp_list_lock);
+	if (!rsp_info)
+		return;
+
+	switch (rsp_field->hpb_type) {
+	case HPB_RSP_REQ_REGION_UPDATE:
+		WARN_ON(data_seg_len != DEV_DATA_SEG_LEN);
+		rsp_info->type = HPB_RSP_REQ_REGION_UPDATE;
+
+		rsp_info->RSP_start = ktime_to_ns(ktime_get());
+
+		for (num = 0; num < rsp_field->active_region_cnt; num++) {
+			blk_idx = num * PER_ACTIVE_INFO_BYTES;
+			rsp_info->active_list.region[num] =
+				BE_BYTE(rsp_field->hpb_active_field, blk_idx);
+			rsp_info->active_list.subregion[num] =
+				BE_BYTE(rsp_field->hpb_active_field,
+								blk_idx + 2);
+			hpb_dbg(hpb->hba,
+				"active num: %d, #block: %d, page#: %d\n",
+				num + 1,
+				rsp_info->active_list.region[num],
+				rsp_info->active_list.subregion[num]);
+		}
+		rsp_info->active_cnt = num;
+
+		for (num = 0; num < rsp_field->inactive_region_cnt; num++) {
+			blk_idx = num * PER_INACTIVE_INFO_BYTES;
+			rsp_info->inactive_list.region[num] =
+				BE_BYTE(rsp_field->hpb_inactive_field, blk_idx);
+			hpb_dbg(hpb->hba, "inactive num: %d, #block: %d\n",
+				  num + 1, rsp_info->inactive_list.region[num]);
+		}
+		rsp_info->inactive_cnt = num;
+
+		hpb_trace(hpb, "Noti: #ACTIVE %d, #INACTIVE %d",
+			rsp_info->active_cnt, rsp_info->inactive_cnt);
+		hpb_dbg(hpb->hba, "active cnt: %d, inactive cnt: %d\n",
+			  rsp_info->active_cnt, rsp_info->inactive_cnt);
+		list_for_each_entry(region, &hpb->lru_info.lru, list_region)
+			hpb_dbg(hpb->hba, "active list : %d (cnt: %d)\n",
+					region->region, region->hit_count);
+		hpb_dbg(hpb->hba, "add_list %p -> %p\n",
+					rsp_info, &hpb->lh_rsp_info);
+
+		spin_lock(&hpb->rsp_list_lock);
+		list_add_tail(&rsp_info->list_rsp_info, &hpb->lh_rsp_info);
+		spin_unlock(&hpb->rsp_list_lock);
+
+		tasklet_schedule(&hpb->ufshpb_tasklet);
+		break;
+	default:
+		hpb_dbg(hpb->hba, "hpb_type is not available : %d\n",
+					rsp_field->hpb_type);
+		break;
+	}
+}
+
+static int ufshpb_read_desc(struct ufs_hba *hba,
+		u8 desc_id, u8 desc_index, u8 *desc_buf, u32 size)
+{
+	int err = 0;
+	u8 selector = 1;
+
+	err = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
+				desc_id, desc_index, selector, desc_buf, &size);
+	if (err)
+		dev_dbg(hba->dev, "%s:%d reading descriptor failed. err = %d\n",
+			__func__, __LINE__, err);
+	return err;
+}
+
+static int ufshpb_read_device_desc(struct ufs_hba *hba, u8 *desc_buf, u32 size)
+{
+	return ufshpb_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, desc_buf, size);
+}
+
+static int ufshpb_read_geo_desc(struct ufs_hba *hba, u8 *desc_buf, u32 size)
+{
+	return ufshpb_read_desc(hba, QUERY_DESC_IDN_GEOMETRY, 0,
+						desc_buf, size);
+}
+
+static int ufshpb_read_unit_desc(struct ufs_hba *hba, int lun,
+						u8 *desc_buf, u32 size)
+{
+	return ufshpb_read_desc(hba, QUERY_DESC_IDN_UNIT,
+						lun, desc_buf, size);
+}
+
+static int ufshpb_read_config_desc(struct ufs_hba *hba, u8 *desc_buf, u32 size)
+{
+	return ufshpb_read_desc(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
+						desc_buf, size);
+}
+
+static inline void ufshpb_add_subregion_to_req_list(struct ufshpb_lu *hpb,
+		struct ufshpb_subregion *cp)
+{
+	list_add_tail(&cp->list_subregion, &hpb->lh_subregion_req);
+	cp->subregion_state = HPBSUBREGION_ISSUED;
+}
+
+static int ufshpb_execute_req(struct ufshpb_lu *hpb, unsigned char *cmd,
+		struct ufshpb_subregion *cp)
+{
+	unsigned long flags;
+	struct request_queue *q;
+	char sense[SCSI_SENSE_BUFFERSIZE];
+	struct scsi_sense_hdr sshdr;
+	struct scsi_device *sdp;
+	struct ufs_hba *hba = hpb->hba;
+	struct request req;
+	struct bio bio;
+	int ret = 0;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	sdp = hba->sdev_ufs_lu[hpb->lun];
+	if (sdp) {
+		ret = scsi_device_get(sdp);
+		if (!ret && !scsi_device_online(sdp)) {
+			ret = -ENODEV;
+			scsi_device_put(sdp);
+		}
+	} else {
+		ret = -ENODEV;
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	if (ret)
+		return ret;
+
+	q = sdp->request_queue;
+	ret = ufshpb_add_bio_page(hpb, q, &bio, hpb->bvec, cp->mctx);
+	if (ret)
+		goto put_out;
+
+	blk_rq_init(q, &req);
+	blk_rq_append_bio(&req, &bio);
+
+	req.cmd_len = COMMAND_SIZE(cmd[0]);
+	memcpy(req.cmd, cmd, req.cmd_len);
+	req.sense = sense;
+	req.sense_len = 0;
+	req.retries = 3;
+	req.timeout = msecs_to_jiffies(10000);
+	req.cmd_type = REQ_TYPE_BLOCK_PC;
+	req.cmd_flags = REQ_QUIET | REQ_PREEMPT;
+
+	blk_execute_rq(q, NULL, &req, 1);
+	if (req.errors) {
+		ret = -EIO;
+		scsi_normalize_sense(req.sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
+		dev_err(HPB_DEV(hpb),
+				"code %x sense_key %x asc %x ascq %x",
+				sshdr.response_code, sshdr.sense_key, sshdr.asc,
+				sshdr.ascq);
+		dev_err(HPB_DEV(hpb),
+				"byte4 %x byte5 %x byte6 %x additional_len %x",
+				sshdr.byte4, sshdr.byte5, sshdr.byte6,
+				sshdr.additional_length);
+		spin_lock_bh(&hpb->hpb_lock);
+		ufshpb_error_active_subregion(hpb, cp);
+		spin_unlock_bh(&hpb->hpb_lock);
+	} else {
+		ret = 0;
+		spin_lock_bh(&hpb->hpb_lock);
+		ufshpb_clean_dirty_bitmap(hpb, cp);
+		spin_unlock_bh(&hpb->hpb_lock);
+	}
+put_out:
+	scsi_device_put(sdp);
+	return ret;
+}
+
+static int ufshpb_issue_map_req_from_list(struct ufshpb_lu *hpb)
+{
+	struct ufshpb_subregion *cp, *next_cp;
+	int ret;
+
+	LIST_HEAD(req_list);
+
+	spin_lock(&hpb->hpb_lock);
+	list_splice_init(&hpb->lh_subregion_req, &req_list);
+	spin_unlock(&hpb->hpb_lock);
+
+	list_for_each_entry_safe(cp, next_cp, &req_list, list_subregion) {
+		unsigned char cmd[10] = { 0 };
+
+		ufshpb_set_read_buf_cmd(cmd, cp->region, cp->subregion,
+					hpb->subregion_mem_size);
+
+		hpb_dbg(hpb->hba, "issue map_request: %d - %d\n",
+				cp->region, cp->subregion);
+
+		ret = ufshpb_execute_req(hpb, cmd, cp);
+		if (ret < 0) {
+			dev_err(HPB_DEV(hpb),
+					"region %d sub %d failed with err %d",
+					cp->region, cp->subregion, ret);
+			continue;
+		}
+
+		spin_lock_bh(&hpb->hpb_lock);
+		ufshpb_clean_active_subregion(hpb, cp);
+		list_del_init(&cp->list_subregion);
+		spin_unlock_bh(&hpb->hpb_lock);
+	}
+
+	return 0;
+}
+
+static void ufshpb_work_handler(struct work_struct *work)
+{
+	struct ufshpb_lu *hpb;
+	int ret;
+
+	hpb = container_of(work, struct ufshpb_lu, ufshpb_work);
+	hpb_dbg(hpb->hba, "worker start\n");
+
+	if (!list_empty(&hpb->lh_subregion_req)) {
+		pm_runtime_get_sync(HPB_DEV(hpb));
+		ret = ufshpb_issue_map_req_from_list(hpb);
+		/*
+		 * If this fails at init time, it is not a critical error:
+		 * the ufshpb device will request the map again with a
+		 * map-req, so just finish the work handler.
+		 */
+		if (ret)
+			hpb_dbg(hpb->hba, "failed map-issue. ret %d\n", ret);
+		pm_runtime_mark_last_busy(HPB_DEV(hpb));
+		pm_runtime_put_noidle(HPB_DEV(hpb));
+	}
+
+	hpb_dbg(hpb->hba, "worker end\n");
+}
+
+static void ufshpb_retry_work_handler(struct work_struct *work)
+{
+	struct ufshpb_lu *hpb;
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct ufshpb_map_req *map_req;
+	int ret = 0;
+
+	LIST_HEAD(retry_list);
+
+	hpb = container_of(dwork, struct ufshpb_lu, ufshpb_retry_work);
+	hpb_dbg(hpb->hba, "retry worker start");
+
+	spin_lock_bh(&hpb->hpb_lock);
+	list_splice_init(&hpb->lh_map_req_retry, &retry_list);
+	spin_unlock_bh(&hpb->hpb_lock);
+
+	while (1) {
+		map_req = list_first_entry_or_null(&retry_list,
+				struct ufshpb_map_req, list_map_req);
+		if (!map_req) {
+			hpb_dbg(hpb->hba, "There is no map_req");
+			break;
+		}
+		list_del(&map_req->list_map_req);
+
+		map_req->retry_cnt++;
+
+		ret = ufshpb_map_req_issue(hpb,
+				hpb->hba->sdev_ufs_lu[hpb->lun]->request_queue,
+				map_req);
+		if (ret) {
+			hpb_dbg(hpb->hba, "ufshpb_set_map_req error %d", ret);
+			goto wakeup_ee_worker;
+		}
+	}
+	hpb_dbg(hpb->hba, "worker end");
+	return;
+
+wakeup_ee_worker:
+	hpb->hba->ufshpb_state = HPB_FAILED;
+	schedule_work(&hpb->hba->ufshpb_eh_work);
+}
+
+static void ufshpb_tasklet_fn(unsigned long private)
+{
+	struct ufshpb_lu *hpb = (struct ufshpb_lu *)private;
+	struct ufshpb_rsp_info *rsp_info;
+	unsigned long flags;
+
+	while (1) {
+		spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+		rsp_info = list_first_entry_or_null(&hpb->lh_rsp_info,
+				struct ufshpb_rsp_info, list_rsp_info);
+		if (!rsp_info) {
+			spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+			break;
+		}
+
+		rsp_info->RSP_tasklet_enter = ktime_to_ns(ktime_get());
+
+		list_del_init(&rsp_info->list_rsp_info);
+		spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+
+		switch (rsp_info->type) {
+		case HPB_RSP_REQ_REGION_UPDATE:
+			ufshpb_rsp_map_cmd_req(hpb, rsp_info);
+			break;
+		default:
+			break;
+		}
+
+		spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+		list_add_tail(&rsp_info->list_rsp_info, &hpb->lh_rsp_info_free);
+		spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+	}
+}
+
+static void ufshpb_init_constant(void)
+{
+	sects_per_blk_shift = ffs(BLOCK) - ffs(SECTOR);
+	pr_debug("sects_per_blk_shift: %u %u\n",
+		  sects_per_blk_shift, ffs(SECTORS_PER_BLOCK) - 1);
+
+	bits_per_dword_shift = ffs(BITS_PER_DWORD) - 1;
+	bits_per_dword_mask = BITS_PER_DWORD - 1;
+	pr_debug("bits_per_dword %u shift %u mask 0x%X\n",
+		  BITS_PER_DWORD, bits_per_dword_shift, bits_per_dword_mask);
+
+	bits_per_byte_shift = ffs(BITS_PER_BYTE) - 1;
+	pr_debug("bits_per_byte %u shift %u\n",
+		  BITS_PER_BYTE, bits_per_byte_shift);
+}
+
+static inline void ufshpb_req_mempool_remove(struct ufshpb_lu *hpb)
+{
+	if (!hpb)
+		return;
+	kfree(hpb->rsp_info);
+	kfree(hpb->map_req);
+}
+
+static void ufshpb_table_mempool_remove(struct ufshpb_lu *hpb)
+{
+	struct ufshpb_map_ctx *mctx, *next;
+	int i;
+
+	/*
+	 * Every mctx on lh_map_ctx was allocated completely, so all of
+	 * its resources can be freed here.
+	 */
+	list_for_each_entry_safe(mctx, next, &hpb->lh_map_ctx, list_table) {
+		for (i = 0; i < hpb->mpages_per_subregion; i++)
+			__free_page(mctx->m_page[i]);
+
+		vfree(mctx->ppn_dirty);
+		kfree(mctx->m_page);
+		kfree(mctx);
+		alloc_mctx--;
+	}
+}
+
+static int ufshpb_init_pinned_active_block(struct ufshpb_lu *hpb,
+		struct ufshpb_region *cb)
+{
+	struct ufshpb_subregion *cp;
+	int subregion, j;
+	int err = 0;
+
+	for (subregion = 0 ; subregion < cb->subregion_count ; subregion++) {
+		cp = cb->subregion_tbl + subregion;
+
+		cp->mctx = ufshpb_get_map_ctx(hpb);
+		if (IS_ERR(cp->mctx)) {
+			err = PTR_ERR(cp->mctx);
+			goto release;
+		}
+		spin_lock(&hpb->hpb_lock);
+		ufshpb_add_subregion_to_req_list(hpb, cp);
+		spin_unlock(&hpb->hpb_lock);
+	}
+
+	cb->region_state = HPBREGION_PINNED;
+	return 0;
+
+release:
+	for (j = 0 ; j < subregion ; j++) {
+		cp = cb->subregion_tbl + j;
+		ufshpb_put_map_ctx(hpb, cp->mctx);
+	}
+	return err;
+}
+
+static inline bool ufshpb_is_HPBREGION_PINNED(
+		struct ufshpb_lu_desc *lu_desc, int region)
+{
+	if (lu_desc->lu_hpb_pinned_end_offset != -1 &&
+			region >= lu_desc->hpb_pinned_region_startidx &&
+				region <= lu_desc->lu_hpb_pinned_end_offset)
+		return true;
+
+	return false;
+}
+
+static inline void ufshpb_init_jobs(struct ufshpb_lu *hpb)
+{
+	INIT_WORK(&hpb->ufshpb_work, ufshpb_work_handler);
+	INIT_DELAYED_WORK(&hpb->ufshpb_retry_work, ufshpb_retry_work_handler);
+	tasklet_init(&hpb->ufshpb_tasklet, ufshpb_tasklet_fn,
+							(unsigned long)hpb);
+}
+
+static inline void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
+{
+	cancel_work_sync(&hpb->ufshpb_work);
+	cancel_delayed_work_sync(&hpb->ufshpb_retry_work);
+	tasklet_kill(&hpb->ufshpb_tasklet);
+}
+
+static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb,
+		struct ufshpb_region *cb)
+{
+	int subregion;
+
+	for (subregion = 0 ; subregion < cb->subregion_count ; subregion++) {
+		struct ufshpb_subregion *cp = cb->subregion_tbl + subregion;
+
+		cp->region = cb->region;
+		cp->subregion = subregion;
+		cp->subregion_state = HPBSUBREGION_UNUSED;
+	}
+}
+
+static inline int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb,
+		struct ufshpb_region *cb, int subregion_count)
+{
+	cb->subregion_tbl = kcalloc(subregion_count,
+			sizeof(struct ufshpb_subregion), GFP_KERNEL);
+	if (!cb->subregion_tbl)
+		return -ENOMEM;
+
+	cb->subregion_count = subregion_count;
+	hpb_dbg(hpb->hba,
+		"region %d subregion_count %d active_page_table bytes %lu\n",
+		cb->region, subregion_count,
+		sizeof(struct ufshpb_subregion *) * hpb->subregions_per_region);
+
+	return 0;
+}
+
+static int ufshpb_table_mempool_init(struct ufshpb_lu *hpb,
+		int num_regions, int subregions_per_region,
+		int entry_count, int entry_byte)
+{
+	int i, j;
+	struct ufshpb_map_ctx *mctx = NULL;
+
+	INIT_LIST_HEAD(&hpb->lh_map_ctx);
+
+	for (i = 0 ; i < num_regions * subregions_per_region ; i++) {
+		mctx = kzalloc(sizeof(struct ufshpb_map_ctx), GFP_KERNEL);
+		if (!mctx)
+			goto release_mem;
+
+		mctx->m_page = kcalloc(hpb->mpages_per_subregion,
+					sizeof(struct page *), GFP_KERNEL);
+		if (!mctx->m_page)
+			goto release_mem;
+
+		mctx->ppn_dirty = vzalloc(entry_count >>
+						bits_per_byte_shift);
+		if (!mctx->ppn_dirty)
+			goto release_mem;
+
+		for (j = 0; j < hpb->mpages_per_subregion; j++) {
+			mctx->m_page[j] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+			if (!mctx->m_page[j])
+				goto release_mem;
+		}
+		hpb_dbg(hpb->hba, "[%d] mctx->m_page %p get_order %d\n", i,
+			  mctx->m_page, get_order(hpb->mpages_per_subregion));
+
+		INIT_LIST_HEAD(&mctx->list_table);
+		list_add(&mctx->list_table, &hpb->lh_map_ctx);
+
+		hpb->debug_free_table++;
+	}
+
+	alloc_mctx = num_regions * subregions_per_region;
+	hpb_dbg(hpb->hba, "number of mctx %d %d %d. debug_free_table %d\n",
+		  num_regions * subregions_per_region, num_regions,
+		  subregions_per_region, hpb->debug_free_table);
+	return 0;
+
+release_mem:
+	/*
+	 * mctxs already added to lh_map_ctx will be freed
+	 * by the caller.
+	 */
+	if (!mctx)
+		goto out;
+
+	if (mctx->m_page) {
+		for (j = 0; j < hpb->mpages_per_subregion; j++)
+			if (mctx->m_page[j])
+				__free_page(mctx->m_page[j]);
+		kfree(mctx->m_page);
+	}
+	vfree(mctx->ppn_dirty);
+	kfree(mctx);
+out:
+	return -ENOMEM;
+}
+
+static int ufshpb_req_mempool_init(struct ufs_hba *hba,
+				struct ufshpb_lu *hpb, int queue_depth)
+{
+	struct ufshpb_rsp_info *rsp_info = NULL;
+	struct ufshpb_map_req *map_req = NULL;
+	int map_req_cnt = 0;
+	int i;
+
+	if (!queue_depth) {
+		queue_depth = hba->nutrs;
+		hpb_dbg(hba,
+			"lu_queue_depth is 0; using the device queue depth\n");
+		hpb_dbg(hba, "hba->nutrs = %d\n", hba->nutrs);
+	}
+
+	INIT_LIST_HEAD(&hpb->lh_rsp_info_free);
+	INIT_LIST_HEAD(&hpb->lh_map_req_free);
+	INIT_LIST_HEAD(&hpb->lh_map_req_retry);
+
+	hpb->rsp_info = kcalloc(queue_depth, sizeof(struct ufshpb_rsp_info),
+								GFP_KERNEL);
+	if (!hpb->rsp_info)
+		goto release_mem;
+
+	map_req_cnt = max(hpb->subregions_per_lu, queue_depth);
+
+	hpb->map_req = kcalloc(map_req_cnt, sizeof(struct ufshpb_map_req),
+								GFP_KERNEL);
+	if (!hpb->map_req)
+		goto release_mem;
+
+	for (i = 0; i < queue_depth; i++) {
+		rsp_info = hpb->rsp_info + i;
+		INIT_LIST_HEAD(&rsp_info->list_rsp_info);
+		list_add_tail(&rsp_info->list_rsp_info, &hpb->lh_rsp_info_free);
+	}
+
+	for (i = 0; i < map_req_cnt; i++) {
+		map_req = hpb->map_req + i;
+		INIT_LIST_HEAD(&map_req->list_map_req);
+		list_add_tail(&map_req->list_map_req, &hpb->lh_map_req_free);
+	}
+
+	return 0;
+
+release_mem:
+	kfree(hpb->rsp_info);
+	return -ENOMEM;
+}
+
+static void ufshpb_init_lu_constant(struct ufshpb_lu *hpb,
+		struct ufshpb_lu_desc *lu_desc,
+		struct ufshpb_func_desc *func_desc)
+{
+	unsigned long long region_unit_size, region_mem_size;
+	int entries_per_region;
+
+	/*	From descriptors	*/
+	region_unit_size = (unsigned long long)
+		SECTOR * (0x01 << func_desc->hpb_region_size);
+	region_mem_size = region_unit_size / BLOCK * HPB_ENTRY_SIZE;
+
+	hpb->subregion_unit_size = (unsigned long long)
+		SECTOR * (0x01 << func_desc->hpb_subregion_size);
+	hpb->subregion_mem_size = hpb->subregion_unit_size /
+						BLOCK * HPB_ENTRY_SIZE;
+
+	hpb->hpb_ver = func_desc->hpb_ver;
+	hpb->lu_max_active_regions = lu_desc->lu_max_active_hpb_regions;
+	hpb->lru_info.max_lru_active_cnt =
+					lu_desc->lu_max_active_hpb_regions
+					- lu_desc->lu_num_hpb_pinned_regions;
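+	/* pinned regions never enter the LRU, so they are excluded here */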
+
+	/*	relation : lu <-> region <-> sub region <-> entry	 */
+	hpb->lu_num_blocks = lu_desc->lu_logblk_cnt;
+	entries_per_region = region_mem_size / HPB_ENTRY_SIZE;
+	hpb->entries_per_subregion = hpb->subregion_mem_size / HPB_ENTRY_SIZE;
+	hpb->subregions_per_region = region_mem_size / hpb->subregion_mem_size;
+
+	/*
+	 * regions_per_lu
+	 *	= (lu_num_blocks * 4096) / region_unit_size
+	 *	= (lu_num_blocks * HPB_ENTRY_SIZE) / region_mem_size
+	 *	= lu_num_blocks / (region_mem_size / HPB_ENTRY_SIZE)
+	 *
+	 * subregions_per_lu is derived the same way from
+	 * subregion_mem_size; both divisions round up so the last,
+	 * possibly partial, region and subregion are still counted.
+	 */
+	hpb->regions_per_lu = ((unsigned long long)hpb->lu_num_blocks
+			+ (region_mem_size / HPB_ENTRY_SIZE) - 1)
+			/ (region_mem_size / HPB_ENTRY_SIZE);
+	hpb->subregions_per_lu = ((unsigned long long)hpb->lu_num_blocks
+			+ (hpb->subregion_mem_size / HPB_ENTRY_SIZE) - 1)
+			/ (hpb->subregion_mem_size / HPB_ENTRY_SIZE);
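+	/*
+	 * Worked example (hypothetical descriptor values): with
+	 * bHPBRegionSize = 0x0F, region_unit_size = 512 * 2^15 = 16MiB,
+	 * so region_mem_size = 16MiB / 4096 * 8 = 32KiB and
+	 * entries_per_region = 4096. With bHPBSubRegionSize = 0x0C,
+	 * subregion_unit_size = 2MiB, subregion_mem_size = 4KiB (one
+	 * OS page), entries_per_subregion = 512 and
+	 * subregions_per_region = 8.
+	 */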
+
+	/*	mempool info	*/
+	hpb->mpage_bytes = OS_PAGE_SIZE;
+	hpb->mpages_per_subregion = hpb->subregion_mem_size / hpb->mpage_bytes;
+
+	/*	Bitmask Info.	 */
+	hpb->dwords_per_subregion = hpb->entries_per_subregion / BITS_PER_DWORD;
+	hpb->entries_per_region_shift = ffs(entries_per_region) - 1;
+	hpb->entries_per_region_mask = entries_per_region - 1;
+	hpb->entries_per_subregion_shift = ffs(hpb->entries_per_subregion) - 1;
+	hpb->entries_per_subregion_mask = hpb->entries_per_subregion - 1;
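+	/*
+	 * These shift/mask pairs let the I/O path turn an LPN into a
+	 * (region, subregion, offset) triple with shifts instead of
+	 * divisions; this assumes entries_per_region and
+	 * entries_per_subregion are powers of two.
+	 */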
+
+	pr_info("===== From Device Descriptor! =====\n");
+	pr_info("hpb_region_size = %d, hpb_subregion_size = %d\n",
+			func_desc->hpb_region_size,
+			func_desc->hpb_subregion_size);
+	pr_info("=====   Constant Values(LU)   =====\n");
+	pr_info("region_unit_size = %llu, region_mem_size %llu\n",
+			region_unit_size, region_mem_size);
+	pr_info("subregion_unit_size = %llu, subregion_mem_size %d\n",
+			hpb->subregion_unit_size, hpb->subregion_mem_size);
+	pr_info("lu_num_blks = %d, reg_per_lu = %d, subreg_per_lu = %d\n",
+			hpb->lu_num_blocks, hpb->regions_per_lu,
+			hpb->subregions_per_lu);
+	pr_info("subregions_per_region = %d\n",
+			hpb->subregions_per_region);
+	pr_info("entries_per_region %u shift %u mask 0x%X\n",
+			entries_per_region, hpb->entries_per_region_shift,
+			hpb->entries_per_region_mask);
+	pr_info("entries_per_subregion %u shift %u mask 0x%X\n",
+			hpb->entries_per_subregion,
+			hpb->entries_per_subregion_shift,
+			hpb->entries_per_subregion_mask);
+	pr_info("mpages_per_subregion : %d\n",
+			hpb->mpages_per_subregion);
+	pr_info("===================================\n");
+}
+
+static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb,
+		struct ufshpb_func_desc *func_desc,
+		struct ufshpb_lu_desc *lu_desc, int lun)
+{
+	struct ufshpb_region *region_table, *cb;
+	struct ufshpb_subregion *cp;
+	int region, subregion;
+	int total_subregion_count, subregion_count;
+	bool do_work_handler = false;
+	int ret, j;
+
+	hpb->lu_hpb_enable = true;
+
+	ufshpb_init_lu_constant(hpb, lu_desc, func_desc);
+
+	region_table = kcalloc(hpb->regions_per_lu,
+				sizeof(struct ufshpb_region), GFP_KERNEL);
+	if (!region_table)
+		return -ENOMEM;
+
+	hpb_dbg(hba, "active_block_table bytes: %lu\n",
+		(sizeof(struct ufshpb_region) * hpb->regions_per_lu));
+
+	hpb->region_tbl = region_table;
+
+	spin_lock_init(&hpb->hpb_lock);
+	spin_lock_init(&hpb->rsp_list_lock);
+
+	/* init lru information */
+	INIT_LIST_HEAD(&hpb->lru_info.lru);
+	hpb->lru_info.selection_type = LRU;
+
+	INIT_LIST_HEAD(&hpb->lh_subregion_req);
+	INIT_LIST_HEAD(&hpb->lh_rsp_info);
+	INIT_LIST_HEAD(&hpb->lh_map_ctx);
+
+	ret = ufshpb_table_mempool_init(hpb,
+			lu_desc->lu_max_active_hpb_regions,
+			hpb->subregions_per_region,
+			hpb->entries_per_subregion, HPB_ENTRY_SIZE);
+	if (ret) {
+		dev_err(HPB_DEV(hpb), "ppn table mempool init fail!\n");
+		goto release_mempool;
+	}
+
+	ret = ufshpb_req_mempool_init(hba, hpb, lu_desc->lu_queue_depth);
+	if (ret) {
+		dev_err(HPB_DEV(hpb), "rsp_info_mempool init fail!\n");
+		goto release_mempool;
+	}
+
+	total_subregion_count = hpb->subregions_per_lu;
+
+	ufshpb_init_jobs(hpb);
+
+	hpb_dbg(hba, "total_subregion_count: %d\n", total_subregion_count);
+	for (region = 0, subregion_count = 0,
+			total_subregion_count = hpb->subregions_per_lu;
+			region < hpb->regions_per_lu;
+			region++, total_subregion_count -= subregion_count) {
+		cb = region_table + region;
+		cb->region = region;
+
+		/* init LRU region information */
+		INIT_LIST_HEAD(&cb->list_region);
+		cb->hit_count = 0;
+
+		subregion_count = min(total_subregion_count,
+				hpb->subregions_per_region);
+		hpb_dbg(hba, "total: %d subregion_count: %d\n",
+				total_subregion_count, subregion_count);
+
+		ret = ufshpb_alloc_subregion_tbl(hpb, cb, subregion_count);
+		if (ret)
+			goto release_region_cp;
+		ufshpb_init_subregion_tbl(hpb, cb);
+
+		if (ufshpb_is_HPBREGION_PINNED(lu_desc, region)) {
+			hpb_dbg(hba, "region: %d PINNED %d ~ %d\n",
+				region, lu_desc->hpb_pinned_region_startidx,
+				lu_desc->lu_hpb_pinned_end_offset);
+			ret = ufshpb_init_pinned_active_block(hpb, cb);
+			if (ret)
+				goto release_region_cp;
+			do_work_handler = true;
+		} else {
+			hpb_dbg(hba, "region: %d inactive\n", cb->region);
+			cb->region_state = HPBREGION_INACTIVE;
+		}
+	}
+
+	if (total_subregion_count != 0) {
+		dev_err(HPB_DEV(hpb),
+			"error total_subregion_count: %d\n",
+			total_subregion_count);
+		ret = -EINVAL;
+		goto release_region_cp;
+	}
+
+	hpb->hba = hba;
+	hpb->lun = lun;
+
+	if (do_work_handler)
+		schedule_work(&hpb->ufshpb_work);
+
+	/*
+	 * Even if sysfs creation fails, HPB can still operate normally,
+	 * so the return value is intentionally ignored.
+	 */
+	ufshpb_create_sysfs(hba, hpb);
+	return 0;
+
+release_region_cp:
+	for (j = 0 ; j < region ; j++) {
+		cb = region_table + j;
+		if (cb->subregion_tbl) {
+			for (subregion = 0; subregion < cb->subregion_count;
+								subregion++) {
+				cp = cb->subregion_tbl + subregion;
+
+				if (cp->mctx)
+					ufshpb_put_map_ctx(hpb, cp->mctx);
+			}
+			kfree(cb->subregion_tbl);
+		}
+	}
+
+release_mempool:
+	ufshpb_table_mempool_remove(hpb);
+	kfree(region_table);
+	return ret;
+}
+
+static int ufshpb_get_hpb_lu_desc(struct ufs_hba *hba,
+		struct ufshpb_lu_desc *lu_desc, int lun)
+{
+	int ret;
+	u8 logical_buf[UFSHPB_QUERY_DESC_UNIT_MAX_SIZE] = { 0 };
+
+	ret = ufshpb_read_unit_desc(hba, lun, logical_buf,
+				UFSHPB_QUERY_DESC_UNIT_MAX_SIZE);
+	if (ret) {
+		dev_err(hba->dev, "read unit desc failed. ret %d\n", ret);
+		return ret;
+	}
+
+	lu_desc->lu_queue_depth = logical_buf[UNIT_DESC_PARAM_LU_Q_DEPTH];
+
+	/* power-of-two exponent, e.g. 0x0C = 4KB blocks */
+	lu_desc->lu_logblk_size = logical_buf[UNIT_DESC_PARAM_LOGICAL_BLK_SIZE];
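+	/* qLogicalBlockCount (0Bh) is big-endian; assemble its eight bytes */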
+	lu_desc->lu_logblk_cnt =
+		SHIFT_BYTE_7((u64)
+			logical_buf[UNIT_DESC_PARAM_LOGICAL_BLK_COUNT]) |
+		SHIFT_BYTE_6((u64)
+			logical_buf[UNIT_DESC_PARAM_LOGICAL_BLK_COUNT + 1]) |
+		SHIFT_BYTE_5((u64)
+			logical_buf[UNIT_DESC_PARAM_LOGICAL_BLK_COUNT + 2]) |
+		SHIFT_BYTE_4((u64)
+			logical_buf[UNIT_DESC_PARAM_LOGICAL_BLK_COUNT + 3]) |
+		SHIFT_BYTE_3(
+			logical_buf[UNIT_DESC_PARAM_LOGICAL_BLK_COUNT + 4]) |
+		SHIFT_BYTE_2(
+			logical_buf[UNIT_DESC_PARAM_LOGICAL_BLK_COUNT + 5]) |
+		SHIFT_BYTE_1(
+			logical_buf[UNIT_DESC_PARAM_LOGICAL_BLK_COUNT + 6]) |
+		SHIFT_BYTE_0(
+			logical_buf[UNIT_DESC_PARAM_LOGICAL_BLK_COUNT + 7]);
+
+	if (logical_buf[UNIT_DESC_PARAM_LU_ENABLE] == LU_HPB_ENABLE)
+		lu_desc->lu_hpb_enable = true;
+	else
+		lu_desc->lu_hpb_enable = false;
+
+	lu_desc->lu_max_active_hpb_regions =
+		BE_BYTE(logical_buf, UNIT_DESC_HPB_LU_MAX_ACTIVE_REGIONS);
+	lu_desc->hpb_pinned_region_startidx =
+		BE_BYTE(logical_buf, UNIT_DESC_HPB_LU_PIN_REGION_START_OFFSET);
+	lu_desc->lu_num_hpb_pinned_regions =
+		BE_BYTE(logical_buf, UNIT_DESC_HPB_LU_NUM_PIN_REGIONS);
+
+	if (lu_desc->lu_hpb_enable) {
+		hpb_dbg(hba, "LUN(%d) [0A] bLogicalBlockSize %d\n",
+				lun, lu_desc->lu_logblk_size);
+		hpb_dbg(hba, "LUN(%d) [0B] qLogicalBlockCount %llu\n",
+				lun, lu_desc->lu_logblk_cnt);
+		hpb_dbg(hba, "LUN(%d) [03] bLuEnable %d\n",
+				lun, logical_buf[UNIT_DESC_PARAM_LU_ENABLE]);
+		hpb_dbg(hba, "LUN(%d) [06] bLuQueueDepth %d\n",
+				lun, lu_desc->lu_queue_depth);
+		hpb_dbg(hba, "LUN(%d) [23:24] wLUMaxActiveHPBRegions %d\n",
+				lun, lu_desc->lu_max_active_hpb_regions);
+		hpb_dbg(hba, "LUN(%d) [25:26] wHPBPinnedRegionStartIdx %d\n",
+				lun, lu_desc->hpb_pinned_region_startidx);
+		hpb_dbg(hba, "LUN(%d) [27:28] wNumHPBPinnedRegions %d\n",
+				lun, lu_desc->lu_num_hpb_pinned_regions);
+	}
+
+	if (lu_desc->lu_num_hpb_pinned_regions > 0) {
+		lu_desc->lu_hpb_pinned_end_offset =
+			lu_desc->hpb_pinned_region_startidx +
+			lu_desc->lu_num_hpb_pinned_regions - 1;
+	} else {
+		lu_desc->lu_hpb_pinned_end_offset = -1;
+	}
+
+	if (lu_desc->lu_hpb_enable)
+		pr_info("UFSHPB: Enable, LU: %d, REG: %d, PIN: %d - %d\n",
+			lun,
+			lu_desc->lu_max_active_hpb_regions,
+			lu_desc->hpb_pinned_region_startidx,
+			lu_desc->lu_num_hpb_pinned_regions);
+	return 0;
+}
+
+static int ufshpb_read_dev_desc_support(struct ufs_hba *hba,
+		struct ufshpb_func_desc *desc)
+{
+	u8 desc_buf[UFSHPB_QUERY_DESC_DEVICE_MAX_SIZE];
+	int err;
+
+	err = ufshpb_read_device_desc(hba, desc_buf,
+			UFSHPB_QUERY_DESC_DEVICE_MAX_SIZE);
+	if (err)
+		return err;
+
+	if (desc_buf[DEVICE_DESC_PARAM_FEAT_SUP] &
+			UFS_FEATURE_SUPPORT_HPB_BIT) {
+		hba->ufshpb_feat |= UFS_FEATURE_SUPPORT_HPB_BIT;
+		pr_info("UFSHPB: FeaturesSupport = support\n");
+	} else {
+		pr_info("UFSHPB: FeaturesSupport = not support\n");
+		return -ENODEV;
+	}
+
+	desc->lu_cnt = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
+	hpb_dbg(hba, "device lu count %d\n", desc->lu_cnt);
+
+	desc->hpb_ver =
+		(u16)SHIFT_BYTE_1(desc_buf[DEVICE_DESC_PARAM_HPB_VER]) |
+		(u16)SHIFT_BYTE_0(desc_buf[DEVICE_DESC_PARAM_HPB_VER + 1]);
+
+	pr_info("UFSHPB: Version = %.2x.%.2x, Driver Version = %.2x.%.2x\n",
+			GET_BYTE_1(desc->hpb_ver),
+			GET_BYTE_0(desc->hpb_ver),
+			GET_BYTE_1(UFSHPB_VER),
+			GET_BYTE_0(UFSHPB_VER));
+
+	if (desc->hpb_ver != UFSHPB_VER)
+		return -ENODEV;
+	return 0;
+}
+
+static int ufshpb_read_geo_desc_support(struct ufs_hba *hba,
+		struct ufshpb_func_desc *desc)
+{
+	int err;
+	u8 geometry_buf[UFSHPB_QUERY_DESC_GEOMETRY_MAX_SIZE];
+
+	err = ufshpb_read_geo_desc(hba, geometry_buf,
+				UFSHPB_QUERY_DESC_GEOMETRY_MAX_SIZE);
+	if (err)
+		return err;
+
+	desc->hpb_region_size = geometry_buf[GEOMETRY_DESC_HPB_REGION_SIZE];
+	desc->hpb_number_lu = geometry_buf[GEOMETRY_DESC_HPB_NUMBER_LU];
+	desc->hpb_subregion_size =
+			geometry_buf[GEOMETRY_DESC_HPB_SUBREGION_SIZE];
+	desc->hpb_device_max_active_regions =
+			BE_BYTE(geometry_buf,
+				GEOMETRY_DESC_HPB_DEVICE_MAX_ACTIVE_REGIONS);
+
+	hpb_dbg(hba, "[48] bHPBRegionSize %u\n", desc->hpb_region_size);
+	hpb_dbg(hba, "[49] bHPBNumberLU %u\n", desc->hpb_number_lu);
+	hpb_dbg(hba, "[4A] bHPBSubRegionSize %u\n", desc->hpb_subregion_size);
+	hpb_dbg(hba, "[4B:4C] wDeviceMaxActiveHPBRegions %u\n",
+			desc->hpb_device_max_active_regions);
+
+	if (desc->hpb_number_lu == 0) {
+		dev_warn(hba->dev, "UFSHPB: HPB is not supported\n");
+		return -ENODEV;
+	}
+	/* for activation */
+	hba->ufshpb_max_regions = desc->hpb_device_max_active_regions;
+	return 0;
+}
+
+int ufshpb_control_validation(struct ufs_hba *hba,
+				struct ufshpb_config_desc *config)
+{
+	unsigned int num_regions = 0;
+	int lun;
+
+	if (!(hba->ufshpb_feat & UFS_FEATURE_SUPPORT_HPB_BIT))
+		return -ENOTSUPP;
+
+	for (lun = 0 ; lun < UFS_UPIU_MAX_GENERAL_LUN ; lun++) {
+		unsigned char *unit = config->unit[lun];
+
+		if (unit[UFSHPB_CONF_LU_ENABLE] >= LU_SET_MAX)
+			return -EINVAL;
+
+		/* total should not exceed max_active_regions */
+		num_regions += unit[UFSHPB_CONF_ACTIVE_REGIONS] << 8;
+		num_regions += unit[UFSHPB_CONF_ACTIVE_REGIONS + 1];
+		if (num_regions > hba->ufshpb_max_regions)
+			return -EINVAL;
+	}
+	return 0;
+}
+
+static int ufshpb_control_activation(struct ufs_hba *hba, bool activate)
+{
+	struct ufshpb_config_desc config;
+	unsigned char *unit;
+	unsigned short num_regions;
+	unsigned short pinned_start;
+	unsigned short pinned_num;
+	unsigned char enable;
+	u32 length = UFSHPB_CONFIG_LEN;
+	int lun = 0;		/* LU:0 for Android */
+	int ret;
+
+	if (activate) {
+		/* pinned system image: 0 ~ 4GB range */
+		enable = LU_HPB_ENABLE;
+		num_regions = hba->ufshpb_max_regions;
+		pinned_start = 0;
+		pinned_num = 2;
+	} else {
+		enable = LU_ENABLE;
+		num_regions = 0;
+		pinned_start = 0;
+		pinned_num = 0;
+	}
+
+	pr_info("UFSHPB: Control - %s, LU: %d, REG: %d, PIN: %d - %d\n",
+			activate ? "Enable" : "Disable",
+			lun, num_regions, pinned_start, pinned_num);
+
+	ret = ufshpb_read_config_desc(hba, (u8 *)&config, length);
+	if (ret)
+		return ret;
+
+	unit = config.unit[lun];
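+	/* 16-bit fields in the configuration descriptor are big-endian */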
+	unit[UFSHPB_CONF_LU_ENABLE] = enable;
+	unit[UFSHPB_CONF_ACTIVE_REGIONS] = (num_regions >> 8) & 0xff;
+	unit[UFSHPB_CONF_ACTIVE_REGIONS + 1] = num_regions & 0xff;
+	unit[UFSHPB_CONF_PINNED_START] = (pinned_start >> 8) & 0xff;
+	unit[UFSHPB_CONF_PINNED_START + 1] = pinned_start & 0xff;
+	unit[UFSHPB_CONF_PINNED_NUM] = (pinned_num >> 8) & 0xff;
+	unit[UFSHPB_CONF_PINNED_NUM + 1] = pinned_num & 0xff;
+
+	return ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_WRITE_DESC,
+			QUERY_DESC_IDN_CONFIGURATION, 0, 1,
+			(u8 *)&config, &length);
+}
+
+static int ufshpb_init(struct ufs_hba *hba)
+{
+	struct ufshpb_func_desc func_desc;
+	int lun, ret;
+	int hpb_dev = 0;
+	int activate = CONFIG_SCSI_UFSHCD_HPB_ACTIVATE;
+	bool need_disable = false;
+
+	pm_runtime_get_sync(hba->dev);
+
+	ret = ufshpb_read_dev_desc_support(hba, &func_desc);
+	if (ret)
+		goto out_state;
+
+	ret = ufshpb_read_geo_desc_support(hba, &func_desc);
+	if (ret)
+		goto out_state;
+
+	ufshpb_init_constant();
+reinit:
+	for (lun = 0 ; lun < UFS_UPIU_MAX_GENERAL_LUN ; lun++) {
+		struct ufshpb_lu_desc lu_desc;
+
+		ret = ufshpb_get_hpb_lu_desc(hba, &lu_desc, lun);
+		if (ret)
+			goto out_state;
+
+		if (!lu_desc.lu_hpb_enable)
+			continue;
+
+		if (!activate) {
+			need_disable = true;
+			break;
+		}
+		hba->ufshpb_lup[lun] = kzalloc(sizeof(struct ufshpb_lu),
+								GFP_KERNEL);
+		if (!hba->ufshpb_lup[lun]) {
+			ret = -ENOMEM;
+			goto out_free_mem;
+		}
+
+		ret = ufshpb_lu_hpb_init(hba, hba->ufshpb_lup[lun],
+				&func_desc, &lu_desc, lun);
+		if (ret) {
+			if (ret == -ENODEV)
+				continue;
+			else
+				goto out_free_mem;
+		}
+		hpb_dev++;
+	}
+
+	if (hpb_dev)
+		goto done;
+
+	if (activate) {
+		ret = ufshpb_control_activation(hba, true);
+		if (!ret)
+			goto reinit;
+		goto out_free_mem;
+	}
+	if (need_disable) {
+		ufshpb_control_activation(hba, false);
+		ret = -ENOTSUPP;
+	}
+	goto out_free_mem;
+done:
+	INIT_WORK(&hba->ufshpb_eh_work, ufshpb_error_handler);
+	hba->ufshpb_state = HPB_PRESENT;
+	hba->issue_ioctl = false;
+	pm_runtime_mark_last_busy(hba->dev);
+	pm_runtime_put_noidle(hba->dev);
+	return 0;
+
+out_free_mem:
+	ufshpb_release(hba, HPB_NOT_SUPPORTED);
+out_state:
+	hba->ufshpb_state = HPB_NOT_SUPPORTED;
+	pm_runtime_mark_last_busy(hba->dev);
+	pm_runtime_put_noidle(hba->dev);
+	return ret;
+}
+
+static void ufshpb_map_loading_trigger(struct ufshpb_lu *hpb,
+		bool dirty, bool only_pinned, bool do_work_handler)
+{
+	int region, subregion;
+
+	if (do_work_handler)
+		goto work_out;
+
+	for (region = 0 ; region < hpb->regions_per_lu ; region++) {
+		struct ufshpb_region *cb;
+
+		cb = hpb->region_tbl + region;
+
+		if (cb->region_state != HPBREGION_ACTIVE &&
+				cb->region_state != HPBREGION_PINNED)
+			continue;
+
+		if (!only_pinned || cb->region_state == HPBREGION_PINNED) {
+			spin_lock(&hpb->hpb_lock);
+			for (subregion = 0; subregion < cb->subregion_count;
+								subregion++)
+				ufshpb_add_subregion_to_req_list(hpb,
+						cb->subregion_tbl + subregion);
+			spin_unlock(&hpb->hpb_lock);
+			do_work_handler = true;
+		}
+		if (dirty) {
+			for (subregion = 0; subregion < cb->subregion_count;
+								subregion++)
+				cb->subregion_tbl[subregion].subregion_state =
+							HPBSUBREGION_DIRTY;
+		}
+	}
+work_out:
+	if (do_work_handler)
+		schedule_work(&hpb->ufshpb_work);
+}
+
+static void ufshpb_purge_active_block(struct ufshpb_lu *hpb)
+{
+	int region, subregion;
+	int state;
+	struct ufshpb_region *cb;
+	struct ufshpb_subregion *cp;
+
+	spin_lock(&hpb->hpb_lock);
+	for (region = 0 ; region < hpb->regions_per_lu ; region++) {
+		cb = hpb->region_tbl + region;
+
+		if (cb->region_state == HPBREGION_INACTIVE) {
+			hpb_dbg(hpb->hba, "region %d inactive\n", region);
+			continue;
+		}
+
+		if (cb->region_state == HPBREGION_PINNED) {
+			state = HPBSUBREGION_DIRTY;
+		} else if (cb->region_state == HPBREGION_ACTIVE) {
+			state = HPBSUBREGION_UNUSED;
+			ufshpb_cleanup_lru_info(&hpb->lru_info, cb);
+		} else {
+			hpb_dbg(hpb->hba, "Unsupported state of region\n");
+			continue;
+		}
+
+		hpb_dbg(hpb->hba, "region %d state %d dft %d\n",
+				region, state,
+				hpb->debug_free_table);
+		for (subregion = 0 ; subregion < cb->subregion_count;
+							subregion++) {
+			cp = cb->subregion_tbl + subregion;
+
+			ufshpb_purge_active_page(hpb, cp, state);
+		}
+		hpb_dbg(hpb->hba, "region %d state %d dft %d\n",
+				region, state, hpb->debug_free_table);
+	}
+	spin_unlock(&hpb->hpb_lock);
+}
+
+static void ufshpb_retrieve_rsp_info(struct ufshpb_lu *hpb)
+{
+	struct ufshpb_rsp_info *rsp_info;
+	unsigned long flags;
+
+	while (1) {
+		spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+		rsp_info = list_first_entry_or_null(&hpb->lh_rsp_info,
+				struct ufshpb_rsp_info, list_rsp_info);
+		if (!rsp_info) {
+			spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+			break;
+		}
+		list_move_tail(&rsp_info->list_rsp_info,
+				&hpb->lh_rsp_info_free);
+		spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+		hpb_dbg(hpb->hba, "move rsp_info %p to free list %p\n",
+				rsp_info, &hpb->lh_rsp_info_free);
+	}
+}
+
+static void ufshpb_probe(struct ufs_hba *hba)
+{
+	struct ufshpb_lu *hpb;
+	int lu;
+
+	for (lu = 0 ; lu < UFS_UPIU_MAX_GENERAL_LUN ; lu++) {
+		hpb = hba->ufshpb_lup[lu];
+
+		if (hpb && hpb->lu_hpb_enable) {
+			ufshpb_cancel_jobs(hpb);
+			ufshpb_retrieve_rsp_info(hpb);
+			ufshpb_purge_active_block(hpb);
+			dev_info(hba->dev, "UFSHPB lun %d reset\n", lu);
+			tasklet_init(&hpb->ufshpb_tasklet,
+				ufshpb_tasklet_fn, (unsigned long)hpb);
+		}
+	}
+	hba->ufshpb_state = HPB_PRESENT;
+}
+
+static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb,
+		struct ufshpb_region *cb)
+{
+	int subregion;
+
+	for (subregion = 0 ; subregion < cb->subregion_count ; subregion++) {
+		struct ufshpb_subregion *cp;
+
+		cp = cb->subregion_tbl + subregion;
+		cp->subregion_state = HPBSUBREGION_UNUSED;
+		ufshpb_put_map_ctx(hpb, cp->mctx);
+	}
+
+	kfree(cb->subregion_tbl);
+}
+
+static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb)
+{
+	int region;
+
+	if (!hpb)
+		return;
+
+	for (region = 0 ; region < hpb->regions_per_lu ; region++) {
+		struct ufshpb_region *cb;
+
+		cb = hpb->region_tbl + region;
+		if (cb->region_state == HPBREGION_PINNED ||
+				cb->region_state == HPBREGION_ACTIVE) {
+			cb->region_state = HPBREGION_INACTIVE;
+
+			ufshpb_destroy_subregion_tbl(hpb, cb);
+		}
+	}
+
+	ufshpb_table_mempool_remove(hpb);
+	kfree(hpb->region_tbl);
+}
+
+void ufshpb_release(struct ufs_hba *hba, int state)
+{
+	int lun;
+
+	hba->ufshpb_state = HPB_FAILED;
+
+	for (lun = 0 ; lun < UFS_UPIU_MAX_GENERAL_LUN ; lun++) {
+		struct ufshpb_lu *hpb = hba->ufshpb_lup[lun];
+
+		if (!hpb)
+			continue;
+
+		hba->ufshpb_lup[lun] = NULL;
+
+		if (!hpb->lu_hpb_enable)
+			continue;
+
+		hpb->lu_hpb_enable = false;
+
+		ufshpb_cancel_jobs(hpb);
+
+		ufshpb_destroy_region_tbl(hpb);
+
+		ufshpb_req_mempool_remove(hpb);
+
+		kobject_uevent(&hpb->kobj, KOBJ_REMOVE);
+		/* TODO: also put the parent reference taken at kobject_add? */
+		kobject_del(&hpb->kobj);
+
+		kfree(hpb);
+	}
+
+	if (alloc_mctx != 0)
+		dev_warn(hba->dev, "warning: alloc_mctx %d\n", alloc_mctx);
+
+	hba->ufshpb_state = state;
+}
+
+void ufshpb_init_handler(struct work_struct *work)
+{
+	struct ufs_hba *hba;
+	struct delayed_work *dwork = to_delayed_work(work);
+#if defined(CONFIG_SCSI_SCAN_ASYNC)
+	unsigned long flags;
+#endif
+
+	hba = container_of(dwork, struct ufs_hba, ufshpb_init_work);
+
+	if (hba->ufshpb_state == HPB_NOT_SUPPORTED)
+		return;
+
+#if defined(CONFIG_SCSI_SCAN_ASYNC)
+	mutex_lock(&hba->host->scan_mutex);
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->host->async_scan == 1) {
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		mutex_unlock(&hba->host->scan_mutex);
+		schedule_delayed_work(&hba->ufshpb_init_work,
+						msecs_to_jiffies(100));
+		return;
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	mutex_unlock(&hba->host->scan_mutex);
+#endif
+	if (hba->ufshpb_state == HPB_NEED_INIT) {
+		int err = ufshpb_init(hba);
+
+		if (hba->ufshpb_state == HPB_NOT_SUPPORTED)
+			pr_info("UFSHPB: run without HPB - err=%d\n", err);
+	} else if (hba->ufshpb_state == HPB_RESET) {
+		ufshpb_probe(hba);
+	}
+}
+
+void ufshcd_init_hpb(struct ufs_hba *hba)
+{
+	int lun;
+
+	hba->ufshpb_feat = 0;
+	hba->ufshpb_state = HPB_NEED_INIT;
+	for (lun = 0 ; lun < UFS_UPIU_MAX_GENERAL_LUN ; lun++) {
+		hba->ufshpb_lup[lun] = NULL;
+		hba->sdev_ufs_lu[lun] = NULL;
+	}
+
+	INIT_DELAYED_WORK(&hba->ufshpb_init_work, ufshpb_init_handler);
+}
+
+static void ufshpb_error_handler(struct work_struct *work)
+{
+	struct ufs_hba *hba;
+
+	hba = container_of(work, struct ufs_hba, ufshpb_eh_work);
+
+	dev_warn(hba->dev, "UFSHPB driver runs without UFSHPB\n");
+	dev_warn(hba->dev, "UFSHPB will be removed from the kernel\n");
+
+	ufshpb_release(hba, HPB_FAILED);
+}
+
+static void ufshpb_stat_init(struct ufshpb_lu *hpb)
+{
+	atomic64_set(&hpb->hit, 0);
+	atomic64_set(&hpb->miss, 0);
+	atomic64_set(&hpb->region_miss, 0);
+	atomic64_set(&hpb->subregion_miss, 0);
+	atomic64_set(&hpb->entry_dirty_miss, 0);
+	atomic64_set(&hpb->map_req_cnt, 0);
+	atomic64_set(&hpb->region_evict, 0);
+	atomic64_set(&hpb->region_add, 0);
+	atomic64_set(&hpb->rb_fail, 0);
+}
+
+static ssize_t ufshpb_sysfs_info_lba_store(struct ufshpb_lu *hpb,
+		const char *buf, size_t count)
+{
+	unsigned long long ppn;
+	unsigned long value;
+	unsigned int lpn;
+	int region, subregion, subregion_offset;
+	struct ufshpb_region *cb;
+	struct ufshpb_subregion *cp;
+	int dirty;
+
+	if (kstrtoul(buf, 0, &value)) {
+		dev_err(HPB_DEV(hpb), "kstrtoul error\n");
+		return -EINVAL;
+	}
+
+	if (value >= hpb->lu_num_blocks * SECTORS_PER_BLOCK) {
+		dev_err(HPB_DEV(hpb),
+			"sector %lu exceeds capacity (%d blocks)\n",
+			value, hpb->lu_num_blocks);
+		return -EINVAL;
+	}
+	lpn = value / SECTORS_PER_BLOCK;
+
+	ufshpb_get_pos_from_lpn(hpb, lpn, &region, &subregion,
+						&subregion_offset);
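+	/*
+	 * Example (hypothetical geometry): with 4KB blocks and
+	 * entries_per_region = 4096, sector 40960 gives lpn 5120,
+	 * which falls in region 1; the remainder selects the
+	 * subregion and the entry offset within it.
+	 */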
+
+	cb = hpb->region_tbl + region;
+	cp = cb->subregion_tbl + subregion;
+
+	if (cb->region_state != HPBREGION_INACTIVE) {
+		ppn = ufshpb_get_ppn(cp->mctx, subregion_offset);
+		spin_lock_bh(&hpb->hpb_lock);
+		dirty = ufshpb_ppn_dirty_check(hpb, cp, subregion_offset);
+		spin_unlock_bh(&hpb->hpb_lock);
+	} else {
+		ppn = 0;
+		dirty = -1;
+	}
+
+	dev_info(HPB_DEV(hpb),
+		"sector %lu region %d state %d subregion %d state %d\n",
+			value, region, cb->region_state, subregion,
+			cp->subregion_state);
+	dev_info(HPB_DEV(hpb),
+		"sector %lu lpn %u ppn %llx dirty %d\n",
+			value, lpn, ppn, dirty);
+	return count;
+}
+
+static ssize_t ufshpb_sysfs_map_req_show(struct ufshpb_lu *hpb, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%lld %lld\n",
+			(long long)atomic64_read(&hpb->rb_noti_cnt),
+			(long long)atomic64_read(&hpb->map_req_cnt));
+}
+
+static ssize_t ufshpb_sysfs_count_reset_store(struct ufshpb_lu *hpb,
+		const char *buf, size_t count)
+{
+	unsigned long debug;
+
+	if (kstrtoul(buf, 0, &debug))
+		return -EINVAL;
+
+	ufshpb_stat_init(hpb);
+
+	return count;
+}
+
+static ssize_t ufshpb_sysfs_add_evict_show(struct ufshpb_lu *hpb, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%lld %lld\n",
+			(long long)atomic64_read(&hpb->region_add),
+			(long long)atomic64_read(&hpb->region_evict));
+}
+
+static ssize_t ufshpb_sysfs_hit_show(struct ufshpb_lu *hpb, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%lld\n",
+				(long long)atomic64_read(&hpb->hit));
+}
+
+static ssize_t ufshpb_sysfs_miss_show(struct ufshpb_lu *hpb, char *buf)
+{
+	long long region_miss, subregion_miss, entry_dirty_miss, rb_fail;
+
+	region_miss = atomic64_read(&hpb->region_miss);
+	subregion_miss = atomic64_read(&hpb->subregion_miss);
+	entry_dirty_miss = atomic64_read(&hpb->entry_dirty_miss);
+	rb_fail = atomic64_read(&hpb->rb_fail);
+
+	return snprintf(buf, PAGE_SIZE,
+		"%lld %lld %lld %lld %lld\n",
+			region_miss + subregion_miss + entry_dirty_miss,
+			region_miss, subregion_miss, entry_dirty_miss, rb_fail);
+}
+
+static ssize_t ufshpb_sysfs_version_show(struct ufshpb_lu *hpb, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE,
+			"%.2x.%.2x %.2x.%.2x\n",
+			GET_BYTE_1(hpb->hpb_ver), GET_BYTE_0(hpb->hpb_ver),
+			GET_BYTE_1(UFSHPB_DD_VER), GET_BYTE_0(UFSHPB_DD_VER));
+}
+
+static ssize_t ufshpb_sysfs_active_list_show(struct ufshpb_lu *hpb, char *buf)
+{
+	int ret = 0, count = 0;
+	struct ufshpb_region *region;
+
+	list_for_each_entry(region, &hpb->lru_info.lru, list_region) {
+		ret = snprintf(buf + count, PAGE_SIZE - count,
+				"%d %d ",
+				region->region, region->hit_count);
+		count += ret;
+	}
+	ret = snprintf(buf + count, PAGE_SIZE - count, "\n");
+	count += ret;
+
+	return count;
+}
+
+static ssize_t ufshpb_sysfs_active_block_status_show(struct ufshpb_lu *hpb,
+							char *buf)
+{
+	int ret = 0, count = 0, region;
+
+	for (region = 0 ; region < hpb->regions_per_lu ; region++) {
+		ret = snprintf(buf + count, PAGE_SIZE - count,
+				"%d:%d ",
+				region, hpb->region_tbl[region].region_state);
+		count += ret;
+	}
+
+	ret = snprintf(buf + count, PAGE_SIZE - count, "\n");
+	count += ret;
+
+	return count;
+}
+
+static ssize_t ufshpb_sysfs_map_loading_store(struct ufshpb_lu *hpb,
+		const char *buf, size_t count)
+{
+	unsigned long value;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	if (value > 2)
+		return -EINVAL;
+
+	if (value == 1)
+		ufshpb_map_loading_trigger(hpb, false, false, false);
+	else if (value == 2)
+		ufshpb_map_loading_trigger(hpb, false, false, true);
+
+	return count;
+}
+
+static ssize_t ufshpb_sysfs_map_disable_show(struct ufshpb_lu *hpb, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", hpb->force_map_req_disable);
+}
+
+static ssize_t ufshpb_sysfs_map_disable_store(struct ufshpb_lu *hpb,
+		const char *buf, size_t count)
+{
+	unsigned long value;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	if (value)
+		value = 1;
+
+	hpb->force_map_req_disable = value;
+	return count;
+}
+
+static ssize_t ufshpb_sysfs_disable_show(struct ufshpb_lu *hpb, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", hpb->force_disable);
+}
+
+static ssize_t ufshpb_sysfs_disable_store(struct ufshpb_lu *hpb,
+		const char *buf, size_t count)
+{
+	unsigned long value;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	if (value)
+		value = 1;
+
+	hpb->force_disable = value;
+	return count;
+}
+
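+/* last region queried via the is_active_group sysfs node (debug only) */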
+static int global_region;
+
+static inline bool is_region_active(struct ufshpb_lu *hpb, int region)
+{
+	return hpb->region_tbl[region].region_state == HPBREGION_ACTIVE ||
+		hpb->region_tbl[region].region_state == HPBREGION_PINNED;
+}
+
+static ssize_t ufshpb_sysfs_active_group_store(struct ufshpb_lu *hpb,
+		const char *buf, size_t count)
+{
+	unsigned long block;
+	int region;
+
+	if (kstrtoul(buf, 0, &block))
+		return -EINVAL;
+
+	region = block >> hpb->entries_per_region_shift;
+	if (region >= hpb->regions_per_lu) {
+		dev_err(HPB_DEV(hpb),
+			"error region %d max %d\n",
+			region, hpb->regions_per_lu);
+		region = hpb->regions_per_lu - 1;
+	}
+
+	global_region = region;
+
+	dev_info(HPB_DEV(hpb),
+		"block %lu region %d active %d\n",
+			block, region, is_region_active(hpb, region));
+
+	return count;
+}
+
+static ssize_t ufshpb_sysfs_active_group_show(struct ufshpb_lu *hpb, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE,
+			"%d\n",	is_region_active(hpb, global_region));
+}
+
+static struct ufshpb_sysfs_entry ufshpb_sysfs_entries[] = {
+	__ATTR(is_active_group, 0644,
+			ufshpb_sysfs_active_group_show,
+			ufshpb_sysfs_active_group_store),
+	__ATTR(read_16_disable, 0644,
+			ufshpb_sysfs_disable_show, ufshpb_sysfs_disable_store),
+	__ATTR(map_cmd_disable, 0644,
+			ufshpb_sysfs_map_disable_show,
+			ufshpb_sysfs_map_disable_store),
+	__ATTR(map_loading, 0200, NULL, ufshpb_sysfs_map_loading_store),
+	__ATTR(active_block_status, 0444,
+			ufshpb_sysfs_active_block_status_show, NULL),
+	__ATTR(HPBVersion, 0444, ufshpb_sysfs_version_show, NULL),
+	__ATTR(hit_count, 0444, ufshpb_sysfs_hit_show, NULL),
+	__ATTR(miss_count, 0444, ufshpb_sysfs_miss_show, NULL),
+	__ATTR(active_list, 0444, ufshpb_sysfs_active_list_show, NULL),
+	__ATTR(add_evict_count, 0444, ufshpb_sysfs_add_evict_show, NULL),
+	__ATTR(count_reset, 0200, NULL, ufshpb_sysfs_count_reset_store),
+	__ATTR(map_req_count, 0444, ufshpb_sysfs_map_req_show, NULL),
+	__ATTR(get_info_from_lba, 0200, NULL, ufshpb_sysfs_info_lba_store),
+	__ATTR_NULL
+};
+
+static ssize_t ufshpb_attr_show(struct kobject *kobj,
+		struct attribute *attr, char *page)
+{
+	struct ufshpb_sysfs_entry *entry;
+	struct ufshpb_lu *hpb;
+	ssize_t error;
+
+	entry = container_of(attr,
+			struct ufshpb_sysfs_entry, attr);
+	hpb = container_of(kobj, struct ufshpb_lu, kobj);
+
+	if (!entry->show)
+		return -EIO;
+
+	mutex_lock(&hpb->sysfs_lock);
+	error = entry->show(hpb, page);
+	mutex_unlock(&hpb->sysfs_lock);
+	return error;
+}
+
+static ssize_t ufshpb_attr_store(struct kobject *kobj,
+		struct attribute *attr,
+		const char *page, size_t length)
+{
+	struct ufshpb_sysfs_entry *entry;
+	struct ufshpb_lu *hpb;
+	ssize_t error;
+
+	entry = container_of(attr, struct ufshpb_sysfs_entry, attr);
+	hpb = container_of(kobj, struct ufshpb_lu, kobj);
+
+	if (!entry->store)
+		return -EIO;
+
+	mutex_lock(&hpb->sysfs_lock);
+	error = entry->store(hpb, page, length);
+	mutex_unlock(&hpb->sysfs_lock);
+	return error;
+}
+
+static const struct sysfs_ops ufshpb_sysfs_ops = {
+	.show = ufshpb_attr_show,
+	.store = ufshpb_attr_store,
+};
+
+static struct kobj_type ufshpb_ktype = {
+	.sysfs_ops = &ufshpb_sysfs_ops,
+	.release = NULL,
+};
+
+static int ufshpb_create_sysfs(struct ufs_hba *hba,
+		struct ufshpb_lu *hpb)
+{
+	struct device *dev = hba->dev;
+	struct ufshpb_sysfs_entry *entry;
+	int err;
+
+	hpb->sysfs_entries = ufshpb_sysfs_entries;
+
+	ufshpb_stat_init(hpb);
+
+	kobject_init(&hpb->kobj, &ufshpb_ktype);
+	mutex_init(&hpb->sysfs_lock);
+
+	err = kobject_add(&hpb->kobj, kobject_get(&dev->kobj),
+			"ufshpb_lu%d", hpb->lun);
+	if (!err) {
+		for (entry = hpb->sysfs_entries;
+				entry->attr.name != NULL ; entry++) {
+			if (sysfs_create_file(&hpb->kobj, &entry->attr))
+				break;
+		}
+		kobject_uevent(&hpb->kobj, KOBJ_ADD);
+	}
+
+	return err;
+}
diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h
new file mode 100644
index 0000000..7aad09b
--- /dev/null
+++ b/drivers/scsi/ufs/ufshpb.h
@@ -0,0 +1,414 @@
+/*
+ * Universal Flash Storage Host Performance Booster
+ *
+ * Copyright (C) 2017-2018 Samsung Electronics Co., Ltd.
+ * Copyright (C) 2018, Google, Inc.
+ *
+ * Authors:
+ *	Yongmyung Lee <ymhungry.lee@samsung.com>
+ *	Jinyoung Choi <j-young.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ *
+ * The Linux Foundation chooses to take subject only to the GPLv2
+ * license terms, and distributes only under these terms.
+ */
+
+#ifndef _UFSHPB_H_
+#define _UFSHPB_H_
+
+#include <linux/spinlock.h>
+#include <linux/circ_buf.h>
+#include <linux/workqueue.h>
+
+/* Version info */
+#define UFSHPB_VER				0x0103
+#define UFSHPB_DD_VER				0x0135
+
+/* Constant values */
+#define SECTOR					512
+#define BLOCK					4096
+#define SECTORS_PER_BLOCK			(BLOCK / SECTOR)
+#define BITS_PER_DWORD				32
+#define MAX_MAP_REQ				16
+#define MAX_ACTIVE_NUM				2
+#define MAX_INACTIVE_NUM			2
+
+#define HPB_ENTRY_SIZE				0x08
+#define OS_PAGE_SIZE				4096
+#define HPB_ENTREIS_PER_OS_PAGE			(OS_PAGE_SIZE / HPB_ENTRY_SIZE)
+#define IOCTL_DEV_CTX_MAX_SIZE			OS_PAGE_SIZE
+#define OS_PAGE_SHIFT				12
+
+/* Description */
+#define UFS_FEATURE_SUPPORT_HPB_BIT			0x80
+#define UFSHPB_QUERY_DESC_DEVICE_MAX_SIZE		0x48
+#define UFSHPB_QUERY_DESC_CONFIGURAION_MAX_SIZE		0xD0
+#define UFSHPB_QUERY_DESC_UNIT_MAX_SIZE			0x2C
+#define UFSHPB_QUERY_DESC_GEOMETRY_MAX_SIZE		0x50
+
+/* Configuration for HPB */
+#define UFSHPB_CONF_LU_ENABLE			0x00
+#define UFSHPB_CONF_ACTIVE_REGIONS		0x10
+#define UFSHPB_CONF_PINNED_START		0x12
+#define UFSHPB_CONF_PINNED_NUM			0x14
+
+/* Parameter Macros */
+#define HPB_DEV(h)	((h)->hba->dev)
+#define MAX_BVEC_SIZE	128
+
+/* Use for HPB activate */
+#define UFSHPB_CONFIG_LEN	0xd0
+
+enum ufshpb_lu_set {
+	LU_DISABLE	= 0x00,
+	LU_ENABLE	= 0x01,
+	LU_HPB_ENABLE	= 0x02,
+	LU_SET_MAX,
+};
+
+struct ufshpb_config_desc {
+	unsigned char conf_dev_desc[16];
+	unsigned char unit[UFS_UPIU_MAX_GENERAL_LUN][24];
+};
+
+/* Response UPIU types */
+#define HPB_RSP_NONE					0x00
+#define HPB_RSP_REQ_REGION_UPDATE			0x01
+#define PER_ACTIVE_INFO_BYTES				4
+#define PER_INACTIVE_INFO_BYTES				2
+
+/* Vendor-defined opcode */
+#define UFSHPB_READ_BUFFER				0xF9
+
+#define DEV_DATA_SEG_LEN				0x14
+#define DEV_SENSE_SEG_LEN				0x12
+#define DEV_DES_TYPE					0x80
+#define DEV_ADDITIONAL_LEN				0x11
+
+/* BYTE SHIFT */
+#define ZERO_BYTE_SHIFT					0
+#define ONE_BYTE_SHIFT					8
+#define TWO_BYTE_SHIFT					16
+#define THREE_BYTE_SHIFT				24
+#define FOUR_BYTE_SHIFT					32
+#define FIVE_BYTE_SHIFT					40
+#define SIX_BYTE_SHIFT					48
+#define SEVEN_BYTE_SHIFT				56
+
+#define SHIFT_BYTE_0(num)		((num) << ZERO_BYTE_SHIFT)
+#define SHIFT_BYTE_1(num)		((num) << ONE_BYTE_SHIFT)
+#define SHIFT_BYTE_2(num)		((num) << TWO_BYTE_SHIFT)
+#define SHIFT_BYTE_3(num)		((num) << THREE_BYTE_SHIFT)
+#define SHIFT_BYTE_4(num)		((num) << FOUR_BYTE_SHIFT)
+#define SHIFT_BYTE_5(num)		((num) << FIVE_BYTE_SHIFT)
+#define SHIFT_BYTE_6(num)		((num) << SIX_BYTE_SHIFT)
+#define SHIFT_BYTE_7(num)		((num) << SEVEN_BYTE_SHIFT)
+
+#define GET_BYTE_0(num)			(((num) >> ZERO_BYTE_SHIFT) & 0xff)
+#define GET_BYTE_1(num)			(((num) >> ONE_BYTE_SHIFT) & 0xff)
+#define GET_BYTE_2(num)			(((num) >> TWO_BYTE_SHIFT) & 0xff)
+#define GET_BYTE_3(num)			(((num) >> THREE_BYTE_SHIFT) & 0xff)
+#define GET_BYTE_4(num)			(((num) >> FOUR_BYTE_SHIFT) & 0xff)
+#define GET_BYTE_5(num)			(((num) >> FIVE_BYTE_SHIFT) & 0xff)
+#define GET_BYTE_6(num)			(((num) >> SIX_BYTE_SHIFT) & 0xff)
+#define GET_BYTE_7(num)			(((num) >> SEVEN_BYTE_SHIFT) & 0xff)
+
+#define BE_BYTE(p, i)							\
+		((u16) SHIFT_BYTE_1(*((u8 *)(p) + (i))) |		\
+		(u16) SHIFT_BYTE_0(*((u8 *)(p) + (i + 1))))
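+/*
+ * e.g. for u8 p[] = { 0x12, 0x34 }, BE_BYTE(p, 0) == 0x1234,
+ * i.e. a big-endian 16-bit descriptor field.
+ */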
+#define REGION_UNIT_SIZE(bit_offset)		(0x01 << (bit_offset))
+
+enum UFSHPB_STATE {
+	HPB_PRESENT = 1,
+	HPB_NOT_SUPPORTED = -1,
+	HPB_FAILED = -2,
+	HPB_NEED_INIT = 0,
+	HPB_RESET = -3,
+};
+
+enum HPBREGION_STATE {
+	HPBREGION_INACTIVE,
+	HPBREGION_ACTIVE,
+	HPBREGION_PINNED,
+};
+
+enum HPBSUBREGION_STATE {
+	HPBSUBREGION_UNUSED,
+	HPBSUBREGION_DIRTY,
+	HPBSUBREGION_CLEAN,
+	HPBSUBREGION_ISSUED,
+};
+
+struct ufshpb_func_desc {
+	/*** Device Descriptor ***/
+	/* 06h bNumberLU */
+	int lu_cnt;
+	/* 40h HPB Version */
+	u16 hpb_ver;
+
+	/*** Geometry Descriptor ***/
+	/* 48h bHPBRegionSize (UNIT: 512KB) */
+	u8 hpb_region_size;
+	/* 49h bHPBNumberLU */
+	u8 hpb_number_lu;
+	/* 4Ah bHPBSubRegionSize */
+	u8 hpb_subregion_size;
+	/* 4B:4Ch wDeviceMaxActiveHPBRegions */
+	u16 hpb_device_max_active_regions;
+};
+
+struct ufshpb_lu_desc {
+	/*** Unit Descriptor ***/
+	/* 03h bLUEnable */
+	int lu_enable;
+	/* 06h lu queue depth info*/
+	int lu_queue_depth;
+	/* 0Ah bLogicalBlockSize. default 0x0C = 4KB */
+	int lu_logblk_size;
+	/* 0Bh qLogicalBlockCount; same as the READ CAPACITY value */
+	u64 lu_logblk_cnt;
+
+	/* 23h:24h wLUMaxActiveHPBRegions */
+	u16 lu_max_active_hpb_regions;
+	/* 25h:26h wHPBPinnedRegionStartIdx */
+	u16 hpb_pinned_region_startidx;
+	/* 27h:28h wNumHPBPinnedRegions */
+	u16 lu_num_hpb_pinned_regions;
+
+	/* true when bLUEnable (03h) is 02h, i.e. LU_HPB_ENABLE */
+	bool lu_hpb_enable;
+
+	int lu_hpb_pinned_end_offset;
+};
+
+struct ufshpb_rsp_active_list {
+	u16 region[MAX_ACTIVE_NUM];
+	u16 subregion[MAX_ACTIVE_NUM];
+};
+
+struct ufshpb_rsp_inactive_list {
+	u16 region[MAX_INACTIVE_NUM];
+};
+
+struct ufshpb_rsp_update_entry {
+	unsigned int lpn;
+	unsigned long long ppn;
+};
+
+struct ufshpb_rsp_info {
+	int type;
+	int active_cnt;
+	int inactive_cnt;
+	struct ufshpb_rsp_active_list active_list;
+	struct ufshpb_rsp_inactive_list inactive_list;
+
+	__u64 RSP_start;
+	__u64 RSP_tasklet_enter;
+
+	struct list_head list_rsp_info;
+};
+
+struct ufshpb_rsp_field {
+	u8 sense_data_len[2];
+	u8 desc_type;
+	u8 additional_len;
+	u8 hpb_type;
+	u8 reserved;
+	u8 active_region_cnt;
+	u8 inactive_region_cnt;
+	u8 hpb_active_field[8];
+	u8 hpb_inactive_field[4];
+};
+
+struct ufshpb_map_ctx {
+	struct page **m_page;
+	unsigned int *ppn_dirty;
+
+	struct list_head list_table;
+};
+
+struct ufshpb_subregion {
+	struct ufshpb_map_ctx *mctx;
+	enum HPBSUBREGION_STATE subregion_state;
+	int region;
+	int subregion;
+
+	struct list_head list_subregion;
+};
+
+struct ufshpb_region {
+	struct ufshpb_subregion *subregion_tbl;
+	enum HPBREGION_STATE region_state;
+	int region;
+	int subregion_count;
+
+	/* fields below are used for LRU victim selection */
+	struct list_head list_region;
+	int hit_count;
+};
+
+struct ufshpb_map_req {
+	struct ufshpb_lu *hpb;
+	struct ufshpb_map_ctx *mctx;
+	struct request req;
+	struct bio bio;
+	struct bio_vec bvec[MAX_BVEC_SIZE];
+	void (*end_io)(struct request *rq, int err);
+	void *end_io_data;
+	int region;
+	int subregion;
+	int lun;
+	int retry_cnt;
+
+	/* for debug : RSP Profiling */
+	__u64 RSP_start; // get the request from device
+	__u64 RSP_tasklet_enter1; // tasklet schedule time
+	__u64 RSP_issue; // issue scsi cmd
+	__u64 RSP_endio;
+	__u64 RSP_tasklet_enter2;
+	__u64 RSP_end;	 // complete the request
+
+	char sense[SCSI_SENSE_BUFFERSIZE];
+
+	struct list_head list_map_req;
+};
+
+enum selection_type {
+	LRU = 1,
+	LFU = 2,
+};
+
+struct victim_select_info {
+	int selection_type;
+	struct list_head lru;
+	int max_lru_active_cnt;	// supported hpb #region - pinned #region
+	atomic64_t active_cnt;
+};
+
+struct ufshpb_lu {
+	struct ufshpb_region *region_tbl;
+	struct ufshpb_rsp_info *rsp_info;
+	struct ufshpb_map_req *map_req;
+
+	struct list_head lh_map_ctx;
+	struct list_head lh_subregion_req;
+	struct list_head lh_rsp_info;
+
+	struct list_head lh_rsp_info_free;
+	struct list_head lh_map_req_free;
+	struct list_head lh_map_req_retry;
+	int debug_free_table;
+
+	bool lu_hpb_enable;
+
+	struct work_struct ufshpb_work;
+	struct delayed_work ufshpb_retry_work;
+	struct tasklet_struct ufshpb_tasklet;
+	struct bio_vec bvec[MAX_BVEC_SIZE];
+
+	int subregions_per_lu;
+	int regions_per_lu;
+	int subregion_mem_size;
+
+	/* for selecting victim */
+	struct victim_select_info lru_info;
+
+	int hpb_ver;
+	int lu_max_active_regions;
+
+	int entries_per_subregion;
+	int entries_per_subregion_shift;
+	int entries_per_subregion_mask;
+
+	int entries_per_region_shift;
+	int entries_per_region_mask;
+	int subregions_per_region;
+
+	int dwords_per_subregion;
+	unsigned long long subregion_unit_size;
+
+	int mpage_bytes;
+	int mpages_per_subregion;
+
+	/* for debug constant variables */
+	int lu_num_blocks;
+
+	int lun;
+
+	struct ufs_hba *hba;
+
+	spinlock_t hpb_lock;
+	spinlock_t rsp_list_lock;
+
+	struct kobject kobj;
+	struct mutex sysfs_lock;
+	struct ufshpb_sysfs_entry *sysfs_entries;
+
+	/* for debug */
+	bool force_disable;
+	bool force_map_req_disable;
+	bool read_buf_debug;
+	atomic64_t hit;
+	atomic64_t miss;
+	atomic64_t region_miss;
+	atomic64_t subregion_miss;
+	atomic64_t entry_dirty_miss;
+	atomic64_t rb_noti_cnt;
+	atomic64_t map_req_cnt;
+	atomic64_t region_add;
+	atomic64_t region_evict;
+	atomic64_t rb_fail;
+};
+
+struct ufshpb_sysfs_entry {
+	struct attribute    attr;
+	ssize_t (*show)(struct ufshpb_lu *hpb, char *buf);
+	ssize_t (*store)(struct ufshpb_lu *hpb, const char *, size_t);
+};
+
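+/*
+ * Local fallback for kernels that do not provide kvzalloc(): try a
+ * physically contiguous zeroed allocation first, then fall back to
+ * vmalloc.
+ */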
+static inline void *kvzalloc(size_t size, gfp_t flags)
+{
+	void *ret;
+
+	ret = kzalloc(size, flags | __GFP_NOWARN);
+	if (!ret)
+		ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
+	return ret;
+}
+
+struct ufshcd_lrb;
+
+void ufshcd_init_hpb(struct ufs_hba *hba);
+void ufshpb_init_handler(struct work_struct *work);
+void ufshpb_prep_fn(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
+void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
+void ufshpb_release(struct ufs_hba *hba, int state);
+int ufshpb_issue_req_dev_ctx(struct ufshpb_lu *hpb, unsigned char *buf,
+				int buf_length);
+int ufshpb_control_validation(struct ufs_hba *hba,
+				struct ufshpb_config_desc *config);
+#endif /* End of Header */
diff --git a/include/uapi/scsi/ufs/ufs.h b/include/uapi/scsi/ufs/ufs.h
index 8a76271..6d8ef80 100644
--- a/include/uapi/scsi/ufs/ufs.h
+++ b/include/uapi/scsi/ufs/ufs.h
@@ -73,4 +73,13 @@
 	UPIU_QUERY_OPCODE_TOGGLE_FLAG	= 0x8,
 	UPIU_QUERY_OPCODE_MAX,
 };
+
+/*
+ * high 16 bits for HPB. E.g.,
+ *  opcode = (UFS_IOCTL_QUERY_OPCODE << 16) | UPIU_QUERY_OPCODE_READ_DESC
+ */
+#define UPIU_QUERY_OPCODE_HIGH_HPB	0x5500
+#define UPIU_QUERY_OPCODE_HIGH(opcode)	((opcode) >> 16)
+#define UPIU_QUERY_OPCODE_LOW(opcode)	((opcode) & 0xffff)
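+/*
+ * e.g. for opcode 0x55000001, UPIU_QUERY_OPCODE_HIGH() yields
+ * UPIU_QUERY_OPCODE_HIGH_HPB and UPIU_QUERY_OPCODE_LOW() yields
+ * UPIU_QUERY_OPCODE_READ_DESC.
+ */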
+
 #endif /* UAPI_UFS_H_ */