Keystore Applet Chaabi Integration
This patch adds the sep54 driver for Chaabi.

Change-Id: I056874c8ff52fe13fe1e0da7ba4a805d62be63d0
Tracked-On: https://jira01.devtools.intel.com/browse/BP-21
Signed-off-by: Khushboo Bindlish <khushboo.bindlish@intel.com>
diff --git a/drivers/staging/sep54/Kconfig b/drivers/staging/sep54/Kconfig
new file mode 100644
index 0000000..d502943
--- /dev/null
+++ b/drivers/staging/sep54/Kconfig
@@ -0,0 +1,13 @@
+config DX_SEP54
+	tristate "Discretix SEP driver (CC54)"
+	depends on PCI && MMC
+	select CRYPTO_BLKCIPHER
+	help
+	  Discretix SEP driver for CC54. It drives the security processor
+	  subsystem on board the Intel Mobile Internet Device and adds SEP
+	  availability to the kernel crypto infrastructure.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called sep54.
+
+	  If unsure, say N.
+
diff --git a/drivers/staging/sep54/Makefile b/drivers/staging/sep54/Makefile
new file mode 100644
index 0000000..bd556d0
--- /dev/null
+++ b/drivers/staging/sep54/Makefile
@@ -0,0 +1,14 @@
+obj-$(CONFIG_DX_SEP54) += sep54.o
+sep54-objs := dx_driver.o sep_init.o crypto_ctx_mgr.o sep_sysfs.o \
+                   desc_mgr.o lli_mgr.o crypto_api.o sep_request_mgr.o \
+                   sep_power.o sepapp.o crypto_hwk.o
+
+ifeq ($(CONFIG_COMPAT),y)
+	sep54-objs += sep_compat_ioctl.o
+endif
+
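+# Note: -DDEBUG and -DSEP_HWK_UNIT_TEST below enable debug logging and the
+# hardware-key unit-test code; they are development flags.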
+ccflags-y += -DSEP_SUPPORT_SHA=256 -DCONFIG_NOT_COHERENT_CACHE -DSEP_HWK_UNIT_TEST -DDEBUG
+
+ifeq ($(CONFIG_PM_RUNTIME),y)
+	ccflags-y += -DSEP_RUNTIME_PM
+endif
diff --git a/drivers/staging/sep54/crypto_api.c b/drivers/staging/sep54/crypto_api.c
new file mode 100644
index 0000000..6b23fa8
--- /dev/null
+++ b/drivers/staging/sep54/crypto_api.c
@@ -0,0 +1,1526 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+/* \file crypto_api.c - Implementation of wrappers for Linux Crypto API */
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_CRYPTO_API
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/workqueue.h>
+#include "dx_driver_abi.h"
+#include "dx_driver.h"
+#include "sep_power.h"
+#include "crypto_ctx_mgr.h"
+#include "crypto_api.h"
+#include "sepapp.h"
+#include "sep_applets.h"
+#include "dx_sepapp_kapi.h"
+
+#include <linux/sched.h>
+
+#define CRYPTO_API_QID 0
+/* Priority assigned to our algorithm implementations */
+#define DX_CRYPTO_PRIO (300 + (100 * CRYPTO_API_QID))
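+/* A priority of 300 places these implementations above the kernel's generic
+ * software algorithms (typically priority 0-100), so they are preferred
+ * once registered. */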
+
+#define SYMCIPHER_ALG_NAME_LEN 8	/* Format: "mod(alg)" */
+#define SYMCIPHER_ALG_NAME_MODE_OFFSET 0
+#define SYMCIPHER_ALG_NAME_MODE_SIZE 3
+#define SYMCIPHER_ALG_NAME_ALG_OFFSET 4
+#define SYMCIPHER_ALG_NAME_ALG_SIZE 3
+
+#define CMD_DO_CRYPTO 7
+#define DISK_ENC_APP_UUID "INTEL DISK ENC01"
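+/* The "xxx(aes)" transform is routed to a TEE applet instead of the SW
+ * descriptors queue: symcipher_ctx_init() opens a session to the applet
+ * identified by DISK_ENC_APP_UUID and symcipher_process() invokes it with
+ * the CMD_DO_CRYPTO command. */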
+
+/**
+ * struct async_digest_req_ctx - Context for async. digest algorithms requests
+ * @host_ctx:	Host crypto context allocated per request
+ * @result:	Where to copy the digest result.
+ *		When NULL the result is retained in the sep_ctx until "final"
+ *		and this field holds the pointer to its location.
+ * @async_req:	The generic async request context for completion notification
+ */
+struct async_digest_req_ctx {
+	union {
+		struct host_crypto_ctx_hash hash_ctx;
+		struct host_crypto_ctx_mac mac_ctx;
+	} host_ctx;
+	u8 *result;
+	struct async_req_ctx async_req;
+};
+
+/* Client context for the Crypto API operations */
+/* To be initialized by sep_setup */
+static struct sep_client_ctx crypto_api_ctx;
+static struct dma_pool *sep_ctx_pool;
+
+/* Functions from the main driver code that are shared with this module */
+int prepare_data_for_sep(struct sep_op_ctx *op_ctx,
+			 u8 __user *data_in,
+			 struct scatterlist *sgl_in,
+			 u8 __user *data_out,
+			 struct scatterlist *sgl_out,
+			 u32 data_in_size,
+			 enum crypto_data_intent data_intent);
+
+/* Local (static) functions */
+static void release_symcipher_ctx(struct sep_op_ctx *op_ctx,
+				  u8 *iv_crypto);
+
+/****************************************/
+/* Block cipher algorithms declarations */
+/****************************************/
+static int symcipher_set_key(struct crypto_ablkcipher *tfm,
+			     const u8 *key, unsigned int keylen);
+static int symcipher_encrypt(struct ablkcipher_request *req);
+static int symcipher_decrypt(struct ablkcipher_request *req);
+static int symcipher_ctx_init(struct crypto_tfm *tfm);
+static void crypto_ctx_cleanup(struct crypto_tfm *tfm);
+
+/* Template for block ciphers */
+static struct crypto_alg blkcipher_algs_base = {
+	.cra_priority = DX_CRYPTO_PRIO,
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_ctxsize = sizeof(struct host_crypto_ctx_sym_cipher),
+	.cra_alignmask = 0,	/* Cannot use this due to bug in kernel */
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_u = {
+		  .ablkcipher = {
+				 .setkey = symcipher_set_key,
+				 .encrypt = symcipher_encrypt,
+				 .decrypt = symcipher_decrypt}
+		  },
+	.cra_init = symcipher_ctx_init,
+	.cra_exit = crypto_ctx_cleanup
+};
+
+/* Block cipher specific attributes */
+static struct crypto_alg dx_ablkcipher_algs[] = {
+	{			/* xxx(aes) */
+	 .cra_name = "xxx(aes)",
+	 .cra_driver_name = MODULE_NAME "-aes-xxx",
+	 .cra_blocksize = SEP_AES_BLOCK_SIZE,
+	 .cra_u = {
+		   .ablkcipher = {
+				  .min_keysize = SEP_AES_128_BIT_KEY_SIZE,
+				  .max_keysize = SEP_AES_256_BIT_KEY_SIZE,
+				  .ivsize = SEP_AES_IV_SIZE}
+		   }
+	 },
+#ifdef USE_SEP54_AES
+	{			/* ecb(aes) */
+	 .cra_name = "ecb(aes)",
+	 .cra_driver_name = MODULE_NAME "-aes-ecb",
+	 .cra_blocksize = SEP_AES_BLOCK_SIZE,
+	 .cra_u = {
+		   .ablkcipher = {
+				  .min_keysize = SEP_AES_128_BIT_KEY_SIZE,
+				  .max_keysize = SEP_AES_256_BIT_KEY_SIZE,
+				  .ivsize = SEP_AES_IV_SIZE}
+		   }
+	 },
+	{			/* cbc(aes) */
+	 .cra_name = "cbc(aes)",
+	 .cra_driver_name = MODULE_NAME "-aes-cbc",
+	 .cra_blocksize = SEP_AES_BLOCK_SIZE,
+	 .cra_u = {
+		   .ablkcipher = {
+				  .min_keysize = SEP_AES_128_BIT_KEY_SIZE,
+				  .max_keysize = SEP_AES_256_BIT_KEY_SIZE,
+				  .ivsize = SEP_AES_IV_SIZE}
+		   }
+	 },
+	{			/* ctr(aes) */
+	 .cra_name = "ctr(aes)",
+	 .cra_driver_name = MODULE_NAME "-aes-ctr",
+	 .cra_blocksize = SEP_AES_BLOCK_SIZE,
+	 .cra_u = {
+		   .ablkcipher = {
+				  .min_keysize = SEP_AES_128_BIT_KEY_SIZE,
+				  .max_keysize = SEP_AES_256_BIT_KEY_SIZE,
+				  .ivsize = SEP_AES_IV_SIZE}
+		   }
+	 },
+	{			/* xts(aes) */
+	 .cra_name = "xts(aes)",
+	 .cra_driver_name = MODULE_NAME "-aes-xts",
+	 .cra_blocksize = SEP_AES_BLOCK_SIZE,
+	 .cra_u = {
+			/* AES-XTS uses two keys, so the key size is doubled */
+		   .ablkcipher = {
+				  .min_keysize = SEP_AES_128_BIT_KEY_SIZE * 2,
+				  .max_keysize = SEP_AES_256_BIT_KEY_SIZE * 2,
+				  .ivsize = SEP_AES_IV_SIZE}
+		   }
+	 },
+#endif /* USE_SEP54_AES */
+	{			/* ecb(des) */
+	 .cra_name = "ecb(des)",
+	 .cra_driver_name = MODULE_NAME "-des-ecb",
+	 .cra_blocksize = SEP_DES_BLOCK_SIZE,
+	 .cra_u = {
+		   .ablkcipher = {
+				  .min_keysize = SEP_DES_ONE_KEY_SIZE,
+				  .max_keysize = SEP_DES_ONE_KEY_SIZE,
+				  .ivsize = SEP_DES_IV_SIZE}
+		   }
+	 },
+	{			/* cbc(des) */
+	 .cra_name = "cbc(des)",
+	 .cra_driver_name = MODULE_NAME "-des-cbc",
+	 .cra_blocksize = SEP_DES_BLOCK_SIZE,
+	 .cra_u = {
+		   .ablkcipher = {
+				  .min_keysize = SEP_DES_ONE_KEY_SIZE,
+				  .max_keysize = SEP_DES_ONE_KEY_SIZE,
+				  .ivsize = SEP_DES_IV_SIZE}
+		   }
+	 },
+	{			/* ecb(des3_ede) */
+	 .cra_name = "ecb(des3_ede)",
+	 .cra_driver_name = MODULE_NAME "-des3-ecb",
+	 .cra_blocksize = SEP_DES_BLOCK_SIZE,
+	 .cra_u = {
+		   .ablkcipher = {
+				  .min_keysize = SEP_DES_TRIPLE_KEY_SIZE,
+				  .max_keysize = SEP_DES_TRIPLE_KEY_SIZE,
+				  .ivsize = SEP_DES_IV_SIZE}
+		   }
+	 },
+	{			/* cbc(des3_ede) */
+	 .cra_name = "cbc(des3_ede)",
+	 .cra_driver_name = MODULE_NAME "-des3-cbc",
+	 .cra_blocksize = SEP_DES_BLOCK_SIZE,
+	 .cra_u = {
+		   .ablkcipher = {
+				  .min_keysize = SEP_DES_TRIPLE_KEY_SIZE,
+				  .max_keysize = SEP_DES_TRIPLE_KEY_SIZE,
+				  .ivsize = SEP_DES_IV_SIZE}
+		   }
+	 }
+};				/* dx_ablkcipher_algs[] */
+
+#define DX_ABLKCIPHER_NUM ARRAY_SIZE(dx_ablkcipher_algs)
+
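+/* Cipher type per algorithm, indexed in lock-step with dx_ablkcipher_algs[]
+ * (see get_symcipher_tfm_cipher_type()). The des3_ede entries reuse the DES
+ * cipher types; the triple-DES key size is what selects 3DES. */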
+static const enum dxdi_sym_cipher_type dx_algs_cipher_types[] = {
+	DXDI_SYMCIPHER_AES_XXX,
+#ifdef USE_SEP54_AES
+	DXDI_SYMCIPHER_AES_ECB,
+	DXDI_SYMCIPHER_AES_CBC,
+	DXDI_SYMCIPHER_AES_CTR,
+	DXDI_SYMCIPHER_AES_XTS,
+#endif
+	DXDI_SYMCIPHER_DES_ECB,
+	DXDI_SYMCIPHER_DES_CBC,
+	DXDI_SYMCIPHER_DES_ECB,
+	DXDI_SYMCIPHER_DES_CBC,
+};
+
+/*********************************************/
+/* Digest (hash/MAC) algorithms declarations */
+/*********************************************/
+static int digest_tfm_init(struct crypto_tfm *tfm);
+static int digest_init(struct ahash_request *req);
+static int digest_update(struct ahash_request *req);
+static int digest_final(struct ahash_request *req);
+static int digest_finup(struct ahash_request *req);
+static int digest_integrated(struct ahash_request *req);
+static int mac_setkey(struct crypto_ahash *tfm,
+		      const u8 *key, unsigned int keylen);
+
+/* Save set key in tfm ctx */
+struct mac_key_data {
+	u32 key_size;	/* In octets */
+	u8 key[DXDI_MAC_KEY_SIZE_MAX];
+};
+
+/* Description of a digest (hash/MAC) algorithm */
+struct dx_digest_alg {
+	enum dxdi_hash_type hash_type;
+	enum dxdi_mac_type mac_type;
+	struct ahash_alg ahash;
+};
+
+/* Common attributes for all the digest (hash/MAC) algorithms */
+static struct ahash_alg digest_algs_base = {
+	.init = digest_init,
+	.update = digest_update,
+	.final = digest_final,
+	.finup = digest_finup,
+	.digest = digest_integrated,
+	.halg.base = {
+		      .cra_type = &crypto_ahash_type,
+		      .cra_priority = DX_CRYPTO_PRIO,
+		      .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+		      .cra_alignmask = 0,
+		      .cra_module = THIS_MODULE,
+		      .cra_init = digest_tfm_init}
+};
+
+/* Algorithm specific attributes */
+static struct dx_digest_alg dx_digest_algs[] = {
+#ifdef USE_SEP54_AHASH
+	{			/* sha1 */
+	 .hash_type = DXDI_HASH_SHA1,
+	 .mac_type = DXDI_MAC_NONE,
+	 .ahash = {
+		   .halg.base = {
+				 .cra_name = "sha1",
+				 .cra_driver_name = MODULE_NAME "-sha1",
+				 .cra_blocksize = SHA1_BLOCK_SIZE},
+		   .halg.digestsize = SHA1_DIGEST_SIZE,
+		   .halg.statesize = SHA1_BLOCK_SIZE}
+	 },
+	{			/* sha224 */
+	 .hash_type = DXDI_HASH_SHA224,
+	 .mac_type = DXDI_MAC_NONE,
+	 .ahash = {
+		   .halg.base = {
+				 .cra_name = "sha224",
+				 .cra_driver_name = MODULE_NAME "-sha224",
+				 .cra_blocksize = SHA224_BLOCK_SIZE},
+		   .halg.digestsize = SHA224_DIGEST_SIZE,
+		   .halg.statesize = SHA224_BLOCK_SIZE}
+	 },
+	{			/* sha256 */
+	 .hash_type = DXDI_HASH_SHA256,
+	 .mac_type = DXDI_MAC_NONE,
+	 .ahash = {
+		   .halg.base = {
+				 .cra_name = "sha256",
+				 .cra_driver_name = MODULE_NAME "-sha256",
+				 .cra_blocksize = SHA256_BLOCK_SIZE},
+		   .halg.digestsize = SHA256_DIGEST_SIZE,
+		   .halg.statesize = SHA256_BLOCK_SIZE}
+	 },
+	{			/* hmac(sha1) */
+	 .hash_type = DXDI_HASH_SHA1,
+	 .mac_type = DXDI_MAC_HMAC,
+	 .ahash = {
+		   .setkey = mac_setkey,
+		   .halg.base = {
+				 .cra_name = "hmac(sha1)",
+				 .cra_driver_name = MODULE_NAME "-hmac-sha1",
+				 .cra_blocksize = SHA1_BLOCK_SIZE,
+				 .cra_ctxsize = sizeof(struct mac_key_data)
+				 },
+		   .halg.digestsize = SHA1_DIGEST_SIZE,
+		   .halg.statesize = SHA1_BLOCK_SIZE}
+	 },
+	{			/* hmac(sha224) */
+	 .hash_type = DXDI_HASH_SHA224,
+	 .mac_type = DXDI_MAC_HMAC,
+	 .ahash = {
+		   .setkey = mac_setkey,
+		   .halg.base = {
+				 .cra_name = "hmac(sha224)",
+				 .cra_driver_name = MODULE_NAME "-hmac-sha224",
+				 .cra_blocksize = SHA224_BLOCK_SIZE,
+				 .cra_ctxsize = sizeof(struct mac_key_data)
+				 },
+		   .halg.digestsize = SHA224_DIGEST_SIZE,
+		   .halg.statesize = SHA224_BLOCK_SIZE}
+	 },
+	{			/* hmac(sha256) */
+	 .hash_type = DXDI_HASH_SHA256,
+	 .mac_type = DXDI_MAC_HMAC,
+	 .ahash = {
+		   .setkey = mac_setkey,
+		   .halg.base = {
+				 .cra_name = "hmac(sha256)",
+				 .cra_driver_name = MODULE_NAME "-hmac-sha256",
+				 .cra_blocksize = SHA256_BLOCK_SIZE,
+				 .cra_ctxsize = sizeof(struct mac_key_data)
+				 },
+		   .halg.digestsize = SHA256_DIGEST_SIZE,
+		   .halg.statesize = SHA256_BLOCK_SIZE}
+	 },
+#ifdef USE_SEP54_AES
+	{			/* xcbc(aes) */
+	 .hash_type = DXDI_HASH_NONE,
+	 .mac_type = DXDI_MAC_AES_XCBC_MAC,
+	 .ahash = {
+		   .setkey = mac_setkey,
+		   .halg.base = {
+				 .cra_name = "xcbc(aes)",
+				 .cra_driver_name = MODULE_NAME "-aes-xcbc",
+				 .cra_blocksize = SEP_AES_BLOCK_SIZE,
+				 .cra_ctxsize = sizeof(struct mac_key_data)
+				 },
+		   .halg.digestsize = SEP_AES_BLOCK_SIZE,
+		   .halg.statesize = SEP_AES_BLOCK_SIZE}
+	 },
+	{			/* cmac(aes) */
+	 .hash_type = DXDI_HASH_NONE,
+	 .mac_type = DXDI_MAC_AES_CMAC,
+	 .ahash = {
+		   .setkey = mac_setkey,
+		   .halg.base = {
+				 .cra_name = "cmac(aes)",
+				 .cra_driver_name = MODULE_NAME "-aes-cmac",
+				 .cra_blocksize = SEP_AES_BLOCK_SIZE,
+				 .cra_ctxsize = sizeof(struct mac_key_data)
+				 },
+		   .halg.digestsize = SEP_AES_BLOCK_SIZE,
+		   .halg.statesize = SEP_AES_BLOCK_SIZE}
+	 }
+#endif /* USE_SEP54_AES */
+#endif /* USE_SEP54_AHASH */
+};				/* dx_digest_algs[] */
+
+#define DX_DIGEST_NUM ARRAY_SIZE(dx_digest_algs)
+
+static void crypto_ctx_cleanup(struct crypto_tfm *tfm)
+{
+	struct host_crypto_ctx *host_ctx_p = crypto_tfm_ctx(tfm);
+	struct device *mydev = crypto_api_ctx.drv_data->sep_data->dev;
+	struct client_crypto_ctx_info _ctx_info;
+	struct client_crypto_ctx_info *ctx_info = &_ctx_info;
+	int rc;
+
+	pr_debug("Cleaning context @%p for %s\n",
+		      host_ctx_p, crypto_tfm_alg_name(tfm));
+
+	rc = ctxmgr_map_kernel_ctx(ctx_info, mydev, host_ctx_p->alg_class,
+				   (struct host_crypto_ctx *)host_ctx_p, NULL,
+				   0);
+	if (rc != 0) {
+		pr_err("Failed mapping context @%p (rc=%d)\n",
+			    host_ctx_p, rc);
+		return;
+	}
+
+	/* New TEE method */
+	if (!memcmp(crypto_tfm_alg_name(tfm), "xxx(aes)", 8)) {
+		if (dx_sepapp_session_close(host_ctx_p->sctx,
+						host_ctx_p->sess_id))
+			BUG(); /* TODO */
+		dx_sepapp_context_free(host_ctx_p->sctx);
+	}
+
+	ctxmgr_set_ctx_state(ctx_info, CTX_STATE_UNINITIALIZED);
+
+	ctxmgr_unmap_kernel_ctx(ctx_info);
+}
+
+/**
+ * dispatch_crypto_op() - Dispatch (async.) CRYPTO_OP descriptor operation
+ * @op_ctx:		Operation context
+ * @may_backlog:	If software queue is full, may be put in backlog queue
+ * @do_init:		Initialize given crypto context
+ * @proc_mode:		Processing mode code
+ * @keep_in_cache:	Retain crypto context in cache after dispatching req.
+ *
+ * Returns -EINPROGRESS on successful dispatch into the SW descriptors queue.
+ * Returns -EBUSY if may_backlog==true and the descriptor was enqueued in
+ * the backlog queue.
+ * Returns -ENOMEM if the queue is full and the request cannot be enqueued
+ * in the backlog queue.
+ */
+static int dispatch_crypto_op(struct sep_op_ctx *op_ctx, bool may_backlog,
+			      bool do_init, enum sep_proc_mode proc_mode,
+			      bool keep_in_cache)
+{
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct client_crypto_ctx_info *ctx_info = &op_ctx->ctx_info;
+	int sep_ctx_load_req;
+	struct crypto_ctx_uid ctx_id = ctxmgr_get_ctx_id(ctx_info);
+	int rc;
+	struct sep_sw_desc desc;
+
+	/* Start critical section -
+	   cache allocation must be coupled to descriptor enqueue */
+	mutex_lock(&drvdata->desc_queue_sequencer);
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_get();
+#endif
+	ctxmgr_set_sep_cache_idx(ctx_info,
+				 ctxmgr_sep_cache_alloc(drvdata->sep_cache,
+							ctx_id,
+							&sep_ctx_load_req));
+	desc_q_pack_crypto_op_desc(&desc, op_ctx, sep_ctx_load_req, do_init,
+				   proc_mode);
+	/* op_state must be updated before dispatching descriptor */
+	rc = desc_q_enqueue(drvdata->desc_queue, &desc, may_backlog);
+	if (unlikely(IS_DESCQ_ENQUEUE_ERR(rc)))
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	if ((!keep_in_cache) || unlikely(IS_DESCQ_ENQUEUE_ERR(rc)))
+		ctxmgr_sep_cache_invalidate(drvdata->sep_cache, ctx_id,
+					    CRYPTO_CTX_ID_SINGLE_MASK);
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_put();
+#endif
+	mutex_unlock(&drvdata->desc_queue_sequencer);
+	return rc;
+}
+
+/**
+ * process_digest_fin() - Process finalization event for hash operation
+ *
+ * @digest_req:	The async. digest request context
+ *
+ */
+static void process_digest_fin(struct async_digest_req_ctx *digest_req)
+{
+	u8 digest_size;
+	u8 *digest_ptr;
+	struct sep_op_ctx *op_ctx = &digest_req->async_req.op_ctx;
+#ifdef DEBUG
+	struct crypto_ahash *ahash_tfm;
+#endif
+
+	if (op_ctx->op_type == SEP_OP_CRYPTO_FINI) {
+		/* Handle digest copy back to "result" */
+		digest_size =
+		    ctxmgr_get_digest_or_mac_ptr(&op_ctx->ctx_info,
+						 &digest_ptr);
+		if (unlikely(digest_ptr == NULL)) {
+			pr_err("Failed fetching digest/MAC\n");
+			return;
+		}
+		if (digest_req->result != NULL)
+			memcpy(digest_req->result, digest_ptr, digest_size);
+		else	/* Save pointer to result (to be copied on "final") */
+			digest_req->result = digest_ptr;
+#ifdef DEBUG
+		dump_byte_array("digest", digest_req->result, digest_size);
+		ahash_tfm =
+		    crypto_ahash_reqtfm(container_of
+					(digest_req->async_req.initiating_req,
+					 struct ahash_request, base));
+		if (digest_size != crypto_ahash_digestsize(ahash_tfm))
+			pr_err("Read digest of %u B. Expected %u B.\n",
+				    digest_size,
+				    crypto_ahash_digestsize(ahash_tfm));
+#endif
+	}
+	crypto_op_completion_cleanup(op_ctx);
+	ctxmgr_unmap_kernel_ctx(&op_ctx->ctx_info);
+}
+
+static void dx_crypto_api_handle_op_completion(struct work_struct *work)
+{
+	struct async_req_ctx *areq_ctx =
+	    container_of(work, struct async_req_ctx, comp_work);
+	struct sep_op_ctx *op_ctx = &areq_ctx->op_ctx;
+	struct ablkcipher_request *ablkcipher_req;
+	struct crypto_async_request *initiating_req = areq_ctx->initiating_req;
+	int err = 0;
+	u8 *req_info_p;	/* For state persistence in caller's context (IV) */
+
+	pr_debug("req=%p op_ctx=%p\n", initiating_req, op_ctx);
+	if (op_ctx == NULL) {
+		pr_err("Invalid work context (%p)\n", work);
+		return;
+	}
+
+	if (op_ctx->op_state == USER_OP_COMPLETED) {
+
+		if (unlikely(op_ctx->error_info != 0)) {
+			pr_err("SeP crypto-op failed (sep_rc=0x%08X)\n",
+				    op_ctx->error_info);
+		}
+		switch (crypto_tfm_alg_type(initiating_req->tfm)) {
+		case CRYPTO_ALG_TYPE_ABLKCIPHER:
+			/* Resolve to "info" (IV, etc.) for given alg_type */
+			crypto_op_completion_cleanup(op_ctx);
+			ablkcipher_req = (struct ablkcipher_request *)
+			    container_of(initiating_req,
+					 struct ablkcipher_request, base);
+			req_info_p = ablkcipher_req->info;
+			release_symcipher_ctx(op_ctx, req_info_p);
+			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			process_digest_fin(container_of(areq_ctx,
+					struct async_digest_req_ctx,
+					async_req));
+			break;
+		default:
+			pr_err("Unsupported alg_type (%d)\n",
+				    crypto_tfm_alg_type(initiating_req->tfm));
+		}
+		/* Save ret_code info before cleaning op_ctx */
+		err = -(op_ctx->error_info);
+		if (unlikely(err == -EINPROGRESS)) {
+			/* SeP error code collides with EINPROGRESS */
+			pr_err("Invalid SeP error code 0x%08X\n",
+				    op_ctx->error_info);
+			err = -EINVAL;	/* fallback */
+		}
+		op_ctx_fini(op_ctx);
+	} else if (op_ctx->op_state == USER_OP_INPROC) {
+		/* Report with the callback the dispatch from backlog to
+		   the actual processing in the SW descriptors queue
+		   (Returned -EBUSY when the request was dispatched) */
+		err = -EINPROGRESS;
+	} else {
+		pr_err("Invalid state (%d) for op_ctx %p\n",
+			    op_ctx->op_state, op_ctx);
+		BUG();
+	}
+	if (likely(initiating_req->complete != NULL))
+		initiating_req->complete(initiating_req, err);
+	else
+		pr_err("Async. operation has no completion callback.\n");
+}
+
+/****************************************************/
+/* Block cipher algorithms                          */
+/****************************************************/
+
+/**
+ * get_symcipher_tfm_cipher_type() - Get cipher type of given symcipher
+ *					transform
+ * @tfm:	The crypto transform object
+ *
+ * Returns enum dxdi_sym_cipher_type (DXDI_SYMCIPHER_NONE for invalid)
+ */
+static enum dxdi_sym_cipher_type get_symcipher_tfm_cipher_type(struct crypto_tfm
+							       *tfm)
+{
+	const int alg_index = tfm->__crt_alg - dx_ablkcipher_algs;
+
+	if ((alg_index < 0) || (alg_index >= DX_ABLKCIPHER_NUM)) {
+		pr_err("Unknown alg: %s\n", crypto_tfm_alg_name(tfm));
+		return DXDI_SYMCIPHER_NONE;
+	}
+
+	return dx_algs_cipher_types[alg_index];
+}
+
+static int symcipher_ctx_init(struct crypto_tfm *tfm)
+{
+	struct ablkcipher_tfm *ablktfm = &tfm->crt_ablkcipher;
+	struct host_crypto_ctx_sym_cipher *host_ctx_p = crypto_tfm_ctx(tfm);
+	struct device *mydev = crypto_api_ctx.drv_data->sep_data->dev;
+	struct client_crypto_ctx_info _ctx_info;
+	struct client_crypto_ctx_info *ctx_info = &_ctx_info;
+	enum dxdi_sym_cipher_type cipher_type =
+	    get_symcipher_tfm_cipher_type(tfm);
+	int rc;
+
+	pr_debug("Initializing context @%p for %s (%d)\n",
+		      host_ctx_p, crypto_tfm_alg_name(tfm), cipher_type);
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_get();
+#endif
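+	/* Reserve room for our per-request context so that
+	   ablkcipher_request_ctx() returns a struct async_req_ctx in-place */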
+	ablktfm->reqsize += sizeof(struct async_req_ctx);
+	rc = ctxmgr_map_kernel_ctx(ctx_info, mydev, ALG_CLASS_SYM_CIPHER,
+				   (struct host_crypto_ctx *)host_ctx_p, NULL,
+				   0);
+	if (rc != 0) {
+		pr_err("Failed mapping context (rc=%d)\n", rc);
+#ifdef SEP_RUNTIME_PM
+		dx_sep_pm_runtime_put();
+#endif
+		return rc;
+	}
+	/* Allocate a new Crypto context ID */
+	ctxmgr_set_ctx_id(ctx_info, alloc_crypto_ctx_id(&crypto_api_ctx));
+	rc = ctxmgr_init_symcipher_ctx_no_props(ctx_info, cipher_type);
+	if (unlikely(rc != 0)) {
+		pr_err("Failed initializing context\n");
+		ctxmgr_set_ctx_state(ctx_info, CTX_STATE_UNINITIALIZED);
+	} else {
+		ctxmgr_set_ctx_state(ctx_info, CTX_STATE_PARTIAL_INIT);
+	}
+	ctxmgr_unmap_kernel_ctx(ctx_info);
+
+	/* New TEE method */
+	if (!memcmp(crypto_tfm_alg_name(tfm), "xxx(aes)", 8)) {
+		u8 uuid[16] = DISK_ENC_APP_UUID;
+		enum dxdi_sep_module ret_origin;
+
+		host_ctx_p->sctx = dx_sepapp_context_alloc();
+		if (unlikely(!host_ctx_p->sctx)) {
+			rc = -ENOMEM;
+			goto init_end;
+		}
+
+		rc = dx_sepapp_session_open(host_ctx_p->sctx,
+				uuid, 0, NULL, NULL, &host_ctx_p->sess_id,
+				&ret_origin);
+		if (unlikely(rc != 0))
+			dx_sepapp_context_free(host_ctx_p->sctx);
+	}
+
+init_end:
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_put();
+#endif
+	return rc;
+}
+
+/**
+ * symcipher_set_key() - Set key for given symmetric cipher context
+ * @tfm:	The crypto transform object
+ * @key:	The key material
+ * @keylen:	Size of the key in bytes
+ *
+ * Set the key for the given symmetric cipher context.
+ * Setting a key implies initialization of the context.
+ * Returns 0 on success, negative error code otherwise.
+ */
+static int symcipher_set_key(struct crypto_ablkcipher *tfm,
+			     const u8 *key, unsigned int keylen)
+{
+	struct host_crypto_ctx_sym_cipher *host_ctx_p =
+	    crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+	struct device *mydev = crypto_api_ctx.drv_data->sep_data->dev;
+	struct client_crypto_ctx_info _ctx_info;
+	struct client_crypto_ctx_info *ctx_info = &_ctx_info;
+	enum dxdi_sym_cipher_type cipher_type =
+	    get_symcipher_tfm_cipher_type(crypto_ablkcipher_tfm(tfm));
+	u32 tfm_flags = crypto_ablkcipher_get_flags(tfm);
+	int rc;
+
+	if (cipher_type == DXDI_SYMCIPHER_NONE)
+		return -EINVAL;
+
+	if (keylen > DXDI_SYM_KEY_SIZE_MAX) {
+		pr_err("keylen=%u > %u\n", keylen, DXDI_SYM_KEY_SIZE_MAX);
+		tfm_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		crypto_ablkcipher_set_flags(tfm, tfm_flags);
+		return -EINVAL;
+	}
+
+	pr_debug("alg=%s (%d) , keylen=%u\n",
+		      crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm)),
+		      cipher_type, keylen);
+
+	rc = ctxmgr_map_kernel_ctx(ctx_info, mydev, ALG_CLASS_SYM_CIPHER,
+				   (struct host_crypto_ctx *)host_ctx_p, NULL,
+				   0);
+	if (unlikely(rc != 0)) {
+		pr_err("Failed mapping context (rc=%d)\n", rc);
+		return rc;
+	}
+
+	if (ctxmgr_get_ctx_state(ctx_info) == CTX_STATE_UNINITIALIZED) {
+		pr_err("Invoked for uninitialized context @%p\n",
+			    host_ctx_p);
+		rc = -EINVAL;
+	} else {		/* Modify algorithm key */
+		rc = ctxmgr_set_symcipher_key(ctx_info, keylen, key);
+		if (rc != 0) {
+			if (rc == -EINVAL) {
+				pr_info("Invalid keylen=%u\n", keylen);
+				tfm_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+			} else if (rc == -EPERM) {
+				pr_info("Invalid/weak key\n");
+				tfm_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+			} else {
+				pr_err("Unknown key setting error (%d)\n",
+					    rc);
+			}
+		}
+	}
+
+	ctxmgr_unmap_kernel_ctx(ctx_info);
+	crypto_ablkcipher_set_flags(tfm, tfm_flags);
+	return rc;
+}
+
+/**
+ * prepare_symcipher_ctx_for_processing() - Prepare crypto context resources
+ *						before dispatching an operation
+ *
+ * @op_ctx:	The associated operation context (from async req ctx)
+ * @host_ctx_p:	The host context to use (from tfm)
+ * @iv_crypto:	A pointer to IV (from req->info)
+ * @direction:	Requested cipher direction
+ */
+static int prepare_symcipher_ctx_for_processing(struct sep_op_ctx *op_ctx,
+						struct
+						host_crypto_ctx_sym_cipher
+						*host_ctx_p,
+						u8 *iv_crypto,
+						enum dxdi_cipher_direction
+						direction)
+{
+	struct client_crypto_ctx_info *ctx_info = &op_ctx->ctx_info;
+	struct device *mydev = crypto_api_ctx.drv_data->sep_data->dev;
+	struct sep_ctx_cache_entry *sep_ctx_p;
+	dma_addr_t sep_ctx_dma_addr;
+	int rc;
+
+	sep_ctx_p = dma_pool_alloc(sep_ctx_pool, GFP_KERNEL, &sep_ctx_dma_addr);
+	if (sep_ctx_p == NULL) {
+		pr_err("Failed allocating SeP context buffer\n");
+		return -ENOMEM;
+	}
+	rc = ctxmgr_map_kernel_ctx(ctx_info, mydev,
+				   ALG_CLASS_SYM_CIPHER,
+				   (struct host_crypto_ctx *)host_ctx_p,
+				   sep_ctx_p, sep_ctx_dma_addr);
+	if (unlikely(rc != 0)) {
+		pr_err("Failed mapping context (rc=%d)\n", rc);
+	} else {
+		ctxmgr_set_symcipher_iv(ctx_info, iv_crypto);
+		rc = ctxmgr_set_symcipher_direction(ctx_info, direction);
+		if (unlikely(rc != 0)) {
+			pr_err("Failed setting direction %d (rc=%d)\n",
+				    direction, rc);
+		}
+	}
+
+	if (unlikely(rc != 0)) {
+		/* Invalidate context on error */
+		ctxmgr_set_ctx_state(ctx_info, CTX_STATE_UNINITIALIZED);
+		ctxmgr_unmap_kernel_ctx(ctx_info);
+		dma_pool_free(sep_ctx_pool, sep_ctx_p, sep_ctx_dma_addr);
+	} else {		/* Context was changed by host */
+#ifdef DEBUG
+		ctxmgr_dump_sep_ctx(ctx_info);
+#endif
+		/* No need to dma_sync - sep_ctx is DMA-coherent memory */
+	}
+
+	return rc;
+}
+
+/**
+ * release_symcipher_ctx() - Sync. back crypto context (IV) to req->info
+ *				and unmap the context
+ * @op_ctx:	The associated operation context (from async req ctx)
+ * @iv_crypto:	The Crypto API IV buffer (req->info)
+ *
+ * Sync. back the crypto context (IV) to req->info so that IV changes can
+ * be tracked, then unmap the context.
+ */
+static void release_symcipher_ctx(struct sep_op_ctx *op_ctx,
+				  u8 *iv_crypto)
+{
+	struct client_crypto_ctx_info *ctx_info = &op_ctx->ctx_info;
+	u8 iv_sep_ctx[DXDI_AES_BLOCK_SIZE];
+	u8 iv_size = DXDI_AES_BLOCK_SIZE;	/* Init. to max. */
+	struct sep_ctx_cache_entry *sep_ctx_p = ctx_info->sep_ctx_kptr;
+	dma_addr_t sep_ctx_dma_addr = ctx_info->sep_ctx_dma_addr;
+	int rc;
+
+	if (iv_crypto != NULL) {	/* Save IV (block state) */
+		rc = ctxmgr_get_symcipher_iv(ctx_info, NULL, iv_sep_ctx,
+					     &iv_size);
+		if (likely(rc == 0)) {
+			if (iv_size > 0)
+				memcpy(iv_crypto, iv_sep_ctx, iv_size);
+		} else {
+			pr_err("Fail: getting IV information for ctx@%p\n",
+				    ctx_info->ctx_kptr);
+		}
+	}
+
+	ctxmgr_unmap_kernel_ctx(ctx_info);
+	dma_pool_free(sep_ctx_pool, sep_ctx_p, sep_ctx_dma_addr);
+}
+
+/**
+ * symcipher_process() - Process (encrypt/decrypt) data for given block-cipher
+ *			algorithm
+ * @req:	The async. request structure
+ * @direction:	Cipher operation direction
+ *
+ */
+static int symcipher_process(struct ablkcipher_request *req,
+			     enum dxdi_cipher_direction direction)
+{
+	struct async_req_ctx *areq_ctx = ablkcipher_request_ctx(req);
+	struct sep_op_ctx *op_ctx = &areq_ctx->op_ctx;
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct host_crypto_ctx_sym_cipher *host_ctx_p =
+	    crypto_ablkcipher_ctx(tfm);
+	int rc;
+
+	pr_debug("alg=%s %scrypt (req=%p, op_ctx=%p, host_ctx=%p)\n",
+		      crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm)),
+		      direction == DXDI_CDIR_ENC ? "en" : "de",
+		      req, op_ctx, host_ctx_p);
+
+	/* Initialize async. req. context */
+	areq_ctx->initiating_req = &req->base;
+
+	/* Old method */
+	if (memcmp(crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm)),
+			"xxx(aes)", 8)) {
+		INIT_WORK(&areq_ctx->comp_work,
+				dx_crypto_api_handle_op_completion);
+		op_ctx_init(op_ctx, &crypto_api_ctx);
+		op_ctx->op_type = SEP_OP_CRYPTO_PROC;
+		op_ctx->comp_work = &areq_ctx->comp_work;
+
+		rc = prepare_symcipher_ctx_for_processing(op_ctx,
+				host_ctx_p, req->info,
+				direction);
+		if (unlikely(rc != 0)) {
+			op_ctx_fini(op_ctx);
+			return rc;
+		}
+
+		rc = prepare_data_for_sep(op_ctx, NULL, req->src,
+					  NULL, req->dst,
+					  req->nbytes, CRYPTO_DATA_TEXT);
+		if (unlikely(rc != 0)) {
+			pr_err(
+				    "Failed preparing DMA buffers (rc=%d, err_info=0x%08X)\n",
+				    rc, op_ctx->error_info);
+			if (op_ctx->error_info == DXDI_ERROR_INVAL_DATA_SIZE) {
+				pr_err("Invalid data unit size %u\n",
+						req->nbytes);
+				req->base.flags |=
+						CRYPTO_TFM_RES_BAD_BLOCK_LEN;
+			}
+		} else {		/* Initiate processing */
+			/* Async. block cipher op. cannot reuse cache entry
+			   because the IV is set on every operation. Invalidate
+			   before releasing the sequencer (that's "async"
+			   invalidation) */
+			rc = dispatch_crypto_op(op_ctx,
+					req->base.
+					flags & CRYPTO_TFM_REQ_MAY_BACKLOG,
+					true /*init.*/, SEP_PROC_MODE_PROC_T,
+					false /*cache*/);
+		}
+		if (unlikely(IS_DESCQ_ENQUEUE_ERR(rc))) { /* Dispatch failure */
+			crypto_op_completion_cleanup(op_ctx);
+			release_symcipher_ctx(op_ctx, req->info);
+			op_ctx_fini(op_ctx);
+		}
+	} else {
+		/* New method with TEE API */
+		struct dxdi_sepapp_kparams *cmd_params =
+			kzalloc(sizeof(struct dxdi_sepapp_kparams), GFP_KERNEL);
+		enum dxdi_sep_module ret_origin;
+		struct scatterlist sg_iv;
+		u8 iv[SEP_AES_IV_SIZE];
+
+		if (cmd_params == NULL)
+			return -ENOMEM;
+
+		memcpy(iv, req->info, SEP_AES_IV_SIZE);
+		sg_init_one(&sg_iv, iv, SEP_AES_IV_SIZE);
+
+		cmd_params->params_types[0] = DXDI_SEPAPP_PARAM_VAL;
+		cmd_params->params[0].val.data[0] = direction;
+		cmd_params->params[0].val.data[1] = 0;
+		cmd_params->params[0].val.copy_dir = DXDI_DATA_TO_DEVICE;
+
+		cmd_params->params_types[1] = DXDI_SEPAPP_PARAM_MEMREF;
+		cmd_params->params[1].kmemref.dma_direction =
+					DXDI_DATA_TO_DEVICE;
+		cmd_params->params[1].kmemref.sgl = &sg_iv;
+		cmd_params->params[1].kmemref.nbytes = SEP_AES_IV_SIZE;
+
+		cmd_params->params_types[2] = DXDI_SEPAPP_PARAM_MEMREF;
+		cmd_params->params[2].kmemref.dma_direction =
+					DXDI_DATA_TO_DEVICE;
+		cmd_params->params[2].kmemref.sgl = req->src;
+		cmd_params->params[2].kmemref.nbytes = req->nbytes;
+
+		cmd_params->params_types[3] = DXDI_SEPAPP_PARAM_MEMREF;
+		cmd_params->params[3].kmemref.dma_direction =
+					DXDI_DATA_FROM_DEVICE;
+		cmd_params->params[3].kmemref.sgl = req->dst;
+		cmd_params->params[3].kmemref.nbytes = req->nbytes;
+
+		rc = async_sepapp_command_invoke(host_ctx_p->sctx,
+					host_ctx_p->sess_id, CMD_DO_CRYPTO,
+					cmd_params, &ret_origin, areq_ctx);
+	}
+
+	return rc;
+}
+
+/**
+ * symcipher_encrypt() - Encrypt for given sym-cipher context
+ * @req: The async. request structure
+ *
+ */
+static int symcipher_encrypt(struct ablkcipher_request *req)
+{
+	return symcipher_process(req, DXDI_CDIR_ENC);
+}
+
+/**
+ * symcipher_decrypt() - Decrypt for given sym-cipher context
+ * @req: The async. request structure
+ *
+ */
+static int symcipher_decrypt(struct ablkcipher_request *req)
+{
+	return symcipher_process(req, DXDI_CDIR_DEC);
+}
+
+static int ablkcipher_algs_init(void)
+{
+	int i, rc;
+	/* scratchpad to build crypto_alg from template + alg.specific data */
+	struct crypto_alg alg_spad;
+
+	/* Create block cipher algorithms from base + specs via scratchpad */
+	for (i = 0; i < DX_ABLKCIPHER_NUM; i++) {
+		/* Get base template */
+		memcpy(&alg_spad, &blkcipher_algs_base,
+		       sizeof(struct crypto_alg));
+		/* Get alg. specific attributes over base */
+		strcpy(alg_spad.cra_name, dx_ablkcipher_algs[i].cra_name);
+		strcpy(alg_spad.cra_driver_name,
+		       dx_ablkcipher_algs[i].cra_driver_name);
+		alg_spad.cra_blocksize = dx_ablkcipher_algs[i].cra_blocksize;
+		alg_spad.cra_u.ablkcipher.min_keysize =
+		    dx_ablkcipher_algs[i].cra_u.ablkcipher.min_keysize;
+		alg_spad.cra_u.ablkcipher.max_keysize =
+		    dx_ablkcipher_algs[i].cra_u.ablkcipher.max_keysize;
+		alg_spad.cra_u.ablkcipher.ivsize =
+		    dx_ablkcipher_algs[i].cra_u.ablkcipher.ivsize;
+		/* Copy scratchpad to real entry */
+		memcpy(&dx_ablkcipher_algs[i], &alg_spad,
+		       sizeof(struct crypto_alg));
+		/* The list must be initialized in place (pointers based) */
+		INIT_LIST_HEAD(&dx_ablkcipher_algs[i].cra_list);
+	}
+
+	/* Register algs */
+	pr_debug("Registering CryptoAPI blkciphers:\n");
+	for (i = 0, rc = 0; (i < DX_ABLKCIPHER_NUM) && (rc == 0); i++) {
+		pr_debug("%d. %s (__crt_alg=%p)\n", i,
+			      dx_ablkcipher_algs[i].cra_name,
+			      &dx_ablkcipher_algs[i]);
+		rc = crypto_register_alg(&dx_ablkcipher_algs[i]);
+		if (rc != 0)
+			break;
+	}
+	/* Failure: cleanup algorithms that were already registered */
+	if (rc != 0) {
+		pr_err("Failed registering %s\n",
+			    dx_ablkcipher_algs[i].cra_name);
+		while (--i >= 0)
+			crypto_unregister_alg(&dx_ablkcipher_algs[i]);
+	}
+	return rc;
+}
+
+static void ablkcipher_algs_exit(void)
+{
+	int i;
+
+	for (i = 0; i < DX_ABLKCIPHER_NUM; i++)
+		crypto_unregister_alg(&dx_ablkcipher_algs[i]);
+}
+
+/****************************************************/
+/* Digest (hash/MAC) algorithms                     */
+/****************************************************/
+
+static struct dx_digest_alg *get_digest_alg(struct crypto_tfm *tfm)
+{
+	struct hash_alg_common *halg_common =
+	    container_of(tfm->__crt_alg, struct hash_alg_common, base);
+	struct ahash_alg *this_ahash =
+	    container_of(halg_common, struct ahash_alg, halg);
+	struct dx_digest_alg *this_digest_alg =
+	    container_of(this_ahash, struct dx_digest_alg, ahash);
+	int alg_index = this_digest_alg - dx_digest_algs;
+
+	/* Verify that the tfm is valid (inside our dx_digest_algs array) */
+	if ((alg_index < 0) || (alg_index >= DX_DIGEST_NUM)) {
+		pr_err("Invalid digest tfm @%p\n", tfm);
+		return NULL;
+	}
+	return this_digest_alg;
+}
+
+/**
+ * prepare_digest_context_for_processing() - Prepare the crypto context of
+ *	async. hash/mac operation. Initialize context if requested.
+ *
+ * @req:		Crypto request context
+ * @do_init:		When "true" the given context is initialized
+ */
+static int prepare_digest_context_for_processing(struct ahash_request *req,
+						 bool do_init)
+{
+	struct crypto_ahash *ahash_tfm = crypto_ahash_reqtfm(req);
+	struct crypto_tfm *tfm = &ahash_tfm->base;
+	struct dx_digest_alg *digest_alg = get_digest_alg(tfm);
+	struct mac_key_data *key_data = crypto_tfm_ctx(tfm);/* For MACS only */
+	struct async_digest_req_ctx *req_ctx = ahash_request_ctx(req);
+	struct device *mydev = crypto_api_ctx.drv_data->sep_data->dev;
+	struct sep_op_ctx *op_ctx = &req_ctx->async_req.op_ctx;
+	struct client_crypto_ctx_info *ctx_info;
+	enum dxdi_mac_type mac_type;
+	struct dxdi_mac_props mac_props;	/* For MAC init. */
+#ifdef DEBUG
+	enum host_ctx_state ctx_state;
+#endif
+	int error_info;
+	int rc;
+
+	if (unlikely(digest_alg == NULL))
+		return -EINVAL;
+
+	pr_debug("op_ctx=%p op_state=%d\n", op_ctx, op_ctx->op_state);
+	ctx_info = &op_ctx->ctx_info;
+	mac_type = digest_alg->mac_type;
+
+	if (!do_init) {
+		/* Verify given request context was initialized */
+		if (req_ctx->async_req.initiating_req == NULL) {
+			pr_err(
+				    "Invoked for uninitialized async. req. context\n");
+			return -EINVAL;
+		}
+		/* Verify that this request context is not in use */
+		if (op_ctx->op_state != USER_OP_NOP) {
+			pr_err("Invoked for context in use!\n");
+			/*
+			 * We do not return -EBUSY because that is a valid
+			 * return code for async crypto operations,
+			 * indicating the given request was actually
+			 * dispatched.
+			 */
+			return -EINVAL;
+		}
+	}
+	op_ctx_init(op_ctx, &crypto_api_ctx);
+	op_ctx->comp_work = &req_ctx->async_req.comp_work;
+	rc = ctxmgr_map_kernel_ctx(ctx_info, mydev,
+				   (mac_type != DXDI_MAC_NONE) ?
+				   ALG_CLASS_MAC : ALG_CLASS_HASH,
+				   (struct host_crypto_ctx *)&req_ctx->host_ctx,
+				   NULL, 0);
+	if (rc != 0) {
+		pr_err("Failed mapping context (rc=%d)\n", rc);
+		return rc;
+	}
+	if (do_init) {
+		/* Allocate a new Crypto context ID */
+		ctxmgr_set_ctx_id(ctx_info,
+				  alloc_crypto_ctx_id(&crypto_api_ctx));
+		if (mac_type == DXDI_MAC_NONE) {	/* Hash alg. */
+			rc = ctxmgr_init_hash_ctx(ctx_info,
+						  digest_alg->hash_type,
+						  &error_info);
+		} else {	/* MAC */
+			mac_props.mac_type = mac_type;
+			mac_props.key_size = key_data->key_size;
+			memcpy(mac_props.key, key_data->key,
+			       key_data->key_size);
+			if (mac_type == DXDI_MAC_HMAC)
+				mac_props.alg_specific.hmac.hash_type =
+				    digest_alg->hash_type;
+			rc = ctxmgr_init_mac_ctx(ctx_info, &mac_props,
+						 &error_info);
+		}
+		if (unlikely(rc != 0)) {
+			pr_err("Failed initializing context\n");
+			ctxmgr_set_ctx_state(ctx_info, CTX_STATE_UNINITIALIZED);
+		} else {
+			ctxmgr_set_ctx_state(ctx_info, CTX_STATE_PARTIAL_INIT);
+			/* Init. the async. request context */
+			req_ctx->async_req.initiating_req = &req->base;
+			INIT_WORK(&req_ctx->async_req.comp_work,
+				  dx_crypto_api_handle_op_completion);
+			req_ctx->result = NULL;
+		}
+#ifdef DEBUG
+	} else {		/* Should have been initialized before */
+		ctx_state = ctxmgr_get_ctx_state(ctx_info);
+		if (ctx_state != CTX_STATE_INITIALIZED) {
+			pr_err("Invoked for context in state %d!\n",
+				    ctx_state);
+			rc = -EINVAL;
+		}
+#endif		 /*DEBUG*/
+	}
+	if (likely(rc == 0)) {
+#ifdef DEBUG
+		ctxmgr_dump_sep_ctx(ctx_info);
+#endif
+		/* Flush sep_ctx out of host cache */
+		ctxmgr_sync_sep_ctx(ctx_info, mydev);
+	}
+	return rc;
+}
+
+static int digest_req_dispatch(struct ahash_request *req,
+			       bool do_init, bool is_last,
+			       struct scatterlist *src, unsigned int nbytes)
+{
+	struct crypto_ahash *ahash_tfm = crypto_ahash_reqtfm(req);
+	struct dx_digest_alg *digest_alg = get_digest_alg(&ahash_tfm->base);
+	struct async_digest_req_ctx *req_ctx =
+	    (struct async_digest_req_ctx *)ahash_request_ctx(req);
+	struct sep_op_ctx *op_ctx = &req_ctx->async_req.op_ctx;
+	struct client_crypto_ctx_info *ctx_info = &op_ctx->ctx_info;
+	int rc;
+
+	if (digest_alg == NULL)
+		return -EINVAL;
+	if ((!do_init) && (req_ctx->result != NULL)) {
+		/* already finalized (AES based MACs) */
+		if (unlikely(nbytes > 0)) {
+			pr_err("Invoked with %u B after finalized\n",
+				    nbytes);
+			return -EINVAL;
+		}
+		if (is_last) {
+			/* Fetch saved result */
+			memcpy(req->result, req_ctx->result,
+			       SEP_AES_BLOCK_SIZE);
+			return 0;
+		}
+	}
+	rc = prepare_digest_context_for_processing(req, do_init);
+	if (unlikely(rc != 0))
+		return rc;
+	/* Prepare req_ctx->result */
+	if (is_last) {		/* Plain finalization */
+		req_ctx->result = req->result;
+	} else if (((digest_alg->mac_type == DXDI_MAC_AES_XCBC_MAC) ||
+		    (digest_alg->mac_type == DXDI_MAC_AES_CMAC)) &&
+		   (!IS_MULT_OF(nbytes, SEP_AES_BLOCK_SIZE))) {
+		/* Handle special case of AES based MAC update when not AES
+		   block multiple --> dispatch as final update */
+		is_last = true;
+		/* req_ctx->result remains NULL. This causes it to be set
+		   to the result location in the SeP context upon
+		   completion */
+	}
+
+	op_ctx->op_type = is_last ? SEP_OP_CRYPTO_FINI :
+	    do_init ? SEP_OP_CRYPTO_INIT : SEP_OP_CRYPTO_PROC;
+	if (op_ctx->op_type != SEP_OP_CRYPTO_INIT) {
+		rc = prepare_data_for_sep(op_ctx, NULL, src, NULL, NULL, nbytes,
+					  is_last ? CRYPTO_DATA_TEXT_FINALIZE :
+					  CRYPTO_DATA_TEXT);
+		if (rc == -ENOTBLK) {
+			/* Data was accumulated but less than a hash block */
+			/* Complete operation immediately */
+			rc = 0;
+			goto digest_proc_exit;
+		}
+		if (unlikely(rc != 0)) {
+			pr_err("Failed mapping client DMA buffer.\n");
+			goto digest_proc_exit;
+		}
+	}
+	rc = dispatch_crypto_op(op_ctx,
+				req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG,
+				do_init,
+				is_last ? SEP_PROC_MODE_FIN : do_init ?
+				SEP_PROC_MODE_NOP : SEP_PROC_MODE_PROC_T,
+				true /*cache */);
+	if (unlikely(IS_DESCQ_ENQUEUE_ERR(rc))) {
+		pr_err("Failed dispatching CRYPTO_OP (rc=%d)\n", rc);
+		crypto_op_completion_cleanup(op_ctx);
+		ctxmgr_unmap_kernel_ctx(ctx_info);
+		op_ctx_fini(op_ctx);
+	}
+	return rc;
+/* Exit when there is no pending request (error or not enough data) */
+ digest_proc_exit:
+	ctxmgr_unmap_kernel_ctx(ctx_info);
+	op_ctx_fini(op_ctx);
+	return rc;
+}
+
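+/* The ahash entry points below map onto digest_req_dispatch() flags:
+ *   init:   do_init=true,  is_last=false, no data
+ *   update: do_init=false, is_last=false, req->src
+ *   final:  do_init=false, is_last=true,  no data
+ *   finup:  do_init=false, is_last=true,  req->src
+ *   digest: do_init=true,  is_last=true,  req->src
+ */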
+static int digest_init(struct ahash_request *req)
+{
+	pr_debug("\n");
+	return digest_req_dispatch(req, true, false, NULL, 0);
+}
+
+static int digest_update(struct ahash_request *req)
+{
+	pr_debug("nbytes=%u\n", req->nbytes);
+	if (req->nbytes == 0)
+		return 0;	/* Nothing to do (but valid for 0-data MACs) */
+
+	return digest_req_dispatch(req, false, false, req->src, req->nbytes);
+}
+
+static int digest_final(struct ahash_request *req)
+{
+	pr_debug("\n");
+	return digest_req_dispatch(req, false, true, NULL, 0);
+}
+
+static int digest_finup(struct ahash_request *req)
+{
+	pr_debug("nbytes=%u\n", req->nbytes);
+	return digest_req_dispatch(req, false, true, req->src, req->nbytes);
+}
+
+static int digest_integrated(struct ahash_request *req)
+{
+	pr_debug("nbytes=%u\n", req->nbytes);
+	return digest_req_dispatch(req, true, true, req->src, req->nbytes);
+}
+
+/**
+ * do_hash_sync() - Do integrated hash operation synchronously
+ *
+ * @hash_type:		The hash type used for this HMAC
+ * @data_in:		The input data
+ * @data_len:		Size data_in in bytes
+ * @digest:		The hash result
+ * @digest_len_p:	Returned digest size
+ *
+ * This function is used to shorten long HMAC keys.
+ */
+static int do_hash_sync(enum dxdi_hash_type hash_type,
+			const u8 *data_in, unsigned int data_len,
+			u8 *digest, unsigned int *digest_len_p)
+{
+	int rc;
+	struct queue_drvdata *drvdata = crypto_api_ctx.drv_data;
+	struct host_crypto_ctx_hash host_ctx;
+	struct sep_op_ctx op_ctx;
+	struct client_crypto_ctx_info *ctx_info_p = &op_ctx.ctx_info;
+	struct scatterlist din_sgl;
+
+	op_ctx_init(&op_ctx, &crypto_api_ctx);
+	rc = ctxmgr_map_kernel_ctx(ctx_info_p, drvdata->sep_data->dev,
+				   ALG_CLASS_HASH,
+				   (struct host_crypto_ctx *)&host_ctx, NULL,
+				   0);
+	if (rc != 0) {
+		pr_err("Failed mapping crypto context (rc=%d)\n", rc);
+		op_ctx_fini(&op_ctx);
+		return rc;
+	}
+	/* Allocate a new Crypto context ID */
+	ctxmgr_set_ctx_id(ctx_info_p, alloc_crypto_ctx_id(op_ctx.client_ctx));
+	/* Algorithm class specific initialization */
+	rc = ctxmgr_init_hash_ctx(ctx_info_p, hash_type, &op_ctx.error_info);
+	if (rc != 0)
+		goto unmap_ctx_and_exit;
+	ctxmgr_set_ctx_state(ctx_info_p, CTX_STATE_PARTIAL_INIT);
+	op_ctx.op_type = SEP_OP_CRYPTO_FINI;	/* Integrated is also fin. */
+	sg_init_one(&din_sgl, data_in, data_len);
+	rc = prepare_data_for_sep(&op_ctx, NULL, &din_sgl, NULL, NULL,
+				  data_len, CRYPTO_DATA_TEXT_FINALIZE);
+	if (rc != 0)
+		goto unmap_ctx_and_exit;
+
+#ifdef DEBUG
+	ctxmgr_dump_sep_ctx(ctx_info_p);
+#endif
+	/* Flush sep_ctx out of host cache */
+	ctxmgr_sync_sep_ctx(ctx_info_p, drvdata->sep_data->dev);
+	rc = dispatch_crypto_op(&op_ctx, true, true, SEP_PROC_MODE_FIN, false);
+	if (likely(!IS_DESCQ_ENQUEUE_ERR(rc))) {
+		rc = 0;	/* Clear valid return code from dispatch_crypto_op */
+		wait_for_completion(&op_ctx.ioctl_op_compl);
+		if (likely(op_ctx.error_info == 0)) {
+			*digest_len_p =
+			    ctxmgr_get_digest_or_mac(ctx_info_p, digest);
+		}
+	}
+	crypto_op_completion_cleanup(&op_ctx);
+
+ unmap_ctx_and_exit:
+	ctxmgr_unmap_kernel_ctx(ctx_info_p);
+	return rc;
+}
+
+static int mac_setkey(struct crypto_ahash *tfm,
+		      const u8 *key, unsigned int keylen)
+{
+	struct dx_digest_alg *digest_alg = get_digest_alg(&tfm->base);
+	u32 tfm_flags = crypto_ahash_get_flags(tfm);
+	struct mac_key_data *key_data = crypto_tfm_ctx(&tfm->base);
+	int rc = 0;
+
+	if (unlikely(digest_alg == NULL))
+		return -EINVAL;
+	if (unlikely(digest_alg->mac_type == DXDI_MAC_NONE)) {
+		pr_err("Given algorithm which is not MAC\n");
+		return -EINVAL;
+	}
+	/* Pre-process HMAC key if larger than hash block size */
+	if ((digest_alg->hash_type != DXDI_HASH_NONE) &&
+	    (keylen > digest_alg->ahash.halg.base.cra_blocksize)) {
+		rc = do_hash_sync(digest_alg->hash_type, key, keylen,
+				  key_data->key, &key_data->key_size);
+		if (unlikely(rc != 0))
+			pr_err("Failed digesting key of %u bytes\n",
+			       keylen);
+		if (key_data->key_size != digest_alg->ahash.halg.digestsize)
+			pr_err("Returned digest size is %u != %u (expected)\n",
+			       key_data->key_size,
+			       digest_alg->ahash.halg.digestsize);
+	} else {		/* No need to digest the key */
+		/* Verify that the key size for AES based MACs is not too
+		   large. */
+		if ((digest_alg->hash_type == DXDI_HASH_NONE) &&
+		    (keylen > SEP_AES_KEY_SIZE_MAX)) {
+			pr_err("Invalid key size %u for %s\n",
+			       keylen,
+			       digest_alg->ahash.halg.base.cra_name);
+			tfm_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+			crypto_ahash_set_flags(tfm, tfm_flags);
+			rc = -EINVAL;
+		} else {
+			key_data->key_size = keylen;
+			memcpy(&key_data->key, key, keylen);
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * digest_tfm_init() - Initialize tfm with our reqsize (to accommodate context)
+ *	(cra_init entry point)
+ * @tfm:	The crypto transform object
+ */
+static int digest_tfm_init(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash_tfm =
+	    container_of(tfm, struct crypto_ahash, base);
+
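+	/* Set reqsize so that ahash_request_ctx() returns our per-request
+	 * struct async_digest_req_ctx in-place (no extra allocation) */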
+	ahash_tfm->reqsize = sizeof(struct async_digest_req_ctx);
+	return 0;
+}
+
+static int digest_algs_init(void)
+{
+	int i, rc;
+
+	/* Create hash algorithms from base + specs via scratchpad */
+	for (i = 0; i < DX_DIGEST_NUM; i++) {
+		/* Apply template values into given algorithms */
+		dx_digest_algs[i].ahash.init = digest_algs_base.init;
+		dx_digest_algs[i].ahash.update = digest_algs_base.update;
+		dx_digest_algs[i].ahash.final = digest_algs_base.final;
+		dx_digest_algs[i].ahash.finup = digest_algs_base.finup;
+		dx_digest_algs[i].ahash.digest = digest_algs_base.digest;
+		dx_digest_algs[i].ahash.halg.base.cra_type =
+		    digest_algs_base.halg.base.cra_type;
+		dx_digest_algs[i].ahash.halg.base.cra_priority =
+		    digest_algs_base.halg.base.cra_priority;
+		dx_digest_algs[i].ahash.halg.base.cra_flags =
+		    digest_algs_base.halg.base.cra_flags;
+		dx_digest_algs[i].ahash.halg.base.cra_module =
+		    digest_algs_base.halg.base.cra_module;
+		dx_digest_algs[i].ahash.halg.base.cra_init =
+		    digest_algs_base.halg.base.cra_init;
+		INIT_LIST_HEAD(&dx_digest_algs[i].ahash.halg.base.cra_list);
+	}
+
+	/* Register algs */
+	pr_debug("Registering CryptoAPI digest algorithms:\n");
+	for (i = 0, rc = 0; (i < DX_DIGEST_NUM) && (rc == 0); i++) {
+		pr_debug("%d. %s (__crt_alg=%p)\n", i,
+			      dx_digest_algs[i].ahash.halg.base.cra_name,
+			      &dx_digest_algs[i].ahash);
+		rc = crypto_register_ahash(&dx_digest_algs[i].ahash);
+		if (rc != 0)
+			break;
+	}
+	if (unlikely(rc != 0)) {
+		/* Failure: cleanup algorithms that were already registered */
+		pr_err("Failed registering %s\n",
+			    dx_digest_algs[i].ahash.halg.base.cra_name);
+		while (--i >= 0)
+			crypto_unregister_ahash(
+					&dx_digest_algs[i].ahash);
+	}
+	return rc;
+}
+
+static void digest_algs_exit(void)
+{
+	int i;
+
+	for (i = 0; i < DX_DIGEST_NUM; i++)
+		crypto_unregister_ahash(&dx_digest_algs[i].ahash);
+}
+
+/****************************************************/
+int dx_crypto_api_init(struct sep_drvdata *drvdata)
+{
+	/* Init. return code of each init. function to know which one to
+	   cleanup (only those with rc==0) */
+	int rc_ablkcipher_init = -EINVAL;
+	int rc_digest_init = -EINVAL;
+	int rc;
+
+	init_client_ctx(drvdata->queue + CRYPTO_API_QID, &crypto_api_ctx);
+
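+	/* Pool of DMA-coherent, cache-line aligned SeP context buffers;
+	 * being coherent, no explicit dma_sync is needed after host updates */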
+	sep_ctx_pool = dma_pool_create("dx_sep_ctx",
+				       crypto_api_ctx.drv_data->sep_data->dev,
+				       sizeof(struct sep_ctx_cache_entry),
+				       L1_CACHE_BYTES, 0);
+	if (sep_ctx_pool == NULL) {
+		pr_err("Failed allocating pool for SeP contexts\n");
+		rc = -ENOMEM;
+		goto init_error;
+	}
+	rc_ablkcipher_init = ablkcipher_algs_init();
+	if (unlikely(rc_ablkcipher_init != 0)) {
+		rc = rc_ablkcipher_init;
+		goto init_error;
+	}
+	rc_digest_init = digest_algs_init();
+	if (unlikely(rc_digest_init != 0)) {
+		rc = rc_digest_init;
+		goto init_error;
+	}
+
+	return 0;
+
+ init_error:
+	if (rc_ablkcipher_init == 0)
+		ablkcipher_algs_exit();
+	if (sep_ctx_pool != NULL)
+		dma_pool_destroy(sep_ctx_pool);
+	cleanup_client_ctx(drvdata->queue + CRYPTO_API_QID, &crypto_api_ctx);
+	return rc;
+}
+
+void dx_crypto_api_fini(void)
+{
+	digest_algs_exit();
+	ablkcipher_algs_exit();
+	dma_pool_destroy(sep_ctx_pool);
+	cleanup_client_ctx(crypto_api_ctx.drv_data, &crypto_api_ctx);
+}
diff --git a/drivers/staging/sep54/crypto_api.h b/drivers/staging/sep54/crypto_api.h
new file mode 100644
index 0000000..67a546c
--- /dev/null
+++ b/drivers/staging/sep54/crypto_api.h
@@ -0,0 +1,64 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+
+/* \file crypto_api.h
+   Definitions of Linux Crypto API shared with the main driver code
+ */
+
+#ifndef __CRYPTO_API_H__
+#define __CRYPTO_API_H__
+
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/workqueue.h>
+
+/**
+ * struct async_req_ctx - Context for async. request (__ctx of request)
+ * @op_ctx:		SeP operation context
+ * @initiating_req:	The initiating crypto request
+ * @comp_work:		Completion work handler
+ */
+struct async_req_ctx {
+	struct sep_op_ctx op_ctx;
+	struct crypto_async_request *initiating_req;
+	struct work_struct comp_work;
+};
+
+/* Crypto-API init. entry point (to be used by sep_setup) */
+int dx_crypto_api_init(struct sep_drvdata *drvdata);
+void dx_crypto_api_fini(void);
+
+int hwk_init(void);
+void hwk_fini(void);
+
+#endif /*__CRYPTO_API_H__*/
diff --git a/drivers/staging/sep54/crypto_ctx_mgr.c b/drivers/staging/sep54/crypto_ctx_mgr.c
new file mode 100644
index 0000000..bd82f2f
--- /dev/null
+++ b/drivers/staging/sep54/crypto_ctx_mgr.c
@@ -0,0 +1,2559 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+/*! \file
+ * This source file implements crypto context management services.
+ */
+
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_CTX_MGR
+#include "sep_log.h"
+#include "dx_driver.h"
+#include "crypto_ctx_mgr.h"
+
+struct ctxmgr_cache_entry {
+	struct crypto_ctx_uid ctx_id;/* Allocated context ID or CTX_INVALID_ID */
+	unsigned long lru_time;	/* Monotonically incrementing counter for LRU */
+};
+
+struct sep_ctx_cache {
+	unsigned long lru_clk;	/* Virtual clock counter */
+	/* The virtual clock counter is incremented for each entry
+	   allocation/reuse in order to provide LRU info */
+	int cache_size;		/* Num. of cache entries */
+	struct ctxmgr_cache_entry entries[1];	/* embedded entries */
+	/* The "entries" element is only a start point for an array with
+	   cache_size entries that starts in this location */
+};
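+
+/*
+ * Allocation sketch (illustrative; "num_entries" is a hypothetical
+ * variable): since "entries" is only the start point of a cache_size-long
+ * array, the structure must be allocated with room for the extra entries:
+ *
+ *	cache = kzalloc(sizeof(struct sep_ctx_cache) +
+ *			(num_entries - 1) *
+ *			sizeof(struct ctxmgr_cache_entry), GFP_KERNEL);
+ */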
+
+/* DES weak keys checking data */
+static const u8 des_key_parity[] = {
+	8, 1, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 2, 8,
+	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 3,
+	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+	4, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+	8, 5, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 6, 8,
+};
+
+/**
+ * ctxmgr_get_ctx_size() - Get host context size for given algorithm class
+ * @alg_class:	 Queries algorithm class
+ *
+ * Returns size_t Size in bytes of host context
+ */
+size_t ctxmgr_get_ctx_size(enum crypto_alg_class alg_class)
+{
+	switch (alg_class) {
+	case ALG_CLASS_SYM_CIPHER:
+		return sizeof(struct host_crypto_ctx_sym_cipher);
+	case ALG_CLASS_AUTH_ENC:
+		return sizeof(struct host_crypto_ctx_auth_enc);
+	case ALG_CLASS_MAC:
+		return sizeof(struct host_crypto_ctx_mac);
+	case ALG_CLASS_HASH:
+		return sizeof(struct host_crypto_ctx_hash);
+	default:
+		return 0;
+	}
+}
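+
+/*
+ * Usage sketch (illustrative): callers holding only the algorithm class
+ * can size a context buffer with this query; 0 means "unknown class":
+ *
+ *	size_t ctx_size = ctxmgr_get_ctx_size(ALG_CLASS_HASH);
+ *	if (ctx_size == 0)
+ *		return -EINVAL;
+ */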
+
+static size_t get_sep_ctx_offset(enum crypto_alg_class alg_class)
+{
+	switch (alg_class) {
+	case ALG_CLASS_SYM_CIPHER:
+		return offsetof(struct host_crypto_ctx_sym_cipher, sep_ctx);
+	case ALG_CLASS_AUTH_ENC:
+		return offsetof(struct host_crypto_ctx_auth_enc, sep_ctx);
+	case ALG_CLASS_MAC:
+		return offsetof(struct host_crypto_ctx_mac, sep_ctx);
+	case ALG_CLASS_HASH:
+		return offsetof(struct host_crypto_ctx_hash, sep_ctx);
+	default:
+		pr_err("Invalid algorithm class = %d\n", alg_class);
+		return 0;
+	}
+
+}
+
+/**
+ * get_hash_digest_size() - Get hash digest size (in octets) of given (SeP)
+ *				hash mode
+ * @hash_mode:
+ *
+ * Returns u32 Digest size in octets (0 if unknown hash mode)
+ */
+static u32 get_hash_digest_size(enum sep_hash_mode hash_mode)
+{
+	switch (hash_mode) {
+	case SEP_HASH_SHA1:
+		return 160 >> 3;	/* 160 bit */
+	case SEP_HASH_SHA224:
+		return 224 >> 3;
+	case SEP_HASH_SHA256:
+		return 256 >> 3;
+	case SEP_HASH_SHA384:
+		return 384 >> 3;
+	case SEP_HASH_SHA512:
+		return 512 >> 3;
+	default:
+		pr_err("Unknown hash mode %d\n", hash_mode);
+	}
+	return 0;
+}
+
+static u16 get_hash_block_size(enum dxdi_hash_type hash_type)
+{
+
+	switch (hash_type) {
+	case DXDI_HASH_MD5:
+		pr_err("MD5 not supported\n");
+		break;
+	case DXDI_HASH_SHA1:
+	case DXDI_HASH_SHA224:
+	case DXDI_HASH_SHA256:
+		return 512 >> 3;
+	case DXDI_HASH_SHA384:
+	case DXDI_HASH_SHA512:
+		return 1024 >> 3;
+	default:
+		pr_err("Invalid hash type %d", hash_type);
+	}
+	return 0;
+}
+
+enum sep_hash_mode get_sep_hash_mode(enum dxdi_hash_type hash_type)
+{
+	switch (hash_type) {
+	case DXDI_HASH_MD5:
+		pr_err("MD5 not supported\n");
+		return SEP_HASH_NULL;
+	case DXDI_HASH_SHA1:
+		return SEP_HASH_SHA1;
+	case DXDI_HASH_SHA224:
+		return SEP_HASH_SHA224;
+	case DXDI_HASH_SHA256:
+		return SEP_HASH_SHA256;
+	case DXDI_HASH_SHA384:
+		return SEP_HASH_SHA384;
+	case DXDI_HASH_SHA512:
+		return SEP_HASH_SHA512;
+	default:
+		pr_err("Invalid hash type=%d\n", hash_type);
+		return SEP_HASH_NULL;
+	}
+}
+
+/**
+ * ctxmgr_map_user_ctx() - Map given user context to kernel space + DMA
+ * @ctx_info:	 User context info structure
+ * @mydev:	 Associated device context
+ * @alg_class:	If !ALG_CLASS_NONE, consider context of given class for
+ *		size validation (used in uninitialized context mapping)
+ *		When set to ALG_CLASS_NONE, the alg_class field of the
+ *		host_crypto_ctx is used to verify mapped buffer size.
+ * @user_ctx_ptr: Pointer to user space crypto context
+ *
+ * Returns int 0 for success
+ */
+int ctxmgr_map_user_ctx(struct client_crypto_ctx_info *ctx_info,
+			struct device *mydev,
+			enum crypto_alg_class alg_class,
+			u32 __user *user_ctx_ptr)
+{
+	const unsigned long offset_in_page =
+	    ((unsigned long)user_ctx_ptr & ~PAGE_MASK);
+	const unsigned long dist_from_page_end = PAGE_SIZE - offset_in_page;
+	size_t ctx_size = ctxmgr_get_ctx_size(alg_class);
+	int pages_mapped;
+	int rc;
+
+#ifdef DEBUG
+	if (ctx_info->user_ptr != NULL) {
+		pr_err("User context already mapped to 0x%p\n",
+			    ctx_info->user_ptr);
+		return -EINVAL;
+	}
+#endif
+	ctx_info->dev = mydev;
+
+	/* If unknown class, verify that it is at least host_ctx size */
+	/* (so we can access the alg_class field in it) */
+	ctx_size = (ctx_size == 0) ? sizeof(struct host_crypto_ctx) : ctx_size;
+	if (dist_from_page_end < ctx_size) {
+		pr_err("Given user context that crosses a page (0x%p)\n",
+			    user_ctx_ptr);
+		return -EINVAL;
+	}
+
+	down_read(&current->mm->mmap_sem);
+	pages_mapped = get_user_pages(current, current->mm,
+				      (unsigned long)user_ctx_ptr, 1, 1, 0,
+				      &ctx_info->ctx_page, 0);
+	up_read(&current->mm->mmap_sem);
+	if (pages_mapped < 1) {
+		pr_err("Failed getting user page\n");
+		return -ENOMEM;
+	}
+
+	/* The user context is accessed through a kernel bounce buffer
+	   (copied in below) rather than through the mapped page */
+	ctx_info->ctx_kptr = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Memory allocation failed\n");
+		page_cache_release(ctx_info->ctx_page);
+		ctx_info->ctx_page = NULL;
+		return -ENOMEM;
+	}
+
+	if (alg_class == ALG_CLASS_NONE) {
+		size_t host_ctx_size = sizeof(struct host_crypto_ctx);
+		/* Copy common header to get the alg class */
+		if (copy_from_user(ctx_info->ctx_kptr,
+				user_ctx_ptr, host_ctx_size)) {
+			pr_err("Copy from user failed\n");
+			rc = -EINVAL;
+			goto copy_from_user_failed;
+		}
+		/* Verify actual context size with class saved in context */
+		alg_class = ctx_info->ctx_kptr->alg_class;
+		ctx_size = ctxmgr_get_ctx_size(alg_class);
+		if (ctx_size == 0) {	/* Unknown class */
+			pr_err("Unknown alg class\n");
+			rc = -EINVAL;
+			goto unknown_alg_class;
+		}
+		if (dist_from_page_end < ctx_size) {
+			pr_err(
+				    "Given user context that crosses a page (0x%p)\n",
+				    user_ctx_ptr);
+			rc = -EINVAL;
+			goto ctx_cross_page;
+		}
+		/* Copy rest of the context when we know the actual size */
+		if (copy_from_user((u8 *)ctx_info->ctx_kptr + host_ctx_size,
+				(u8 *)user_ctx_ptr + host_ctx_size,
+				ctx_size - host_ctx_size)) {
+			pr_err("Copy from user failed\n");
+			rc = -EINVAL;
+			goto copy_from_user_failed;
+		}
+	}
+
+	/* Map sep_ctx */
+	ctx_info->sep_ctx_kptr = (struct sep_ctx_cache_entry *)
+	    (((unsigned long)ctx_info->ctx_kptr) +
+	     get_sep_ctx_offset(alg_class));
+	ctx_info->sep_ctx_dma_addr =
+	    dma_map_single(mydev, ctx_info->sep_ctx_kptr,
+			   sizeof(struct sep_ctx_cache_entry),
+			   DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(mydev, ctx_info->sep_ctx_dma_addr)) {
+		pr_err("Mapping sep_ctx for DMA failed");
+		rc = -ENOMEM;
+		goto sep_ctx_dma_map_failed;
+	}
+
+	ctx_info->sep_cache_idx = -1;
+
+	ctx_info->user_ptr = user_ctx_ptr;
+
+	return 0;
+
+ sep_ctx_dma_map_failed:
+ copy_from_user_failed:
+ ctx_cross_page:
+ unknown_alg_class:
+	kfree(ctx_info->ctx_kptr);
+	ctx_info->ctx_kptr = NULL;
+	page_cache_release(ctx_info->ctx_page);
+	ctx_info->ctx_page = NULL;
+	return rc;
+}
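+
+/*
+ * Call-sequence sketch (illustrative; assumes a typical ioctl-handler
+ * flow): SeP processing is bracketed by map/unmap so the user context is
+ * copied in, DMA-mapped, and written back on completion:
+ *
+ *	rc = ctxmgr_map_user_ctx(&ctx_info, mydev, ALG_CLASS_NONE, user_ptr);
+ *	if (rc == 0) {
+ *		(dispatch operation using ctx_info.sep_ctx_dma_addr)
+ *		ctxmgr_unmap_user_ctx(&ctx_info);
+ *	}
+ */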
+
+/**
+ * ctxmgr_unmap_user_ctx() - Unmap given currently mapped user context
+ * @ctx_info:	 User context info structure
+ */
+void ctxmgr_unmap_user_ctx(struct client_crypto_ctx_info *ctx_info)
+{
+	size_t ctx_size;
+
+	if (ctx_info->ctx_kptr == NULL) {
+		/* This is a valid case since we invoke this function in some
+		   error cases without knowing if context was mapped or not */
+		pr_debug("Context not mapped\n");
+		return;
+	}
+
+	ctx_size = ctxmgr_get_ctx_size(ctx_info->ctx_kptr->alg_class);
+
+	dma_unmap_single(ctx_info->dev, ctx_info->sep_ctx_dma_addr,
+			 sizeof(struct sep_ctx_cache_entry), DMA_BIDIRECTIONAL);
+	ctx_info->sep_ctx_dma_addr = 0;
+
+	if (copy_to_user(ctx_info->user_ptr, ctx_info->ctx_kptr, ctx_size))
+		pr_err("Copy to user failed\n");
+
+	kfree(ctx_info->ctx_kptr);
+	ctx_info->ctx_kptr = NULL;
+
+	if (!PageReserved(ctx_info->ctx_page))
+		SetPageDirty(ctx_info->ctx_page);
+	page_cache_release(ctx_info->ctx_page);
+	ctx_info->ctx_page = NULL;
+
+	ctx_info->sep_cache_idx = -1;
+
+	ctx_info->user_ptr = NULL;
+
+}
+
+/**
+ * ctxmgr_map_kernel_ctx() - Map given kernel context + clone SeP context into
+ *				a privately allocated DMA buffer
+ *				(required for async. ops. on the same context)
+ * @ctx_info:	Client crypto context info structure
+ * @mydev:	Associated device context
+ * @alg_class:	If !ALG_CLASS_NONE, consider context of given class for
+ *		size validation (used in uninitialized context mapping)
+ *		When set to ALG_CLASS_NONE, the alg_class field of the
+ *		host_crypto_ctx is used to verify mapped buffer size.
+ * @kernel_ctx_p:	Pointer to kernel space crypto context
+ * @sep_ctx_p:	Pointer to (private) SeP context. If !NULL the embedded sep
+ *		context is copied into this buffer.
+ *		Set to NULL to use the one embedded in host_crypto_ctx.
+ * @sep_ctx_dma_addr:	DMA address of private SeP context (if sep_ctx_p!=NULL)
+ *
+ * Returns int 0 for success
+ */
+int ctxmgr_map_kernel_ctx(struct client_crypto_ctx_info *ctx_info,
+			  struct device *mydev,
+			  enum crypto_alg_class alg_class,
+			  struct host_crypto_ctx *kernel_ctx_p,
+			  struct sep_ctx_cache_entry *sep_ctx_p,
+			  dma_addr_t sep_ctx_dma_addr)
+{
+	int rc = 0;
+	size_t embedded_sep_ctx_offset = get_sep_ctx_offset(alg_class);
+	struct sep_ctx_cache_entry *embedded_sep_ctx_p =
+	    (struct sep_ctx_cache_entry *)(((unsigned long)kernel_ctx_p) +
+					   embedded_sep_ctx_offset);
+
+	if (embedded_sep_ctx_offset == 0)
+		return -EINVAL;
+	if (sep_ctx_p == NULL)	/* Default context is the one inside */
+		sep_ctx_p = embedded_sep_ctx_p;
+
+	pr_debug("kernel_ctx_p=%p\n", kernel_ctx_p);
+
+	ctx_info->dev = mydev;
+	/* These fields are only relevant for user space context mapping */
+	ctx_info->user_ptr = NULL;
+	ctx_info->ctx_page = NULL;
+	ctx_info->ctx_kptr = kernel_ctx_p;
+	ctx_info->sep_ctx_kptr = sep_ctx_p;
+	/* We assume that the CryptoAPI context of kernel is allocated using
+	   the slab pools, thus aligned to one of the standard blocks and
+	   does not cross page boundary (It is required to be physically
+	   contiguous for SeP DMA access) */
+	if ((((unsigned long)sep_ctx_p + sizeof(struct sep_ctx_cache_entry))
+	     >> PAGE_SHIFT) != ((unsigned long)sep_ctx_p >> PAGE_SHIFT)) {
+		pr_err("SeP context cross page boundary start=0x%lx len=0x%zX\n",
+		       (unsigned long)sep_ctx_p,
+		       sizeof(struct sep_ctx_cache_entry));
+		return -EINVAL;
+	}
+
+	/* Map sep_ctx if embedded in given host context */
+	/* (otherwise, assumed to be cache coherent DMA buffer) */
+	if (sep_ctx_p == embedded_sep_ctx_p) {
+		ctx_info->sep_ctx_dma_addr =
+				dma_map_single(mydev, sep_ctx_p,
+					       sizeof(struct
+						      sep_ctx_cache_entry),
+					       DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(mydev, ctx_info->sep_ctx_dma_addr)) {
+			pr_err("Mapping sep_ctx for DMA failed");
+			rc = -ENOMEM;
+		}
+	} else {
+		ctx_info->sep_ctx_dma_addr = sep_ctx_dma_addr;
+		/* Clone base context into external SeP context buffer */
+		memcpy(sep_ctx_p, embedded_sep_ctx_p,
+		       sizeof(struct sep_ctx_cache_entry));
+	}
+
+	ctx_info->sep_cache_idx = -1;
+
+	return rc;
+}
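+
+/*
+ * Call-sequence sketch (illustrative; priv_sep_ctx_p/priv_dma_addr are
+ * hypothetical names): async. kernel Crypto-API requests pass a private
+ * SeP context buffer so concurrent requests do not share the embedded
+ * context:
+ *
+ *	rc = ctxmgr_map_kernel_ctx(&ctx_info, mydev, ALG_CLASS_SYM_CIPHER,
+ *				   host_ctx_p, priv_sep_ctx_p, priv_dma_addr);
+ *	...
+ *	ctxmgr_unmap_kernel_ctx(&ctx_info);
+ */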
+
+/**
+ * ctxmgr_unmap_kernel_ctx() - Unmap given currently mapped kernel context
+ * @ctx_info:	 User context info structure
+ */
+void ctxmgr_unmap_kernel_ctx(struct client_crypto_ctx_info *ctx_info)
+{
+	struct sep_ctx_cache_entry *embedded_sep_ctx_p;
+	size_t embedded_sep_ctx_offset;
+
+	if (ctx_info == NULL) {
+		pr_err("Context not mapped\n");
+		return;
+	}
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		/* This is a valid case since we invoke this function in some
+		   error cases without knowing if context was mapped or not */
+		pr_debug("Context not mapped\n");
+		return;
+	}
+#endif
+
+	embedded_sep_ctx_offset =
+	    get_sep_ctx_offset(ctx_info->ctx_kptr->alg_class);
+
+#ifdef DEBUG
+	if (embedded_sep_ctx_offset == 0) {
+		pr_err("Invalid algorithm class\n");
+		return;
+	}
+#endif
+
+	pr_debug("kernel_ctx_ptr=%p\n", ctx_info->ctx_kptr);
+	embedded_sep_ctx_p = (struct sep_ctx_cache_entry *)
+	    (((unsigned long)ctx_info->ctx_kptr) + embedded_sep_ctx_offset);
+
+	if (embedded_sep_ctx_p == ctx_info->sep_ctx_kptr) {
+		dma_unmap_single(ctx_info->dev, ctx_info->sep_ctx_dma_addr,
+				 sizeof(struct sep_ctx_cache_entry),
+				 DMA_BIDIRECTIONAL);
+	}
+
+	ctx_info->sep_ctx_kptr = NULL;
+	ctx_info->sep_ctx_dma_addr = 0;
+	ctx_info->ctx_kptr = NULL;
+	ctx_info->sep_cache_idx = -1;
+	ctx_info->dev = NULL;
+}
+
+/**
+ * get_blk_rem_buf() - Get a pointer to the (hash) block remainder buffer
+ *			structure
+ * @ctx_info:
+ *
+ * Returns struct hash_block_remainder*
+ */
+static struct hash_block_remainder *
+get_blk_rem_buf(struct client_crypto_ctx_info *ctx_info)
+{
+	struct host_crypto_ctx_hash *hash_ctx_p =
+	    (struct host_crypto_ctx_hash *)ctx_info->ctx_kptr;
+	struct host_crypto_ctx_mac *mac_ctx_p =
+	    (struct host_crypto_ctx_mac *)ctx_info->ctx_kptr;
+	struct hash_block_remainder *blk_rem_p = NULL;
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+	if ((hash_ctx_p->alg_class != ALG_CLASS_HASH) &&
+	    ctxmgr_get_mac_type(ctx_info) != DXDI_MAC_HMAC) {
+		pr_err("Not a hash/HMAC context\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	/* Get the correct block remainder buffer structure */
+	if (hash_ctx_p->alg_class == ALG_CLASS_HASH)
+		blk_rem_p = &hash_ctx_p->hash_tail;
+	else			/* HMAC */
+		blk_rem_p = &mac_ctx_p->hmac_tail;
+
+	return blk_rem_p;
+}
+
+/**
+ * ctxmgr_map2dev_hash_tail() - Map hash data tail buffer in the host context
+ *				for DMA to device
+ * @ctx_info:
+ * @mydev:
+ *
+ * Returns int 0 on success
+ */
+int ctxmgr_map2dev_hash_tail(struct client_crypto_ctx_info *ctx_info,
+			     struct device *mydev)
+{
+	struct hash_block_remainder *blk_rem_p = get_blk_rem_buf(ctx_info);
+
+	if (blk_rem_p->size > 0) {
+		ctx_info->hash_tail_dma_addr = dma_map_single(mydev,
+							      (void *)
+							      blk_rem_p->data,
+							      blk_rem_p->size,
+							      DMA_TO_DEVICE);
+		if (dma_mapping_error(mydev, ctx_info->hash_tail_dma_addr)) {
+			pr_err("Mapping hash_tail for DMA failed");
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * ctxmgr_unmap2dev_hash_tail() - Unmap hash data tail buffer from DMA to device
+ * @ctx_info:
+ * @mydev:
+ *
+ */
+void ctxmgr_unmap2dev_hash_tail(struct client_crypto_ctx_info *ctx_info,
+				struct device *mydev)
+{
+	struct hash_block_remainder *blk_rem_p = get_blk_rem_buf(ctx_info);
+
+	if (blk_rem_p->size > 0) {
+		dma_unmap_single(mydev, ctx_info->hash_tail_dma_addr,
+				 blk_rem_p->size, DMA_TO_DEVICE);
+	}
+}
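+
+/*
+ * Call-sequence sketch (illustrative): the tail buffer is mapped only for
+ * the duration of a descriptor dispatch:
+ *
+ *	rc = ctxmgr_map2dev_hash_tail(ctx_info, mydev);
+ *	tail_size = ctxmgr_get_hash_blk_remainder_buf(ctx_info, &tail_dma);
+ *	(submit descriptor referencing tail_dma / tail_size)
+ *	ctxmgr_unmap2dev_hash_tail(ctx_info, mydev);
+ */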
+
+/**
+ * ctxmgr_set_ctx_state() - Set context state
+ * @ctx_info:	User context info structure
+ * @state:	State to set in context
+ *
+ * Returns void
+ */
+void ctxmgr_set_ctx_state(struct client_crypto_ctx_info *ctx_info,
+			  const enum host_ctx_state state)
+{
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	ctx_info->ctx_kptr->state = state;
+}
+
+/**
+ * ctxmgr_get_ctx_state() - Get context state
+ * @ctx_info:	 User context info structure
+ *
+ * Returns Current context state
+ */
+enum host_ctx_state ctxmgr_get_ctx_state(const struct client_crypto_ctx_info
+					 *ctx_info)
+{
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	return ctx_info->ctx_kptr->state;
+}
+
+/**
+ * ctxmgr_set_ctx_id() - Allocate unique ID for (initialized) user context
+ * @ctx_info:	 Client crypto context info structure
+ * @ctx_id:	 The unique ID allocated for given context
+ *
+ * Allocate unique ID for (initialized) user context
+ * (Assumes invoked within session mutex so no need for counter protection)
+ */
+void ctxmgr_set_ctx_id(struct client_crypto_ctx_info *ctx_info,
+		       const struct crypto_ctx_uid ctx_id)
+{
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+	pr_debug("ctx_id=0x%16llX for ctx@0x%p\n",
+		      ctx_id.addr, (ctx_info->user_ptr == NULL) ?
+		      (void *)ctx_info->ctx_kptr : (void *)ctx_info->user_ptr);
+#endif
+	memcpy(&ctx_info->ctx_kptr->uid, &ctx_id,
+		sizeof(struct crypto_ctx_uid));
+}
+
+/**
+ * ctxmgr_get_ctx_id() - Return the unique ID for current user context
+ * @ctx_info:	 User context info structure
+ *
+ * Returns Allocated ID (or CTX_ID_INVALID if none)
+ */
+struct crypto_ctx_uid ctxmgr_get_ctx_id(struct client_crypto_ctx_info *ctx_info)
+{
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	return ctx_info->ctx_kptr->uid;
+}
+
+/**
+ * ctxmgr_get_session_id() - Return the session ID of given context ID
+ * @ctx_info:
+ *
+ * Return the session ID of given context ID
+ * This may be used to validate ID and verify that it was not tampered
+ * in a manner that can allow access to a session of another process
+ * Returns u64
+ */
+u64 ctxmgr_get_session_id(struct client_crypto_ctx_info *ctx_info)
+{
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	/* The session ID is the address part (addr) of the context ID */
+	return ctx_info->ctx_kptr->uid.addr;
+}
+
+/**
+ * ctxmgr_get_alg_class() - Get algorithm class of context
+ * @ctx_info:	 User context info structure
+ *
+ * Returns Current algorithm class of context
+ */
+enum crypto_alg_class ctxmgr_get_alg_class(const struct client_crypto_ctx_info
+					   *ctx_info)
+{
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	return ctx_info->ctx_kptr->alg_class;
+}
+
+/**
+ * ctxmgr_get_crypto_blk_size() - Get the crypto-block length of given context
+ *					in octets
+ * @ctx_info:	 User context info structure
+ *
+ * Returns u32 Crypto-block size in bytes, 0 if invalid/unsupported alg.
+ */
+u32 ctxmgr_get_crypto_blk_size(struct client_crypto_ctx_info *ctx_info)
+{
+	u32 cblk_size = 0;
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	switch (ctx_info->ctx_kptr->alg_class) {
+
+	case ALG_CLASS_SYM_CIPHER:{
+			enum dxdi_sym_cipher_type cipher_type =
+			    ((struct host_crypto_ctx_sym_cipher *)
+			     ctx_info->ctx_kptr)->props.cipher_type;
+			if ((cipher_type >= _DXDI_SYMCIPHER_AES_FIRST) &&
+			    (cipher_type <= _DXDI_SYMCIPHER_AES_LAST)) {
+				cblk_size = SEP_AES_BLOCK_SIZE;
+			} else
+			    if (((cipher_type >= _DXDI_SYMCIPHER_DES_FIRST) &&
+				 (cipher_type <= _DXDI_SYMCIPHER_DES_LAST)) ||
+					    ((cipher_type >=
+					      _DXDI_SYMCIPHER_C2_FIRST) &&
+					     (cipher_type <=
+					      _DXDI_SYMCIPHER_C2_LAST))) {
+				/* DES and C2 have the same block size */
+				cblk_size = SEP_DES_BLOCK_SIZE;
+			} else {
+				pr_err("Invalid sym.cipher type %d",
+					    cipher_type);
+			}
+			break;	/*ALG_CLASS_SYM_CIPHER */
+		}
+
+	case ALG_CLASS_AUTH_ENC:{
+			enum dxdi_auth_enc_type ae_type =
+			    ((struct host_crypto_ctx_auth_enc *)
+			     ctx_info->ctx_kptr)->props.ae_type;
+			if (ae_type == DXDI_AUTHENC_AES_CCM)
+				cblk_size = SEP_AES_BLOCK_SIZE;
+			else
+				pr_err("Invalid auth.enc. type %d",
+					    ae_type);
+			break;
+		}
+
+	case ALG_CLASS_MAC:{
+			struct host_crypto_ctx_mac *ctx_p =
+			    ((struct host_crypto_ctx_mac *)ctx_info->ctx_kptr);
+			const enum dxdi_mac_type mac_type =
+			    ctx_p->props.mac_type;
+			switch (mac_type) {	/* switch for block size */
+			case DXDI_MAC_HMAC:
+				cblk_size =
+				    get_hash_block_size(ctx_p->props.
+							alg_specific.hmac.
+							hash_type);
+				break;
+			case DXDI_MAC_AES_CMAC:
+			case DXDI_MAC_AES_XCBC_MAC:
+			case DXDI_MAC_AES_MAC:
+				cblk_size = SEP_AES_BLOCK_SIZE;
+				break;
+			default:
+				pr_err("Invalid MAC type %d\n",
+					    ctx_p->props.mac_type);
+			}
+			break;	/* ALG_CLASS_MAC */
+		}
+
+	case ALG_CLASS_HASH:{
+			enum dxdi_hash_type hash_type =
+			    ((struct host_crypto_ctx_hash *)
+			     ctx_info->ctx_kptr)->hash_type;
+			cblk_size = get_hash_block_size(hash_type);
+			break;
+		}
+
+	default:
+		pr_err("Invalid algorithm class %d\n",
+			    ctx_info->ctx_kptr->alg_class);
+
+	}			/*switch alg_class */
+
+	return cblk_size;
+}
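+
+/*
+ * Usage sketch (illustrative): block-multiple validation can be built on
+ * this query, treating 0 as invalid/unsupported:
+ *
+ *	u32 blk_size = ctxmgr_get_crypto_blk_size(ctx_info);
+ *	if ((blk_size == 0) || !IS_MULT_OF(data_size, blk_size))
+ *		return -EINVAL;
+ */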
+
+/**
+ * ctxmgr_is_valid_adata_size() - Validate additional/associated data size
+ *				for auth/enc algorithms
+ * @ctx_info:
+ * @adata_size:
+ *
+ * Returns bool
+ */
+bool ctxmgr_is_valid_adata_size(struct client_crypto_ctx_info *ctx_info,
+				unsigned long adata_size)
+{
+	struct host_crypto_ctx_auth_enc *host_ctx_p =
+	    (struct host_crypto_ctx_auth_enc *)ctx_info->ctx_kptr;
+
+	if (ctx_info->ctx_kptr->alg_class != ALG_CLASS_AUTH_ENC)
+		return false;
+
+	if ((adata_size != host_ctx_p->props.adata_size) || (adata_size == 0))
+		return false;
+
+	return true;
+}
+
+/**
+ * ctxmgr_is_valid_size() - Validate given data unit for given
+ *					alg./mode
+ * @ctx_info:
+ * @data_unit_size:
+ * @is_finalize:
+ *
+ * Returns bool true if valid.
+ */
+bool ctxmgr_is_valid_size(struct client_crypto_ctx_info *ctx_info,
+				    unsigned long data_unit_size,
+				    bool is_finalize)
+{
+	if (!is_finalize && (data_unit_size == 0)) {
+		/* None allow 0 data for intermediate processing blocks */
+		pr_err("Given 0 B for intermediate processing!");
+		return false;
+	}
+
+	switch (ctx_info->ctx_kptr->alg_class) {
+
+	case ALG_CLASS_SYM_CIPHER:{
+		struct host_crypto_ctx_sym_cipher *host_ctx_p =
+		    (struct host_crypto_ctx_sym_cipher *)ctx_info->
+		    ctx_kptr;
+		struct sep_ctx_cipher *aes_ctx_p;
+
+		switch (host_ctx_p->props.cipher_type) {
+		case DXDI_SYMCIPHER_AES_XTS:
+			if (host_ctx_p->props.alg_specific.aes_xts.
+			    data_unit_size == 0) {
+				/* Initialize on first data unit if not
+				   provided by the user */
+				host_ctx_p->props.alg_specific.aes_xts.
+				    data_unit_size = data_unit_size;
+				aes_ctx_p = (struct sep_ctx_cipher *)
+				    &(host_ctx_p->sep_ctx);
+				aes_ctx_p->data_unit_size =
+				    cpu_to_le32(host_ctx_p->
+						props.
+						alg_specific.aes_xts.
+						data_unit_size);
+				break;
+			} else if (((is_finalize) &&
+				    (data_unit_size > 0)) ||
+					    (!is_finalize)) {
+				/* finalize which is not empty must be
+				   consistent with prev. data unit */
+				if (host_ctx_p->props.
+				    alg_specific.aes_xts.
+				    data_unit_size != data_unit_size) {
+					pr_err("Data unit mismatch. was %u. now %lu.\n",
+					       host_ctx_p->props.alg_specific.
+					       aes_xts.data_unit_size,
+					       data_unit_size);
+					return false;
+				}
+			}
+			break;
+		case DXDI_SYMCIPHER_AES_CTR:
+			if (!is_finalize) {	/* !finalize */
+				if (!IS_MULT_OF(data_unit_size,
+						SEP_AES_BLOCK_SIZE)) {
+					pr_err(
+						"Data unit size (%lu) is not AES block multiple\n",
+						data_unit_size);
+					return false;
+				}
+			}
+			break;
+		case DXDI_SYMCIPHER_AES_ECB:
+		case DXDI_SYMCIPHER_AES_CBC:
+			if (!IS_MULT_OF
+			    (data_unit_size, SEP_AES_BLOCK_SIZE)) {
+				pr_err(
+					"Data unit size (%lu) is not AES block multiple\n",
+					data_unit_size);
+				return false;
+			}
+			break;
+		case DXDI_SYMCIPHER_DES_ECB:
+		case DXDI_SYMCIPHER_DES_CBC:
+			if (!IS_MULT_OF
+			    (data_unit_size, SEP_DES_BLOCK_SIZE)) {
+				pr_err(
+					"Data unit size (%lu) is not DES block multiple\n",
+					data_unit_size);
+				return false;
+			}
+			break;
+		case DXDI_SYMCIPHER_C2_ECB:
+		case DXDI_SYMCIPHER_C2_CBC:
+			if (!IS_MULT_OF
+			    (data_unit_size, SEP_C2_BLOCK_SIZE)) {
+				pr_err(
+					"Data unit size (%lu) is not C2 block multiple\n",
+					data_unit_size);
+				return false;
+			}
+			break;
+		default:
+			pr_err("Invalid cipher type %d\n",
+				    host_ctx_p->props.cipher_type);
+			return false;
+		}
+
+		break;	/*ALG_CLASS_SYM_CIPHER */
+	}
+
+	case ALG_CLASS_AUTH_ENC:{
+		enum dxdi_auth_enc_type ae_type =
+		    ((struct host_crypto_ctx_auth_enc *)
+		     ctx_info->ctx_kptr)->props.ae_type;
+		if (ae_type == DXDI_AUTHENC_AES_CCM) {
+			if (!is_finalize) {	/* !finalize */
+				if (!IS_MULT_OF(data_unit_size,
+						SEP_AES_BLOCK_SIZE)) {
+					pr_err(
+						    "Data unit size (%lu) is not AES block multiple\n",
+						    data_unit_size);
+					return false;
+				}
+			}
+		} else {
+			pr_err("Invalid auth.enc. type %d",
+				    ae_type);
+			return false;
+		}
+		break;
+	}
+
+	case ALG_CLASS_MAC:{
+		struct host_crypto_ctx_mac *ctx_p =
+		    ((struct host_crypto_ctx_mac *)ctx_info->ctx_kptr);
+		const enum dxdi_mac_type mac_type =
+		    ctx_p->props.mac_type;
+		switch (mac_type) {	/* switch for block size */
+		case DXDI_MAC_HMAC:
+			break;	/* Any data unit size is allowed */
+		case DXDI_MAC_AES_CMAC:
+		case DXDI_MAC_AES_XCBC_MAC:
+			if (!is_finalize) {
+				if (!IS_MULT_OF(data_unit_size,
+						SEP_AES_BLOCK_SIZE)) {
+					pr_err(
+						    "Data unit size (%lu) is not AES block multiple\n",
+						    data_unit_size);
+					return false;
+				}
+			}
+			break;
+		case DXDI_MAC_AES_MAC:
+			if (!IS_MULT_OF
+			    (data_unit_size, SEP_AES_BLOCK_SIZE)) {
+				pr_err(
+					    "Data unit size (%lu) is not AES block multiple\n",
+					    data_unit_size);
+				return false;
+			}
+			break;
+		default:
+			pr_err("Invalid MAC type %d\n",
+				    ctx_p->props.mac_type);
+		}
+
+		ctx_p->client_data_size += data_unit_size;
+		break;	/* ALG_CLASS_MAC */
+	}
+
+	case ALG_CLASS_HASH:{
+		break;	/* Any data unit size is allowed for hash */
+	}
+
+	default:
+		pr_err("Invalid algorithm class %d\n",
+			    ctx_info->ctx_kptr->alg_class);
+
+	}			/*switch alg_class */
+
+	return true;		/* passed validations */
+}
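+
+/*
+ * Usage sketch (illustrative): data processing entry points are expected
+ * to reject an invalid data unit before any DMA mapping takes place:
+ *
+ *	if (!ctxmgr_is_valid_size(ctx_info, data_unit_size, is_finalize))
+ *		return -EINVAL;
+ */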
+
+/**
+ * ctxmgr_get_sym_cipher_type() - Returns the sym cipher specific type.
+ * @ctx_info:	 The context info object of the sym cipher alg.
+ *
+ * Returns enum dxdi_sym_cipher_type The sym cipher type.
+ */
+enum dxdi_sym_cipher_type ctxmgr_get_sym_cipher_type(const struct
+						     client_crypto_ctx_info
+						     *ctx_info)
+{
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	return ((struct host_crypto_ctx_sym_cipher *)
+		ctx_info->ctx_kptr)->props.cipher_type;
+}
+
+/**
+ * ctxmgr_get_mac_type() - Returns the mac specific type.
+ * @ctx_info:	 The context info object of the mac alg.
+ *
+ * Returns enum dxdi_mac_type The mac type.
+ */
+enum dxdi_mac_type ctxmgr_get_mac_type(const struct client_crypto_ctx_info
+				       *ctx_info)
+{
+	struct host_crypto_ctx_mac *mac_ctx =
+	    (struct host_crypto_ctx_mac *)ctx_info->ctx_kptr;
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	if (mac_ctx->alg_class == ALG_CLASS_MAC)
+		return mac_ctx->props.mac_type;
+	else
+		return DXDI_MAC_NONE;
+}
+
+/**
+ * ctxmgr_get_hash_type() - Returns the hash specific type.
+ * @ctx_info:	 The context info object of the hash alg.
+ *
+ * Returns dxdi_hash_type The hash type.
+ */
+enum dxdi_hash_type ctxmgr_get_hash_type(const struct client_crypto_ctx_info
+					 *ctx_info)
+{
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	return ((struct host_crypto_ctx_hash *)ctx_info->ctx_kptr)->hash_type;
+}
+
+/**
+ * ctxmgr_save_hash_blk_remainder() - Save hash block tail data in given
+ *	context. The data is taken from the save4next chunk of given client
+ *	buffer.
+ * @ctx_info:	 Client context info structure (HASH's or HMAC's)
+ * @client_dma_buf_p:	A client DMA buffer object. Data is taken from the
+ *			save4next chunk of this buffer.
+ * @append_data:	When true, given data is appended to existing
+ *
+ * Returns 0 on success
+ */
+int ctxmgr_save_hash_blk_remainder(struct client_crypto_ctx_info *ctx_info,
+				   struct client_dma_buffer *client_dma_buf_p,
+				   bool append_data)
+{
+	u16 copy_offset;
+	int rc;
+	struct hash_block_remainder *blk_rem_p = get_blk_rem_buf(ctx_info);
+
+	copy_offset = append_data ? blk_rem_p->size : 0;
+	rc = llimgr_copy_from_client_buf_save4next(client_dma_buf_p,
+						   blk_rem_p->data +
+						   copy_offset,
+						   HASH_BLK_SIZE_MAX -
+						   copy_offset);
+	if (likely(rc >= 0)) {	/* rc is the num. of bytes copied */
+		blk_rem_p->size = copy_offset + rc;
+		pr_debug("Accumulated %u B at offset %u\n",
+			      rc, copy_offset);
+		rc = 0;	/* Caller of this function expects 0 on success */
+	} else {
+		pr_err("Failed copying hash block tail from user\n");
+	}
+	return rc;
+}
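+
+/*
+ * Flow sketch (illustrative): on a hash update, full blocks are handed to
+ * SeP while the sub-block tail is accumulated here for the next update;
+ * append_data=true preserves a previously saved remainder:
+ *
+ *	rc = ctxmgr_save_hash_blk_remainder(ctx_info, client_dma_buf_p, true);
+ */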
+
+/**
+ * ctxmgr_get_hash_blk_remainder_buf() - Get DMA info for hash block remainder
+ *	buffer from given context
+ * @ctx_info:			User context info structure
+ * @hash_blk_remainder_dma_p:	Returned tail buffer DMA address
+ *
+ * Note: This function must be invoked only when tail_buf is mapped2dev
+ *	(using ctxmgr_map2dev_hash_tail)
+ * Returns u16 Number of valid bytes/octets in tail buffer
+ */
+u16 ctxmgr_get_hash_blk_remainder_buf(struct client_crypto_ctx_info *ctx_info,
+				      dma_addr_t *hash_blk_remainder_dma_p)
+{
+	struct hash_block_remainder *blk_rem_p = get_blk_rem_buf(ctx_info);
+
+	*hash_blk_remainder_dma_p = ctx_info->hash_tail_dma_addr;
+	return blk_rem_p->size;
+}
+
+/**
+ * ctxmgr_get_digest_or_mac() - Get the digest/MAC result when applicable
+ * @ctx_info:		User context info structure
+ * @digest_or_mac:	Pointer to digest/MAC buffer
+ *
+ * Returns the digest/MAC size
+ */
+u32 ctxmgr_get_digest_or_mac(struct client_crypto_ctx_info *ctx_info,
+				  u8 *digest_or_mac)
+{
+	u8 *digest_mac_source;
+	u32 digest_or_mac_size;
+
+	digest_or_mac_size = ctxmgr_get_digest_or_mac_ptr(ctx_info,
+							  &digest_mac_source);
+	if (digest_mac_source != NULL)
+		memcpy(digest_or_mac, digest_mac_source, digest_or_mac_size);
+	return digest_or_mac_size;
+}
+
+/**
+ * ctxmgr_get_digest_or_mac_ptr() - Get the digest/MAC pointer in SeP context
+ * @ctx_info:		User context info structure
+ * @digest_or_mac_pp:	Returned pointer to digest/MAC buffer
+ *
+ * Returns the digest/MAC size
+ * This function may be used by the caller to reference the result in place
+ * instead of always copying it
+ */
+u32 ctxmgr_get_digest_or_mac_ptr(struct client_crypto_ctx_info *ctx_info,
+				      u8 **digest_or_mac_pp)
+{
+	struct sep_ctx_cache_entry *sep_ctx_p;
+	u32 digest_or_mac_size = 0;
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	*digest_or_mac_pp = NULL;	/* default */
+	sep_ctx_p = ctx_info->sep_ctx_kptr;
+	switch (le32_to_cpu(sep_ctx_p->alg)) {
+	case SEP_CRYPTO_ALG_HMAC:
+	/* HMAC context holds the MAC (digest) in the same place as
+	   HASH context */
+	case SEP_CRYPTO_ALG_HASH:{
+		struct sep_ctx_hash *hash_ctx_p =
+		    (struct sep_ctx_hash *)sep_ctx_p;
+		digest_or_mac_size =
+		    get_hash_digest_size(le32_to_cpu(hash_ctx_p->mode));
+		*digest_or_mac_pp = hash_ctx_p->digest;
+		break;
+	}
+	case SEP_CRYPTO_ALG_AES:{
+		struct sep_ctx_cipher *aes_ctx_p =
+		    (struct sep_ctx_cipher *)sep_ctx_p;
+		switch (le32_to_cpu(aes_ctx_p->mode)) {
+		case SEP_CIPHER_CBC_MAC:
+		case SEP_CIPHER_XCBC_MAC:
+		case SEP_CIPHER_CMAC:
+			digest_or_mac_size = SEP_AES_BLOCK_SIZE;
+		/* The AES MACs are returned in the block_state field */
+			*digest_or_mac_pp = aes_ctx_p->block_state;
+			break;
+		default:
+			break;	/* No MAC for others */
+		}
+		break;
+	}
+	case SEP_CRYPTO_ALG_AEAD:{
+		struct sep_ctx_aead *aead_ctx_p =
+		    (struct sep_ctx_aead *)sep_ctx_p;
+
+		if (le32_to_cpu(aead_ctx_p->mode) == SEP_CIPHER_CCM) {
+			digest_or_mac_size =
+			    le32_to_cpu(aead_ctx_p->tag_size);
+			*digest_or_mac_pp = aead_ctx_p->mac_state;
+		} else {
+			pr_err(
+				    "Invalid mode (%d) for SEP_CRYPTO_ALG_AEAD\n",
+				    le32_to_cpu(aead_ctx_p->mode));
+		}
+		break;
+	}
+	default:
+		;		/* No MAC/digest for the other algorithms */
+	}
+
+	return digest_or_mac_size;
+}
+
+static int set_sep_ctx_alg_mode(struct client_crypto_ctx_info *ctx_info,
+				const enum dxdi_sym_cipher_type cipher_type)
+{
+	struct sep_ctx_cipher *aes_ctx_p;
+	struct sep_ctx_cipher *des_ctx_p;
+	struct sep_ctx_c2 *c2_ctx_p;
+
+	if (ctx_info == NULL) {
+		pr_err("Context not mapped\n");
+		return -EINVAL;
+	}
+
+	aes_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+	des_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+	c2_ctx_p = (struct sep_ctx_c2 *)ctx_info->sep_ctx_kptr;
+
+	switch (cipher_type) {
+	case DXDI_SYMCIPHER_AES_ECB:
+	case DXDI_SYMCIPHER_AES_CBC:
+	case DXDI_SYMCIPHER_AES_CTR:
+	case DXDI_SYMCIPHER_AES_XTS:
+		aes_ctx_p->alg = cpu_to_le32(SEP_CRYPTO_ALG_AES);
+		break;
+	case DXDI_SYMCIPHER_DES_ECB:
+	case DXDI_SYMCIPHER_DES_CBC:
+		des_ctx_p->alg = cpu_to_le32(SEP_CRYPTO_ALG_DES);
+		break;
+	case DXDI_SYMCIPHER_C2_ECB:
+	case DXDI_SYMCIPHER_C2_CBC:
+		c2_ctx_p->alg = cpu_to_le32(SEP_CRYPTO_ALG_C2);
+		break;
+	case DXDI_SYMCIPHER_RC4:
+		return -ENOSYS;	/* Not supported via this API (only MSGs) */
+	default:
+		return -EINVAL;
+	}
+
+	/* mode specific initializations */
+	switch (cipher_type) {
+	case DXDI_SYMCIPHER_AES_ECB:
+		aes_ctx_p->mode = cpu_to_le32(SEP_CIPHER_ECB);
+		break;
+	case DXDI_SYMCIPHER_AES_CBC:
+		aes_ctx_p->mode = cpu_to_le32(SEP_CIPHER_CBC);
+		break;
+	case DXDI_SYMCIPHER_AES_CTR:
+		aes_ctx_p->mode = cpu_to_le32(SEP_CIPHER_CTR);
+		break;
+	case DXDI_SYMCIPHER_AES_XTS:
+		aes_ctx_p->mode = cpu_to_le32(SEP_CIPHER_XTS);
+		break;
+	case DXDI_SYMCIPHER_DES_ECB:
+		des_ctx_p->mode = cpu_to_le32(SEP_CIPHER_ECB);
+		break;
+	case DXDI_SYMCIPHER_DES_CBC:
+		des_ctx_p->mode = cpu_to_le32(SEP_CIPHER_CBC);
+		break;
+	case DXDI_SYMCIPHER_C2_ECB:
+		c2_ctx_p->mode = cpu_to_le32(SEP_C2_ECB);
+		break;
+	case DXDI_SYMCIPHER_C2_CBC:
+		c2_ctx_p->mode = cpu_to_le32(SEP_C2_CBC);
+		break;
+	case DXDI_SYMCIPHER_RC4:
+		return -ENOSYS;	/* Not supported via this API (only RPC) */
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * ctxmgr_set_symcipher_iv_user() - Set IV of symcipher context given in
+ *					user space pointer
+ * @user_ctx_ptr:	 A user space pointer to the host context
+ * @iv_ptr:	 The IV to set
+ *
+ * Returns int
+ */
+int ctxmgr_set_symcipher_iv_user(u32 __user *user_ctx_ptr,
+				 u8 *iv_ptr)
+{
+	struct host_crypto_ctx_sym_cipher __user *host_ctx_p =
+	    (struct host_crypto_ctx_sym_cipher __user *)user_ctx_ptr;
+	struct sep_ctx_cipher *aes_ctx_p =
+	    (struct sep_ctx_cipher *)&host_ctx_p->sep_ctx;
+	enum dxdi_sym_cipher_type cipher_type;
+
+	/* Copy cipher type from user context */
+	if (copy_from_user(&cipher_type, &host_ctx_p->props.cipher_type,
+			   sizeof(enum dxdi_sym_cipher_type))) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	if ((cipher_type != DXDI_SYMCIPHER_AES_CBC) &&
+	    (cipher_type != DXDI_SYMCIPHER_AES_CTR) &&
+	    (cipher_type != DXDI_SYMCIPHER_AES_XTS)) {
+		return -EINVAL;
+	}
+
+	if (copy_to_user(aes_ctx_p->block_state, iv_ptr, SEP_AES_IV_SIZE) ||
+	    copy_to_user(host_ctx_p->props.alg_specific.aes_cbc.iv, iv_ptr,
+		     SEP_AES_IV_SIZE)) {
+		pr_err("Failed writing input parameters");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * ctxmgr_get_symcipher_iv_user() - Read current "IV"
+ * @user_ctx_ptr:	 A user space pointer to the host context
+ * @iv_ptr:	 Where to return the read IV
+ *
+ * Read current "IV" (block state - not the actual set IV during initialization)
+ * This function works directly over a user space context
+ * Returns int
+ */
+int ctxmgr_get_symcipher_iv_user(u32 __user *user_ctx_ptr,
+				 u8 *iv_ptr)
+{
+	struct host_crypto_ctx_sym_cipher __user *host_ctx_p =
+	    (struct host_crypto_ctx_sym_cipher __user *)user_ctx_ptr;
+	struct sep_ctx_cipher *aes_ctx_p =
+	    (struct sep_ctx_cipher *)&host_ctx_p->sep_ctx;
+	enum dxdi_sym_cipher_type cipher_type;
+
+	/* Copy cipher type from user context */
+	if (copy_from_user(&cipher_type, &host_ctx_p->props.cipher_type,
+			   sizeof(enum dxdi_sym_cipher_type))) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	if ((cipher_type != DXDI_SYMCIPHER_AES_CBC) &&
+	    (cipher_type != DXDI_SYMCIPHER_AES_CTR) &&
+	    (cipher_type != DXDI_SYMCIPHER_AES_XTS)) {
+		return -EINVAL;
+	}
+
+	if (copy_from_user(iv_ptr, aes_ctx_p->block_state, SEP_AES_IV_SIZE)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * get_symcipher_iv_info() - Return the IV location in given context
+ *				(based on symcipher type)
+ * @ctx_info:
+ * @host_ctx_iv:	Returned IV pointer in host props field
+ * @sep_ctx_iv:		Returned IV state block in sep context
+ * @iv_size:	Size of IV in bytes (0 if IV is not applicable for this alg.)
+ *
+ */
+static void get_symcipher_iv_info(struct client_crypto_ctx_info *ctx_info,
+				  u8 **host_ctx_iv, u8 **sep_ctx_iv,
+				  unsigned long *iv_size)
+{
+	struct host_crypto_ctx_sym_cipher *host_ctx_p =
+	    (struct host_crypto_ctx_sym_cipher *)ctx_info->ctx_kptr;
+
+	switch (host_ctx_p->props.cipher_type) {
+	case DXDI_SYMCIPHER_AES_CBC:
+	case DXDI_SYMCIPHER_AES_CTR:
+	case DXDI_SYMCIPHER_AES_XTS:
+		*sep_ctx_iv = ((struct sep_ctx_cipher *)
+			       ctx_info->sep_ctx_kptr)->block_state;
+		*iv_size = SEP_AES_IV_SIZE;
+		break;
+	case DXDI_SYMCIPHER_DES_CBC:
+		*sep_ctx_iv = ((struct sep_ctx_cipher *)
+			       ctx_info->sep_ctx_kptr)->block_state;
+		*iv_size = SEP_DES_IV_SIZE;
+		break;
+	default:
+		*sep_ctx_iv = NULL;
+		*iv_size = 0;
+	}
+
+	switch (host_ctx_p->props.cipher_type) {
+	case DXDI_SYMCIPHER_AES_CBC:
+		*host_ctx_iv = host_ctx_p->props.alg_specific.aes_cbc.iv;
+		break;
+	case DXDI_SYMCIPHER_AES_CTR:
+		*host_ctx_iv = host_ctx_p->props.alg_specific.aes_ctr.cntr;
+		break;
+	case DXDI_SYMCIPHER_AES_XTS:
+		*host_ctx_iv =
+		    host_ctx_p->props.alg_specific.aes_xts.init_tweak;
+		break;
+	case DXDI_SYMCIPHER_DES_CBC:
+		*host_ctx_iv = host_ctx_p->props.alg_specific.des_cbc.iv;
+		break;
+	default:
+		*host_ctx_iv = NULL;
+	}
+
+}
+
+/**
+ * ctxmgr_set_symcipher_iv() - Set IV for given block symcipher algorithm
+ * @ctx_info:	Context to update
+ * @iv:		New IV
+ *
+ * Returns int 0 if changed IV, -EINVAL for error
+ * (given cipher type does not have IV)
+ */
+int ctxmgr_set_symcipher_iv(struct client_crypto_ctx_info *ctx_info,
+			    u8 *iv)
+{
+	u8 *host_ctx_iv;
+	u8 *sep_ctx_iv;
+	unsigned long iv_size = 0;
+
+	get_symcipher_iv_info(ctx_info, &host_ctx_iv, &sep_ctx_iv, &iv_size);
+	if (iv_size > 0 && host_ctx_iv != NULL && sep_ctx_iv != NULL) {
+		memcpy(sep_ctx_iv, iv, iv_size);
+		memcpy(host_ctx_iv, iv, iv_size);
+	}
+
+	return (iv_size > 0) ? 0 : -EINVAL;
+}
+
+/**
+ * ctxmgr_get_symcipher_iv() - Return given cipher IV
+ * @ctx_info:	Context to query
+ * @iv_user:	The IV given by the user on last ctxmgr_set_symcipher_iv
+ * @iv_current:	The current IV state block
+ * @iv_size_p:	[I/O] The given buffers size and returns actual IV size
+ *
+ * Return given cipher IV - Original IV given by user and current state "IV"
+ * The given IV buffers must be large enough to accommodate the IVs
+ * Returns int 0 on success, -ENOMEM if given iv_size is too small
+ */
+int ctxmgr_get_symcipher_iv(struct client_crypto_ctx_info *ctx_info,
+			    u8 *iv_user, u8 *iv_current,
+			    u8 *iv_size_p)
+{
+	u8 *host_ctx_iv;
+	u8 *sep_ctx_iv;
+	unsigned long iv_size;
+	int rc = 0;
+
+	get_symcipher_iv_info(ctx_info, &host_ctx_iv, &sep_ctx_iv, &iv_size);
+	if (iv_size > 0) {
+		if (*iv_size_p < iv_size) {
+			rc = -ENOMEM;
+		} else {
+			if (iv_current != NULL && sep_ctx_iv != NULL)
+				memcpy(iv_current, sep_ctx_iv, iv_size);
+			if (iv_user != NULL && host_ctx_iv != NULL)
+				memcpy(iv_user, host_ctx_iv, iv_size);
+		}
+	}
+
+	/* Always return IV size for informational purposes */
+	*iv_size_p = iv_size;
+	return rc;
+}
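+
+/*
+ * Usage sketch (illustrative): with SEP_AES_IV_SIZE-byte buffers both the
+ * user-set IV and the running block-state "IV" can be retrieved:
+ *
+ *	u8 iv_user[SEP_AES_IV_SIZE], iv_cur[SEP_AES_IV_SIZE];
+ *	u8 iv_size = sizeof(iv_user);
+ *	rc = ctxmgr_get_symcipher_iv(ctx_info, iv_user, iv_cur, &iv_size);
+ */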
+
+/**
+ * ctxmgr_set_symcipher_direction() - Set the operation direction for given
+ *					symcipher context
+ * @ctx_info:		Context to update
+ * @dxdi_direction:	Requested cipher direction
+ *
+ * Returns int 0 on success
+ */
+int ctxmgr_set_symcipher_direction(struct client_crypto_ctx_info *ctx_info,
+				   enum dxdi_cipher_direction dxdi_direction)
+{
+	struct host_crypto_ctx_sym_cipher *host_ctx_p =
+	    (struct host_crypto_ctx_sym_cipher *)ctx_info->ctx_kptr;
+	enum sep_crypto_direction sep_direction;
+	struct sep_ctx_cipher *aes_ctx_p;
+	struct sep_ctx_cipher *des_ctx_p;
+	struct sep_ctx_c2 *c2_ctx_p;
+
+	/* Translate direction from driver ABI to SeP ABI */
+	if (dxdi_direction == DXDI_CDIR_ENC) {
+		sep_direction = SEP_CRYPTO_DIRECTION_ENCRYPT;
+	} else if (dxdi_direction == DXDI_CDIR_DEC) {
+		sep_direction = SEP_CRYPTO_DIRECTION_DECRYPT;
+	} else {
+		pr_err("Invalid direction=%d\n", dxdi_direction);
+		return -EINVAL;
+	}
+
+	switch (host_ctx_p->props.cipher_type) {
+	case DXDI_SYMCIPHER_AES_ECB:
+	case DXDI_SYMCIPHER_AES_CBC:
+	case DXDI_SYMCIPHER_AES_CTR:
+	case DXDI_SYMCIPHER_AES_XTS:
+		aes_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		aes_ctx_p->direction = cpu_to_le32(sep_direction);
+		break;
+
+	case DXDI_SYMCIPHER_DES_ECB:
+	case DXDI_SYMCIPHER_DES_CBC:
+		des_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		des_ctx_p->direction = cpu_to_le32(sep_direction);
+		break;
+
+	case DXDI_SYMCIPHER_C2_ECB:
+	case DXDI_SYMCIPHER_C2_CBC:
+		c2_ctx_p = (struct sep_ctx_c2 *)ctx_info->sep_ctx_kptr;
+		c2_ctx_p->direction = cpu_to_le32(sep_direction);
+		break;
+
+	case DXDI_SYMCIPHER_RC4:
+		pr_err("Invoked for RC4!\n");
+		return -ENOSYS;	/* Not supported via this API (only RPC) */
+	default:
+		pr_err("Invalid symcipher type %d\n",
+			    host_ctx_p->props.cipher_type);
+		return -EINVAL;
+	}
+
+	host_ctx_p->props.direction = dxdi_direction;
+
+	return 0;
+}
+
+/**
+ * ctxmgr_get_symcipher_direction() - Return the operation direction of given
+ *					symcipher context
+ * @ctx_info:	Context to query
+ *
+ * Returns enum dxdi_cipher_direction (<0 on error)
+ */
+enum dxdi_cipher_direction ctxmgr_get_symcipher_direction(struct
+							  client_crypto_ctx_info
+							  *ctx_info)
+{
+	struct host_crypto_ctx_sym_cipher *host_ctx_p =
+	    (struct host_crypto_ctx_sym_cipher *)ctx_info->ctx_kptr;
+	enum sep_crypto_direction sep_direction;
+	enum dxdi_cipher_direction dxdi_direction;
+	struct sep_ctx_cipher *aes_ctx_p;
+	struct sep_ctx_cipher *des_ctx_p;
+	struct sep_ctx_c2 *c2_ctx_p;
+
+	switch (host_ctx_p->props.cipher_type) {
+	case DXDI_SYMCIPHER_AES_ECB:
+	case DXDI_SYMCIPHER_AES_CBC:
+	case DXDI_SYMCIPHER_AES_CTR:
+	case DXDI_SYMCIPHER_AES_XTS:
+		aes_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		sep_direction = le32_to_cpu(aes_ctx_p->direction);
+		break;
+
+	case DXDI_SYMCIPHER_DES_ECB:
+	case DXDI_SYMCIPHER_DES_CBC:
+		des_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		sep_direction = le32_to_cpu(des_ctx_p->direction);
+		break;
+
+	case DXDI_SYMCIPHER_C2_ECB:
+	case DXDI_SYMCIPHER_C2_CBC:
+		c2_ctx_p = (struct sep_ctx_c2 *)ctx_info->sep_ctx_kptr;
+		sep_direction = le32_to_cpu(c2_ctx_p->direction);
+		break;
+
+	case DXDI_SYMCIPHER_RC4:
+		pr_err("Invoked for RC4!\n");
+		return -ENOSYS;	/* Not supported via this API (only RPC) */
+	default:
+		pr_err("Invalid symcipher type %d\n",
+			    host_ctx_p->props.cipher_type);
+		return -EINVAL;
+	}
+
+	/* Translate direction from SeP ABI to driver ABI */
+	if (sep_direction == SEP_CRYPTO_DIRECTION_ENCRYPT) {
+		dxdi_direction = DXDI_CDIR_ENC;
+	} else if (sep_direction == SEP_CRYPTO_DIRECTION_DECRYPT) {
+		dxdi_direction = DXDI_CDIR_DEC;
+	} else {
+		pr_err("Invalid (sep) direction=%d\n", sep_direction);
+		dxdi_direction = -EINVAL;
+	}
+
+	return dxdi_direction;
+}
+
+/**
+ * is_weak_des_key() - Validate DES weak keys per RFC2451 (section 2.3)
+ * @key:	The key to check
+ * @keylen:	8, 16 or 24 bytes
+ *
+ * Weak key validation based on DX_CRYPTO_DES.c of cc52_crypto.
+ * Returns bool "true" for weak keys
+ */
+static bool is_weak_des_key(const u8 *key, unsigned int keylen)
+{
+	u32 n, w;
+	u64 *k1, *k2, *k3;	/* For 3DES/2DES checks */
+
+	if (keylen > 8) { /* For 3DES/2DES only validate no key repetition */
+		k1 = (u64 *)key;
+		k2 = k1 + 1;
+		if (*k1 == *k2)
+			return true;
+		if (keylen > 16) {	/* 3DES */
+			k3 = k2 + 1;
+			if (*k2 == *k3)
+				return true;
+		}
+	}
+
+	/* Only for single-DES, check weak keys */
+	n = des_key_parity[key[0]];
+	n <<= 4;
+	n |= des_key_parity[key[1]];
+	n <<= 4;
+	n |= des_key_parity[key[2]];
+	n <<= 4;
+	n |= des_key_parity[key[3]];
+	n <<= 4;
+	n |= des_key_parity[key[4]];
+	n <<= 4;
+	n |= des_key_parity[key[5]];
+	n <<= 4;
+	n |= des_key_parity[key[6]];
+	n <<= 4;
+	n |= des_key_parity[key[7]];
+	w = 0x88888888L;
+
+	/* 1 in 10^10 keys passes this test */
+	if (!((n - (w >> 3)) & w)) {
+		switch (n) {
+		case 0x11111111:
+		case 0x13131212:
+		case 0x14141515:
+		case 0x16161616:
+		case 0x31312121:
+		case 0x33332222:
+		case 0x34342525:
+		case 0x36362626:
+		case 0x41415151:
+		case 0x43435252:
+		case 0x44445555:
+		case 0x46465656:
+		case 0x61616161:
+		case 0x63636262:
+		case 0x64646565:
+		case 0x66666666:
+			return true;
+		}
+	}
+	return false;
+}
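+
+/*
+ * Worked example: for the classic weak key of all-0x01 bytes, each byte
+ * indexes des_key_parity[0x01] == 1, so n accumulates to 0x11111111;
+ * this passes the (n - (w >> 3)) & w pre-filter, matches the first switch
+ * case, and the key is rejected as weak.
+ */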
+
+/**
+ * ctxmgr_set_symcipher_key() - Set a symcipher context key
+ * @ctx_info:	Context to update
+ * @key_size:	Size of key in bytes
+ * @key:	New key pointer
+ *
+ * Set a symcipher context key
+ * After invoking this function the context should be reinitialized by SeP
+ * (set its state to "partial init" if not done in this sequence)
+ * Returns int 0 on success, -EINVAL Invalid key len, -EPERM Forbidden/weak key
+ */
+int ctxmgr_set_symcipher_key(struct client_crypto_ctx_info *ctx_info,
+			     u8 key_size, const u8 *key)
+{
+	struct sep_ctx_cipher *aes_ctx_p = NULL;
+	struct sep_ctx_cipher *des_ctx_p = NULL;
+	struct sep_ctx_c2 *c2_ctx_p = NULL;
+	struct dxdi_sym_cipher_props *props =
+	    &((struct host_crypto_ctx_sym_cipher *)ctx_info->ctx_kptr)->props;
+
+	/* Update the respective sep context fields if valid */
+	switch (props->cipher_type) {
+	case DXDI_SYMCIPHER_AES_ECB:
+	case DXDI_SYMCIPHER_AES_CBC:
+	case DXDI_SYMCIPHER_AES_CTR:
+	case DXDI_SYMCIPHER_AES_XTS:
+		aes_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		/* Validate key size before copying */
+		if (props->cipher_type == DXDI_SYMCIPHER_AES_XTS) {
+			/* XTS has two keys of either 128b or 256b */
+			if ((key_size != 32) && (key_size != 64)) {
+				pr_err(
+					"Invalid key size for AES-XTS (%u bits)\n",
+					key_size * 8);
+				return -EINVAL;
+			}
+			/* Divide by two (we have two keys of the same size) */
+			key_size >>= 1;
+			/* copy second half of the double-key as XEX-key */
+			memcpy(aes_ctx_p->xex_key, key + key_size, key_size);
+			/* Always clear data_unit_size on key change
+			   (Assumes new data source with possibly different
+			   data unit size). The actual data unit size
+			   would be concluded on the next data processing. */
+			props->alg_specific.aes_xts.data_unit_size = 0;
+			aes_ctx_p->data_unit_size = cpu_to_le32(0);
+		} else {	/* AES engine support 128b/192b/256b keys */
+			if ((key_size != 16) &&
+			    (key_size != 24) && (key_size != 32)) {
+				pr_err(
+					"Invalid key size for AES (%u bits)\n",
+					key_size * 8);
+				return -EINVAL;
+			}
+		}
+		memcpy(aes_ctx_p->key, key, key_size);
+		aes_ctx_p->key_size = cpu_to_le32(key_size);
+		break;
+
+	case DXDI_SYMCIPHER_DES_ECB:
+	case DXDI_SYMCIPHER_DES_CBC:
+		if (is_weak_des_key(key, key_size)) {
+			pr_info("Weak DES key.\n");
+			return -EPERM;
+		}
+		des_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		des_ctx_p->key_size = cpu_to_le32(key_size);
+		if ((key_size != 8) && (key_size != 16) && (key_size != 24)) {
+			/* Avoid copying a key too large */
+			pr_err("Invalid key size for DES (%u bits)\n",
+				    key_size * 8);
+			return -EINVAL;
+		}
+		memcpy(des_ctx_p->key, key, key_size);
+		break;
+
+	case DXDI_SYMCIPHER_C2_ECB:
+	case DXDI_SYMCIPHER_C2_CBC:
+		c2_ctx_p = (struct sep_ctx_c2 *)ctx_info->sep_ctx_kptr;
+		c2_ctx_p->key_size = cpu_to_le32(key_size);
+		if (key_size != SEP_C2_KEY_SIZE_MAX) {
+			/* Avoid copying a key too large */
+			pr_err("Invalid key size for C2 (%u bits)\n",
+				    key_size * 8);
+			return -EINVAL;
+		}
+		memcpy(c2_ctx_p->key, key, key_size);
+		break;
+
+	case DXDI_SYMCIPHER_RC4:
+		return -ENOSYS;	/* Not supported via this API (only MSGs) */
+	default:
+		return -EINVAL;
+	}
+
+	/* If reached here then all validations passed */
+	/* Update in props of host context */
+	memcpy(props->key, key, key_size);
+	props->key_size = key_size;
+
+	return 0;
+}
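+
+/*
+ * Usage sketch (illustrative; "xts_double_key" is a hypothetical buffer):
+ * an AES-XTS key is a concatenation of two equal-size keys, so a 64 B
+ * buffer yields a 256 b AES key in aes_ctx_p->key and a 256 b XEX (tweak)
+ * key in aes_ctx_p->xex_key:
+ *
+ *	rc = ctxmgr_set_symcipher_key(ctx_info, 64, xts_double_key);
+ */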
+
+/**
+ * ctxmgr_init_symcipher_ctx_no_props() - Initialize symcipher context when full
+ *					props are not available, yet.
+ * @ctx_info:		Context to init.
+ * @cipher_type:	Cipher type for context
+ *
+ * Initialize symcipher context when full props are not available, yet.
+ * Later set_key and set_iv may update the context.
+ * Returns int 0 on success
+ */
+int ctxmgr_init_symcipher_ctx_no_props(struct client_crypto_ctx_info *ctx_info,
+				       enum dxdi_sym_cipher_type cipher_type)
+{
+	struct host_crypto_ctx_sym_cipher *host_ctx_p =
+	    (struct host_crypto_ctx_sym_cipher *)ctx_info->ctx_kptr;
+
+	/* Initialize host context part with just cipher type */
+	host_ctx_p->alg_class = ALG_CLASS_SYM_CIPHER;
+	memset(&(host_ctx_p->props), 0, sizeof(struct dxdi_sym_cipher_props));
+	host_ctx_p->props.cipher_type = cipher_type;
+	host_ctx_p->is_encrypted = false;
+
+	/* Clear SeP context before setting specific fields */
+	/* This helps in case SeP-FW assumes zero on uninitialized fields */
+	memset(ctx_info->sep_ctx_kptr, 0, sizeof(struct sep_ctx_cache_entry));
+
+	/* with only cipher_type we can initialize just the alg/mode fields */
+	return set_sep_ctx_alg_mode(ctx_info, cipher_type);
+}
+
+/**
+ * ctxmgr_init_symcipher_ctx() - Initialize symCipher context based on given
+ *				properties.
+ * @ctx_info:	 User context mapping info.
+ * @props:	 The initialization properties
+ * @postpone_init:	Return "true" if INIT on SeP should be postponed
+ *			to first processing (e.g, in AES-XTS)
+ * @error_info:	Error info
+ *
+ * Returns 0 on success, otherwise on error
+ */
+int ctxmgr_init_symcipher_ctx(struct client_crypto_ctx_info *ctx_info,
+			      struct dxdi_sym_cipher_props *props,
+			      bool *postpone_init, u32 *error_info)
+{
+	struct host_crypto_ctx_sym_cipher *host_ctx_p =
+	    (struct host_crypto_ctx_sym_cipher *)ctx_info->ctx_kptr;
+	struct sep_ctx_cipher *aes_ctx_p = NULL;
+	struct sep_ctx_cipher *des_ctx_p = NULL;
+	int rc;
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	*postpone_init = false;	/* default */
+	*error_info = DXDI_ERROR_NULL;	/* assume no error */
+
+	/* Initialize host context part */
+	host_ctx_p->alg_class = ALG_CLASS_SYM_CIPHER;
+	memcpy(&(host_ctx_p->props), props,
+	       sizeof(struct dxdi_sym_cipher_props));
+	host_ctx_p->is_encrypted = false;
+
+	/* Clear SeP context before setting specific fields */
+	/* This helps in case SeP-FW assumes zero on uninitialized fields */
+	memset(&(host_ctx_p->sep_ctx), 0, sizeof(host_ctx_p->sep_ctx));
+
+	rc = set_sep_ctx_alg_mode(ctx_info, props->cipher_type);
+	if (rc != 0) {
+		*error_info = DXDI_ERROR_INVAL_MODE;
+		return rc;
+	}
+
+	rc = ctxmgr_set_symcipher_direction(ctx_info, props->direction);
+	if (rc != 0) {
+		*error_info = DXDI_ERROR_INVAL_DIRECTION;
+		return rc;
+	}
+
+	rc = ctxmgr_set_symcipher_key(ctx_info, props->key_size, props->key);
+	if (rc != 0) {
+		*error_info = DXDI_ERROR_INVAL_KEY_SIZE;
+		return rc;
+	}
+
+	/* mode specific initializations */
+	switch (props->cipher_type) {
+	case DXDI_SYMCIPHER_AES_CBC:
+		aes_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		memcpy(aes_ctx_p->block_state, props->alg_specific.aes_cbc.iv,
+		       SEP_AES_IV_SIZE);
+		break;
+	case DXDI_SYMCIPHER_AES_CTR:
+		aes_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		memcpy(aes_ctx_p->block_state, props->alg_specific.aes_ctr.cntr,
+		       SEP_AES_IV_SIZE);
+		break;
+	case DXDI_SYMCIPHER_AES_XTS:
+		aes_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		memcpy(aes_ctx_p->block_state,
+		       props->alg_specific.aes_xts.init_tweak, SEP_AES_IV_SIZE);
+		aes_ctx_p->data_unit_size =
+		    cpu_to_le32(props->alg_specific.aes_xts.data_unit_size);
+		/* update in context because was cleared by
+		   ctxmgr_set_symcipher_key */
+		host_ctx_p->props.alg_specific.aes_xts.data_unit_size =
+		    props->alg_specific.aes_xts.data_unit_size;
+		if (props->alg_specific.aes_xts.data_unit_size == 0)
+			*postpone_init = true;
+		break;
+	case DXDI_SYMCIPHER_DES_CBC:
+		des_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		memcpy(des_ctx_p->block_state, props->alg_specific.des_cbc.iv,
+		       SEP_DES_IV_SIZE);
+		break;
+	case DXDI_SYMCIPHER_C2_CBC:
+		/*C2 reset interval is not supported, yet */
+		*error_info = DXDI_ERROR_UNSUP;
+		return -ENOSYS;
+	case DXDI_SYMCIPHER_RC4:
+		*error_info = DXDI_ERROR_UNSUP;
+		return -ENOSYS;	/* Not supported via this API (only MSGs) */
+	default:
+		break;	/* No specific initializations for other modes */
+	}
+
+	return 0;
+}
+
+/**
+ * ctxmgr_init_auth_enc_ctx() - Initialize Authenticated Encryption class
+ *				context
+ * @ctx_info:
+ * @props:
+ * @error_info:	Error info
+ *
+ * Returns int
+ */
+int ctxmgr_init_auth_enc_ctx(struct client_crypto_ctx_info *ctx_info,
+			     struct dxdi_auth_enc_props *props,
+			     u32 *error_info)
+{
+	struct host_crypto_ctx_auth_enc *host_ctx_p =
+	    (struct host_crypto_ctx_auth_enc *)ctx_info->ctx_kptr;
+	struct sep_ctx_aead *aead_ctx_p = NULL;
+	enum sep_crypto_direction direction;
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	*error_info = DXDI_ERROR_NULL;
+
+	/* Initialize host context part */
+	host_ctx_p->alg_class = ALG_CLASS_AUTH_ENC;
+	host_ctx_p->is_adata_processed = 0;
+	memcpy(&(host_ctx_p->props), props, sizeof(struct dxdi_auth_enc_props));
+	host_ctx_p->is_encrypted = false;
+
+	/* Clear SeP context before setting specific fields */
+	/* This helps in case SeP-FW assumes zero on uninitialized fields */
+	memset(&(host_ctx_p->sep_ctx), 0, sizeof(host_ctx_p->sep_ctx));
+
+	/* Translate direction from driver ABI to SeP ABI */
+	if (props->direction == DXDI_CDIR_ENC) {
+		direction = SEP_CRYPTO_DIRECTION_ENCRYPT;
+	} else if (props->direction == DXDI_CDIR_DEC) {
+		direction = SEP_CRYPTO_DIRECTION_DECRYPT;
+	} else {
+		pr_err("Invalid direction=%d\n", props->direction);
+		*error_info = DXDI_ERROR_INVAL_DIRECTION;
+		return -EINVAL;
+	}
+
+	/* initialize SEP context */
+	aead_ctx_p = (struct sep_ctx_aead *)&(host_ctx_p->sep_ctx);
+	aead_ctx_p->alg = cpu_to_le32(SEP_CRYPTO_ALG_AEAD);
+	aead_ctx_p->direction = cpu_to_le32(direction);
+	aead_ctx_p->header_size = cpu_to_le32(props->adata_size);
+	if (props->nonce_size > SEP_AES_BLOCK_SIZE) {
+		pr_err("Invalid nonce size=%u\n", props->nonce_size);
+		*error_info = DXDI_ERROR_INVAL_NONCE_SIZE;
+		return -EINVAL;
+	}
+	aead_ctx_p->nonce_size = cpu_to_le32(props->nonce_size);
+	memcpy(aead_ctx_p->nonce, props->nonce, props->nonce_size);
+	if (props->tag_size > SEP_AES_BLOCK_SIZE) {
+		pr_err("Invalid tag size=%u\n", props->tag_size);
+		*error_info = DXDI_ERROR_INVAL_TAG_SIZE;
+		return -EINVAL;
+	}
+	aead_ctx_p->tag_size = cpu_to_le32(props->tag_size);
+	aead_ctx_p->text_size = cpu_to_le32(props->text_size);
+	if ((props->key_size != 16) &&
+	    (props->key_size != 24) && (props->key_size != 32)) {
+		pr_err("Invalid key size for AEAD (%u bits)\n",
+			    props->key_size * 8);
+		*error_info = DXDI_ERROR_INVAL_KEY_SIZE;
+		return -EINVAL;
+	}
+	memcpy(aead_ctx_p->key, props->key, props->key_size);
+	aead_ctx_p->key_size = cpu_to_le32(props->key_size);
+
+	/* mode specific initializations */
+	switch (props->ae_type) {
+	case DXDI_AUTHENC_AES_CCM:
+		aead_ctx_p->mode = cpu_to_le32(SEP_CIPHER_CCM);
+		break;
+	case DXDI_AUTHENC_AES_GCM:
+		*error_info = DXDI_ERROR_UNSUP;
+		return -ENOSYS;	/* Not supported */
+	default:
+		*error_info = DXDI_ERROR_UNSUP;
+		return -EINVAL;
+	}
+
+	return 0;
+}
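+
+/*
+ * Note: only the AES block-size bound is enforced above for the nonce and
+ * tag sizes. Standard AES-CCM (RFC 3610) is stricter (nonce of 7..13 bytes,
+ * even tag of 4..16 bytes); those checks are presumably applied by SeP-FW.
+ */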
+
+/**
+ * ctxmgr_init_mac_ctx() - Initialize context for MAC algorithm
+ * @ctx_info:	Client crypto context info structure
+ * @props:	The MAC initialization properties
+ * @error_info:	Error info
+ *
+ * Returns int
+ */
+int ctxmgr_init_mac_ctx(struct client_crypto_ctx_info *ctx_info,
+			struct dxdi_mac_props *props, u32 *error_info)
+{
+	struct host_crypto_ctx_mac *host_ctx_p =
+	    (struct host_crypto_ctx_mac *)ctx_info->ctx_kptr;
+	struct sep_ctx_cipher *aes_ctx_p = NULL;
+	struct sep_ctx_hmac *hmac_ctx_p = NULL;
+	enum dxdi_hash_type hash_type;
+	enum sep_hash_mode hash_mode;
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	*error_info = DXDI_ERROR_NULL;
+
+	/* Initialize host context part */
+	host_ctx_p->alg_class = ALG_CLASS_MAC;
+	host_ctx_p->client_data_size = 0;
+	memcpy(&(host_ctx_p->props), props, sizeof(struct dxdi_mac_props));
+	host_ctx_p->hmac_tail.size = 0;
+	host_ctx_p->is_encrypted = false;
+
+	/* Clear SeP context before setting specific fields */
+	/* This helps in case SeP-FW assumes zero on uninitialized fields */
+	memset(&(host_ctx_p->sep_ctx), 0, sizeof(host_ctx_p->sep_ctx));
+
+	switch (props->mac_type) {
+	case DXDI_MAC_HMAC:
+		hmac_ctx_p = (struct sep_ctx_hmac *)&(host_ctx_p->sep_ctx);
+		hmac_ctx_p->alg = cpu_to_le32(SEP_CRYPTO_ALG_HMAC);
+		hash_type = props->alg_specific.hmac.hash_type;
+		hash_mode = get_sep_hash_mode(hash_type);
+		if (hash_mode == SEP_HASH_NULL) {
+			*error_info = DXDI_ERROR_INVAL_MODE;
+			return -EINVAL;
+		}
+		if (get_hash_block_size(hash_type) > SEP_HMAC_BLOCK_SIZE_MAX) {
+			pr_err(
+				    "Given hash type (%d) is not supported for HMAC\n",
+				    hash_type);
+			*error_info = DXDI_ERROR_UNSUP;
+			return -EINVAL;
+		}
+		hmac_ctx_p->mode = cpu_to_le32(hash_mode);
+		hmac_ctx_p->k0_size = cpu_to_le32(props->key_size);
+		if (props->key_size > SEP_HMAC_BLOCK_SIZE_MAX) {
+			pr_err("Invalid key size %u bits\n",
+				    props->key_size * 8);
+			*error_info = DXDI_ERROR_INVAL_KEY_SIZE;
+			return -EINVAL;
+		}
+		memcpy(&(hmac_ctx_p->k0), props->key, props->key_size);
+		break;
+
+	case DXDI_MAC_AES_MAC:
+	case DXDI_MAC_AES_CMAC:
+	case DXDI_MAC_AES_XCBC_MAC:
+		aes_ctx_p = (struct sep_ctx_cipher *)&(host_ctx_p->sep_ctx);
+		aes_ctx_p->alg = cpu_to_le32(SEP_CRYPTO_ALG_AES);
+		aes_ctx_p->direction =
+		    cpu_to_le32(SEP_CRYPTO_DIRECTION_ENCRYPT);
+		aes_ctx_p->key_size = cpu_to_le32(props->key_size);
+		if ((props->key_size > SEP_AES_KEY_SIZE_MAX) ||
+		    ((props->mac_type == DXDI_MAC_AES_XCBC_MAC) &&
+		     (props->key_size != SEP_AES_128_BIT_KEY_SIZE))) {
+			/* Avoid copying a key too large */
+			pr_err("Invalid key size for MAC (%u bits)\n",
+				    props->key_size * 8);
+			*error_info = DXDI_ERROR_INVAL_KEY_SIZE;
+			return -EINVAL;
+		}
+		memcpy(aes_ctx_p->key, props->key, props->key_size);
+		break;
+
+	default:
+		*error_info = DXDI_ERROR_UNSUP;
+		return -EINVAL;
+	}
+
+	/* AES mode specific initializations */
+	switch (props->mac_type) {
+	case DXDI_MAC_AES_MAC:
+		aes_ctx_p->mode = cpu_to_le32(SEP_CIPHER_CBC_MAC);
+		memcpy(aes_ctx_p->block_state, props->alg_specific.aes_mac.iv,
+		       SEP_AES_IV_SIZE);
+		break;
+	case DXDI_MAC_AES_CMAC:
+		aes_ctx_p->mode = cpu_to_le32(SEP_CIPHER_CMAC);
+		break;
+	case DXDI_MAC_AES_XCBC_MAC:
+		aes_ctx_p->mode = cpu_to_le32(SEP_CIPHER_XCBC_MAC);
+		break;
+	default:
+		/* Invalid type was already handled in previous "switch" */
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * ctxmgr_init_hash_ctx() - Initialize hash context
+ * @ctx_info:	 User context mapping info.
+ * @hash_type:	 Assigned hash type
+ * @error_info:	Error info
+ *
+ * Returns int 0 on success, -EINVAL, -ENOSYS
+ */
+int ctxmgr_init_hash_ctx(struct client_crypto_ctx_info *ctx_info,
+			 enum dxdi_hash_type hash_type, u32 *error_info)
+{
+	struct host_crypto_ctx_hash *host_ctx_p =
+	    (struct host_crypto_ctx_hash *)ctx_info->ctx_kptr;
+	struct sep_ctx_hash *sep_ctx_p;
+	enum sep_hash_mode hash_mode;
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	*error_info = DXDI_ERROR_NULL;
+
+	/* Limit to hash types supported by SeP */
+	if ((hash_type != DXDI_HASH_SHA1) &&
+	    (hash_type != DXDI_HASH_SHA224) &&
+	    (hash_type != DXDI_HASH_SHA256)) {
+		pr_err("Unsupported hash type %d\n", hash_type);
+		*error_info = DXDI_ERROR_UNSUP;
+		return -ENOSYS;
+	}
+
+	/* Initialize host context part */
+	host_ctx_p->alg_class = ALG_CLASS_HASH;
+	host_ctx_p->hash_type = hash_type;
+	host_ctx_p->hash_tail.size = 0;
+	host_ctx_p->is_encrypted = false;
+
+	/* Initialize SeP/FW context part */
+	sep_ctx_p = (struct sep_ctx_hash *)&(host_ctx_p->sep_ctx);
+	/* Clear SeP context before setting specific fields */
+	/* This helps in case SeP-FW assumes zero on uninitialized fields */
+	memset(sep_ctx_p, 0, sizeof(struct sep_ctx_hash));
+	sep_ctx_p->alg = cpu_to_le32(SEP_CRYPTO_ALG_HASH);
+	hash_mode = get_sep_hash_mode(hash_type);
+	if (hash_mode == SEP_HASH_NULL) {
+		*error_info = DXDI_ERROR_INVAL_MODE;
+		return -EINVAL;
+	}
+	sep_ctx_p->mode = cpu_to_le32(hash_mode);
+
+	return 0;
+}
+
+/**
+ * ctxmgr_set_sep_cache_idx() - Set the index of this context in the sep_cache
+ * @ctx_info:	 User context info structure
+ * @sep_cache_idx:	 The allocated index in SeP cache for this context
+ *
+ * Returns void
+ */
+void ctxmgr_set_sep_cache_idx(struct client_crypto_ctx_info *ctx_info,
+			      int sep_cache_idx)
+{
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	ctx_info->sep_cache_idx = sep_cache_idx;
+}
+
+/**
+ * ctxmgr_get_sep_cache_idx() - Get the index of this context in the sep_cache
+ * @ctx_info:	 User context info structure
+ *
+ * Returns The allocated index in SeP cache for this context
+ */
+int ctxmgr_get_sep_cache_idx(struct client_crypto_ctx_info *ctx_info)
+{
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	return ctx_info->sep_cache_idx;
+}
+
+#ifdef DEBUG
+static void dump_sep_aes_ctx(struct sep_ctx_cipher *ctx_p)
+{
+	pr_debug("Alg.=AES , Mode=%d , Direction=%d , Key size=%d\n",
+		      le32_to_cpu(ctx_p->mode),
+		      le32_to_cpu(ctx_p->direction),
+		      le32_to_cpu(ctx_p->key_size));
+	dump_byte_array("Key", ctx_p->key, le32_to_cpu(ctx_p->key_size));
+	dump_byte_array("block_state",
+			ctx_p->block_state, sizeof(ctx_p->block_state));
+	if (le32_to_cpu(ctx_p->mode) == SEP_CIPHER_XTS) {
+		pr_debug("data_unit_size=%u\n",
+			      le32_to_cpu(ctx_p->data_unit_size));
+		dump_byte_array("XEX-Key",
+				ctx_p->xex_key, le32_to_cpu(ctx_p->key_size));
+	}
+}
+
+static void dump_sep_aead_ctx(struct sep_ctx_aead *ctx_p)
+{
+	pr_debug(
+		      "Alg.=AEAD, Mode=%d, Direction=%d, Key size=%d, header size=%d, nonce size=%d, tag size=%d, text size=%d\n",
+		      le32_to_cpu(ctx_p->mode),
+		      le32_to_cpu(ctx_p->direction),
+		      le32_to_cpu(ctx_p->key_size),
+		      le32_to_cpu(ctx_p->header_size),
+		      le32_to_cpu(ctx_p->nonce_size),
+		      le32_to_cpu(ctx_p->tag_size),
+		      le32_to_cpu(ctx_p->text_size));
+	dump_byte_array("Key", ctx_p->key, le32_to_cpu(ctx_p->key_size));
+	dump_byte_array("block_state",
+			ctx_p->block_state, sizeof(ctx_p->block_state));
+	dump_byte_array("mac_state",
+			ctx_p->mac_state, sizeof(ctx_p->mac_state));
+	dump_byte_array("nonce", ctx_p->nonce, le32_to_cpu(ctx_p->nonce_size));
+}
+
+static void dump_sep_des_ctx(struct sep_ctx_cipher *ctx_p)
+{
+	pr_debug("Alg.=DES, Mode=%d, Direction=%d, Key size=%d\n",
+		      le32_to_cpu(ctx_p->mode),
+		      le32_to_cpu(ctx_p->direction),
+		      le32_to_cpu(ctx_p->key_size));
+	dump_byte_array("Key", ctx_p->key, le32_to_cpu(ctx_p->key_size));
+	dump_byte_array("IV", ctx_p->block_state, SEP_DES_IV_SIZE);
+}
+
+static void dump_sep_c2_ctx(struct sep_ctx_c2 *ctx_p)
+{
+	pr_debug("Alg.=C2, Mode=%d, Direction=%d, KeySz=%d, ResetInt.=%d",
+		      le32_to_cpu(ctx_p->mode),
+		      le32_to_cpu(ctx_p->direction),
+		      le32_to_cpu(ctx_p->key_size),
+		      0 /* reset_interval (CBC) not implemented, yet */);
+	dump_byte_array("Key", ctx_p->key, le32_to_cpu(ctx_p->key_size));
+}
+
+static const char *hash_mode_str(enum sep_hash_mode mode)
+{
+	switch (mode) {
+	case SEP_HASH_SHA1:
+		return "SHA1";
+	case SEP_HASH_SHA224:
+		return "SHA224";
+	case SEP_HASH_SHA256:
+		return "SHA256";
+	case SEP_HASH_SHA384:
+		return "SHA384";
+	case SEP_HASH_SHA512:
+		return "SHA512";
+	default:
+		return "(unknown)";
+	}
+}
+
+static void dump_sep_hash_ctx(struct sep_ctx_hash *ctx_p)
+{
+	pr_debug("Alg.=Hash , Mode=%s\n",
+		      hash_mode_str(le32_to_cpu(ctx_p->mode)));
+}
+
+static void dump_sep_hmac_ctx(struct sep_ctx_hmac *ctx_p)
+{
+	/* Alg./Mode of HMAC is identical to HASH */
+	pr_debug("Alg.=HMAC , Mode=%s\n",
+		      hash_mode_str(le32_to_cpu(ctx_p->mode)));
+	pr_debug("K0 size = %u B\n", le32_to_cpu(ctx_p->k0_size));
+	dump_byte_array("K0", ctx_p->k0, le32_to_cpu(ctx_p->k0_size));
+}
+
+/**
+ * ctxmgr_dump_sep_ctx() - Dump SeP context data
+ * @ctx_info:	User context info structure
+ *
+ */
+void ctxmgr_dump_sep_ctx(const struct client_crypto_ctx_info *ctx_info)
+{
+	struct sep_ctx_cache_entry *sep_ctx_p = ctx_info->sep_ctx_kptr;
+	enum sep_crypto_alg alg;
+	int ctx_idx;
+
+	/* For combined mode call recursively for each sub-context */
+	if (ctx_info->ctx_kptr->alg_class == ALG_CLASS_COMBINED) {
+		for (ctx_idx = 0; (ctx_idx < DXDI_COMBINED_NODES_MAX) &&
+		     (ctx_info[ctx_idx].ctx_kptr != NULL); ctx_idx++) {
+			ctxmgr_dump_sep_ctx(&ctx_info[ctx_idx]);
+		}
+		return;
+	}
+
+	alg = (enum sep_crypto_alg)le32_to_cpu(sep_ctx_p->alg);
+
+	pr_debug("SeP crypto context at %p: Algorithm=%d\n",
+		      sep_ctx_p, alg);
+	switch (alg) {
+	case SEP_CRYPTO_ALG_NULL:
+		break;		/* Nothing to dump */
+	case SEP_CRYPTO_ALG_AES:
+		dump_sep_aes_ctx((struct sep_ctx_cipher *)sep_ctx_p);
+		break;
+	case SEP_CRYPTO_ALG_AEAD:
+		dump_sep_aead_ctx((struct sep_ctx_aead *)sep_ctx_p);
+		break;
+	case SEP_CRYPTO_ALG_DES:
+		dump_sep_des_ctx((struct sep_ctx_cipher *)sep_ctx_p);
+		break;
+	case SEP_CRYPTO_ALG_C2:
+		dump_sep_c2_ctx((struct sep_ctx_c2 *)sep_ctx_p);
+		break;
+	case SEP_CRYPTO_ALG_HASH:
+		dump_sep_hash_ctx((struct sep_ctx_hash *)sep_ctx_p);
+		break;
+	case SEP_CRYPTO_ALG_HMAC:
+		dump_sep_hmac_ctx((struct sep_ctx_hmac *)sep_ctx_p);
+		break;
+	default:
+		pr_debug("(Unsupported algorithm dump - %d)\n", alg);
+	}
+}
+#endif
+
+/**
+ * ctxmgr_sync_sep_ctx() - Sync. SeP context to device (flush from cache...)
+ * @ctx_info:	 User context info structure
+ * @mydev:	 Associated device context
+ *
+ * Returns void
+ */
+void ctxmgr_sync_sep_ctx(const struct client_crypto_ctx_info *ctx_info,
+			 struct device *mydev)
+{
+	size_t embedded_sep_ctx_offset =
+	    get_sep_ctx_offset(ctx_info->ctx_kptr->alg_class);
+	struct sep_ctx_cache_entry *embedded_sep_ctx_p =
+	    (struct sep_ctx_cache_entry *)
+	    (((unsigned long)ctx_info->ctx_kptr) + embedded_sep_ctx_offset);
+
+#ifdef DEBUG
+	if (ctx_info->sep_ctx_dma_addr == 0) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+	if (embedded_sep_ctx_offset == 0)
+		pr_err("Invalid alg. class for algorithm\n");
+#endif
+
+	/* Only the embedded SeP context requires sync (it is in user memory)
+	   Otherwise, it is a cache coherent DMA buffer.                      */
+	if (ctx_info->sep_ctx_kptr == embedded_sep_ctx_p) {
+		dma_sync_single_for_device(mydev, ctx_info->sep_ctx_dma_addr,
+					   SEP_CTX_SIZE, DMA_BIDIRECTIONAL);
+	}
+}
+
+/**
+ * ctxmgr_get_sep_ctx_dma_addr() - Return DMA address of SeP (FW) area of
+ *					the context
+ * @ctx_info:	 User context info structure
+ *
+ * Returns DMA address of SeP (FW) area of the context
+ */
+dma_addr_t ctxmgr_get_sep_ctx_dma_addr(const struct client_crypto_ctx_info
+				       *ctx_info)
+{
+#ifdef DEBUG
+	if (ctx_info->sep_ctx_dma_addr == 0) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	return ctx_info->sep_ctx_dma_addr;
+}
+
+/**
+ * ctxmgr_sep_cache_create() - Create a SeP (FW) cache manager of given num.
+ *				of entries
+ * @num_of_entries:	Number of entries available in cache
+ *
+ * Returns void * handle (NULL on failure)
+ */
+void *ctxmgr_sep_cache_create(int num_of_entries)
+{
+	struct sep_ctx_cache *new_cache;
+	int i;
+
+	/* Allocate the sep_ctx_cache + additional entries beyond the one
+	 * that is included in the sep_ctx_cache structure */
+	new_cache = kmalloc(sizeof(struct sep_ctx_cache) +
+			    (num_of_entries - 1) *
+			    sizeof(struct ctxmgr_cache_entry), GFP_KERNEL);
+	if (new_cache == NULL) {
+		pr_err("Failed allocating SeP cache of %d entries\n",
+			    num_of_entries);
+		return NULL;
+	}
+
+	/* Initialize */
+	for (i = 0; i < num_of_entries; i++)
+		new_cache->entries[i].ctx_id.addr = CTX_ID_INVALID;
+
+	new_cache->cache_size = num_of_entries;
+
+	new_cache->lru_clk = 0;
+
+	return (void *)new_cache;
+}
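+
+/*
+ * Illustrative usage (sketch only): a queue would typically create one
+ * cache at init time and check the returned handle, e.g.:
+ *
+ *	void *cache = ctxmgr_sep_cache_create(num_entries);
+ *	if (cache == SEP_CTX_CACHE_NULL_HANDLE)
+ *		return -ENOMEM;
+ *
+ * (num_entries here is a placeholder for the per-queue entry count.)
+ */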
+
+/**
+ * ctxmgr_sep_cache_destroy() - Destroy SeP (FW) cache manager object
+ * @sep_cache:	The cache object
+ *
+ * Returns void
+ */
+void ctxmgr_sep_cache_destroy(void *sep_cache)
+{
+	struct sep_ctx_cache *this_cache = (struct sep_ctx_cache *)sep_cache;
+
+	kfree(this_cache);
+}
+
+/**
+ * ctxmgr_sep_cache_get_size() - Get cache size (entries count)
+ * @sep_cache:	The cache object
+ *
+ * Returns int Number of cache entries available
+ */
+int ctxmgr_sep_cache_get_size(void *sep_cache)
+{
+	struct sep_ctx_cache *this_cache = sep_cache;
+
+	return this_cache->cache_size;
+}
+
+/**
+ * ctxmgr_sep_cache_alloc() - Allocate a cache entry of given SeP context cache
+ * @sep_cache:	 The cache object
+ * @ctx_id:	 The host crypto. context ID
+ * @load_required_p:	Pointed int is set to !0 if a cache load is required
+ *			(i.e., if item already loaded in cache it would be 0)
+ *
+ * Returns cache index
+ */
+int ctxmgr_sep_cache_alloc(void *sep_cache,
+			   struct crypto_ctx_uid ctx_id, int *load_required_p)
+{
+	struct sep_ctx_cache *this_cache = (struct sep_ctx_cache *)sep_cache;
+	int i;
+	int chosen_idx = 0;	/* first candidate... */
+
+	*load_required_p = 1;	/* until found assume a load is required */
+
+	/* First search for given ID or free/older entry  */
+	for (i = 0; i < this_cache->cache_size; i++) {
+		if (this_cache->entries[i].ctx_id.addr == ctx_id.addr
+		    && this_cache->entries[i].ctx_id.cntr == ctx_id.cntr) {
+			/* if found */
+			chosen_idx = i;
+			*load_required_p = 0;
+			break;
+		}
+		/* else... as long as the current candidate is not already a
+		   free entry, prefer a free (invalid) or older (LRU) entry */
+		if (this_cache->entries[chosen_idx].ctx_id.addr
+			!= CTX_ID_INVALID) {
+			if ((this_cache->entries[i].ctx_id.addr
+				== CTX_ID_INVALID) ||
+			    (this_cache->entries[chosen_idx].lru_time >
+			     this_cache->entries[i].lru_time)) {
+				/* Found free OR older entry */
+				chosen_idx = i;
+			}
+		}
+	}
+
+	/* Record allocation + update LRU "timestamp" */
+	this_cache->entries[chosen_idx].ctx_id.addr = ctx_id.addr;
+	this_cache->entries[chosen_idx].ctx_id.cntr = ctx_id.cntr;
+	this_cache->entries[chosen_idx].lru_time = this_cache->lru_clk++;
+
+#ifdef DEBUG
+	if (this_cache->lru_clk == 0xFFFFFFFF) {
+		pr_err("Reached lru_clk limit!\n");
+		SEP_DRIVER_BUG();
+		/* If this limit is found to be a practical real life
+		   case, a few workarounds may be used:
+		   1. Use a larger (64b) lru_clk
+		   2. Invalidate the whole cache before wrapping to 0
+		   3. Ignore this case - old contexts would persist over newer
+		   until they are all FINALIZEd and invalidated "manually".
+		   4. "shift down" existing timestamps so the lowest would be 0
+		   5. "pack" timestamps to be 0,1,2,... based on exisitng order
+		   and set lru_clk to the largest (which is the num. of
+		   entries)
+		 */
+	}
+#endif
+
+	return chosen_idx;
+}
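+
+/*
+ * Illustrative caller flow (sketch only): the dispatcher allocates a slot
+ * and triggers a SeP context load only on a cache miss:
+ *
+ *	int load_required;
+ *	int idx = ctxmgr_sep_cache_alloc(cache, uid, &load_required);
+ *
+ *	ctxmgr_set_sep_cache_idx(ctx_info, idx);
+ *	if (load_required)
+ *		(mark the dispatched descriptor to (re)load the context)
+ */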
+
+/**
+ * ctxmgr_sep_cache_invalidate() - Invalidate cache entry for given context ID
+ * @sep_cache:	 The cache object
+ * @ctx_id:	 The host crypto. context ID
+ * @id_mask:	 A bit mask to be used when comparing the ID
+ *                (to be used for a set of entries from the same client)
+ *
+ * Returns void
+ */
+void ctxmgr_sep_cache_invalidate(void *sep_cache,
+				 struct crypto_ctx_uid ctx_id,
+				 u64 id_mask)
+{
+	struct sep_ctx_cache *this_cache = (struct sep_ctx_cache *)sep_cache;
+	int i;
+
+	/* Search for given ID */
+	for (i = 0; i < this_cache->cache_size; i++) {
+		if ((this_cache->entries[i].ctx_id.addr) == ctx_id.addr) {
+			/* When invalidating single, check also counter */
+			if (id_mask == CRYPTO_CTX_ID_SINGLE_MASK
+			    && this_cache->entries[i].ctx_id.cntr
+			       != ctx_id.cntr)
+				continue;
+			this_cache->entries[i].ctx_id.addr = CTX_ID_INVALID;
+		}
+	}
+}
diff --git a/drivers/staging/sep54/crypto_ctx_mgr.h b/drivers/staging/sep54/crypto_ctx_mgr.h
new file mode 100644
index 0000000..a65eaf2
--- /dev/null
+++ b/drivers/staging/sep54/crypto_ctx_mgr.h
@@ -0,0 +1,694 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _CRYPTO_CTX_MGR_H_
+#define _CRYPTO_CTX_MGR_H_
+
+#include "sep_ctx.h"
+#include "dx_driver_abi.h"
+#include "lli_mgr.h"
+
+/* The largest hash block size is for SHA512 - 1024 bits */
+#define HASH_BLK_SIZE_MAX (1024>>3)	/*octets */
+
+/* Unique ID for a user context */
+/* This value is made unique by concatenating session ptr (in kernel)
+   with global counter incremented on each INIT phase                */
+#define CTX_ID_INVALID ((u64)0)
+
+enum host_ctx_state {
+	CTX_STATE_UNINITIALIZED = 0,
+	/* When a context is uninitialized it can hold any "garbage" since
+	   the context buffer is provided by the user... */
+	CTX_STATE_INITIALIZED = 0x10000001,
+	/* INITIALIZED = Fully initialized (on host and SeP) */
+	CTX_STATE_PARTIAL_INIT = 0x10101011,
+	/* PARTIAL_INIT = Initialized only on host; INIT on SeP was postponed
+	   and is required on the next SeP operation. */
+};
+
+struct crypto_ctx_uid {
+	u64 addr;
+	u32 cntr;
+};
+
+/* Algorithm family/class enumeration */
+enum crypto_alg_class {
+	ALG_CLASS_NONE = 0,
+	ALG_CLASS_SYM_CIPHER,
+	ALG_CLASS_AUTH_ENC,
+	ALG_CLASS_MAC,
+	ALG_CLASS_HASH,
+	ALG_CLASS_COMBINED,
+	ALG_CLASS_MAX = ALG_CLASS_HASH
+};
+
+/* The common fields at the start of a user context structure */
+#define HOST_CTX_COMMON						\
+		struct crypto_ctx_uid uid;					\
+		/* Context state (see enum host_ctx_state) */	\
+		enum host_ctx_state state;			\
+		/*determine the context specification*/		\
+		enum crypto_alg_class alg_class;		\
+		/* Cast the whole struct to matching user_ctx_* struct */ \
+	/* When is_encrypted==true the props are not initialized and */\
+	/* the contained sep_ctx is encrypted (when created in SeP)  */\
+	/* Algorithm properties are encrypted */		\
+		bool is_encrypted; \
+		u32 sess_id; \
+		struct sep_client_ctx *sctx
+
+/* SeP context segment of a context */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+/* Allocate cache line bytes before/after sep context to assure
+   its cache line does not enter the cache during its mapping to SeP */
+#define SEP_CTX_SEGMENT                                                        \
+	u8 reserved_before[L1_CACHE_BYTES];			       \
+	struct sep_ctx_cache_entry sep_ctx;                                    \
+	u8 reserved_after[L1_CACHE_BYTES]
+#else
+/* Cache is coherent - no need for "protection" margins */
+#define SEP_CTX_SEGMENT \
+	struct sep_ctx_cache_entry sep_ctx
+#endif
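+
+/*
+ * Example: with L1_CACHE_BYTES == 64, the non-coherent variant reserves 64
+ * bytes on each side of sep_ctx so that no host-written field can share a
+ * cache line with the SeP-owned context while it is mapped for DMA.
+ */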
+
+/* Generic host context*/
+struct host_crypto_ctx {
+	HOST_CTX_COMMON;
+};
+
+/* user_ctx specification for symmetric ciphers */
+struct host_crypto_ctx_sym_cipher {
+	HOST_CTX_COMMON;
+	struct dxdi_sym_cipher_props props;
+	SEP_CTX_SEGMENT;
+};
+
+/* user_ctx specification for authenticated encryption */
+struct host_crypto_ctx_auth_enc {
+	HOST_CTX_COMMON;
+	bool is_adata_processed;/* flag indicates if adata was processed */
+	struct dxdi_auth_enc_props props;
+	SEP_CTX_SEGMENT;
+};
+
+/* Host data for hash block remainders */
+struct hash_block_remainder {
+	u16 size;		/* Octets available in data_blk_tail */
+	u8 data[HASH_BLK_SIZE_MAX] __aligned(8);
+	/* data_blk_tail holds remainder of user data because the HW requires
+	   integral hash blocks but for last data block.
+	   We take it aligned to 8 in order to optimize HW access to it via
+	   the 64bit AXI bus */
+};
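+
+/*
+ * Example: for SHA-256 (64-byte hash blocks), an update of 100 bytes would
+ * dispatch one integral 64-byte block to the HW and keep the remaining 36
+ * bytes in data[] (size = 36), to be prepended to the next update or to the
+ * finalizing block.
+ */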
+
+/* user_ctx specification for MAC algorithms */
+struct host_crypto_ctx_mac {
+	HOST_CTX_COMMON;
+	u64 client_data_size;	/* Sum. up the data processed so far */
+	struct dxdi_mac_props props;
+	struct hash_block_remainder hmac_tail;
+	SEP_CTX_SEGMENT;
+};
+
+/* user_ctx specification for Hash algorithms */
+struct host_crypto_ctx_hash {
+	HOST_CTX_COMMON;
+	enum dxdi_hash_type hash_type;
+	struct hash_block_remainder hash_tail;
+	SEP_CTX_SEGMENT;
+};
+
+/**
+ * struct client_crypto_ctx_info - Meta data on the client application crypto
+ *					context buffer and its mapping
+ * @dev:	Device context of the context (DMA) mapping.
+ * @user_ptr:	address of current context in user space (if no kernel op.)
+ * @ctx_page:	Mapped user page where user_ptr is located
+ * @ctx_kptr:	Mapping context to kernel VA
+ * @sep_ctx_kptr:	Kernel VA of SeP context portion of the host context
+ *			(for async. operations, this may be outside of host
+ *			 context)
+ * @sep_ctx_dma_addr:	DMA address of SeP context
+ * @hash_tail_dma_addr:	DMA of host_ctx_hash:data_blk_tail
+ * @sep_cache_idx:	if >=0, saves the allocated sep cache entry index
+ */
+struct client_crypto_ctx_info {
+	struct device *dev;
+	u32 __user *user_ptr;
+	struct page *ctx_page;
+	struct host_crypto_ctx *ctx_kptr;
+	struct sep_ctx_cache_entry *sep_ctx_kptr;
+	dma_addr_t sep_ctx_dma_addr;
+	dma_addr_t hash_tail_dma_addr;
+	int sep_cache_idx;
+
+	int sess_id;
+	struct sep_client_ctx *sctx;
+};
+/* Macro to initialize the context info structure */
+#define USER_CTX_INFO_INIT(ctx_info_p)	\
+do {					\
+	memset((ctx_info_p), 0, sizeof(struct client_crypto_ctx_info)); \
+	(ctx_info_p)->sep_cache_idx = -1; /* 0 is a valid entry idx */ \
+} while (0)
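+
+/*
+ * Illustrative init sequence (sketch only): the info structure is
+ * initialized before any mapping so sep_cache_idx starts as "none" (-1):
+ *
+ *	struct client_crypto_ctx_info ctx_info;
+ *
+ *	USER_CTX_INFO_INIT(&ctx_info);
+ *	rc = ctxmgr_map_user_ctx(&ctx_info, mydev, alg_class, user_ctx_ptr);
+ */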
+
+#define SEP_CTX_CACHE_NULL_HANDLE NULL
+
+/**
+ * ctxmgr_get_ctx_size() - Get host context size for given algorithm class
+ * @alg_class:	 Queries algorithm class
+ *
+ * Returns size_t Size in bytes of host context
+ */
+size_t ctxmgr_get_ctx_size(enum crypto_alg_class alg_class);
+
+/**
+ * ctxmgr_map_user_ctx() - Map given user context to kernel space + DMA
+ * @ctx_info:	 User context info structure
+ * @mydev:	 Associated device context
+ * @alg_class:	If !ALG_CLASS_NONE, consider context of given class for
+ *		size validation (used in uninitialized context mapping)
+ *		When set to ALG_CLASS_NONE, the alg_class field of the
+ *		host_ctx is used to verify mapped buffer size.
+ * @user_ctx_ptr: Pointer to user space context
+ *
+ * Returns int 0 for success
+ */
+int ctxmgr_map_user_ctx(struct client_crypto_ctx_info *ctx_info,
+			struct device *mydev,
+			enum crypto_alg_class alg_class,
+			u32 __user *user_ctx_ptr);
+
+/**
+ * ctxmgr_unmap_user_ctx() - Unmap given currently mapped user context
+ * @ctx_info:	 User context info structure
+ */
+void ctxmgr_unmap_user_ctx(struct client_crypto_ctx_info *ctx_info);
+
+/**
+ * ctxmgr_map_kernel_ctx() - Map given kernel context + clone SeP context into
+ *				Privately allocated DMA buffer
+ *				(required for async. ops. on the same context)
+ * @ctx_info:	Client crypto context info structure
+ * @mydev:	Associated device context
+ * @alg_class:	If !ALG_CLASS_NONE, consider context of given class for
+ *		size validation (used in uninitialized context mapping)
+ *		When set to ALG_CLASS_NONE, the alg_class field of the
+ *		host_crypto_ctx is used to verify mapped buffer size.
+ * @kernel_ctx_p:	Pointer to kernel space crypto context
+ * @sep_ctx_p:	Pointer to (private) SeP context. If !NULL the embedded sep
+ *		context is copied into this buffer.
+ *		Set to NULL to use the one embedded in host_crypto_ctx.
+ * @sep_ctx_dma_addr:	DMA address of private SeP context (if sep_ctx_p!=NULL)
+ *
+ * Returns int 0 for success
+ */
+int ctxmgr_map_kernel_ctx(struct client_crypto_ctx_info *ctx_info,
+			  struct device *mydev,
+			  enum crypto_alg_class alg_class,
+			  struct host_crypto_ctx *kernel_ctx_p,
+			  struct sep_ctx_cache_entry *sep_ctx_p,
+			  dma_addr_t sep_ctx_dma_addr);
+
+/**
+ * ctxmgr_unmap_kernel_ctx() - Unmap given currently mapped kernel context
+ *				(was mapped with map_kernel_ctx)
+ * @ctx_info:	 User context info structure
+ */
+void ctxmgr_unmap_kernel_ctx(struct client_crypto_ctx_info *ctx_info);
+
+/**
+ * ctxmgr_map2dev_hash_tail() - Map hash data tail buffer in the host context
+ *				for DMA to device
+ * @ctx_info:	User context info structure
+ * @mydev:	Associated device context
+ *
+ * Returns int 0 on success
+ */
+int ctxmgr_map2dev_hash_tail(struct client_crypto_ctx_info *ctx_info,
+			     struct device *mydev);
+/**
+ * ctxmgr_unmap2dev_hash_tail() - Unmap hash data tail buffer from DMA to device
+ * @ctx_info:	User context info structure
+ * @mydev:	Associated device context
+ *
+ */
+void ctxmgr_unmap2dev_hash_tail(struct client_crypto_ctx_info *ctx_info,
+				struct device *mydev);
+
+/**
+ * ctxmgr_set_ctx_state() - Set context state
+ * @ctx_info:	 User context info structure
+ * @state:	    State to set in context
+ *
+ * Returns void
+ */
+void ctxmgr_set_ctx_state(struct client_crypto_ctx_info *ctx_info,
+			  const enum host_ctx_state state);
+
+/**
+ * ctxmgr_get_ctx_state() - Get context state
+ * @ctx_info:	 User context info structure
+ *
+ * Returns Current context state
+ */
+enum host_ctx_state ctxmgr_get_ctx_state(const struct client_crypto_ctx_info
+					 *ctx_info);
+
+/**
+ * ctxmgr_set_ctx_id() - Allocate unique ID for (initialized) user context
+ * @ctx_info:	 Client crypto context info structure
+ * @ctx_id:	 The unique ID allocated for given context
+ *
+ * Allocate unique ID for (initialized) user context
+ * (Assumes invoked within session mutex so no need for counter protection)
+ */
+void ctxmgr_set_ctx_id(struct client_crypto_ctx_info *ctx_info,
+		       const struct crypto_ctx_uid ctx_id);
+
+/**
+ * ctxmgr_get_ctx_id() - Return the unique ID for current user context
+ * @ctx_info:	 User context info structure
+ *
+ * Returns Allocated ID (or CTX_INVALID_ID if none)
+ */
+struct crypto_ctx_uid ctxmgr_get_ctx_id(struct client_crypto_ctx_info
+					*ctx_info);
+
+/**
+ * ctxmgr_get_session_id() - Return the session ID of given context ID
+ * @ctx_info:	User context info structure
+ *
+ * Return the session ID of given context ID
+ * This may be used to validate ID and verify that it was not tampered
+ * in a manner that can allow access to a session of another process
+ * Returns u64
+ */
+u64 ctxmgr_get_session_id(struct client_crypto_ctx_info *ctx_info);
+
+/**
+ * ctxmgr_get_alg_class() - Get algorithm class of context
+ *				(set during _init_ of a context)
+ * @ctx_info:	 User context info structure
+ *
+ * Returns Current algorithm class of context
+ */
+enum crypto_alg_class ctxmgr_get_alg_class(const struct client_crypto_ctx_info
+					   *ctx_info);
+
+/**
+ * ctxmgr_get_crypto_blk_size() - Get the crypto-block length of given context
+ *					in octets
+ * @ctx_info:	 User context info structure
+ *
+ * Returns u32 Crypto-block size in bytes, 0 if invalid/unsupported alg.
+ */
+u32 ctxmgr_get_crypto_blk_size(struct client_crypto_ctx_info *ctx_info);
+
+/**
+ * ctxmgr_is_valid_adata_size() - Validate additional/associated data size
+ *				for auth/enc algorithms
+ * @ctx_info:	The context info object of the auth/enc alg.
+ * @adata_size:	Size in bytes of the additional/associated data
+ *
+ * Returns bool
+ */
+bool ctxmgr_is_valid_adata_size(struct client_crypto_ctx_info *ctx_info,
+				unsigned long adata_size);
+
+/**
+ * ctxmgr_is_valid_size() - Validate given data unit for given alg./mode
+ * @ctx_info:	User context info structure
+ * @data_unit_size:	Size in bytes of the given data unit
+ * @is_finalize:	true if this is the finalizing (last) data unit
+ *
+ * Returns bool true if valid.
+ */
+bool ctxmgr_is_valid_size(struct client_crypto_ctx_info *ctx_info,
+				    unsigned long data_unit_size,
+				    bool is_finalize);
+
+/**
+ * ctxmgr_get_sym_cipher_type() - Returns the sym cipher specific type.
+ * @ctx_info:	 The context info object of the sym cipher alg.
+ *
+ * Returns enum dxdi_sym_cipher_type The sym cipher type.
+ */
+enum dxdi_sym_cipher_type ctxmgr_get_sym_cipher_type(const struct
+						     client_crypto_ctx_info
+						     *ctx_info);
+
+/**
+ * ctxmgr_get_mac_type() - Returns the mac specific type.
+ * @ctx_info:	 The context info object of the mac alg.
+ *
+ * Returns enum dxdi_mac_type The mac type.
+ */
+enum dxdi_mac_type ctxmgr_get_mac_type(const struct client_crypto_ctx_info
+				       *ctx_info);
+
+/**
+ * ctxmgr_get_hash_type() - Returns the hash specific type.
+ * @ctx_info:	 The context info object of the hash alg.
+ *
+ * Returns enum dxdi_hash_type The hash type.
+ */
+enum dxdi_hash_type ctxmgr_get_hash_type(const struct client_crypto_ctx_info
+					 *ctx_info);
+
+/**
+ * ctxmgr_save_hash_blk_remainder() - Save hash block tail data in given
+ *	context. The data is taken from the save4next chunk of given client
+ *	buffer.
+ * @ctx_info:	 Client context info structure (HASH's or HMAC's)
+ * @client_dma_buf_p:	A client DMA buffer object. Data is taken from the
+ *			save4next chunk of this buffer.
+ * @append_data:	When true, given data is appended to existing
+ *
+ * Returns 0 on success
+ */
+int ctxmgr_save_hash_blk_remainder(struct client_crypto_ctx_info *ctx_info,
+				   struct client_dma_buffer *client_dma_buf_p,
+				   bool append_data);
+
+/**
+ * ctxmgr_get_hash_blk_remainder_buf() - Get DMA info for hash block remainder
+ *	buffer from given context
+ * @ctx_info:			User context info structure
+ * @hash_blk_remainder_dma_p:	Returned tail buffer DMA address
+ *
+ * Note: This function must be invoked only when tail_buf is mapped2dev
+ *	(using ctxmgr_map2dev_hash_tail)
+ * Returns u16 Number of valid bytes/octets in tail buffer
+ */
+u16 ctxmgr_get_hash_blk_remainder_buf(struct client_crypto_ctx_info
+					   *ctx_info,
+					   dma_addr_t *
+					   hash_blk_remainder_dma_p);
+
+/**
+ * ctxmgr_get_digest_or_mac() - Get the digest/MAC result when applicable
+ * @ctx_info:		User context info structure
+ * @digest_or_mac:	Pointer to digest/MAC buffer
+ *
+ * Returns Returned digest/MAC size
+ */
+u32 ctxmgr_get_digest_or_mac(struct client_crypto_ctx_info *ctx_info,
+				  u8 *digest_or_mac);
+
+/**
+ * ctxmgr_get_digest_or_mac_ptr() - Get the digest/MAC pointer in SeP context
+ * @ctx_info:		User context info structure
+ * @digest_or_mac_pp:	Returned pointer to digest/MAC buffer
+ *
+ * Returns Returned digest/MAC size
+ * This function may be used for the caller to reference the result instead
+ * of always copying
+ */
+u32 ctxmgr_get_digest_or_mac_ptr(struct client_crypto_ctx_info *ctx_info,
+				      u8 **digest_or_mac_pp);
+
+/**
+ * ctxmgr_set_symcipher_iv_user() - Set IV of symcipher context given in
+ *					user space pointer
+ * @user_ctx_ptr:	 A user space pointer to the host context
+ * @iv_ptr:	 The IV to set
+ *
+ * Returns int
+ */
+int ctxmgr_set_symcipher_iv_user(u32 __user *user_ctx_ptr,
+				 u8 *iv_ptr);
+
+/**
+ * ctxmgr_get_symcipher_iv_user() - Read current "IV"
+ * @user_ctx_ptr:	A user space pointer to the host context
+ * @iv_ptr:		Where to return the read IV
+ *
+ * Read current "IV" (block state - not the actual set IV during initialization)
+ * This function works directly over a user space context
+ * Returns int
+ */
+int ctxmgr_get_symcipher_iv_user(u32 __user *user_ctx_ptr,
+				 u8 *iv_ptr);
+
+/**
+ * ctxmgr_set_symcipher_iv() - Set IV for given block symcipher algorithm
+ * @ctx_info:	Context to update
+ * @iv:		New IV
+ *
+ * Returns int 0 if changed IV, -EINVAL for error
+ * (given cipher type does not have IV)
+ */
+int ctxmgr_set_symcipher_iv(struct client_crypto_ctx_info *ctx_info,
+			    u8 *iv);
+
+/**
+ * ctxmgr_get_symcipher_iv() - Return given cipher IV
+ * @ctx_info:	Context to query
+ * @iv_user:	The IV given by the user on last ctxmgr_set_symcipher_iv
+ * @iv_current:	The current IV state block
+ * @iv_size_p:	[I/O] The given buffers size and returns actual IV size
+ *
+ * Return given cipher IV - Original IV given by user and current state "IV"
+ * The given IV buffers must be large enough to accommodate the IVs
+ * Returns int 0 on success, -ENOMEM if given iv_size is too small
+ */
+int ctxmgr_get_symcipher_iv(struct client_crypto_ctx_info *ctx_info,
+			    u8 *iv_user, u8 *iv_current,
+			    u8 *iv_size_p);
+
+/**
+ * ctxmgr_set_symcipher_direction() - Set the operation direction for given
+ *					symcipher context
+ * @ctx_info:		Context to update
+ * @dxdi_direction:	Requested cipher direction
+ *
+ * Returns int 0 on success
+ */
+int ctxmgr_set_symcipher_direction(struct client_crypto_ctx_info *ctx_info,
+				   enum dxdi_cipher_direction dxdi_direction);
+
+/**
+ * ctxmgr_get_symcipher_direction() - Return the operation direction of given
+ *					symcipher context
+ * @ctx_info:	Context to query
+ *
+ * Returns enum dxdi_cipher_direction (<0 on error)
+ */
+enum dxdi_cipher_direction ctxmgr_get_symcipher_direction(struct
+							  client_crypto_ctx_info
+							  *ctx_info);
+
+/**
+ * ctxmgr_set_symcipher_key() - Set a symcipher context key
+ * @ctx_info:	Context to update
+ * @key_size:	Size of key in bytes
+ * @key:	New key pointer
+ *
+ * Set a symcipher context key
+ * After invoking this function the context should be reinitialized by SeP
+ * (set its state to "partial init" if not done in this sequence)
+ * Returns int 0 on success, -EINVAL Invalid key len, -EPERM Forbidden/weak key
+ */
+int ctxmgr_set_symcipher_key(struct client_crypto_ctx_info *ctx_info,
+			     u8 key_size, const u8 *key);
+
+/**
+ * ctxmgr_init_symcipher_ctx_no_props() - Initialize symcipher context when full
+ *					props are not yet available.
+ * @ctx_info:		Context to init.
+ * @cipher_type:	Cipher type for context
+ *
+ * Initialize symcipher context when full props are not yet available.
+ * Later set_key and set_iv may update the context.
+ * Returns int 0 on success
+ */
+int ctxmgr_init_symcipher_ctx_no_props(struct client_crypto_ctx_info *ctx_info,
+				       enum dxdi_sym_cipher_type cipher_type);
+
+/**
+ * ctxmgr_init_symcipher_ctx() - Initialize symCipher context based on given
+ *				properties.
+ * @ctx_info:	 User context mapping info.
+ * @props:	 The initialization properties
+ * @postpone_init:	Return "true" if INIT on SeP should be postponed
+ *			to first processing (e.g, in AES-XTS)
+ * @error_info:	Error info
+ *
+ * Returns 0 on success, otherwise on error
+ */
+int ctxmgr_init_symcipher_ctx(struct client_crypto_ctx_info *ctx_info,
+			      struct dxdi_sym_cipher_props *props,
+			      bool *postpone_init, u32 *error_info);
+
+/**
+ * ctxmgr_init_auth_enc_ctx() - Initialize Authenticated Encryption class
+ *				context
+ * @ctx_info:	Client crypto context info structure
+ * @props:	The auth/enc initialization properties
+ * @error_info:	Error info
+ *
+ * Returns int
+ */
+int ctxmgr_init_auth_enc_ctx(struct client_crypto_ctx_info *ctx_info,
+			     struct dxdi_auth_enc_props *props,
+			     u32 *error_info);
+
+/**
+ * ctxmgr_init_mac_ctx() - Initialize context for MAC algorithm
+ * @ctx_info:	Client crypto context info structure
+ * @props:	The MAC initialization properties
+ * @error_info:	Error info
+ *
+ * Returns int
+ */
+int ctxmgr_init_mac_ctx(struct client_crypto_ctx_info *ctx_info,
+			struct dxdi_mac_props *props, u32 *error_info);
+
+/**
+ * ctxmgr_init_hash_ctx() - Initialize hash context
+ * @ctx_info:	 User context mapping info.
+ * @hash_type:	 Assigned hash type
+ * @error_info:	Error info
+ *
+ * Returns int 0 on success, -EINVAL, -ENOSYS
+ */
+int ctxmgr_init_hash_ctx(struct client_crypto_ctx_info *ctx_info,
+			 enum dxdi_hash_type hash_type, u32 *error_info);
+
+/**
+ * ctxmgr_set_sep_cache_idx() - Set the index of this context in the sep_cache
+ * @ctx_info:	 User context info structure
+ * @sep_cache_idx:	 The allocated index in SeP cache for this context
+ *
+ * Returns void
+ */
+void ctxmgr_set_sep_cache_idx(struct client_crypto_ctx_info *ctx_info,
+			      int sep_cache_idx);
+
+/**
+ * ctxmgr_get_sep_cache_idx() - Get the index of this context in the sep_cache
+ * @ctx_info:	 User context info structure
+ *
+ * Returns The allocated index in SeP cache for this context
+ */
+int ctxmgr_get_sep_cache_idx(struct client_crypto_ctx_info *ctx_info);
+
+#ifdef DEBUG
+/**
+ * ctxmgr_dump_sep_ctx() - Dump SeP context data
+ * @ctx_info:	User context info structure
+ *
+ */
+void ctxmgr_dump_sep_ctx(const struct client_crypto_ctx_info *ctx_info);
+#else
+#define ctxmgr_dump_sep_ctx(ctx_info) do {} while (0)
+#endif /*DEBUG*/
+/**
+ * ctxmgr_sync_sep_ctx() - Sync. SeP context to device (flush from cache...)
+ * @ctx_info:	 User context info structure
+ * @mydev:	 Associated device context
+ *
+ * Returns void
+ */
+void ctxmgr_sync_sep_ctx(const struct client_crypto_ctx_info *ctx_info,
+			 struct device *mydev);
+
+/**
+ * ctxmgr_get_sep_ctx_dma_addr() - Return DMA address of SeP (FW) area of the context
+ * @ctx_info:	 User context info structure
+ *
+ * Returns DMA address of SeP (FW) area of the context
+ */
+dma_addr_t ctxmgr_get_sep_ctx_dma_addr(const struct client_crypto_ctx_info
+				       *ctx_info);
+
+/******************************
+ * SeP context cache functions
+ ******************************/
+
+/**
+ * ctxmgr_sep_cache_create() - Create a SeP (FW) cache manager of given num. of
+ *				entries
+ * @num_of_entries:	Number of entries available in cache
+ *
+ * Returns void * handle (NULL on failure)
+ */
+void *ctxmgr_sep_cache_create(int num_of_entries);
+
+/**
+ * ctxmgr_sep_cache_destroy() - Destroy SeP (FW) cache manager object
+ * @sep_cache:	The cache object
+ *
+ * Returns void
+ */
+void ctxmgr_sep_cache_destroy(void *sep_cache);
+
+/**
+ * ctxmgr_sep_cache_get_size() - Get cache size (entries count)
+ * @sep_cache:	The cache object
+ *
+ * Returns int Number of cache entries available
+ */
+int ctxmgr_sep_cache_get_size(void *sep_cache);
+
+/**
+ * ctxmgr_sep_cache_alloc() - Allocate a cache entry of given SeP context cache
+ * @sep_cache:	 The cache object
+ * @ctx_id:	 The user context ID
+ * @load_required_p:	Pointed int is set to !0 if a cache load is required
+ *			(i.e., if item already loaded in cache it would be 0)
+ *
+ * Returns cache index
+ */
+int ctxmgr_sep_cache_alloc(void *sep_cache,
+			   struct crypto_ctx_uid ctx_id, int *load_required_p);
+
+/**
+ * ctxmgr_sep_cache_invalidate() - Invalidate cache entry for given context ID
+ * @sep_cache:	The cache object
+ * @ctx_id:	The host crypto. context ID
+ * @id_mask:	A bit mask to be used when comparing the ID
+ *		(to be used for a set of entries from the same client)
+ *
+ * Returns void
+ */
+void ctxmgr_sep_cache_invalidate(void *sep_cache,
+				 struct crypto_ctx_uid ctx_id,
+				 u64 id_mask);
+
+#endif /*_CRYPTO_CTX_MGR_H_*/
diff --git a/drivers/staging/sep54/crypto_hwk.c b/drivers/staging/sep54/crypto_hwk.c
new file mode 100644
index 0000000..32d999c
--- /dev/null
+++ b/drivers/staging/sep54/crypto_hwk.c
@@ -0,0 +1,419 @@
+/*
+ *  Copyright(c) 2012-2013 Intel Corporation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ *
+ *  You should have received a copy of the GNU General Public License along with
+ *  this program; if not, write to the Free Software Foundation, Inc., 59
+ *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) "sep_hwk: " fmt
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <linux/highmem.h>
+
+#include "dx_driver.h"
+#include "sep_sysfs.h"
+#include "sep_power.h"
+#include "dx_sepapp_kapi.h"
+
+#define HWK_APP_UUID "INTEL HWK 000001"
+#define HWK_CMD_CRYPTO 8
+
+struct hwk_context {
+	struct sep_client_ctx *sctx;
+	u32 sess_id, key_id;
+};
+
+static inline void hwk_pm_runtime_get(void)
+{
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_get();
+#endif
+}
+
+static inline void hwk_pm_runtime_put(void)
+{
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_put();
+#endif
+}
+
+static int hwk_ctx_init(struct crypto_tfm *tfm)
+{
+	struct hwk_context *hctx = crypto_tfm_ctx(tfm);
+	u8 uuid[16] = HWK_APP_UUID;
+	enum dxdi_sep_module ret;
+	int rc;
+
+	hwk_pm_runtime_get();
+
+	hctx->sctx = dx_sepapp_context_alloc();
+	if (!hctx->sctx) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	pr_debug("%s: opening session\n", __func__);
+	rc = dx_sepapp_session_open(hctx->sctx, uuid, 0, NULL, NULL,
+				    &hctx->sess_id, &ret);
+	pr_debug("%s: %d: %p %d\n", __func__, rc, hctx->sctx, hctx->sess_id);
+	if (rc != 0)
+		dx_sepapp_context_free(hctx->sctx);
+
+out:
+	hwk_pm_runtime_put();
+
+	return rc;
+}
+
+static void hwk_ctx_cleanup(struct crypto_tfm *tfm)
+{
+	struct hwk_context *hctx = crypto_tfm_ctx(tfm);
+
+	pr_debug("%s: %p %d\n", __func__, hctx->sctx, hctx->sess_id);
+	if (dx_sepapp_session_close(hctx->sctx, hctx->sess_id))
+		BUG();
+	dx_sepapp_context_free(hctx->sctx);
+	pr_debug("%s: session closed\n", __func__);
+}
+
+static int hwk_set_key(struct crypto_ablkcipher *tfm,
+			     const u8 *key, unsigned int keylen)
+{
+	struct hwk_context *hctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+
+	hctx->key_id = *((u32 *)key);
+	pr_debug("%s: key_id=%d\n", __func__, hctx->key_id);
+	return 0;
+}
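+
+/*
+ * Note: unlike a regular AES setkey, the "key" passed in is a 4-byte
+ * hardware key ID resolved inside SeP; the actual key material presumably
+ * never reaches the host. See sys_hwk_st_start() below for a usage sketch
+ * via crypto_ablkcipher_setkey().
+ */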
+
+#if defined(HWK_ST_DUMP_BUF) || defined(HWK_DUMP_BUF)
+static void hwk_dump_buf(u8 *buf, const char *buf_name, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		if (i % 64 == 0)
+			printk("\n%s: ", buf_name);
+		printk("%02x", buf[i]);
+	}
+	printk("\n");
+}
+#endif
+
+#ifdef HWK_DUMP_BUF
+static void hwk_dump_sg(struct scatterlist *sg, const char *buf_name)
+{
+	u8 *buf = kmap(sg_page(sg));
+	hwk_dump_buf(buf + sg->offset, buf_name, sg->length);
+	kunmap(sg_page(sg));
+}
+#endif
+
+static int hwk_process(struct ablkcipher_request *req, bool encrypt)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct hwk_context *hctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+	struct dxdi_sepapp_kparams p;
+	enum dxdi_sep_module ret_origin;
+	struct scatterlist iv_sg;
+	struct page *iv_page;
+	int rc;
+
+	iv_page = virt_to_page(req->info);
+	sg_init_table(&iv_sg, 1);
+	sg_set_page(&iv_sg, iv_page, SEP_AES_IV_SIZE,
+		    (unsigned long)req->info % PAGE_SIZE);
+
+#ifdef HWK_DUMP_BUF
+	hwk_dump_buf(req->info, "iv", SEP_AES_IV_SIZE);
+	hwk_dump_sg(&iv_sg, "iv");
+	hwk_dump_sg(req->src, "src");
+#endif
+
+	memset(&p, 0, sizeof(p));
+
+	p.params_types[0] = DXDI_SEPAPP_PARAM_VAL;
+	p.params[0].val.data[0] = hctx->key_id;
+	p.params[0].val.data[1] = req->nbytes | (encrypt << 16);
+	p.params[0].val.copy_dir = DXDI_DATA_TO_DEVICE;
+
+	p.params_types[1] = DXDI_SEPAPP_PARAM_MEMREF;
+	p.params[1].kmemref.dma_direction = DXDI_DATA_TO_DEVICE;
+	p.params[1].kmemref.sgl = &iv_sg;
+	p.params[1].kmemref.nbytes = SEP_AES_IV_SIZE;
+
+	p.params_types[2] = DXDI_SEPAPP_PARAM_MEMREF;
+	p.params[2].kmemref.dma_direction = DXDI_DATA_TO_DEVICE;
+	p.params[2].kmemref.sgl = req->src;
+	p.params[2].kmemref.nbytes = req->nbytes;
+
+	p.params_types[3] = DXDI_SEPAPP_PARAM_MEMREF;
+	p.params[3].kmemref.dma_direction = DXDI_DATA_FROM_DEVICE;
+	p.params[3].kmemref.sgl = req->dst;
+	p.params[3].kmemref.nbytes = req->nbytes;
+
+	pr_debug("%s: size=%d dir=%d\n", __func__, req->nbytes, encrypt);
+	rc = dx_sepapp_command_invoke(hctx->sctx, hctx->sess_id,
+				      HWK_CMD_CRYPTO, &p, &ret_origin);
+	pr_debug("%s: done: %d\n", __func__, rc);
+
+	if (rc != 0) {
+		pr_err("%s: error invoking command %d: %x (ret_origin= %x)\n",
+			__func__, HWK_CMD_CRYPTO, rc, ret_origin);
+		return -EINVAL;
+	}
+
+#ifdef HWK_DUMP_BUF
+	hwk_dump_sg(req->dst, "dst");
+#endif
+
+	return rc;
+}
+
+static int hwk_encrypt(struct ablkcipher_request *req)
+{
+	return hwk_process(req, true);
+}
+
+static int hwk_decrypt(struct ablkcipher_request *req)
+{
+	return hwk_process(req, false);
+}
+
+static struct crypto_alg hwk_alg = {
+	.cra_priority = 0,
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_ctxsize = sizeof(struct hwk_context),
+	.cra_alignmask = 0, /* Cannot use this due to bug in kernel */
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_name = "cbchk(aes)",
+	.cra_driver_name = MODULE_NAME "-aes-cbchk",
+	.cra_blocksize = SEP_AES_BLOCK_SIZE,
+	.cra_u = {
+		.ablkcipher = {
+			.min_keysize = SEP_AES_256_BIT_KEY_SIZE,
+			.max_keysize = SEP_AES_256_BIT_KEY_SIZE,
+			.ivsize = SEP_AES_IV_SIZE,
+			.setkey = hwk_set_key,
+			.encrypt = hwk_encrypt,
+			.decrypt = hwk_decrypt,
+		},
+	},
+	.cra_init = hwk_ctx_init,
+	.cra_exit = hwk_ctx_cleanup
+};
+
+int hwk_init(void)
+{
+	int rc = crypto_register_alg(&hwk_alg);
+	if (rc != 0)
+		pr_err("failed to register %s\n", hwk_alg.cra_name);
+	return rc;
+}
+
+void hwk_fini(void)
+{
+	crypto_unregister_alg(&hwk_alg);
+}
+
+#ifdef SEP_HWK_UNIT_TEST
+enum hwk_self_test {
+	HWK_ST_NOT_STARTED = 0,
+	HWK_ST_RUNNING,
+	HWK_ST_SUCCESS,
+	HWK_ST_ERROR
+};
+
+static enum hwk_self_test hwk_st_status = HWK_ST_NOT_STARTED;
+static const char * const hwk_st_strings[] = {
+	"not started",
+	"running",
+	"success",
+	"error"
+};
+
+ssize_t sys_hwk_st_show(struct kobject *kobj, struct kobj_attribute *attr,
+			char *buf)
+{
+	return sprintf(buf, "%s\n", hwk_st_strings[hwk_st_status]);
+}
+
+struct hwk_st_op_result {
+	struct completion completion;
+	int rc;
+};
+
+static void hwk_st_op_complete(struct crypto_async_request *req, int rc)
+{
+	struct hwk_st_op_result *hr = req->data;
+
+	if (rc == -EINPROGRESS)
+		return;
+
+	hr->rc = rc;
+	complete(&hr->completion);
+}
+
+static int hwk_st_do_op(struct ablkcipher_request *req, struct page *src,
+		struct page *dst, bool enc)
+{
+	struct scatterlist src_sg, dst_sg;
+	struct hwk_st_op_result hr = { .rc = 0 };
+	char iv[SEP_AES_IV_SIZE] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+				     12, 13, 14, 15 };
+	int ret;
+
+	init_completion(&hr.completion);
+	ablkcipher_request_set_callback(req,
+			CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+			hwk_st_op_complete, &hr);
+
+	sg_init_table(&src_sg, 1);
+	sg_set_page(&src_sg, src, PAGE_SIZE, 0);
+	sg_init_table(&dst_sg, 1);
+	sg_set_page(&dst_sg, dst, PAGE_SIZE, 0);
+	ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, PAGE_SIZE, iv);
+
+	pr_info("%s: submiting %s op..\n", __func__, enc ? "enc" : "dec");
+	if (enc)
+		ret = crypto_ablkcipher_encrypt(req);
+	else
+		ret = crypto_ablkcipher_decrypt(req);
+	pr_info("%s: op submitted\n", __func__);
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		wait_for_completion(&hr.completion);
+		ret = hr.rc;
+	}
+	pr_info("%s: op completed\n", __func__);
+
+	return ret;
+}
+
+ssize_t sys_hwk_st_start(struct kobject *kobj, struct kobj_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct ablkcipher_request *req;
+	struct page *src, *enc, *dec;
+	struct crypto_ablkcipher *acipher;
+	char *tmp, *tmp2, *tmp3;
+	int ret = -EINVAL, i;
+	u32 hwk_id;
+
+	if (hwk_st_status == HWK_ST_RUNNING)
+		return count;
+
+	hwk_st_status = HWK_ST_RUNNING;
+
+	ret = kstrtouint(buf, 10, &hwk_id);
+	if (ret) {
+		pr_err("bad hardware key id: %d\n", ret);
+		goto out;
+	}
+
+	ret = -ENOMEM;
+	src = alloc_page(GFP_KERNEL);
+	if (!src) {
+		pr_err("failed to allocate src page\n");
+		goto out;
+	}
+	enc = alloc_page(GFP_KERNEL);
+	if (!enc) {
+		pr_err("failed to allocate enc page\n");
+		goto out_free_src;
+	}
+	dec = alloc_page(GFP_KERNEL);
+	if (!dec) {
+		pr_err("failed to allocate dec page\n");
+		goto out_free_enc;
+	}
+
+	acipher = crypto_alloc_ablkcipher("cbchk(aes)", 0, 0);
+	if (IS_ERR(acipher)) {
+		pr_err("error allocating cipher: %ld\n", PTR_ERR(acipher));
+		ret = -EINVAL;
+		goto out_free_dec;
+	}
+
+	tmp = kmap(src);
+	for (i = 0; i < PAGE_SIZE; i++)
+		tmp[i] = i;
+	kunmap(src);
+
+	crypto_ablkcipher_set_flags(acipher, CRYPTO_TFM_REQ_WEAK_KEY);
+
+	pr_debug("setting hardware key %d\n", hwk_id);
+	ret = crypto_ablkcipher_setkey(acipher, (u8 *)&hwk_id, sizeof(hwk_id));
+	if (ret) {
+		pr_err("error setting hardware key: %d\n", ret);
+		goto out_free_cipher;
+	}
+
+	req = ablkcipher_request_alloc(acipher, GFP_NOFS);
+	if (!req) {
+		ret = -EINVAL;
+		pr_err("failed to allocate cipher request\n");
+		goto out_free_cipher;
+	}
+
+	ret = hwk_st_do_op(req, src, enc, true);
+	if (ret) {
+		pr_err("encryption failed: %d\n", ret);
+		goto out_free_req;
+	}
+
+	ret = hwk_st_do_op(req, enc, dec, false);
+	if (ret) {
+		pr_err("decryption failed: %d\n", ret);
+		goto out_free_req;
+	}
+
+	tmp = kmap(src); tmp2 = kmap(enc); tmp3 = kmap(dec);
+#ifdef HWK_ST_DUMP_BUF
+	hwk_dump_buf(tmp, "src", PAGE_SIZE);
+	hwk_dump_buf(tmp2, "enc", PAGE_SIZE);
+	hwk_dump_buf(tmp3, "dec", PAGE_SIZE);
+#endif
+	for (i = 0; i < PAGE_SIZE; i++) {
+		if (tmp[i] != tmp3[i]) {
+			ret = -EINVAL;
+			break;
+		}
+	}
+	kunmap(src); kunmap(enc); kunmap(dec);
+
+	if (ret)
+		pr_err("dec != src\n");
+
+out_free_req:
+	ablkcipher_request_free(req);
+out_free_cipher:
+	crypto_free_ablkcipher(acipher);
+out_free_dec:
+	__free_pages(dec, 0);
+out_free_enc:
+	__free_pages(enc, 0);
+out_free_src:
+	__free_pages(src, 0);
+out:
+	if (ret)
+		hwk_st_status = HWK_ST_ERROR;
+	else
+		hwk_st_status = HWK_ST_SUCCESS;
+	return count;
+}
+#endif
diff --git a/drivers/staging/sep54/desc_mgr.c b/drivers/staging/sep54/desc_mgr.c
new file mode 100644
index 0000000..1aac086
--- /dev/null
+++ b/drivers/staging/sep54/desc_mgr.c
@@ -0,0 +1,1234 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_DESC_MGR
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include "dx_driver.h"
+#include "dx_bitops.h"
+#include "sep_log.h"
+#include "sep_sw_desc.h"
+#include "crypto_ctx_mgr.h"
+#include "sep_sysfs.h"
+/* Registers definitions from shared/hw/include */
+#include "dx_reg_base_host.h"
+#include "dx_host.h"
+#define DX_CC_HOST_VIRT	/* must be defined before including dx_cc_regs.h */
+#include "dx_cc_regs.h"
+#include "sep_power.h"
+#include "desc_mgr.h"
+#include "dx_init_cc_abi.h"
+
+/* Queue buffer log(size in bytes) */
+#define SEP_SW_DESC_Q_MEM_SIZE_LOG 12	/*4KB */
+#define SEP_SW_DESC_Q_MEM_SIZE (1 << SEP_SW_DESC_Q_MEM_SIZE_LOG)
+#define WORD_SIZE_LOG 2		/*32b=4B=2^2 */
+/* Number of entries (descriptors in a queue) */
+#define SEP_DESC_Q_ENTRIES_NUM_LOG \
+	(SEP_SW_DESC_Q_MEM_SIZE_LOG - WORD_SIZE_LOG - SEP_SW_DESC_WORD_SIZE_LOG)
+#define SEP_DESC_Q_ENTRIES_NUM (1 << SEP_DESC_Q_ENTRIES_NUM_LOG)
+#define SEP_DESC_Q_ENTRIES_MASK BITMASK(SEP_DESC_Q_ENTRIES_NUM_LOG)
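+
+/* Example: with a 4KB queue buffer (SEP_SW_DESC_Q_MEM_SIZE_LOG == 12) and,
+ * say, SEP_SW_DESC_WORD_SIZE_LOG == 3 (8 words = 32 B per descriptor), this
+ * yields 2^(12 - 2 - 3) = 128 descriptor entries per queue. */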
+
+/* This watermark is used to initiate dispatching after the queue entered
+   the FULL state in order to avoid interrupts flooding at SeP */
+#define SEP_DESC_Q_WATERMARK_MARGIN ((SEP_DESC_Q_ENTRIES_NUM)/4)
+
+/* convert from descriptor counters index in descriptors array */
+#define GET_DESC_IDX(cntr) ((cntr) & SEP_DESC_Q_ENTRIES_MASK)
+#define GET_DESC_PTR(q_p, idx)				\
+	((struct sep_sw_desc *)((q_p)->q_base_p +	\
+				(idx << SEP_SW_DESC_WORD_SIZE_LOG)))
+#define GET_Q_PENDING_DESCS(q_p) ((q_p)->sent_cntr - (q_p)->completed_cntr)
+#define GET_Q_FREE_DESCS(q_p) \
+	 (SEP_DESC_Q_ENTRIES_NUM - GET_Q_PENDING_DESCS(q_p))
+#define IS_Q_FULL(q_p) (GET_Q_FREE_DESCS(q_p) == 0)
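+
+/* Note: sent_cntr/completed_cntr are free-running u32 counters, so this
+ * arithmetic stays correct across wrap-around; e.g. sent_cntr == 0x00000002
+ * with completed_cntr == 0xFFFFFFFE still gives GET_Q_PENDING_DESCS() == 4
+ * in unsigned arithmetic. */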
+
+/* LUT for GPRs registers offsets (to be added to cc_regs_base) */
+static const unsigned long host_to_sep_gpr_offset[] = {
+	DX_CC_REG_OFFSET(HOST, HOST_SEP_GPR0),
+	DX_CC_REG_OFFSET(HOST, HOST_SEP_GPR1),
+	DX_CC_REG_OFFSET(HOST, HOST_SEP_GPR2),
+	DX_CC_REG_OFFSET(HOST, HOST_SEP_GPR3),
+};
+
+static const unsigned long sep_to_host_gpr_offset[] = {
+	DX_CC_REG_OFFSET(HOST, SEP_HOST_GPR0),
+	DX_CC_REG_OFFSET(HOST, SEP_HOST_GPR1),
+	DX_CC_REG_OFFSET(HOST, SEP_HOST_GPR2),
+	DX_CC_REG_OFFSET(HOST, SEP_HOST_GPR3),
+};
+
+/**
+ * struct descs_backlog_item - Item of the queue descs_backlog_queue
+ */
+struct descs_backlog_item {
+	struct list_head list;
+	struct sep_sw_desc desc;
+};
+
+/**
+ * struct descs_backlog_queue - Queue of backlog descriptors
+ * @list:		List head item
+ * @cur_q_len:		Current number of entries in the backlog_q
+ * @backlog_items_pool:	Memory pool for allocating elements of this queue
+ * @backlog_items_pool_name:	Pool name string for kmem_cache object
+ */
+struct descs_backlog_queue {
+	struct list_head list;
+	unsigned int cur_q_len;
+	struct kmem_cache *backlog_items_pool;
+	char backlog_items_pool_name[24];
+};
+
+/**
+ * struct desc_q - Descriptor queue object
+ * @qid:		The associated software queue ID
+ * @qstate:		Operational state of the queue
+ * @gpr_to_sep:		Pointer to host-to-sep GPR for this queue (requests)
+ * @gpr_from_sep:	Pointer to sep-to-host GPR for this queue (completion)
+ * @qlock:		Protect data structure in non-interrupt context
+ * @q_base_p:		The base address of the descriptors cyclic queue buffer
+ * @q_base_dma:		The DMA address for q_base_p
+ * @sent_cntr:		Sent descriptors counter
+ * @completed_cntr:	Completed descriptors counter as reported by SeP
+ * @idle_jiffies:	jiffies value when the queue became idle (empty)
+ * @backlog_q:		Queue of backlog descriptors - pending to be dispatched
+ *			into the descriptors queue (were not dispatched because
+ *			it was full or in "sleep" state)
+ * @backlog_work:	Work task for handling/enqueuing backlog descriptors
+ * @enqueue_time:	Array to save descriptor start [ns] per descriptor
+ */
+struct desc_q {
+	int qid;
+	enum desc_q_state qstate;
+	void __iomem *gpr_to_sep;
+	void __iomem *gpr_from_sep;
+	struct queue_drvdata *drvdata;
+	struct mutex qlock;
+	u32 *q_base_p;
+	dma_addr_t q_base_dma;
+	u32 sent_cntr;
+	u32 completed_cntr;
+	unsigned long idle_jiffies;
+	struct descs_backlog_queue backlog_q;
+	struct work_struct backlog_work;
+	unsigned long long *enqueue_time;
+};
+
+static uintptr_t cookies[SEP_DESC_Q_ENTRIES_NUM];
+static DEFINE_MUTEX(cookie_lock);
+
+u32 add_cookie(uintptr_t op_ctx)
+{
+	u32 i;
+
+	mutex_lock(&cookie_lock);
+	for (i = 0; i < SEP_DESC_Q_ENTRIES_NUM; i++) {
+		if (cookies[i] == 0) {
+			cookies[i] = op_ctx;
+			break;
+		}
+	}
+	mutex_unlock(&cookie_lock);
+
+	/* Note: returns SEP_DESC_Q_ENTRIES_NUM when no free slot was found;
+	   callers must treat that value as an allocation failure */
+	return i;
+}
+
+void delete_cookie(u32 index)
+{
+	mutex_lock(&cookie_lock);
+	cookies[index] = 0;
+	mutex_unlock(&cookie_lock);
+}
+
+void delete_context(uintptr_t op_ctx)
+{
+	u32 i;
+
+	mutex_lock(&cookie_lock);
+	for (i = 0; i < SEP_DESC_Q_ENTRIES_NUM; i++) {
+		if (cookies[i] == op_ctx) {
+			cookies[i] = 0;
+			break;
+		}
+	}
+	mutex_unlock(&cookie_lock);
+}
+
+uintptr_t get_cookie(u32 index)
+{
+	return cookies[index];
+}
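+
+/* Usage sketch for the cookie table (illustrative only; 'my_op_ctx' is a
+ * hypothetical caller-owned context):
+ *
+ *	u32 idx = add_cookie((uintptr_t)my_op_ctx);
+ *	if (idx == SEP_DESC_Q_ENTRIES_NUM)
+ *		return -ENOMEM;	// table full - treat as allocation failure
+ *	...
+ *	// completion path: resolve the index back to the context
+ *	struct sep_op_ctx *ctx = (struct sep_op_ctx *)get_cookie(idx);
+ *	...
+ *	delete_cookie(idx);	// release the slot before freeing my_op_ctx
+ */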
+
+#ifdef DEBUG
+static void dump_desc(const struct sep_sw_desc *desc_p);
+#else
+#define dump_desc(desc_p) do {} while (0)
+#endif /*DEBUG*/
+static int backlog_q_init(struct desc_q *q_p);
+static void backlog_q_cleanup(struct desc_q *q_p);
+static void backlog_q_process(struct work_struct *work);
+
+/**
+ * desc_q_create() - Create descriptors queue object
+ * @qid:	 The queue ID (index)
+ * @drvdata:	 The associated queue driver data
+ * @state: Current state of sep
+ *
+ * Returns Allocated queue object handle (DESC_Q_INVALID_HANDLE for failure)
+ */
+void *desc_q_create(int qid, struct queue_drvdata *drvdata, int state)
+{
+	struct device *dev = drvdata->sep_data->dev;
+	void __iomem *cc_regs_base = drvdata->sep_data->cc_base;
+	struct desc_q *new_q_p;
+
+	new_q_p = kzalloc(sizeof(struct desc_q), GFP_KERNEL);
+	if (unlikely(new_q_p == NULL)) {
+		pr_err("Q%d: Failed allocating %zu B for new_q\n",
+			    qid, sizeof(struct desc_q));
+		goto desc_q_create_failed;
+	}
+
+	/* Initialize fields */
+	mutex_init(&new_q_p->qlock);
+	new_q_p->drvdata = drvdata;
+	new_q_p->qid = qid;
+	new_q_p->gpr_to_sep = cc_regs_base + host_to_sep_gpr_offset[qid];
+	new_q_p->gpr_from_sep = cc_regs_base + sep_to_host_gpr_offset[qid];
+	new_q_p->sent_cntr = 0;
+	new_q_p->completed_cntr = 0;
+	new_q_p->idle_jiffies = jiffies;
+
+	new_q_p->q_base_p = dma_alloc_coherent(dev, SEP_SW_DESC_Q_MEM_SIZE,
+					       &new_q_p->q_base_dma,
+					       GFP_KERNEL);
+	if (unlikely(new_q_p->q_base_p == NULL)) {
+		pr_err("Q%d: Failed allocating %d B for desc buffer\n",
+			    qid, SEP_SW_DESC_Q_MEM_SIZE);
+		goto desc_q_create_failed;
+	}
+
+	new_q_p->enqueue_time = kmalloc(SEP_DESC_Q_ENTRIES_NUM *
+					sizeof(u64), GFP_KERNEL);
+	if (new_q_p->enqueue_time == NULL) {
+		pr_err("Q%d: Failed allocating time stats array\n", qid);
+		goto desc_q_create_failed;
+	}
+
+	if (backlog_q_init(new_q_p) != 0) {
+		pr_err("Q%d: Failed creating backlog queue\n", qid);
+		goto desc_q_create_failed;
+	}
+	INIT_WORK(&new_q_p->backlog_work, backlog_q_process);
+
+	/* Initialize the respective GPR before SeP is initialized.
+	   Required because the GPR may be non-zero as a result of
+	   leftovers from the CC-init sequence */
+	if (state != DX_SEP_STATE_DONE_FW_INIT)
+		WRITE_REGISTER(new_q_p->gpr_to_sep, new_q_p->sent_cntr);
+
+	new_q_p->qstate = DESC_Q_ACTIVE;
+	return (void *)new_q_p;
+
+	/* Error cases cleanup */
+ desc_q_create_failed:
+	if (new_q_p != NULL) {
+		kfree(new_q_p->enqueue_time);
+		if (new_q_p->q_base_p != NULL)
+			dma_free_coherent(dev, SEP_SW_DESC_Q_MEM_SIZE,
+					  new_q_p->q_base_p,
+					  new_q_p->q_base_dma);
+		mutex_destroy(&new_q_p->qlock);
+		kfree(new_q_p);
+	}
+	return DESC_Q_INVALID_HANDLE;
+}
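+
+/* Usage sketch (illustrative; 'qid', 'drvdata' and 'sep_state' are assumed
+ * to follow the caller's conventions in dx_driver.c):
+ *
+ *	void *q_h = desc_q_create(qid, drvdata, sep_state);
+ *	if (q_h == DESC_Q_INVALID_HANDLE)
+ *		return -ENOMEM;
+ *	...
+ *	desc_q_destroy(q_h);
+ */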
+
+/**
+ * desc_q_destroy() - Destroy descriptors queue object (free resources)
+ * @q_h:	 The queue object handle
+ *
+ */
+void desc_q_destroy(void *q_h)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	struct device *dev = q_p->drvdata->sep_data->dev;
+
+	if (q_p->sent_cntr != q_p->completed_cntr) {
+		pr_err(
+			    "Q%d: destroyed while there are outstanding descriptors\n",
+			    q_p->qid);
+	}
+	backlog_q_cleanup(q_p);
+	kfree(q_p->enqueue_time);
+	dma_free_coherent(dev, SEP_SW_DESC_Q_MEM_SIZE,
+			  q_p->q_base_p, q_p->q_base_dma);
+	mutex_destroy(&q_p->qlock);
+	kfree(q_p);
+}
+
+/**
+ * desc_q_set_state() - Set queue state (SLEEP or ACTIVE)
+ * @q_h:	The queue object handle
+ * @state:	The requested state
+ */
+int desc_q_set_state(void *q_h, enum desc_q_state state)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	int rc = 0;
+
+#ifdef DEBUG
+	if ((q_p->qstate != DESC_Q_ACTIVE) && (q_p->qstate != DESC_Q_ASLEEP)) {
+		pr_err("Q%d is in invalid state: %d\n",
+			    q_p->qid, q_p->qstate);
+		return -EINVAL;
+	}
+#endif
+	mutex_lock(&q_p->qlock);
+	switch (state) {
+	case DESC_Q_ASLEEP:
+		if (q_p->qstate != DESC_Q_ASLEEP) {
+			/* If not already in this state */
+			if (desc_q_is_idle(q_h, NULL))
+				q_p->qstate = DESC_Q_ASLEEP;
+			else
+				rc = -EBUSY;
+		}		/* else: already asleep */
+		break;
+	case DESC_Q_ACTIVE:
+		if (q_p->qstate != DESC_Q_ACTIVE) {
+			/* Initiate enqueue from backlog if any is pending */
+			if (q_p->backlog_q.cur_q_len > 0)
+				(void)schedule_work(&q_p->backlog_work);
+			else	/* Empty --> Back to idle state */
+				q_p->idle_jiffies = jiffies;
+			q_p->qstate = DESC_Q_ACTIVE;
+		}		/* else: already active */
+		break;
+	default:
+		pr_err("Invalid requested state: %d\n", state);
+		rc = -EINVAL;
+	}
+	mutex_unlock(&q_p->qlock);
+	return rc;
+}
+
+/**
+ * desc_q_get_state() - Get queue state
+ * @q_h:	The queue object handle
+ */
+enum desc_q_state desc_q_get_state(void *q_h)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	return q_p->qstate;
+}
+
+/**
+ * desc_q_is_idle() - Report if given queue is active but empty/idle.
+ * @q_h:		The queue object handle
+ * @idle_jiffies_p:	Return jiffies at which the queue became idle
+ */
+bool desc_q_is_idle(void *q_h, unsigned long *idle_jiffies_p)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	if (idle_jiffies_p != NULL)
+		*idle_jiffies_p = q_p->idle_jiffies;
+	/* No need to lock the queue - returned information is "fluid" anyway */
+	return ((q_p->qstate == DESC_Q_ACTIVE) &&
+		(GET_Q_PENDING_DESCS(q_p) == 0) &&
+		(q_p->backlog_q.cur_q_len == 0));
+}
+
+/**
+ * desc_q_cntr_set() - Set queue counters from the SeP GPR value
+ * @q_h:	The queue object handle
+ */
+int desc_q_cntr_set(void *q_h)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	u32 val = READ_REGISTER(q_p->gpr_from_sep);
+
+	q_p->sent_cntr = val;
+	q_p->completed_cntr = val;
+
+	return 0;
+}
+
+/**
+ * desc_q_reset() - Reset sent/completed counters of queue
+ * @q_h:	The queue object handle
+ *
+ * This function should be invoked only when the queue is in ASLEEP state
+ * after the transition of SeP to sleep state completed.
+ * Returns -EBUSY if the queue is not in the correct state for reset.
+ */
+int desc_q_reset(void *q_h)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	int rc = 0;
+
+	mutex_lock(&q_p->qlock);
+	if ((q_p->qstate == DESC_Q_ASLEEP) && (GET_Q_PENDING_DESCS(q_p) == 0)) {
+		q_p->sent_cntr = 0;
+		q_p->completed_cntr = 0;
+	} else {
+		pr_err("Invoked when queue is not ASLEEP\n");
+		rc = -EBUSY;
+	}
+	mutex_unlock(&q_p->qlock);
+	return rc;
+}
+
+/**
+ * dispatch_sw_desc() - Copy given descriptor into next free entry in the
+ *			descriptors queue and signal SeP.
+ *
+ * @q_p:	Desc. queue context
+ * @desc_p:	The descriptor to dispatch
+ *
+ * This function should be called with qlock locked (non-interrupt context)
+ * and only if queue is not full (i.e., this function does not validate
+ * queue utilization)
+ */
+static inline void dispatch_sw_desc(struct desc_q *q_p,
+				    struct sep_sw_desc *desc_p)
+{
+	const u32 desc_idx = GET_DESC_IDX(q_p->sent_cntr);
+
+	dump_desc(desc_p);
+	preempt_disable_notrace();
+	q_p->enqueue_time[desc_idx] = sched_clock();	/* Save start time */
+	preempt_enable_notrace();
+	/* copy descriptor to free entry in queue */
+	SEP_SW_DESC_COPY_TO_SEP(GET_DESC_PTR(q_p, desc_idx), desc_p);
+	q_p->sent_cntr++;
+}
+
+/**
+ * desc_q_enqueue_sleep_req() - Enqueue SLEEP_REQ descriptor
+ * @q_h:	The queue object handle
+ * @op_ctx:	The operation context for this descriptor
+ * This function may be invoked only when the queue is in ASLEEP state
+ * (assuming SeP is still active).
+ * If the queue is not in ASLEEP state, this function returns -EBUSY.
+ */
+int desc_q_enqueue_sleep_req(void *q_h, struct sep_op_ctx *op_ctx)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	struct sep_sw_desc desc;
+	int rc = 0;
+
+	SEP_SW_DESC_INIT(&desc);
+	SEP_SW_DESC_SET(&desc, TYPE, SEP_SW_DESC_TYPE_SLEEP_REQ);
+	SEP_SW_DESC_SET_COOKIE(&desc, op_ctx);
+
+	mutex_lock(&q_p->qlock);
+	if (q_p->qstate == DESC_Q_ASLEEP) {
+		op_ctx->op_state = USER_OP_INPROC;
+		/* In ASLEEP state the queue is assumed to be empty */
+		dispatch_sw_desc(q_p, &desc);
+		WRITE_REGISTER(q_p->gpr_to_sep, q_p->sent_cntr);
+		pr_debug("Sent SLEEP_REQ\n");
+	} else {
+		rc = -EBUSY;
+	}
+	mutex_unlock(&q_p->qlock);
+	return rc;
+}
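+
+/* Sketch of the expected sleep/resume sequence (inferred from the state
+ * checks above; the actual power-management caller lives in sep_power.c):
+ *
+ *	rc = desc_q_set_state(q_h, DESC_Q_ASLEEP);	// succeeds only if idle
+ *	if (rc == 0)
+ *		rc = desc_q_enqueue_sleep_req(q_h, op_ctx);
+ *	...				// wait for SeP sleep completion
+ *	rc = desc_q_reset(q_h);		// counters back to 0
+ *	...
+ *	rc = desc_q_set_state(q_h, DESC_Q_ACTIVE);	// on resume
+ */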
+
+static int backlog_q_init(struct desc_q *q_p)
+{
+	struct descs_backlog_queue *backlog_q_p = &q_p->backlog_q;
+	int rc = 0;
+
+	snprintf(backlog_q_p->backlog_items_pool_name,
+		 sizeof(backlog_q_p->backlog_items_pool_name),
+		 "dx_sep_backlog%d", q_p->qid);
+	backlog_q_p->backlog_items_pool =
+	    kmem_cache_create(backlog_q_p->backlog_items_pool_name,
+			      sizeof(struct descs_backlog_item),
+			      sizeof(u32), 0, NULL);
+	if (unlikely(backlog_q_p->backlog_items_pool == NULL)) {
+		pr_err("Q%d: Failed allocating backlog_items_pool\n",
+			    q_p->qid);
+		rc = -ENOMEM;
+	} else {
+		INIT_LIST_HEAD(&backlog_q_p->list);
+		backlog_q_p->cur_q_len = 0;
+	}
+	return rc;
+}
+
+static void backlog_q_cleanup(struct desc_q *q_p)
+{
+	struct descs_backlog_queue *backlog_q_p = &q_p->backlog_q;
+
+	if (backlog_q_p->cur_q_len > 0) {
+		pr_err("Q%d: Cleanup while have %u pending items!",
+			    q_p->qid, backlog_q_p->cur_q_len);
+		/* TODO: Handle freeing of pending items? */
+	}
+	kmem_cache_destroy(backlog_q_p->backlog_items_pool);
+}
+
+/**
+ * backlog_q_enqueue() - Enqueue given descriptor for postponed processing
+ *				(e.g., in case of full desc_q)
+ *
+ * @q_p:	Desc. queue object
+ * @desc_p:	Descriptor to enqueue
+ *
+ * Caller must call this function with the qlock locked (non-interrupt context
+ * only)
+ */
+static int backlog_q_enqueue(struct desc_q *q_p, struct sep_sw_desc *desc_p)
+{
+	struct descs_backlog_queue *backlog_q_p = &q_p->backlog_q;
+	struct sep_op_ctx *op_ctx = SEP_SW_DESC_GET_COOKIE(desc_p);
+	struct descs_backlog_item *new_q_item;
+
+	pr_debug("->backlog(op_ctx=%p):\n", op_ctx);
+	dump_desc(desc_p);
+
+	new_q_item =
+	    kmem_cache_alloc(backlog_q_p->backlog_items_pool, GFP_KERNEL);
+	if (unlikely(new_q_item == NULL)) {
+		pr_err("Failed allocating descs_queue_item");
+		op_ctx->op_state = USER_OP_NOP;
+		return -ENOMEM;
+	}
+	op_ctx->op_state = USER_OP_PENDING;
+	memcpy(&new_q_item->desc, desc_p, sizeof(struct sep_sw_desc));
+	list_add_tail(&new_q_item->list, &backlog_q_p->list);
+	op_ctx->backlog_descs_cntr++;
+	backlog_q_p->cur_q_len++;
+	return 0;
+}
+
+/**
+ * backlog_q_dequeue() - Dequeue from pending descriptors queue and dispatch
+ *			into the SW-q the first pending descriptor
+ *
+ * @q_p:	Desc. queue object
+ *
+ * This function must be called with qlock locked and only if there is free
+ * space in the given descriptor queue.
+ * It returns 0 on success, -ENOMEM if there is no pending request, and
+ * -EINVAL if the dequeued descriptor carries a NULL cookie.
+ */
+static int backlog_q_dequeue(struct desc_q *q_p)
+{
+	struct descs_backlog_queue *backlog_q_p = &q_p->backlog_q;
+	struct descs_backlog_item *first_item;
+	struct sep_sw_desc *desc_p;
+	struct sep_op_ctx *op_ctx;
+
+	if (list_empty(&backlog_q_p->list))
+		return -ENOMEM;
+	/* Remove the first item from the list, but keep the item allocation */
+	first_item = list_first_entry(&backlog_q_p->list,
+				      struct descs_backlog_item, list);
+	list_del(&first_item->list);
+	backlog_q_p->cur_q_len--;
+	/* Process/dispatch the descriptor to the SW-q. */
+	desc_p = &first_item->desc;
+	dump_desc(desc_p);
+	op_ctx = SEP_SW_DESC_GET_COOKIE(desc_p);
+	if (unlikely(op_ctx == NULL)) {
+		pr_err("Invalid desc - COOKIE is NULL\n");
+		return -EINVAL;
+	}
+	pr_debug("backlog(op_ctx=%p)->descQ:\n", op_ctx);
+	dispatch_sw_desc(q_p, desc_p);
+	op_ctx->backlog_descs_cntr--;
+	/* Now we can free the list item */
+	kmem_cache_free(backlog_q_p->backlog_items_pool, first_item);
+	if (op_ctx->backlog_descs_cntr == 0) {
+		/* All the operation descriptors reached the SW-q. */
+		op_ctx->op_state = USER_OP_INPROC;
+		if (op_ctx->comp_work != NULL)
+			/* Async. (CryptoAPI) */
+			/* Invoke completion callback directly because
+			   we are already in work_queue context and we wish
+			   to assure this state update (EINPROGRESS)
+			   is delivered before the request is completed */
+			op_ctx->comp_work->func(op_ctx->comp_work);
+	}
+	return 0;
+}
+
+/**
+ * backlog_q_process() - Handler for dispatching backlog descriptors
+ *			into the SW desc.Q when possible (dispatched from
+ *			the completion interrupt handler)
+ *
+ * @work:	The work context
+ */
+static void backlog_q_process(struct work_struct *work)
+{
+	int descs_to_enqueue;
+	struct desc_q *q_p = container_of(work, struct desc_q, backlog_work);
+
+	mutex_lock(&q_p->qlock);
+	if (q_p->qstate == DESC_Q_ACTIVE) {	/* Avoid on ASLEEP state */
+		descs_to_enqueue = GET_Q_FREE_DESCS(q_p);
+		/* Not more than pending descriptors */
+		if (descs_to_enqueue > q_p->backlog_q.cur_q_len)
+			descs_to_enqueue = q_p->backlog_q.cur_q_len;
+		pr_debug("Q%d: Dispatching %d descs. from pendQ\n",
+			      q_p->qid, descs_to_enqueue);
+		while (descs_to_enqueue > 0) {
+			/* From backlog queue to SW descriptors queue */
+			if (!backlog_q_dequeue(q_p))
+				descs_to_enqueue--;
+			else
+				break;
+		}
+		/* Signal SeP once for all new descriptors
+		   (interrupt coalescing) */
+		WRITE_REGISTER(q_p->gpr_to_sep, q_p->sent_cntr);
+	}
+	mutex_unlock(&q_p->qlock);
+}
+
+/**
+ * desc_q_get_info4sep() - Get queue address and size to be used in FW init
+ *				phase
+ * @q_h:	 The queue object handle
+ * @base_addr_p:	 Base address return parameter
+ * @size_p:	 Queue size (in bytes) return parameter
+ *
+ */
+void desc_q_get_info4sep(void *q_h,
+			 dma_addr_t *base_addr_p, unsigned long *size_p)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+
+	*base_addr_p = q_p->q_base_dma;
+	*size_p = SEP_SW_DESC_Q_MEM_SIZE;
+}
+
+/**
+ * desc_q_enqueue() - Enqueue given descriptor in given queue
+ * @q_h:		The queue object handle
+ * @desc_p:		Pointer to descriptor
+ * @may_backlog:	When "true" and descQ is full or ASLEEP, may enqueue
+ *			the given desc. in the backlog queue.
+ *			When "false", any of the above cases would cause
+ *			returning -ENOMEM.
+ *
+ * The function updates the op_ctx->op_state according to its results.
+ * Returns -EINPROGRESS on successful dispatch into the SW desc. queue.
+ * Returns -EBUSY if may_backlog==true and the descriptor was enqueued in
+ * the backlog queue.
+ * Returns -ENOMEM if queue is full and cannot enqueue in the backlog queue
+ */
+int desc_q_enqueue(void *q_h, struct sep_sw_desc *desc_p, bool may_backlog)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	struct sep_op_ctx *op_ctx = SEP_SW_DESC_GET_COOKIE(desc_p);
+	int rc;
+
+	mutex_lock(&q_p->qlock);
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_get();
+#endif
+
+	if (IS_Q_FULL(q_p) ||	/* Queue is full */
+	    (q_p->backlog_q.cur_q_len > 0) ||	/* or already have pending d. */
+	    (q_p->qstate == DESC_Q_ASLEEP)) {	/* or in sleep state */
+		if (may_backlog) {
+			pr_debug("Enqueuing desc. to queue@%s\n",
+				 q_p->qstate == DESC_Q_ASLEEP ?
+					 "ASLEEP" : "FULL");
+			rc = backlog_q_enqueue(q_p, desc_p);
+			if (unlikely(rc != 0)) {
+				pr_err("Failed enqueuing desc. to queue@%s\n",
+				       q_p->qstate == DESC_Q_ASLEEP ?
+					       "ASLEEP" : "FULL");
+			} else {
+				rc = -EBUSY;
+			}
+		} else {
+			pr_debug("Q%d: %s and may not backlog.\n",
+				 q_p->qid,
+				 q_p->qstate == DESC_Q_ASLEEP ?
+					 "ASLEEP" : "FULL");
+			rc = -ENOMEM;
+		}
+
+	} else {		/* Can dispatch to actual descriptors queue */
+		op_ctx->op_state = USER_OP_INPROC;
+		dispatch_sw_desc(q_p, desc_p);
+		/* Signal SeP of new descriptors */
+		WRITE_REGISTER(q_p->gpr_to_sep, q_p->sent_cntr);
+		pr_debug("Q#%d: Sent SwDesc #%u (op_ctx=%p)\n",
+			      q_p->qid, q_p->sent_cntr, op_ctx);
+		rc = -EINPROGRESS;
+	}
+
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_put();
+#endif
+	mutex_unlock(&q_p->qlock);
+
+	return rc;		/* Enqueued to desc. queue */
+}
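+
+/* Typical caller pattern (a sketch; IS_DESCQ_ENQUEUE_ERR() in desc_mgr.h
+ * treats both -EINPROGRESS and -EBUSY as success):
+ *
+ *	rc = desc_q_enqueue(q_h, &desc, may_backlog);
+ *	if (IS_DESCQ_ENQUEUE_ERR(rc))
+ *		return rc;	// -ENOMEM or another failure
+ *	// -EINPROGRESS: dispatched; -EBUSY: backlogged - await completion
+ */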
+
+/**
+ * desc_q_mark_invalid_cookie() - Mark outstanding descriptors of given
+ *					cookie as invalid
+ * @q_h:	 Descriptor queue handle
+ * @cookie:	 Invalidate descriptors with this cookie
+ *
+ * Mark the given cookie as invalid so that a completion arriving after a
+ * timeout is ignored. Invoke this before releasing the op_ctx object.
+ * There is no race with the interrupt because the op_ctx (cookie) is still
+ * valid when invoking this function.
+ */
+void desc_q_mark_invalid_cookie(void *q_h, void *cookie)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	struct sep_sw_desc desc;
+	u32 cur_desc_cntr, cur_desc_idx;
+	unsigned int drop_cnt = 0;
+
+	mutex_lock(&q_p->qlock);
+
+	for (cur_desc_cntr = q_p->completed_cntr;
+	     cur_desc_cntr < q_p->sent_cntr; cur_desc_cntr++) {
+		/* Mark all outstanding of given cookie as invalid with NULL */
+		cur_desc_idx = GET_DESC_IDX(cur_desc_cntr);
+		/* Copy descriptor to spad (endianness fix-up) */
+		/* TODO: Optimize to avoid full copy back...  */
+		/* (we only need the cookie) */
+		SEP_SW_DESC_COPY_FROM_SEP(&desc,
+					  GET_DESC_PTR(q_p, cur_desc_idx));
+		if (SEP_SW_DESC_GET_COOKIE(&desc) == cookie) {
+			SEP_SW_DESC_SET_COOKIE(&desc, (uintptr_t *)0);	/* Invalidate */
+			SEP_SW_DESC_COPY_TO_SEP(GET_DESC_PTR(q_p, cur_desc_idx),
+						&desc);
+			pr_debug("Invalidated desc at desc_cnt=%u\n",
+				      cur_desc_idx);
+			drop_cnt++;
+		}
+	}
+
+	mutex_unlock(&q_p->qlock);
+
+	if (drop_cnt > 0)
+		pr_warn("Invalidated %u descriptors of cookie=0x%p\n",
+			drop_cnt, cookie);
+
+}
+
+/**
+ * desc_q_process_completed() - Dequeue and process any completed descriptors in
+ *				the queue
+ * @q_h:	 The queue object handle
+ *
+ * Dequeue and process any completed descriptors in the queue
+ * (This function assumes non-reentrancy since it is invoked from
+ *  either interrupt handler or in workqueue context)
+ */
+void desc_q_process_completed(void *q_h)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	struct sep_op_ctx *op_ctx;
+	struct sep_sw_desc desc;
+	enum sep_sw_desc_type desc_type;
+	struct sep_op_ctx *cookie;
+	u32 ret_code;
+	u32 desc_idx;
+	u32 new_completed_cntr;
+
+	new_completed_cntr = READ_REGISTER(q_p->gpr_from_sep);
+	/* Sanity check on the GPR value read (must be between completed and
+	   sent). This arithmetic is cyclic, so it works even after the
+	   counters wrap around. */
+	if ((q_p->sent_cntr - new_completed_cntr) >
+	    (q_p->sent_cntr - q_p->completed_cntr)) {
+		/* More completions than outstanding descriptors ?! */
+		pr_err(
+			    "sent_cntr=0x%08X completed_cntr=0x%08X gpr=0x%08X\n",
+			    q_p->sent_cntr, q_p->completed_cntr,
+			    new_completed_cntr);
+		/* This is a (SeP) bug case that is not supposed to happen,
+		   but we must verify it to avoid accessing stale descriptor
+		   data (which may cause system memory corruption).
+		   Returning to the caller may result in interrupt loss, but
+		   we prefer losing a completion and blocking the caller
+		   forever over invoking BUG(), which would crash the whole
+		   system and may even lose the error log message. This gives
+		   a subsequent pending descriptor completion a chance to
+		   recover this case or, in the worst case, lets the system
+		   administrator understand what is going on and perform a
+		   graceful reboot. */
+		return;		/*SEP_DRIVER_BUG() */
+	}
+
+	while (new_completed_cntr > q_p->completed_cntr) {
+
+		desc_idx = GET_DESC_IDX(q_p->completed_cntr);
+		/* Copy descriptor to spad (endianness fix-up) */
+		/* TODO: Optimize to avoid full copy back...  */
+		/* (we only need the fields: type, retcode, cookie) */
+		SEP_SW_DESC_COPY_FROM_SEP(&desc, GET_DESC_PTR(q_p, desc_idx));
+		desc_type = SEP_SW_DESC_GET(&desc, TYPE);
+		cookie = SEP_SW_DESC_GET_COOKIE(&desc);
+		ret_code = SEP_SW_DESC_GET(&desc, RET_CODE);
+		sysfs_update_sep_stats(q_p->qid, desc_type,
+				       q_p->enqueue_time[desc_idx],
+				       sched_clock());
+		q_p->completed_cntr++;	/* prepare for next */
+		pr_debug("type=%u retcode=0x%08X cookie=0x%p",
+			 desc_type, ret_code, cookie);
+		if (cookie == NULL) {
+			/* Probably late completion on invalidated cookie */
+			pr_err("Got completion with NULL cookie\n");
+			continue;
+		}
+
+		op_ctx = (struct sep_op_ctx *)cookie;
+		if (desc_type == SEP_SW_DESC_TYPE_APP_REQ) {/* Applet Req. */
+			/* "internal error" flag is currently available only
+			   in this descriptor type. */
+			op_ctx->internal_error =
+			    SEP_SW_DESC_GET4TYPE(&desc, APP_REQ, INTERNAL_ERR);
+			/* Get session ID for SESSION_OPEN case */
+			op_ctx->session_ctx->sep_session_id =
+			    SEP_SW_DESC_GET4TYPE(&desc, APP_REQ, SESSION_ID);
+		}
+
+#ifdef DEBUG
+		if (op_ctx->pending_descs_cntr > MAX_PENDING_DESCS)
+			pr_err("Invalid num of pending descs %d\n",
+				    op_ctx->pending_descs_cntr);
+#endif
+		/* pending descriptors counter (apply for transactions composed
+		   of more than a single descriptor) */
+		op_ctx->pending_descs_cntr--;
+		/* Update associated operation context and notify it */
+		op_ctx->error_info |= ret_code;
+		if (op_ctx->pending_descs_cntr == 0) {
+			op_ctx->op_state = USER_OP_COMPLETED;
+			if (op_ctx->comp_work != NULL)	/* Async. (CryptoAPI) */
+				(void)schedule_work(op_ctx->comp_work);
+			else	/* Sync. (IOCTL or dx_sepapp_ API) */
+				complete(&(op_ctx->ioctl_op_compl));
+		}
+	}			/* while(new_completed_cntr) */
+
+	/* Dispatch pending requests */
+	/* if any pending descs. & utilization is below watermark & !ASLEEP */
+	if ((q_p->backlog_q.cur_q_len > 0) &&
+	    (GET_Q_FREE_DESCS(q_p) > SEP_DESC_Q_WATERMARK_MARGIN) &&
+	    (q_p->qstate != DESC_Q_ASLEEP)) {
+		(void)schedule_work(&q_p->backlog_work);
+	} else if (desc_q_is_idle(q_h, NULL)) {
+		q_p->idle_jiffies = jiffies;
+	}
+
+}
+
+/**
+ * desq_q_pack_debug_desc() - Create a debug descriptor in given buffer
+ * @desc_p:	 The descriptor buffer
+ * @op_ctx:	 The operation context
+ *
+ * TODO: Get additional debug descriptors (in addition to loopback)
+ *
+ */
+void desq_q_pack_debug_desc(struct sep_sw_desc *desc_p,
+			    struct sep_op_ctx *op_ctx)
+{
+	SEP_SW_DESC_INIT(desc_p);
+	SEP_SW_DESC_SET(desc_p, TYPE, SEP_SW_DESC_TYPE_DEBUG);
+	SEP_SW_DESC_SET_COOKIE(desc_p, op_ctx);
+}
+
+/**
+ * desc_q_pack_crypto_op_desc() - Pack a CRYPTO_OP descriptor in given
+ *				descriptor buffer
+ * @desc_p:	 The descriptor buffer
+ * @op_ctx:	 The operation context
+ * @sep_ctx_load_req:	 Context load request flag
+ * @sep_ctx_init_req:	 Context initialize request flag
+ * @proc_mode:	 Descriptor processing mode
+ *
+ */
+void desc_q_pack_crypto_op_desc(struct sep_sw_desc *desc_p,
+				struct sep_op_ctx *op_ctx,
+				int sep_ctx_load_req, int sep_ctx_init_req,
+				enum sep_proc_mode proc_mode)
+{
+	u32 xlli_addr;
+	u16 xlli_size;
+	u16 table_count;
+
+	SEP_SW_DESC_INIT(desc_p);
+	SEP_SW_DESC_SET(desc_p, TYPE, SEP_SW_DESC_TYPE_CRYPTO_OP);
+	SEP_SW_DESC_SET_COOKIE(desc_p, op_ctx);
+
+	SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, FW_CACHE_IDX,
+			     ctxmgr_get_sep_cache_idx(&op_ctx->ctx_info));
+	SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, L, sep_ctx_load_req);
+	SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, HCB_ADDR,
+			     ctxmgr_get_sep_ctx_dma_addr(&op_ctx->ctx_info));
+	SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, I, sep_ctx_init_req);
+	SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, PROC_MODE, proc_mode);
+
+	if (proc_mode != SEP_PROC_MODE_NOP) {	/* no need for IFT/OFT in NOP */
+		/* IFT details */
+		llimgr_get_mlli_desc_info(&op_ctx->ift,
+					  &xlli_addr, &xlli_size, &table_count);
+		SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, IFT_ADDR, xlli_addr);
+		SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, IFT_SIZE, xlli_size);
+		SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, IFT_NUM, table_count);
+
+		/* OFT details */
+		llimgr_get_mlli_desc_info(&op_ctx->oft,
+					  &xlli_addr, &xlli_size, &table_count);
+		SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, OFT_ADDR, xlli_addr);
+		SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, OFT_SIZE, xlli_size);
+		SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, OFT_NUM, table_count);
+	}
+}
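+
+/* Sketch of the pack-then-enqueue flow for a crypto operation (illustrative;
+ * the real callers live in crypto_api.c):
+ *
+ *	struct sep_sw_desc desc;
+ *
+ *	desc_q_pack_crypto_op_desc(&desc, op_ctx, sep_ctx_load_req,
+ *				   sep_ctx_init_req, SEP_PROC_MODE_FIN);
+ *	rc = desc_q_enqueue(q_h, &desc, true);
+ *	if (IS_DESCQ_ENQUEUE_ERR(rc))
+ *		goto error;
+ */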
+
+/**
+ * desc_q_pack_combined_op_desc() - Pack a COMBINED_OP descriptor in given
+ *					descriptor buffer
+ * @desc_p:	 The descriptor buffer
+ * @op_ctx:	 The operation context
+ * @sep_ctx_load_req:	 Context load request flag
+ * @sep_ctx_init_req:	 Context initialize request flag
+ * @proc_mode:	 Descriptor processing mode
+ * @cfg_scheme:	 The SEP format configuration scheme claimed by the user
+ *
+ */
+void desc_q_pack_combined_op_desc(struct sep_sw_desc *desc_p,
+				  struct sep_op_ctx *op_ctx,
+				  int sep_ctx_load_req, int sep_ctx_init_req,
+				  enum sep_proc_mode proc_mode,
+				  u32 cfg_scheme)
+{
+	u32 xlli_addr;
+	u16 xlli_size;
+	u16 table_count;
+
+	SEP_SW_DESC_INIT(desc_p);
+	SEP_SW_DESC_SET(desc_p, TYPE, SEP_SW_DESC_TYPE_COMBINED_OP);
+	SEP_SW_DESC_SET_COOKIE(desc_p, op_ctx);
+
+	SEP_SW_DESC_SET4TYPE(desc_p, COMBINED_OP, L, sep_ctx_load_req);
+	SEP_SW_DESC_SET4TYPE(desc_p, COMBINED_OP, CONFIG_SCHEME, cfg_scheme);
+	SEP_SW_DESC_SET4TYPE(desc_p, COMBINED_OP, I, sep_ctx_init_req);
+	SEP_SW_DESC_SET4TYPE(desc_p, COMBINED_OP, PROC_MODE, proc_mode);
+
+	if (proc_mode != SEP_PROC_MODE_NOP) {	/* no need for IFT/OFT in NOP */
+		/* IFT details */
+		llimgr_get_mlli_desc_info(&op_ctx->ift,
+					  &xlli_addr, &xlli_size, &table_count);
+		SEP_SW_DESC_SET4TYPE(desc_p, COMBINED_OP, IFT_ADDR, xlli_addr);
+		SEP_SW_DESC_SET4TYPE(desc_p, COMBINED_OP, IFT_SIZE, xlli_size);
+		SEP_SW_DESC_SET4TYPE(desc_p, COMBINED_OP, IFT_NUM, table_count);
+
+		/* OFT details */
+		llimgr_get_mlli_desc_info(&op_ctx->oft,
+					  &xlli_addr, &xlli_size, &table_count);
+		SEP_SW_DESC_SET4TYPE(desc_p, COMBINED_OP, OFT_ADDR, xlli_addr);
+		SEP_SW_DESC_SET4TYPE(desc_p, COMBINED_OP, OFT_SIZE, xlli_size);
+		SEP_SW_DESC_SET4TYPE(desc_p, COMBINED_OP, OFT_NUM, table_count);
+	}
+}
+
+/**
+ * desc_q_pack_load_op_desc() - Pack a LOAD_OP descriptor in given descriptor
+ *				buffer
+ * @desc_p:	 The descriptor buffer
+ * @op_ctx:	 The operation context
+ * @sep_ctx_load_req:	 Context load request flag
+ *
+ */
+void desc_q_pack_load_op_desc(struct sep_sw_desc *desc_p,
+			      struct sep_op_ctx *op_ctx, int *sep_ctx_load_req)
+{
+	struct client_crypto_ctx_info *ctx_info_p = &(op_ctx->ctx_info);
+	u32 *p = (u32 *)desc_p;
+	int idx;
+
+	SEP_SW_DESC_INIT(desc_p);
+	SEP_SW_DESC_SET(desc_p, TYPE, SEP_SW_DESC_TYPE_LOAD_OP);
+	SEP_SW_DESC_SET_COOKIE(desc_p, op_ctx);
+
+	for (idx = 0; idx < SEP_MAX_COMBINED_ENGINES; idx++, ctx_info_p++) {
+		BITFIELD_SET(p[SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_WORD_OFFSET],
+			     SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_BIT_OFFSET(idx),
+			     SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_BIT_SIZE,
+			     (ctx_info_p->ctx_kptr == NULL) ? (-1) :
+			     ctxmgr_get_sep_cache_idx(ctx_info_p));
+
+		BITFIELD_SET(p[SEP_SW_DESC_LOAD_OP_HCB_ADDR_WORD_OFFSET(idx)],
+			     SEP_SW_DESC_LOAD_OP_HCB_ADDR_BIT_OFFSET,
+			     SEP_SW_DESC_LOAD_OP_HCB_ADDR_BIT_SIZE,
+			     (ctx_info_p->ctx_kptr == NULL) ? 0 :
+			     (ctxmgr_get_sep_ctx_dma_addr(ctx_info_p) >>
+			      SEP_SW_DESC_LOAD_OP_HCB_ADDR_BIT_OFFSET));
+		/* Shifting the DMA address is needed because the "L"
+		   bit occupies the LS bit */
+
+		BITFIELD_SET(p[SEP_SW_DESC_LOAD_OP_L_WORD_OFFSET(idx)],
+			     SEP_SW_DESC_LOAD_OP_L_BIT_OFFSET,
+			     SEP_SW_DESC_LOAD_OP_L_BIT_SIZE,
+			     sep_ctx_load_req[idx]);
+	}
+}
+
+/**
+ * desc_q_pack_rpc_desc() - Pack the RPC (message) descriptor type
+ * @desc_p:	 The descriptor buffer
+ * @op_ctx:	 The operation context
+ * @agent_id:	 RPC agent (API) ID
+ * @func_id:	 Function ID (index)
+ * @rpc_msg_size:	 Size of RPC parameters message buffer
+ * @rpc_msg_dma_addr:	 DMA address of RPC parameters message buffer
+ *
+ */
+void desc_q_pack_rpc_desc(struct sep_sw_desc *desc_p,
+			  struct sep_op_ctx *op_ctx,
+			  u16 agent_id,
+			  u16 func_id,
+			  unsigned long rpc_msg_size,
+			  dma_addr_t rpc_msg_dma_addr)
+{
+	SEP_SW_DESC_INIT(desc_p);
+	SEP_SW_DESC_SET(desc_p, TYPE, SEP_SW_DESC_TYPE_RPC_MSG);
+	SEP_SW_DESC_SET_COOKIE(desc_p, op_ctx);
+
+#ifdef DEBUG
+	/* Verify that given agent_id is not too large for AGENT_ID field */
+	if (agent_id >= (1 << SEP_SW_DESC_RPC_MSG_AGENT_ID_BIT_SIZE)) {
+		pr_err(
+			    "Given agent_id=%d is too large for AGENT_ID field. Value truncated!",
+			    agent_id);
+	}
+#endif
+	SEP_SW_DESC_SET4TYPE(desc_p, RPC_MSG, AGENT_ID, agent_id);
+	SEP_SW_DESC_SET4TYPE(desc_p, RPC_MSG, FUNC_ID, func_id);
+	SEP_SW_DESC_SET4TYPE(desc_p, RPC_MSG, HMB_SIZE, rpc_msg_size);
+	SEP_SW_DESC_SET4TYPE(desc_p, RPC_MSG, HMB_ADDR, rpc_msg_dma_addr);
+}
+
+/**
+ * desc_q_pack_app_req_desc() - Pack the Applet Request descriptor
+ * @desc_p:	The descriptor buffer
+ * @op_ctx:	The operation context
+ * @req_type:	The Applet request type
+ * @session_id:	Session ID - Required only for SESSION_CLOSE and
+ *		COMMAND_INVOKE requests
+ * @inparams_addr:	DMA address of the "In Params." structure for the
+ *			request.
+ *
+ */
+void desc_q_pack_app_req_desc(struct sep_sw_desc *desc_p,
+			      struct sep_op_ctx *op_ctx,
+			      enum sepapp_req_type req_type,
+			      u16 session_id, dma_addr_t inparams_addr)
+{
+	SEP_SW_DESC_INIT(desc_p);
+	SEP_SW_DESC_SET(desc_p, TYPE, SEP_SW_DESC_TYPE_APP_REQ);
+	SEP_SW_DESC_SET_COOKIE(desc_p, op_ctx);
+
+	SEP_SW_DESC_SET4TYPE(desc_p, APP_REQ, REQ_TYPE, req_type);
+	SEP_SW_DESC_SET4TYPE(desc_p, APP_REQ, SESSION_ID, session_id);
+	SEP_SW_DESC_SET4TYPE(desc_p, APP_REQ, IN_PARAMS_ADDR, inparams_addr);
+}
+
+/**
+ * crypto_proc_mode_to_str() - Convert from crypto_proc_mode to string
+ * @proc_mode:	 The proc_mode enumeration value
+ *
+ * Returns A string description of the processing mode (NULL if invalid mode)
+ */
+const char *crypto_proc_mode_to_str(enum sep_proc_mode proc_mode)
+{
+	switch (proc_mode) {
+	case SEP_PROC_MODE_NOP:
+		return "NOP";
+	case SEP_PROC_MODE_PROC_T:
+		return "PROC_T";
+	case SEP_PROC_MODE_FIN:
+		return "FIN";
+	case SEP_PROC_MODE_PROC_A:
+		return "PROC_A";
+	default:
+		return "?";
+	}
+}
+
+#ifdef DEBUG
+static void dump_crypto_op_desc(const struct sep_sw_desc *desc_p)
+{
+	pr_debug("CRYPTO_OP::%s (type=%lu,cookie=0x%08lX)\n",
+		crypto_proc_mode_to_str(SEP_SW_DESC_GET4TYPE
+				(desc_p, CRYPTO_OP, PROC_MODE)),
+				SEP_SW_DESC_GET(desc_p, TYPE),
+				(uintptr_t)SEP_SW_DESC_GET_COOKIE(desc_p));
+
+	pr_debug("HCB=0x%08lX @ FwIdx=%lu %s%s\n",
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, HCB_ADDR),
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, FW_CACHE_IDX),
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, L) ? "(load)" : "",
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, I) ? "(init)" : "");
+
+	pr_debug("IFT: addr=0x%08lX , size=0x%08lX , tbl_num=%lu\n",
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, IFT_ADDR),
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, IFT_SIZE),
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, IFT_NUM));
+
+	pr_debug("OFT: addr=0x%08lX , size=0x%08lX , tbl_num=%lu\n",
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, OFT_ADDR),
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, OFT_SIZE),
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, OFT_NUM));
+
+	pr_debug("0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+		 ((u32 *)desc_p)[0], ((u32 *)desc_p)[1],
+		 ((u32 *)desc_p)[2], ((u32 *)desc_p)[3],
+		 ((u32 *)desc_p)[4], ((u32 *)desc_p)[5],
+		 ((u32 *)desc_p)[6], ((u32 *)desc_p)[7]);
+}
+
+static void dump_load_op_desc(const struct sep_sw_desc *desc_p)
+{
+	u32 *p = (u32 *)desc_p;
+	u32 hcb, cache_idx, is_load;
+	int idx;
+
+	pr_debug("LOAD_OP (type=%lu,cookie=0x%08lX)\n",
+		SEP_SW_DESC_GET(desc_p, TYPE),
+		(uintptr_t)SEP_SW_DESC_GET_COOKIE(desc_p));
+
+	for (idx = 0; idx < SEP_MAX_COMBINED_ENGINES; idx++) {
+		cache_idx =
+		    BITFIELD_GET(p
+				 [SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_WORD_OFFSET],
+				 SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_BIT_OFFSET
+				 (idx),
+				 SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_BIT_SIZE);
+
+		hcb =
+		    BITFIELD_GET(p
+				 [SEP_SW_DESC_LOAD_OP_HCB_ADDR_WORD_OFFSET
+				 (idx)],
+				 SEP_SW_DESC_LOAD_OP_HCB_ADDR_BIT_OFFSET,
+				 SEP_SW_DESC_LOAD_OP_HCB_ADDR_BIT_SIZE);
+
+		is_load =
+		    BITFIELD_GET(p[SEP_SW_DESC_LOAD_OP_L_WORD_OFFSET(idx)],
+				 SEP_SW_DESC_LOAD_OP_L_BIT_OFFSET,
+				 SEP_SW_DESC_LOAD_OP_L_BIT_SIZE);
+
+		pr_debug("[%d] HCB=0x%08X FwIdx=%u %s\n",
+			      idx, hcb, cache_idx,
+			      is_load ? "(load)" : "(do not load)");
+	}
+}
+
+static void dump_combined_op_desc(const struct sep_sw_desc *desc_p)
+{
+	pr_debug("COMBINED_OP::%s (type=%lu,cookie=0x%08lX)\n",
+		crypto_proc_mode_to_str(SEP_SW_DESC_GET4TYPE
+				(desc_p, COMBINED_OP, PROC_MODE)),
+				SEP_SW_DESC_GET(desc_p, TYPE),
+				(uintptr_t)SEP_SW_DESC_GET_COOKIE(desc_p));
+
+	pr_debug("SCHEME=0x%08lX %s%s\n",
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, CONFIG_SCHEME),
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, L) ? "(load)" : "",
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, I) ? "(init)" : "");
+
+	pr_debug("IFT: addr=0x%08lX , size=0x%08lX , tbl_num=%lu\n",
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, IFT_ADDR),
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, IFT_SIZE),
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, IFT_NUM));
+
+	pr_debug("OFT: addr=0x%08lX , size=0x%08lX , tbl_num=%lu\n",
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, OFT_ADDR),
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, OFT_SIZE),
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, OFT_NUM));
+}
+
+static void dump_rpc_msg_desc(const struct sep_sw_desc *desc_p)
+{
+	pr_debug(
+		      "RPC_MSG: agentId=%lu, funcId=%lu, HmbAddr=0x%08lX, HmbSize=%lu\n",
+		      SEP_SW_DESC_GET4TYPE(desc_p, RPC_MSG, AGENT_ID),
+		      SEP_SW_DESC_GET4TYPE(desc_p, RPC_MSG, FUNC_ID),
+		      SEP_SW_DESC_GET4TYPE(desc_p, RPC_MSG, HMB_ADDR),
+		      SEP_SW_DESC_GET4TYPE(desc_p, RPC_MSG, HMB_SIZE));
+}
+
+static void dump_app_req_desc(const struct sep_sw_desc *desc_p)
+{
+	pr_debug(
+		      "APP_REQ: reqType=%lu, sessionId=%lu, InParamsAddr=0x%08lX\n",
+		      SEP_SW_DESC_GET4TYPE(desc_p, APP_REQ, REQ_TYPE),
+		      SEP_SW_DESC_GET4TYPE(desc_p, APP_REQ, SESSION_ID),
+		      SEP_SW_DESC_GET4TYPE(desc_p, APP_REQ, IN_PARAMS_ADDR));
+}
+
+static void dump_desc(const struct sep_sw_desc *desc_p)
+{				/* dump descriptor based on its type */
+	switch (SEP_SW_DESC_GET(desc_p, TYPE)) {
+	case SEP_SW_DESC_TYPE_NULL:
+		pr_debug("NULL descriptor type.\n");
+		break;
+	case SEP_SW_DESC_TYPE_CRYPTO_OP:
+		dump_crypto_op_desc(desc_p);
+		break;
+	case SEP_SW_DESC_TYPE_LOAD_OP:
+		dump_load_op_desc(desc_p);
+		break;
+	case SEP_SW_DESC_TYPE_COMBINED_OP:
+		dump_combined_op_desc(desc_p);
+		break;
+	case SEP_SW_DESC_TYPE_RPC_MSG:
+		dump_rpc_msg_desc(desc_p);
+		break;
+	case SEP_SW_DESC_TYPE_APP_REQ:
+		dump_app_req_desc(desc_p);
+		break;
+	case SEP_SW_DESC_TYPE_DEBUG:
+		pr_debug("DEBUG descriptor type.\n");
+		break;
+	default:
+		pr_warn("Unknown descriptor type = %lu\n",
+			     SEP_SW_DESC_GET(desc_p, TYPE));
+	}
+}
+#endif /*DEBUG*/
diff --git a/drivers/staging/sep54/desc_mgr.h b/drivers/staging/sep54/desc_mgr.h
new file mode 100644
index 0000000..11405b0
--- /dev/null
+++ b/drivers/staging/sep54/desc_mgr.h
@@ -0,0 +1,282 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+/*! \file desc_mgr.h
+    Descriptor manager API and associated data structures
+*/
+
+#ifndef _DESC_MGR_H_
+#define _DESC_MGR_H_
+
+#include "sep_sw_desc.h"
+
+/* Max. from Combined mode operation */
+#define MAX_PENDING_DESCS 2
+
+/* Evaluate desc_q_enqueue() return code (EINPROGRESS and EBUSY are ok) */
+#define IS_DESCQ_ENQUEUE_ERR(_rc) \
+	(((_rc) != -EINPROGRESS) && ((_rc) != -EBUSY))
+
+#define DESC_Q_INVALID_HANDLE NULL
+
+/* Opaque structure - accessed using SEP_SW_DESC_* macros */
+struct sep_sw_desc {
+	u32 data[SEP_SW_DESC_WORD_SIZE];
+};
+
+enum desc_q_state {
+	DESC_Q_UNINITIALIZED,	/* Before initializing the queue */
+	DESC_Q_ACTIVE,		/* Queue is active (can accept requests) */
+	DESC_Q_ASLEEP		/* Queue is in "sleep" state (cannot accept
+				   additional requests - descs should be
+				   enqueued in descs backlog queue) */
+};
+
+/* Declare like this to avoid cyclic inclusion with dx_cc54_driver.h */
+struct sep_op_ctx;
+struct queue_drvdata;
+struct sep_app_session;
+
+/**
+ * desc_q_create() - Create descriptors queue object
+ * @qid:	 The queue ID (index)
+ * @drvdata:	 The associated queue driver data
+ * @state:	 Current SeP state
+ *
+ * Returns Allocated queue object handle (DESC_Q_INVALID_HANDLE for failure)
+ */
+void *desc_q_create(int qid, struct queue_drvdata *drvdata, int state);
+
+/**
+ * desc_q_destroy() - Destroy descriptors queue object (free resources)
+ * @q_h: The queue object handle
+ */
+void desc_q_destroy(void *q_h);
+
+/**
+ * desc_q_set_state() - Set queue state (SLEEP or ACTIVE)
+ * @q_h:	The queue object handle
+ * @state:	The requested state
+ */
+int desc_q_set_state(void *q_h, enum desc_q_state state);
+
+/**
+ * desc_q_cntr_set() - Set queue counters from the SeP GPR value
+ * @q_h:	The queue object handle
+ */
+int desc_q_cntr_set(void *q_h);
+
+/**
+ * desc_q_get_state() - Get queue state
+ * @q_h:	The queue object handle
+ */
+enum desc_q_state desc_q_get_state(void *q_h);
+
+/**
+ * desc_q_is_idle() - Report if given queue is active but empty/idle.
+ * @q_h:		The queue object handle
+ * @idle_jiffies_p:	Return jiffies at which the queue became idle
+ */
+bool desc_q_is_idle(void *q_h, unsigned long *idle_jiffies_p);
+
+/**
+ * desc_q_reset() - Reset sent/completed counters of queue
+ * @q_h:	The queue object handle
+ *
+ * This function should be invoked only when the queue is in ASLEEP state
+ * after the transition of SeP to sleep state completed.
+ * Returns -EBUSY if the queue is not in the correct state for reset.
+ */
+int desc_q_reset(void *q_h);
+
+/**
+ * desc_q_enqueue_sleep_req() - Enqueue SLEEP_REQ descriptor
+ * @q_h:	The queue object handle
+ * @op_ctx:	The operation context for this descriptor
+ */
+int desc_q_enqueue_sleep_req(void *q_h, struct sep_op_ctx *op_ctx);
+
+/**
+ * desc_q_get_info4sep()- Get queue address and size to be used in FW init phase
+ * @q_h:		The queue object handle
+ * @base_addr_p:	Base address return parameter
+ * @size_p:		Queue size (in bytes) return parameter
+ */
+void desc_q_get_info4sep(void *q_h,
+			 dma_addr_t *base_addr_p, unsigned long *size_p);
+
+/**
+ * desc_q_enqueue() - Enqueue given descriptor in given queue
+ * @q_h:		The queue object handle
+ * @desc_p:		Pointer to descriptor
+ * @may_backlog:	When "true" and descQ is full or ASLEEP, may enqueue
+ *			the given desc. in the backlog queue.
+ *			When "false", any of the above cases would cause
+ *			returning -ENOMEM.
+ *
+ * The function updates the op_ctx->op_state according to its results.
+ * Returns -EINPROGRESS on successful dispatch into the SW desc. queue.
+ * Returns -EBUSY if may_backlog==true and the descriptor was enqueued in
+ * the backlog queue.
+ * Returns -ENOMEM if queue is full and cannot enqueue in the backlog queue
+ */
+int desc_q_enqueue(void *q_h, struct sep_sw_desc *desc_p, bool may_backlog);
+
+/*!
+ * Mark the given cookie as invalid so that a completion arriving after a
+ * timeout is ignored. Invoke this before releasing the op_ctx object.
+ * There is no race with the interrupt because the client_ctx (cookie) is still
+ * valid when invoking this function.
+ *
+ * \param q_h Descriptor queue handle
+ * \param cookie Invalidate descriptors with this cookie
+ */
+void desc_q_mark_invalid_cookie(void *q_h, void *cookie);
+
+/*!
+ * Dequeue and process any completed descriptors in the queue
+ * (This function assumes non-reentrancy since it is invoked from
+ *  either interrupt handler or in workqueue context)
+ *
+ * \param q_h The queue object handle
+ *
+ */
+void desc_q_process_completed(void *q_h);
+
+/*!
+ * Create a debug descriptor in given buffer
+ *
+ * \param desc_p The descriptor buffer
+ * \param op_ctx The operation context
+ * TODO: Get additional debug descriptors (in addition to loopback)
+ */
+void desq_q_pack_debug_desc(struct sep_sw_desc *desc_p,
+			    struct sep_op_ctx *op_ctx);
+
+/*!
+ * Pack a CRYPTO_OP descriptor in given descriptor buffer
+ *
+ * \param desc_p The descriptor buffer
+ * \param op_ctx The operation context
+ * \param sep_ctx_load_req Context load request flag
+ * \param sep_ctx_init_req Context initialize request flag
+ * \param proc_mode Descriptor processing mode
+ */
+void desc_q_pack_crypto_op_desc(struct sep_sw_desc *desc_p,
+				struct sep_op_ctx *op_ctx,
+				int sep_ctx_load_req, int sep_ctx_init_req,
+				enum sep_proc_mode proc_mode);
+
+/*!
+ * Pack a COMBINED_OP descriptor in given descriptor buffer
+ *
+ * \param desc_p The descriptor buffer
+ * \param op_ctx The operation context
+ * \param sep_ctx_load_req Context load request flag
+ * \param sep_ctx_init_req Context initialize request flag
+ * \param proc_mode Descriptor processing mode
+ * \param cfg_scheme The SEP format configuration scheme claimed by the user
+ */
+void desc_q_pack_combined_op_desc(struct sep_sw_desc *desc_p,
+				  struct sep_op_ctx *op_ctx,
+				  int sep_ctx_load_req, int sep_ctx_init_req,
+				  enum sep_proc_mode proc_mode,
+				  u32 cfg_scheme);
+/*!
+ * Pack a LOAD_OP descriptor in given descriptor buffer
+ *
+ * \param desc_p The descriptor buffer
+ * \param op_ctx The operation context
+ * \param sep_ctx_load_req Context load request flag
+ */
+void desc_q_pack_load_op_desc(struct sep_sw_desc *desc_p,
+			      struct sep_op_ctx *op_ctx, int *sep_ctx_load_req);
+
+/*!
+ * Pack the RPC (message) descriptor type
+ *
+ *
+ * \param desc_p The descriptor buffer
+ * \param op_ctx The operation context
+ * \param agent_id RPC agent (API) ID
+ * \param func_id Function ID (index)
+ * \param rpc_msg_size Size of RPC parameters message buffer
+ * \param rpc_msg_dma_addr DMA address of RPC parameters message buffer
+ */
+void desc_q_pack_rpc_desc(struct sep_sw_desc *desc_p,
+			  struct sep_op_ctx *op_ctx,
+			  u16 agent_id,
+			  u16 func_id,
+			  unsigned long rpc_msg_size,
+			  dma_addr_t rpc_msg_dma_addr);
+
+/*!
+ * Pack the Applet Request descriptor
+ *
+ * \param desc_p The descriptor buffer
+ * \param op_ctx The operation context
+ * \param req_type The Applet request type
+ * \param session_id Session ID - Required only for SESSION_CLOSE and
+ *                   COMMAND_INVOKE requests
+ * \param inparams_addr DMA address of the "In Params." structure for the
+ *                      request.
+ */
+void desc_q_pack_app_req_desc(struct sep_sw_desc *desc_p,
+			      struct sep_op_ctx *op_ctx,
+			      enum sepapp_req_type req_type,
+			      u16 session_id, dma_addr_t inparams_addr);
+
+/*!
+ * Convert from crypto_proc_mode to string
+ *
+ * \param proc_mode The proc_mode enumeration value
+ *
+ * \return A string description of the processing mode
+ */
+const char *crypto_proc_mode_to_str(enum sep_proc_mode proc_mode);
+
+u32 add_cookie(uintptr_t op_ctx);
+
+void delete_cookie(u32 index);
+
+void delete_context(uintptr_t op_ctx);
+
+uintptr_t get_cookie(u32 index);
+
+#define SEP_SW_DESC_GET_COOKIE(desc_p) \
+	((struct sep_op_ctx *)get_cookie( \
+		((u32 *)(desc_p))[SEP_SW_DESC_COOKIE_WORD_OFFSET]))
+
+#define SEP_SW_DESC_SET_COOKIE(desc_p, op_ctx) \
+do {\
+	u32 __ctx_ptr__ = 0;\
+	if ((op_ctx) == NULL) {\
+		delete_cookie(((u32 *)(desc_p))[SEP_SW_DESC_COOKIE_WORD_OFFSET]);\
+	} else {\
+		__ctx_ptr__ = add_cookie((uintptr_t)(op_ctx));\
+	} \
+	memcpy(((u32 *)(desc_p)) + SEP_SW_DESC_COOKIE_WORD_OFFSET,\
+	       &__ctx_ptr__, sizeof(u32));\
+} while (0)
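+
+/* Usage sketch for the cookie macros (illustrative only):
+ *
+ *	struct sep_sw_desc desc;
+ *
+ *	SEP_SW_DESC_INIT(&desc);
+ *	SEP_SW_DESC_SET_COOKIE(&desc, op_ctx);	// allocates a cookie slot
+ *	...
+ *	op_ctx = SEP_SW_DESC_GET_COOKIE(&desc);	// resolve on completion
+ *	...
+ *	SEP_SW_DESC_SET_COOKIE(&desc, NULL);	// releases the slot
+ */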
+
+#endif /*_DESC_MGR_H_*/
diff --git a/drivers/staging/sep54/dx_bitops.h b/drivers/staging/sep54/dx_bitops.h
new file mode 100644
index 0000000..fd9524c
--- /dev/null
+++ b/drivers/staging/sep54/dx_bitops.h
@@ -0,0 +1,58 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+/*!
+ * \file dx_bitops.h
+ * Bit fields operations macros.
+ */
+#ifndef _DX_BITOPS_H_
+#define _DX_BITOPS_H_
+
+#define BITMASK(mask_size) (((mask_size) < 32) ?	\
+	((1UL << (mask_size)) - 1) : 0xFFFFFFFFUL)
+#define BITMASK_AT(mask_size, mask_offset) (BITMASK(mask_size) << (mask_offset))
+
+#define BITFIELD_GET(word, bit_offset, bit_size)	\
+	(((word) >> (bit_offset)) & BITMASK(bit_size))
+#define BITFIELD_SET(word, bit_offset, bit_size, new_val) \
+	(word = ((word) & ~BITMASK_AT(bit_size, bit_offset)) |		\
+		(((new_val) & BITMASK(bit_size)) << (bit_offset)))
+
+/* Is val aligned to "align" ("align" must be power of 2) */
+#ifndef IS_ALIGNED
+#define IS_ALIGNED(val, align)	(((u32)(val) & ((align) - 1)) == 0)
+#endif
+
+#define SWAP_ENDIAN(word)		\
+	(((word) >> 24) | (((word) & 0x00FF0000) >> 8) | \
+	(((word) & 0x0000FF00) << 8) | (((word) & 0x000000FF) << 24))
+
+/* Is val a multiple of "mult" ("mult" must be power of 2) */
+#define IS_MULT(val, mult)	(((val) & ((mult) - 1)) == 0)
+
+#define IS_NULL_ADDR(adr)	(!(adr))
+
+#endif /*_DX_BITOPS_H_*/
diff --git a/drivers/staging/sep54/dx_cc_defs.h b/drivers/staging/sep54/dx_cc_defs.h
new file mode 100644
index 0000000..0bdd38d
--- /dev/null
+++ b/drivers/staging/sep54/dx_cc_defs.h
@@ -0,0 +1,42 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _DX_CC_DEFS_H_
+#define _DX_CC_DEFS_H_
+
+#define DX_INT32_MAX 0x7FFFFFFFL
+
+enum dx_crypto_key_type {
+	DX_USER_KEY = 0,
+	DX_ROOT_KEY = 1,
+	DX_PROVISIONING_KEY = 2,
+	DX_XOR_HDCP_KEY = 3,
+	DX_APPLET_KEY = 4,
+	DX_SESSION_KEY = 5,
+	DX_END_OF_KEYS = DX_INT32_MAX
+};
+
+#endif /*_DX_CC_DEFS_H_*/
diff --git a/drivers/staging/sep54/dx_cc_regs.h b/drivers/staging/sep54/dx_cc_regs.h
new file mode 100644
index 0000000..2f91bd6
--- /dev/null
+++ b/drivers/staging/sep54/dx_cc_regs.h
@@ -0,0 +1,162 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+/*!
+ * \file dx_cc_regs.h
+ * \brief Macro definitions for accessing Dx CryptoCell register space
+ *
+ * For SeP code define DX_CC_SEP
+ * For Host physical/direct registers access define DX_CC_HOST
+ * For Host virtual mapping of registers define DX_CC_HOST_VIRT
+ */
+
+#ifndef _DX_CC_REGS_H_
+#define _DX_CC_REGS_H_
+
+#include "dx_bitops.h"
+
+/* Include register base addresses data */
+#if defined(DX_CC_SEP)
+#include "dx_reg_base_sep.h"
+
+#elif defined(DX_CC_HOST) || defined(DX_CC_HOST_VIRT) || defined(DX_CC_TEE)
+#include "dx_reg_base_host.h"
+
+#else
+#error Define either DX_CC_SEP, DX_CC_HOST, DX_CC_HOST_VIRT or DX_CC_TEE
+#endif
+
+/* CC registers address calculation */
+#if defined(DX_CC_SEP)
+#define DX_CC_REG_ADDR(unit_name, reg_name)            \
+	 (DX_BASE_CC_PERIF + DX_BASE_ ## unit_name + \
+	  DX_ ## reg_name ## _REG_OFFSET)
+
+/* In host macros we ignore the unit_name because all offsets are from base */
+#elif defined(DX_CC_HOST)
+#define DX_CC_REG_ADDR(unit_name, reg_name)            \
+	(DX_BASE_CC + DX_ ## reg_name ## _REG_OFFSET)
+
+#elif defined(DX_CC_TEE)
+#define DX_CC_REG_ADDR(unit_name, reg_name)            \
+	(DX_BASE_CC + DX_ ## reg_name ## _REG_OFFSET)
+
+#elif defined(DX_CC_HOST_VIRT)
+#define DX_CC_REG_ADDR(cc_base_virt, unit_name, reg_name) \
+	(((unsigned long)(cc_base_virt)) + DX_ ## reg_name ## _REG_OFFSET)
+
+#endif
+
+/* Register Offset macros (from registers base address in host) */
+#if defined(DX_CC_HOST) || defined(DX_CC_HOST_VIRT)
+
+#define DX_CC_REG_OFFSET(reg_domain, reg_name)               \
+	(DX_ ## reg_domain ## _ ## reg_name ## _REG_OFFSET)
+
+/* Indexed GPR offset macros - note the two-level expansion trick: */
+/* (using the macro without the "_" prefix allows passing another macro      *
+ *  as the gpr_idx) */
+#define _SEP_HOST_GPR_REG_OFFSET(gpr_idx) \
+	DX_CC_REG_OFFSET(HOST, SEP_HOST_GPR ## gpr_idx)
+#define SEP_HOST_GPR_REG_OFFSET(gpr_idx) _SEP_HOST_GPR_REG_OFFSET(gpr_idx)
+#define _HOST_SEP_GPR_REG_OFFSET(gpr_idx) \
+	DX_CC_REG_OFFSET(HOST, HOST_SEP_GPR ## gpr_idx)
+#define HOST_SEP_GPR_REG_OFFSET(gpr_idx) _HOST_SEP_GPR_REG_OFFSET(gpr_idx)
+
+/* GPR IRQ bit mask by GPR index */
+#define _SEP_HOST_GPR_IRQ_MASK(gpr_idx) \
+	(1 << DX_HOST_IRR_SEP_HOST_GPR ## gpr_idx ## _INT_BIT_SHIFT)
+#define SEP_HOST_GPR_IRQ_MASK(gpr_idx) _SEP_HOST_GPR_IRQ_MASK(gpr_idx)
+
+#elif defined(DX_CC_SEP)
+
+#define DX_CC_REG_OFFSET(unit_name, reg_name)               \
+	(DX_BASE_ ## unit_name + DX_ ## reg_name ## _REG_OFFSET)
+
+/* Indexed GPR address macros - note the two-level expansion trick: */
+/* (using the macro without the "_" prefix allows passing another macro      *
+ *  as the gpr_idx) */
+#define _SEP_HOST_GPR_REG_ADDR(gpr_idx) \
+	DX_CC_REG_ADDR(SEP_RGF, SEP_SEP_HOST_GPR ## gpr_idx)
+#define SEP_HOST_GPR_REG_ADDR(gpr_idx) _SEP_HOST_GPR_REG_ADDR(gpr_idx)
+#define _HOST_SEP_GPR_REG_ADDR(gpr_idx) \
+	DX_CC_REG_ADDR(SEP_RGF, SEP_HOST_SEP_GPR ## gpr_idx)
+#define HOST_SEP_GPR_REG_ADDR(gpr_idx) _HOST_SEP_GPR_REG_ADDR(gpr_idx)
+
+#elif defined(DX_CC_TEE)
+
+#define DX_CC_REG_OFFSET(unit_name, reg_name) \
+	(DX_BASE_ ## unit_name + DX_ ## reg_name ## _REG_OFFSET)
+
+#else
+#error "Undef exec domain,not DX_CC_SEP, DX_CC_HOST, DX_CC_HOST_VIRT, DX_CC_TEE"
+#endif
+
+/* Registers address macros for ENV registers (development FPGA only) */
+#ifdef DX_BASE_ENV_REGS
+
+#if defined(DX_CC_HOST)
+#define DX_ENV_REG_ADDR(reg_name) \
+	(DX_BASE_ENV_REGS + DX_ENV_ ## reg_name ## _REG_OFFSET)
+
+#elif defined(DX_CC_HOST_VIRT)
+/* The OS driver resource address space covers the ENV registers, too */
+/* Since DX_BASE_ENV_REGS is given as an absolute address, we calculate its
+ * offset from DX_BASE_CC */
+#define DX_ENV_REG_ADDR(cc_base_virt, reg_name) \
+	(((cc_base_virt) + (DX_BASE_ENV_REGS - DX_BASE_CC)) + \
+	 DX_ENV_ ## reg_name ## _REG_OFFSET)
+
+#endif
+
+#endif				/*DX_BASE_ENV_REGS */
+
+/* Bit fields access */
+#define DX_CC_REG_FLD_GET(unit_name, reg_name, fld_name, reg_val)	      \
+	(DX_ ## reg_name ## _ ## fld_name ## _BIT_SIZE == 0x20 ?	      \
+	reg_val /* Optimization for 32b fields */ :			      \
+	BITFIELD_GET(reg_val, DX_ ## reg_name ## _ ## fld_name ## _BIT_SHIFT, \
+		     DX_ ## reg_name ## _ ## fld_name ## _BIT_SIZE))
+
+#define DX_CC_REG_FLD_SET(                                               \
+	unit_name, reg_name, fld_name, reg_shadow_var, new_fld_val)      \
+do {                                                                     \
+	if (DX_ ## reg_name ## _ ## fld_name ## _BIT_SIZE == 0x20)       \
+		reg_shadow_var = new_fld_val; /* Optimization for 32b fields */\
+	else                                                             \
+		BITFIELD_SET(reg_shadow_var,                             \
+			DX_ ## reg_name ## _ ## fld_name ## _BIT_SHIFT,  \
+			DX_ ## reg_name ## _ ## fld_name ## _BIT_SIZE,   \
+			new_fld_val);                                    \
+} while (0)
+
+/* Usage example:
+   u32 reg_shadow = READ_REGISTER(DX_CC_REG_ADDR(CRY_KERNEL,AES_CONTROL));
+   DX_CC_REG_FLD_SET(CRY_KERNEL,AES_CONTROL,NK_KEY0,reg_shadow, 3);
+   DX_CC_REG_FLD_SET(CRY_KERNEL,AES_CONTROL,NK_KEY1,reg_shadow, 1);
+   WRITE_REGISTER(DX_CC_REG_ADDR(CRY_KERNEL,AES_CONTROL), reg_shadow);
+ */
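+/* The read path follows the same pattern (illustrative sketch, same
+   register/field names as the example above):
+   u32 reg_val = READ_REGISTER(DX_CC_REG_ADDR(CRY_KERNEL,AES_CONTROL));
+   u32 nk_key0 = DX_CC_REG_FLD_GET(CRY_KERNEL,AES_CONTROL,NK_KEY0,reg_val);
+ */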
+
+#endif /*_DX_CC_REGS_H_*/
diff --git a/drivers/staging/sep54/dx_dev_defs.h b/drivers/staging/sep54/dx_dev_defs.h
new file mode 100644
index 0000000..1626df8
--- /dev/null
+++ b/drivers/staging/sep54/dx_dev_defs.h
@@ -0,0 +1,67 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+/** @file: dx_dev_defs.h
+ * Device specific definitions for the CC device driver */
+
+#ifndef _DX_DEV_DEFS_H_
+#define _DX_DEV_DEFS_H_
+
+#define DRIVER_NAME MODULE_NAME
+
+/* OpenFirmware matches */
+#define DX_DEV_OF_MATCH  {                        \
+	{.name = "dx_cc54" },                     \
+	{.compatible = "xlnx,plbv46-cc-1.00.c",}, \
+	{}                                        \
+}
+
+/* Firmware images file names (for request_firmware) */
+#define RESIDENT_IMAGE_NAME DRIVER_NAME "-resident.bin"
+#define CACHE_IMAGE_NAME DRIVER_NAME "-cache.bin"
+#define VRL_IMAGE_NAME DRIVER_NAME "-Primary_VRL.bin"
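+/* Illustration (assuming MODULE_NAME expands to "sep54"): request_firmware()
+ * would then look for "sep54-resident.bin", "sep54-cache.bin" and
+ * "sep54-Primary_VRL.bin" in the firmware search path. */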
+
+/* OTP index of verification hash for key in VRL */
+#define VRL_KEY_INDEX 0
+
+#define ICACHE_SIZE_LOG2_DEFAULT 20	/* 1MB */
+#define DCACHE_SIZE_LOG2_DEFAULT 20	/* 1MB */
+
+#define EXPECTED_FW_VER 0x01000000
+
+/* The known SeP clock frequency in MHz (30 MHz on Virtex-5 FPGA) */
+/* Comment this line if SeP frequency is already initialized in CC_INIT ext. */
+/*#define SEP_FREQ_MHZ 30*/
+
+/* Number of SEP descriptor queues */
+#define SEP_MAX_NUM_OF_DESC_Q  2
+
+/* Maximum number of registered memory buffers per user context */
+#define MAX_REG_MEMREF_PER_CLIENT_CTX 16
+
+/* Maximum number of SeP Applet sessions per client context */
+#define MAX_SEPAPP_SESSION_PER_CLIENT_CTX 16
+
+#endif				/* _DX_DEV_DEFS_H_ */
diff --git a/drivers/staging/sep54/dx_driver.c b/drivers/staging/sep54/dx_driver.c
new file mode 100644
index 0000000..5a97c29
--- /dev/null
+++ b/drivers/staging/sep54/dx_driver.c
@@ -0,0 +1,4935 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_MAIN
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/fcntl.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <linux/mutex.h>
+#include <linux/sysctl.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+/* cache.h required for L1_CACHE_ALIGN() and cache_line_size() */
+#include <linux/cache.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <linux/pagemap.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/kthread.h>
+#include <linux/genhd.h>
+#include <linux/mmc/card.h>
+
+#include <generated/autoconf.h>
+#if defined(DEBUG) && defined(CONFIG_KGDB)
+/* For setup_break option */
+#include <linux/kgdb.h>
+#endif
+
+#include "sep_log.h"
+#include "sep_init.h"
+#include "desc_mgr.h"
+#include "lli_mgr.h"
+#include "sep_sysfs.h"
+#include "dx_driver_abi.h"
+#include "crypto_ctx_mgr.h"
+#include "crypto_api.h"
+#include "sep_request.h"
+#include "dx_sep_kapi.h"
+#if MAX_SEPAPP_SESSION_PER_CLIENT_CTX > 0
+#include "sepapp.h"
+#endif
+#include "sep_power.h"
+#include "sep_request_mgr.h"
+#include "sep_applets.h"
+
+/* Registers definitions from shared/hw/include */
+#include "dx_reg_base_host.h"
+#include "dx_host.h"
+#ifdef DX_BASE_ENV_REGS
+#include "dx_env.h"
+#endif
+#define DX_CC_HOST_VIRT	/* must be defined before including dx_cc_regs.h */
+#include "dx_cc_regs.h"
+#include "dx_init_cc_abi.h"
+#include "dx_driver.h"
+
+#ifdef CONFIG_COMPAT
+#include "sep_compat_ioctl.h"
+#endif
+
+#if SEPAPP_UUID_SIZE != DXDI_SEPAPP_UUID_SIZE
+#error Size mismatch of SEPAPP_UUID_SIZE and DXDI_SEPAPP_UUID_SIZE
+#endif
+
+#define DEVICE_NAME_PREFIX  "dx_sep_q"
+#define SEP_DEVICES         SEP_MAX_NUM_OF_DESC_Q
+
+#define DRIVER_NAME     MODULE_NAME
+
+#ifdef SEP_PRINTF
+#define SEP_PRINTF_H2S_GPR_OFFSET \
+	HOST_SEP_GPR_REG_OFFSET(DX_SEP_HOST_PRINTF_GPR_IDX)
+#define SEP_PRINTF_S2H_GPR_OFFSET \
+	SEP_HOST_GPR_REG_OFFSET(DX_SEP_HOST_PRINTF_GPR_IDX)
+/* The Ack counter occupies only the upper 24 bits of the GPR */
+#define SEP_PRINTF_ACK_MAX 0xFFFFFF
+/* Sync. host-SeP value */
+#define SEP_PRINTF_ACK_SYNC_VAL SEP_PRINTF_ACK_MAX
+#endif
+
+static struct class *sep_class;
+
+int q_num;			/* Initialized to 0 */
+module_param(q_num, int, 0444);
+MODULE_PARM_DESC(q_num, "Num. of active queues 1-2");
+
+int sep_log_level = SEP_BASE_LOG_LEVEL;
+module_param(sep_log_level, int, 0644);
+MODULE_PARM_DESC(sep_log_level, "Log level: min ERR = 0, max TRACE = 4");
+
+int sep_log_mask = SEP_LOG_MASK_ALL;
+module_param(sep_log_mask, int, 0644);
+MODULE_PARM_DESC(sep_log_mask, "Log components mask");
+
+int disable_linux_crypto;
+module_param(disable_linux_crypto, int, 0444);
+MODULE_PARM_DESC(disable_linux_crypto,
+		 "Set to !0 to disable registration with Linux CryptoAPI");
+
+/* Parameters to override the default reserved pages of SeP cache memories */
+#ifdef ICACHE_SIZE_LOG2_DEFAULT
+#include "dx_init_cc_defs.h"
+int icache_size_log2 = ICACHE_SIZE_LOG2_DEFAULT;
+module_param(icache_size_log2, int, 0444);
+MODULE_PARM_DESC(icache_size_log2, "Size of Icache memory in log2(bytes)");
+
+int dcache_size_log2 = DCACHE_SIZE_LOG2_DEFAULT;
+module_param(dcache_size_log2, int, 0444);
+MODULE_PARM_DESC(icache_size_log2, "Size of Dcache memory in log2(bytes)");
+#endif
+
+#ifdef SEP_BACKUP_BUF_SIZE
+int sep_backup_buf_size = SEP_BACKUP_BUF_SIZE;
+module_param(sep_backup_buf_size, int, 0444);
+MODULE_PARM_DESC(sep_backup_buf_size,
+		 "Size of backup buffer of SeP context (for warm-boot)");
+#endif
+
+/* Interrupt mask assigned to GPRs */
+/* Used for run time lookup, where SEP_HOST_GPR_IRQ_MASK() cannot be used */
+static const u32 gpr_interrupt_mask[] = {
+	SEP_HOST_GPR_IRQ_MASK(0),
+	SEP_HOST_GPR_IRQ_MASK(1),
+	SEP_HOST_GPR_IRQ_MASK(2),
+	SEP_HOST_GPR_IRQ_MASK(3),
+	SEP_HOST_GPR_IRQ_MASK(4),
+	SEP_HOST_GPR_IRQ_MASK(5),
+	SEP_HOST_GPR_IRQ_MASK(6),
+	SEP_HOST_GPR_IRQ_MASK(7)
+};
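+/* Illustrative run-time lookup, where the GPR index is a variable so the
+ * SEP_HOST_GPR_IRQ_MASK() macro cannot be used ("irr" stands for a value
+ * read from the interrupt request register; names are hypothetical):
+ *   if (irr & gpr_interrupt_mask[gpr_idx])
+ *           ... handle the GPR[gpr_idx] interrupt ...
+ */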
+
+u32 __iomem *security_cfg_reg;
+
+#ifdef DEBUG
+void dump_byte_array(const char *name, const u8 *the_array,
+		     unsigned long size)
+{
+	int i, line_offset = 0;
+	const u8 *cur_byte;
+	char line_buf[80];
+
+	line_offset = snprintf(line_buf, sizeof(line_buf), "%s[%lu]: ",
+			       name, size);
+
+	for (i = 0, cur_byte = the_array;
+	     (i < size) && (line_offset < sizeof(line_buf)); i++, cur_byte++) {
+		line_offset += snprintf(line_buf + line_offset,
+					sizeof(line_buf) - line_offset,
+					"%02X ", *cur_byte);
+		if (line_offset > 75) {	/* Cut before line end */
+			pr_debug("%s\n", line_buf);
+			line_offset = 0;
+		}
+	}
+
+	if (line_offset > 0)	/* Dump remaining line */
+		pr_debug("%s\n", line_buf);
+}
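+/* Usage sketch: dump_byte_array("key", key_buf, 16); emits (via pr_debug)
+ * a line such as "key[16]: 00 11 22 ...". key_buf is hypothetical. */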
+
+void dump_word_array(const char *name, const u32 *the_array,
+		     unsigned long size_in_words)
+{
+	int i, line_offset = 0;
+	const u32 *cur_word;
+	char line_buf[80];
+
+	line_offset = snprintf(line_buf, sizeof(line_buf), "%s[%lu]: ",
+			       name, size_in_words);
+
+	for (i = 0, cur_word = the_array;
+	     (i < size_in_words) && (line_offset < sizeof(line_buf));
+	     i++, cur_word++) {
+		line_offset += snprintf(line_buf + line_offset,
+					sizeof(line_buf) - line_offset,
+					"%08X ", *cur_word);
+		if (line_offset > 70) {	/* Cut before line end */
+			pr_debug("%s\n", line_buf);
+			line_offset = 0;
+		}
+	}
+
+	if (line_offset > 0)	/* Dump remaining line */
+		pr_debug("%s\n", line_buf);
+}
+
+#endif /*DEBUG*/
+/**** SeP descriptor operations implementation functions *****/
+/* (send descriptor, wait for completion, process result)    */
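+/* A sketch of the typical sequence (all names are from this file):
+ *   send_crypto_op_desc()/send_combined_op_desc() - enqueue SW descriptor(s)
+ *   wait_for_sep_op_result()                      - block until completion
+ *   crypto_op_completion_cleanup()                - release DMA resources
+ */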
+/**
+ * send_crypto_op_desc() - Pack crypto op. descriptor and send
+ * @op_ctx:
+ * @sep_ctx_load_req:	Flag if context loading is required
+ * @sep_ctx_init_req:	Flag if context init is required
+ * @proc_mode:		Processing mode
+ *
+ * On failure desc_type is retained so process_desc_completion cleans up
+ * resources anyway (error_info denotes failure to send/complete)
+ * Returns int 0 on success
+ */
+static int send_crypto_op_desc(struct sep_op_ctx *op_ctx,
+			       int sep_ctx_load_req, int sep_ctx_init_req,
+			       enum sep_proc_mode proc_mode)
+{
+	const struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct sep_sw_desc desc;
+	int rc;
+
+	desc_q_pack_crypto_op_desc(&desc, op_ctx,
+				   sep_ctx_load_req, sep_ctx_init_req,
+				   proc_mode);
+	/* op_state must be updated before dispatching descriptor */
+	rc = desc_q_enqueue(drvdata->desc_queue, &desc, true);
+	if (unlikely(IS_DESCQ_ENQUEUE_ERR(rc))) {
+		/* Desc. sending failed - "signal" process_desc_completion */
+		op_ctx->error_info = DXDI_ERROR_INTERNAL;
+	} else {
+		rc = 0;
+	}
+
+	return rc;
+}
+
+/**
+ * send_combined_op_desc() - Pack combined and/or load operation descriptor(s)
+ * @op_ctx:
+ * @sep_ctx_load_req: Per-context flags: which contexts require loading
+ * @sep_ctx_init_req: Flag if context init is required
+ * @proc_mode: Processing mode
+ * @cfg_scheme: The SEP format configuration scheme claimed by the user
+ *
+ * On failure desc_type is retained so process_desc_completion cleans up
+ * resources anyway (error_info denotes failure to send/complete)
+ * Returns int 0 on success
+ */
+static int send_combined_op_desc(struct sep_op_ctx *op_ctx,
+				 int *sep_ctx_load_req, int sep_ctx_init_req,
+				 enum sep_proc_mode proc_mode,
+				 u32 cfg_scheme)
+{
+	const struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct sep_sw_desc desc;
+	int rc;
+
+	/* transaction of two descriptors */
+	op_ctx->pending_descs_cntr = 2;
+
+	/* prepare load descriptor of combined associated contexts */
+	desc_q_pack_load_op_desc(&desc, op_ctx, sep_ctx_load_req);
+
+	/* op_state must be updated before dispatching descriptor */
+	rc = desc_q_enqueue(drvdata->desc_queue, &desc, true);
+	if (unlikely(IS_DESCQ_ENQUEUE_ERR(rc))) {
+		/* Desc. sending failed - "signal" process_desc_completion */
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+		goto send_combined_op_exit;
+	}
+
+	/* Prepare crypto descriptor for the combined scheme operation.
+	 * Contexts were already loaded by the prior descriptor, so pass 0
+	 * for the load request. */
+	desc_q_pack_combined_op_desc(&desc, op_ctx, 0,
+				     sep_ctx_init_req, proc_mode, cfg_scheme);
+	rc = desc_q_enqueue(drvdata->desc_queue, &desc, true);
+	if (unlikely(IS_DESCQ_ENQUEUE_ERR(rc))) {
+		/*invalidate first descriptor (if still pending) */
+		desc_q_mark_invalid_cookie(drvdata->desc_queue, (void *)op_ctx);
+		/* Desc. sending failed - "signal" process_desc_completion */
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	} else {
+		rc = 0;
+	}
+
+ send_combined_op_exit:
+	return rc;
+}
+
+/**
+ * register_client_memref() - Register given client memory buffer reference
+ * @client_ctx:		User context data
+ * @user_buf_ptr:	Buffer address in user space. NULL if sgl!=NULL.
+ * @sgl:		Scatter/gather list (for kernel buffers)
+ *			NULL if user_buf_ptr!=NULL.
+ * @buf_size:		Buffer size in bytes
+ * @dma_direction:	DMA direction
+ *
+ * Returns int: >=0 is the registered memory reference ID, <0 for error
+ */
+int register_client_memref(struct sep_client_ctx *client_ctx,
+			   u8 __user *user_buf_ptr,
+			   struct scatterlist *sgl,
+			   const unsigned long buf_size,
+			   const enum dma_data_direction dma_direction)
+{
+	int free_memref_idx, rc;
+	struct registered_memref *regmem_p;
+
+	if (unlikely((user_buf_ptr != NULL) && (sgl != NULL))) {
+		pr_err("Both user_buf_ptr and sgl are given!\n");
+		return -EINVAL;
+	}
+
+	/* Find free entry in user_memref */
+	for (free_memref_idx = 0, regmem_p = &client_ctx->reg_memrefs[0];
+	     free_memref_idx < MAX_REG_MEMREF_PER_CLIENT_CTX;
+	     free_memref_idx++, regmem_p++) {
+		mutex_lock(&regmem_p->buf_lock);
+		if (regmem_p->ref_cnt == 0)
+			break;	/* found free entry */
+		mutex_unlock(&regmem_p->buf_lock);
+	}
+	if (unlikely(free_memref_idx == MAX_REG_MEMREF_PER_CLIENT_CTX)) {
+		pr_warn("No free entry for user memory registration\n");
+		free_memref_idx = -ENOMEM;/* Negative error code as index */
+	} else {
+		pr_debug("Allocated memref_idx=%d (regmem_p=%p)\n",
+			      free_memref_idx, regmem_p);
+		regmem_p->ref_cnt = 1;	/* Capture entry */
+		/* Lock user pages for DMA and save pages info.
+		 * (prepare for MLLI) */
+		rc = llimgr_register_client_dma_buf(client_ctx->drv_data->
+						    sep_data->llimgr,
+						    user_buf_ptr, sgl, buf_size,
+						    0, dma_direction,
+						    &regmem_p->dma_obj);
+		if (unlikely(rc < 0)) {
+			/* Release entry */
+			regmem_p->ref_cnt = 0;
+			free_memref_idx = rc;
+		}
+		mutex_unlock(&regmem_p->buf_lock);
+	}
+
+	/* Return user_memref[] entry index as the memory reference ID */
+	return free_memref_idx;
+}
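+/* Illustrative call sequence for a user-space buffer (ubuf/len are
+ * hypothetical):
+ *   int id = register_client_memref(client_ctx, ubuf, NULL, len,
+ *                                   DMA_TO_DEVICE);
+ *   if (id >= 0) {
+ *           ... reference the buffer by id ...
+ *           free_client_memref(client_ctx, id);
+ *   }
+ */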
+
+/**
+ * free_client_memref() - Free resources associated with a user memory reference
+ * @client_ctx:	 User context data
+ * @memref_idx:	 Index of the user memory reference
+ *
+ * Free resources associated with a user memory reference
+ * (The referenced memory may be locked user pages or allocated DMA-coherent
+ *  memory mmap'ed to the user space)
+ * Returns int !0 on failure (memref still in use or unknown)
+ */
+int free_client_memref(struct sep_client_ctx *client_ctx,
+		       int memref_idx)
+{
+	struct registered_memref *regmem_p =
+	    &client_ctx->reg_memrefs[memref_idx];
+	int rc = 0;
+
+	if (!IS_VALID_MEMREF_IDX(memref_idx)) {
+		pr_err("Invalid memref ID %d\n", memref_idx);
+		return -EINVAL;
+	}
+
+	mutex_lock(&regmem_p->buf_lock);
+
+	if (likely(regmem_p->ref_cnt == 1)) {
+		/* TODO: support case of allocated DMA-coherent buffer */
+		llimgr_deregister_client_dma_buf(client_ctx->drv_data->
+						 sep_data->llimgr,
+						 &regmem_p->dma_obj);
+		regmem_p->ref_cnt = 0;
+	} else if (unlikely(regmem_p->ref_cnt == 0)) {
+		pr_err("Invoked for free memref ID=%d\n", memref_idx);
+		rc = -EINVAL;
+	} else {		/* ref_cnt > 1 */
+		pr_err(
+			    "BUSY/Invalid memref to release: ref_cnt=%d, user_buf_ptr=%p\n",
+			    regmem_p->ref_cnt, regmem_p->dma_obj.user_buf_ptr);
+		rc = -EBUSY;
+	}
+
+	mutex_unlock(&regmem_p->buf_lock);
+
+	return rc;
+}
+
+/**
+ * acquire_dma_obj() - Get the memref object of given memref_idx and
+ *			increment its reference count
+ * @client_ctx:	Associated client context
+ * @memref_idx:	Required registered memory reference ID (index)
+ *
+ * Get the memref object for the given memref_idx and increment its ref. count.
+ * The returned object must be released by invoking release_dma_obj() before
+ * the object (memref) may be freed.
+ * Returns struct client_dma_buffer *: the memref object, or NULL if invalid
+ */
+struct client_dma_buffer *acquire_dma_obj(struct sep_client_ctx *client_ctx,
+					  int memref_idx)
+{
+	struct registered_memref *regmem_p =
+	    &client_ctx->reg_memrefs[memref_idx];
+	struct client_dma_buffer *rc;
+
+	if (!IS_VALID_MEMREF_IDX(memref_idx)) {
+		pr_err("Invalid memref ID %d\n", memref_idx);
+		return NULL;
+	}
+
+	mutex_lock(&regmem_p->buf_lock);
+	if (regmem_p->ref_cnt < 1) {
+		pr_err("Invalid memref (ID=%d, ref_cnt=%d)\n",
+			    memref_idx, regmem_p->ref_cnt);
+		rc = NULL;
+	} else {
+		regmem_p->ref_cnt++;
+		rc = &regmem_p->dma_obj;
+	}
+	mutex_unlock(&regmem_p->buf_lock);
+
+	return rc;
+}
+
+/**
+ * release_dma_obj() - Release memref object taken with acquire_dma_obj()
+ *			(Does not free!)
+ * @client_ctx:	Associated client context
+ * @dma_obj:	The DMA object returned from acquire_dma_obj()
+ *
+ * Returns void
+ */
+void release_dma_obj(struct sep_client_ctx *client_ctx,
+		     struct client_dma_buffer *dma_obj)
+{
+	struct registered_memref *regmem_p;
+	int memref_idx;
+
+	if (dma_obj == NULL)	/* Probably failed on acquire_dma_obj */
+		return;
+	/* Verify valid container */
+	memref_idx = DMA_OBJ_TO_MEMREF_IDX(client_ctx, dma_obj);
+	if (!IS_VALID_MEMREF_IDX(memref_idx)) {
+		pr_err("Given DMA object is not registered\n");
+		return;
+	}
+	/* Get container */
+	regmem_p = &client_ctx->reg_memrefs[memref_idx];
+	mutex_lock(&regmem_p->buf_lock);
+	if (regmem_p->ref_cnt < 2) {
+		pr_err("Invalid memref (ref_cnt=%d, user_buf_ptr=%p)\n",
+			    regmem_p->ref_cnt, regmem_p->dma_obj.user_buf_ptr);
+	} else {
+		regmem_p->ref_cnt--;
+	}
+	mutex_unlock(&regmem_p->buf_lock);
+}
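+/* Reference-count lifecycle sketch for a registered memref:
+ *   register_client_memref()  -> ref_cnt = 1 (entry captured)
+ *   acquire_dma_obj()         -> ref_cnt = 2 (object in use)
+ *   release_dma_obj()         -> ref_cnt = 1 (use done)
+ *   free_client_memref()      -> ref_cnt = 0 (entry free again)
+ */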
+
+/**
+ * crypto_op_completion_cleanup() - Cleanup CRYPTO_OP descriptor operation
+ *					resources after completion
+ * @op_ctx:
+ *
+ * Returns int
+ */
+int crypto_op_completion_cleanup(struct sep_op_ctx *op_ctx)
+{
+	struct sep_client_ctx *client_ctx = op_ctx->client_ctx;
+	struct queue_drvdata *drvdata = client_ctx->drv_data;
+	void *llimgr = drvdata->sep_data->llimgr;
+	struct client_crypto_ctx_info *ctx_info_p = &(op_ctx->ctx_info);
+	enum sep_op_type op_type = op_ctx->op_type;
+	u32 error_info = op_ctx->error_info;
+	u32 ctx_info_idx;
+	bool data_in_place;
+	const enum crypto_alg_class alg_class =
+	    ctxmgr_get_alg_class(&op_ctx->ctx_info);
+
+	/* Resources cleanup on data processing operations (PROC/FINI) */
+	if (op_type & (SEP_OP_CRYPTO_PROC | SEP_OP_CRYPTO_FINI)) {
+		if ((alg_class == ALG_CLASS_HASH) ||
+		    (ctxmgr_get_mac_type(ctx_info_p) == DXDI_MAC_HMAC)) {
+			/* Unmap what was mapped in prepare_data_for_sep() */
+			ctxmgr_unmap2dev_hash_tail(ctx_info_p,
+						   drvdata->sep_data->dev);
+			/* Save last data block tail (remainder of crypto-block)
+			 * or clear that buffer after it was used if there is
+			 * no new block remainder data */
+			ctxmgr_save_hash_blk_remainder(ctx_info_p,
+						       &op_ctx->din_dma_obj,
+						       false);
+		}
+		data_in_place = llimgr_is_same_mlli_tables(llimgr,
+							   &op_ctx->ift,
+							   &op_ctx->oft);
+		/* First free IFT resources */
+		llimgr_destroy_mlli(llimgr, &op_ctx->ift);
+		llimgr_deregister_client_dma_buf(llimgr, &op_ctx->din_dma_obj);
+		/* Free OFT resources */
+		if (data_in_place) {
+			/* OFT already destroyed as IFT. Just clean it. */
+			MLLI_TABLES_LIST_INIT(&op_ctx->oft);
+			CLEAN_DMA_BUFFER_INFO(&op_ctx->din_dma_obj);
+		} else {	/* OFT resources cleanup */
+			llimgr_destroy_mlli(llimgr, &op_ctx->oft);
+			llimgr_deregister_client_dma_buf(llimgr,
+							 &op_ctx->dout_dma_obj);
+		}
+	}
+
+	for (ctx_info_idx = 0;
+	     ctx_info_idx < op_ctx->ctx_info_num;
+	     ctx_info_idx++, ctx_info_p++) {
+		if ((op_type & SEP_OP_CRYPTO_FINI) || (error_info != 0)) {
+			/* If this was a finalizing descriptor, or any error,
+			 * invalidate from cache */
+			ctxmgr_sep_cache_invalidate(drvdata->sep_cache,
+					ctxmgr_get_ctx_id(ctx_info_p),
+					CRYPTO_CTX_ID_SINGLE_MASK);
+		}
+		/* Update context state */
+		if ((op_type & SEP_OP_CRYPTO_FINI) ||
+		    ((op_type & SEP_OP_CRYPTO_INIT) && (error_info != 0))) {
+			/* If this was a finalizing descriptor,
+			 * or a failing initializing descriptor: */
+			ctxmgr_set_ctx_state(ctx_info_p,
+					CTX_STATE_UNINITIALIZED);
+		} else if (op_type & SEP_OP_CRYPTO_INIT)
+			ctxmgr_set_ctx_state(ctx_info_p,
+					CTX_STATE_INITIALIZED);
+	}
+
+	return 0;
+}
+
+/**
+ * wait_for_sep_op_result() - Wait for outstanding SeP operation to complete and
+ *				fetch SeP ret-code
+ * @op_ctx:
+ *
+ * Wait for outstanding SeP operation to complete and fetch SeP ret-code
+ * into op_ctx->sep_ret_code
+ * Returns int
+ */
+int wait_for_sep_op_result(struct sep_op_ctx *op_ctx)
+{
+#ifdef DEBUG
+	if (unlikely(op_ctx->op_state == USER_OP_NOP)) {
+		pr_err("Operation context is inactive!\n");
+		op_ctx->error_info = DXDI_ERROR_FATAL;
+		return -EINVAL;
+	}
+#endif
+
+	/* wait until crypto operation is completed.
+	 * We cannot timeout this operation because hardware operations may
+	 * be still pending on associated data buffers.
+	 * Only system reboot can take us out of this abnormal state in a safe
+	 * manner (avoiding data corruption) */
+	wait_for_completion(&(op_ctx->ioctl_op_compl));
+#ifdef DEBUG
+	if (unlikely(op_ctx->op_state != USER_OP_COMPLETED)) {
+		pr_err(
+			    "Op. state is not COMPLETED after getting completion event (op_ctx=0x%p, op_state=%d)\n",
+			    op_ctx, op_ctx->op_state);
+		dump_stack();	/*SEP_DRIVER_BUG(); */
+		op_ctx->error_info = DXDI_ERROR_FATAL;
+		return -EINVAL;
+	}
+#endif
+
+	return 0;
+}
+
+/**
+ * get_num_of_ctx_info() - Count the number of valid contexts assigned for the
+ *				combined operation.
+ * @config:	 The user configuration scheme array
+ *
+ * Returns int
+ */
+static int get_num_of_ctx_info(struct dxdi_combined_props *config)
+{
+	int valid_ctx_n;
+
+	for (valid_ctx_n = 0;
+	     (valid_ctx_n < DXDI_COMBINED_NODES_MAX) &&
+	     (config->node_props[valid_ctx_n].context != NULL);
+	     valid_ctx_n++) {
+		/* NOOP */
+	}
+
+	return valid_ctx_n;
+}
+
+/***** Driver Interface implementation functions *****/
+
+/**
+ * format_sep_combined_cfg_scheme() - Encode the user configuration scheme to
+ *					SeP format.
+ * @config:	 The user configuration scheme array
+ * @op_ctx:	 Operation context
+ *
+ * Returns u32
+ */
+static u32 format_sep_combined_cfg_scheme(struct dxdi_combined_props *config,
+					  struct sep_op_ctx *op_ctx)
+{
+	enum dxdi_input_engine_type engine_src = DXDI_INPUT_NULL;
+	enum sep_engine_type engine_type = SEP_ENGINE_NULL;
+	enum crypto_alg_class alg_class;
+	u32 sep_cfg_scheme = 0;	/* the encoded config scheme */
+	struct client_crypto_ctx_info *ctx_info_p = &op_ctx->ctx_info;
+	enum dxdi_sym_cipher_type symc_type;
+	int eng_idx, done = 0;
+	int prev_direction = -1;
+
+	/* encode engines connections into SEP format */
+	for (eng_idx = 0;
+	     (eng_idx < DXDI_COMBINED_NODES_MAX) && (!done);
+	     eng_idx++, ctx_info_p++) {
+
+		/* set engine source */
+		engine_src = config->node_props[eng_idx].eng_input;
+
+		/* set engine type */
+		if (config->node_props[eng_idx].context != NULL) {
+			int dir;
+			alg_class = ctxmgr_get_alg_class(ctx_info_p);
+			switch (alg_class) {
+			case ALG_CLASS_HASH:
+				engine_type = SEP_ENGINE_HASH;
+				break;
+			case ALG_CLASS_SYM_CIPHER:
+				symc_type =
+				    ctxmgr_get_sym_cipher_type(ctx_info_p);
+				if ((symc_type == DXDI_SYMCIPHER_AES_ECB) ||
+				    (symc_type == DXDI_SYMCIPHER_AES_CBC) ||
+				    (symc_type == DXDI_SYMCIPHER_AES_CTR))
+					engine_type = SEP_ENGINE_AES;
+				else
+					engine_type = SEP_ENGINE_NULL;
+
+				dir = (int)
+				    ctxmgr_get_symcipher_direction(ctx_info_p);
+				if (prev_direction == -1) {
+					prev_direction = dir;
+				} else if (!(prev_direction ==
+					     SEP_CRYPTO_DIRECTION_DECRYPT &&
+					     dir ==
+					     SEP_CRYPTO_DIRECTION_ENCRYPT)) {
+					/* Only the decrypt->encrypt
+					 * combination is valid */
+					pr_err(
+					    "Invalid direction combination %s->%s\n",
+					    prev_direction ==
+					    SEP_CRYPTO_DIRECTION_DECRYPT
+					    ? "DEC" : "ENC",
+					    dir ==
+					    SEP_CRYPTO_DIRECTION_DECRYPT
+					    ? "DEC" : "ENC");
+					op_ctx->error_info =
+					    DXDI_ERROR_INVAL_DIRECTION;
+				}
+				break;
+			default:
+				engine_type = SEP_ENGINE_NULL;
+				break;	/*unsupported alg class */
+			}
+		} else if (engine_src != DXDI_INPUT_NULL) {
+			/* In case the engine source is not NULL but a NULL
+			 * sub-context is passed, the engine is of DOUT type */
+			engine_type = SEP_ENGINE_DOUT;
+			/* exit after props set */
+			done = 1;
+		} else {
+			/* both context pointer & input type are
+			 * NULL -we're done */
+			break;
+		}
+
+		sep_comb_eng_props_set(&sep_cfg_scheme, eng_idx,
+					  engine_src, engine_type);
+	}
+
+	return sep_cfg_scheme;
+}
+
+/**
+ * init_crypto_context() - Initialize host crypto context
+ * @op_ctx:
+ * @context_buf:
+ * @alg_class:
+ * @props:	Pointer to configuration properties which match given alg_class:
+ *		ALG_CLASS_SYM_CIPHER: struct dxdi_sym_cipher_props
+ *		ALG_CLASS_AUTH_ENC: struct dxdi_auth_enc_props
+ *		ALG_CLASS_MAC: struct dxdi_mac_props
+ *		ALG_CLASS_HASH: enum dxdi_hash_type
+ *
+ * Returns int 0 if operation executed in SeP.
+ * See error_info for actual results.
+ */
+static int init_crypto_context(struct sep_op_ctx *op_ctx,
+			       u32 __user *context_buf,
+			       enum crypto_alg_class alg_class, void *props)
+{
+	int rc;
+	int sep_cache_load_req;
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	bool postpone_init = false;
+
+	rc = ctxmgr_map_user_ctx(&op_ctx->ctx_info, drvdata->sep_data->dev,
+				 alg_class, context_buf);
+	if (rc != 0) {
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		return rc;
+	}
+	op_ctx->op_type = SEP_OP_CRYPTO_INIT;
+	ctxmgr_set_ctx_state(&op_ctx->ctx_info, CTX_STATE_UNINITIALIZED);
+	/* Allocate a new Crypto context ID */
+	ctxmgr_set_ctx_id(&op_ctx->ctx_info,
+			  alloc_crypto_ctx_id(op_ctx->client_ctx));
+
+	switch (alg_class) {
+	case ALG_CLASS_SYM_CIPHER:
+		rc = ctxmgr_init_symcipher_ctx(&op_ctx->ctx_info,
+					       (struct dxdi_sym_cipher_props *)
+					       props, &postpone_init,
+					       &op_ctx->error_info);
+		break;
+	case ALG_CLASS_AUTH_ENC:
+		rc = ctxmgr_init_auth_enc_ctx(&op_ctx->ctx_info,
+					      (struct dxdi_auth_enc_props *)
+					      props, &op_ctx->error_info);
+		break;
+	case ALG_CLASS_MAC:
+		rc = ctxmgr_init_mac_ctx(&op_ctx->ctx_info,
+					 (struct dxdi_mac_props *)props,
+					 &op_ctx->error_info);
+		break;
+	case ALG_CLASS_HASH:
+		rc = ctxmgr_init_hash_ctx(&op_ctx->ctx_info,
+					  *((enum dxdi_hash_type *)props),
+					  &op_ctx->error_info);
+		break;
+	default:
+		pr_err("Invalid algorithm class %d\n", alg_class);
+		op_ctx->error_info = DXDI_ERROR_UNSUP;
+		rc = -EINVAL;
+	}
+	if (rc != 0)
+		goto ctx_init_exit;
+
+	/* After the initialization above, the context is only partially
+	 * initialized; SeP initialization is still missing */
+	ctxmgr_set_ctx_state(&op_ctx->ctx_info, CTX_STATE_PARTIAL_INIT);
+#ifdef DEBUG
+	ctxmgr_dump_sep_ctx(&op_ctx->ctx_info);
+#endif
+
+	/* If not all the init. information is available at this time
+	 * we postpone INIT in SeP to processing phase */
+	if (postpone_init) {
+		pr_debug("Init. postponed to processing phase\n");
+		ctxmgr_unmap_user_ctx(&op_ctx->ctx_info);
+		/* must be valid on "success" */
+		op_ctx->error_info = DXDI_ERROR_NULL;
+		return 0;
+	}
+
+	/* Flush out of host cache */
+	ctxmgr_sync_sep_ctx(&op_ctx->ctx_info, drvdata->sep_data->dev);
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc != 0) {
+		pr_err("Failed locking descQ sequencer[%u]\n",
+		       op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+		goto ctx_init_exit;
+	}
+
+	ctxmgr_set_sep_cache_idx(&op_ctx->ctx_info,
+				 ctxmgr_sep_cache_alloc(drvdata->sep_cache,
+							ctxmgr_get_ctx_id
+							(&op_ctx->ctx_info),
+							&sep_cache_load_req));
+	if (!sep_cache_load_req)
+		pr_err("New context already in SeP cache?!");
+
+	rc = send_crypto_op_desc(op_ctx,
+				 1 /*always load on init*/, 1 /*INIT*/,
+				 SEP_PROC_MODE_NOP);
+
+	mutex_unlock(&drvdata->desc_queue_sequencer);
+	if (likely(rc == 0))
+		rc = wait_for_sep_op_result(op_ctx);
+
+ ctx_init_exit:
+	/* Cleanup resources and update context state */
+	crypto_op_completion_cleanup(op_ctx);
+	ctxmgr_unmap_user_ctx(&op_ctx->ctx_info);
+
+	return rc;
+}
+
+/**
+ * map_ctx_for_proc() - Map previously initialized crypto context before data
+ *			processing
+ * @client_ctx:	User context data
+ * @ctx_info:	Context info. object to map the context into
+ * @context_buf: Crypto context buffer in user space
+ * @ctx_state_p: Returned current context state
+ *
+ * Returns int
+ */
+static int map_ctx_for_proc(struct sep_client_ctx *client_ctx,
+			    struct client_crypto_ctx_info *ctx_info,
+			    u32 __user *context_buf,
+			    enum host_ctx_state *ctx_state_p)
+{
+	int rc;
+	struct queue_drvdata *drvdata = client_ctx->drv_data;
+
+	/* default state in case of error */
+	*ctx_state_p = CTX_STATE_UNINITIALIZED;
+
+	rc = ctxmgr_map_user_ctx(ctx_info, drvdata->sep_data->dev,
+				 ALG_CLASS_NONE, context_buf);
+	if (rc != 0) {
+		pr_err("Failed mapping context\n");
+		return rc;
+	}
+	if (ctxmgr_get_session_id(ctx_info) != (uintptr_t) client_ctx) {
+		pr_err("Context ID is not associated with this session\n");
+		rc = -EINVAL;
+	}
+	if (rc == 0)
+		*ctx_state_p = ctx_info->ctx_kptr->state;
+
+#ifdef DEBUG
+	ctxmgr_dump_sep_ctx(ctx_info);
+	/* Flush out of host cache */
+	ctxmgr_sync_sep_ctx(ctx_info, drvdata->sep_data->dev);
+#endif
+	if (rc != 0)
+		ctxmgr_unmap_user_ctx(ctx_info);
+
+	return rc;
+}
+
+/**
+ * init_combined_context() - Initialize Combined context
+ * @op_ctx:
+ * @config: Pointer to configuration scheme to be validated by SeP
+ *
+ * Returns int 0 if operation executed in SeP.
+ * See error_info for actual results.
+ */
+static int init_combined_context(struct sep_op_ctx *op_ctx,
+				 struct dxdi_combined_props *config)
+{
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct client_crypto_ctx_info *ctx_info_p = &op_ctx->ctx_info;
+	int sep_ctx_load_req[DXDI_COMBINED_NODES_MAX];
+	enum host_ctx_state ctx_state;
+	int rc, ctx_idx, ctx_mapped_n = 0;
+
+	op_ctx->op_type = SEP_OP_CRYPTO_INIT;
+
+	/* No context to load yet - clear the flags buffer */
+	memset(sep_ctx_load_req, 0, sizeof(sep_ctx_load_req));
+
+	/* set the number of active contexts */
+	op_ctx->ctx_info_num = get_num_of_ctx_info(config);
+
+	/* map each context in the configuration scheme */
+	for (ctx_idx = 0; ctx_idx < op_ctx->ctx_info_num;
+	     ctx_idx++, ctx_info_p++) {
+		/* context already initialized by the user */
+		rc = map_ctx_for_proc(op_ctx->client_ctx, ctx_info_p,
+				      config->node_props[ctx_idx].context,
+				      /*user ctx */ &ctx_state);
+		if (rc != 0) {
+			op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+			goto ctx_init_exit;
+		}
+
+		ctx_mapped_n++;	/*ctx mapped successfully */
+
+		/* context must be initialized */
+		if (ctx_state != CTX_STATE_INITIALIZED) {
+			pr_err(
+				    "Given context [%d] is in invalid state for processing: %d\n",
+				    ctx_idx, ctx_state);
+			op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+			rc = -EINVAL;
+			goto ctx_init_exit;
+		}
+	}
+
+	ctx_info_p = &op_ctx->ctx_info;
+
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc == 0) {		/* Coupled sequence */
+		for (ctx_idx = 0; ctx_idx < op_ctx->ctx_info_num;
+		     ctx_idx++, ctx_info_p++) {
+			ctxmgr_set_sep_cache_idx(ctx_info_p,
+						 ctxmgr_sep_cache_alloc
+						 (drvdata->sep_cache,
+						  ctxmgr_get_ctx_id(ctx_info_p),
+						  &sep_ctx_load_req[ctx_idx]));
+		}
+
+		rc = send_combined_op_desc(op_ctx, sep_ctx_load_req,
+					   1 /*INIT*/, SEP_PROC_MODE_NOP,
+					   0 /*no scheme in init*/);
+
+		mutex_unlock(&drvdata->desc_queue_sequencer);
+	} else {
+		pr_err("Failed locking descQ sequencer[%u]\n",
+			    op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	}
+	if (rc == 0)
+		rc = wait_for_sep_op_result(op_ctx);
+
+	crypto_op_completion_cleanup(op_ctx);
+
+ ctx_init_exit:
+	ctx_info_p = &op_ctx->ctx_info;
+
+	for (ctx_idx = 0; ctx_idx < ctx_mapped_n; ctx_idx++, ctx_info_p++)
+		ctxmgr_unmap_user_ctx(ctx_info_p);
+
+	return rc;
+}
+
+/**
+ * prepare_adata_for_sep() - Generate MLLI tables for additional/associated data
+ *				(input only)
+ * @op_ctx:	 Operation context
+ * @adata_in:
+ * @adata_in_size:
+ *
+ * Returns int
+ */
+static inline int prepare_adata_for_sep(struct sep_op_ctx *op_ctx,
+					u8 __user *adata_in,
+					u32 adata_in_size)
+{
+	struct sep_client_ctx *client_ctx = op_ctx->client_ctx;
+	void *llimgr = client_ctx->drv_data->sep_data->llimgr;
+	struct dma_pool *spad_buf_pool =
+	    client_ctx->drv_data->sep_data->spad_buf_pool;
+	struct mlli_tables_list *ift_p = &op_ctx->ift;
+	unsigned long a0_buf_size;
+	u8 *a0_buf_p;
+	int rc = 0;
+
+	if (adata_in == NULL) {	/* adata_in required for this alg_class */
+		pr_err("adata_in==NULL for authentication\n");
+		op_ctx->error_info = DXDI_ERROR_INVAL_DIN_PTR;
+		return -EINVAL;
+	}
+
+	op_ctx->spad_buf_p = dma_pool_alloc(spad_buf_pool,
+					    GFP_KERNEL,
+					    &op_ctx->spad_buf_dma_addr);
+	if (unlikely(op_ctx->spad_buf_p == NULL)) {
+		pr_err("Failed allocating from spad_buf_pool for A0\n");
+		return -ENOMEM;
+	}
+	a0_buf_p = op_ctx->spad_buf_p;
+
+	/* format A0 (the first 2 words in the first block) */
+	if (adata_in_size < ((1UL << 16) - (1UL << 8))) {
+		a0_buf_size = 2;
+
+		a0_buf_p[0] = (adata_in_size >> 8) & 0xFF;
+		a0_buf_p[1] = adata_in_size & 0xFF;
+	} else {
+		a0_buf_size = 6;
+
+		a0_buf_p[0] = 0xFF;
+		a0_buf_p[1] = 0xFE;
+		a0_buf_p[2] = (adata_in_size >> 24) & 0xFF;
+		a0_buf_p[3] = (adata_in_size >> 16) & 0xFF;
+		a0_buf_p[4] = (adata_in_size >> 8) & 0xFF;
+		a0_buf_p[5] = adata_in_size & 0xFF;
+	}
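+	/* Worked example: adata_in_size = 0x1234 (< 2^16 - 2^8) encodes as
+	 * the two bytes {0x12, 0x34}; adata_in_size = 0x00010000 encodes as
+	 * {0xFF, 0xFE, 0x00, 0x01, 0x00, 0x00}, i.e. the 0xFF,0xFE marker
+	 * followed by the 32-bit size (per the CCM Adata length rules). */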
+
+	/* Create IFT (MLLI table) */
+	rc = llimgr_register_client_dma_buf(llimgr,
+					    adata_in, NULL, adata_in_size, 0,
+					    DMA_TO_DEVICE,
+					    &op_ctx->din_dma_obj);
+	if (likely(rc == 0)) {
+		rc = llimgr_create_mlli(llimgr, ift_p, DMA_TO_DEVICE,
+					&op_ctx->din_dma_obj,
+					op_ctx->spad_buf_dma_addr, a0_buf_size);
+		if (unlikely(rc != 0)) {
+			llimgr_deregister_client_dma_buf(llimgr,
+							 &op_ctx->din_dma_obj);
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * prepare_cipher_data_for_sep() - Generate MLLI tables for cipher algorithms
+ *				(input + output)
+ * @op_ctx:	 Operation context
+ * @data_in:	User space pointer for input data (NULL for kernel data)
+ * @sgl_in:	Kernel buffers s/g list for input data (NULL for user data)
+ * @data_out:	User space pointer for output data (NULL for kernel data)
+ * @sgl_out:	Kernel buffers s/g list for output data (NULL for user data)
+ * @data_in_size:
+ *
+ * Returns int
+ */
+static inline int prepare_cipher_data_for_sep(struct sep_op_ctx *op_ctx,
+					      u8 __user *data_in,
+					      struct scatterlist *sgl_in,
+					      u8 __user *data_out,
+					      struct scatterlist *sgl_out,
+					      u32 data_in_size)
+{
+	struct sep_client_ctx *client_ctx = op_ctx->client_ctx;
+	void *llimgr = client_ctx->drv_data->sep_data->llimgr;
+	const bool is_data_inplace =
+	    data_in != NULL ? (data_in == data_out) : (sgl_in == sgl_out);
+	const enum dma_data_direction din_dma_direction =
+	    is_data_inplace ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+	int rc;
+
+	/* Check parameters */
+	if (data_in_size == 0) {	/* No data to prepare */
+		return 0;
+	}
+	if ((data_out == NULL) && (sgl_out == NULL)) {
+		/* data_out required for this alg_class */
+		pr_err("data_out/sgl_out==NULL for enc/decryption\n");
+		op_ctx->error_info = DXDI_ERROR_INVAL_DOUT_PTR;
+		return -EINVAL;
+	}
+
+	/* Avoid partial overlapping of data_in with data_out */
+	if (!is_data_inplace)
+		if (data_in != NULL) {	/* User space buffer */
+			if (((data_in < data_out) &&
+			     ((data_in + data_in_size) > data_out)) ||
+			    ((data_out < data_in) &&
+			     ((data_out + data_in_size) > data_in))) {
+				pr_err("Buffers partially overlap!\n");
+				op_ctx->error_info =
+				    DXDI_ERROR_DIN_DOUT_OVERLAP;
+				return -EINVAL;
+			}
+		}
+	/* else: TODO - scan s/g lists for overlapping */
+
+	/* Create IFT + OFT (MLLI tables) */
+	rc = llimgr_register_client_dma_buf(llimgr,
+					    data_in, sgl_in, data_in_size, 0,
+					    din_dma_direction,
+					    &op_ctx->din_dma_obj);
+	if (likely(rc == 0)) {
+		rc = llimgr_create_mlli(llimgr, &op_ctx->ift, din_dma_direction,
+					&op_ctx->din_dma_obj, 0, 0);
+	}
+	if (likely(rc == 0)) {
+		if (is_data_inplace) {
+			/* Mirror IFT data in OFT */
+			op_ctx->dout_dma_obj = op_ctx->din_dma_obj;
+			op_ctx->oft = op_ctx->ift;
+		} else {	/* Create OFT */
+			rc = llimgr_register_client_dma_buf(llimgr,
+						data_out, sgl_out,
+						data_in_size, 0,
+						DMA_FROM_DEVICE,
+						&op_ctx->dout_dma_obj);
+			if (likely(rc == 0)) {
+				rc = llimgr_create_mlli(llimgr, &op_ctx->oft,
+							DMA_FROM_DEVICE,
+							&op_ctx->dout_dma_obj,
+							0, 0);
+			}
+		}
+	}
+
+	if (unlikely(rc != 0)) {	/* Failure cleanup */
+		/* No output MLLI to free in error case */
+		if (!is_data_inplace) {
+			llimgr_deregister_client_dma_buf(llimgr,
+							 &op_ctx->dout_dma_obj);
+		}
+		llimgr_destroy_mlli(llimgr, &op_ctx->ift);
+		llimgr_deregister_client_dma_buf(llimgr, &op_ctx->din_dma_obj);
+	}
+
+	return rc;
+}
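+/* Overlap example: with data_in = 0x1000 and data_in_size = 0x100, any
+ * data_out in 0x0F01..0x10FF other than 0x1000 itself partially overlaps and
+ * is rejected; data_out == data_in selects the in-place path instead. */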
+
+/**
+ * prepare_hash_data_for_sep() - Prepare data for hash operation
+ * @op_ctx:		Operation context
+ * @is_finalize:	Is hash finalize operation (last)
+ * @data_in:		Pointer to user buffer OR...
+ * @sgl_in:		Gather list for kernel buffers
+ * @data_in_size:	Data size in bytes
+ *
+ * Returns 0 on success
+ */
+static int prepare_hash_data_for_sep(struct sep_op_ctx *op_ctx,
+				     bool is_finalize,
+				     u8 __user *data_in,
+				     struct scatterlist *sgl_in,
+				     u32 data_in_size)
+{
+	struct sep_client_ctx *client_ctx = op_ctx->client_ctx;
+	struct queue_drvdata *drvdata = client_ctx->drv_data;
+	void *llimgr = drvdata->sep_data->llimgr;
+	u32 crypto_blk_size;
+	/* data size for processing this op. (incl. prev. block remainder) */
+	u32 data_size4hash;
+	/* amount of data_in to process in this op. */
+	u32 data_in_save4next;
+	u16 last_hash_blk_tail_size;
+	dma_addr_t last_hash_blk_tail_dma;
+	int rc;
+
+	if ((data_in != NULL) && (sgl_in != NULL)) {
+		pr_err("Given valid data_in+sgl_in!\n");
+		return -EINVAL;
+	}
+
+	/* Hash block size required in order to buffer block remainders */
+	crypto_blk_size = ctxmgr_get_crypto_blk_size(&op_ctx->ctx_info);
+	if (crypto_blk_size == 0) {	/* Unsupported algorithm?... */
+		op_ctx->error_info = DXDI_ERROR_UNSUP;
+		return -ENOSYS;
+	}
+
+	/* Map for DMA the last block tail, if any */
+	rc = ctxmgr_map2dev_hash_tail(&op_ctx->ctx_info,
+				      drvdata->sep_data->dev);
+	if (rc != 0) {
+		pr_err("Failed mapping hash data tail buffer\n");
+		return rc;
+	}
+	last_hash_blk_tail_size =
+	    ctxmgr_get_hash_blk_remainder_buf(&op_ctx->ctx_info,
+					      &last_hash_blk_tail_dma);
+	data_size4hash = data_in_size + last_hash_blk_tail_size;
+	if (!is_finalize) {
+		/* Not last. Round down to hash block size (assumes the
+		 * block size is a power of 2). */
+		data_size4hash = (data_size4hash & ~(crypto_blk_size - 1));
+		/* On the last hash op. take all that's left */
+	}
+	data_in_save4next = (data_size4hash > 0) ?
+	    data_in_size - (data_size4hash - last_hash_blk_tail_size) :
+	    data_in_size;
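+	/* Worked example (assuming crypto_blk_size = 64): with a 10-byte tail
+	 * saved from the previous op. and data_in_size = 100,
+	 * data_size4hash = 110 is rounded down to 64, and
+	 * data_in_save4next = 100 - (64 - 10) = 46 bytes are kept for the
+	 * next operation. */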
+	rc = llimgr_register_client_dma_buf(llimgr,
+					    data_in, sgl_in, data_in_size,
+					    data_in_save4next, DMA_TO_DEVICE,
+					    &op_ctx->din_dma_obj);
+	if (unlikely(rc != 0)) {
+		pr_err("Failed registering client buffer (rc=%d)\n", rc);
+	} else {
+		if ((!is_finalize) && (data_size4hash == 0)) {
+			/* Not enough for even one hash block
+			 * (all saved for next) */
+			ctxmgr_unmap2dev_hash_tail(&op_ctx->ctx_info,
+						   drvdata->sep_data->dev);
+			/* Append to existing tail buffer */
+			rc = ctxmgr_save_hash_blk_remainder(&op_ctx->ctx_info,
+						&op_ctx->din_dma_obj,
+						true /*append*/);
+			if (rc == 0)	/* signal: not even one block */
+				rc = -ENOTBLK;
+		} else {
+			rc = llimgr_create_mlli(llimgr, &op_ctx->ift,
+						DMA_TO_DEVICE,
+						&op_ctx->din_dma_obj,
+						last_hash_blk_tail_dma,
+						last_hash_blk_tail_size);
+		}
+	}
+
+	if (unlikely(rc != 0)) {	/* Failed (or -ENOTBLK) */
+		/* No harm if we invoke deregister if it was not registered */
+		llimgr_deregister_client_dma_buf(llimgr, &op_ctx->din_dma_obj);
+		/* Unmap hash block tail buffer */
+		ctxmgr_unmap2dev_hash_tail(&op_ctx->ctx_info,
+					   drvdata->sep_data->dev);
+	}
+	/* Hash block remainder would be copied into tail buffer only after
+	 * operation completion, because this buffer is still in use for
+	 * current operation */
+
+	return rc;
+}
+
+/**
+ * prepare_mac_data_for_sep() - Prepare data memory objects for (AES) MAC
+ *				operations
+ * @op_ctx:
+ * @data_in:		Pointer to user buffer OR...
+ * @sgl_in:		Gather list for kernel buffers
+ * @data_in_size:
+ *
+ * Returns int
+ */
+static inline int prepare_mac_data_for_sep(struct sep_op_ctx *op_ctx,
+					   u8 __user *data_in,
+					   struct scatterlist *sgl_in,
+					   u32 data_in_size)
+{
+	struct sep_client_ctx *client_ctx = op_ctx->client_ctx;
+	void *llimgr = client_ctx->drv_data->sep_data->llimgr;
+	int rc;
+
+	rc = llimgr_register_client_dma_buf(llimgr,
+					    data_in, sgl_in, data_in_size, 0,
+					    DMA_TO_DEVICE,
+					    &op_ctx->din_dma_obj);
+	if (likely(rc == 0)) {
+		rc = llimgr_create_mlli(llimgr, &op_ctx->ift,
+					DMA_TO_DEVICE, &op_ctx->din_dma_obj, 0,
+					0);
+		if (rc != 0) {
+			llimgr_deregister_client_dma_buf(llimgr,
+							 &op_ctx->din_dma_obj);
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * prepare_data_for_sep() - Prepare data for processing by SeP
+ * @op_ctx:
+ * @data_in:	User space pointer for input data (NULL for kernel data)
+ * @sgl_in:	Kernel buffers s/g list for input data (NULL for user data)
+ * @data_out:	User space pointer for output data (NULL for kernel data)
+ * @sgl_out:	Kernel buffers s/g list for output data (NULL for user data)
+ * @data_in_size:	 data_in buffer size (and data_out's if not NULL)
+ * @data_intent:	 the purpose of the given data
+ *
+ * Prepare data for processing by SeP
+ * (common flow for sep_proc_dblk and sep_fin_proc).
+ * Returns int
+ */
+int prepare_data_for_sep(struct sep_op_ctx *op_ctx,
+			 u8 __user *data_in,
+			 struct scatterlist *sgl_in,
+			 u8 __user *data_out,
+			 struct scatterlist *sgl_out,
+			 u32 data_in_size,
+			 enum crypto_data_intent data_intent)
+{
+	int rc;
+	enum crypto_alg_class alg_class;
+
+	if (data_intent == CRYPTO_DATA_ADATA) {
+		/* additional/associated data */
+		if (!ctxmgr_is_valid_adata_size(&op_ctx->ctx_info,
+						data_in_size)) {
+			op_ctx->error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+			return -EINVAL;
+		}
+	} else {
+		/* cipher/text data */
+		if (!ctxmgr_is_valid_size(&op_ctx->ctx_info,
+					  data_in_size,
+					  (data_intent ==
+					   CRYPTO_DATA_TEXT_FINALIZE))) {
+			op_ctx->error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+			return -EINVAL;
+		}
+	}
+
+	pr_debug("data_in=0x%p/0x%p data_out=0x%p/0x%p data_in_size=%uB\n",
+		      data_in, sgl_in, data_out, sgl_out, data_in_size);
+
+	alg_class = ctxmgr_get_alg_class(&op_ctx->ctx_info);
+	pr_debug("alg_class = %d\n", alg_class);
+	switch (alg_class) {
+	case ALG_CLASS_SYM_CIPHER:
+		rc = prepare_cipher_data_for_sep(op_ctx,
+						 data_in, sgl_in, data_out,
+						 sgl_out, data_in_size);
+		break;
+	case ALG_CLASS_AUTH_ENC:
+		if (data_intent == CRYPTO_DATA_ADATA) {
+			struct host_crypto_ctx_auth_enc *aead_ctx_p =
+			    (struct host_crypto_ctx_auth_enc *)op_ctx->ctx_info.
+			    ctx_kptr;
+
+			if (!aead_ctx_p->is_adata_processed) {
+				rc = prepare_adata_for_sep(op_ctx,
+							   data_in,
+							   data_in_size);
+				/* no more invocation to adata process
+				 * is allowed */
+				aead_ctx_p->is_adata_processed = 1;
+			} else {
+				/* additional data may be processed
+				 * only once */
+				return -EPERM;
+			}
+		} else {
+			rc = prepare_cipher_data_for_sep(op_ctx,
+							 data_in, sgl_in,
+							 data_out, sgl_out,
+							 data_in_size);
+		}
+		break;
+	case ALG_CLASS_MAC:
+	case ALG_CLASS_HASH:
+		if ((alg_class == ALG_CLASS_MAC) &&
+		    (ctxmgr_get_mac_type(&op_ctx->ctx_info) != DXDI_MAC_HMAC)) {
+			/* Handle all MACs but HMAC */
+			if (data_in_size == 0) {	/* No data to prepare */
+				rc = 0;
+				break;
+			}
+#if 0
+			/* Skip checking the user out pointer due to a CRYS
+			 * API limitation */
+			if (data_out != NULL) {
+				pr_err("data_out!=NULL for MAC\n");
+				return -EINVAL;
+			}
+#endif
+			rc = prepare_mac_data_for_sep(op_ctx, data_in, sgl_in,
+						      data_in_size);
+			break;
+		}
+
+		/* else: HASH or HMAC require the same handling */
+		rc = prepare_hash_data_for_sep(op_ctx,
+					       (data_intent ==
+						CRYPTO_DATA_TEXT_FINALIZE),
+					       data_in, sgl_in, data_in_size);
+		break;
+
+	default:
+		pr_err("Invalid algorithm class %d in context\n",
+			    alg_class);
+		/* probably context was corrupted since init. phase */
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/**
+ * prepare_combined_data_for_sep() - Prepare combined data for processing by SeP
+ * @op_ctx:
+ * @data_in:
+ * @data_out:
+ * @data_in_size:	 data_in buffer size (and data_out's if not NULL)
+ * @data_intent:	 the purpose of the given data
+ *
+ * Prepare combined data for processing by SeP
+ * (common flow for sep_proc_dblk and sep_fin_proc).
+ * Returns int
+ */
+static int prepare_combined_data_for_sep(struct sep_op_ctx *op_ctx,
+					 u8 __user *data_in,
+					 u8 __user *data_out,
+					 u32 data_in_size,
+					 enum crypto_data_intent data_intent)
+{
+	int rc;
+
+	if (data_intent == CRYPTO_DATA_TEXT) {
+		/* restrict data unit size to the max block size multiple */
+		if (!IS_MULT_OF(data_in_size, SEP_HASH_BLOCK_SIZE_MAX)) {
+			pr_err(
+				    "Data unit size (%u) is not HASH block multiple\n",
+				    data_in_size);
+			op_ctx->error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+			return -EINVAL;
+		}
+	} else if (data_intent == CRYPTO_DATA_TEXT_FINALIZE) {
+		/* user may finalize with zero or AES block size multiple */
+		if (!IS_MULT_OF(data_in_size, SEP_AES_BLOCK_SIZE)) {
+			pr_err("Data size (%u), not AES block multiple\n",
+				    data_in_size);
+			op_ctx->error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+			return -EINVAL;
+		}
+	}
+
+	pr_debug("data_in=0x%08lX data_out=0x%08lX data_in_size=%uB\n",
+		      (unsigned long)data_in, (unsigned long)data_out,
+		      data_in_size);
+
+	pr_debug("alg_class = COMBINED\n");
+	if (data_out == NULL)
+		rc = prepare_mac_data_for_sep(op_ctx,
+					      data_in, NULL, data_in_size);
+	else
+		rc = prepare_cipher_data_for_sep(op_ctx,
+						 data_in, NULL, data_out, NULL,
+						 data_in_size);
+
+	return rc;
+}
+
+/**
+ * sep_proc_dblk() - Process data block
+ * @op_ctx:
+ * @context_buf:
+ * @data_block_type:
+ * @data_in:
+ * @data_out:
+ * @data_in_size:
+ *
+ * Returns int
+ */
+static int sep_proc_dblk(struct sep_op_ctx *op_ctx,
+			 u32 __user *context_buf,
+			 enum dxdi_data_block_type data_block_type,
+			 u8 __user *data_in,
+			 u8 __user *data_out, u32 data_in_size)
+{
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	int rc;
+	int sep_cache_load_required;
+	int sep_ctx_init_required = 0;
+	enum crypto_alg_class alg_class;
+	enum host_ctx_state ctx_state;
+
+	if (data_in_size == 0) {
+		pr_err("Got empty data_in\n");
+		op_ctx->error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+		return -EINVAL;
+	}
+
+	rc = map_ctx_for_proc(op_ctx->client_ctx, &op_ctx->ctx_info,
+			      context_buf, &ctx_state);
+	if (rc != 0) {
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		return rc;
+	}
+	op_ctx->op_type = SEP_OP_CRYPTO_PROC;
+	if (ctx_state == CTX_STATE_PARTIAL_INIT) {
+		/* case of postponed sep context init. */
+		sep_ctx_init_required = 1;
+		op_ctx->op_type |= SEP_OP_CRYPTO_INIT;
+	} else if (ctx_state != CTX_STATE_INITIALIZED) {
+		pr_err("Context in invalid state for processing %d\n",
+			    ctx_state);
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		rc = -EINVAL;
+		goto unmap_ctx_and_return;
+	}
+	alg_class = ctxmgr_get_alg_class(&op_ctx->ctx_info);
+
+	rc = prepare_data_for_sep(op_ctx, data_in, NULL, data_out, NULL,
+				  data_in_size,
+				  (data_block_type ==
+				   DXDI_DATA_TYPE_TEXT ? CRYPTO_DATA_TEXT :
+				   CRYPTO_DATA_ADATA));
+	if (rc != 0) {
+		if (rc == -ENOTBLK) {
+			/* Did not accumulate even a single hash block. */
+			/* The data_in was already copied into the context,
+			 * appended to existing data. Report success, no-op. */
+			op_ctx->error_info = DXDI_ERROR_NULL;
+			rc = 0;
+		}
+		goto unmap_ctx_and_return;
+	}
+
+	if (sep_ctx_init_required) {
+		/* If this flag is set it implies that we have updated
+		 * parts of the sep_ctx structure during data preparation -
+		 * need to sync. context to memory (from host cache...) */
+#ifdef DEBUG
+		ctxmgr_dump_sep_ctx(&op_ctx->ctx_info);
+#endif
+		ctxmgr_sync_sep_ctx(&op_ctx->ctx_info, drvdata->sep_data->dev);
+	}
+
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc == 0) {		/* coupled sequence */
+		ctxmgr_set_sep_cache_idx(&op_ctx->ctx_info,
+			 ctxmgr_sep_cache_alloc(drvdata->
+						sep_cache,
+						ctxmgr_get_ctx_id
+						(&op_ctx->ctx_info),
+						&sep_cache_load_required));
+		rc = send_crypto_op_desc(op_ctx, sep_cache_load_required,
+					 sep_ctx_init_required,
+					 (data_block_type ==
+					  DXDI_DATA_TYPE_TEXT ?
+					  SEP_PROC_MODE_PROC_T :
+					  SEP_PROC_MODE_PROC_A));
+		mutex_unlock(&drvdata->desc_queue_sequencer);
+	} else {
+		pr_err("Failed locking descQ sequencer[%u]\n",
+			    op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	}
+	if (rc == 0)
+		rc = wait_for_sep_op_result(op_ctx);
+
+	crypto_op_completion_cleanup(op_ctx);
+
+ unmap_ctx_and_return:
+	ctxmgr_unmap_user_ctx(&op_ctx->ctx_info);
+
+	return rc;
+}
+
+/**
+ * sep_fin_proc() - Finalize processing of given context with given (optional)
+ *			data
+ * @op_ctx:
+ * @context_buf:
+ * @data_in:
+ * @data_out:
+ * @data_in_size:
+ * @digest_or_mac_p:
+ * @digest_or_mac_size_p:
+ *
+ * Returns int
+ */
+static int sep_fin_proc(struct sep_op_ctx *op_ctx,
+			u32 __user *context_buf,
+			u8 __user *data_in,
+			u8 __user *data_out,
+			u32 data_in_size,
+			u8 *digest_or_mac_p,
+			u8 *digest_or_mac_size_p)
+{
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	int rc;
+	int sep_cache_load_required;
+	int sep_ctx_init_required = 0;
+	enum host_ctx_state ctx_state;
+
+	rc = map_ctx_for_proc(op_ctx->client_ctx, &op_ctx->ctx_info,
+			      context_buf, &ctx_state);
+	if (rc != 0) {
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		return rc;
+	}
+	if (ctx_state == CTX_STATE_PARTIAL_INIT) {
+		/* case of postponed sep context init. */
+		sep_ctx_init_required = 1;
+	} else if (ctx_state != CTX_STATE_INITIALIZED) {
+		pr_err("Context in invalid state for finalizing %d\n",
+			    ctx_state);
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		rc = -EINVAL;
+		goto data_prepare_err;
+	}
+
+	op_ctx->op_type = SEP_OP_CRYPTO_FINI;
+
+	rc = prepare_data_for_sep(op_ctx, data_in, NULL, data_out, NULL,
+				  data_in_size, CRYPTO_DATA_TEXT_FINALIZE);
+	if (rc != 0)
+		goto data_prepare_err;
+
+	if (sep_ctx_init_required) {
+		/* If this flag is set it implies that we have updated
+		 * parts of the sep_ctx structure during data preparation -
+		 * need to sync. context to memory (from host cache...) */
+#ifdef DEBUG
+		ctxmgr_dump_sep_ctx(&op_ctx->ctx_info);
+#endif
+		ctxmgr_sync_sep_ctx(&op_ctx->ctx_info, drvdata->sep_data->dev);
+	}
+
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc == 0) {		/* Coupled sequence */
+		ctxmgr_set_sep_cache_idx(&op_ctx->ctx_info,
+				 ctxmgr_sep_cache_alloc(drvdata->sep_cache,
+					ctxmgr_get_ctx_id(&op_ctx->ctx_info),
+					&sep_cache_load_required));
+		rc = send_crypto_op_desc(op_ctx, sep_cache_load_required,
+					 sep_ctx_init_required,
+					 SEP_PROC_MODE_FIN);
+		mutex_unlock(&drvdata->desc_queue_sequencer);
+	} else {
+		pr_err("Failed locking descQ sequencer[%u]\n",
+			    op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	}
+	if (rc == 0)
+		rc = wait_for_sep_op_result(op_ctx);
+
+	if ((rc == 0) && (op_ctx->error_info == 0)) {
+		/* Digest or MAC are embedded in the SeP/FW context */
+		*digest_or_mac_size_p =
+		    ctxmgr_get_digest_or_mac(&op_ctx->ctx_info,
+					     digest_or_mac_p);
+		/* If the above is not applicable for the given algorithm,
+		 * *digest_or_mac_size_p is set to 0 */
+	} else {
+		/* Nothing valid in digest_or_mac_p */
+		*digest_or_mac_size_p = 0;
+	}
+
+	crypto_op_completion_cleanup(op_ctx);
+
+ data_prepare_err:
+	ctxmgr_unmap_user_ctx(&op_ctx->ctx_info);
+
+	return rc;
+}
+
+/**
+ * sep_combined_proc_dblk() - Process Combined operation block
+ * @op_ctx:	Operation context
+ * @config:	Combined operation configuration scheme
+ * @data_in:	Pointer to the user input data
+ * @data_out:	Pointer to the user output data buffer
+ * @data_in_size:	Size of the input data block
+ *
+ * Returns int
+ */
+static int sep_combined_proc_dblk(struct sep_op_ctx *op_ctx,
+				  struct dxdi_combined_props *config,
+				  u8 __user *data_in,
+				  u8 __user *data_out,
+				  u32 data_in_size)
+{
+	int rc;
+	int ctx_idx, ctx_mapped_n = 0;
+	int sep_cache_load_required[DXDI_COMBINED_NODES_MAX];
+	enum host_ctx_state ctx_state;
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct client_crypto_ctx_info *ctx_info_p = &(op_ctx->ctx_info);
+	u32 cfg_scheme;
+
+	if (data_in_size == 0) {
+		pr_err("Got empty data_in\n");
+		op_ctx->error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+		return -EINVAL;
+	}
+
+	/* assume nothing to load */
+	memset(sep_cache_load_required, 0, sizeof(sep_cache_load_required));
+
+	/* set the number of active contexts */
+	op_ctx->ctx_info_num = get_num_of_ctx_info(config);
+
+	/* map each context in the configuration scheme */
+	for (ctx_idx = 0; ctx_idx < op_ctx->ctx_info_num;
+	     ctx_idx++, ctx_info_p++) {
+		/* context already initialized by the user */
+		rc = map_ctx_for_proc(op_ctx->client_ctx,
+				      ctx_info_p,
+				      config->node_props[ctx_idx].context,
+				      /*user ctx */ &ctx_state);
+		if (rc != 0) {
+			op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+			goto unmap_ctx_and_return;
+		}
+
+		ctx_mapped_n++;	/*ctx mapped successfully */
+
+		/* context must be initialized */
+		if (ctx_state != CTX_STATE_INITIALIZED) {
+			pr_err(
+				    "Given context [%d] in invalid state (%d) for processing\n",
+				    ctx_idx, ctx_state);
+			op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+			rc = -EINVAL;
+			goto unmap_ctx_and_return;
+		}
+	}
+
+	op_ctx->op_type = SEP_OP_CRYPTO_PROC;
+
+	/* Construct SeP combined scheme */
+	cfg_scheme = format_sep_combined_cfg_scheme(config, op_ctx);
+	pr_debug("SeP Config. Scheme: 0x%08X\n", cfg_scheme);
+
+	rc = prepare_combined_data_for_sep(op_ctx, data_in, data_out,
+					   data_in_size, CRYPTO_DATA_TEXT);
+	if (unlikely(rc != 0)) {
+		pr_err(
+			    "Failed preparing DMA buffers (rc=%d, err_info=0x%08X)\n",
+			    rc, op_ctx->error_info);
+		goto unmap_ctx_and_return;
+	}
+
+	ctx_info_p = &(op_ctx->ctx_info);
+
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc == 0) {		/* coupled sequence */
+		for (ctx_idx = 0; ctx_idx < op_ctx->ctx_info_num;
+		     ctx_idx++, ctx_info_p++) {
+			ctxmgr_set_sep_cache_idx(ctx_info_p,
+						 ctxmgr_sep_cache_alloc
+						 (drvdata->sep_cache,
+						  ctxmgr_get_ctx_id(ctx_info_p),
+						  &sep_cache_load_required
+						  [ctx_idx]));
+		}
+
+		rc = send_combined_op_desc(op_ctx,
+					   sep_cache_load_required, 0 /*INIT*/,
+					   SEP_PROC_MODE_PROC_T, cfg_scheme);
+		mutex_unlock(&drvdata->desc_queue_sequencer);
+	} else {
+		pr_err("Failed locking descQ sequencer[%u]\n",
+			    op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	}
+	if (rc == 0)
+		rc = wait_for_sep_op_result(op_ctx);
+
+	crypto_op_completion_cleanup(op_ctx);
+
+ unmap_ctx_and_return:
+	ctx_info_p = &(op_ctx->ctx_info);
+
+	for (ctx_idx = 0; ctx_idx < ctx_mapped_n; ctx_idx++, ctx_info_p++)
+		ctxmgr_unmap_user_ctx(ctx_info_p);
+
+	return rc;
+}
+
+/**
+ * sep_combined_fin_proc() - Finalize Combined processing
+ *			     with given (optional) data
+ * @op_ctx:	Operation context
+ * @config:	Combined operation configuration scheme
+ * @data_in:	Pointer to the (optional) last user input data block
+ * @data_out:	Pointer to the user output data buffer
+ * @data_in_size:	Size of the last data block (may be 0)
+ * @auth_data_p:	Buffer to return the authentication data (if applicable)
+ * @auth_data_size_p:	Returned authentication data size (0 if not applicable)
+ *
+ * Returns int
+ */
+static int sep_combined_fin_proc(struct sep_op_ctx *op_ctx,
+				 struct dxdi_combined_props *config,
+				 u8 __user *data_in,
+				 u8 __user *data_out,
+				 u32 data_in_size,
+				 u8 *auth_data_p,
+				 u8 *auth_data_size_p)
+{
+	int rc;
+	int ctx_idx, ctx_mapped_n = 0;
+	int sep_cache_load_required[DXDI_COMBINED_NODES_MAX];
+	enum host_ctx_state ctx_state;
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct client_crypto_ctx_info *ctx_info_p = &(op_ctx->ctx_info);
+	u32 cfg_scheme;
+
+	/* assume nothing to load */
+	memset(sep_cache_load_required, 0, sizeof(sep_cache_load_required));
+
+	/* set the number of active contexts */
+	op_ctx->ctx_info_num = get_num_of_ctx_info(config);
+
+	/* map each context in the configuration scheme */
+	for (ctx_idx = 0; ctx_idx < op_ctx->ctx_info_num;
+	     ctx_idx++, ctx_info_p++) {
+		/* context already initialized by the user */
+		rc = map_ctx_for_proc(op_ctx->client_ctx,
+				      ctx_info_p,
+				      config->node_props[ctx_idx].context,
+				      /*user ctx */ &ctx_state);
+		if (rc != 0) {
+			op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+			goto unmap_ctx_and_return;
+		}
+
+		ctx_mapped_n++;	/*ctx mapped successfully */
+
+		/* context must be initialized */
+		if (ctx_state != CTX_STATE_INITIALIZED) {
+			pr_err(
+				    "Given context [%d] in invalid state (%d) for processing\n",
+				    ctx_idx, ctx_state);
+			rc = -EINVAL;
+			op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+			goto unmap_ctx_and_return;
+		}
+	}
+
+	op_ctx->op_type = SEP_OP_CRYPTO_FINI;
+
+	/* Construct SeP combined scheme */
+	cfg_scheme = format_sep_combined_cfg_scheme(config, op_ctx);
+	pr_debug("SeP Config. Scheme: 0x%08X\n", cfg_scheme);
+
+	rc = prepare_combined_data_for_sep(op_ctx, data_in, data_out,
+					   data_in_size,
+					   CRYPTO_DATA_TEXT_FINALIZE);
+	if (rc != 0)
+		goto unmap_ctx_and_return;
+
+	ctx_info_p = &(op_ctx->ctx_info);
+
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc == 0) {		/* Coupled sequence */
+		for (ctx_idx = 0; ctx_idx < op_ctx->ctx_info_num;
+		     ctx_idx++, ctx_info_p++) {
+			ctxmgr_set_sep_cache_idx(ctx_info_p,
+						 ctxmgr_sep_cache_alloc
+						 (drvdata->sep_cache,
+						  ctxmgr_get_ctx_id(ctx_info_p),
+						  &sep_cache_load_required
+						  [ctx_idx]));
+		}
+		rc = send_combined_op_desc(op_ctx,
+					   sep_cache_load_required, 0 /*INIT*/,
+					   SEP_PROC_MODE_FIN, cfg_scheme);
+
+		mutex_unlock(&drvdata->desc_queue_sequencer);
+	} else {
+		pr_err("Failed locking descQ sequencer[%u]\n",
+			    op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	}
+	if (rc == 0)
+		rc = wait_for_sep_op_result(op_ctx);
+
+	if (auth_data_size_p != NULL) {
+		if ((rc == 0) && (op_ctx->error_info == 0)) {
+			ctx_info_p = &(op_ctx->ctx_info);
+			ctx_info_p += op_ctx->ctx_info_num - 1;
+
+			/* Auth data embedded in the last SeP/FW context */
+			*auth_data_size_p =
+			    ctxmgr_get_digest_or_mac(ctx_info_p,
+						     auth_data_p);
+		} else {	/* Failure */
+			*auth_data_size_p = 0;	/* Nothing valid */
+		}
+	}
+
+	crypto_op_completion_cleanup(op_ctx);
+
+ unmap_ctx_and_return:
+	ctx_info_p = &(op_ctx->ctx_info);
+
+	for (ctx_idx = 0; ctx_idx < ctx_mapped_n; ctx_idx++, ctx_info_p++)
+		ctxmgr_unmap_user_ctx(ctx_info_p);
+
+	return rc;
+}
+
+/**
+ * process_combined_integrated() - Integrated processing of
+ *				   Combined data (init+proc+fin)
+ * @op_ctx:	Operation context
+ * @config:	Combined operation configuration scheme
+ * @data_in:	Pointer to the user input data
+ * @data_out:	Pointer to the user output data buffer
+ * @data_in_size:	Size of the input data
+ * @auth_data_p:	Buffer to return the authentication data (if applicable)
+ * @auth_data_size_p:	Returned authentication data size (0 if not applicable)
+ *
+ * Returns int
+ */
+static int process_combined_integrated(struct sep_op_ctx *op_ctx,
+				       struct dxdi_combined_props *config,
+				       u8 __user *data_in,
+				       u8 __user *data_out,
+				       u32 data_in_size,
+				       u8 *auth_data_p,
+				       u8 *auth_data_size_p)
+{
+	int rc;
+	int ctx_idx, ctx_mapped_n = 0;
+	int sep_cache_load_required[DXDI_COMBINED_NODES_MAX];
+	enum host_ctx_state ctx_state;
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct client_crypto_ctx_info *ctx_info_p = &(op_ctx->ctx_info);
+	u32 cfg_scheme;
+
+	/* assume nothing to load */
+	memset(sep_cache_load_required, 0, sizeof(sep_cache_load_required));
+
+	/* set the number of active contexts */
+	op_ctx->ctx_info_num = get_num_of_ctx_info(config);
+
+	/* map each context in the configuration scheme */
+	for (ctx_idx = 0; ctx_idx < op_ctx->ctx_info_num;
+	     ctx_idx++, ctx_info_p++) {
+		/* context already initialized by the user */
+		rc = map_ctx_for_proc(op_ctx->client_ctx,
+				      ctx_info_p,
+				      config->node_props[ctx_idx].context,
+				      &ctx_state);
+		if (rc != 0) {
+			op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+			goto unmap_ctx_and_return;
+		}
+
+		ctx_mapped_n++;	/*ctx mapped successfully */
+
+		if (ctx_state != CTX_STATE_INITIALIZED) {
+			pr_err(
+				    "Given context [%d] in invalid state (%d) for processing\n",
+				    ctx_idx, ctx_state);
+			op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+			rc = -EINVAL;
+			goto unmap_ctx_and_return;
+		}
+	}
+
+	op_ctx->op_type = SEP_OP_CRYPTO_FINI;
+	/* reconstruct combined scheme */
+	cfg_scheme = format_sep_combined_cfg_scheme(config, op_ctx);
+	pr_debug("SeP Config. Scheme: 0x%08X\n", cfg_scheme);
+
+	rc = prepare_combined_data_for_sep(op_ctx, data_in, data_out,
+					   data_in_size,
+					   CRYPTO_DATA_TEXT_FINALIZE /*last */
+					   );
+	if (rc != 0)
+		goto unmap_ctx_and_return;
+
+	ctx_info_p = &(op_ctx->ctx_info);
+
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc == 0) {		/* Coupled sequence */
+		for (ctx_idx = 0; ctx_idx < op_ctx->ctx_info_num;
+		     ctx_idx++, ctx_info_p++) {
+			ctxmgr_set_sep_cache_idx(ctx_info_p,
+						 ctxmgr_sep_cache_alloc
+						 (drvdata->sep_cache,
+						  ctxmgr_get_ctx_id(ctx_info_p),
+						  &sep_cache_load_required
+						  [ctx_idx]));
+		}
+		rc = send_combined_op_desc(op_ctx,
+					   sep_cache_load_required, 1 /*INIT*/,
+					   SEP_PROC_MODE_FIN, cfg_scheme);
+
+		mutex_unlock(&drvdata->desc_queue_sequencer);
+	} else {
+		pr_err("Failed locking descQ sequencer[%u]\n",
+			    op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	}
+	if (rc == 0)
+		rc = wait_for_sep_op_result(op_ctx);
+
+	if (auth_data_size_p != NULL) {
+		if ((rc == 0) && (op_ctx->error_info == 0)) {
+			ctx_info_p = &(op_ctx->ctx_info);
+			ctx_info_p += op_ctx->ctx_info_num - 1;
+
+			/* Auth data embedded in the last SeP/FW context */
+			*auth_data_size_p =
+			    ctxmgr_get_digest_or_mac(ctx_info_p,
+						     auth_data_p);
+		} else {	/* Failure */
+			*auth_data_size_p = 0;	/* Nothing valid */
+		}
+	}
+
+	crypto_op_completion_cleanup(op_ctx);
+
+ unmap_ctx_and_return:
+	ctx_info_p = &(op_ctx->ctx_info);
+
+	for (ctx_idx = 0; ctx_idx < ctx_mapped_n; ctx_idx++, ctx_info_p++)
+		ctxmgr_unmap_user_ctx(ctx_info_p);
+
+	return rc;
+}
+
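+/*
+ * Note: process_combined_integrated() differs from sep_combined_fin_proc()
+ * mainly in the descriptor it sends: the INIT flag is set, so SeP also
+ * initializes the loaded contexts within the same load+init+fin sequence
+ * instead of relying on an earlier init descriptor.
+ */
+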
+/**
+ * process_integrated() - Integrated processing of data (init+proc+fin)
+ * @op_ctx:	Operation context
+ * @context_buf:	Pointer to the user context buffer
+ * @alg_class:	Algorithm class of the crypto context
+ * @props:	Initialization properties (see init_context)
+ * @data_in:	Pointer to the user input data
+ * @data_out:	Pointer to the user output data buffer
+ * @data_in_size:	Size of the input data
+ * @digest_or_mac_p:	Buffer to return the digest/MAC (if applicable)
+ * @digest_or_mac_size_p:	Returned digest/MAC size (0 if not applicable)
+ *
+ * Returns int
+ */
+static int process_integrated(struct sep_op_ctx *op_ctx,
+			      u32 __user *context_buf,
+			      enum crypto_alg_class alg_class,
+			      void *props,
+			      u8 __user *data_in,
+			      u8 __user *data_out,
+			      u32 data_in_size,
+			      u8 *digest_or_mac_p,
+			      u8 *digest_or_mac_size_p)
+{
+	int rc;
+	int sep_cache_load_required;
+	bool postpone_init;
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+
+	rc = ctxmgr_map_user_ctx(&op_ctx->ctx_info, drvdata->sep_data->dev,
+				 alg_class, context_buf);
+	if (rc != 0) {
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		return rc;
+	}
+	ctxmgr_set_ctx_state(&op_ctx->ctx_info, CTX_STATE_UNINITIALIZED);
+	/* Allocate a new Crypto context ID */
+	ctxmgr_set_ctx_id(&op_ctx->ctx_info,
+			  alloc_crypto_ctx_id(op_ctx->client_ctx));
+
+	/* Algorithm class specific initialization */
+	switch (alg_class) {
+	case ALG_CLASS_SYM_CIPHER:
+		rc = ctxmgr_init_symcipher_ctx(&op_ctx->ctx_info,
+					       (struct dxdi_sym_cipher_props *)
+					       props, &postpone_init,
+					       &op_ctx->error_info);
+		/* postpone_init is ignored because this is an integrated
+		 * operation - all required data is updated in the
+		 * context before the descriptor is sent */
+		break;
+	case ALG_CLASS_AUTH_ENC:
+		rc = ctxmgr_init_auth_enc_ctx(&op_ctx->ctx_info,
+					      (struct dxdi_auth_enc_props *)
+					      props, &op_ctx->error_info);
+		break;
+	case ALG_CLASS_MAC:
+		rc = ctxmgr_init_mac_ctx(&op_ctx->ctx_info,
+					 (struct dxdi_mac_props *)props,
+					 &op_ctx->error_info);
+		break;
+	case ALG_CLASS_HASH:
+		rc = ctxmgr_init_hash_ctx(&op_ctx->ctx_info,
+					  *((enum dxdi_hash_type *)props),
+					  &op_ctx->error_info);
+		break;
+	default:
+		pr_err("Invalid algorithm class %d\n", alg_class);
+		op_ctx->error_info = DXDI_ERROR_UNSUP;
+		rc = -EINVAL;
+	}
+	if (rc != 0)
+		goto unmap_ctx_and_exit;
+
+	ctxmgr_set_ctx_state(&op_ctx->ctx_info, CTX_STATE_PARTIAL_INIT);
+	op_ctx->op_type = SEP_OP_CRYPTO_FINI;	/* Integrated is also fin. */
+	rc = prepare_data_for_sep(op_ctx, data_in, NULL, data_out, NULL,
+				  data_in_size,
+				  CRYPTO_DATA_TEXT_FINALIZE /*last */);
+	if (rc != 0)
+		goto unmap_ctx_and_exit;
+
+#ifdef DEBUG
+	ctxmgr_dump_sep_ctx(&op_ctx->ctx_info);
+#endif
+	/* Flush sep_ctx out of host cache */
+	ctxmgr_sync_sep_ctx(&op_ctx->ctx_info, drvdata->sep_data->dev);
+
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc == 0) {
+		/* Allocate SeP context cache entry */
+		ctxmgr_set_sep_cache_idx(&op_ctx->ctx_info,
+				 ctxmgr_sep_cache_alloc(drvdata->sep_cache,
+					ctxmgr_get_ctx_id(&op_ctx->ctx_info),
+					&sep_cache_load_required));
+		if (!sep_cache_load_required)
+			pr_err("New context already in SeP cache?!\n");
+		/* Send descriptor with combined load+init+fin */
+		rc = send_crypto_op_desc(op_ctx, 1 /*load*/, 1 /*INIT*/,
+					 SEP_PROC_MODE_FIN);
+		mutex_unlock(&drvdata->desc_queue_sequencer);
+
+	} else {		/* failed acquiring mutex */
+		pr_err("Failed locking descQ sequencer[%u]\n",
+			    op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	}
+
+	if (rc == 0)
+		rc = wait_for_sep_op_result(op_ctx);
+
+	if (digest_or_mac_size_p != NULL) {
+		if ((rc == 0) && (op_ctx->error_info == 0)) {
+			/* Digest or MAC are embedded in the SeP/FW context */
+			*digest_or_mac_size_p =
+			    ctxmgr_get_digest_or_mac(&op_ctx->ctx_info,
+						     digest_or_mac_p);
+		} else {	/* Failure */
+			*digest_or_mac_size_p = 0;	/* Nothing valid */
+		}
+	}
+
+	/* Hash tail buffer is never used/mapped in integrated op -->
+	 * no need to unmap */
+
+	crypto_op_completion_cleanup(op_ctx);
+
+ unmap_ctx_and_exit:
+	ctxmgr_unmap_user_ctx(&op_ctx->ctx_info);
+
+	return rc;
+}
+
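+/*
+ * Integrated ("one-shot") processing above packs load+init+fin into a
+ * single descriptor: the context is initialized host-side, flushed from
+ * the host cache, then SeP loads, initializes and finalizes it in one
+ * sequence, with no intermediate context state round-trips.
+ */
+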
+/**
+ * process_integrated_auth_enc() - Integrated processing of aead
+ * @op_ctx:	Operation context
+ * @context_buf:	Pointer to the user context buffer
+ * @alg_class:	Algorithm class (ALG_CLASS_AUTH_ENC)
+ * @props:	Authenticated encryption initialization properties
+ * @data_header:	Pointer to the user additional/assoc. data
+ * @data_in:	Pointer to the user text data
+ * @data_out:	Pointer to the user output data buffer
+ * @data_header_size:	Size of the additional/assoc. data
+ * @data_in_size:	Size of the text data
+ * @mac_p:	Buffer to return the MAC (tag)
+ * @mac_size_p:	Returned MAC (tag) size
+ *
+ * Integrated processing of authenticate & encryption of data
+ * (init+proc_a+proc_t+fin)
+ * Returns int
+ */
+static int process_integrated_auth_enc(struct sep_op_ctx *op_ctx,
+				       u32 __user *context_buf,
+				       enum crypto_alg_class alg_class,
+				       void *props,
+				       u8 __user *data_header,
+				       u8 __user *data_in,
+				       u8 __user *data_out,
+				       u32 data_header_size,
+				       u32 data_in_size,
+				       u8 *mac_p, u8 *mac_size_p)
+{
+	int rc;
+	int sep_cache_load_required;
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+
+	rc = ctxmgr_map_user_ctx(&op_ctx->ctx_info, drvdata->sep_data->dev,
+				 alg_class, context_buf);
+	if (rc != 0) {
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		goto integ_ae_exit;
+	}
+
+	ctxmgr_set_ctx_state(&op_ctx->ctx_info, CTX_STATE_UNINITIALIZED);
+	/* Allocate a new Crypto context ID */
+	ctxmgr_set_ctx_id(&op_ctx->ctx_info,
+			  alloc_crypto_ctx_id(op_ctx->client_ctx));
+
+	/* initialization */
+	rc = ctxmgr_init_auth_enc_ctx(&op_ctx->ctx_info,
+				      (struct dxdi_auth_enc_props *)props,
+				      &op_ctx->error_info);
+	if (rc != 0) {
+		ctxmgr_unmap_user_ctx(&op_ctx->ctx_info);
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		goto integ_ae_exit;
+	}
+
+	ctxmgr_set_ctx_state(&op_ctx->ctx_info, CTX_STATE_PARTIAL_INIT);
+	/* Op. type is to init. the context and process Adata */
+	op_ctx->op_type = SEP_OP_CRYPTO_INIT | SEP_OP_CRYPTO_PROC;
+	/* prepare additional/assoc data */
+	rc = prepare_data_for_sep(op_ctx, data_header, NULL, NULL, NULL,
+				  data_header_size, CRYPTO_DATA_ADATA);
+	if (rc != 0) {
+		ctxmgr_unmap_user_ctx(&op_ctx->ctx_info);
+		goto integ_ae_exit;
+	}
+#ifdef DEBUG
+	ctxmgr_dump_sep_ctx(&op_ctx->ctx_info);
+#endif
+	/* Flush sep_ctx out of host cache */
+	ctxmgr_sync_sep_ctx(&op_ctx->ctx_info, drvdata->sep_data->dev);
+
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc == 0) {
+		/* Allocate SeP context cache entry */
+		ctxmgr_set_sep_cache_idx(&op_ctx->ctx_info,
+			 ctxmgr_sep_cache_alloc(drvdata->sep_cache,
+					ctxmgr_get_ctx_id(&op_ctx->ctx_info),
+					&sep_cache_load_required));
+		if (!sep_cache_load_required)
+			pr_err("New context already in SeP cache?!\n");
+		/* Send descriptor with combined load+init+proc_a */
+		rc = send_crypto_op_desc(op_ctx, 1 /*load*/, 1 /*INIT*/,
+					 SEP_PROC_MODE_PROC_A);
+		mutex_unlock(&drvdata->desc_queue_sequencer);
+
+	} else {		/* failed acquiring mutex */
+		pr_err("Failed locking descQ sequencer[%u]\n",
+			    op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	}
+
+	/* set status and cleanup last descriptor */
+	if (rc == 0)
+		rc = wait_for_sep_op_result(op_ctx);
+	crypto_op_completion_cleanup(op_ctx);
+	ctxmgr_unmap_user_ctx(&op_ctx->ctx_info);
+
+	/* process text data only on adata success */
+	if ((rc == 0) && (op_ctx->error_info == 0)) {/* Init+Adata succeeded */
+		/* reset pending descriptor and preserve operation
+		 * context for the finalize phase */
+		op_ctx->pending_descs_cntr = 1;
+		/* process & finalize operation with entire user data */
+		rc = sep_fin_proc(op_ctx, context_buf, data_in,
+				  data_out, data_in_size, mac_p, mac_size_p);
+	}
+
+ integ_ae_exit:
+	return rc;
+}
+
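+/*
+ * Summary of the two-descriptor AEAD flow above:
+ *   1. A load+init+proc_a descriptor initializes the context and
+ *      processes the additional/assoc. data (SEP_PROC_MODE_PROC_A).
+ *   2. sep_fin_proc() then processes the text data and finalizes,
+ *      retrieving the MAC/tag from the SeP/FW context.
+ */
+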
+/**
+ * dxdi_data_dir_to_dma_data_dir() - Convert from DxDI DMA direction type to
+ *					Linux kernel DMA direction type
+ * @dxdi_dir:	 DMA direction in DxDI encoding
+ *
+ * Returns enum dma_data_direction
+ */
+enum dma_data_direction dxdi_data_dir_to_dma_data_dir(enum dxdi_data_direction
+						      dxdi_dir)
+{
+	switch (dxdi_dir) {
+	case DXDI_DATA_BIDIR:
+		return DMA_BIDIRECTIONAL;
+	case DXDI_DATA_TO_DEVICE:
+		return DMA_TO_DEVICE;
+	case DXDI_DATA_FROM_DEVICE:
+		return DMA_FROM_DEVICE;
+	default:
+		return DMA_NONE;
+	}
+}
+
+/**
+ * dispatch_sep_rpc() - Dispatch a SeP RPC descriptor and process results
+ * @op_ctx:	Operation context
+ * @agent_id:	SeP RPC agent ID
+ * @func_id:	Function ID within the given RPC agent
+ * @mem_refs:	Array of user memory references used by this call
+ * @rpc_params_size:	Size of the RPC parameters message
+ * @rpc_params:	Pointer to the user RPC parameters message
+ *
+ * Returns int
+ */
+static int dispatch_sep_rpc(struct sep_op_ctx *op_ctx,
+			    u16 agent_id,
+			    u16 func_id,
+			    struct dxdi_memref mem_refs[],
+			    unsigned long rpc_params_size,
+			    struct seprpc_params __user *rpc_params)
+{
+	int i, rc = 0;
+	struct sep_client_ctx *client_ctx = op_ctx->client_ctx;
+	struct queue_drvdata *drvdata = client_ctx->drv_data;
+	enum dma_data_direction dma_dir;
+	unsigned int num_of_mem_refs;
+	int memref_idx;
+	struct client_dma_buffer *local_dma_objs[SEP_RPC_MAX_MEMREF_PER_FUNC];
+	struct mlli_tables_list mlli_tables[SEP_RPC_MAX_MEMREF_PER_FUNC];
+	struct sep_sw_desc desc;
+	struct seprpc_params *rpc_msg_p;
+
+	/* Verify RPC message size */
+	if (unlikely(SEP_RPC_MAX_MSG_SIZE < rpc_params_size)) {
+		pr_err("Given rpc_params is too big (%lu B)\n",
+		       rpc_params_size);
+		return -EINVAL;
+	}
+
+	op_ctx->spad_buf_p = dma_pool_alloc(drvdata->sep_data->spad_buf_pool,
+					    GFP_KERNEL,
+					    &op_ctx->spad_buf_dma_addr);
+	if (unlikely(op_ctx->spad_buf_p == NULL)) {
+		pr_err("Fail: alloc from spad_buf_pool for RPC message\n");
+		return -ENOMEM;
+	}
+	rpc_msg_p = (struct seprpc_params *)op_ctx->spad_buf_p;
+
+	/* Copy params to DMA buffer of message */
+	rc = copy_from_user(rpc_msg_p, rpc_params, rpc_params_size);
+	if (rc) {
+		pr_err("Fail: copy RPC message from user at 0x%p, rc=%d\n",
+			    rpc_params, rc);
+		return -EFAULT;
+	}
+	/* Get the number of memory references in host endianness */
+	num_of_mem_refs = le32_to_cpu(rpc_msg_p->num_of_memrefs);
+
+	/* Handle user memory references - prepare DMA buffers */
+	if (unlikely(num_of_mem_refs > SEP_RPC_MAX_MEMREF_PER_FUNC)) {
+		pr_err("agent_id=%d func_id=%d: Invalid # of memref %u\n",
+			    agent_id, func_id, num_of_mem_refs);
+		return -EINVAL;
+	}
+	for (i = 0; i < num_of_mem_refs; i++) {
+		/* Init. tables lists for proper cleanup */
+		MLLI_TABLES_LIST_INIT(mlli_tables + i);
+		local_dma_objs[i] = NULL;
+	}
+	for (i = 0; i < num_of_mem_refs; i++) {
+		pr_debug(
+			"memref[%d]: id=%d dma_dir=%d start/offset 0x%08x size %u\n",
+			i, mem_refs[i].ref_id, mem_refs[i].dma_direction,
+			mem_refs[i].start_or_offset, mem_refs[i].size);
+
+		/* convert DMA direction to enum dma_data_direction */
+		dma_dir =
+		    dxdi_data_dir_to_dma_data_dir(mem_refs[i].dma_direction);
+		if (unlikely(dma_dir == DMA_NONE)) {
+			pr_err(
+				    "agent_id=%d func_id=%d: Invalid DMA direction (%d) for memref %d\n",
+				    agent_id, func_id,
+				    mem_refs[i].dma_direction, i);
+			rc = -EINVAL;
+			break;
+		}
+		/* Temporary memory registration if needed */
+		if (IS_VALID_MEMREF_IDX(mem_refs[i].ref_id)) {
+			memref_idx = mem_refs[i].ref_id;
+			if (unlikely(mem_refs[i].start_or_offset != 0)) {
+				pr_err(
+					    "Offset in memref is not supported for RPC.\n");
+				rc = -EINVAL;
+				break;
+			}
+		} else {
+			memref_idx = register_client_memref(client_ctx,
+					(u8 __user *)mem_refs[i].start_or_offset,
+					NULL, mem_refs[i].size, dma_dir);
+			if (unlikely(!IS_VALID_MEMREF_IDX(memref_idx))) {
+				pr_err("Fail: temp memory registration\n");
+				rc = -ENOMEM;
+				break;
+			}
+		}
+		/* MLLI table creation */
+		local_dma_objs[i] = acquire_dma_obj(client_ctx, memref_idx);
+		if (unlikely(local_dma_objs[i] == NULL)) {
+			pr_err("Failed acquiring DMA objects.\n");
+			rc = -ENOMEM;
+			break;
+		}
+		if (unlikely(local_dma_objs[i]->buf_size != mem_refs[i].size)) {
+			pr_err("RPC: Partial memory ref not supported.\n");
+			rc = -EINVAL;
+			break;
+		}
+		rc = llimgr_create_mlli(drvdata->sep_data->llimgr,
+					mlli_tables + i, dma_dir,
+					local_dma_objs[i], 0, 0);
+		if (unlikely(rc != 0))
+			break;
+		llimgr_mlli_to_seprpc_memref(&(mlli_tables[i]),
+					     &(rpc_msg_p->memref[i]));
+	}
+
+	op_ctx->op_type = SEP_OP_RPC;
+	if (rc == 0) {
+		/* Pack SW descriptor */
+		desc_q_pack_rpc_desc(&desc, op_ctx, agent_id, func_id,
+				     rpc_params_size,
+				     op_ctx->spad_buf_dma_addr);
+		op_ctx->op_state = USER_OP_INPROC;
+		/* Enqueue descriptor */
+		rc = desc_q_enqueue(drvdata->desc_queue, &desc, true);
+		if (likely(!IS_DESCQ_ENQUEUE_ERR(rc)))
+			rc = 0;
+	}
+
+	if (likely(rc == 0))
+		rc = wait_for_sep_op_result(op_ctx);
+	else
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+
+	/* Process descriptor completion */
+	if ((rc == 0) && (op_ctx->error_info == 0)) {
+		/* Copy back RPC message buffer */
+		rc = copy_to_user(rpc_params, rpc_msg_p, rpc_params_size);
+		if (rc) {
+			pr_err(
+				    "Failed copying back RPC parameters/message to user at 0x%p (rc=%d)\n",
+				    rpc_params, rc);
+			rc = -EFAULT;
+		}
+	}
+	op_ctx->op_state = USER_OP_NOP;
+	for (i = 0; i < num_of_mem_refs; i++) {
+		/* Safe to call for all - uninitialized MLLI tables have
+		 * table_count == 0 */
+		llimgr_destroy_mlli(drvdata->sep_data->llimgr, mlli_tables + i);
+		if (local_dma_objs[i] != NULL) {
+			release_dma_obj(client_ctx, local_dma_objs[i]);
+			memref_idx =
+			    DMA_OBJ_TO_MEMREF_IDX(client_ctx,
+						  local_dma_objs[i]);
+			if (memref_idx != mem_refs[i].ref_id) {
+				/* Memory reference was temp. registered */
+				(void)free_client_memref(client_ctx,
+							 memref_idx);
+			}
+		}
+	}
+
+	return rc;
+}
+
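+/*
+ * Note: memory references passed to dispatch_sep_rpc() by address (i.e.,
+ * without a valid ref_id) are registered temporarily and freed in the
+ * cleanup loop above; pre-registered references are only released.
+ */
+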
+#if defined(SEP_PRINTF) && defined(DEBUG)
+/* Replace component mask */
+#undef SEP_LOG_CUR_COMPONENT
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_SEP_PRINTF
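+/*
+ * SeP printf protocol, as implied by the handler below: the S2H GPR
+ * carries one character in bits [7:0] and a running sequence counter in
+ * bits [31:8]; the host acks by writing the received counter value back
+ * to the H2S GPR, letting SeP push the next character.
+ */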
+void sep_printf_handler(struct sep_drvdata *drvdata)
+{
+	int cur_ack_cntr;
+	u32 gpr_val;
+	int i;
+
+	/* Reduce interrupts by polling until no more characters */
+	/* Loop for at most a line - to avoid infinite looping in wq ctx */
+	for (i = 0; i < SEP_PRINTF_LINE_SIZE; i++) {
+
+		gpr_val = READ_REGISTER(drvdata->cc_base +
+					SEP_PRINTF_S2H_GPR_OFFSET);
+		cur_ack_cntr = gpr_val >> 8;
+		/*
+		 * ack as soon as possible (data is already in local variable)
+		 * let SeP push one more character until we finish processing
+		 */
+		WRITE_REGISTER(drvdata->cc_base + SEP_PRINTF_H2S_GPR_OFFSET,
+			       cur_ack_cntr);
+#if 0
+		pr_debug("%d. GPR=0x%08X (cur_ack=0x%08X , last=0x%08X)\n",
+			      i, gpr_val, cur_ack_cntr, drvdata->last_ack_cntr);
+#endif
+		if (cur_ack_cntr == drvdata->last_ack_cntr)
+			break;
+
+		/* Identify lost characters case */
+		if (cur_ack_cntr >
+		    ((drvdata->last_ack_cntr + 1) & SEP_PRINTF_ACK_MAX)) {
+			/* NULL terminate */
+			drvdata->line_buf[drvdata->cur_line_buf_offset] = 0;
+			if (sep_log_mask & SEP_LOG_CUR_COMPONENT)
+				pr_info("SeP(lost %d): %s",
+				       cur_ack_cntr - drvdata->last_ack_cntr
+				       - 1, drvdata->line_buf);
+			drvdata->cur_line_buf_offset = 0;
+		}
+		drvdata->last_ack_cntr = cur_ack_cntr;
+
+		drvdata->line_buf[drvdata->cur_line_buf_offset] =
+		    gpr_val & 0xFF;
+
+		/* Is end of line? */
+		if ((drvdata->line_buf[drvdata->cur_line_buf_offset] == '\n') ||
+		    (drvdata->line_buf[drvdata->cur_line_buf_offset] == 0) ||
+		    (drvdata->cur_line_buf_offset == (SEP_PRINTF_LINE_SIZE - 1))
+		    ) {
+			/* NULL terminate */
+			drvdata->line_buf[drvdata->cur_line_buf_offset + 1] = 0;
+			if (sep_log_mask & SEP_LOG_CUR_COMPONENT)
+				pr_info("SeP: %s", drvdata->line_buf);
+			drvdata->cur_line_buf_offset = 0;
+		} else {
+			drvdata->cur_line_buf_offset++;
+		}
+
+	}
+
+}
+
+/* Restore component mask */
+#undef SEP_LOG_CUR_COMPONENT
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_MAIN
+#endif				/* SEP_PRINTF && DEBUG */
+
+static int sep_interrupt_process(struct sep_drvdata *drvdata)
+{
+	u32 cause_reg = 0;
+	int i;
+
+	/* read the interrupt status */
+	cause_reg = READ_REGISTER(drvdata->cc_base +
+				  DX_CC_REG_OFFSET(HOST, IRR));
+
+	if (cause_reg == 0) {
+		/* pr_debug("Got interrupt with empty cause_reg\n"); */
+		return IRQ_NONE;
+	}
+#if 0
+	pr_debug("cause_reg=0x%08X gpr5=0x%08X\n", cause_reg,
+		      READ_REGISTER(drvdata->cc_base +
+				    SEP_PRINTF_S2H_GPR_OFFSET));
+#endif
+	/* clear interrupt */
+	WRITE_REGISTER(drvdata->cc_base + DX_CC_REG_OFFSET(HOST, ICR),
+		       cause_reg);
+
+#ifdef SEP_PRINTF
+	if (cause_reg & SEP_HOST_GPR_IRQ_MASK(DX_SEP_HOST_PRINTF_GPR_IDX)) {
+#ifdef DEBUG
+		sep_printf_handler(drvdata);
+#else				/* Just ack to SeP so it does not stall */
+		WRITE_REGISTER(drvdata->cc_base + SEP_PRINTF_H2S_GPR_OFFSET,
+			       READ_REGISTER(drvdata->cc_base +
+					     SEP_PRINTF_S2H_GPR_OFFSET) >> 8);
+#endif
+		/* handled */
+		cause_reg &= ~SEP_HOST_GPR_IRQ_MASK(DX_SEP_HOST_PRINTF_GPR_IDX);
+	}
+#endif
+
+	if (cause_reg & SEP_HOST_GPR_IRQ_MASK(DX_SEP_STATE_GPR_IDX)) {
+		dx_sep_state_change_handler(drvdata);
+		cause_reg &= ~SEP_HOST_GPR_IRQ_MASK(DX_SEP_STATE_GPR_IDX);
+	}
+
+	if (cause_reg & SEP_HOST_GPR_IRQ_MASK(DX_SEP_REQUEST_GPR_IDX)) {
+		if (drvdata->irq_mask &
+		    SEP_HOST_GPR_IRQ_MASK(DX_SEP_REQUEST_GPR_IDX))
+			dx_sep_req_handler(drvdata);
+		cause_reg &= ~SEP_HOST_GPR_IRQ_MASK(DX_SEP_REQUEST_GPR_IDX);
+	}
+
+	/* Check interrupt flag for each queue */
+	for (i = 0; cause_reg && i < drvdata->num_of_desc_queues; i++) {
+		if (cause_reg & gpr_interrupt_mask[i]) {
+			desc_q_process_completed(drvdata->queue[i].desc_queue);
+			cause_reg &= ~gpr_interrupt_mask[i];
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+#ifdef SEP_INTERRUPT_BY_TIMER
+static void sep_timer(unsigned long arg)
+{
+	struct sep_drvdata *drvdata = (struct sep_drvdata *)arg;
+
+	(void)sep_interrupt_process(drvdata);
+
+	mod_timer(&drvdata->delegate, jiffies + msecs_to_jiffies(10));
+}
+#else
+irqreturn_t sep_interrupt(int irq, void *dev_id)
+{
+	struct sep_drvdata *drvdata =
+	    (struct sep_drvdata *)dev_get_drvdata((struct device *)dev_id);
+
+	if (drvdata->sep_suspended) {
+		WARN(1, "sep_interrupt rise in suspend!");
+		return IRQ_HANDLED;
+	}
+
+	return sep_interrupt_process(drvdata);
+}
+#endif
+
+/***** IOCTL commands handlers *****/
+
+static int sep_ioctl_get_ver_major(unsigned long arg)
+{
+	u32 __user *ver_p = (u32 __user *)arg;
+	const u32 ver_major = DXDI_VER_MAJOR;
+
+	return put_user(ver_major, ver_p);
+}
+
+static int sep_ioctl_get_ver_minor(unsigned long arg)
+{
+	u32 __user *ver_p = (u32 __user *)arg;
+	const u32 ver_minor = DXDI_VER_MINOR;
+
+	return put_user(ver_minor, ver_p);
+}
+
+static int sep_ioctl_get_sym_cipher_ctx_size(unsigned long arg)
+{
+	struct dxdi_get_sym_cipher_ctx_size_params __user *user_params =
+	    (struct dxdi_get_sym_cipher_ctx_size_params __user *)arg;
+	enum dxdi_sym_cipher_type sym_cipher_type;
+	const u32 ctx_size = ctxmgr_get_ctx_size(ALG_CLASS_SYM_CIPHER);
+	int err;
+
+	err = __get_user(sym_cipher_type, &(user_params->sym_cipher_type));
+	if (err)
+		return err;
+
+	if (((sym_cipher_type >= _DXDI_SYMCIPHER_AES_FIRST) &&
+	     (sym_cipher_type <= _DXDI_SYMCIPHER_AES_LAST)) ||
+	    ((sym_cipher_type >= _DXDI_SYMCIPHER_DES_FIRST) &&
+	     (sym_cipher_type <= _DXDI_SYMCIPHER_DES_LAST)) ||
+	    ((sym_cipher_type >= _DXDI_SYMCIPHER_C2_FIRST) &&
+	     (sym_cipher_type <= _DXDI_SYMCIPHER_C2_LAST))
+	    ) {
+		pr_debug("sym_cipher_type=%u\n", sym_cipher_type);
+		return put_user(ctx_size, &(user_params->ctx_size));
+	} else {
+		pr_err("Invalid cipher type=%u\n", sym_cipher_type);
+		return -EINVAL;
+	}
+}
+
+static int sep_ioctl_get_auth_enc_ctx_size(unsigned long arg)
+{
+	struct dxdi_get_auth_enc_ctx_size_params __user *user_params =
+	    (struct dxdi_get_auth_enc_ctx_size_params __user *)arg;
+	enum dxdi_auth_enc_type ae_type;
+	const u32 ctx_size = ctxmgr_get_ctx_size(ALG_CLASS_AUTH_ENC);
+	int err;
+
+	err = __get_user(ae_type, &(user_params->ae_type));
+	if (err)
+		return err;
+
+	if ((ae_type == DXDI_AUTHENC_NONE) || (ae_type > DXDI_AUTHENC_MAX)) {
+		pr_err("Invalid auth-enc. type=%u\n", ae_type);
+		return -EINVAL;
+	}
+
+	pr_debug("A.E. type=%u\n", ae_type);
+	return put_user(ctx_size, &(user_params->ctx_size));
+}
+
+static int sep_ioctl_get_mac_ctx_size(unsigned long arg)
+{
+	struct dxdi_get_mac_ctx_size_params __user *user_params =
+	    (struct dxdi_get_mac_ctx_size_params __user *)arg;
+	enum dxdi_mac_type mac_type;
+	const u32 ctx_size = ctxmgr_get_ctx_size(ALG_CLASS_MAC);
+	int err;
+
+	err = __get_user(mac_type, &(user_params->mac_type));
+	if (err)
+		return err;
+
+	if ((mac_type == DXDI_MAC_NONE) || (mac_type > DXDI_MAC_MAX)) {
+		pr_err("Invalid MAC type=%u\n", mac_type);
+		return -EINVAL;
+	}
+
+	pr_debug("MAC type=%u\n", mac_type);
+	return put_user(ctx_size, &(user_params->ctx_size));
+}
+
+static int sep_ioctl_get_hash_ctx_size(unsigned long arg)
+{
+	struct dxdi_get_hash_ctx_size_params __user *user_params =
+	    (struct dxdi_get_hash_ctx_size_params __user *)arg;
+	enum dxdi_hash_type hash_type;
+	const u32 ctx_size = ctxmgr_get_ctx_size(ALG_CLASS_HASH);
+	int err;
+
+	err = __get_user(hash_type, &(user_params->hash_type));
+	if (err)
+		return err;
+
+	if ((hash_type == DXDI_HASH_NONE) || (hash_type > DXDI_HASH_MAX)) {
+		pr_err("Invalid hash type=%u\n", hash_type);
+		return -EINVAL;
+	}
+
+	pr_debug("hash type=%u\n", hash_type);
+	return put_user(ctx_size, &(user_params->ctx_size));
+}
+
+static int sep_ioctl_sym_cipher_init(struct sep_client_ctx *client_ctx,
+				     unsigned long arg)
+{
+	struct dxdi_sym_cipher_init_params __user *user_init_params =
+			(struct dxdi_sym_cipher_init_params __user *)arg;
+	struct dxdi_sym_cipher_init_params init_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+		offsetof(struct dxdi_sym_cipher_init_params, error_info);
+	int rc;
+
+	/* Access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&init_params, user_init_params, input_size)) {
+		pr_err("Failed reading input parameters\n");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+
+	rc = init_crypto_context(&op_ctx, init_params.context_buf,
+				 ALG_CLASS_SYM_CIPHER, &(init_params.props));
+
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_init_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_auth_enc_init(struct sep_client_ctx *client_ctx,
+				   unsigned long arg)
+{
+	struct dxdi_auth_enc_init_params __user *user_init_params =
+	    (struct dxdi_auth_enc_init_params __user *)arg;
+	struct dxdi_auth_enc_init_params init_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_auth_enc_init_params, error_info);
+	int rc;
+
+	/* Access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&init_params, user_init_params, input_size)) {
+		pr_err("Failed reading input parameters\n");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = init_crypto_context(&op_ctx, init_params.context_buf,
+				 ALG_CLASS_AUTH_ENC, &(init_params.props));
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_init_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_mac_init(struct sep_client_ctx *client_ctx,
+			      unsigned long arg)
+{
+	struct dxdi_mac_init_params __user *user_init_params =
+	    (struct dxdi_mac_init_params __user *)arg;
+	struct dxdi_mac_init_params init_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_mac_init_params, error_info);
+	int rc;
+
+	/* Access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&init_params, user_init_params, input_size)) {
+		pr_err("Failed reading input parameters\n");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = init_crypto_context(&op_ctx, init_params.context_buf,
+				 ALG_CLASS_MAC, &(init_params.props));
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_init_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_hash_init(struct sep_client_ctx *client_ctx,
+			       unsigned long arg)
+{
+	struct dxdi_hash_init_params __user *user_init_params =
+	    (struct dxdi_hash_init_params __user *)arg;
+	struct dxdi_hash_init_params init_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_hash_init_params, error_info);
+	int rc;
+
+	/* Access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&init_params, user_init_params, input_size)) {
+		pr_err("Failed reading input parameters\n");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = init_crypto_context(&op_ctx, init_params.context_buf,
+				 ALG_CLASS_HASH, &(init_params.hash_type));
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_init_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_proc_dblk(struct sep_client_ctx *client_ctx,
+			       unsigned long arg)
+{
+	struct dxdi_process_dblk_params __user *user_dblk_params =
+	    (struct dxdi_process_dblk_params __user *)arg;
+	struct dxdi_process_dblk_params dblk_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_process_dblk_params, error_info);
+	int rc;
+
+	/* Access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&dblk_params, user_dblk_params, input_size)) {
+		pr_err("Failed reading input parameters\n");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sep_proc_dblk(&op_ctx, dblk_params.context_buf,
+			   dblk_params.data_block_type,
+			   dblk_params.data_in, dblk_params.data_out,
+			   dblk_params.data_in_size);
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_dblk_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_fin_proc(struct sep_client_ctx *client_ctx,
+			      unsigned long arg)
+{
+	struct dxdi_fin_process_params __user *user_fin_params =
+	    (struct dxdi_fin_process_params __user *)arg;
+	struct dxdi_fin_process_params fin_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_fin_process_params, digest_or_mac);
+	int rc;
+
+	/* Access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&fin_params, user_fin_params, input_size)) {
+		pr_err("Failed reading input parameters\n");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sep_fin_proc(&op_ctx, fin_params.context_buf,
+			  fin_params.data_in, fin_params.data_out,
+			  fin_params.data_in_size,
+			  fin_params.digest_or_mac,
+			  &(fin_params.digest_or_mac_size));
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	if (rc == 0) {
+		/* Always copy back digest/mac size + error_info */
+		/* (that's the reason for keeping them together)  */
+		rc = put_user(fin_params.digest_or_mac_size,
+			      &user_fin_params->digest_or_mac_size);
+		rc += put_user(fin_params.error_info,
+				 &user_fin_params->error_info);
+
+		/* We always need to copy back the digest/mac size (even if 0)
+		 * in order to indicate validity of digest_or_mac buffer */
+	}
+	if ((rc == 0) && (op_ctx.error_info == 0) &&
+	    (fin_params.digest_or_mac_size > 0)) {
+		if (likely(fin_params.digest_or_mac_size <=
+			   DXDI_DIGEST_SIZE_MAX)) {
+			/* Copy back digest/mac if valid */
+			rc = copy_to_user(&(user_fin_params->digest_or_mac),
+					    fin_params.digest_or_mac,
+					    fin_params.digest_or_mac_size);
+		} else {	/* Invalid digest/mac size! */
+			pr_err("Got invalid digest/MAC size = %u",
+				    fin_params.digest_or_mac_size);
+			op_ctx.error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+			rc = -EINVAL;
+		}
+	}
+
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_fin_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
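+/*
+ * Note on the copy-back contract above: digest_or_mac_size is always
+ * copied back (even when 0) so user space can tell whether the
+ * digest_or_mac buffer holds valid data; the buffer itself is copied
+ * only on success and with a sane size.
+ */
+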
+static int sep_ioctl_combined_init(struct sep_client_ctx *client_ctx,
+				   unsigned long arg)
+{
+	struct dxdi_combined_init_params __user *user_init_params =
+	    (struct dxdi_combined_init_params __user *)arg;
+	struct dxdi_combined_init_params init_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_combined_init_params, error_info);
+	int rc;
+
+	/* Access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&init_params, user_init_params, input_size)) {
+		pr_err("Failed reading input parameters\n");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = init_combined_context(&op_ctx, &(init_params.props));
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_init_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_combined_proc_dblk(struct sep_client_ctx *client_ctx,
+					unsigned long arg)
+{
+	struct dxdi_combined_proc_dblk_params __user *user_dblk_params =
+	    (struct dxdi_combined_proc_dblk_params __user *)arg;
+	struct dxdi_combined_proc_dblk_params dblk_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_combined_proc_dblk_params, error_info);
+	int rc;
+
+	/* Access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&dblk_params, user_dblk_params, input_size)) {
+		pr_err("Failed reading input parameters\n");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sep_combined_proc_dblk(&op_ctx, &dblk_params.props,
+				    dblk_params.data_in, dblk_params.data_out,
+				    dblk_params.data_in_size);
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_dblk_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_combined_fin_proc(struct sep_client_ctx *client_ctx,
+				       unsigned long arg)
+{
+	struct dxdi_combined_proc_params __user *user_fin_params =
+	    (struct dxdi_combined_proc_params __user *)arg;
+	struct dxdi_combined_proc_params fin_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_combined_proc_params, error_info);
+	int rc;
+
+	/* Access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&fin_params, user_fin_params, input_size)) {
+		pr_err("Failed reading input parameters\n");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sep_combined_fin_proc(&op_ctx, &fin_params.props,
+				   fin_params.data_in, fin_params.data_out,
+				   fin_params.data_in_size,
+				   fin_params.auth_data,
+				   &(fin_params.auth_data_size));
+
+	if (rc == 0) {
+		/* Always copy back digest size + error_info */
+		/* (that's the reason for keeping them together)  */
+		rc = put_user(fin_params.auth_data_size,
+			      &user_fin_params->auth_data_size);
+		rc += put_user(fin_params.error_info,
+				 &user_fin_params->error_info);
+
+		/* We always need to copy back the digest size (even if 0)
+		 * in order to indicate validity of digest buffer */
+	}
+
+	if ((rc == 0) && (op_ctx.error_info == 0)) {
+		if (likely((fin_params.auth_data_size > 0) &&
+			   (fin_params.auth_data_size <=
+			    DXDI_DIGEST_SIZE_MAX))) {
+			/* Copy back auth if valid */
+			rc = copy_to_user(&(user_fin_params->auth_data),
+					    fin_params.auth_data,
+					    fin_params.auth_data_size);
+		}
+	}
+
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_fin_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_combined_proc(struct sep_client_ctx *client_ctx,
+				   unsigned long arg)
+{
+	struct dxdi_combined_proc_params __user *user_params =
+	    (struct dxdi_combined_proc_params __user *)arg;
+	struct dxdi_combined_proc_params params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_combined_proc_params, error_info);
+	int rc;
+
+	/* Access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters\n");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = process_combined_integrated(&op_ctx, &(params.props),
+					 params.data_in, params.data_out,
+					 params.data_in_size, params.auth_data,
+					 &(params.auth_data_size));
+
+	if (rc == 0) {
+		/* Always copy back digest size + error_info */
+		/* (that's the reason for keeping them together)  */
+		rc = put_user(params.auth_data_size,
+			      &user_params->auth_data_size);
+		rc += put_user(params.error_info, &user_params->error_info);
+
+		/* We always need to copy back the digest size (even if 0)
+		 * in order to indicate validity of digest buffer */
+	}
+
+	if ((rc == 0) && (op_ctx.error_info == 0)) {
+		if (likely((params.auth_data_size > 0) &&
+			   (params.auth_data_size <= DXDI_DIGEST_SIZE_MAX))) {
+			/* Copy back auth if valid */
+			rc = copy_to_user(&(user_params->auth_data),
+					  params.auth_data,
+					  params.auth_data_size);
+		}
+	}
+
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_sym_cipher_proc(struct sep_client_ctx *client_ctx,
+				     unsigned long arg)
+{
+	struct dxdi_sym_cipher_proc_params __user *user_params =
+	    (struct dxdi_sym_cipher_proc_params __user *)arg;
+	struct dxdi_sym_cipher_proc_params params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_sym_cipher_proc_params, error_info);
+	int rc;
+
+	/* Access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters\n");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = process_integrated(&op_ctx, params.context_buf,
+				ALG_CLASS_SYM_CIPHER, &(params.props),
+				params.data_in, params.data_out,
+				params.data_in_size, NULL, NULL);
+
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_auth_enc_proc(struct sep_client_ctx *client_ctx,
+				   unsigned long arg)
+{
+	struct dxdi_auth_enc_proc_params __user *user_params =
+	    (struct dxdi_auth_enc_proc_params __user *)arg;
+	struct dxdi_auth_enc_proc_params params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_auth_enc_proc_params, tag);
+	int rc;
+	u8 tag_size;
+
+	/* Access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters\n");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+
+	if (params.props.adata_size == 0) {
+		/* without assoc data we can optimize for one descriptor
+		 * sequence */
+		rc = process_integrated(&op_ctx, params.context_buf,
+					ALG_CLASS_AUTH_ENC, &(params.props),
+					params.text_data, params.data_out,
+					params.props.text_size, params.tag,
+					&tag_size);
+	} else {
+		/* Integrated processing with auth. enc. algorithms that have
+		 * additional data requires a special two-descriptor flow */
+		rc = process_integrated_auth_enc(&op_ctx, params.context_buf,
+						 ALG_CLASS_AUTH_ENC,
+						 &(params.props), params.adata,
+						 params.text_data,
+						 params.data_out,
+						 params.props.adata_size,
+						 params.props.text_size,
+						 params.tag, &tag_size);
+
+	}
+
+	if ((rc == 0) && (tag_size != params.props.tag_size)) {
+		pr_warn(
+			"Tag result size differs from requested (%u != %u)\n",
+			tag_size, params.props.tag_size);
+	}
+
+	if ((rc == 0) && (op_ctx.error_info == 0) && (tag_size > 0)) {
+		if (likely(tag_size <= DXDI_DIGEST_SIZE_MAX)) {
+			/* Copy back digest/mac if valid */
+			rc = __copy_to_user(&(user_params->tag), params.tag,
+					    tag_size);
+		} else {	/* Invalid digest/mac size! */
+			pr_err("Got invalid tag size = %u", tag_size);
+			op_ctx.error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+			rc = -EINVAL;
+		}
+	}
+
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_mac_proc(struct sep_client_ctx *client_ctx,
+			      unsigned long arg)
+{
+	struct dxdi_mac_proc_params __user *user_params =
+	    (struct dxdi_mac_proc_params __user *)arg;
+	struct dxdi_mac_proc_params params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_mac_proc_params, mac);
+	int rc;
+
+	/* Access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters\n");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = process_integrated(&op_ctx, params.context_buf,
+				ALG_CLASS_MAC, &(params.props),
+				params.data_in, NULL, params.data_in_size,
+				params.mac, &(params.mac_size));
+
+	if (rc == 0) {
+		/* Always copy back mac size + error_info */
+		/* (that's the reason for keeping them together)  */
+		rc = put_user(params.mac_size, &user_params->mac_size);
+		rc += put_user(params.error_info, &user_params->error_info);
+
+		/* We always need to copy back the mac size (even if 0)
+		 * in order to indicate validity of mac buffer */
+	}
+
+	if ((rc == 0) && (op_ctx.error_info == 0)) {
+		if (likely((params.mac_size > 0) &&
+			   (params.mac_size <= DXDI_DIGEST_SIZE_MAX))) {
+			/* Copy back mac if valid */
+			rc = copy_to_user(&(user_params->mac), params.mac,
+					  params.mac_size);
+		} else {	/* Invalid mac size! */
+			pr_err("Got invalid MAC size = %u",
+				    params.mac_size);
+			op_ctx.error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+			rc = -EINVAL;
+		}
+	}
+
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_hash_proc(struct sep_client_ctx *client_ctx,
+			       unsigned long arg)
+{
+	struct dxdi_hash_proc_params __user *user_params =
+	    (struct dxdi_hash_proc_params __user *)arg;
+	struct dxdi_hash_proc_params params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_hash_proc_params, digest);
+	int rc;
+
+	/* Access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters\n");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = process_integrated(&op_ctx, params.context_buf,
+				ALG_CLASS_HASH, &(params.hash_type),
+				params.data_in, NULL, params.data_in_size,
+				params.digest, &(params.digest_size));
+
+	if (rc == 0) {
+		/* Always copy back digest size + error_info */
+		/* (that's the reason for keeping them together)  */
+		rc = put_user(params.digest_size, &user_params->digest_size);
+		rc += put_user(params.error_info, &user_params->error_info);
+
+		/* We always need to copy back the digest size (even if 0)
+		 * in order to indicate validity of digest buffer */
+	}
+
+	if ((rc == 0) && (op_ctx.error_info == 0)) {
+		if (likely((params.digest_size > 0) &&
+			   (params.digest_size <= DXDI_DIGEST_SIZE_MAX))) {
+			/* Copy back digest if valid */
+			rc = copy_to_user(&(user_params->digest),
+					  params.digest, params.digest_size);
+		} else {	/* Invalid digest size! */
+			pr_err("Got invalid digest size = %u",
+				    params.digest_size);
+			op_ctx.error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+			rc = -EINVAL;
+		}
+	}
+
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_sep_rpc(struct sep_client_ctx *client_ctx,
+			     unsigned long arg)
+{
+
+	struct dxdi_sep_rpc_params __user *user_params =
+	    (struct dxdi_sep_rpc_params __user *)arg;
+	struct dxdi_sep_rpc_params params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_sep_rpc_params, error_info);
+	int rc;
+
+	/* Access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters\n");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = dispatch_sep_rpc(&op_ctx, params.agent_id, params.func_id,
+			      params.mem_refs, params.rpc_params_size,
+			      params.rpc_params);
+
+	put_user(op_ctx.error_info, &(user_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+#if MAX_SEPAPP_SESSION_PER_CLIENT_CTX > 0
+static int sep_ioctl_register_mem4dma(struct sep_client_ctx *client_ctx,
+				      unsigned long arg)
+{
+
+	struct dxdi_register_mem4dma_params __user *user_params =
+	    (struct dxdi_register_mem4dma_params __user *)arg;
+	struct dxdi_register_mem4dma_params params;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_register_mem4dma_params, memref_id);
+	enum dma_data_direction dma_dir;
+	int rc;
+
+	/* Access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters\n");
+		return -EFAULT;
+	}
+
+	/* convert DMA direction to enum dma_data_direction */
+	dma_dir = dxdi_data_dir_to_dma_data_dir(params.memref.dma_direction);
+	if (unlikely(dma_dir == DMA_NONE)) {
+		pr_err("Invalid DMA direction (%d)\n",
+			    params.memref.dma_direction);
+		rc = -EINVAL;
+	} else {
+		params.memref_id = register_client_memref(client_ctx,
+					(u8 __user *)params.memref.start_or_offset,
+					NULL, params.memref.size, dma_dir);
+		if (unlikely(!IS_VALID_MEMREF_IDX(params.memref_id))) {
+			rc = -ENOMEM;
+		} else {
+			rc = put_user(params.memref_id,
+				      &(user_params->memref_id));
+			if (rc != 0)	/* revert if put_user failed */
+				(void)free_client_memref(client_ctx,
+							 params.memref_id);
+		}
+	}
+
+	return rc;
+}
+
+static int sep_ioctl_free_mem4dma(struct sep_client_ctx *client_ctx,
+				  unsigned long arg)
+{
+	struct dxdi_free_mem4dma_params __user *user_params =
+	    (struct dxdi_free_mem4dma_params __user *)arg;
+	int memref_id;
+	int err;
+
+	/* Access permissions to arg were already checked in sep_ioctl */
+	err = __get_user(memref_id, &user_params->memref_id);
+	if (err) {
+		pr_err("Failed reading input parameter\n");
+		return -EFAULT;
+	}
+
+	return free_client_memref(client_ctx, memref_id);
+}
+#endif
+
+static int sep_ioctl_set_iv(struct sep_client_ctx *client_ctx,
+			    unsigned long arg)
+{
+	struct dxdi_aes_iv_params __user *user_params =
+	    (struct dxdi_aes_iv_params __user *)arg;
+	struct dxdi_aes_iv_params params;
+	struct host_crypto_ctx_sym_cipher __user *host_context =
+	    (struct host_crypto_ctx_sym_cipher __user *)user_params->context_buf;
+	struct crypto_ctx_uid uid;
+	int err;
+
+	/* Copy the ctx uid from the user context */
+	if (copy_from_user(&uid, &host_context->uid,
+			   sizeof(struct crypto_ctx_uid))) {
+		pr_err("Failed reading input parameters\n");
+		return -EFAULT;
+	}
+
+	/* Copy the IV parameters from user space */
+	if (__copy_from_user(&params, user_params,
+			     sizeof(struct dxdi_aes_iv_params))) {
+		pr_err("Failed reading input parameters\n");
+		return -EFAULT;
+	}
+	err = ctxmgr_set_symcipher_iv_user(user_params->context_buf,
+					   params.iv_ptr);
+	if (err != 0)
+		return err;
+
+	ctxmgr_sep_cache_invalidate(client_ctx->drv_data->sep_cache,
+				    uid, CRYPTO_CTX_ID_SINGLE_MASK);
+
+	return 0;
+}
+
+static int sep_ioctl_get_iv(struct sep_client_ctx *client_ctx,
+			    unsigned long arg)
+{
+	struct dxdi_aes_iv_params __user *user_params =
+	    (struct dxdi_aes_iv_params __user *)arg;
+	struct dxdi_aes_iv_params params;
+	int err;
+
+	/* Copy the context pointer (first field) from user space */
+	if (__copy_from_user(&params, user_params, sizeof(u32))) {
+		pr_err("Failed reading input parameters\n");
+		return -EFAULT;
+	}
+
+	err = ctxmgr_get_symcipher_iv_user(params.context_buf,
+					   params.iv_ptr);
+	if (err != 0)
+		return err;
+
+	if (copy_to_user(user_params, &params,
+	    sizeof(struct dxdi_aes_iv_params))) {
+		pr_err("Failed writing input parameters");
+		return -EFAULT;
+	}
+
+	return 0;
+}
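+
+/* A minimal user-space sketch of driving the two IV IOCTLs above. The
+ * exact request encodings live in dx_driver_abi.h; the _IOWR()
+ * composition and DXDI_IOCTL_* names below are assumptions made for
+ * illustration only.
+ */
+#if 0	/* illustrative user-space example; not built with the driver */
+#include <sys/ioctl.h>
+
+/* Hypothetical request macros; see dx_driver_abi.h for the real ones */
+#define DXDI_IOCTL_SET_IV \
+	_IOWR(DXDI_IOC_MAGIC, DXDI_IOC_NR_SET_IV, struct dxdi_aes_iv_params)
+#define DXDI_IOCTL_GET_IV \
+	_IOWR(DXDI_IOC_MAGIC, DXDI_IOC_NR_GET_IV, struct dxdi_aes_iv_params)
+
+static int example_set_iv(int fd, struct dxdi_aes_iv_params *p)
+{
+	return ioctl(fd, DXDI_IOCTL_SET_IV, p);	/* 0 on success */
+}
+#endif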
+
+/****** Driver entry points (open, release, ioctl, etc.) ******/
+
+/**
+ * init_client_ctx() - Initialize a client context object for a given queue
+ * @drvdata:	Queue driver context
+ * @client_ctx:	The client context to initialize
+ *
+ * Returns void
+ */
+void init_client_ctx(struct queue_drvdata *drvdata,
+		     struct sep_client_ctx *client_ctx)
+{
+	int i;
+	const unsigned int qid = drvdata - (drvdata->sep_data->queue);
+
+	/* Initialize user data structure */
+	client_ctx->qid = qid;
+	client_ctx->drv_data = drvdata;
+	atomic_set(&client_ctx->uid_cntr, 0);
+#if MAX_SEPAPP_SESSION_PER_CLIENT_CTX > 0
+	/* Initialize sessions */
+	for (i = 0; i < MAX_SEPAPP_SESSION_PER_CLIENT_CTX; i++) {
+		mutex_init(&client_ctx->sepapp_sessions[i].session_lock);
+		client_ctx->sepapp_sessions[i].sep_session_id =
+		    SEP_SESSION_ID_INVALID;
+		/* The rest of the fields are 0/NULL from kzalloc */
+	}
+#endif
+	/* Initialize memrefs */
+	for (i = 0; i < MAX_REG_MEMREF_PER_CLIENT_CTX; i++) {
+		mutex_init(&client_ctx->reg_memrefs[i].buf_lock);
+		/* The rest of the fields are 0/NULL from kzalloc */
+	}
+
+	init_waitqueue_head(&client_ctx->memref_wq);
+	client_ctx->memref_cnt = 0;
+}
+
+/**
+ * sep_open() - "open" device file entry point.
+ * @inode:	Inode of the opened device node (selects the queue)
+ * @file:	File object to attach the new client context to
+ *
+ * Returns int
+ */
+static int sep_open(struct inode *inode, struct file *file)
+{
+	struct queue_drvdata *drvdata;
+	struct sep_client_ctx *client_ctx;
+	unsigned int qid;
+
+	drvdata = container_of(inode->i_cdev, struct queue_drvdata, cdev);
+
+	if (imajor(inode) != MAJOR(drvdata->sep_data->devt_base)) {
+		pr_err("Invalid major device num=%d\n", imajor(inode));
+		return -ENOENT;
+	}
+	qid = iminor(inode) - MINOR(drvdata->sep_data->devt_base);
+	if (qid >= drvdata->sep_data->num_of_desc_queues) {
+		pr_err("Invalid minor device num=%d\n", iminor(inode));
+		return -ENOENT;
+	}
+#ifdef DEBUG
+	/* The qid based on the minor device number must match the offset
+	 * of given drvdata in the queues array of the sep_data context */
+	if (qid != (drvdata - (drvdata->sep_data->queue))) {
+		pr_err("qid=%d but drvdata index is %d\n",
+			    qid, (drvdata - (drvdata->sep_data->queue)));
+		return -EINVAL;
+	}
+#endif
+	pr_debug("qid=%d\n", qid);
+	pr_debug("calling kzalloc\n");
+	client_ctx = kzalloc(sizeof(*client_ctx), GFP_KERNEL);
+	if (client_ctx == NULL)
+		return -ENOMEM;
+	pr_debug("calling init_client_ctx\n");
+	init_client_ctx(drvdata, client_ctx);
+	pr_debug("after init_client_ctx\n");
+	file->private_data = client_ctx;
+	pr_debug("after private_data\n");
+	return 0;
+}
+
+void cleanup_client_ctx(struct queue_drvdata *drvdata,
+			struct sep_client_ctx *client_ctx)
+{
+	int memref_id;
+#if MAX_SEPAPP_SESSION_PER_CLIENT_CTX > 0
+	struct sep_op_ctx op_ctx;
+	int session_id;
+	struct crypto_ctx_uid uid;
+
+	/* Free any Applet session left open */
+	for (session_id = 0; session_id < MAX_SEPAPP_SESSION_PER_CLIENT_CTX;
+	     session_id++) {
+		if (IS_VALID_SESSION_CTX
+		    (&client_ctx->sepapp_sessions[session_id])) {
+			pr_debug("Closing session ID=%d\n", session_id);
+			op_ctx_init(&op_ctx, client_ctx);
+			sepapp_session_close(&op_ctx, session_id);
+			/* Note: There is never a problem with the session's
+			 * ref_cnt because when "release" is invoked there
+			 * are no pending IOCTLs, so ref_cnt is at most 1 */
+			op_ctx_fini(&op_ctx);
+		}
+		mutex_destroy(&client_ctx->sepapp_sessions[session_id].
+			      session_lock);
+	}
+#endif				/*MAX_SEPAPP_SESSION_PER_CLIENT_CTX > 0 */
+
+	/* Free registered user memory references */
+	for (memref_id = 0; memref_id < MAX_REG_MEMREF_PER_CLIENT_CTX;
+	     memref_id++) {
+		if (client_ctx->reg_memrefs[memref_id].ref_cnt > 0) {
+			pr_debug("Freeing user memref ID=%d\n", memref_id);
+			(void)free_client_memref(client_ctx, memref_id);
+		}
+		/* There is no problem with memref ref_cnt because when
+		 * "release" is invoked there are no pending IOCTLs,
+		 * so ref_cnt is at most 1                             */
+		mutex_destroy(&client_ctx->reg_memrefs[memref_id].buf_lock);
+	}
+
+	/* Invalidate any outstanding descriptors associated with this
+	 * client_ctx */
+	desc_q_mark_invalid_cookie(drvdata->desc_queue, (void *)client_ctx);
+
+	uid.addr = ((u64) (unsigned long)client_ctx);
+	uid.cntr = 0;
+
+	/* Invalidate any crypto context cache entry associated with this client
+	 * context before freeing context data object that may be reused.
+	 * This assures retaining of UIDs uniqueness (and makes sense since all
+	 * associated contexts does not exist anymore) */
+	ctxmgr_sep_cache_invalidate(drvdata->sep_cache, uid,
+				    CRYPTO_CTX_ID_CLIENT_MASK);
+}
+
+static int sep_release(struct inode *inode, struct file *file)
+{
+	struct sep_client_ctx *client_ctx = file->private_data;
+	struct queue_drvdata *drvdata = client_ctx->drv_data;
+
+	cleanup_client_ctx(drvdata, client_ctx);
+
+	kfree(client_ctx);
+
+	return 0;
+}
+
+static ssize_t
+sep_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
+{
+	pr_debug("Invoked for %zu bytes", count);
+	return -ENOSYS;		/* nothing to read... IOCTL only */
+}
+
+/*!
+ * The SeP device does not support read/write; this entry point is used
+ * for debug purposes only. Currently a loopback descriptor is sent the
+ * given number of times. Usage example: echo 10 > /dev/dx_sep_q0
+ * TODO: Move this functionality to sysfs?
+ *
+ * \param filp
+ * \param buf
+ * \param count
+ * \param ppos
+ *
+ * \return ssize_t
+ */
+static ssize_t
+sep_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos)
+{
+#ifdef DEBUG
+	struct sep_sw_desc desc;
+	struct sep_client_ctx *client_ctx = filp->private_data;
+	struct sep_op_ctx op_ctx;
+	unsigned int loop_times = 1;
+	unsigned int i;
+	int rc = 0;
+	char tmp_buf[80];
+
+	if (count > sizeof(tmp_buf) - 1)
+		return -ENOMEM;	/* Avoid buffer overflow */
+	if (copy_from_user(tmp_buf, buf, count))
+		return -EFAULT;
+	tmp_buf[count] = 0;	/* NULL terminate */
+
+	if (sscanf(tmp_buf, "%u", &loop_times) != 1)
+		return -EINVAL;
+	pr_debug("Loopback X %u...\n", loop_times);
+
+	op_ctx_init(&op_ctx, client_ctx);
+	/* prepare loopback descriptor */
+	desq_q_pack_debug_desc(&desc, &op_ctx);
+
+	/* Perform loopback for given times */
+	for (i = 0; i < loop_times; i++) {
+		op_ctx.op_state = USER_OP_INPROC;
+		rc = desc_q_enqueue(client_ctx->drv_data->desc_queue, &desc,
+				    true);
+		if (unlikely(IS_DESCQ_ENQUEUE_ERR(rc))) {
+			pr_err("Failed sending desc. %u\n", i);
+			break;
+		}
+		rc = wait_for_sep_op_result(&op_ctx);
+		if (rc != 0) {
+			pr_err("Failed completion of desc. %u\n", i);
+			break;
+		}
+		op_ctx.op_state = USER_OP_NOP;
+	}
+
+	op_ctx_fini(&op_ctx);
+
+	pr_debug("Completed loopback of %u desc.\n", i);
+
+	return count;		/* Nothing more to do for this device */
+#else /* DEBUG */
+	pr_debug("Invoked for %zu bytes", count);
+	return -ENOSYS;		/* nothing to write... IOCTL only */
+#endif /* DEBUG */
+}
+
+/*!
+ * IOCTL entry point
+ *
+ * \param filp
+ * \param cmd
+ * \param arg
+ *
+ * \return int
+ * \retval 0 Operation succeeded (but SeP return code may indicate an error)
+ * \retval -ENOTTY  : Unknown IOCTL command
+ * \retval -ENOSYS  : Unsupported/not-implemented (known) operation
+ * \retval -EINVAL  : Invalid parameters
+ * \retval -EFAULT  : Bad pointers for given user memory space
+ * \retval -EPERM   : Not enough permissions for given command
+ * \retval -ENOMEM,-EAGAIN: when not enough resources available for given op.
+ * \retval -EIO     : SeP HW error or another internal error
+ *                    (probably operation timed out or unexpected behavior)
+ */
+long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	struct sep_client_ctx *client_ctx = filp->private_data;
+	unsigned long long ioctl_start, ioctl_end;
+	int err = 0;
+
+	preempt_disable_notrace();
+	ioctl_start = sched_clock();
+	preempt_enable_notrace();
+
+	/* Verify IOCTL command: magic + number */
+	if (_IOC_TYPE(cmd) != DXDI_IOC_MAGIC) {
+		pr_err("Invalid IOCTL type=%u", _IOC_TYPE(cmd));
+		return -ENOTTY;
+	}
+	if (_IOC_NR(cmd) > DXDI_IOC_NR_MAX) {
+		pr_err("IOCTL NR=%u out of range for ABI ver.=%u.%u",
+			    _IOC_NR(cmd), DXDI_VER_MAJOR, DXDI_VER_MINOR);
+		return -ENOTTY;
+	}
+
+	/* Verify permissions on parameters pointer (arg) */
+	if (_IOC_DIR(cmd) & _IOC_READ)
+		err = !access_ok(ACCESS_WRITE,
+				 (void __user *)arg, _IOC_SIZE(cmd));
+	else if (_IOC_DIR(cmd) & _IOC_WRITE)
+		err = !access_ok(ACCESS_READ,
+				 (void __user *)arg, _IOC_SIZE(cmd));
+	if (err)
+		return -EFAULT;
+
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_get();
+#endif
+
+	switch (_IOC_NR(cmd)) {
+		/* Version info. commands */
+	case DXDI_IOC_NR_GET_VER_MAJOR:
+		pr_debug("DXDI_IOC_NR_GET_VER_MAJOR\n");
+		err = sep_ioctl_get_ver_major(arg);
+		break;
+	case DXDI_IOC_NR_GET_VER_MINOR:
+		pr_debug("DXDI_IOC_NR_GET_VER_MINOR\n");
+		err = sep_ioctl_get_ver_minor(arg);
+		break;
+		/* Context size queries */
+	case DXDI_IOC_NR_GET_SYMCIPHER_CTX_SIZE:
+		pr_debug("DXDI_IOC_NR_GET_SYMCIPHER_CTX_SIZE\n");
+		err = sep_ioctl_get_sym_cipher_ctx_size(arg);
+		break;
+	case DXDI_IOC_NR_GET_AUTH_ENC_CTX_SIZE:
+		pr_debug("DXDI_IOC_NR_GET_AUTH_ENC_CTX_SIZE\n");
+		err = sep_ioctl_get_auth_enc_ctx_size(arg);
+		break;
+	case DXDI_IOC_NR_GET_MAC_CTX_SIZE:
+		pr_debug("DXDI_IOC_NR_GET_MAC_CTX_SIZE\n");
+		err = sep_ioctl_get_mac_ctx_size(arg);
+		break;
+	case DXDI_IOC_NR_GET_HASH_CTX_SIZE:
+		pr_debug("DXDI_IOC_NR_GET_HASH_CTX_SIZE\n");
+		err = sep_ioctl_get_hash_ctx_size(arg);
+		break;
+		/* Init context commands */
+	case DXDI_IOC_NR_SYMCIPHER_INIT:
+		pr_debug("DXDI_IOC_NR_SYMCIPHER_INIT\n");
+		err = sep_ioctl_sym_cipher_init(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_AUTH_ENC_INIT:
+		pr_debug("DXDI_IOC_NR_AUTH_ENC_INIT\n");
+		err = sep_ioctl_auth_enc_init(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_MAC_INIT:
+		pr_debug("DXDI_IOC_NR_MAC_INIT\n");
+		err = sep_ioctl_mac_init(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_HASH_INIT:
+		pr_debug("DXDI_IOC_NR_HASH_INIT\n");
+		err = sep_ioctl_hash_init(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_COMBINED_INIT:
+		pr_debug("DXDI_IOC_NR_COMBINED_INIT\n");
+		err = sep_ioctl_combined_init(client_ctx, arg);
+		break;
+		/* Processing commands */
+	case DXDI_IOC_NR_PROC_DBLK:
+		pr_debug("DXDI_IOC_NR_PROC_DBLK\n");
+		err = sep_ioctl_proc_dblk(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_COMBINED_PROC_DBLK:
+		pr_debug("DXDI_IOC_NR_COMBINED_PROC_DBLK\n");
+		err = sep_ioctl_combined_proc_dblk(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_FIN_PROC:
+		pr_debug("DXDI_IOC_NR_FIN_PROC\n");
+		err = sep_ioctl_fin_proc(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_COMBINED_PROC_FIN:
+		pr_debug("DXDI_IOC_NR_COMBINED_PROC_FIN\n");
+		err = sep_ioctl_combined_fin_proc(client_ctx, arg);
+		break;
+		/* "Integrated" processing operations */
+	case DXDI_IOC_NR_SYMCIPHER_PROC:
+		pr_debug("DXDI_IOC_NR_SYMCIPHER_PROC\n");
+		err = sep_ioctl_sym_cipher_proc(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_AUTH_ENC_PROC:
+		pr_debug("DXDI_IOC_NR_AUTH_ENC_PROC\n");
+		err = sep_ioctl_auth_enc_proc(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_MAC_PROC:
+		pr_debug("DXDI_IOC_NR_MAC_PROC\n");
+		err = sep_ioctl_mac_proc(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_HASH_PROC:
+		pr_debug("DXDI_IOC_NR_HASH_PROC\n");
+		err = sep_ioctl_hash_proc(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_COMBINED_PROC:
+		pr_debug("DXDI_IOC_NR_COMBINED_PROC\n");
+		err = sep_ioctl_combined_proc(client_ctx, arg);
+		break;
+		/* SeP RPC */
+	case DXDI_IOC_NR_SEP_RPC:
+		err = sep_ioctl_sep_rpc(client_ctx, arg);
+		break;
+#if MAX_SEPAPP_SESSION_PER_CLIENT_CTX > 0
+		/* Memory registration */
+	case DXDI_IOC_NR_REGISTER_MEM4DMA:
+		pr_debug("DXDI_IOC_NR_REGISTER_MEM4DMA\n");
+		err = sep_ioctl_register_mem4dma(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_ALLOC_MEM4DMA:
+		pr_err("DXDI_IOC_NR_ALLOC_MEM4DMA: Not supported, yet");
+		err = -ENOTTY;
+		break;
+	case DXDI_IOC_NR_FREE_MEM4DMA:
+		pr_debug("DXDI_IOC_NR_FREE_MEM4DMA\n");
+		err = sep_ioctl_free_mem4dma(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_SEPAPP_SESSION_OPEN:
+		pr_debug("DXDI_IOC_NR_SEPAPP_SESSION_OPEN\n");
+		err = sep_ioctl_sepapp_session_open(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_SEPAPP_SESSION_CLOSE:
+		pr_debug("DXDI_IOC_NR_SEPAPP_SESSION_CLOSE\n");
+		err = sep_ioctl_sepapp_session_close(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_SEPAPP_COMMAND_INVOKE:
+		pr_debug("DXDI_IOC_NR_SEPAPP_COMMAND_INVOKE\n");
+		err = sep_ioctl_sepapp_command_invoke(client_ctx, arg);
+		break;
+#endif
+	case DXDI_IOC_NR_SET_IV:
+		pr_debug("DXDI_IOC_NR_SET_IV\n");
+		err = sep_ioctl_set_iv(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_GET_IV:
+		pr_debug("DXDI_IOC_NR_GET_IV\n");
+		err = sep_ioctl_get_iv(client_ctx, arg);
+		break;
+	default:/* Not supposed to happen - we already tested for NR range */
+		pr_err("bad IOCTL cmd 0x%08X\n", cmd);
+		err = -ENOTTY;
+	}
+	pr_debug("sep_ioctl error: %d\n", err); 
+	/* Update stats per IOCTL command */
+	if (err == 0) {
+		preempt_disable_notrace();
+		ioctl_end = sched_clock();
+		preempt_enable_notrace();
+		sysfs_update_drv_stats(client_ctx->qid, _IOC_NR(cmd),
+				       ioctl_start, ioctl_end);
+	}
+
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_put();
+#endif
+
+	return err;
+}
+
+static const struct file_operations sep_fops = {
+	.owner = THIS_MODULE,
+	.open = sep_open,
+	.release = sep_release,
+	.read = sep_read,
+	.write = sep_write,
+	.unlocked_ioctl = sep_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = sep_compat_ioctl,
+#endif
+};
+
+/**
+ * get_q_cache_size() - Get the number of entries to allocate for the SeP/FW
+ *			cache of given queue
+ * @drvdata:	 Driver context
+ * @qid:	 The queue to allocate for
+ *
+ * Get the number of entries to allocate for the SeP/FW cache of given queue
+ * The function assumes that num_of_desc_queues and num_of_sep_cache_entries
+ * are already initialized in drvdata.
+ * Returns Number of cache entries to allocate
+ */
+static int get_q_cache_size(struct sep_drvdata *drvdata, int qid)
+{
+	/* Simple allocation - divide evenly among queues;
+	 * consider preferring higher-priority queues */
+	return drvdata->num_of_sep_cache_entries / drvdata->num_of_desc_queues;
+}
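+
+/* Worked example for get_q_cache_size(): with 64 SeP cache entries and
+ * 4 descriptor queues, each queue gets 64 / 4 = 16 entries; any
+ * integer-division remainder is currently left unused.
+ */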
+
+/**
+ * enable_descq_interrupt() - Enable interrupt for given queue (GPR)
+ * @drvdata:	Driver context
+ * @qid:	The queue to enable the interrupt for
+ *
+ * Returns void
+ */
+static void enable_descq_interrupt(struct sep_drvdata *drvdata, int qid)
+{
+	/* clear pending interrupts in GPRs of SW-queues
+	 * (leftovers from init writes to GPRs..) */
+	WRITE_REGISTER(drvdata->cc_base + DX_CC_REG_OFFSET(HOST, ICR),
+		       gpr_interrupt_mask[qid]);
+
+	drvdata->irq_mask |= gpr_interrupt_mask[qid];
+	/* set IMR register */
+	WRITE_REGISTER(drvdata->cc_base + DX_CC_REG_OFFSET(HOST, IMR),
+		       ~drvdata->irq_mask);
+}
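+
+/* Note on the IMR write above: a set bit in the IMR masks (disables)
+ * its interrupt source, so writing ~drvdata->irq_mask unmasks exactly
+ * the sources accumulated in irq_mask and keeps everything else
+ * disabled.
+ */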
+
+/**
+ * alloc_host_mem_for_sep() - Allocate memory pages for sep icache/dcache
+ *	or for SEP backup memory in case there is not SEP cache memory.
+ *
+ * @drvdata:
+ *
+ * Currently using alloc_pages to allocate the pages.
+ * Consider using CMA feature for the memory allocation
+ */
+static int alloc_host_mem_for_sep(struct sep_drvdata *drvdata)
+{
+#ifdef CACHE_IMAGE_NAME
+	int i;
+	const int icache_sizes_enum2log[] = DX_CC_ICACHE_SIZE_ENUM2LOG;
+
+	pr_debug("icache_size=%uKB dcache_size=%uKB\n",
+		 1 << (icache_size_log2 - 10),
+		 1 << (dcache_size_log2 - 10));
+
+	/* Verify validity of chosen cache memory sizes */
+	if ((dcache_size_log2 > DX_CC_INIT_D_CACHE_MAX_SIZE_LOG2) ||
+	    (dcache_size_log2 < DX_CC_INIT_D_CACHE_MIN_SIZE_LOG2)) {
+		pr_err("Requested Dcache size (%uKB) is invalid\n",
+		       1 << (dcache_size_log2 - 10));
+		return -EINVAL;
+	}
+	/* Icache size must be one of values defined for this device */
+	for (i = 0; i < sizeof(icache_sizes_enum2log) / sizeof(int); i++)
+		if ((icache_size_log2 == icache_sizes_enum2log[i]) &&
+		    (icache_sizes_enum2log[i] >= 0))
+			/* Found valid value */
+			break;
+	if (unlikely(i == sizeof(icache_sizes_enum2log))) {
+		pr_err("Requested Icache size (%uKB) is invalid\n",
+		       1 << (icache_size_log2 - 10));
+	}
+	drvdata->icache_size_log2 = icache_size_log2;
+	/* Allocate pages suitable for 32bit DMA and out of cache (cold) */
+	drvdata->icache_pages = alloc_pages(GFP_KERNEL | GFP_DMA32 | __GFP_COLD,
+					    icache_size_log2 - PAGE_SHIFT);
+	if (drvdata->icache_pages == NULL) {
+		pr_err("Failed allocating %uKB for Icache\n",
+			    1 << (icache_size_log2 - 10));
+		return -ENOMEM;
+	}
+	drvdata->dcache_size_log2 = dcache_size_log2;
+	/* same as for icache */
+	drvdata->dcache_pages = alloc_pages(GFP_KERNEL | GFP_DMA32 | __GFP_COLD,
+					    dcache_size_log2 - PAGE_SHIFT);
+	if (drvdata->dcache_pages == NULL) {
+		pr_err("Failed allocating %uKB for Dcache\n",
+		       1 << (dcache_size_log2 - 10));
+		__free_pages(drvdata->icache_pages,
+			     drvdata->icache_size_log2 - PAGE_SHIFT);
+		return -ENOMEM;
+	}
+#elif defined(SEP_BACKUP_BUF_SIZE)
+	/* This size is not enforced by power of two, so we use
+	 * alloc_pages_exact() */
+	drvdata->sep_backup_buf = alloc_pages_exact(SEP_BACKUP_BUF_SIZE,
+						    GFP_KERNEL | GFP_DMA32 |
+						    __GFP_COLD);
+	if (unlikely(drvdata->sep_backup_buf == NULL)) {
+		pr_err("Failed allocating %d B for SEP backup buffer\n",
+		       SEP_BACKUP_BUF_SIZE);
+		return -ENOMEM;
+	}
+	drvdata->sep_backup_buf_size = SEP_BACKUP_BUF_SIZE;
+#endif
+	return 0;
+}
+
+/**
+ * free_host_mem_for_sep() - Free the memory resources allocated by
+ *	alloc_host_mem_for_sep()
+ *
+ * @drvdata:
+ */
+static void free_host_mem_for_sep(struct sep_drvdata *drvdata)
+{
+#ifdef CACHE_IMAGE_NAME
+	if (drvdata->dcache_pages != NULL) {
+		__free_pages(drvdata->dcache_pages,
+			     drvdata->dcache_size_log2 - PAGE_SHIFT);
+		drvdata->dcache_pages = NULL;
+	}
+	if (drvdata->icache_pages != NULL) {
+		__free_pages(drvdata->icache_pages,
+			     drvdata->icache_size_log2 - PAGE_SHIFT);
+		drvdata->icache_pages = NULL;
+	}
+#elif defined(SEP_BACKUP_BUF_SIZE)
+	if (drvdata->sep_backup_buf != NULL) {
+		free_pages_exact(drvdata->sep_backup_buf,
+				 drvdata->sep_backup_buf_size);
+		drvdata->sep_backup_buf_size = 0;
+		drvdata->sep_backup_buf = NULL;
+	}
+#endif
+}
+
+static int emmc_match(struct device *dev, const void *data)
+{
+	if (strcmp(dev_name(dev), data) == 0)
+		return 1;
+	return 0;
+}
+
+static int mmc_blk_rpmb_req_handle(struct mmc_ioc_rpmb_req *req)
+{
+#define EMMC_BLK_NAME   "mmcblk0rpmb"
+
+	struct device *emmc = NULL;
+
+	if (!req)
+		return -EINVAL;
+
+	emmc = class_find_device(&block_class, NULL, EMMC_BLK_NAME, emmc_match);
+	if (!emmc) {
+		pr_err("eMMC reg failed\n");
+		return -ENODEV;
+	}
+
+	return mmc_rpmb_req_handle(emmc, req);
+}
+
+static int rpmb_agent(void *unused)
+{
+#define AGENT_TIMEOUT_MS (1000 * 60 * 5) /* 5 minutes */
+
+#define AUTH_DAT_WR_REQ 0x0003
+#define AUTH_DAT_RD_REQ 0x0004
+
+#define RPMB_FRAME_LENGTH      512
+#define RPMB_MAC_KEY_LENGTH     32
+#define RPMB_NONCE_LENGTH       16
+#define RPMB_DATA_LENGTH       256
+#define RPMB_STUFFBYTES_LENGTH 196
+#define RPMB_COUNTER_LENGTH      4
+#define RPMB_ADDR_LENGTH         2
+#define RPMB_BLKCNT_LENGTH       2
+#define RPMB_RESULT_LENGTH       2
+#define RPMB_RSPREQ_LENGTH       2
+
+#define RPMB_STUFFBYTES_OFFSET 0
+#define RPMB_MAC_KEY_OFFSET   (RPMB_STUFFBYTES_OFFSET + RPMB_STUFFBYTES_LENGTH)
+#define RPMB_DATA_OFFSET      (RPMB_MAC_KEY_OFFSET + RPMB_MAC_KEY_LENGTH)
+#define RPMB_NONCE_OFFSET     (RPMB_DATA_OFFSET + RPMB_DATA_LENGTH)
+#define RPMB_COUNTER_OFFSET   (RPMB_NONCE_OFFSET + RPMB_NONCE_LENGTH)
+#define RPMB_ADDR_OFFSET      (RPMB_COUNTER_OFFSET + RPMB_COUNTER_LENGTH)
+#define RPMB_BLKCNT_OFFSET    (RPMB_ADDR_OFFSET + RPMB_ADDR_LENGTH)
+#define RPMB_RESULT_OFFSET    (RPMB_BLKCNT_OFFSET + RPMB_BLKCNT_LENGTH)
+#define RPMB_RSPREQ_OFFSET    (RPMB_RESULT_OFFSET + RPMB_RESULT_LENGTH)
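+
+/* Resulting RPMB frame layout (512 bytes), as derived from the offset
+ * macros above:
+ *
+ *	[  0..195]  stuff bytes    (196)
+ *	[196..227]  MAC / key      ( 32)
+ *	[228..483]  data           (256)
+ *	[484..499]  nonce          ( 16)
+ *	[500..503]  write counter  (  4)
+ *	[504..505]  address        (  2)
+ *	[506..507]  block count    (  2)
+ *	[508..509]  result         (  2)
+ *	[510..511]  req/resp type  (  2)
+ */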
+
+	int ret = 0;
+	u32 tmp = 0;
+	u32 max_buf_size = 0;
+	u8 in_buf[RPMB_FRAME_LENGTH];
+	u8 *out_buf = NULL;
+	u32 in_buf_size = RPMB_FRAME_LENGTH;
+	u32 timeout = INT_MAX;
+	/* structure to pass to the eMMC driver's RPMB API */
+	struct mmc_ioc_rpmb_req req2emmc;
+
+	ret = dx_sep_req_register_agent(RPMB_AGENT_ID, &max_buf_size);
+	if (ret) {
+		pr_err("REG FAIL %d\n", ret);
+		return -EINVAL;
+	}
+
+	out_buf = kmalloc(RPMB_FRAME_LENGTH, GFP_KERNEL);
+	if (!out_buf) {
+		pr_err("MALLOC FAIL\n");
+		return -ENOMEM;
+	}
+
+	while (1) {
+		/* Block until called by SEP */
+		do {
+			pr_info("RPMB AGENT BLOCKED\n");
+			ret = dx_sep_req_wait_for_request(RPMB_AGENT_ID,
+					in_buf, &in_buf_size, timeout);
+		} while (ret == -EAGAIN);
+
+		if (ret) {
+			pr_err("WAIT FAILED %d\n", ret);
+			break;
+		}
+
+		pr_info("RPMB AGENT UNBLOCKED\n");
+
+		/* Process request */
+		memset(&req2emmc, 0x00, sizeof(struct mmc_ioc_rpmb_req));
+
+		/* Copy fields from the incoming buffer and swap
+		 * endianness where needed */
+		req2emmc.addr = *((u16 *)(in_buf+RPMB_ADDR_OFFSET));
+		req2emmc.addr = be16_to_cpu(req2emmc.addr);
+		/* We support only single-block transfers */
+		req2emmc.blk_cnt = 1;
+		req2emmc.data = in_buf+RPMB_DATA_OFFSET;
+		req2emmc.mac = in_buf+RPMB_MAC_KEY_OFFSET;
+		req2emmc.nonce = in_buf+RPMB_NONCE_OFFSET;
+		req2emmc.result = (u16 *)(in_buf+RPMB_RESULT_OFFSET);
+		req2emmc.type = *((u16 *)(in_buf+RPMB_RSPREQ_OFFSET));
+		req2emmc.type = be16_to_cpu(req2emmc.type);
+		req2emmc.wc = (u32 *)(in_buf+RPMB_COUNTER_OFFSET);
+		*req2emmc.wc = be32_to_cpu(*req2emmc.wc);
+
+		/* Send request to eMMC driver */
+		ret = mmc_blk_rpmb_req_handle(&req2emmc);
+		if (ret) {
+			pr_err("mmc_blk_rpmb_req_handle fail %d", ret);
+			/* If access to eMMC driver failed send back
+			 * artificial error */
+			req2emmc.type = 0x0008;
+		}
+
+		/* Rebuild RPMB from response */
+		memset(out_buf, 0, RPMB_FRAME_LENGTH);
+
+		if (req2emmc.type == AUTH_DAT_RD_REQ) {
+			pr_info("READ OPERATION RETURN\n");
+			memcpy(out_buf+RPMB_DATA_OFFSET,
+					req2emmc.data,  RPMB_DATA_LENGTH);
+			memcpy(out_buf+RPMB_NONCE_OFFSET,
+					req2emmc.nonce, RPMB_NONCE_LENGTH);
+
+			out_buf[RPMB_BLKCNT_OFFSET]   = req2emmc.blk_cnt >> 8;
+			out_buf[RPMB_BLKCNT_OFFSET+1] = req2emmc.blk_cnt;
+		} else {
+			pr_info("WRITE OPERATION RETURN\n");
+			memcpy(&tmp, req2emmc.wc, RPMB_COUNTER_LENGTH);
+			tmp = cpu_to_be32(tmp);
+			memcpy(out_buf+RPMB_COUNTER_OFFSET,
+					&tmp, RPMB_COUNTER_LENGTH);
+		}
+
+		memcpy(out_buf+RPMB_MAC_KEY_OFFSET,
+				req2emmc.mac,    RPMB_MAC_KEY_LENGTH);
+		memcpy(out_buf+RPMB_RESULT_OFFSET,
+				req2emmc.result, RPMB_RESULT_LENGTH);
+
+		memcpy(out_buf+RPMB_RSPREQ_OFFSET,
+				&req2emmc.type, RPMB_RSPREQ_LENGTH);
+		out_buf[RPMB_ADDR_OFFSET]   = req2emmc.addr >> 8;
+		out_buf[RPMB_ADDR_OFFSET+1] = req2emmc.addr;
+
+		/* Send response */
+		ret = dx_sep_req_send_response(RPMB_AGENT_ID,
+				out_buf, RPMB_FRAME_LENGTH);
+		if (ret) {
+			pr_err("dx_sep_req_send_response fail %d", ret);
+			break;
+		}
+	}
+
+	kfree(out_buf);
+
+	return ret;
+}
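+
+/* Note: the u16/u32 casts in rpmb_agent() assume in_buf is suitably
+ * aligned, which holds for the on-stack array used here. An equivalent
+ * alignment-safe sketch using the kernel's unaligned helpers
+ * (asm/unaligned.h) would read the big-endian fields as:
+ *
+ *	req2emmc.addr = get_unaligned_be16(in_buf + RPMB_ADDR_OFFSET);
+ *	req2emmc.type = get_unaligned_be16(in_buf + RPMB_RSPREQ_OFFSET);
+ */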
+
+static int sep_setup(struct device *dev,
+		     const struct resource *regs_res,
+		     struct resource *r_irq)
+{
+	dev_t devt;
+	struct sep_drvdata *drvdata = NULL;
+	enum dx_sep_state sep_state;
+	int rc = 0;
+	int i, init_flag = INIT_FW_FLAG;
+	/* Create kernel thread for RPMB agent */
+	static struct task_struct *rpmb_thread;
+	char thread_name[] = "rpmb_agent";
+	int sess_id = 0;
+	enum dxdi_sep_module ret_origin;
+	struct sep_client_ctx *sctx = NULL;
+	u8 uuid[16] = DEFAULT_APP_UUID;
+
+	pr_info("Discretix %s Driver initializing...\n", DRIVER_NAME);
+
+	drvdata = kzalloc(sizeof(struct sep_drvdata), GFP_KERNEL);
+	if (unlikely(drvdata == NULL)) {
+		pr_err("Unable to allocate device private record\n");
+		rc = -ENOMEM;
+		goto failed0;
+	}
+	dev_set_drvdata(dev, (void *)drvdata);
+
+	if (!regs_res) {
+		pr_err("Couldn't get registers resource\n");
+		rc = -EFAULT;
+		goto failed1;
+	}
+
+	if (q_num > SEP_MAX_NUM_OF_DESC_Q) {
+		pr_err(
+			    "Requested number of queues (%u) is out of range must be no more than %u\n",
+			    q_num, SEP_MAX_NUM_OF_DESC_Q);
+		rc = -EINVAL;
+		goto failed1;
+	}
+
+	/* TODO: Verify number of queues also with SeP capabilities */
+	/* Initialize objects arrays for proper cleanup in case of error */
+	for (i = 0; i < SEP_MAX_NUM_OF_DESC_Q; i++) {
+		drvdata->queue[i].desc_queue = DESC_Q_INVALID_HANDLE;
+		drvdata->queue[i].sep_cache = SEP_CTX_CACHE_NULL_HANDLE;
+	}
+
+	drvdata->mem_start = regs_res->start;
+	drvdata->mem_end = regs_res->end;
+	drvdata->mem_size = regs_res->end - regs_res->start + 1;
+
+	if (!request_mem_region(drvdata->mem_start,
+				drvdata->mem_size, DRIVER_NAME)) {
+		pr_err("Couldn't lock memory region at %Lx\n",
+			    (unsigned long long)regs_res->start);
+		rc = -EBUSY;
+		goto failed1;
+	}
+
+	/* Constrain DMA to the lower 4 GB of memory */
+	if (!dma_set_mask(dev, DMA_BIT_MASK(32)))
+		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+	else
+		pr_warn("sep54: No suitable DMA available\n");
+
+	drvdata->dev = dev;
+
+	drvdata->cc_base = ioremap(drvdata->mem_start, drvdata->mem_size);
+	if (drvdata->cc_base == NULL) {
+		pr_err("ioremap() failed\n");
+		goto failed2;
+	}
+
+	pr_info("regbase_phys=0x%0x..0x%0x\n", drvdata->mem_start,
+		drvdata->mem_end);
+	pr_info("regbase_virt=0x%p\n", drvdata->cc_base);
+
+#ifdef DX_BASE_ENV_REGS
+	pr_info("FPGA ver. = UNKNOWN\n");
+	/* TODO: verify FPGA version against expected version */
+#endif
+
+#ifdef SEP_PRINTF
+	/* Sync host to the SeP initialization counter */
+	/* After setting the interrupt mask, the interrupt from the GPR will
+	 * trigger host_printf_handler to ack this value */
+	drvdata->last_ack_cntr = SEP_PRINTF_ACK_SYNC_VAL;
+#endif
+
+	dx_sep_power_init(drvdata);
+
+	/* Interrupt handler setup */
+#ifdef SEP_INTERRUPT_BY_TIMER
+	init_timer(&drvdata->delegate);
+	drvdata->delegate.function = sep_timer;
+	drvdata->delegate.data = (unsigned long)drvdata;
+	mod_timer(&drvdata->delegate, jiffies);
+
+#else				/* IRQ handler setup */
+	/* Initialize IMR (mask) before registering interrupt handler */
+	/* Enable only state register interrupt */
+	drvdata->irq_mask = SEP_HOST_GPR_IRQ_MASK(DX_SEP_STATE_GPR_IDX);
+#ifdef SEP_PRINTF
+	/* Enable interrupt from host_printf GPR */
+	drvdata->irq_mask |= SEP_HOST_GPR_IRQ_MASK(DX_SEP_HOST_PRINTF_GPR_IDX);
+#endif
+	WRITE_REGISTER(drvdata->cc_base + DX_CC_REG_OFFSET(HOST, IMR),
+		       ~drvdata->irq_mask);
+	/* The GPRs interrupts are set only after sep_init is done to avoid
+	 * "garbage" interrupts as a result of the FW init process */
+	drvdata->irq = r_irq->start;
+	rc = request_irq(drvdata->irq, sep_interrupt,
+			 IRQF_SHARED, DRIVER_NAME, drvdata->dev);
+	if (unlikely(rc != 0)) {
+		pr_err("Could not allocate interrupt %d\n", drvdata->irq);
+		goto failed3;
+	}
+	pr_info("%s at 0x%p mapped to interrupt %d\n",
+		DRIVER_NAME, drvdata->cc_base, drvdata->irq);
+
+#endif				/*SEP_INTERRUPT_BY_TIMER */
+
+	/* SeP FW initialization sequence */
+	/* Cold boot before creating descQ objects */
+	sep_state = GET_SEP_STATE(drvdata);
+	if (sep_state != DX_SEP_STATE_DONE_COLD_BOOT) {
+		pr_debug("sep_state=0x%08X\n", sep_state);
+		/* If INIT_CC was not done externally, take care of it here */
+		rc = alloc_host_mem_for_sep(drvdata);
+		if (unlikely(rc != 0))
+			goto failed4;
+		rc = sepinit_do_cc_init(drvdata);
+		if (unlikely(rc != 0))
+			goto failed5;
+	}
+
+	if (sep_state == DX_SEP_STATE_DONE_FW_INIT) {
+		/*If fw init was done change the state to reload driver state*/
+		rc = sepinit_reload_driver_state(drvdata);
+		if (unlikely(rc != 0))
+			goto failed5;
+	}
+
+	sepinit_get_fw_props(drvdata);
+	if (drvdata->fw_ver != EXPECTED_FW_VER) {
+		pr_warn("Expected FW version %u.%u.%u but got %u.%u.%u\n",
+			     VER_MAJOR(EXPECTED_FW_VER),
+			     VER_MINOR(EXPECTED_FW_VER),
+			     VER_PATCH(EXPECTED_FW_VER),
+			     VER_MAJOR(drvdata->fw_ver),
+			     VER_MINOR(drvdata->fw_ver),
+			     VER_PATCH(drvdata->fw_ver));
+	}
+
+	if (q_num > drvdata->num_of_desc_queues) {
+		pr_err("Requested number of queues (%u) is greater than SEP can support (%u)\n",
+		       q_num, drvdata->num_of_desc_queues);
+		rc = -EINVAL;
+		goto failed5;
+	}
+
+	if (q_num == 0) {
+		if (drvdata->num_of_desc_queues > SEP_MAX_NUM_OF_DESC_Q) {
+			pr_info("The SEP number of queues (%u) is greater than the driver can support (%u)\n",
+				drvdata->num_of_desc_queues,
+				SEP_MAX_NUM_OF_DESC_Q);
+			q_num = SEP_MAX_NUM_OF_DESC_Q;
+		} else {
+			q_num = drvdata->num_of_desc_queues;
+		}
+	}
+	drvdata->num_of_desc_queues = q_num;
+
+	pr_info("q_num=%d\n", drvdata->num_of_desc_queues);
+
+	rc = dx_sep_req_init(drvdata);
+	if (unlikely(rc != 0))
+		goto failed5;	/* don't fini what failed to init */
+
+	/* Create descriptor queues objects - must be before
+	 *  sepinit_set_fw_init_params to assure GPRs from host are reset */
+	for (i = 0; i < drvdata->num_of_desc_queues; i++) {
+		drvdata->queue[i].sep_data = drvdata;
+		mutex_init(&drvdata->queue[i].desc_queue_sequencer);
+		drvdata->queue[i].desc_queue =
+		    desc_q_create(i, &drvdata->queue[i], sep_state);
+		if (drvdata->queue[i].desc_queue == DESC_Q_INVALID_HANDLE) {
+			pr_err("Unable to allocate desc_q object (%d)\n", i);
+			rc = -ENOMEM;
+			goto failed7;
+		}
+	}
+
+	/* Create context cache management objects */
+	for (i = 0; i < drvdata->num_of_desc_queues; i++) {
+		const int num_of_cache_entries = get_q_cache_size(drvdata, i);
+		if (num_of_cache_entries < 1) {
+			pr_err("No SeP cache entries were assigned for qid=%d",
+			       i);
+			rc = -ENOMEM;
+			goto failed7;
+		}
+		drvdata->queue[i].sep_cache =
+		    ctxmgr_sep_cache_create(num_of_cache_entries);
+		if (drvdata->queue[i].sep_cache == SEP_CTX_CACHE_NULL_HANDLE) {
+			pr_err("Unable to allocate SeP cache object (%d)\n", i);
+			rc = -ENOMEM;
+			goto failed7;
+		}
+	}
+
+	if (sep_state != DX_SEP_STATE_DONE_FW_INIT) {
+		rc = sepinit_do_fw_init(drvdata, init_flag);
+	} else {
+		init_flag = INIT_SEP_SWQ_FLAG;
+		/* FW init was already done; update the counters
+		 * of the queues */
+		for (i = 0; i < drvdata->num_of_desc_queues; i++)
+			desc_q_cntr_set(drvdata->queue[i].desc_queue);
+
+		rc = sepinit_do_fw_init(drvdata, init_flag);
+	}
+
+	if (unlikely(rc != 0))
+		goto failed7;
+
+	drvdata->llimgr = llimgr_create(drvdata->dev, drvdata->mlli_table_size);
+	if (drvdata->llimgr == LLIMGR_NULL_HANDLE) {
+		pr_err("Failed creating LLI-manager object\n");
+		rc = -ENOMEM;
+		goto failed7;
+	}
+
+	drvdata->spad_buf_pool = dma_pool_create("dx_sep_rpc_msg", drvdata->dev,
+						 USER_SPAD_SIZE,
+						 L1_CACHE_BYTES, 0);
+	if (drvdata->spad_buf_pool == NULL) {
+		pr_err("Failed allocating DMA pool for RPC messages\n");
+		rc = -ENOMEM;
+		goto failed8;
+	}
+
+	/* Add character device nodes */
+	rc = alloc_chrdev_region(&drvdata->devt_base, 0, SEP_DEVICES,
+				 DRIVER_NAME);
+	if (unlikely(rc != 0))
+		goto failed9;
+	pr_debug("Allocated %u chrdevs at %u:%u\n", SEP_DEVICES,
+		 MAJOR(drvdata->devt_base), MINOR(drvdata->devt_base));
+	for (i = 0; i < drvdata->num_of_desc_queues; i++) {
+		devt = MKDEV(MAJOR(drvdata->devt_base),
+			     MINOR(drvdata->devt_base) + i);
+		cdev_init(&drvdata->queue[i].cdev, &sep_fops);
+		drvdata->queue[i].cdev.owner = THIS_MODULE;
+		rc = cdev_add(&drvdata->queue[i].cdev, devt, 1);
+		if (unlikely(rc != 0)) {
+			pr_err("cdev_add() failed for q%d\n", i);
+			goto failed9;
+		}
+		drvdata->queue[i].dev = device_create(sep_class, dev, devt,
+						      &drvdata->queue[i],
+						      "%s%d",
+						      DEVICE_NAME_PREFIX, i);
+		drvdata->queue[i].devt = devt;
+	}
+
+	rc = sep_setup_sysfs(&(dev->kobj), drvdata);
+	if (unlikely(rc != 0))
+		goto failed9;
+
+#ifndef SEP_INTERRUPT_BY_TIMER
+	/* Everything is ready - enable interrupts of desc-Qs */
+	for (i = 0; i < drvdata->num_of_desc_queues; i++)
+		enable_descq_interrupt(drvdata, i);
+#endif
+
+	/* Enable sep request interrupt handling */
+	dx_sep_req_enable(drvdata);
+
+	/* Init DX Linux crypto module */
+	if (!disable_linux_crypto) {
+		rc = dx_crypto_api_init(drvdata);
+		if (unlikely(rc != 0))
+			goto failed10;
+		rc = hwk_init();
+		if (unlikely(rc != 0))
+			goto failed10;
+	}
+#if MAX_SEPAPP_SESSION_PER_CLIENT_CTX > 0
+	dx_sepapp_init(drvdata);
+#endif
+
+	rpmb_thread = kthread_create(rpmb_agent, NULL, thread_name);
+	if (IS_ERR(rpmb_thread)) {
+		pr_err("RPMB agent thread create fail\n");
+		rc = PTR_ERR(rpmb_thread);
+		goto failed10;
+	}
+	wake_up_process(rpmb_thread);
+
+	/* Inform SEP RPMB driver that it can enable RPMB access again */
+	sctx = dx_sepapp_context_alloc();
+	if (unlikely(!sctx)) {
+		rc = -ENOMEM;
+		goto failed10;
+	}
+
+	rc = dx_sepapp_session_open(sctx, uuid, 0, NULL, NULL, &sess_id,
+				    &ret_origin);
+	if (unlikely(rc != 0))
+		goto failed11;
+	rc = dx_sepapp_command_invoke(sctx, sess_id, CMD_RPMB_ENABLE, NULL,
+				      &ret_origin);
+	if (unlikely(rc != 0))
+		goto failed11;
+
+	rc = dx_sepapp_session_close(sctx, sess_id);
+	if (unlikely(rc != 0))
+		goto failed11;
+
+	dx_sepapp_context_free(sctx);
+
+	return 0;
+
+/* Error cases cleanup */
+ failed11:
+	dx_sepapp_context_free(sctx);
+ failed10:
+	/* Disable interrupts */
+	WRITE_REGISTER(drvdata->cc_base + DX_CC_REG_OFFSET(HOST, IMR), ~0);
+	sep_free_sysfs();
+ failed9:
+	dma_pool_destroy(drvdata->spad_buf_pool);
+ failed8:
+	llimgr_destroy(drvdata->llimgr);
+ failed7:
+	for (i = 0; i < drvdata->num_of_desc_queues; i++) {
+		if (drvdata->queue[i].devt) {
+			cdev_del(&drvdata->queue[i].cdev);
+			device_destroy(sep_class, drvdata->queue[i].devt);
+		}
+
+		if (drvdata->queue[i].sep_cache != SEP_CTX_CACHE_NULL_HANDLE) {
+			ctxmgr_sep_cache_destroy(drvdata->queue[i].sep_cache);
+			drvdata->queue[i].sep_cache = SEP_CTX_CACHE_NULL_HANDLE;
+		}
+
+		if (drvdata->queue[i].desc_queue != DESC_Q_INVALID_HANDLE) {
+			desc_q_destroy(drvdata->queue[i].desc_queue);
+			drvdata->queue[i].desc_queue = DESC_Q_INVALID_HANDLE;
+			mutex_destroy(&drvdata->queue[i].desc_queue_sequencer);
+		}
+	}
+
+ failed6:
+	dx_sep_req_fini(drvdata);
+ failed5:
+	free_host_mem_for_sep(drvdata);
+ failed4:
+#ifdef SEP_INTERRUPT_BY_TIMER
+	del_timer_sync(&drvdata->delegate);
+#else
+	free_irq(drvdata->irq, dev);
+#endif
+ failed3:
+	dx_sep_power_exit();
+	iounmap(drvdata->cc_base);
+ failed2:
+	release_mem_region(regs_res->start, drvdata->mem_size);
+ failed1:
+	kfree(drvdata);
+ failed0:
+
+	return rc;
+}
+
+static void sep_pci_remove(struct pci_dev *pdev)
+{
+	struct sep_drvdata *drvdata =
+	    (struct sep_drvdata *)dev_get_drvdata(&pdev->dev);
+	int i;
+
+	if (!drvdata)
+		return;
+	dx_sep_req_fini(drvdata);
+	if (!disable_linux_crypto) {
+		dx_crypto_api_fini();
+		hwk_fini();
+	}
+	/* Disable interrupts */
+	WRITE_REGISTER(drvdata->cc_base + DX_CC_REG_OFFSET(HOST, IMR), ~0);
+
+#ifdef SEP_RUNTIME_PM
+	pm_runtime_get_noresume(&pdev->dev);
+	pm_runtime_forbid(&pdev->dev);
+#endif
+
+	for (i = 0; i < drvdata->num_of_desc_queues; i++) {
+		if (drvdata->queue[i].desc_queue != DESC_Q_INVALID_HANDLE) {
+			desc_q_destroy(drvdata->queue[i].desc_queue);
+			mutex_destroy(&drvdata->queue[i].desc_queue_sequencer);
+		}
+		if (drvdata->queue[i].sep_cache != NULL)
+			ctxmgr_sep_cache_destroy(drvdata->queue[i].sep_cache);
+		cdev_del(&drvdata->queue[i].cdev);
+		device_destroy(sep_class, drvdata->queue[i].devt);
+	}
+
+	dma_pool_destroy(drvdata->spad_buf_pool);
+	drvdata->spad_buf_pool = NULL;
+	llimgr_destroy(drvdata->llimgr);
+	drvdata->llimgr = LLIMGR_NULL_HANDLE;
+	free_host_mem_for_sep(drvdata);
+#ifdef SEP_INTERRUPT_BY_TIMER
+	del_timer_sync(&drvdata->delegate);
+#else
+	free_irq(drvdata->irq, &pdev->dev);
+#endif
+	dx_sep_power_exit();
+	iounmap(drvdata->cc_base);
+	release_mem_region(drvdata->mem_start, drvdata->mem_size);
+	sep_free_sysfs();
+	kfree(drvdata);
+	dev_set_drvdata(&pdev->dev, NULL);
+	pci_dev_put(pdev);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, MRLD_SEP_PCI_DEVICE_ID) },
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
+
+/**
+ *	sep_pci_probe - probe a matching PCI device
+ *	@pdev: pci_device
+ *	@ent: matching pci_device_id entry
+ *
+ *	Attempt to set up and configure a SEP device that has been
+ *	discovered by the PCI layer.
+ */
+static int sep_pci_probe(struct pci_dev *pdev,
+			 const struct pci_device_id *ent)
+{
+	int error;
+	struct resource res;
+	struct resource r_irq;
+
+	security_cfg_reg = ioremap_nocache(SECURITY_CFG_ADDR, 4);
+	if (security_cfg_reg == NULL) {
+		dev_err(&pdev->dev, "ioremap of security_cfg_reg failed\n");
+		return -ENOMEM;
+	}
+
+	/* Enable Chaabi */
+	error = pci_enable_device(pdev);
+	if (error) {
+		dev_err(&pdev->dev, "error enabling SEP device\n");
+		goto end;
+	}
+
+	/* Fill resource variables */
+	res.start = pci_resource_start(pdev, 0);
+
+#ifdef PCI_REGION_BUG		/* TODO for wrong sep address bug */
+	res.start += 0x8000;
+#endif
+
+	if (!res.start) {
+		dev_warn(&pdev->dev, "Error getting register start\n");
+		error = -ENODEV;
+		goto disable_pci;
+	}
+
+	res.end = pci_resource_end(pdev, 0);
+	if (!res.end) {
+		dev_warn(&pdev->dev, "Error getting register end\n");
+		error = -ENODEV;
+		goto disable_pci;
+	}
+
+	/* Fill irq resource variable */
+	r_irq.start = pdev->irq;
+
+	/* Use resource variables */
+	error = sep_setup(&pdev->dev, &res, &r_irq);
+	if (error)
+		goto disable_pci;
+
+	pdev = pci_dev_get(pdev);
+
+#ifdef SEP_RUNTIME_PM
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, SEP_AUTOSUSPEND_DELAY);
+	pm_runtime_mark_last_busy(&pdev->dev);
+	pm_runtime_use_autosuspend(&pdev->dev);
+#endif
+
+	return 0;
+
+ disable_pci:
+	pci_disable_device(pdev);
+ end:
+	iounmap(security_cfg_reg);
+
+	return error;
+}
+
+#if defined(CONFIG_PM_RUNTIME) && defined(SEP_RUNTIME_PM)
+static int sep_runtime_suspend(struct device *dev)
+{
+	int ret;
+	int count = 0;
+	u32 val;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct sep_drvdata *drvdata =
+	    (struct sep_drvdata *)dev_get_drvdata(dev);
+
+	ret = dx_sep_power_state_set(DX_SEP_POWER_HIBERNATED);
+	if (ret) {
+		pr_err("%s failed! ret = %d\n", __func__, ret);
+		return ret;
+	}
+
+	/* Poll for the chaabi_powerdown_en bit in SECURITY_CFG */
+	while (count < SEP_TIMEOUT) {
+		val = readl(security_cfg_reg);
+		if (val & PWR_DWN_ENB_MASK)
+			break;
+		usleep_range(40, 60);
+		count++;
+	}
+	if (count >= SEP_TIMEOUT) {
+		dev_err(&pdev->dev,
+			"SEP: timed out waiting for chaabi_powerdown_en\n");
+		WARN_ON(1);
+		/* Continue to suspend anyway, as Chaabi is not stable */
+	}
+
+	disable_irq(pdev->irq);
+	drvdata->sep_suspended = 1;
+
+	return ret;
+}
+
+static int sep_runtime_resume(struct device *dev)
+{
+	int ret;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct sep_drvdata *drvdata =
+	    (struct sep_drvdata *)dev_get_drvdata(dev);
+
+	drvdata->sep_suspended = 0;
+	enable_irq(pdev->irq);
+	ret = dx_sep_power_state_set(DX_SEP_POWER_ACTIVE);
+	WARN(ret, "%s failed! ret = %d\n", __func__, ret);
+
+	/*
+	 * The SeP device might not return to ACTIVE in time. Since the
+	 * device is not stable, return 0 anyway so a failure here does
+	 * not block S3.
+	 */
+	return 0;
+}
+
+static int sep_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct sep_drvdata *drvdata =
+	    (struct sep_drvdata *)dev_get_drvdata(dev);
+	int ret = 0;
+	int count = 0;
+	u32 val;
+
+	ret = dx_sep_power_state_set(DX_SEP_POWER_HIBERNATED);
+	if (ret) {
+		pr_err("%s failed! ret = %d\n", __func__, ret);
+		return ret;
+	}
+
+	/* Poll for the chaabi_powerdown_en bit in SECURITY_CFG */
+	while (count < SEP_TIMEOUT) {
+		val = readl(security_cfg_reg);
+		if (val & PWR_DWN_ENB_MASK)
+			break;
+		usleep_range(40, 60);
+		count++;
+	}
+	if (count >= SEP_TIMEOUT) {
+		dev_err(dev,
+			"SEP: timed out waiting for chaabi_powerdown_en\n");
+		WARN_ON(1);
+		/* Continue to suspend anyway, as Chaabi is not stable */
+	}
+
+	disable_irq(pdev->irq);
+	drvdata->sep_suspended = 1;
+
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return ret;
+}
+
+static int sep_resume(struct device *dev)
+{
+	int ret = 0;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct sep_drvdata *drvdata =
+	    (struct sep_drvdata *)dev_get_drvdata(dev);
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	ret = pci_enable_device(pdev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "SEP: pci_enable_device failed\n");
+		return ret;
+	}
+
+	drvdata->sep_suspended = 0;
+	enable_irq(pdev->irq);
+
+	ret = dx_sep_power_state_set(DX_SEP_POWER_ACTIVE);
+	WARN(ret, "%s failed! ret = %d\n", __func__, ret);
+
+	/*
+	 * The SeP device might not return to ACTIVE in time. Since the
+	 * device is not stable, return 0 anyway so a failure here does
+	 * not block S3.
+	 */
+	return 0;
+}
+
+static const struct dev_pm_ops sep_pm_ops = {
+	.runtime_suspend = sep_runtime_suspend,
+	.runtime_resume = sep_runtime_resume,
+	.suspend = sep_suspend,
+	.resume = sep_resume,
+};
+#endif /* CONFIG_PM_RUNTIME && SEP_RUNTIME_PM */
+
+/* Field for registering driver to PCI device */
+static struct pci_driver sep_pci_driver = {
+#if defined(CONFIG_PM_RUNTIME) && defined(SEP_RUNTIME_PM)
+	.driver = {
+		.pm = &sep_pm_ops,
+	},
+#endif /* CONFIG_PM_RUNTIME && SEP_RUNTIME_PM */
+	.name = DRIVER_NAME,
+	.id_table = sep_pci_id_tbl,
+	.probe = sep_pci_probe,
+	.remove = sep_pci_remove
+};
+
+static int __init sep_module_init(void)
+{
+	int rc;
+
+	sep_class = class_create(THIS_MODULE, "sep_ctl");
+	if (IS_ERR(sep_class))
+		return PTR_ERR(sep_class);
+
+	/* Register PCI device */
+	rc = pci_register_driver(&sep_pci_driver);
+	if (rc) {
+		class_destroy(sep_class);
+		return rc;
+	}
+
+	return 0;		/*success */
+}
+
+static void __exit sep_module_cleanup(void)
+{
+	pci_unregister_driver(&sep_pci_driver);
+	class_destroy(sep_class);
+}
+
+/* Entry points  */
+module_init(sep_module_init);
+module_exit(sep_module_cleanup);
+/* Module description */
+MODULE_DESCRIPTION("Discretix " DRIVER_NAME " Driver");
+MODULE_VERSION("0.7");
+MODULE_AUTHOR("Discretix");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/sep54/dx_driver.h b/drivers/staging/sep54/dx_driver.h
new file mode 100644
index 0000000..9325d72
--- /dev/null
+++ b/drivers/staging/sep54/dx_driver.h
@@ -0,0 +1,603 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _DX_DRIVER_H_
+#define _DX_DRIVER_H_
+
+#include <generated/autoconf.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/completion.h>
+#include <linux/export.h>
+#include <linux/semaphore.h>
+
+#define DX_CC_HOST_VIRT	/* must be defined before including dx_cc_regs.h */
+#include "dx_cc_regs.h"
+#include "dx_reg_common.h"
+#include "dx_host.h"
+#include "sep_log.h"
+#include "sep_rpc.h"
+#include "crypto_ctx_mgr.h"
+#include "desc_mgr.h"
+#include "lli_mgr.h"
+#include "dx_driver_abi.h"
+#include "dx_dev_defs.h"
+
+/* Control printf's from SeP via GPR.
+ * Keep this macro defined as long as SeP code uses host_printf
+ * (otherwise, SeP would stall waiting for host to ack characters)
+ */
+#define SEP_PRINTF
+/* Note: If the DEBUG macro is undefined, SeP prints will not be printed,
+ * but the host driver will still ack the characters.                 */
+
+#define MODULE_NAME "sep54"
+
+/* PCI ID's */
+#define MRLD_SEP_PCI_DEVICE_ID 0x1198
+
+#define VER_MAJOR(ver)  ((ver) >> 24)
+#define VER_MINOR(ver)  (((ver) >> 16) & 0xFF)
+#define VER_PATCH(ver)  (((ver) >> 8) & 0xFF)
+#define VER_INTERNAL(ver) ((ver) & 0xFF)
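+
+/* Example: a packed version word 0x02010300 decodes via the macros
+ * above as VER_MAJOR=2, VER_MINOR=1, VER_PATCH=3, VER_INTERNAL=0,
+ * i.e., version 2.1.3.
+ */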
+
+#define SECURITY_CFG_ADDR	0xFF03A01C
+#define PWR_DWN_ENB_MASK	0x20
+#define SEP_TIMEOUT		50000
+#define SEP_POWERON_TIMEOUT     10000
+#define SEP_SLEEP_ENABLE 5
+
+#define SEP_AUTOSUSPEND_DELAY 5000
+
+#define INIT_FW_FLAG 0
+#define INIT_SEP_SWQ_FLAG 1
+
+/* GPR that holds SeP state */
+#define SEP_STATE_GPR_OFFSET SEP_HOST_GPR_REG_OFFSET(DX_SEP_STATE_GPR_IDX)
+/* In case of a change in GPR7 (state) we dump also GPR6 */
+#define SEP_STATUS_GPR_OFFSET SEP_HOST_GPR_REG_OFFSET(DX_SEP_STATUS_GPR_IDX)
+
+/* User memref index access macros */
+#define IS_VALID_MEMREF_IDX(idx) \
+	(((idx) >= 0) && ((idx) < MAX_REG_MEMREF_PER_CLIENT_CTX))
+#define INVALIDATE_MEMREF_IDX(idx) ((idx) = DXDI_MEMREF_ID_NULL)
+
+/* Session context access macros - must be invoked with mutex acquired */
+#define SEP_SESSION_ID_INVALID 0xFFFF
+#define IS_VALID_SESSION_CTX(session_ctx_p) \
+	 (((session_ctx_p)->ref_cnt > 0) && \
+	 ((session_ctx_p)->sep_session_id != SEP_SESSION_ID_INVALID))
+#define INVALIDATE_SESSION_CTX(session_ctx_p) do {              \
+	session_ctx_p->sep_session_id = SEP_SESSION_ID_INVALID; \
+	session_ctx_p->ref_cnt = 0;                             \
+} while (0)
+/* Session index access macros */
+/* Index is considered valid even if the pointed session context is not.
+   One should use IS_VALID_SESSION_CTX to verify the validity of the context. */
+#define IS_VALID_SESSION_IDX(idx) \
+	 (((idx) >= 0) && ((idx) < MAX_SEPAPP_SESSION_PER_CLIENT_CTX))
+#define INVALIDATE_SESSION_IDX(idx) ((idx) = DXDI_SEPAPP_SESSION_INVALID)
+
+/*
+   Size of DMA-coherent scratchpad buffer allocated per client_ctx context
+   Currently, this buffer is used for 3 purposes:
+   1. SeP RPC messages.
+   2. SeP Applets messages.
+   3. AES-CCM A0 (prepend) data.
+*/
+#define USER_SPAD_SIZE SEP_RPC_MAX_MSG_SIZE
+#if (SEP_RPC_MAX_MSG_SIZE >= (1 << SEP_SW_DESC_RPC_MSG_HMB_SIZE_BIT_SIZE))
+#error SEP_RPC_MAX_MSG_SIZE too large for HMB_SIZE field
+#endif
+
+/* Get the memref ID/index for given dma_obj (struct user_dma_buffer) */
+#define DMA_OBJ_TO_MEMREF_IDX(client_ctx, the_dma_obj)                 \
+	(container_of(the_dma_obj, struct registered_memref, dma_obj) - \
+		(client_ctx)->reg_memrefs)
+
+/* Crypto context IDs masks (to be used with ctxmgr_sep_cache_invalidate) */
+#define CRYPTO_CTX_ID_SINGLE_MASK 0xFFFFFFFFFFFFFFFFULL
+#define CRYPTO_CTX_ID_CLIENT_SHIFT 32
+#define CRYPTO_CTX_ID_CLIENT_MASK (0xFFFFFFFFULL << CRYPTO_CTX_ID_CLIENT_SHIFT)
+
+/* Return 'true' if val is a multiple of given blk_size */
+/* blk_size must be a power of 2 */
+#define IS_MULT_OF(val, blk_size)  ((val & (blk_size - 1)) == 0)
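+
+/* Example: IS_MULT_OF(48, 16) is true since 48 & 15 == 0, while
+ * IS_MULT_OF(40, 16) is false (40 & 15 == 8). The mask trick only
+ * works when blk_size is a power of 2, since blk_size - 1 must have
+ * all the low-order bits set.
+ */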
+
+struct sep_drvdata;
+
+/**
+ * struct queue_drvdata - Data for a specific SeP queue
+ * @desc_queue:	The associated descriptor queue object
+ * @desc_queue_sequencer: Mutex to assure sequence of operations associated
+ *			  with desc. queue enqueue
+ * @sep_cache:	SeP context cache management object
+ * @dev:	Associated device object
+ * @cdev:	Associated character device
+ * @devt:	Associated device number (major:minor)
+ * @sep_data:	Associated SeP driver context
+ */
+struct queue_drvdata {
+	void *desc_queue;
+	struct mutex desc_queue_sequencer;
+	void *sep_cache;
+	struct device *dev;
+	struct cdev cdev;
+	dev_t devt;
+	struct sep_drvdata *sep_data;
+};
+
+/**
+ * struct sep_drvdata - SeP driver private data context
+ * @mem_start:	phys. address of the control registers
+ * @mem_end:	phys. address of the control registers
+ * @mem_size:	Control registers memory range size (mem_end - mem_start)
+ * @cc_base:	virt address of the CC registers
+ * @irq:	device IRQ number
+ * @irq_mask:	Interrupt mask
+ * @rom_ver:	SeP ROM version
+ * @fw_ver:	SeP loaded firmware version
+ * @icache_size_log2:	Icache memory size in power of 2 bytes
+ * @dcache_size_log2:	Dcache memory size in power of 2 bytes
+ * @icache_pages:	Pages allocated for Icache
+ * @dcache_pages:	Pages allocated for Dcache
+ * @sep_backup_buf_size:	Size of host memory allocated for sep context
+ * @sep_backup_buf:		Buffer allocated for sep context backup
+ * @sep_suspended:	Flag set while the device is suspended
+ * @spad_buf_pool:	DMA pool for RPC messages or scratch pad use
+ * @mlli_table_size:	bytes as set by sepinit_get_fw_props
+ * @llimgr:	LLI-manager object handle
+ * @num_of_desc_queues:	Actual number of (active) queues
+ * @num_of_sep_cache_entries:	Available SeP/FW cache entries
+ * @devt_base:	Allocated char.dev. major/minor (with alloc_chrdev_region)
+ * @dev:	Device context
+ * @queue:	Array of objects for each SeP SW-queue
+ * @last_ack_cntr:	The last counter value ACK'ed over the SEP_PRINTF GPR
+ * @cur_line_buf_offset:	Offset in line_buf
+ * @line_buf:	A buffer to accumulate SEP_PRINTF characters up to EOL
+ * @delegate:	Timer to initiate GPRs polling for interrupt-less system
+ */
+struct sep_drvdata {
+	resource_size_t mem_start;
+	resource_size_t mem_end;
+	resource_size_t mem_size;
+	void __iomem *cc_base;
+	unsigned int irq;
+	u32 irq_mask;
+	u32 rom_ver;
+	u32 fw_ver;
+#ifdef CACHE_IMAGE_NAME
+	u8 icache_size_log2;
+	u8 dcache_size_log2;
+	struct page *icache_pages;
+	struct page *dcache_pages;
+#elif defined(SEP_BACKUP_BUF_SIZE)
+	unsigned long sep_backup_buf_size;
+	void *sep_backup_buf;
+#endif
+	int sep_suspended;
+	struct dma_pool *spad_buf_pool;
+	unsigned long mlli_table_size;
+	void *llimgr;
+	unsigned int num_of_desc_queues;
+	int num_of_sep_cache_entries;
+	dev_t devt_base;
+	struct device *dev;
+	struct queue_drvdata queue[SEP_MAX_NUM_OF_DESC_Q];
+
+#ifdef SEP_PRINTF
+	int last_ack_cntr;
+	int cur_line_buf_offset;
+#define SEP_PRINTF_LINE_SIZE 100
+	char line_buf[SEP_PRINTF_LINE_SIZE + 1];
+#endif
+
+#ifdef SEP_INTERRUPT_BY_TIMER
+	struct timer_list delegate;
+#endif
+};
+
+/* Enumerate the session operational state */
+enum user_op_state {
+	USER_OP_NOP = 0,	/* No operation is in processing */
+	USER_OP_PENDING,	/* Operation waiting to enter the desc. queue */
+	USER_OP_INPROC,		/* Operation is in process       */
+	USER_OP_COMPLETED	/* Operation completed, waiting for "read" */
+};
+
+/* Enumerate the data operation types */
+enum crypto_data_intent {
+	CRYPTO_DATA_NULL = 0,
+	CRYPTO_DATA_TEXT,	/* plain/cipher text */
+	CRYPTO_DATA_TEXT_FINALIZE,
+	CRYPTO_DATA_ADATA,	/* Additional/Associated data for AEAD */
+	CRYPTO_DATA_MAX = CRYPTO_DATA_ADATA,
+};
+
+/* SeP Applet session data */
+struct sep_app_session {
+	struct mutex session_lock;	/* Protect updates in entry */
+	/* Reference count on session (initialized to 1 on opening) */
+	u16 ref_cnt;
+	u16 sep_session_id;
+};
+
+/**
+ * struct registered_memref - Management information for registered memory
+ * @buf_lock:	Mutex on buffer state changes (ref. count, etc.)
+ * @ref_cnt:	Reference count for protecting freeing while in use.
+ * @dma_obj:	The client DMA object container for the registered mem.
+ */
+struct registered_memref {
+	struct mutex buf_lock;
+	unsigned int ref_cnt;
+	struct client_dma_buffer dma_obj;
+};
+
+struct async_ctx_info {
+	struct dxdi_sepapp_params *dxdi_params;
+	struct dxdi_sepapp_kparams *dxdi_kparams;
+	struct sepapp_client_params *sw_desc_params;
+	struct client_dma_buffer *local_dma_objs[SEPAPP_MAX_PARAMS];
+	struct mlli_tables_list mlli_tables[SEPAPP_MAX_PARAMS];
+	int session_id;
+};
+
+/*
+ * struct sep_client_ctx - SeP client application context allocated per each
+ *                         open()
+ * @drv_data:	Associated queue driver context
+ * @qid:	Priority queue ID
+ * @uid_cntr:	Persistent unique ID counter to be used for crypto context UIDs
+ *		allocation
+ * @reg_memrefs:	Registered user DMA memory buffers
+ * @sepapp_sessions:	SeP Applet client sessions
+ * @memref_wq:	Wait queue for memref reference-count changes
+ * @memref_cnt:	Number of registered memrefs currently in use
+ * @memref_lock:	Protects memref accounting
+ */
+struct sep_client_ctx {
+	struct queue_drvdata *drv_data;
+	unsigned int qid;
+	atomic_t uid_cntr;
+	struct registered_memref reg_memrefs[MAX_REG_MEMREF_PER_CLIENT_CTX];
+	struct sep_app_session
+	    sepapp_sessions[MAX_SEPAPP_SESSION_PER_CLIENT_CTX];
+
+	wait_queue_head_t memref_wq;
+	int memref_cnt;
+	struct mutex memref_lock;
+};
+
+/**
+ * sep_op_type - Flags to describe dispatched type of sep_op_ctx
+ * The flags may be combined when applicable (primarily for CRYPTO_OP desc.).
+ * Because operations may be asynchronous, we cannot set the operation type in
+ * crypto context.
+ */
+enum sep_op_type {
+	SEP_OP_NULL = 0,
+	SEP_OP_CRYPTO_INIT = 1,	/* CRYPTO_OP::Init. */
+	SEP_OP_CRYPTO_PROC = (1 << 1),	/* CRYPTO_OP::Process */
+	SEP_OP_CRYPTO_FINI = (1 << 2),	/* CRYPTO_OP::Finalize (integrated) */
+	SEP_OP_RPC = (1 << 3),	/* RPC_MSG */
+	SEP_OP_APP = (1 << 4),	/* APP_REQ */
+	SEP_OP_SLEEP = (1 << 7)	/* SLEEP_REQ */
+};
+
+/*
+ * struct sep_op_ctx - A SeP operation context.
+ * @client_ctx:	The client context associated with this operation
+ * @session_ctx:	For SEP_SW_DESC_TYPE_APP_REQ we need the session context
+ *			(otherwise NULL)
+ * @op_type:	Dispatched operation type (enum sep_op_type flags)
+ * @op_state:	Operation progress state indicator
+ * @ioctl_op_compl:	Operation completion signaling object for IOCTLs
+ *			(updated by desc_mgr on completion)
+ * @comp_work:	Async. completion work. NULL for IOCTLs.
+ * @error_info:	The operation error/return code from SeP (valid on desc.
+ *		completion)
+ * @internal_error:	Mark that return code (error) is from the SeP FW
+ *			infrastructure and not from the requested operation.
+ *			Currently, this is used only for Applet Manager errors.
+ * @ctx_info:	Current Context. If there are more than one context (such as
+ *		in combined alg.) use (&ctx_info)[] into _ctx_info[].
+ * @_ctx_info:	Extension of ctx_info for additional contexts associated with
+ *		current operation (combined op.)
+ * @ctx_info_num:	number of active ctx_info in (&ctx_info)[]
+ * @pending_descs_cntr:	Pending SW descriptor associated with this operation.
+ *			(Number of descriptor completions required to complete
+ *			this operation)
+ * @backlog_descs_cntr:	Descriptors of this operation enqueued in the backlog q.
+ * @ift:	Input data MLLI table object
+ * @oft:	Output data MLLI table object
+ * @din_dma_obj:	Temporary user memory registration for input data (IFT)
+ * @dout_dma_obj:	Temporary user memory registration for output data (OFT)
+ * @spad_buf_p:	Scratchpad DMA buffer for different temp. buffers required
+ *		during a specific operation: (allocated from rpc_msg_pool)
+ *		- SeP RPC message buffer         or
+ *		- AES-CCM A0 scratchpad buffers  or
+ *		- Next IV for AES-CBC, AES-CTR, DES-CBC
+ * @spad_buf_dma_addr:	DMA address of spad_buf_p
+ * @next_hash_blk_tail_size:	The tail of data_in which is a remainder of a
+ *				block size. We use this info to copy the
+ *				remainder data to the context after block
+ *				processing completion (saved from
+ *				prepare_data_for_sep).
+ * @async_info:	Bookkeeping for asynchronous (Applet) operations
+ *
+ * Retains the operation status and associated resources while operation in
+ * progress. This object provides threads concurrency support since each thread
+ * may work on different instance of this object, within the scope of the same
+ * client (process) context.
+*/
+struct sep_op_ctx {
+	struct sep_client_ctx *client_ctx;
+	struct sep_app_session *session_ctx;
+	enum sep_op_type op_type;
+	enum user_op_state op_state;
+	struct completion ioctl_op_compl;
+	struct work_struct *comp_work;
+	u32 error_info;
+	bool internal_error;
+	struct client_crypto_ctx_info ctx_info;
+	struct client_crypto_ctx_info _ctx_info[DXDI_COMBINED_NODES_MAX - 1];
+	u8 ctx_info_num;
+	u8 pending_descs_cntr;
+	u8 backlog_descs_cntr;
+	/* Client memory resources for (sym.) crypto-ops */
+	struct mlli_tables_list ift;
+	struct mlli_tables_list oft;
+	struct client_dma_buffer din_dma_obj;
+	struct client_dma_buffer dout_dma_obj;
+	void *spad_buf_p;
+	dma_addr_t spad_buf_dma_addr;
+
+	struct async_ctx_info async_info;
+};
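+
+/*
+ * Example (illustrative): the extra contexts of a combined operation are
+ * reached by indexing off &ctx_info, which _ctx_info[] immediately
+ * follows in memory:
+ *
+ *	struct client_crypto_ctx_info *info_p = &op_ctx->ctx_info;
+ *	int i;
+ *
+ *	for (i = 0; i < op_ctx->ctx_info_num; i++)
+ *		(process info_p[i])
+ */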
+
+/***************************/
+/* SeP registers access    */
+/***************************/
+/* "Raw" read version to be used in IS_CC_BUS_OPEN and in READ_REGISTER */
+#define _READ_REGISTER(_addr) __raw_readl(        \
+	(const volatile void __iomem *)(_addr))
+/* The device register space is considered accessible when the expected
+   device signature value is read from the HOST_CC_SIGNATURE register   */
+#define IS_CC_BUS_OPEN(drvdata)                                           \
+	(_READ_REGISTER(DX_CC_REG_ADDR(drvdata->cc_base,                  \
+		CRY_KERNEL, HOST_CC_SIGNATURE)) == DX_DEV_SIGNATURE)
+/* FIXME: Temporary workaround for a HW problem with register reads - double-read */
+#define READ_REGISTER(_addr) \
+	({(void)_READ_REGISTER(_addr); _READ_REGISTER(_addr); })
+#define WRITE_REGISTER(_addr, _data)  __raw_writel(_data, \
+	(volatile void __iomem *)(_addr))
+#define GET_SEP_STATE(drvdata)                                           \
+	(IS_CC_BUS_OPEN(drvdata) ?                                       \
+		READ_REGISTER(drvdata->cc_base + SEP_STATE_GPR_OFFSET) : \
+		DX_SEP_STATE_OFF)
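+
+/*
+ * Usage sketch (illustrative): polling the SeP state through the
+ * accessors above; "drvdata" is assumed to be the SeP device context
+ * holding the cc_base mapping.
+ *
+ *	u32 state = GET_SEP_STATE(drvdata);
+ *
+ *	if (state == DX_SEP_STATE_OFF)
+ *		pr_warn("SeP register space is not accessible\n");
+ */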
+
+#ifdef DEBUG
+void dump_byte_array(const char *name, const u8 *the_array,
+		     unsigned long size);
+void dump_word_array(const char *name, const u32 *the_array,
+		     unsigned long size_in_words);
+#else
+#define dump_byte_array(name, the_array, size) do {} while (0)
+#define dump_word_array(name, the_array, size_in_words) do {} while (0)
+#endif
+
+
+/**
+ * alloc_crypto_ctx_id() - Allocate unique ID for crypto context
+ * @client_ctx:	 The client context object
+ *
+ */
+static inline struct crypto_ctx_uid alloc_crypto_ctx_id(
+	struct sep_client_ctx *client_ctx)
+{
+	struct crypto_ctx_uid uid;
+
+	/* Assuming a 32 bit atomic counter is large enough to never wrap
+	 * during the lifetime of a process...
+	 * Someone may laugh (or cry) at this one day */
+#ifdef DEBUG
+	if (atomic_read(&client_ctx->uid_cntr) == 0xFFFFFFFF) {
+		pr_err("uid_cntr overflow for client_ctx=%p\n",
+			    client_ctx);
+		BUG();
+	}
+#endif
+
+	uid.addr = (uintptr_t)client_ctx;
+	uid.cntr = (u32)atomic_inc_return(&client_ctx->uid_cntr);
+
+	return uid;
+}
+
+/**
+ * op_ctx_init() - Initialize an operation context
+ * @op_ctx:	 The allocated struct sep_op_ctx (may be on caller's stack)
+ * @client_ctx:	 The "parent" client context
+ *
+ */
+static inline void op_ctx_init(struct sep_op_ctx *op_ctx,
+			       struct sep_client_ctx *client_ctx)
+{
+	int i;
+	struct client_crypto_ctx_info *ctx_info_p = &(op_ctx->ctx_info);
+
+	pr_debug("op_ctx=%p\n", op_ctx);
+	memset(op_ctx, 0, sizeof(struct sep_op_ctx));
+	op_ctx->client_ctx = client_ctx;
+	op_ctx->ctx_info_num = 1;	/* assume a single-context operation */
+	op_ctx->pending_descs_cntr = 1;	/* assume a single desc. transaction */
+	init_completion(&(op_ctx->ioctl_op_compl));
+	pr_debug("after init_completion\n");
+	MLLI_TABLES_LIST_INIT(&(op_ctx->ift));
+	MLLI_TABLES_LIST_INIT(&(op_ctx->oft));
+	pr_debug("MLLI_TABLES_LIST_INIT\n");
+	for (i = 0; i < DXDI_COMBINED_NODES_MAX; i++, ctx_info_p++)
+		USER_CTX_INFO_INIT(ctx_info_p);
+	pr_debug("USER_CTX_INFO_INIT(ctx_info_p)\n");
+}
+
+/**
+ * op_ctx_fini() - Finalize op_ctx (free associated resources before freeing
+ *		memory)
+ * @op_ctx:	The op_ctx initialized with op_ctx_init
+ *
+ * Returns void
+ */
+static inline void op_ctx_fini(struct sep_op_ctx *op_ctx)
+{
+	pr_debug("op_ctx=%p\n", op_ctx);
+	if (op_ctx->spad_buf_p != NULL)
+		dma_pool_free(op_ctx->client_ctx->drv_data->sep_data->
+			      spad_buf_pool, op_ctx->spad_buf_p,
+			      op_ctx->spad_buf_dma_addr);
+
+	delete_context((uintptr_t)op_ctx);
+	memset(op_ctx, 0, sizeof(struct sep_op_ctx));
+}
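+
+/*
+ * Typical lifecycle sketch (illustrative; descriptor dispatch elided):
+ * an op_ctx may live on the caller's stack for the duration of a single
+ * synchronous operation.
+ *
+ *	struct sep_op_ctx op_ctx;
+ *
+ *	op_ctx_init(&op_ctx, client_ctx);
+ *	(enqueue the descriptor(s) of this operation)
+ *	rc = wait_for_sep_op_result(&op_ctx);
+ *	op_ctx_fini(&op_ctx);
+ */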
+
+/**
+ * init_client_ctx() - Initialize a given client context object
+ * @drvdata:	Queue driver context
+ * @client_ctx:	The client context to initialize
+ *
+ * Returns void
+ */
+void init_client_ctx(struct queue_drvdata *drvdata,
+		     struct sep_client_ctx *client_ctx);
+
+void cleanup_client_ctx(struct queue_drvdata *drvdata,
+			struct sep_client_ctx *client_ctx);
+
+/**
+ * register_client_memref() - Register given client memory buffer reference
+ * @client_ctx:		User context data
+ * @user_buf_ptr:	Buffer address in user space. NULL if sgl!=NULL.
+ * @sgl:		Scatter/gather list (for kernel buffers)
+ *			NULL if user_buf_ptr!=NULL.
+ * @buf_size:		Buffer size in bytes
+ * @dma_direction:	DMA direction
+ *
+ * Returns int >= 0: the registered memory reference ID, <0 for error
+ */
+int register_client_memref(struct sep_client_ctx *client_ctx,
+			   u8 __user *user_buf_ptr,
+			   struct scatterlist *sgl,
+			   const unsigned long buf_size,
+			   const enum dma_data_direction dma_direction);
+
+/**
+ * free_client_memref() - Free resources associated with a client mem. reference
+ * @client_ctx:	 User context data
+ * @memref_idx:	 Index of the user memory reference
+ *
+ * Free resources associated with a user memory reference
+ * (The referenced memory may be locked user pages or allocated DMA-coherent
+ *  memory mmap'ed to the user space)
+ * Returns int !0 on failure (memref still in use or unknown)
+ */
+int free_client_memref(struct sep_client_ctx *client_ctx,
+		       int memref_idx);
+
+/**
+ * acquire_dma_obj() - Get the memref object of given memref_idx and increment
+ *			its reference count
+ * @client_ctx:	Associated client context
+ * @memref_idx:	Required registered memory reference ID (index)
+ *
+ * The returned object must be released by invoking release_dma_obj() before
+ * the object (memref) may be freed.
+ * Returns struct client_dma_buffer * - the memref object, or NULL if invalid
+ */
+struct client_dma_buffer *acquire_dma_obj(struct sep_client_ctx *client_ctx,
+					  int memref_idx);
+
+/**
+ * release_dma_obj() - Release a memref object taken with acquire_dma_obj()
+ *			(does not free it!)
+ * @client_ctx:	Associated client context
+ * @dma_obj:	The DMA object returned from acquire_dma_obj()
+ *
+ * Returns void
+ */
+void release_dma_obj(struct sep_client_ctx *client_ctx,
+		     struct client_dma_buffer *dma_obj);
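+
+/*
+ * Usage sketch (illustrative): a registered memref is acquired before use
+ * and released afterwards; free_client_memref() fails while the memref is
+ * still acquired.
+ *
+ *	struct client_dma_buffer *dma_obj;
+ *
+ *	dma_obj = acquire_dma_obj(client_ctx, memref_idx);
+ *	if (dma_obj == NULL)
+ *		return -EINVAL;
+ *	(use dma_obj for DMA setup)
+ *	release_dma_obj(client_ctx, dma_obj);
+ */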
+
+/**
+ * dxdi_data_dir_to_dma_data_dir() - Convert from DxDI DMA direction type to
+ *					Linux kernel DMA direction type
+ * @dxdi_dir:	 DMA direction in DxDI encoding
+ *
+ * Returns enum dma_data_direction
+ */
+enum dma_data_direction dxdi_data_dir_to_dma_data_dir(enum dxdi_data_direction
+						      dxdi_dir);
+
+/**
+ * wait_for_sep_op_result() - Wait for an outstanding SeP operation to
+ *				complete and fetch the SeP return code
+ * @op_ctx:	The operation context
+ *
+ * Wait for the outstanding SeP operation to complete and fetch the SeP
+ * return code into op_ctx->error_info.
+ * Returns int
+ */
+int wait_for_sep_op_result(struct sep_op_ctx *op_ctx);
+
+/**
+ * crypto_op_completion_cleanup() - Cleanup CRYPTO_OP descriptor operation
+ *					resources after completion
+ * @op_ctx:	The operation context
+ *
+ * Returns int
+ */
+int crypto_op_completion_cleanup(struct sep_op_ctx *op_ctx);
+
+
+/*!
+ * IOCTL entry point
+ *
+ * \param filp
+ * \param cmd
+ * \param arg
+ *
+ * \return int
+ * \retval 0 Operation succeeded (but SeP return code may indicate an error)
+ * \retval -ENOTTY  : Unknown IOCTL command
+ * \retval -ENOSYS  : Unsupported/not-implemented (known) operation
+ * \retval -EINVAL  : Invalid parameters
+ * \retval -EFAULT  : Bad pointers for given user memory space
+ * \retval -EPERM   : Not enough permissions for given command
+ * \retval -ENOMEM,-EAGAIN: when not enough resources are available for given op.
+ * \retval -EIO     : SeP HW error or another internal error
+ *                    (probably operation timed out or unexpected behavior)
+ */
+long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
+#endif				/* _DX_DRIVER_H_ */
diff --git a/drivers/staging/sep54/dx_driver_abi.h b/drivers/staging/sep54/dx_driver_abi.h
new file mode 100644
index 0000000..3996375
--- /dev/null
+++ b/drivers/staging/sep54/dx_driver_abi.h
@@ -0,0 +1,656 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef __SEP_DRIVER_ABI_H__
+#define __SEP_DRIVER_ABI_H__
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#ifndef INT32_MAX
+#define INT32_MAX 0x7FFFFFFFL
+#endif
+#else
+/* For inclusion in user space library */
+#include <stdint.h>
+#endif
+
+#include <linux/ioctl.h>
+#include <linux/errno.h>
+#include "sep_rpc.h"
+
+/* Proprietary error code for unexpected internal errors */
+#define EBUG 999
+
+/****************************/
+/**** IOCTL return codes ****/
+/*****************************************************************************
+ ENOTTY  : Unknown IOCTL command					     *
+ ENOSYS  : Unsupported/not-implemented (known) operation		     *
+ EINVAL  : Invalid parameters                                                *
+ EFAULT  : Bad pointers for given user memory space                          *
+ EPERM   : Not enough permissions for given command                          *
+ ENOMEM,EAGAIN: when not enough resources are available for given op.        *
+ EIO     : SeP HW error or another internal error (probably operation timed  *
+	   out or unexpected behavior)                                       *
+ EBUG    : Driver bug found ("assertion") - see system log                   *
+*****************************************************************************/
+
+/****** IOCTL commands ********/
+/* The magic number appears free in Documentation/ioctl/ioctl-number.txt */
+#define DXDI_IOC_MAGIC 0xD1
+
+/* IOCTL ordinal numbers */
+/*(for backward compatibility, add new ones only at end of list!) */
+enum dxdi_ioc_nr {
+	/* Version info. commands */
+	DXDI_IOC_NR_GET_VER_MAJOR = 0,
+	DXDI_IOC_NR_GET_VER_MINOR = 1,
+	/* Context size queries */
+	DXDI_IOC_NR_GET_SYMCIPHER_CTX_SIZE = 2,
+	DXDI_IOC_NR_GET_AUTH_ENC_CTX_SIZE = 3,
+	DXDI_IOC_NR_GET_MAC_CTX_SIZE = 4,
+	DXDI_IOC_NR_GET_HASH_CTX_SIZE = 5,
+	/* Init context commands */
+	DXDI_IOC_NR_SYMCIPHER_INIT = 7,
+	DXDI_IOC_NR_AUTH_ENC_INIT = 8,
+	DXDI_IOC_NR_MAC_INIT = 9,
+	DXDI_IOC_NR_HASH_INIT = 10,
+	/* Processing commands */
+	DXDI_IOC_NR_PROC_DBLK = 12,
+	DXDI_IOC_NR_FIN_PROC = 13,
+	/* "Integrated" processing operations */
+	DXDI_IOC_NR_SYMCIPHER_PROC = 14,
+	DXDI_IOC_NR_AUTH_ENC_PROC = 15,
+	DXDI_IOC_NR_MAC_PROC = 16,
+	DXDI_IOC_NR_HASH_PROC = 17,
+	/* SeP RPC */
+	DXDI_IOC_NR_SEP_RPC = 19,
+	/* Memory registration */
+	DXDI_IOC_NR_REGISTER_MEM4DMA = 20,
+	DXDI_IOC_NR_ALLOC_MEM4DMA = 21,
+	DXDI_IOC_NR_FREE_MEM4DMA = 22,
+	/* SeP Applets API */
+	DXDI_IOC_NR_SEPAPP_SESSION_OPEN = 23,
+	DXDI_IOC_NR_SEPAPP_SESSION_CLOSE = 24,
+	DXDI_IOC_NR_SEPAPP_COMMAND_INVOKE = 25,
+	/* Combined mode */
+	DXDI_IOC_NR_COMBINED_INIT = 26,
+	DXDI_IOC_NR_COMBINED_PROC_DBLK = 27,
+	DXDI_IOC_NR_COMBINED_PROC_FIN = 28,
+	DXDI_IOC_NR_COMBINED_PROC = 29,
+
+	/* AES IV set/get API */
+	DXDI_IOC_NR_SET_IV = 30,
+	DXDI_IOC_NR_GET_IV = 31,
+	DXDI_IOC_NR_MAX = DXDI_IOC_NR_GET_IV
+};
+
+/* In case the error is not DXDI_RET_ESEP, these are the
+ * errors embedded in the "error_info" field */
+enum dxdi_error_info {
+	DXDI_ERROR_NULL = 0,
+	DXDI_ERROR_BAD_CTX = 1,
+	DXDI_ERROR_UNSUP = 2,
+	DXDI_ERROR_INVAL_MODE = 3,
+	DXDI_ERROR_INVAL_DIRECTION = 4,
+	DXDI_ERROR_INVAL_KEY_SIZE = 5,
+	DXDI_ERROR_INVAL_NONCE_SIZE = 6,
+	DXDI_ERROR_INVAL_TAG_SIZE = 7,
+	DXDI_ERROR_INVAL_DIN_PTR = 8,
+	DXDI_ERROR_INVAL_DOUT_PTR = 9,
+	DXDI_ERROR_INVAL_DATA_SIZE = 10,
+	DXDI_ERROR_DIN_DOUT_OVERLAP = 11,
+	DXDI_ERROR_INTERNAL = 12,
+	DXDI_ERROR_NO_RESOURCE = 13,
+	DXDI_ERROR_FATAL = 14,
+	DXDI_ERROR_INFO_RESERVE32B = INT32_MAX
+};
+
+/* ABI Version info. */
+#define DXDI_VER_MAJOR 1
+#define DXDI_VER_MINOR DXDI_IOC_NR_MAX
+
+/******************************/
+/* IOCTL commands definitions */
+/******************************/
+/* Version info. commands */
+#define DXDI_IOC_GET_VER_MAJOR _IOR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_GET_VER_MAJOR, u32)
+#define DXDI_IOC_GET_VER_MINOR _IOR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_GET_VER_MINOR, u32)
+/* Context size queries */
+#define DXDI_IOC_GET_SYMCIPHER_CTX_SIZE _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_GET_SYMCIPHER_CTX_SIZE,\
+		struct dxdi_get_sym_cipher_ctx_size_params)
+#define DXDI_IOC_GET_AUTH_ENC_CTX_SIZE _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_GET_AUTH_ENC_CTX_SIZE,\
+		struct dxdi_get_auth_enc_ctx_size_params)
+#define DXDI_IOC_GET_MAC_CTX_SIZE _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_GET_MAC_CTX_SIZE,\
+		struct dxdi_get_mac_ctx_size_params)
+#define DXDI_IOC_GET_HASH_CTX_SIZE _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_GET_HASH_CTX_SIZE,\
+		struct dxdi_get_hash_ctx_size_params)
+/* Init. Sym. Crypto. */
+#define DXDI_IOC_SYMCIPHER_INIT _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_SYMCIPHER_INIT, struct dxdi_sym_cipher_init_params)
+#define DXDI_IOC_AUTH_ENC_INIT _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_AUTH_ENC_INIT, struct dxdi_auth_enc_init_params)
+#define DXDI_IOC_MAC_INIT _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_MAC_INIT, struct dxdi_mac_init_params)
+#define DXDI_IOC_HASH_INIT _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_HASH_INIT, struct dxdi_hash_init_params)
+
+/* Sym. Crypto. Processing commands */
+#define DXDI_IOC_PROC_DBLK _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_PROC_DBLK, struct dxdi_process_dblk_params)
+#define DXDI_IOC_FIN_PROC _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_FIN_PROC, struct dxdi_fin_process_params)
+
+/* Integrated Sym. Crypto. */
+#define DXDI_IOC_SYMCIPHER_PROC _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_SYMCIPHER_PROC, struct dxdi_sym_cipher_proc_params)
+#define DXDI_IOC_AUTH_ENC_PROC _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_AUTH_ENC_PROC, struct dxdi_auth_enc_proc_params)
+#define DXDI_IOC_MAC_PROC _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_MAC_PROC, struct dxdi_mac_proc_params)
+#define DXDI_IOC_HASH_PROC _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_HASH_PROC, struct dxdi_hash_proc_params)
+
+/* AES Initial Vector set/get */
+#define DXDI_IOC_SET_IV _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_SET_IV, struct dxdi_aes_iv_params)
+#define DXDI_IOC_GET_IV _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_GET_IV, struct dxdi_aes_iv_params)
+
+/* Combined mode  */
+#define DXDI_IOC_COMBINED_INIT _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_COMBINED_INIT,\
+		struct dxdi_combined_init_params)
+#define DXDI_IOC_COMBINED_PROC_DBLK _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_COMBINED_PROC_DBLK,\
+		struct dxdi_combined_proc_dblk_params)
+#define DXDI_IOC_COMBINED_PROC_FIN _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_COMBINED_PROC_FIN,\
+		struct dxdi_combined_proc_params)
+#define DXDI_IOC_COMBINED_PROC _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_COMBINED_PROC,\
+		struct dxdi_combined_proc_params)
+
+/* SeP RPC */
+#define DXDI_IOC_SEP_RPC _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_SEP_RPC, struct dxdi_sep_rpc_params)
+/* Memory registration */
+#define DXDI_IOC_REGISTER_MEM4DMA _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_REGISTER_MEM4DMA, \
+		struct dxdi_register_mem4dma_params)
+#define DXDI_IOC_ALLOC_MEM4DMA _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_ALLOC_MEM4DMA, \
+		struct dxdi_alloc_mem4dma_params)
+#define DXDI_IOC_FREE_MEM4DMA _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_FREE_MEM4DMA, \
+		struct dxdi_free_mem4dma_params)
+/* SeP Applets API */
+#define DXDI_IOC_SEPAPP_SESSION_OPEN _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_SEPAPP_SESSION_OPEN, \
+		struct dxdi_sepapp_session_open_params)
+#define DXDI_IOC_SEPAPP_SESSION_CLOSE _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_SEPAPP_SESSION_CLOSE, \
+		struct dxdi_sepapp_session_close_params)
+#define DXDI_IOC_SEPAPP_COMMAND_INVOKE _IOWR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_SEPAPP_COMMAND_INVOKE, \
+		struct dxdi_sepapp_command_invoke_params)
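+
+/*
+ * User-space usage sketch (illustrative; the device node path below is an
+ * assumption - it is not defined in this header):
+ *
+ *	u32 ver_major;
+ *	int fd = open("/dev/dx_sep_q0", O_RDWR);
+ *
+ *	if (fd >= 0 && ioctl(fd, DXDI_IOC_GET_VER_MAJOR, &ver_major) == 0)
+ *		printf("DXDI ABI major version: %u\n", ver_major);
+ */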
+
+/*** ABI constants ***/
+/* Max. symmetric crypto key size (512b) */
+#define DXDI_SYM_KEY_SIZE_MAX 64	/*octets */
+/* Max. MAC key size (applicable to HMAC-SHA512) */
+#define DXDI_MAC_KEY_SIZE_MAX 128	/*octets */
+/* AES IV/Counter size (128b) */
+#define DXDI_AES_BLOCK_SIZE 16	/*octets */
+/* DES IV size (64b) */
+#define DXDI_DES_BLOCK_SIZE 8	/*octets */
+/* Max. Nonce size */
+#define DXDI_NONCE_SIZE_MAX 16	/*octets */
+/* Max. digest size */
+#define DXDI_DIGEST_SIZE_MAX 64	/*octets */
+/* Max. nodes */
+#define DXDI_COMBINED_NODES_MAX 4
+#define DXDI_AES_IV_SIZE DXDI_AES_BLOCK_SIZE
+
+/*** ABI data types ***/
+
+enum dxdi_cipher_direction {
+	DXDI_CDIR_ENC = 0,
+	DXDI_CDIR_DEC = 1,
+	DXDI_CDIR_MAX = DXDI_CDIR_DEC,
+	DXDI_CDIR_RESERVE32B = INT32_MAX
+};
+
+enum dxdi_sym_cipher_type {
+	DXDI_SYMCIPHER_NONE = 0,
+	_DXDI_SYMCIPHER_AES_FIRST = 1,
+	DXDI_SYMCIPHER_AES_XXX = _DXDI_SYMCIPHER_AES_FIRST,
+	DXDI_SYMCIPHER_AES_ECB = _DXDI_SYMCIPHER_AES_FIRST + 1,
+	DXDI_SYMCIPHER_AES_CBC = _DXDI_SYMCIPHER_AES_FIRST + 2,
+	DXDI_SYMCIPHER_AES_CTR = _DXDI_SYMCIPHER_AES_FIRST + 3,
+	DXDI_SYMCIPHER_AES_XTS = _DXDI_SYMCIPHER_AES_FIRST + 4,
+	_DXDI_SYMCIPHER_AES_LAST = DXDI_SYMCIPHER_AES_XTS,
+	_DXDI_SYMCIPHER_DES_FIRST = 0x11,
+	DXDI_SYMCIPHER_DES_ECB = _DXDI_SYMCIPHER_DES_FIRST,
+	DXDI_SYMCIPHER_DES_CBC = _DXDI_SYMCIPHER_DES_FIRST + 1,
+	_DXDI_SYMCIPHER_DES_LAST = DXDI_SYMCIPHER_DES_CBC,
+	_DXDI_SYMCIPHER_C2_FIRST = 0x21,
+	DXDI_SYMCIPHER_C2_ECB = _DXDI_SYMCIPHER_C2_FIRST,
+	DXDI_SYMCIPHER_C2_CBC = _DXDI_SYMCIPHER_C2_FIRST + 1,
+	_DXDI_SYMCIPHER_C2_LAST = DXDI_SYMCIPHER_C2_CBC,
+	DXDI_SYMCIPHER_RC4 = 0x31,	/* Supported in message API only */
+	DXDI_SYMCIPHER_RESERVE32B = INT32_MAX
+};
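+
+/*
+ * Example (illustrative): the _FIRST/_LAST markers allow range checks,
+ * e.g. testing whether a given cipher type is an AES variant:
+ *
+ *	if ((cipher_type >= _DXDI_SYMCIPHER_AES_FIRST) &&
+ *	    (cipher_type <= _DXDI_SYMCIPHER_AES_LAST))
+ *		(AES-specific handling)
+ */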
+
+enum dxdi_auth_enc_type {
+	DXDI_AUTHENC_NONE = 0,
+	DXDI_AUTHENC_AES_CCM = 1,
+	DXDI_AUTHENC_AES_GCM = 2,
+	DXDI_AUTHENC_MAX = DXDI_AUTHENC_AES_GCM,
+	DXDI_AUTHENC_RESERVE32B = INT32_MAX
+};
+
+enum dxdi_mac_type {
+	DXDI_MAC_NONE = 0,
+	DXDI_MAC_HMAC = 1,
+	DXDI_MAC_AES_MAC = 2,
+	DXDI_MAC_AES_CMAC = 3,
+	DXDI_MAC_AES_XCBC_MAC = 4,
+	DXDI_MAC_MAX = DXDI_MAC_AES_XCBC_MAC,
+	DXDI_MAC_RESERVE32B = INT32_MAX
+};
+
+enum dxdi_hash_type {
+	DXDI_HASH_NONE = 0,
+	DXDI_HASH_MD5 = 1,
+	DXDI_HASH_SHA1 = 2,
+	DXDI_HASH_SHA224 = 3,
+	DXDI_HASH_SHA256 = 4,
+	DXDI_HASH_SHA384 = 5,
+	DXDI_HASH_SHA512 = 6,
+	DXDI_HASH_MAX = DXDI_HASH_SHA512,
+	DXDI_HASH_RESERVE32B = INT32_MAX
+};
+
+enum dxdi_data_block_type {
+	DXDI_DATA_TYPE_NULL = 0,
+	DXDI_DATA_TYPE_TEXT = 1,/* Plain/cipher text */
+	DXDI_DATA_TYPE_ADATA = 2,/* Additional/Associated data for AEAD */
+	DXDI_DATA_TYPE_MAX = DXDI_DATA_TYPE_ADATA,
+	DXDI_DATA_TYPE_RESERVE32B = INT32_MAX
+};
+
+enum dxdi_input_engine_type {
+	DXDI_INPUT_NULL = 0,	/* no input */
+	DXDI_INPUT_ENGINE_1 = 1,
+	DXDI_INPUT_ENGINE_2 = 2,
+	DXDI_INPUT_DIN = 15,	/* input from DIN */
+	DXDI_INPUT_ENGINE_RESERVE32B = INT32_MAX,
+};
+
+#pragma pack(push)
+#pragma pack(4) /* Force to 32 bit alignment */
+/* Properties of specific ciphers */
+/* (for use in alg_specific union of dxdi_cipher_props) */
+struct dxdi_des_cbc_props {
+	u8 iv[DXDI_DES_BLOCK_SIZE];
+};
+struct dxdi_aes_cbc_props {
+	u8 iv[DXDI_AES_BLOCK_SIZE];
+};
+struct dxdi_aes_ctr_props {
+	u8 cntr[DXDI_AES_BLOCK_SIZE];
+};
+struct dxdi_aes_xts_props {
+	u8 init_tweak[DXDI_AES_BLOCK_SIZE];
+	u32 data_unit_size;
+};
+struct dxdi_c2_cbc_props {
+	u32 reset_interval;
+};
+
+struct dxdi_sym_cipher_props {
+	enum dxdi_sym_cipher_type cipher_type;
+	enum dxdi_cipher_direction direction;
+	u8 key_size;	/* In octets */
+	u8 key[DXDI_SYM_KEY_SIZE_MAX];
+	union {			/* cipher specific properties */
+		struct dxdi_des_cbc_props des_cbc;
+		struct dxdi_aes_cbc_props aes_cbc;
+		struct dxdi_aes_ctr_props aes_ctr;
+		struct dxdi_aes_xts_props aes_xts;
+		struct dxdi_c2_cbc_props c2_cbc;
+		u32 __assure_32b_union_alignment;
+		/* Reserve space for future extension? */
+	} alg_specific;
+};
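+
+/*
+ * Example (illustrative): filling the properties for an AES-128-CBC
+ * encryption; "key" and "iv" are caller-provided buffers.
+ *
+ *	struct dxdi_sym_cipher_props props = {0};
+ *
+ *	props.cipher_type = DXDI_SYMCIPHER_AES_CBC;
+ *	props.direction = DXDI_CDIR_ENC;
+ *	props.key_size = 16;
+ *	memcpy(props.key, key, 16);
+ *	memcpy(props.alg_specific.aes_cbc.iv, iv, DXDI_AES_BLOCK_SIZE);
+ */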
+
+struct dxdi_auth_enc_props {
+	enum dxdi_auth_enc_type ae_type;
+	enum dxdi_cipher_direction direction;
+	u32 adata_size;	/* In octets */
+	u32 text_size;	/* In octets */
+	u8 key_size;	/* In octets */
+	u8 nonce_size;	/* In octets */
+	u8 tag_size;	/* In octets */
+	u8 key[DXDI_SYM_KEY_SIZE_MAX];
+	u8 nonce[DXDI_NONCE_SIZE_MAX];
+};
+
+/* Properties specific for HMAC */
+/* (for use in properties union of dxdi_mac_props) */
+struct dxdi_hmac_props {
+	enum dxdi_hash_type hash_type;
+};
+
+struct dxdi_aes_mac_props {
+	u8 iv[DXDI_AES_BLOCK_SIZE];
+};
+
+struct dxdi_mac_props {
+	enum dxdi_mac_type mac_type;
+	u32 key_size;	/* In octets */
+	u8 key[DXDI_MAC_KEY_SIZE_MAX];
+	union {			/* Union of algorithm specific properties */
+		struct dxdi_hmac_props hmac;
+		struct dxdi_aes_mac_props aes_mac;
+		u32 __assure_32b_union_alignment;
+		/* Reserve space for future extension? */
+	} alg_specific;
+};
+
+/* Combined mode props */
+struct dxdi_combined_node_props {
+	u32 *context;
+	enum dxdi_input_engine_type eng_input;
+};
+
+struct dxdi_combined_props {
+	struct dxdi_combined_node_props node_props[DXDI_COMBINED_NODES_MAX];
+};
+
+/*** IOCTL commands parameters structures ***/
+
+struct dxdi_get_sym_cipher_ctx_size_params {
+	enum dxdi_sym_cipher_type sym_cipher_type;	/*[in] */
+	u32 ctx_size;	/*[out] */
+};
+
+struct dxdi_get_auth_enc_ctx_size_params {
+	enum dxdi_auth_enc_type ae_type;	/*[in] */
+	u32 ctx_size;	/*[out] */
+};
+
+struct dxdi_get_mac_ctx_size_params {
+	enum dxdi_mac_type mac_type;	/*[in] */
+	u32 ctx_size;	/*[out] */
+};
+
+struct dxdi_get_hash_ctx_size_params {
+	enum dxdi_hash_type hash_type;	/*[in] */
+	u32 ctx_size;	/*[out] */
+};
+
+/* Init params */
+struct dxdi_sym_cipher_init_params {
+	u32 *context_buf;	/*[in] */
+	struct dxdi_sym_cipher_props props;	/*[in] */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_auth_enc_init_params {
+	u32 *context_buf;	/*[in] */
+	struct dxdi_auth_enc_props props;	/*[in] */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_mac_init_params {
+	u32 *context_buf;	/*[in] */
+	struct dxdi_mac_props props;	/*[in] */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_hash_init_params {
+	u32 *context_buf;	/*[in] */
+	enum dxdi_hash_type hash_type;	/*[in] */
+	u32 error_info;	/*[out] */
+};
+
+/* Processing params */
+struct dxdi_process_dblk_params {
+	u32 *context_buf;	/*[in] */
+	u8 *data_in;	/*[in] */
+	u8 *data_out;	/*[in] */
+	enum dxdi_data_block_type data_block_type;	/*[in] */
+	u32 data_in_size;	/*[in] */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_fin_process_params {
+	u32 *context_buf;	/*[in] */
+	u8 *data_in;	/*[in] */
+	u8 *data_out;	/*[in] */
+	u32 data_in_size;	/*[in] (octets) */
+	u8 digest_or_mac[DXDI_DIGEST_SIZE_MAX];	/*[out] */
+	u8 digest_or_mac_size;	/*[out] (octets) */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_sym_cipher_proc_params {
+	u32 *context_buf;	/*[in] */
+	struct dxdi_sym_cipher_props props;	/*[in] */
+	u8 *data_in;	/*[in] */
+	u8 *data_out;	/*[in] */
+	u32 data_in_size;	/*[in] (octets) */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_auth_enc_proc_params {
+	u32 *context_buf;	/*[in] */
+	struct dxdi_auth_enc_props props;	/*[in] */
+	u8 *adata;		/*[in] */
+	u8 *text_data;	/*[in] */
+	u8 *data_out;	/*[in] */
+	u8 tag[DXDI_DIGEST_SIZE_MAX];	/*[out] */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_mac_proc_params {
+	u32 *context_buf;	/*[in] */
+	struct dxdi_mac_props props;	/*[in] */
+	u8 *data_in;	/*[in] */
+	u32 data_in_size;	/*[in] (octets) */
+	u8 mac[DXDI_DIGEST_SIZE_MAX];	/*[out] */
+	u8 mac_size;	/*[out] (octets) */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_hash_proc_params {
+	u32 *context_buf;	/*[in] */
+	enum dxdi_hash_type hash_type;	/*[in] */
+	u8 *data_in;	/*[in] */
+	u32 data_in_size;	/*[in] (octets) */
+	u8 digest[DXDI_DIGEST_SIZE_MAX];	/*[out] */
+	u8 digest_size;	/*[out] (octets) */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_aes_iv_params {
+	u32 *context_buf;	/*[in] */
+	u8 iv_ptr[DXDI_AES_IV_SIZE];	/*[in]/[out] */
+};
+
+/* Combined params */
+struct dxdi_combined_init_params {
+	struct dxdi_combined_props props;	/*[in] */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_combined_proc_dblk_params {
+	struct dxdi_combined_props props;	/*[in] */
+	u8 *data_in;	/*[in] */
+	u8 *data_out;	/*[out] */
+	u32 data_in_size;	/*[in] (octets) */
+	u32 error_info;	/*[out] */
+};
+
+/* the structure used in finalize and integrated processing */
+struct dxdi_combined_proc_params {
+	struct dxdi_combined_props props;	/*[in] */
+	u8 *data_in;	/*[in] */
+	u8 *data_out;	/*[out] */
+	u32 data_in_size;	/*[in] (octets) */
+	u8 auth_data[DXDI_DIGEST_SIZE_MAX];	/*[out] */
+	u8 auth_data_size;	/*[out] (octets) */
+	u32 error_info;	/*[out] */
+};
+
+/**************************************/
+/* Memory references and registration */
+/**************************************/
+
+enum dxdi_data_direction {
+	DXDI_DATA_NULL = 0,
+	DXDI_DATA_TO_DEVICE = 1,
+	DXDI_DATA_FROM_DEVICE = (1 << 1),
+	DXDI_DATA_BIDIR = (DXDI_DATA_TO_DEVICE | DXDI_DATA_FROM_DEVICE)
+};
+
+/* Reference to pre-registered memory */
+#define DXDI_MEMREF_ID_NULL -1
+
+struct dxdi_memref {
+	enum dxdi_data_direction dma_direction;
+	/* Memory reference ID - DXDI_MEMREF_ID_NULL if not registered */
+	int ref_id;
+	/* Start address of a non-registered memory or offset within a
+	 * registered memory */
+	u32 start_or_offset;
+	/* Size in bytes of non-registered buffer or size of chunk within a
+	 * registered buffer */
+	u32 size;
+};
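+
+/*
+ * Example (illustrative): describing a non-registered user buffer as an
+ * input memory reference.
+ *
+ *	struct dxdi_memref memref = {0};
+ *
+ *	memref.dma_direction = DXDI_DATA_TO_DEVICE;
+ *	memref.ref_id = DXDI_MEMREF_ID_NULL;
+ *	memref.start_or_offset = (u32)(uintptr_t)user_buf;
+ *	memref.size = user_buf_size;
+ */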
+
+struct dxdi_register_mem4dma_params {
+	struct dxdi_memref memref;	/*[in] */
+	int memref_id;	/*[out] */
+};
+
+struct dxdi_alloc_mem4dma_params {
+	u32 size;	/*[in] */
+	int memref_id;	/*[out] */
+};
+
+struct dxdi_free_mem4dma_params {
+	int memref_id;	/*[in] */
+};
+
+/***********/
+/* SeP-RPC */
+/***********/
+struct dxdi_sep_rpc_params {
+	u16 agent_id;	/*[in] */
+	u16 func_id;	/*[in] */
+	struct dxdi_memref mem_refs[SEP_RPC_MAX_MEMREF_PER_FUNC]; /*[in] */
+	u32 rpc_params_size;	/*[in] */
+	struct seprpc_params *rpc_params;	/*[in] */
+	/* rpc_params to be copied into kernel DMA buffer */
+	enum seprpc_retcode error_info;	/*[out] */
+};
+
+/***************/
+/* SeP Applets */
+/***************/
+
+enum dxdi_sepapp_param_type {
+	DXDI_SEPAPP_PARAM_NULL = 0,
+	DXDI_SEPAPP_PARAM_VAL = 1,
+	DXDI_SEPAPP_PARAM_MEMREF = 2,
+	DXDI_SEPAPP_PARAM_RESERVE32B = 0x7FFFFFFF
+};
+
+struct dxdi_val_param {
+	enum dxdi_data_direction copy_dir;	/* Copy direction */
+	u32 data[2];
+};
+
+#define SEP_APP_PARAMS_MAX 4
+
+struct dxdi_sepapp_params {
+	enum dxdi_sepapp_param_type params_types[SEP_APP_PARAMS_MAX];
+	union {
+		struct dxdi_val_param val;
+		struct dxdi_memref memref;
+	} params[SEP_APP_PARAMS_MAX];
+};
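+
+/*
+ * Example (illustrative): passing one by-value parameter and one memref
+ * parameter to an applet; unused slots remain DXDI_SEPAPP_PARAM_NULL.
+ *
+ *	struct dxdi_sepapp_params params = {0};
+ *
+ *	params.params_types[0] = DXDI_SEPAPP_PARAM_VAL;
+ *	params.params[0].val.copy_dir = DXDI_DATA_TO_DEVICE;
+ *	params.params[0].val.data[0] = some_value;
+ *	params.params_types[1] = DXDI_SEPAPP_PARAM_MEMREF;
+ *	params.params[1].memref = memref;
+ */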
+
+/* SeP modules ID for returnOrigin */
+enum dxdi_sep_module {
+	DXDI_SEP_MODULE_NULL = 0,
+	DXDI_SEP_MODULE_HOST_DRIVER = 1,
+	DXDI_SEP_MODULE_SW_QUEUE = 2,	/* SW-queue task: Inc. desc. parsers */
+	DXDI_SEP_MODULE_APP_MGR = 3,	/* Applet Manager */
+	DXDI_SEP_MODULE_APP = 4,	/* Applet */
+	DXDI_SEP_MODULE_RPC_AGENT = 5,	/* Down to RPC parsers */
+	DXDI_SEP_MODULE_SYM_CRYPTO = 6,	/* Symmetric crypto driver */
+	DXDI_SEP_MODULE_RESERVE32B = 0x7FFFFFFF
+};
+
+#define DXDI_SEPAPP_UUID_SIZE 16
+
+#define DXDI_SEPAPP_SESSION_INVALID (-1)
+
+struct dxdi_sepapp_session_open_params {
+	u8 app_uuid[DXDI_SEPAPP_UUID_SIZE];	/*[in] */
+	u32 auth_method;	/*[in] */
+	u32 auth_data[3];	/*[in] */
+	struct dxdi_sepapp_params app_auth_data;	/*[in/out] */
+	int session_id;	/*[out] */
+	enum dxdi_sep_module sep_ret_origin;	/*[out] */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_sepapp_session_close_params {
+	int session_id;	/*[in] */
+};
+
+struct dxdi_sepapp_command_invoke_params {
+	int session_id;	/*[in] */
+	u32 command_id;	/*[in] */
+	struct dxdi_sepapp_params command_params;	/*[in/out] */
+	enum dxdi_sep_module sep_ret_origin;	/*[out] */
+	u32 error_info;	/*[out] */
+};
+
+#pragma pack(pop)
+
+#endif /*__SEP_DRIVER_ABI_H__*/
diff --git a/drivers/staging/sep54/dx_env.h b/drivers/staging/sep54/dx_env.h
new file mode 100644
index 0000000..299e3c7
--- /dev/null
+++ b/drivers/staging/sep54/dx_env.h
@@ -0,0 +1,230 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+#ifndef __DX_ENV_H__
+#define __DX_ENV_H__
+
+/*--------------------------------------*/
+/* BLOCK: ENV_REGS                      */
+/*--------------------------------------*/
+#define DX_ENV_CC_GPI_REG_OFFSET     0x18UL
+#define DX_ENV_CC_GPI_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_GPI_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CC_GPO_REG_OFFSET     0x1cUL
+#define DX_ENV_CC_GPO_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_GPO_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_PKA_DEBUG_MODE_REG_OFFSET     0x24UL
+#define DX_ENV_PKA_DEBUG_MODE_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_PKA_DEBUG_MODE_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_SCAN_MODE_REG_OFFSET     0x30UL
+#define DX_ENV_SCAN_MODE_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_SCAN_MODE_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_ALLOW_SCAN_REG_OFFSET     0x34UL
+#define DX_ENV_CC_ALLOW_SCAN_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_ALLOW_SCAN_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_HOST_CC_EXT_INT_REG_OFFSET     0x38UL
+#define DX_ENV_HOST_CC_EXT_INT_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_HOST_CC_EXT_INT_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_SW_MONITOR_ADDR_REG_OFFSET     0x60UL
+#define DX_ENV_CC_SW_MONITOR_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_SW_MONITOR_ADDR_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CC_HOST_INT_REG_OFFSET     0x0A0UL
+#define DX_ENV_CC_HOST_INT_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_HOST_INT_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_RST_N_REG_OFFSET     0x0A8UL
+#define DX_ENV_CC_RST_N_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_RST_N_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_RST_OVERRIDE_REG_OFFSET     0x0ACUL
+#define DX_ENV_RST_OVERRIDE_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_RST_OVERRIDE_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_HOST_EXT_ACK_REG_OFFSET     0x0B0UL
+#define DX_ENV_CC_HOST_EXT_ACK_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_HOST_EXT_ACK_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_POR_N_ADDR_REG_OFFSET     0x0E0UL
+#define DX_ENV_CC_POR_N_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_POR_N_ADDR_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_WARM_BOOT_REG_OFFSET     0x0E4UL
+#define DX_ENV_CC_WARM_BOOT_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_WARM_BOOT_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_COLD_BOOT_REG_OFFSET     0x0E8UL
+#define DX_ENV_CC_COLD_BOOT_CC_COLD_BOOT_FULL_BIT_SHIFT  0x0UL
+#define DX_ENV_CC_COLD_BOOT_CC_COLD_BOOT_FULL_BIT_SIZE   0x1UL
+#define DX_ENV_CC_COLD_BOOT_CC_COLD_BOOT_SEMI_BIT_SHIFT  0x1UL
+#define DX_ENV_CC_COLD_BOOT_CC_COLD_BOOT_SEMI_BIT_SIZE   0x1UL
+#define DX_ENV_CC_BM_LOWER_BOUND_ADDR_REG_OFFSET     0x0F0UL
+#define DX_ENV_CC_BM_LOWER_BOUND_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_BM_LOWER_BOUND_ADDR_VALUE_BIT_SIZE    0x14UL
+#define DX_ENV_CC_BM_UPPER_BOUND_ADDR_REG_OFFSET     0x0F4UL
+#define DX_ENV_CC_BM_UPPER_BOUND_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_BM_UPPER_BOUND_ADDR_VALUE_BIT_SIZE    0x14UL
+#define DX_ENV_CC_BM_ENB_ADDR_REG_OFFSET     0x0F8UL
+#define DX_ENV_CC_BM_ENB_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_BM_ENB_ADDR_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_COLD_RST_REG_OFFSET     0x0FCUL
+#define DX_ENV_CC_COLD_RST_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_COLD_RST_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_BM_ERR_ACK_ADDR_REG_OFFSET     0x100UL
+#define DX_ENV_CC_BM_ERR_ACK_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_BM_ERR_ACK_ADDR_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_BM_CC_ERR_ADDR_REG_OFFSET     0x104UL
+#define DX_ENV_BM_CC_ERR_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_BM_CC_ERR_ADDR_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_DUMMY_ADDR_REG_OFFSET     0x108UL
+#define DX_ENV_DUMMY_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_DUMMY_ADDR_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CLK_STATUS_REG_OFFSET     0x10CUL
+#define DX_ENV_CLK_STATUS_AES_CLK_STATUS_BIT_SHIFT  0x0UL
+#define DX_ENV_CLK_STATUS_AES_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_DES_CLK_STATUS_BIT_SHIFT  0x1UL
+#define DX_ENV_CLK_STATUS_DES_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_HASH_CLK_STATUS_BIT_SHIFT  0x2UL
+#define DX_ENV_CLK_STATUS_HASH_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_PKA_CLK_STATUS_BIT_SHIFT  0x3UL
+#define DX_ENV_CLK_STATUS_PKA_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_RC4_CLK_STATUS_BIT_SHIFT  0x4UL
+#define DX_ENV_CLK_STATUS_RC4_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_AHB_CLK_STATUS_BIT_SHIFT  0x5UL
+#define DX_ENV_CLK_STATUS_AHB_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_RNG_CLK_STATUS_BIT_SHIFT  0x6UL
+#define DX_ENV_CLK_STATUS_RNG_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_C2_CLK_STATUS_BIT_SHIFT  0x7UL
+#define DX_ENV_CLK_STATUS_C2_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_SEP_CLK_STATUS_BIT_SHIFT  0x8UL
+#define DX_ENV_CLK_STATUS_SEP_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_COMM_CLK_STATUS_BIT_SHIFT  0x9UL
+#define DX_ENV_CLK_STATUS_COMM_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_COUNTER_CLR_REG_OFFSET     0x118UL
+#define DX_ENV_COUNTER_CLR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_COUNTER_CLR_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_COUNTER_RD_REG_OFFSET     0x11CUL
+#define DX_ENV_COUNTER_RD_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_COUNTER_RD_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CC_SECOND_BM_LOWER_BOUND_ADDR_REG_OFFSET     0x120UL
+#define DX_ENV_CC_SECOND_BM_LOWER_BOUND_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_SECOND_BM_LOWER_BOUND_ADDR_VALUE_BIT_SIZE    0x14UL
+#define DX_ENV_CC_SECOND_BM_UPPER_BOUND_ADDR_REG_OFFSET     0x124UL
+#define DX_ENV_CC_SECOND_BM_UPPER_BOUND_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_SECOND_BM_UPPER_BOUND_ADDR_VALUE_BIT_SIZE    0x14UL
+#define DX_ENV_CC_SECOND_BM_ENB_ADDR_REG_OFFSET     0x128UL
+#define DX_ENV_CC_SECOND_BM_ENB_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_SECOND_BM_ENB_ADDR_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_SECOND_BM_ERR_ACK_ADDR_REG_OFFSET     0x12CUL
+#define DX_ENV_CC_SECOND_BM_ERR_ACK_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_SECOND_BM_ERR_ACK_ADDR_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_SECOND_BM_CC_ERR_ADDR_REG_OFFSET     0x130UL
+#define DX_ENV_SECOND_BM_CC_ERR_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_SECOND_BM_CC_ERR_ADDR_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_RNG_DEBUG_ENABLE_REG_OFFSET     0x430UL
+#define DX_ENV_RNG_DEBUG_ENABLE_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_RNG_DEBUG_ENABLE_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_WARM_BOOT_FINISHED_REG_OFFSET     0x434UL
+#define DX_ENV_CC_WARM_BOOT_FINISHED_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_WARM_BOOT_FINISHED_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_LCS_REG_OFFSET     0x43CUL
+#define DX_ENV_CC_LCS_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_LCS_VALUE_BIT_SIZE    0x8UL
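+
+/*
+ * Usage sketch (illustrative): the _BIT_SHIFT/_BIT_SIZE pairs in this
+ * file are meant for masking fields out of a raw register value, e.g.
+ * extracting the LCS value:
+ *
+ *	u32 lcs = (reg_val >> DX_ENV_CC_LCS_VALUE_BIT_SHIFT) &
+ *		  ((1UL << DX_ENV_CC_LCS_VALUE_BIT_SIZE) - 1);
+ */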
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_REG_OFFSET     0x440UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_CM_BIT_SHIFT  0x0UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_CM_BIT_SIZE   0x1UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_DM_BIT_SHIFT  0x1UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_DM_BIT_SIZE   0x1UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_SECURE_BIT_SHIFT  0x2UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_SECURE_BIT_SIZE   0x1UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_RMA_BIT_SHIFT  0x3UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_RMA_BIT_SIZE   0x1UL
+#define DX_ENV_DCU_EN_REG_OFFSET     0x444UL
+#define DX_ENV_DCU_EN_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_DCU_EN_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CC_LCS_IS_VALID_REG_OFFSET     0x448UL
+#define DX_ENV_CC_LCS_IS_VALID_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_LCS_IS_VALID_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CRYPTOKEY_0_REG_OFFSET     0x450UL
+#define DX_ENV_CRYPTOKEY_0_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CRYPTOKEY_0_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CRYPTOKEY_1_REG_OFFSET     0x454UL
+#define DX_ENV_CRYPTOKEY_1_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CRYPTOKEY_1_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CRYPTOKEY_2_REG_OFFSET     0x458UL
+#define DX_ENV_CRYPTOKEY_2_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CRYPTOKEY_2_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CRYPTOKEY_3_REG_OFFSET     0x45CUL
+#define DX_ENV_CRYPTOKEY_3_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CRYPTOKEY_3_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CRYPTOKEY_4_REG_OFFSET     0x460UL
+#define DX_ENV_CRYPTOKEY_4_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CRYPTOKEY_4_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CRYPTOKEY_5_REG_OFFSET     0x464UL
+#define DX_ENV_CRYPTOKEY_5_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CRYPTOKEY_5_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CRYPTOKEY_6_REG_OFFSET     0x468UL
+#define DX_ENV_CRYPTOKEY_6_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CRYPTOKEY_6_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CRYPTOKEY_7_REG_OFFSET     0x46CUL
+#define DX_ENV_CRYPTOKEY_7_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CRYPTOKEY_7_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_POWER_DOWN_REG_OFFSET     0x478UL
+#define DX_ENV_POWER_DOWN_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_POWER_DOWN_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_POWER_DOWN_EN_REG_OFFSET     0x47CUL
+#define DX_ENV_POWER_DOWN_EN_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_POWER_DOWN_EN_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_OTF_SECURE_BOOT_DONE_REG_OFFSET     0x480UL
+#define DX_ENV_OTF_SECURE_BOOT_DONE_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_OTF_SECURE_BOOT_DONE_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_DCU_H_EN_REG_OFFSET     0x484UL
+#define DX_ENV_DCU_H_EN_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_DCU_H_EN_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_VERSION_REG_OFFSET     0x488UL
+#define DX_ENV_VERSION_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_VERSION_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_FUSE_AIB_1K_OFFSET_REG_OFFSET     0x48CUL
+#define DX_ENV_FUSE_AIB_1K_OFFSET_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_FUSE_AIB_1K_OFFSET_VALUE_BIT_SIZE    0x2UL
+/* --------------------------------------*/
+/* BLOCK: ENV_CC_MEMORIES                */
+/* --------------------------------------*/
+#define DX_ENV_FUSE_READY_REG_OFFSET     0x414UL
+#define DX_ENV_FUSE_READY_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_FUSE_READY_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_ROM_BANK_REG_OFFSET     0x420UL
+#define DX_ENV_ROM_BANK_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_ROM_BANK_VALUE_BIT_SIZE    0x2UL
+#define DX_ENV_PERF_RAM_MASTER_REG_OFFSET     0x500UL
+#define DX_ENV_PERF_RAM_MASTER_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_PERF_RAM_MASTER_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_PERF_RAM_ADDR_HIGH4_REG_OFFSET     0x504UL
+#define DX_ENV_PERF_RAM_ADDR_HIGH4_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_PERF_RAM_ADDR_HIGH4_VALUE_BIT_SIZE    0x2UL
+#define DX_ENV_FUSES_RAM_REG_OFFSET     0x800UL
+#define DX_ENV_FUSES_RAM_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_FUSES_RAM_VALUE_BIT_SIZE    0x20UL
+/* --------------------------------------*/
+/* BLOCK: ENV_PERF_RAM_BASE              */
+/* --------------------------------------*/
+#define DX_ENV_PERF_RAM_BASE_REG_OFFSET     0x0UL
+#define DX_ENV_PERF_RAM_BASE_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_PERF_RAM_BASE_VALUE_BIT_SIZE    0x20UL
+
+#endif /*__DX_ENV_H__*/
diff --git a/drivers/staging/sep54/dx_host.h b/drivers/staging/sep54/dx_host.h
new file mode 100644
index 0000000..0ae53b2
--- /dev/null
+++ b/drivers/staging/sep54/dx_host.h
@@ -0,0 +1,398 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+#ifndef __DX_HOST_H__
+#define __DX_HOST_H__
+/* -------------------------------------- */
+/* BLOCK: HOST                            */
+/* -------------------------------------- */
+#define DX_HOST_IRR_REG_OFFSET     0xA00UL
+#define DX_HOST_IRR_SEP_WATCHDOG_BIT_SHIFT  0x0UL
+#define DX_HOST_IRR_SEP_WATCHDOG_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_DSCRPTR_DONE_LOW_INT_BIT_SHIFT  0x2UL
+#define DX_HOST_IRR_DSCRPTR_DONE_LOW_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_HOST_SRAM_VIO_BIT_SHIFT  0x3UL
+#define DX_HOST_IRR_HOST_SRAM_VIO_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SRAM_TO_DIN_INT_BIT_SHIFT  0x4UL
+#define DX_HOST_IRR_SRAM_TO_DIN_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_DOUT_TO_SRAM_INT_BIT_SHIFT  0x5UL
+#define DX_HOST_IRR_DOUT_TO_SRAM_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_MEM_TO_DIN_INT_BIT_SHIFT  0x6UL
+#define DX_HOST_IRR_MEM_TO_DIN_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_DOUT_TO_MEM_INT_BIT_SHIFT  0x7UL
+#define DX_HOST_IRR_DOUT_TO_MEM_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_AXI_ERR_INT_BIT_SHIFT  0x8UL
+#define DX_HOST_IRR_AXI_ERR_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_PKA_EXP_INT_BIT_SHIFT  0x9UL
+#define DX_HOST_IRR_PKA_EXP_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_RNG_INT_BIT_SHIFT  0xAUL
+#define DX_HOST_IRR_RNG_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SEP_HOST_GPR0_INT_BIT_SHIFT  0xBUL
+#define DX_HOST_IRR_SEP_HOST_GPR0_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SEP_HOST_GPR1_INT_BIT_SHIFT  0xCUL
+#define DX_HOST_IRR_SEP_HOST_GPR1_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SEP_HOST_GPR2_INT_BIT_SHIFT  0xDUL
+#define DX_HOST_IRR_SEP_HOST_GPR2_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SEP_HOST_GPR3_INT_BIT_SHIFT  0xEUL
+#define DX_HOST_IRR_SEP_HOST_GPR3_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SEP_HOST_GPR4_INT_BIT_SHIFT  0xFUL
+#define DX_HOST_IRR_SEP_HOST_GPR4_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SEP_HOST_GPR5_INT_BIT_SHIFT  0x10UL
+#define DX_HOST_IRR_SEP_HOST_GPR5_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SEP_HOST_GPR6_INT_BIT_SHIFT  0x11UL
+#define DX_HOST_IRR_SEP_HOST_GPR6_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SEP_HOST_GPR7_INT_BIT_SHIFT  0x12UL
+#define DX_HOST_IRR_SEP_HOST_GPR7_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT  0x13UL
+#define DX_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_REG_OFFSET     0xA04UL
+#define DX_HOST_IMR_SEP_WATCHDOG_MASK_BIT_SHIFT  0x0UL
+#define DX_HOST_IMR_SEP_WATCHDOG_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_NOT_USED_MASK_BIT_SHIFT  0x1UL
+#define DX_HOST_IMR_NOT_USED_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT  0x2UL
+#define DX_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_HOST_SRAM_VIO_MASK_BIT_SHIFT  0x3UL
+#define DX_HOST_IMR_HOST_SRAM_VIO_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SRAM_TO_DIN_MASK_BIT_SHIFT  0x4UL
+#define DX_HOST_IMR_SRAM_TO_DIN_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_DOUT_TO_SRAM_MASK_BIT_SHIFT  0x5UL
+#define DX_HOST_IMR_DOUT_TO_SRAM_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_MEM_TO_DIN_MASK_BIT_SHIFT  0x6UL
+#define DX_HOST_IMR_MEM_TO_DIN_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_DOUT_TO_MEM_MASK_BIT_SHIFT  0x7UL
+#define DX_HOST_IMR_DOUT_TO_MEM_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT  0x8UL
+#define DX_HOST_IMR_AXI_ERR_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_PKA_EXP_MASK_BIT_SHIFT  0x9UL
+#define DX_HOST_IMR_PKA_EXP_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_RNG_INT_MASK_BIT_SHIFT  0xAUL
+#define DX_HOST_IMR_RNG_INT_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SEP_HOST_GPR0_MASK_BIT_SHIFT  0xBUL
+#define DX_HOST_IMR_SEP_HOST_GPR0_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SEP_HOST_GPR1_MASK_BIT_SHIFT  0xCUL
+#define DX_HOST_IMR_SEP_HOST_GPR1_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SEP_HOST_GPR2_MASK_BIT_SHIFT  0xDUL
+#define DX_HOST_IMR_SEP_HOST_GPR2_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SEP_HOST_GPR3_MASK_BIT_SHIFT  0xEUL
+#define DX_HOST_IMR_SEP_HOST_GPR3_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SEP_HOST_GPR4_MASK_BIT_SHIFT  0xFUL
+#define DX_HOST_IMR_SEP_HOST_GPR4_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SEP_HOST_GPR5_MASK_BIT_SHIFT  0x10UL
+#define DX_HOST_IMR_SEP_HOST_GPR5_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SEP_HOST_GPR6_MASK_BIT_SHIFT  0x11UL
+#define DX_HOST_IMR_SEP_HOST_GPR6_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SEP_HOST_GPR7_MASK_BIT_SHIFT  0x12UL
+#define DX_HOST_IMR_SEP_HOST_GPR7_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT  0x13UL
+#define DX_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_DSCRPTR_WATERMARK_MASK1_BIT_SHIFT  0x14UL
+#define DX_HOST_IMR_DSCRPTR_WATERMARK_MASK1_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_CNTX_SWITCH_CNTR_EXPIRED_BIT_SHIFT  0x15UL
+#define DX_HOST_IMR_CNTX_SWITCH_CNTR_EXPIRED_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_REG_OFFSET     0xA08UL
+#define DX_HOST_ICR_SEP_WATCHDOG_CLEAR_BIT_SHIFT  0x0UL
+#define DX_HOST_ICR_SEP_WATCHDOG_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT  0x2UL
+#define DX_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_HOST_SRAM_VIO_CLEAR_BIT_SHIFT  0x3UL
+#define DX_HOST_ICR_HOST_SRAM_VIO_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SRAM_TO_DIN_CLEAR_BIT_SHIFT  0x4UL
+#define DX_HOST_ICR_SRAM_TO_DIN_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_DOUT_TO_SRAM_CLEAR_BIT_SHIFT  0x5UL
+#define DX_HOST_ICR_DOUT_TO_SRAM_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_MEM_TO_DIN_CLEAR_BIT_SHIFT  0x6UL
+#define DX_HOST_ICR_MEM_TO_DIN_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_DOUT_TO_MEM_CLEAR_BIT_SHIFT  0x7UL
+#define DX_HOST_ICR_DOUT_TO_MEM_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_AXI_ERR_CLEAR_BIT_SHIFT  0x8UL
+#define DX_HOST_ICR_AXI_ERR_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_PKA_EXP_CLEAR_BIT_SHIFT  0x9UL
+#define DX_HOST_ICR_PKA_EXP_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_RNG_INT_CLEAR_BIT_SHIFT  0xAUL
+#define DX_HOST_ICR_RNG_INT_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SEP_HOST_GPR0_CLEAR_BIT_SHIFT  0xBUL
+#define DX_HOST_ICR_SEP_HOST_GPR0_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SEP_HOST_GPR1_CLEAR_BIT_SHIFT  0xCUL
+#define DX_HOST_ICR_SEP_HOST_GPR1_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SEP_HOST_GPR2_CLEAR_BIT_SHIFT  0xDUL
+#define DX_HOST_ICR_SEP_HOST_GPR2_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SEP_HOST_GPR3_CLEAR_BIT_SHIFT  0xEUL
+#define DX_HOST_ICR_SEP_HOST_GPR3_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SEP_HOST_GPR4_CLEAR_BIT_SHIFT  0xFUL
+#define DX_HOST_ICR_SEP_HOST_GPR4_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SEP_HOST_GPR5_CLEAR_BIT_SHIFT  0x10UL
+#define DX_HOST_ICR_SEP_HOST_GPR5_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SEP_HOST_GPR6_CLEAR_BIT_SHIFT  0x11UL
+#define DX_HOST_ICR_SEP_HOST_GPR6_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SEP_HOST_GPR7_CLEAR_BIT_SHIFT  0x12UL
+#define DX_HOST_ICR_SEP_HOST_GPR7_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SHIFT  0x13UL
+#define DX_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_SEP_SRAM_THRESHOLD_REG_OFFSET     0xA10UL
+#define DX_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SIZE    0x10UL
+#define DX_HOST_SEP_BUSY_REG_OFFSET     0xA14UL
+#define DX_HOST_SEP_BUSY_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_BUSY_VALUE_BIT_SIZE    0x1UL
+#define DX_HOST_SEP_SW_MONITOR_REG_OFFSET     0xA20UL
+#define DX_HOST_SEP_SW_MONITOR_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_SW_MONITOR_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_CC_SW_RST_REG_OFFSET     0xA40UL
+#define DX_HOST_CC_SW_RST_CC_SW_RST_REQ_BIT_SHIFT  0x0UL
+#define DX_HOST_CC_SW_RST_CC_SW_RST_REQ_BIT_SIZE   0x1UL
+#define DX_HOST_CC_SW_RST_CC_SW_RST_FORCE_BIT_SHIFT  0x1UL
+#define DX_HOST_CC_SW_RST_CC_SW_RST_FORCE_BIT_SIZE   0x1UL
+#define DX_HOST_CC_SW_RST_AXIS_SYSREQ_BIT_SHIFT  0x2UL
+#define DX_HOST_CC_SW_RST_AXIS_SYSREQ_BIT_SIZE   0x1UL
+#define DX_HOST_CC_SW_RST_AXIM_SYSREQ_BIT_SHIFT  0x3UL
+#define DX_HOST_CC_SW_RST_AXIM_SYSREQ_BIT_SIZE   0x1UL
+#define DX_HOST_SEP_HOST_GPR0_REG_OFFSET     0xA80UL
+#define DX_HOST_SEP_HOST_GPR0_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_HOST_GPR0_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_SEP_HOST_GPR1_REG_OFFSET     0xA88UL
+#define DX_HOST_SEP_HOST_GPR1_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_HOST_GPR1_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_SEP_HOST_GPR2_REG_OFFSET     0xA90UL
+#define DX_HOST_SEP_HOST_GPR2_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_HOST_GPR2_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_SEP_HOST_GPR3_REG_OFFSET     0xA98UL
+#define DX_HOST_SEP_HOST_GPR3_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_HOST_GPR3_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_SEP_HOST_GPR4_REG_OFFSET     0xAA0UL
+#define DX_HOST_SEP_HOST_GPR4_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_HOST_GPR4_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_SEP_HOST_GPR5_REG_OFFSET     0xAA8UL
+#define DX_HOST_SEP_HOST_GPR5_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_HOST_GPR5_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_SEP_HOST_GPR6_REG_OFFSET     0xAB0UL
+#define DX_HOST_SEP_HOST_GPR6_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_HOST_GPR6_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_SEP_HOST_GPR7_REG_OFFSET     0xAB8UL
+#define DX_HOST_SEP_HOST_GPR7_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_HOST_GPR7_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_SEP_GPR0_REG_OFFSET     0xA84UL
+#define DX_HOST_HOST_SEP_GPR0_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_HOST_SEP_GPR0_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_SEP_GPR1_REG_OFFSET     0xA8CUL
+#define DX_HOST_HOST_SEP_GPR1_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_HOST_SEP_GPR1_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_SEP_GPR2_REG_OFFSET     0xA94UL
+#define DX_HOST_HOST_SEP_GPR2_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_HOST_SEP_GPR2_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_SEP_GPR3_REG_OFFSET     0xA9CUL
+#define DX_HOST_HOST_SEP_GPR3_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_HOST_SEP_GPR3_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_SEP_GPR4_REG_OFFSET     0xAA4UL
+#define DX_HOST_HOST_SEP_GPR4_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_HOST_SEP_GPR4_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_SEP_GPR5_REG_OFFSET     0xAACUL
+#define DX_HOST_HOST_SEP_GPR5_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_HOST_SEP_GPR5_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_SEP_GPR6_REG_OFFSET     0xAB4UL
+#define DX_HOST_HOST_SEP_GPR6_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_HOST_SEP_GPR6_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_SEP_GPR7_REG_OFFSET     0xABCUL
+#define DX_HOST_HOST_SEP_GPR7_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_HOST_SEP_GPR7_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_ENDIAN_REG_OFFSET     0xAD0UL
+#define DX_HOST_HOST_ENDIAN_DIN_ICACHE_END_BIT_SHIFT  0x0UL
+#define DX_HOST_HOST_ENDIAN_DIN_ICACHE_END_BIT_SIZE   0x1UL
+#define DX_HOST_HOST_ENDIAN_DIN_DCAHE_END_BIT_SHIFT  0x1UL
+#define DX_HOST_HOST_ENDIAN_DIN_DCAHE_END_BIT_SIZE   0x1UL
+#define DX_HOST_HOST_ENDIAN_DIN_DD_END_BIT_SHIFT  0x2UL
+#define DX_HOST_HOST_ENDIAN_DIN_DD_END_BIT_SIZE   0x1UL
+#define DX_HOST_HOST_ENDIAN_DIN_DMA_END_BIT_SHIFT  0x3UL
+#define DX_HOST_HOST_ENDIAN_DIN_DMA_END_BIT_SIZE   0x1UL
+#define DX_HOST_HOST_ENDIAN_DOUT_ICACHE_END_BIT_SHIFT  0x4UL
+#define DX_HOST_HOST_ENDIAN_DOUT_ICACHE_END_BIT_SIZE   0x1UL
+#define DX_HOST_HOST_ENDIAN_DOUT_DCACHE_END_BIT_SHIFT  0x5UL
+#define DX_HOST_HOST_ENDIAN_DOUT_DCACHE_END_BIT_SIZE   0x1UL
+#define DX_HOST_HOST_ENDIAN_DOUT_DD_END_BIT_SHIFT  0x6UL
+#define DX_HOST_HOST_ENDIAN_DOUT_DD_END_BIT_SIZE   0x1UL
+#define DX_HOST_HOST_ENDIAN_DOUT_DMA_END_BIT_SHIFT  0x7UL
+#define DX_HOST_HOST_ENDIAN_DOUT_DMA_END_BIT_SIZE   0x1UL
+#define DX_HOST_HOST_ENDIAN_INTENAL_WORD_END_BIT_SHIFT  0x8UL
+#define DX_HOST_HOST_ENDIAN_INTENAL_WORD_END_BIT_SIZE   0x8UL
+#define DX_SRAM_DATA_REG_OFFSET     0xF00UL
+#define DX_SRAM_DATA_VALUE_BIT_SHIFT   0x0UL
+#define DX_SRAM_DATA_VALUE_BIT_SIZE    0x20UL
+#define DX_SRAM_ADDR_REG_OFFSET     0xF04UL
+#define DX_SRAM_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_SRAM_ADDR_VALUE_BIT_SIZE    0xFUL
+#define DX_SRAM_DATA_READY_REG_OFFSET     0xF08UL
+#define DX_SRAM_DATA_READY_VALUE_BIT_SHIFT   0x0UL
+#define DX_SRAM_DATA_READY_VALUE_BIT_SIZE    0x1UL
+#define DX_HOST_RKEK1_0_REG_OFFSET     0xA00UL
+#define DX_HOST_RKEK1_0_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_0_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK1_1_REG_OFFSET     0xA04UL
+#define DX_HOST_RKEK1_1_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_1_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK1_2_REG_OFFSET     0xA08UL
+#define DX_HOST_RKEK1_2_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_2_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK1_3_REG_OFFSET     0xA0CUL
+#define DX_HOST_RKEK1_3_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_3_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK1_4_REG_OFFSET     0xA10UL
+#define DX_HOST_RKEK1_4_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_4_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK1_5_REG_OFFSET     0xA14UL
+#define DX_HOST_RKEK1_5_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_5_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK1_6_REG_OFFSET     0xA18UL
+#define DX_HOST_RKEK1_6_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_6_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK1_7_REG_OFFSET     0xA1CUL
+#define DX_HOST_RKEK1_7_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_7_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK1_ECC_REG_OFFSET     0xA20UL
+#define DX_HOST_RKEK1_ECC_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_ECC_VALUE_BIT_SIZE    0x20UL
+#define DX_LCS_REG_REG_OFFSET     0xA24UL
+#define DX_LCS_REG_VALUE_BIT_SHIFT   0x0UL
+#define DX_LCS_REG_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK2_0_REG_OFFSET     0xA2CUL
+#define DX_HOST_RKEK2_0_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK2_0_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK2_1_REG_OFFSET     0xA30UL
+#define DX_HOST_RKEK2_1_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK2_1_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK2_2_REG_OFFSET     0xA34UL
+#define DX_HOST_RKEK2_2_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK2_2_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK2_3_REG_OFFSET     0xA38UL
+#define DX_HOST_RKEK2_3_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK2_3_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK2_4_REG_OFFSET     0xA3CUL
+#define DX_HOST_RKEK2_4_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK2_4_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK2_5_REG_OFFSET     0xA40UL
+#define DX_HOST_RKEK2_5_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK2_5_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK2_6_REG_OFFSET     0xA44UL
+#define DX_HOST_RKEK2_6_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK2_6_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK2_7_REG_OFFSET     0xA48UL
+#define DX_HOST_RKEK2_7_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK2_7_VALUE_BIT_SIZE    0x20UL
+#define DX_NVM_CC_BOOT_REG_OFFSET     0xAA4UL
+#define DX_NVM_CC_BOOT_LARGE_RKEK_LOCAL_BIT_SHIFT  0x1UL
+#define DX_NVM_CC_BOOT_LARGE_RKEK_LOCAL_BIT_SIZE   0x1UL
+#define DX_NVM_CC_BOOT_HASH_IN_FUSES_LOCAL_BIT_SHIFT  0x2UL
+#define DX_NVM_CC_BOOT_HASH_IN_FUSES_LOCAL_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CNTR0_REG_OFFSET     0xA50UL
+#define DX_PAU_HOST_CNTR0_VALUE_BIT_SHIFT   0x0UL
+#define DX_PAU_HOST_CNTR0_VALUE_BIT_SIZE    0x20UL
+#define DX_PAU_HOST_CNTR1_REG_OFFSET     0xA54UL
+#define DX_PAU_HOST_CNTR1_VALUE_BIT_SHIFT   0x0UL
+#define DX_PAU_HOST_CNTR1_VALUE_BIT_SIZE    0x20UL
+#define DX_PAU_HOST_CNTR2_REG_OFFSET     0xA58UL
+#define DX_PAU_HOST_CNTR2_VALUE_BIT_SHIFT   0x0UL
+#define DX_PAU_HOST_CNTR2_VALUE_BIT_SIZE    0x20UL
+#define DX_PAU_HOST_CNTR3_REG_OFFSET     0xA5CUL
+#define DX_PAU_HOST_CNTR3_VALUE_BIT_SHIFT   0x0UL
+#define DX_PAU_HOST_CNTR3_VALUE_BIT_SIZE    0x20UL
+#define DX_PAU_HOST_CNTR4_REG_OFFSET     0xA60UL
+#define DX_PAU_HOST_CNTR4_VALUE_BIT_SHIFT   0x0UL
+#define DX_PAU_HOST_CNTR4_VALUE_BIT_SIZE    0x20UL
+#define DX_PAU_HOST_XOR_REG_OFFSET     0xA64UL
+#define DX_PAU_HOST_XOR_VALUE_BIT_SHIFT   0x0UL
+#define DX_PAU_HOST_XOR_VALUE_BIT_SIZE    0x20UL
+#define DX_PAU_HOST_MASK0_REG_OFFSET     0xA68UL
+#define DX_PAU_HOST_MASK0_PAU_HOST_MASK0_BIT_SHIFT  0x0UL
+#define DX_PAU_HOST_MASK0_PAU_HOST_MASK0_BIT_SIZE   0xDUL
+#define DX_PAU_HOST_MASK0_UN_USED_BIT_SHIFT  0xDUL
+#define DX_PAU_HOST_MASK0_UN_USED_BIT_SIZE   0x13UL
+#define DX_PAU_HOST_MASK1_REG_OFFSET     0xA6CUL
+#define DX_PAU_HOST_MASK1_PAU_HOST_MASK1_BIT_SHIFT  0x0UL
+#define DX_PAU_HOST_MASK1_PAU_HOST_MASK1_BIT_SIZE   0x19UL
+#define DX_PAU_HOST_MASK1_UN_USED_BIT_SHIFT  0x19UL
+#define DX_PAU_HOST_MASK1_UN_USED_BIT_SIZE   0x7UL
+#define DX_PAU_HOST_MASK2_REG_OFFSET     0xA70UL
+#define DX_PAU_HOST_MASK2_PAU_HOST_MASK2_BIT_SHIFT  0x0UL
+#define DX_PAU_HOST_MASK2_PAU_HOST_MASK2_BIT_SIZE   0x19UL
+#define DX_PAU_HOST_MASK2_UN_USED_BIT_SHIFT  0x19UL
+#define DX_PAU_HOST_MASK2_UN_USED_BIT_SIZE   0x7UL
+#define DX_PAU_HOST_MASK3_REG_OFFSET     0xA74UL
+#define DX_PAU_HOST_MASK3_PAU_HOST_MASK3_BIT_SHIFT  0x0UL
+#define DX_PAU_HOST_MASK3_PAU_HOST_MASK3_BIT_SIZE   0x1EUL
+#define DX_PAU_HOST_MASK3_UN_USED_BIT_SHIFT  0x1EUL
+#define DX_PAU_HOST_MASK3_UN_USED_BIT_SIZE   0x2UL
+#define DX_PAU_HOST_MASK4_REG_OFFSET     0xA78UL
+#define DX_PAU_HOST_MASK4_PAU_HOST_MASK4_BIT_SHIFT  0x0UL
+#define DX_PAU_HOST_MASK4_PAU_HOST_MASK4_BIT_SIZE   0x1EUL
+#define DX_PAU_HOST_MASK4_UN_USED_BIT_SHIFT  0x1EUL
+#define DX_PAU_HOST_MASK4_UN_USED_BIT_SIZE   0x2UL
+#define DX_PAU_HOST_CONFIG_REG_OFFSET     0xA7CUL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND0_BIT_SHIFT  0x0UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND0_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND1_BIT_SHIFT  0x1UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND1_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND2_BIT_SHIFT  0x2UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND2_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND3_BIT_SHIFT  0x3UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND3_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND4_BIT_SHIFT  0x4UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND4_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL0_BIT_SHIFT  0x5UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL0_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL1_BIT_SHIFT  0x6UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL1_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL2_BIT_SHIFT  0x7UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL2_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL3_BIT_SHIFT  0x8UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL3_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL4_BIT_SHIFT  0x9UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL4_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING_BIT_SHIFT  0xAUL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_AND_COUNETER_EVENT_BIT_SHIFT  0xBUL
+#define DX_PAU_HOST_CONFIG_AND_COUNETER_EVENT_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING0_BIT_SHIFT  0xCUL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING0_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING1_BIT_SHIFT  0xDUL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING1_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING2_BIT_SHIFT  0xEUL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING2_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING3_BIT_SHIFT  0xFUL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING3_BIT_SIZE   0x1UL
+#define DX_HOST_REGION_MASK_REG_OFFSET     0xAC4UL
+#define DX_HOST_REGION_MASK_HOST_REGION_SECURED_MASK_BIT_SHIFT  0x0UL
+#define DX_HOST_REGION_MASK_HOST_REGION_SECURED_MASK_BIT_SIZE   0x10UL
+#define DX_HOST_REGION_MASK_HOST_REGION_NON_SECURED_MASK_BIT_SHIFT  0x10UL
+#define DX_HOST_REGION_MASK_HOST_REGION_NON_SECURED_MASK_BIT_SIZE   0x10UL
+#define DX_HOST_REGION_GPRS_MASK_REG_OFFSET     0xAC0UL
+#define DX_HOST_REGION_GPRS_MASK_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_REGION_GPRS_MASK_VALUE_BIT_SIZE    0x8UL
+#define DX_HOST_CC_SW_RESET_ALLOWED_REG_OFFSET     0xA48UL
+#define DX_HOST_CC_SW_RESET_ALLOWED_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_CC_SW_RESET_ALLOWED_VALUE_BIT_SIZE    0x1UL
+#define DX_HOST_CC_SIGNATURE_REG_OFFSET     0xAC8UL
+#define DX_HOST_CC_SIGNATURE_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_CC_SIGNATURE_VALUE_BIT_SIZE    0x20UL
+
+#endif /*__DX_HOST_H__*/
diff --git a/drivers/staging/sep54/dx_init_cc_abi.h b/drivers/staging/sep54/dx_init_cc_abi.h
new file mode 100644
index 0000000..ba52531
--- /dev/null
+++ b/drivers/staging/sep54/dx_init_cc_abi.h
@@ -0,0 +1,207 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+/***************************************************************************
+ *  This file provides the CC-init ABI (SeP-Host binary interface)         *
+ ***************************************************************************/
+
+#ifndef __DX_INIT_CC_ABI_H__
+#define __DX_INIT_CC_ABI_H__
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+/* For SeP code environment */
+#include <stdint.h>
+#endif
+#include "dx_bitops.h"
+
+#ifndef INT16_MAX
+#define INT16_MAX		(32767)
+#endif
+#ifndef INT32_MAX
+#define INT32_MAX		(2147483647)
+#endif
+
+/***********************************/
+/* SeP to host communication       */
+/***********************************/
+/* GPRs for CC-init state/status from SeP */
+#define DX_SEP_STATE_GPR_IDX               7	/* SeP state */
+#define DX_SEP_STATUS_GPR_IDX              6	/* SeP status */
+/* GPRs used for passing driver init. parameters */
+/* (Valid while in DX_SEP_STATE_DONE_COLD_BOOT) */
+#define DX_SEP_INIT_SEP_PROPS_GPR_IDX      3	/* SEP properties passed to the
+						 * driver (see fields below) */
+#define DX_SEP_INIT_FW_PROPS_GPR_IDX      2	/* FW properties passed to the
+						 * driver (see fields below) */
+#define DX_SEP_INIT_FW_VER_GPR_IDX         1	/* SeP FW images version */
+#define DX_SEP_INIT_ROM_VER_GPR_IDX        0	/* SeP ROM image version */
+
+/* Debugging "stdout" tunnel via GPR5 - see sep_driver_cc54.c for details */
+#define DX_SEP_HOST_PRINTF_GPR_IDX         5
+
+/* Fields in DX_SEP_INIT_FW_PROPS_GPR_IDX */
+/* MLLI table size in bytes */
+#define DX_SEP_INIT_MLLI_TBL_SIZE_BIT_OFFSET	0
+#define DX_SEP_INIT_MLLI_TBL_SIZE_BIT_SIZE	12
+/* Maximum number of work queues supported */
+#define DX_SEP_INIT_NUM_OF_QUEUES_BIT_OFFSET	12
+#define DX_SEP_INIT_NUM_OF_QUEUES_BIT_SIZE	4
+/* Number of available context cache entries */
+#define DX_SEP_INIT_CACHE_CTX_SIZE_BIT_OFFSET	16
+#define DX_SEP_INIT_CACHE_CTX_SIZE_BIT_SIZE     8
+
+/* Fields in DX_SEP_INIT_SEP_PROPS_GPR_IDX */
+/* SEP frequency */
+#define DX_SEP_INIT_SEP_FREQUENCY_BIT_OFFSET	0
+#define DX_SEP_INIT_SEP_FREQUENCY_BIT_SIZE	12
+
+/***********************************/
+/* Host to SeP communication       */
+/***********************************/
+/* GPRs for requests from host to SeP */
+#define DX_HOST_REQ_GPR_IDX 7	/* Host-to-SeP requests */
+#define DX_HOST_REQ_PARAM_GPR_IDX 6	/* Host request parameters */
+/* The parameters in GPR6 must be ready before writing the request to GPR7 */
+
+/* MAGIC value of TLV "FIRST" parameter */
+#define DX_FW_INIT_PARAM_FIRST_MAGIC	0x3243F6A8
+
+/* Type-Length word manipulation macros */
+/* Note that all TLV communication assumes little-endian words */
+/* (i.e., responsibility of host driver to convert to LE before passing) */
+#define DX_TL_WORD(type, length)  ((length) << 16 | (type))
+#define DX_TL_GET_TYPE(tl_word)   ((tl_word) & 0xFFFF)
+#define DX_TL_GET_LENGTH(tl_word) ((tl_word) >> 16)
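+
+/*
+ * Illustrative sketch (kernel side only): round-trip of a TL word for
+ * parameter type 0x101 (DX_FW_INIT_PARAM_NUM_OF_DESC_QS, defined below)
+ * with a 4-word payload. The helper name dx_tl_word_demo is an editor's
+ * assumption, not part of the ABI.
+ */
+#ifdef __KERNEL__
+static inline int dx_tl_word_demo(void)
+{
+	u32 tl = DX_TL_WORD(0x101, 4);
+
+	/* Both fields must survive packing; remember the host driver is
+	 * responsible for converting the word to LE before passing it. */
+	return (DX_TL_GET_TYPE(tl) == 0x101) && (DX_TL_GET_LENGTH(tl) == 4);
+}
+#endif /* __KERNEL__ */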
+
+/* Macros for Assembly code */
+#define ASM_DX_SEP_STATE_ILLEGAL_INST   0x100
+#define ASM_DX_SEP_STATE_STACK_OVERFLOW 0x200
+/* SeP states over (SeP-to-host) GPR7 */
+enum dx_sep_state {
+	DX_SEP_STATE_OFF = 0x0,
+	DX_SEP_STATE_FATAL_ERROR = 0x1,
+	DX_SEP_STATE_START_SECURE_BOOT = 0x2,
+	DX_SEP_STATE_PROC_COLD_BOOT = 0x4,
+	DX_SEP_STATE_PROC_WARM_BOOT = 0x8,
+	DX_SEP_STATE_DONE_COLD_BOOT = 0x10,
+	DX_SEP_STATE_DONE_WARM_BOOT = 0x20,
+	/*DX_SEP_STATE_BM_ERR           = 0x40, */
+	/*DX_SEP_STATE_SECOND_BM_ERR    = 0x80, */
+	DX_SEP_STATE_ILLEGAL_INST = ASM_DX_SEP_STATE_ILLEGAL_INST,
+	DX_SEP_STATE_STACK_OVERFLOW = ASM_DX_SEP_STATE_STACK_OVERFLOW,
+	/* Response to DX_HOST_REQ_FW_INIT: */
+	DX_SEP_STATE_RELOAD_DRIVER	= 0x300,
+	DX_SEP_STATE_DONE_FW_INIT	= 0x400,
+	DX_SEP_STATE_PROC_SLEEP_MODE	= 0x800,
+	DX_SEP_STATE_DONE_SLEEP_MODE	= 0x1000,
+	DX_SEP_STATE_FW_ABORT		= 0xBAD0BAD0,
+	DX_SEP_STATE_ROM_ABORT		= 0xBAD1BAD1,
+	DX_SEP_STATE_RESERVE32B		= INT32_MAX
+};
+
+/* Host requests over (host-to-SeP) GPR7 */
+enum dx_host_req {
+	DX_HOST_REQ_RELEASE_CRYPTO_ENGINES        = 0x0,
+	DX_HOST_REQ_ACQUIRE_CRYPTO_ENGINES        = 0x2,
+	DX_HOST_REQ_CC_INIT                       = 0x1,
+	DX_HOST_REQ_FW_INIT                       = 0x4,
+	DX_HOST_REQ_UPDATE_SWQ_ADDR               = 0x6,
+	DX_HOST_REQ_CHANGE_TO_RELOAD_DRIVER_STATE = 0x7,
+	DX_HOST_REQ_SEP_SLEEP                     = 0x8,
+	DX_HOST_REQ_RESERVE32B                    = INT32_MAX
+};
+
+/* Init. TLV parameters from host to SeP */
+/* Some parameters are used by the CC-init flow with DX_HOST_REQ_CC_INIT and *
+ * the others by the host driver initialization with DX_HOST_REQ_FW_INIT     */
+enum dx_fw_init_tlv_params {
+	DX_FW_INIT_PARAM_NULL = 0,
+	/* Common parameters */
+	DX_FW_INIT_PARAM_FIRST = 1,	/* Param.=FIRST_MAGIC */
+	DX_FW_INIT_PARAM_LAST = 2,	/* Param.=checksum 32b */
+
+	/* CC-init. parameters */
+	DX_FW_INIT_PARAM_DISABLE_MODULES = 3,
+	DX_FW_INIT_PARAM_HOST_AXI_CONFIG = 4,
+	DX_FW_INIT_PARAM_HOST_DEF_APPLET_CONFIG = 5,
+	DX_FW_INIT_PARAM_SEP_FREQ = 6,
+
+	/* Host driver (post-cold-boot) parameters */
+	/* Number of descriptor queues: Length = 1 */
+	DX_FW_INIT_PARAM_NUM_OF_DESC_QS = 0x101,
+	/* The following parameters provide an array with value per queue: */
+	/* Length = num. of queues */
+	DX_FW_INIT_PARAM_DESC_QS_ADDR = 0x102,	/* Queue base addr. */
+	DX_FW_INIT_PARAM_DESC_QS_SIZE = 0x103,	/* Queue size(byte) */
+	/* FW context cache partition (num. of entries) per queue */
+	DX_FW_INIT_PARAM_CTX_CACHE_PART = 0x104,
+	/* SeP request module parameters (msg and response buffers and sizes) */
+	DX_FW_INIT_PARAM_SEP_REQUEST_PARAMS = 0x105,
+	DX_FW_INIT_PARAM_RESERVE16B = INT16_MAX
+};
+
+/* FW-init. error code encoding - GPR6 contents in DX_SEP_STATE_DONE_FW_INIT  */
+/* | 0xE | Param. Type | Error code |  */
+/* |--4--|-----16------|----12------|  */
+#define DX_FW_INIT_ERR_CODE_SIZE 12
+#define DX_FW_INIT_ERR_PARAM_TYPE_SHIFT DX_FW_INIT_ERR_CODE_SIZE
+#define DX_FW_INIT_ERR_PARAM_TYPE_SIZE 16
+#define DX_FW_INIT_ERR_TYPE_SHIFT \
+	(DX_FW_INIT_ERR_PARAM_TYPE_SHIFT + DX_FW_INIT_ERR_PARAM_TYPE_SIZE)
+#define DX_FW_INIT_ERR_TYPE_SIZE 4
+#define DX_FW_INIT_ERR_TYPE_VAL 0xE
+
+#define DX_SEP_REQUEST_PARAM_MSG_LEN		3
+
+/* Build error word to put in status GPR6 */
+#define DX_FW_INIT_ERR_WORD(err_code, param_type)                   \
+	((DX_FW_INIT_ERR_TYPE_VAL << DX_FW_INIT_ERR_TYPE_SHIFT) |   \
+	 ((param_type & BITMASK(DX_FW_INIT_ERR_PARAM_TYPE_SIZE)) << \
+	  DX_FW_INIT_ERR_PARAM_TYPE_SHIFT) |                        \
+	 (err_code))
+/* Parse status of DX_SEP_STATE_DONE_FW_INIT */
+#define DX_FW_INIT_IS_SUCCESS(status_word) ((status_word) == 0)
+#define DX_FW_INIT_GET_ERR_CODE(status_word) \
+	 ((status_word) & BITMASK(DX_FW_INIT_ERR_CODE_SIZE))
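+
+/*
+ * Illustrative sketch (kernel side only): encoding and decoding of a
+ * status word for an "invalid length" error (0x002, see the enum below)
+ * on parameter type DX_FW_INIT_PARAM_DESC_QS_SIZE. The helper name is
+ * an editor's assumption, not part of the ABI.
+ */
+#ifdef __KERNEL__
+static inline int dx_fw_init_err_word_demo(void)
+{
+	u32 status = DX_FW_INIT_ERR_WORD(0x002, DX_FW_INIT_PARAM_DESC_QS_SIZE);
+
+	return DX_FW_INIT_GET_ERR_CODE(status) == 0x002;
+}
+#endif /* __KERNEL__ */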
+
+/* FW INIT Error codes */
+/* extract from the status word - GPR6 - using DX_FW_INIT_GET_ERR_CODE() */
+enum dx_fw_init_error_code {
+	DX_FW_INIT_ERR_INVALID_TYPE = 0x001,
+	DX_FW_INIT_ERR_INVALID_LENGTH = 0x002,
+	DX_FW_INIT_ERR_INVALID_VALUE = 0x003,
+	DX_FW_INIT_ERR_PARAM_MISSING = 0x004,
+	DX_FW_INIT_ERR_NOT_SUPPORTED = 0x005,
+	DX_FW_INIT_ERR_RNG_FAILURE = 0x00F,
+	DX_FW_INIT_ERR_MALLOC_FAILURE = 0x0FC,
+	DX_FW_INIT_ERR_INIT_FAILURE = 0x0FD,
+	DX_FW_INIT_ERR_TIMEOUT = 0x0FE,
+	DX_FW_INIT_ERR_GENERAL_FAILURE = 0x0FF
+};
+
+#endif /*__DX_INIT_CC_ABI_H__*/
diff --git a/drivers/staging/sep54/dx_init_cc_defs.h b/drivers/staging/sep54/dx_init_cc_defs.h
new file mode 100644
index 0000000..4524c35
--- /dev/null
+++ b/drivers/staging/sep54/dx_init_cc_defs.h
@@ -0,0 +1,163 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef __DX_INIT_CC_DEFS__H__
+#define __DX_INIT_CC_DEFS__H__
+
+/** @file dx_init_cc_defs.h
+*  \brief definitions for the CC54 initialization API
+*
+*  \version
+*  \author avis
+*/
+
+/* message token to sep */
+/* CC_INIT definitions */
+#define DX_CC_INIT_HEAD_MSG_TOKEN		0x544B2FBAUL
+
+/* The enum below lists the word offsets inside the CC_Init message. */
+/* The last entry is the message length (in 32-bit words). */
+enum dx_cc_init_msg_offset {
+	DX_CC_INIT_MSG_TOKEN_OFFSET,
+	DX_CC_INIT_MSG_LENGTH_OFFSET,
+	DX_CC_INIT_MSG_OP_CODE_OFFSET,
+	DX_CC_INIT_MSG_FLAGS_OFFSET,
+	DX_CC_INIT_MSG_RESIDENT_IMAGE_OFFSET,
+	DX_CC_INIT_MSG_I_CACHE_IMAGE_OFFSET,
+	DX_CC_INIT_MSG_I_CACHE_DEST_OFFSET,
+	DX_CC_INIT_MSG_I_CACHE_SIZE_OFFSET,
+	DX_CC_INIT_MSG_D_CACHE_ADDR_OFFSET,
+	DX_CC_INIT_MSG_D_CACHE_SIZE_OFFSET,
+	DX_CC_INIT_MSG_CC_INIT_EXT_ADDR_OFFSET,
+	DX_CC_INIT_MSG_USER_CONFIG_OFFSET,
+	DX_CC_INIT_MSG_VRL_ADDR_OFFSET,
+	DX_CC_INIT_MSG_MAGIC_NUM_OFFSET,
+	DX_CC_INIT_MSG_KEY_INDEX_OFFSET,
+	DX_CC_INIT_MSG_KEY_HASH_0_OFFSET,
+	DX_CC_INIT_MSG_KEY_HASH_1_OFFSET,
+	DX_CC_INIT_MSG_KEY_HASH_2_OFFSET,
+	DX_CC_INIT_MSG_KEY_HASH_3_OFFSET,
+	DX_CC_INIT_MSG_CHECK_SUM_OFFSET,
+	DX_CC_INIT_MSG_LENGTH
+};
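+
+/*
+ * Illustrative sketch: the offsets above index a u32 message buffer.
+ * A host-side builder could start the message as below. The helper name
+ * is an editor's assumption, and msg is assumed to hold
+ * DX_CC_INIT_MSG_LENGTH words, per the trailing enum entry.
+ */
+static inline void dx_cc_init_msg_demo(u32 *msg)
+{
+	msg[DX_CC_INIT_MSG_TOKEN_OFFSET] = DX_CC_INIT_HEAD_MSG_TOKEN;
+	/* Length in 32-bit words, as the trailing enum entry suggests */
+	msg[DX_CC_INIT_MSG_LENGTH_OFFSET] = DX_CC_INIT_MSG_LENGTH;
+}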
+
+/* Set this value if the key used in the VRL is to be verified against the
+   KEY_HASH fields in the CC_INIT message */
+#define DX_CC_INIT_MSG_VRL_KEY_INDEX_INVALID 0xFFFFFFFF
+
+enum dx_cc_init_msg_icache_size {
+	DX_CC_INIT_MSG_ICACHE_SCR_DISABLE_SIZE,
+	DX_CC_INIT_MSG_ICACHE_SCR_256K_SIZE,
+	DX_CC_INIT_MSG_ICACHE_SCR_1M_SIZE,
+	DX_CC_INIT_MSG_ICACHE_SCR_2M_SIZE,
+	DX_CC_INIT_MSG_ICACHE_SCR_4M_SIZE,
+	DX_CC_INIT_MSG_ICACHE_SCR_INVALID_SIZE
+};
+/* Icache size enum to log2 -
+ * Map dx_cc_init_msg_icache_size enum values to log2(size);
+ * (-1) marks an invalid/disabled value. */
+#define DX_CC_ICACHE_SIZE_ENUM2LOG { -1, 18, 20, 21, 22, -1 }
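+
+/*
+ * Illustrative sketch: translating the I$ size enum to a byte count via
+ * the table above. The helper name is an editor's assumption.
+ */
+static inline long dx_cc_icache_enum_to_bytes(
+	enum dx_cc_init_msg_icache_size sz)
+{
+	static const int enum2log[] = DX_CC_ICACHE_SIZE_ENUM2LOG;
+
+	if (enum2log[sz] < 0)
+		return -1;	/* disabled or invalid size */
+	return 1L << enum2log[sz];
+}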
+
+#define DX_CC_INIT_D_CACHE_MIN_SIZE_LOG2 17	/* 128KB */
+#define DX_CC_INIT_D_CACHE_MIN_SIZE (1 << DX_CC_INIT_D_CACHE_MIN_SIZE_LOG2)
+#define DX_CC_INIT_D_CACHE_MAX_SIZE_LOG2 27	/* 128MB */
+#define DX_CC_INIT_D_CACHE_MAX_SIZE (1 << DX_CC_INIT_D_CACHE_MAX_SIZE_LOG2)
+
+/* Bit flags for the CC_Init flags word */
+/* The CC_Init resident image address is valid (it might be passed via VRL) */
+#define DX_CC_INIT_FLAGS_RESIDENT_ADDR_FLAG		0x00000001
+/* The CC_Init I$ address is valid (it might be passed via VRL) */
+#define DX_CC_INIT_FLAGS_I_CACHE_ADDR_FLAG		0x00000002
+/* The CC_Init D$ address is valid (the first CC_Init does not config. D$) */
+#define DX_CC_INIT_FLAGS_D_CACHE_EXIST_FLAG		0x00000004
+/* The CC_Init extension address is valid and should be used */
+#define DX_CC_INIT_FLAGS_INIT_EXT_FLAG			0x00000008
+/* The I$ (and applets) should be encrypted */
+#define DX_CC_INIT_FLAGS_CACHE_ENC_FLAG			0x00000010
+/* The I$ (and applets) should be scrambled */
+#define DX_CC_INIT_FLAGS_CACHE_SCRAMBLE_FLAG		0x00000020
+/* The I$ (and applets) should be copied to new address (Icache address) */
+#define DX_CC_INIT_FLAGS_CACHE_COPY_FLAG		0x00000040
+/* Use the magic number in the CC_Init to verify the VRL */
+#define DX_CC_INIT_FLAGS_MAGIC_NUMBER_FLAG		0x00000080
+
+#define DX_CC_INIT_FLAGS_CACHE_COPY_MASK_FLAG \
+	(DX_CC_INIT_FLAGS_CACHE_ENC_FLAG | \
+	DX_CC_INIT_FLAGS_CACHE_SCRAMBLE_FLAG | \
+	DX_CC_INIT_FLAGS_CACHE_COPY_FLAG)
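+
+/*
+ * Illustrative sketch: a flags word for a scrambled I$ image that must
+ * be copied to the I$ allocation would combine these bits; the helper
+ * name is an editor's assumption.
+ */
+static inline u32 dx_cc_init_demo_flags(void)
+{
+	return DX_CC_INIT_FLAGS_I_CACHE_ADDR_FLAG |
+	       DX_CC_INIT_FLAGS_CACHE_SCRAMBLE_FLAG |
+	       DX_CC_INIT_FLAGS_CACHE_COPY_FLAG;
+}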
+
+/*-------------------------------
+  STRUCTURES
+---------------------------------*/
+struct dx_cc_def_applet_msg {
+	u32 cc_flags;
+	u32 icache_image_addr;
+	u32 vrl_addr;
+	u32 magic_num;
+	u32 ver_key_index;
+	u32 hashed_key_val[4];
+};
+
+/**
+ * struct dx_cc_init_msg - used for passing the parameters to the CC_Init API.
+ * The structure is converted into the CC_Init message
+ * @cc_flags:		Bit flags for different fields in the message
+ * @res_image_addr:	Resident image address in the HOST memory
+ * @icache_image_addr:	I$ image address in the HOST memory
+ * @icache_addr:	I$ memory allocation in case the I$ is not placed in
+ *			its final location (scrambled or encrypted I$)
+ * @icache_size:	I$ size (the total I$ for the Dx image and all
+ *			applets). The size is limited to 256K, 1M, 2M and 4M.
+ * @dcache_addr:	D$ memory allocation in the HOST memory
+ * @dcache_size:	D$ memory allocation size
+ * @init_ext_addr:	Address of the CC_Init extension message in the HOST
+ * @user_config:	User configuration word
+ * @vrl_addr:		The address of the VRL in the HOST memory
+ * @magic_num:		Requested VRL magic number
+ * @ver_key_index:	The index of the verification key
+ * @hashed_key_val:	The truncated hash value of the verification key in
+ *			case the OTP keys are not in use
+ */
+
+struct dx_cc_init_msg {
+	u32 cc_flags;
+	u32 res_image_addr;
+	u32 icache_image_addr;
+	u32 icache_addr;
+	enum dx_cc_init_msg_icache_size icache_size;
+	u32 dcache_addr;
+	u32 dcache_size;
+	u32 init_ext_addr;
+	u32 user_config;
+	u32 vrl_addr;
+	u32 magic_num;
+	u32 ver_key_index;
+	u32 hashed_key_val[4];
+};
+
+#endif /*__DX_INIT_CC_DEFS__H__*/
diff --git a/drivers/staging/sep54/dx_reg_base_host.h b/drivers/staging/sep54/dx_reg_base_host.h
new file mode 100644
index 0000000..afe6ea6
--- /dev/null
+++ b/drivers/staging/sep54/dx_reg_base_host.h
@@ -0,0 +1,37 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+#ifndef __DX_REG_BASE_HOST_H__
+#define __DX_REG_BASE_HOST_H__
+
+#define DX_BASE_CC 0x83F00000
+#define DX_BASE_ENV_REGS 0x83F88000
+#define DX_BASE_ENV_CC_MEMORIES 0x83F88000
+#define DX_BASE_ENV_PERF_RAM 0x83F89000
+
+#define DX_BASE_CRY_KERNEL     0x0UL
+#define DX_BASE_ROM     0x83F80000
+
+#endif /*__DX_REG_BASE_HOST_H__*/
diff --git a/drivers/staging/sep54/dx_reg_common.h b/drivers/staging/sep54/dx_reg_common.h
new file mode 100644
index 0000000..dd43153
--- /dev/null
+++ b/drivers/staging/sep54/dx_reg_common.h
@@ -0,0 +1,39 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+#ifndef __DX_REG_COMMON_H__
+#define __DX_REG_COMMON_H__
+
+/* \file dx_reg_common.h
+   This file includes additions to the HW-specific information that is missing
+   from the header files provided by the HW team */
+
+#define DX_ICACHE_SIZE 0x08000000UL
+#define DX_DCACHE_SIZE 0x08000000UL
+#define DX_DEV_SIGNATURE 0xDCC54000UL
+
+#define DX_DD_REGION_MASK_SIZE 25	/* Number of bits in direct-access region */
+
+#endif /*__DX_REG_COMMON_H__*/
diff --git a/drivers/staging/sep54/dx_sep_kapi.h b/drivers/staging/sep54/dx_sep_kapi.h
new file mode 100644
index 0000000..cb71f6a
--- /dev/null
+++ b/drivers/staging/sep54/dx_sep_kapi.h
@@ -0,0 +1,39 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+/**
+ * Kernel services API of CC driver:
+ * 1. Host-to-SeP Applet request API.
+ * 2. SeP Request agent API.
+ * 3. Power state control (sleep, warm-boot)
+ */
+#ifndef __DX_SEP_KAPI_H__
+#define __DX_SEP_KAPI_H__
+
+#include "dx_sepapp_kapi.h"
+#include "dx_sep_req_kapi.h"
+#include "dx_sep_power_kapi.h"
+
+#endif /*__DX_SEP_KAPI_H__*/
diff --git a/drivers/staging/sep54/dx_sep_power_kapi.h b/drivers/staging/sep54/dx_sep_power_kapi.h
new file mode 100644
index 0000000..43cb59b
--- /dev/null
+++ b/drivers/staging/sep54/dx_sep_power_kapi.h
@@ -0,0 +1,78 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+/**
+ * Kernel services API of power state control (sleep, warm-boot)
+ */
+#ifndef __DX_SEP_POWER_KAPI_H__
+#define __DX_SEP_POWER_KAPI_H__
+
+#include <linux/types.h>
+
+/******************************************/
+/* Power state control (sleep, warm-boot) */
+/******************************************/
+
+/**
+ * Power states of SeP
+ */
+enum dx_sep_power_state {
+	DX_SEP_POWER_INVALID = -1,	/* SeP is in unexpected (error) state */
+	DX_SEP_POWER_OFF = 0,	/* SeP is assumed to be off (unreachable) */
+	DX_SEP_POWER_BOOT,	/* SeP is in (warm) boot process */
+	DX_SEP_POWER_IDLE,	/* SeP is running but no request is pending */
+	DX_SEP_POWER_ACTIVE,	/* SeP is running and processing */
+	DX_SEP_POWER_HIBERNATED	/* SeP is in hibernated (sleep) state */
+};
+
+/* Prototype for callback on sep state change */
+typedef void (*dx_sep_state_change_callback_t) (unsigned long cookie);
+
+/**
+ * dx_sep_power_state_set() - Change power state of SeP (CC)
+ *
+ * @req_state:	The requested power state (_HIBERNATED or _ACTIVE)
+ *
+ * Request a change of power state to the given state and block until the
+ * transition is complete.
+ * Requesting _HIBERNATED is allowed only from _ACTIVE state.
+ * Requesting _ACTIVE is allowed only after CC was powered back on (warm boot).
+ * Return codes:
+ * 0 -	Power state change completed.
+ * -EINVAL -	This request is not allowed in current SeP state or req_state
+ *		value is invalid.
+ * -EBUSY -	State change request ignored because SeP is busy (primarily,
+ *		when requesting hibernation while SeP is processing something).
+ * -ETIME -	Request timed out (primarily, when asking for _ACTIVE)
+ */
+int dx_sep_power_state_set(enum dx_sep_power_state req_state);
+
+/**
+ * dx_sep_power_state_get() - Get the current power state of SeP (CC)
+ * @state_jiffies_p:	The "jiffies" value at which the given state was detected.
+ */
+enum dx_sep_power_state dx_sep_power_state_get(unsigned long *state_jiffies_p);
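+
+/*
+ * Illustrative sketch: requesting hibernation and confirming the state,
+ * per the contract above. The helper name and the policy of returning
+ * rc unchanged on failure are editor's assumptions.
+ */
+static inline int dx_sep_power_demo_hibernate(void)
+{
+	unsigned long seen_at;
+	int rc = dx_sep_power_state_set(DX_SEP_POWER_HIBERNATED);
+
+	if (rc)	/* -EINVAL, -EBUSY or -ETIME per the contract above */
+		return rc;
+	return dx_sep_power_state_get(&seen_at) == DX_SEP_POWER_HIBERNATED ?
+		0 : -1;
+}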
+
+#endif /*__DX_SEP_POWER_KAPI_H__*/
diff --git a/drivers/staging/sep54/dx_sep_req_kapi.h b/drivers/staging/sep54/dx_sep_req_kapi.h
new file mode 100644
index 0000000..b4ff56f
--- /dev/null
+++ b/drivers/staging/sep54/dx_sep_req_kapi.h
@@ -0,0 +1,78 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+/**
+ * Kernel services API of the SeP Request agent.
+ */
+#ifndef __DX_SEP_REQ_KAPI_H__
+#define __DX_SEP_REQ_KAPI_H__
+
+#include <linux/types.h>
+
+/*******************************/
+/* SeP-to-Host request agents  */
+/*******************************/
+
+/**
+ * dx_sep_req_register_agent() - Register an agent
+ * @agent_id: The agent ID
+ * @max_buf_size: A pointer to the max buffer size
+ *
+ * Returns int 0 on success
+ */
+int dx_sep_req_register_agent(u8 agent_id, u32 *max_buf_size);
+
+/**
+ * dx_sep_req_unregister_agent() - Unregister an agent
+ * @agent_id: The agent ID
+ *
+ * Returns int 0 on success
+ */
+int dx_sep_req_unregister_agent(u8 agent_id);
+
+/**
+ * dx_sep_req_wait_for_request() - Wait for an incoming SeP request
+ * @agent_id: The agent ID
+ * @sep_req_buf_p: Pointer to the incoming request buffer
+ * @req_buf_size: Pointer to the incoming request size
+ * @timeout: Time to wait for an incoming request in jiffies
+ *
+ * Returns int 0 on success
+ */
+int dx_sep_req_wait_for_request(u8 agent_id, u8 *sep_req_buf_p,
+				u32 *req_buf_size, u32 timeout);
+
+/**
+ * dx_sep_req_send_response() - Send a response to the sep
+ * @agent_id: The agent ID
+ * @host_resp_buf_p: Pointer to the outgoing response buffer
+ * @resp_buf_size: Size of the outgoing response in bytes
+ *
+ * Returns int 0 on success
+ */
+int dx_sep_req_send_response(u8 agent_id, u8 *host_resp_buf_p,
+			     u32 resp_buf_size);
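+
+/*
+ * Illustrative sketch: one service iteration of an agent previously
+ * registered with dx_sep_req_register_agent(). The helper name, the
+ * echo-style response and the in/out use of req_size (maximum on entry,
+ * actual on return) are editor's assumptions.
+ */
+static inline int dx_sep_req_demo_serve_once(u8 agent_id, u8 *buf,
+					     u32 buf_size, u32 timeout)
+{
+	u32 req_size = buf_size;
+	int rc = dx_sep_req_wait_for_request(agent_id, buf, &req_size,
+					     timeout);
+
+	if (rc)
+		return rc;
+	/* ... process the request held in buf[0..req_size-1] ... */
+	return dx_sep_req_send_response(agent_id, buf, req_size);
+}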
+
+#endif /*__DX_SEP_REQ_KAPI_H__*/
diff --git a/drivers/staging/sep54/dx_sepapp_kapi.h b/drivers/staging/sep54/dx_sepapp_kapi.h
new file mode 100644
index 0000000..c85f6d3
--- /dev/null
+++ b/drivers/staging/sep54/dx_sepapp_kapi.h
@@ -0,0 +1,135 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+/**
+ * Kernel API for Host-to-SeP Applet requests.
+ */
+#ifndef __DX_SEPAPP_KAPI_H__
+#define __DX_SEPAPP_KAPI_H__
+
+#include <linux/types.h>
+#include "dx_driver_abi.h"
+#include "crypto_api.h"
+
+/**
+ * struct dxdi_kmemref - Kernel memory reference
+ * @dma_direction:	Planned DMA direction
+ * @sgl:	Scatter/Gather list of given buffer (NULL if using buf_p)
+ * @nbytes:	Size in bytes of data referenced by "sgl"
+ */
+struct dxdi_kmemref {
+	enum dxdi_data_direction dma_direction;
+	struct scatterlist *sgl;
+	unsigned long nbytes;	/* data size */
+};
+
+/**
+ * dxdi_sepapp_kparams - Kernel parameters description for dx_sepapp_* func.
+ * @params_types:	The type of each paramter in params[] array
+ * @params:		The given parameters description
+ */
+struct dxdi_sepapp_kparams {
+	enum dxdi_sepapp_param_type params_types[SEP_APP_PARAMS_MAX];
+	union {
+		struct dxdi_val_param val;	/* DXDI_SEPAPP_PARAM_VAL */
+		struct dxdi_kmemref kmemref;	/* DXDI_SEPAPP_PARAM_MEMREF */
+	} params[SEP_APP_PARAMS_MAX];
+};
+
+#define DX_SEPAPP_CLIENT_CTX_NULL NULL
+
+/*******************************/
+/* Host-to-SeP Applet requests */
+/*******************************/
+
+/**
+ * dx_sepapp_context_alloc() - Allocate client context for SeP applets ops.
+ * Returns DX_SEPAPP_CLIENT_CTX_NULL on failure.
+ */
+void *dx_sepapp_context_alloc(void);
+
+/**
+ * dx_sepapp_context_free() - Free client context.
+ *
+ * @ctx: Client context to free.
+ */
+void dx_sepapp_context_free(void *ctx);
+
+/**
+ * dx_sepapp_session_open() - Open a session with a SeP applet
+ *
+ * @ctx:		SeP client context
+ * @sepapp_uuid:	Target applet UUID
+ * @auth_method:	Session connection authentication method
+ *			(Currently only 0/Public is supported)
+ * @auth_data:		Pointer to authentication data - Should be NULL
+ * @open_params:	Parameters for session opening
+ * @session_id:		Returned session ID (on success)
+ * @ret_origin:		Return code origin
+ *
+ * If ret_origin is not DXDI_SEP_MODULE_APP (i.e., the error originated above
+ * the applet), the return code must be 0 on success. For DXDI_SEP_MODULE_APP
+ * the return code is applet-specific.
+ */
+int dx_sepapp_session_open(void *ctx,
+			   u8 *sepapp_uuid,
+			   u32 auth_method,
+			   void *auth_data,
+			   struct dxdi_sepapp_kparams *open_params,
+			   int *session_id, enum dxdi_sep_module *ret_origin);
+
+/**
+ * dx_sepapp_session_close() - Close a session with an applet
+ *
+ * @ctx:	SeP client context
+ * @session_id: Session ID as returned from dx_sepapp_session_open()
+ *
+ * Returns 0 on success
+ */
+int dx_sepapp_session_close(void *ctx, int session_id);
+
+/**
+ * dx_sepapp_command_invoke() - Initiate command in the applet associated with
+ *				given session ID
+ *
+ * @ctx:	SeP client context
+ * @session_id:	The target session ID
+ * @command_id:	The ID of the command to initiate (applet-specific)
+ * @command_params:	The command parameters
+ * @ret_origin:	The origin of the return code
+ */
+int dx_sepapp_command_invoke(void *ctx,
+			     int session_id,
+			     u32 command_id,
+			     struct dxdi_sepapp_kparams *command_params,
+			     enum dxdi_sep_module *ret_origin);
+
+int async_sepapp_command_invoke(void *ctx,
+			     int session_id,
+			     u32 command_id,
+			     struct dxdi_sepapp_kparams *command_params,
+			     enum dxdi_sep_module *ret_origin,
+			     struct async_req_ctx *areq_ctx);
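+
+/*
+ * Illustrative sketch: a full open/invoke/close round trip using the
+ * calls above. The helper name, command ID handling and passing NULL
+ * for both parameter blocks are editor's assumptions.
+ */
+static inline int dx_sepapp_demo_invoke(u8 *applet_uuid, u32 command_id)
+{
+	enum dxdi_sep_module ret_origin;
+	int session_id, rc;
+	void *ctx = dx_sepapp_context_alloc();
+
+	if (ctx == DX_SEPAPP_CLIENT_CTX_NULL)
+		return -1;
+	rc = dx_sepapp_session_open(ctx, applet_uuid, 0 /* public auth */,
+				    NULL, NULL, &session_id, &ret_origin);
+	if (rc == 0) {
+		rc = dx_sepapp_command_invoke(ctx, session_id, command_id,
+					      NULL, &ret_origin);
+		dx_sepapp_session_close(ctx, session_id);
+	}
+	dx_sepapp_context_free(ctx);
+	return rc;
+}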
+
+#endif /*__DX_SEPAPP_KAPI_H__*/
diff --git a/drivers/staging/sep54/lli_mgr.c b/drivers/staging/sep54/lli_mgr.c
new file mode 100644
index 0000000..414157e
--- /dev/null
+++ b/drivers/staging/sep54/lli_mgr.c
@@ -0,0 +1,2110 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+ /*!
+  * \file lli_mgr.c
+  * \brief LLI logic: Building MLLI tables from user virtual memory buffers
+  */
+
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_LLI_MGR
+
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/pagemap.h>
+#include "sep_ctx.h"
+#include "dx_driver.h"
+#include "sep_log.h"
+#include "lli_mgr.h"
+
+/* Limitation of DLLI buffer size due to size of "SIZE" field */
+/* Set this to 0 in order to disable DLLI support */
+/*#define DLLI_BUF_LIMIT \
+	((1UL << SEP_SW_DESC_CRYPTO_OP_IFT_SIZE_BIT_SIZE) - 1)*/
+/* For now, limit to the size planned for DLLI_AUX_BUF_LIMIT because we
+   must always have both din and dout as DLLI or both as MLLI. When mixing
+   is supported we can increase this limit to the one in the comment above */
+#define DLLI_BUF_LIMIT 2048
+
+/* The following define sets the size up to which we optimize for a DLLI
+   buffer descriptor even for non-contiguous buffers. If the buffer is not
+   physically contiguous (or not cache aligned on platforms with no cache
+   coherency), an auxiliary buffer is allocated and the data copied
+   to/from it. */
+#define DLLI_AUX_BUF_LIMIT 2048
+/* Note: The value of DLLI_AUX_BUF_LIMIT is tuned based on empirical tests
+   on our system so the memcpy overhead does not "consume" the performance
+   benefit of using DLLI */
+
+#if DLLI_AUX_BUF_LIMIT > DLLI_BUF_LIMIT
+#error DLLI_AUX_BUF_LIMIT is too large. It must not exceed DLLI_BUF_LIMIT
+#endif
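+
+/*
+ * Illustrative sketch of the decision described above: a buffer no
+ * larger than DLLI_AUX_BUF_LIMIT may be staged through a physically
+ * contiguous aux. buffer so it qualifies for DLLI. The helper name is
+ * an editor's assumption.
+ */
+static inline bool is_dlli_aux_candidate(unsigned long buf_size)
+{
+	return (DLLI_AUX_BUF_LIMIT > 0) && (buf_size <= DLLI_AUX_BUF_LIMIT);
+}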
+
+#if (SEP_SUPPORT_SHA > 256)
+#define MAX_CRYPTO_BLOCK_LOG2 7
+#else
+#define MAX_CRYPTO_BLOCK_LOG2 6
+#endif
+#define MAX_CRYPTO_BLOCK_SIZE (1 << MAX_CRYPTO_BLOCK_LOG2)
+#define MAX_CRYPTO_BLOCK_MASK (MAX_CRYPTO_BLOCK_SIZE - 1)
+
+#define SEP_LLI_ENTRY_BYTE_SIZE (SEP_LLI_ENTRY_WORD_SIZE * sizeof(u32))
+
+/* Index of first LLI which encodes data buffer
+   (after "next VA" and "next DMA") */
+#define FIRST_DATA_LLI_INDEX 2
+
+/* Overhead for tables linked list:
+ * one entry for FW linked list + one for host/kernel linked list
+ * (required due to difference between dma_addr and kernel virt. addr.) +
+ * last entry is reserved to the protected entry with the stop bit */
+#define SEP_MLLI_LINK_TO_NEXT_OVERHEAD 3
+
+/* macro to set/get link-to-next virtual address for next MLLI
+ * This macro relies on availability of an extra LLI entry per MLLI table.
+ * It uses the space of the first entry so the "SeP" table start after it.
+ */
+#define SEP_MLLI_SET_NEXT_VA(cur_mlli_p, next_mlli_p) \
+do { \
+	u32 __phys_ptr_ = virt_to_phys(next_mlli_p) & (DMA_BIT_MASK(32));\
+	SEP_LLI_SET(cur_mlli_p, ADDR, __phys_ptr_);\
+} while (0)
+#define SEP_MLLI_SET_NEXT_VA_NULL(cur_mlli_start) \
+		SEP_LLI_SET(cur_mlli_start, ADDR, 0)
+#define SEP_MLLI_GET_NEXT_VA(cur_mlli_start) \
+	(SEP_LLI_GET((cur_mlli_start), ADDR) == 0 ? 0 : \
+	 (u32 *)phys_to_virt(SEP_LLI_GET((cur_mlli_start), ADDR)))
+
+
+#define CACHE_LINE_MASK (L1_CACHE_BYTES - 1)
+
+/* Number of data bytes to gather in the last LLI at the end of a Din buffer */
+#define DIN_LAST_LLI_GATHER_SIZE 32
+
+/* Select the client buffer amount to copy into aux. buffers (head/tail) */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+#if (DIN_LAST_LLI_GATHER_SIZE > L1_CACHE_BYTES)
+#define EDGE_BUFS_POOL_ITEM_SIZE DIN_LAST_LLI_GATHER_SIZE
+#else
+#define EDGE_BUFS_POOL_ITEM_SIZE L1_CACHE_BYTES
+#endif
+#else	/* Coherent cache - only tail buffer required per CC requirements */
+#define EDGE_BUFS_POOL_ITEM_SIZE DIN_LAST_LLI_GATHER_SIZE
+#endif
+
+/* Similar to for_each_sg but no need for nents - runs until NULL */
+#define for_each_valid_sg(sglist, cur_sge)	\
+	for (cur_sge = (sglist); cur_sge != NULL; cur_sge = sg_next(cur_sge))
+
+/**
+ * struct llimgr_obj - The LLI manager object (exposed as llimgr_h)
+ * @dev:	The associated device context (for DMA operations)
+ * @mlli_cache:	DMA coherent memory pool for the MLLI tables
+ * @edge_bufs_pool:	Pool for auxiliary buffers used instead of user buffer
+ *			start/end. Used for last LLI data mirroring to fulfil
+ *			requirement of 32B on last LLI.
+ *			In case of a non-coherent cache and a data buffer
+ *			which starts/ends unaligned to cache line we should
+ *			allocate external buffer to be used as the first/last
+ *			LLI entry instead of the "tail". This is required to
+ *			avoid cache incoherency due to access by other process
+ *			entities to the cache lines where the data is. This is
+ *			required only for the output buffer where the same cache
+ *			line is accessed by the host processor while SeP should
+ *			DMA output data into it.
+ * @dlli_bufs_pool:	Pool for client buffers up to DLLI_AUX_BUF_LIMIT which
+ *			are not physically contiguous and are copied into it
+ *			in order to become contiguous, thus suitable for DLLI
+ *			access.
+ * @max_lli_num:	Maximum LLI entries number in MLLI table.
+ * @max_data_per_mlli:	Maximum bytes of data mapped by each MLLI table.
+ *
+ */
+struct llimgr_obj {
+	struct device *dev;
+	struct dma_pool *mlli_cache;
+	struct dma_pool *edge_bufs_pool;
+	struct dma_pool *dlli_bufs_pool;
+	unsigned int max_lli_num;
+	unsigned long max_data_per_mlli;
+};
+
+/* Iterator state for building the MLLI tables list */
+struct mlli_tables_list_iterator {
+	u32 *prev_mlli_table_p;
+	u32 *cur_mlli_table_p;
+	dma_addr_t cur_mlli_dma_addr;
+	unsigned int next_lli_idx; /* Data LLI (After FIRST_DATA_LLI_INDEX) */
+	unsigned long cur_mlli_accum_data; /* Accumulated in current MLLI */
+};
+
+static void cleanup_mlli_tables_list(struct llimgr_obj *llimgr_p,
+				     struct mlli_tables_list *mlli_tables_ptr,
+				     int is_data_dirty);
+static inline unsigned int get_sgl_nents(struct scatterlist *sgl);
+
+/**
+ * llimgr_create() - Create LLI-manager object
+ * @dev:	 Device context
+ * @mlli_table_size:	 The maximum size of an MLLI table in bytes
+ *
+ * Returns llimgr_h Created object handle or LLIMGR_NULL_HANDLE if failed
+ */
+void *llimgr_create(struct device *dev, unsigned long mlli_table_size)
+{
+	struct llimgr_obj *new_llimgr_p;
+	unsigned int num_of_full_page_llis;
+
+	new_llimgr_p = kmalloc(sizeof(struct llimgr_obj), GFP_KERNEL);
+	if (new_llimgr_p == NULL)
+		return LLIMGR_NULL_HANDLE;
+	new_llimgr_p->dev = dev;
+	/* create dma "coherent" memory pool for MLLI tables */
+	new_llimgr_p->mlli_cache = dma_pool_create("dx_sep_mlli_tables", dev,
+						   mlli_table_size,
+						   L1_CACHE_BYTES, 0);
+	if (new_llimgr_p->mlli_cache == NULL) {
+		pr_err("Failed creating DMA pool for MLLI tables\n");
+		goto create_failed_mlli_pool;
+	}
+
+	/* Create pool for holding buffer "tails" which share cache lines with
+	 * other data buffers */
+	new_llimgr_p->edge_bufs_pool = dma_pool_create("dx_sep_edge_bufs", dev,
+						       EDGE_BUFS_POOL_ITEM_SIZE,
+						       EDGE_BUFS_POOL_ITEM_SIZE,
+						       0);
+	if (new_llimgr_p->edge_bufs_pool == NULL) {
+		pr_err("Failed creating DMA pool for edge buffers\n");
+		goto create_failed_edge_bufs_pool;
+	}
+
+	new_llimgr_p->max_lli_num =
+	    ((mlli_table_size / SEP_LLI_ENTRY_BYTE_SIZE) -
+	     SEP_MLLI_LINK_TO_NEXT_OVERHEAD);
+	num_of_full_page_llis = new_llimgr_p->max_lli_num;
+	num_of_full_page_llis -= 2; /* First and last entries are partial pages */
+	num_of_full_page_llis -= 1;	/* One less for end aux. buffer */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	num_of_full_page_llis -= 1;	/* One less for start aux. buffer */
+#endif
+	/* Always a multiple of PAGE_SIZE - ensures that it is also a
+	 * crypto block multiple. */
+	new_llimgr_p->max_data_per_mlli = num_of_full_page_llis * PAGE_SIZE;
+
+#if DLLI_AUX_BUF_LIMIT > 0
+	new_llimgr_p->dlli_bufs_pool = dma_pool_create("dx_sep_dlli_bufs", dev,
+						       DLLI_AUX_BUF_LIMIT,
+						       DLLI_AUX_BUF_LIMIT, 0);
+	if (new_llimgr_p->dlli_bufs_pool == NULL) {
+		pr_err("Failed creating DMA pool for DLLI buffers\n");
+		goto create_failed_dlli_bufs_pool;
+	}
+#endif
+
+	return new_llimgr_p;
+
+ create_failed_dlli_bufs_pool:
+	dma_pool_destroy(new_llimgr_p->edge_bufs_pool);
+ create_failed_edge_bufs_pool:
+	dma_pool_destroy(new_llimgr_p->mlli_cache);
+ create_failed_mlli_pool:
+	kfree(new_llimgr_p);
+	return LLIMGR_NULL_HANDLE;
+}
+
+/**
+ * llimgr_destroy() - Destroy (free resources of) given LLI-manager object
+ * @llimgr:	 LLI-manager object handle
+ *
+ */
+void llimgr_destroy(void *llimgr)
+{
+	struct llimgr_obj *llimgr_p = (struct llimgr_obj *)llimgr;
+
+#if DLLI_AUX_BUF_LIMIT > 0
+	dma_pool_destroy(llimgr_p->dlli_bufs_pool);
+#endif
+	dma_pool_destroy(llimgr_p->edge_bufs_pool);
+	dma_pool_destroy(llimgr_p->mlli_cache);
+	kfree(llimgr_p);
+}
+
+/*****************************************/
+/* Auxiliary buffers handling functions  */
+/*****************************************/
+
+/**
+ * calc_aux_bufs_size() - Calculate required aux. buffers for given user buffer
+ * @buf_start:	A pointer value at buffer start (used to calculate alignment)
+ * @buf_size:	User buffer size in bytes
+ * @data_direction:	DMA direction
+ * @start_aux_buf_size_p:	Returned required aux. buffer size at start
+ * @end_aux_buf_size_p:	Returned required aux. buffer size at end
+ *
+ * Returns void
+ */
+static void calc_aux_bufs_size(const unsigned long buf_start,
+			       unsigned long buf_size,
+			       enum dma_data_direction data_direction,
+			       unsigned long *start_aux_buf_size_p,
+			       unsigned long *end_aux_buf_size_p)
+{
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	const bool is_dout = ((data_direction == DMA_BIDIRECTIONAL) ||
+			      (data_direction == DMA_FROM_DEVICE));
+#endif				/*CONFIG_NOT_COHERENT_CACHE */
+
+	/* Calculate required aux. buffers: cache line tails + last w/prelast */
+	*start_aux_buf_size_p = 0;
+	*end_aux_buf_size_p = 0;
+
+	if (buf_size == 0)
+		return;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	/* start of buffer unaligned to cache line... */
+	if ((is_dout) && (buf_start & CACHE_LINE_MASK)) {
+		*start_aux_buf_size_p =	/* Remainder to end of cache line */
+		    L1_CACHE_BYTES - (buf_start & CACHE_LINE_MASK);
+		/* But not more than buffer size */
+		if (*start_aux_buf_size_p > buf_size)
+			*start_aux_buf_size_p = buf_size;
+	}
+#endif				/*CONFIG_NOT_COHERENT_CACHE */
+
+	/* last 32 B must always be on last LLI entry */
+	/* Put 32 B or the whole buffer if smaller than 32 B */
+	*end_aux_buf_size_p = buf_size >= DIN_LAST_LLI_GATHER_SIZE ?
+	    DIN_LAST_LLI_GATHER_SIZE : buf_size;
+	if ((*end_aux_buf_size_p + *start_aux_buf_size_p) > buf_size) {
+		/* End aux. buffer covers part of
+		 * start aux. buffer - leave only remainder in
+		 * start aux. buffer. */
+		*start_aux_buf_size_p = buf_size - *end_aux_buf_size_p;
+	}
+#ifdef DEBUG
+	if (((*end_aux_buf_size_p + *start_aux_buf_size_p) > buf_size) ||
+	    (*start_aux_buf_size_p > EDGE_BUFS_POOL_ITEM_SIZE) ||
+	    (*end_aux_buf_size_p > EDGE_BUFS_POOL_ITEM_SIZE)) {
+		pr_err(
+			    "Invalid aux. buffer sizes: buf_size=%lu B, start_aux=%lu B, end_aux=%lu B\n",
+			    buf_size, *start_aux_buf_size_p,
+			    *end_aux_buf_size_p);
+	} else {
+		pr_debug
+		    ("buf_size=%lu B, start_aux=%lu B, end_aux=%lu B\n",
+		     buf_size, *start_aux_buf_size_p, *end_aux_buf_size_p);
+	}
+#endif
+}
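+
+/*
+ * Worked example (illustrative only; assumes L1_CACHE_BYTES == 32 and
+ * DIN_LAST_LLI_GATHER_SIZE == 32): a 100 B DMA_FROM_DEVICE buffer
+ * starting at 0x1004 on a non-coherent-cache system gets
+ * start_aux = 32 - 4 = 28 B (up to the next cache line boundary) and
+ * end_aux = 32 B (the last-LLI gather area), leaving the caller
+ * 100 - 28 - 32 = 40 B as the directly DMA-mapped "main" part.
+ */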
+
+#ifdef DEBUG
+static void dump_client_buf_pages(const struct client_dma_buffer
+				  *client_dma_buf_p)
+{
+	int i;
+	struct scatterlist *sgentry;
+
+	if (client_dma_buf_p->user_buf_ptr != NULL) {
+		pr_debug(
+			      "Client DMA buffer %p maps %lu B over %d pages at user_ptr=0x%p (dma_dir=%d):\n",
+			      client_dma_buf_p, client_dma_buf_p->buf_size,
+			      client_dma_buf_p->num_of_pages,
+			      client_dma_buf_p->user_buf_ptr,
+			      client_dma_buf_p->dma_direction);
+	} else {
+		pr_debug("Client DMA buffer %p maps %lu B (dma_dir=%d):\n",
+			      client_dma_buf_p, client_dma_buf_p->buf_size,
+			      client_dma_buf_p->dma_direction);
+	}
+
+	if (client_dma_buf_p->user_pages != NULL) {
+		pr_debug("%d user_pages:\n",
+			      client_dma_buf_p->num_of_pages);
+		for (i = 0; i < client_dma_buf_p->num_of_pages; i++) {
+			pr_debug("%d. phys_addr=0x%08lX\n", i,
+				      page_to_pfn(client_dma_buf_p->
+						  user_pages[i]) << PAGE_SHIFT);
+		}
+	}
+#if 0
+	pr_debug("sg_head:\n");
+	i = 0;
+	for_each_valid_sg(client_dma_buf_p->sg_head, sgentry) {
+		pr_debug("%d. phys_addr=0x%08llX len=0x%08X\n", i,
+			      sg_phys(sgentry), sgentry->length);
+		i++;
+	}
+#endif
+	pr_debug("sg_main:\n");
+	i = 0;
+	for_each_valid_sg(client_dma_buf_p->sg_main, sgentry) {
+		pr_debug("%d. dma_addr=0x%08llX len=0x%08X\n", i,
+			(long long unsigned int)sg_dma_address(sgentry),
+			sg_dma_len(sgentry));
+		i++;
+	}
+	pr_debug("sg_tail:\n");
+	i = 0;
+	for_each_valid_sg(client_dma_buf_p->sg_tail, sgentry) {
+		pr_debug("%d. phys_addr=0x%08llX len=0x%08X\n", i,
+				(long long unsigned int)sg_phys(sgentry),
+				sgentry->length);
+		i++;
+	}
+	pr_debug("sg_save4next:\n");
+	i = 0;
+	for_each_valid_sg(client_dma_buf_p->sg_save4next, sgentry) {
+		pr_debug("%d. phys_addr=0x%08llX len=0x%08X\n", i,
+				(long long unsigned int)sg_phys(sgentry),
+				sgentry->length);
+		i++;
+	}
+
+}
+#endif /*DEBUG*/
+/**
+ * create_sg_list() - Allocate/create S/G list for given pages array
+ * @pages_array:	 The source pages array
+ * @offset_in_first_page:	 Offset in bytes in the first page of pages_array
+ * @sg_data_size:	 Number of bytes to include in the created S/G list
+ * @new_sg_list_p:	 The allocated S/G list buffer
+ * @next_page_p:	 The next page to map (for incremental list creation)
+ * @next_page_offset_p:	 The offset to start in next page to map
+ *	(The list is allocated by this func. and should be freed by the caller)
+ *
+ * Allocate/create an S/G list for the given pages array, starting at the
+ * given offset in the first page and spanning sg_data_size bytes.
+ * Returns int 0 for success
+ */
+static int create_sg_list(struct page **pages_array,
+			  unsigned long offset_in_first_page,
+			  unsigned long sg_data_size,
+			  struct scatterlist **new_sg_list_p,
+			  struct page ***next_page_p,
+			  unsigned long *next_page_offset_p)
+{
+	const unsigned long end_offset =
+	    offset_in_first_page + sg_data_size - 1;
+	const unsigned long num_of_sg_ents = (end_offset >> PAGE_SHIFT) + 1;
+	const unsigned long size_of_first_page = (num_of_sg_ents == 1) ?
+	    sg_data_size : (PAGE_SIZE - offset_in_first_page);
+	const unsigned long size_of_last_page = (end_offset & ~PAGE_MASK) + 1;
+	struct scatterlist *cur_sge;
+	int i;
+
+	if (sg_data_size == 0) {	/* Empty S/G list */
+		*new_sg_list_p = NULL;
+		*next_page_p = pages_array;
+		*next_page_offset_p = offset_in_first_page;
+		return 0;
+	}
+
+	*new_sg_list_p =
+	    kmalloc(sizeof(struct scatterlist) * num_of_sg_ents, GFP_KERNEL);
+	if (unlikely(*new_sg_list_p == NULL)) {
+		pr_err("Failed allocating sglist array for %lu entries\n",
+			    num_of_sg_ents);
+		return -ENOMEM;
+	}
+
+	/* Set default for next table assuming full pages */
+	*next_page_p = pages_array + num_of_sg_ents;
+	*next_page_offset_p = 0;
+
+	sg_init_table(*new_sg_list_p, num_of_sg_ents);
+	cur_sge = *new_sg_list_p;
+	/* First page is partial
+	 * - May start in middle of page
+	 * - May end in middle of page if single page */
+	sg_set_page(cur_sge, pages_array[0],
+		    size_of_first_page, offset_in_first_page);
+	/* Handle following (whole) pages, but last (which may be partial) */
+	for (i = 1; i < (num_of_sg_ents - 1); i++) {
+		cur_sge = sg_next(cur_sge);
+		if (unlikely(cur_sge == NULL)) {
+			pr_err(
+				    "Reached end of sgl before (%d) num_of_sg_ents (%lu)\n",
+				    i, num_of_sg_ents);
+			kfree(*new_sg_list_p);
+			*new_sg_list_p = NULL;
+			return -EINVAL;
+		}
+		sg_set_page(cur_sge, pages_array[i], PAGE_SIZE, 0);
+	}
+	/* Handle last (partial?) page */
+	if (num_of_sg_ents > 1) {
+		/* only if was not handled already as first */
+		cur_sge = sg_next(cur_sge);
+		if (unlikely(cur_sge == NULL)) {
+			pr_err(
+				    "Cannot put last page in given num_of_sg_ents (%lu)\n",
+				    num_of_sg_ents);
+			kfree(*new_sg_list_p);
+			*new_sg_list_p = NULL;
+			return -EINVAL;
+		}
+		sg_set_page(cur_sge,
+			    pages_array[num_of_sg_ents - 1],
+			    size_of_last_page, 0);
+		if (size_of_last_page < PAGE_SIZE) {
+			(*next_page_p)--; /* Last page was not fully consumed */
+			*next_page_offset_p = size_of_last_page;
+		}
+	} else {		/* First was last */
+		if ((offset_in_first_page + size_of_first_page) < PAGE_SIZE) {
+			(*next_page_p)--; /* Page was not fully consumed */
+			*next_page_offset_p =
+			    (offset_in_first_page + size_of_first_page);
+		}
+	}
+
+	return 0;
+}
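+
+/*
+ * Example (illustrative, assuming 4 KiB pages): for
+ * offset_in_first_page = 0x800 and sg_data_size = 0x1800, end_offset is
+ * 0x1FFF, so num_of_sg_ents = 2: entry 0 maps the last 0x800 B of page 0
+ * and entry 1 maps all of page 1. Page 1 is fully consumed, so the
+ * function returns *next_page_p = pages_array + 2, *next_page_offset_p = 0.
+ */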
+
+/**
+ * split_sg_list() - Split SG list at given offset.
+ * @sgl_to_split:	The SG list to split
+ * @split_sg_list:	Returned new SG list which starts with second half
+ *			of split entry.
+ * @split_offset:	Number of bytes to leave on the original list.
+ *
+ * Split SG list at given offset.
+ * The entry containing the split point is shortened accordingly and the
+ * remainder is moved to a new SG entry that chains back to the rest of
+ * the original list.
+ * Returns int 0 on success
+ */
+static int split_sg_list(struct scatterlist *sgl_to_split,
+			 struct scatterlist **split_sg_list,
+			 unsigned long split_offset)
+{
+	struct scatterlist *cur_sge = sgl_to_split;
+
+	/* Scan list until consuming enough for first part to fit in cur_sge */
+	while ((cur_sge != NULL) && (split_offset > cur_sge->length)) {
+		split_offset -= cur_sge->length;
+		cur_sge = sg_next(cur_sge);
+	}
+	/* After the loop above, split_offset is actually the offset within
+	 * cur_sge */
+
+	if (cur_sge == NULL)
+		return -ENOMEM;	/* SG list too short for given split_offset */
+
+	if (split_offset < cur_sge->length) {
+		/* Split entry */
+		*split_sg_list =
+		    kmalloc(sizeof(struct scatterlist) * 2, GFP_KERNEL);
+		if (*split_sg_list == NULL) {
+			pr_err("Failed allocating SGE for split entry\n");
+			return -ENOMEM;
+		}
+		sg_init_table(*split_sg_list, 2);
+		sg_set_page(*split_sg_list, sg_page(cur_sge),
+			    cur_sge->length - split_offset,
+			    cur_sge->offset + split_offset);
+		/* Link to second SGE */
+		sg_chain(*split_sg_list, 2, sg_next(cur_sge));
+		cur_sge->length = split_offset;
+	} else {		/* Split at entry boundary */
+		*split_sg_list = sg_next(cur_sge);
+		sg_mark_end(cur_sge);
+	}
+
+	return 0;
+}
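+
+/*
+ * Example (illustrative): splitting a single 4 KiB entry at
+ * split_offset = 1024 shortens the original entry to 1024 B and returns,
+ * via *split_sg_list, a freshly allocated two-entry list whose first
+ * entry maps the remaining 3072 B at offset + 1024 and whose second
+ * entry chains back to whatever followed the split entry.
+ * link_sg_lists() below undoes exactly this transformation.
+ */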
+
+/**
+ * link_sg_lists() - Link back split S/G list
+ * @first_sgl:	The first chunk s/g list
+ * @second_sgl:	The second chunk s/g list
+ *
+ * Returns the unified list's head
+ */
+static struct scatterlist *link_sg_lists(struct scatterlist *first_sgl,
+					 struct scatterlist *second_sgl)
+{
+	struct scatterlist *second_sgl_second = NULL;
+	struct scatterlist *first_sgl_last;
+	struct scatterlist *cur_sge;
+
+	if (first_sgl == NULL)
+		return second_sgl;	/* Second list is the "unified" list */
+	if (second_sgl == NULL)
+		return first_sgl;	/* Nothing to link back */
+	/* Seek end of first s/g list */
+	first_sgl_last = NULL;	/* To save last s/g entry */
+	for_each_valid_sg(first_sgl, cur_sge)
+	    first_sgl_last = cur_sge;
+	if ((sg_page(first_sgl_last) == sg_page(second_sgl)) &&
+	    ((first_sgl_last->offset + first_sgl_last->length) ==
+	     second_sgl->offset)) {
+		/* Case of entry split */
+		/* Restore first entry length */
+		first_sgl_last->length += second_sgl->length;
+		/* Save before freeing */
+		second_sgl_second = sg_next(second_sgl);
+		kfree(second_sgl);
+	}
+	/* (The freed entry is the one split_sg_list allocated;
+	 * otherwise the list was split on an entry boundary.) */
+	if (second_sgl_second != NULL) {
+		/*
+		 * Restore link to following entries
+		 * Clear chain termination flag to link back to next sge
+		 * Unfortunately there is no direct function to do this
+		 * so we rely on implementation detail (all flags cleared)
+		 */
+		first_sgl_last->page_link =
+		    (unsigned long)sg_page(first_sgl_last);
+	}
+	return first_sgl;
+}
+
+/**
+ * cleanup_client_dma_buf() - Cleanup client_dma_buf resources (S/G lists,
+ *				pages array, aux. bufs)
+ * @llimgr_p:		The LLI manager object
+ * @client_dma_buf_p:	The client DMA buffer object to clean up
+ *
+ * Returns void
+ */
+static void cleanup_client_dma_buf(struct llimgr_obj *llimgr_p,
+				   struct client_dma_buffer *client_dma_buf_p)
+{
+	struct page *cur_page;
+	int i;
+	const bool is_outbuf =
+	    (client_dma_buf_p->dma_direction == DMA_FROM_DEVICE) ||
+	    (client_dma_buf_p->dma_direction == DMA_BIDIRECTIONAL);
+
+	/* User space buffer */
+	if (client_dma_buf_p->user_buf_ptr != NULL) {
+#ifdef CONFIG_NOT_COHERENT_CACHE
+		if (client_dma_buf_p->sg_head != NULL)
+			kfree(client_dma_buf_p->sg_head);
+#endif
+		if (client_dma_buf_p->sg_main != NULL)
+			kfree(client_dma_buf_p->sg_main);
+		if (client_dma_buf_p->sg_tail != NULL)
+			kfree(client_dma_buf_p->sg_tail);
+		if (client_dma_buf_p->sg_save4next != NULL)
+			kfree(client_dma_buf_p->sg_save4next);
+		/* Unmap pages that were mapped/locked */
+		if (client_dma_buf_p->user_pages != NULL) {
+			for (i = 0; i < client_dma_buf_p->num_of_pages; i++) {
+				cur_page = client_dma_buf_p->user_pages[i];
+				/* Mark dirty for pages written by HW/DMA */
+				if (is_outbuf && !PageReserved(cur_page))
+					SetPageDirty(cur_page);
+				page_cache_release(cur_page);	/* Unlock */
+			}
+			kfree(client_dma_buf_p->user_pages);
+		}
+
+	} else {
+		/* (kernel) given s/g list */
+		/* Fix S/G list back to what was given */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+		if (client_dma_buf_p->sg_head != NULL) {
+			if (client_dma_buf_p->sg_main != NULL) {
+				client_dma_buf_p->sg_main =
+				    link_sg_lists(client_dma_buf_p->sg_head,
+						  client_dma_buf_p->sg_main);
+			} else {
+				client_dma_buf_p->sg_tail =
+				    link_sg_lists(client_dma_buf_p->sg_head,
+						  client_dma_buf_p->sg_tail);
+			}
+			/* Linked to next */
+			client_dma_buf_p->sg_head = NULL;
+		}
+#endif
+		client_dma_buf_p->sg_tail =
+		    link_sg_lists(client_dma_buf_p->sg_main,
+				  client_dma_buf_p->sg_tail);
+		client_dma_buf_p->sg_save4next =
+		    link_sg_lists(client_dma_buf_p->sg_tail,
+				  client_dma_buf_p->sg_save4next);
+	}
+
+	/* Free aux. buffers */
+	if (client_dma_buf_p->buf_end_aux_buf_va != NULL) {
+		if (client_dma_buf_p->buf_end_aux_buf_size <=
+		    EDGE_BUFS_POOL_ITEM_SIZE) {
+			dma_pool_free(llimgr_p->edge_bufs_pool,
+				      client_dma_buf_p->buf_end_aux_buf_va,
+				      client_dma_buf_p->buf_end_aux_buf_dma);
+		} else {	/* From DLLI buffers pool */
+			dma_pool_free(llimgr_p->dlli_bufs_pool,
+				      client_dma_buf_p->buf_end_aux_buf_va,
+				      client_dma_buf_p->buf_end_aux_buf_dma);
+		}
+	}
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	if (client_dma_buf_p->buf_start_aux_buf_va != NULL)
+		dma_pool_free(llimgr_p->edge_bufs_pool,
+			      client_dma_buf_p->buf_start_aux_buf_va,
+			      client_dma_buf_p->buf_start_aux_buf_dma);
+#endif
+	CLEAN_DMA_BUFFER_INFO(client_dma_buf_p);
+}
+
+/**
+ * is_sgl_phys_contig() - Check if given scatterlist is physically contig.
+ *
+ * @sgl:		Checked scatter/gather list
+ * @data_size_p:	Size of phys. contig. portion of given sgl
+ */
+static bool is_sgl_phys_contig(struct scatterlist *sgl,
+			       unsigned long *data_size_p)
+{
+	struct scatterlist *cur_sge = sgl;
+	struct scatterlist *next_sge;
+
+	*data_size_p = 0;
+	for (cur_sge = sgl; cur_sge != NULL; cur_sge = next_sge) {
+		(*data_size_p) += cur_sge->length;
+		next_sge = sg_next(cur_sge);
+		if ((next_sge != NULL) &&
+		    /* Check that current entry ends where next one starts */
+		    ((page_to_phys(sg_page(cur_sge)) + cur_sge->offset +
+		      cur_sge->length) !=
+		     (page_to_phys(sg_page(next_sge)) + next_sge->offset))) {
+			/* End of cur_sge does not reach start of next_sge */
+			return false;
+		}
+	}
+	/* If we passed the loop then data is phys. contig. */
+	return true;
+}
+
+/**
+ * is_pages_phys_contig() - Check if given pages are phys. contig.
+ *
+ * @pages_list:		Array of pages
+ * @num_of_pages:	Number of pages in provided pages_list
+ */
+static bool is_pages_phys_contig(struct page *pages_list[],
+				 unsigned int num_of_pages)
+{
+	int i;
+
+	for (i = 0; i < (num_of_pages - 1); i++) {
+		if ((page_to_phys(pages_list[i]) + PAGE_SIZE) !=
+		    page_to_phys(pages_list[i + 1]))
+			return false;
+	}
+	/* If reached here then all pages are following each other */
+	return true;
+}
+
+/**
+ * user_buf_to_client_dma_buf() - Apply given user_buf_ptr (user space buffer)
+ *				into client_dma_buffer
+ * @llimgr_p:		The LLI manager object
+ * @client_dma_buf_p:	 Client DMA object
+ *
+ * Apply given user_buf_ptr (user space buffer) into client_dma_buffer.
+ * This function should be invoked after the caller has set user_buf_ptr,
+ * buf_size and dma_direction in the given client_dma_buf_p object.
+ * Returns int 0 for success
+ */
+static int user_buf_to_client_dma_buf(struct llimgr_obj *llimgr_p,
+				      struct client_dma_buffer
+				      *client_dma_buf_p)
+{
+	u8 __user const *user_buf_ptr = client_dma_buf_p->user_buf_ptr;
+	unsigned long buf_size = client_dma_buf_p->buf_size;
+	const enum dma_data_direction dma_direction =
+	    client_dma_buf_p->dma_direction;
+	unsigned long buf_end = (unsigned long)user_buf_ptr + buf_size - 1;
+	const int num_of_pages =
+	    (buf_end >> PAGE_SHIFT) -
+	    ((unsigned long)user_buf_ptr >> PAGE_SHIFT) + 1;
+	const unsigned long offset_in_first_page =
+	    (unsigned long)user_buf_ptr & ~PAGE_MASK;
+	const bool is_inbuf = (dma_direction == DMA_TO_DEVICE) ||
+	    (dma_direction == DMA_BIDIRECTIONAL);
+	const bool is_outbuf = (dma_direction == DMA_FROM_DEVICE) ||
+	    (dma_direction == DMA_BIDIRECTIONAL);
+	unsigned long head_buf_size = 0, tail_buf_size = 0, main_buf_size = 0;
+	struct page **cur_page_p;
+	unsigned long cur_page_offset;
+	int rc = 0;
+
+	/* Verify permissions */
+	if (is_inbuf && !access_ok(ACCESS_READ, user_buf_ptr, buf_size)) {
+		pr_err("No read access to data buffer at %p\n",
+			    user_buf_ptr);
+		return -EFAULT;
+	}
+	if (is_outbuf && !access_ok(ACCESS_WRITE, user_buf_ptr, buf_size)) {
+		pr_err("No write access to data buffer at %p\n",
+			    user_buf_ptr);
+		return -EFAULT;
+	}
+	client_dma_buf_p->user_pages =
+	    kmalloc(sizeof(struct page *)*num_of_pages, GFP_KERNEL);
+	if (unlikely(client_dma_buf_p->user_pages == NULL)) {
+		pr_err("Failed allocating user_pages array for %d pages\n",
+			    num_of_pages);
+		return -ENOMEM;
+	}
+	/* Get user pages structure (also increment ref. count... lock) */
+	client_dma_buf_p->num_of_pages = get_user_pages_fast((unsigned long)
+							     user_buf_ptr,
+							     num_of_pages,
+							     is_outbuf,
+							     client_dma_buf_p->
+							     user_pages);
+	if (client_dma_buf_p->num_of_pages != num_of_pages) {
+		pr_warn(
+			     "Failed to lock all user pages (locked %d, requested lock = %d)\n",
+			     client_dma_buf_p->num_of_pages, num_of_pages);
+		rc = -ENOMEM;
+	}
+	/* Leave only currently processed data (remainder goes in sg_save4next) */
+	buf_size -= client_dma_buf_p->save4next_size;
+	buf_end -= client_dma_buf_p->save4next_size;
+	/* Decide on type of mapping: MLLI, DLLI or DLLI after copy */
+	if (buf_size <= DLLI_BUF_LIMIT) {
+		/* Check if possible to map buffer directly as DLLI */
+		if (
+#ifdef CONFIG_NOT_COHERENT_CACHE
+			   /* For systems with incoherent cache the buffer
+			    * must be cache line aligned to be considered
+			    * for this case
+			    */
+			   (((unsigned long)user_buf_ptr & CACHE_LINE_MASK) ==
+			    0) &&
+				((buf_end & CACHE_LINE_MASK) ==
+				 CACHE_LINE_MASK) &&
+#endif
+			   is_pages_phys_contig(client_dma_buf_p->user_pages,
+						client_dma_buf_p->
+						num_of_pages)) {
+			pr_debug(
+				      "Mapping user buffer @%p (0x%08lX B) to DLLI directly\n",
+				      client_dma_buf_p->user_buf_ptr, buf_size);
+			main_buf_size = buf_size;/* Leave 0 for tail_buf_size */
+			/* 0 for the tail buffer indicates that we use this
+			 * optimization, because in any other case there must
+			 * be some data in the tail buffer (if buf_size>0) */
+		} else if (buf_size <= DLLI_AUX_BUF_LIMIT) {
+			pr_debug(
+				      "Mapping user buffer @%p (0x%08lX B) to DLLI via aux. buffer\n",
+				      client_dma_buf_p->user_buf_ptr, buf_size);
+			tail_buf_size = buf_size;
+			/* All data goes to "tail" in order to piggy-back over
+			 * the aux. buffers logic for copying data in/out of the
+			 * temp. DLLI DMA buffer */
+		}
+	}
+	if ((main_buf_size + tail_buf_size) == 0) {
+		/* If none of the optimizations was applied... */
+		calc_aux_bufs_size((unsigned long)user_buf_ptr, buf_size,
+				   dma_direction, &head_buf_size,
+				   &tail_buf_size);
+		main_buf_size = buf_size - head_buf_size - tail_buf_size;
+	}
+
+	/* Create S/G list */
+	cur_page_p = client_dma_buf_p->user_pages;
+	cur_page_offset = offset_in_first_page;
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	if (likely(rc == 0)) {
+		/* Create S/G list for head (aux.) buffer */
+		rc = create_sg_list(cur_page_p, cur_page_offset,
+				    head_buf_size, &client_dma_buf_p->sg_head,
+				    &cur_page_p, &cur_page_offset);
+	}
+#endif
+	/* Create S/G list for buffer "body" - to be used for DMA */
+	if (likely(rc == 0)) {
+		rc = create_sg_list(cur_page_p, cur_page_offset,
+				    main_buf_size, &client_dma_buf_p->sg_main,
+				    &cur_page_p, &cur_page_offset);
+	}
+	/* Create S/G list for tail (aux.) buffer */
+	if (likely(rc == 0)) {
+		rc = create_sg_list(cur_page_p, cur_page_offset,
+				    tail_buf_size, &client_dma_buf_p->sg_tail,
+				    &cur_page_p, &cur_page_offset);
+	}
+	/* Create S/G list for save4next buffer */
+	if (likely(rc == 0)) {
+		rc = create_sg_list(cur_page_p, cur_page_offset,
+				    client_dma_buf_p->save4next_size,
+				    &client_dma_buf_p->sg_save4next,
+				    &cur_page_p, &cur_page_offset);
+	}
+
+	if (unlikely(rc != 0)) {
+		cleanup_client_dma_buf(llimgr_p, client_dma_buf_p);
+	} else {
+		/* Save head/tail sizes */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+		client_dma_buf_p->buf_start_aux_buf_size = head_buf_size;
+#endif
+		client_dma_buf_p->buf_end_aux_buf_size = tail_buf_size;
+	}
+
+	return rc;
+}
+
+/**
+ * client_sgl_to_client_dma_buf() - Create head/main/tail sg lists from given
+ *					sg list
+ * @llimgr_p:		The LLI manager object
+ * @sgl:		The given (kernel) scatter/gather list
+ * @client_dma_buf_p:	Client DMA buffer object to fill
+ *
+ * Returns int 0 for success
+ */
+static int client_sgl_to_client_dma_buf(struct llimgr_obj *llimgr_p,
+					struct scatterlist *sgl,
+					struct client_dma_buffer
+					*client_dma_buf_p)
+{
+	const unsigned long buf_size = client_dma_buf_p->buf_size -
+	    client_dma_buf_p->save4next_size;
+	const enum dma_data_direction dma_direction =
+	    client_dma_buf_p->dma_direction;
+	unsigned long sgl_phys_contig_size;	/* Phys. contig. part size */
+	unsigned long head_buf_size = 0, tail_buf_size = 0;
+	unsigned long main_buf_size = 0;
+	unsigned long last_sgl_size = 0;
+	struct scatterlist *last_sgl = NULL;	/* sgl to split off save4next */
+	int rc;
+
+	pr_debug("sgl=%p nbytes=%lu save4next=%lu client_dma_buf=%p\n",
+		      sgl, client_dma_buf_p->buf_size,
+		      client_dma_buf_p->save4next_size, client_dma_buf_p);
+
+	if (buf_size == 0) {	/* all goes to save4next (if anything) */
+		client_dma_buf_p->sg_save4next = sgl;
+		return 0;
+	}
+
+	/* Decide on type of mapping: MLLI, DLLI or DLLI after copy */
+	if (buf_size <= DLLI_BUF_LIMIT) {
+		/* Check if possible to map buffer directly as DLLI */
+		if (
+#ifdef CONFIG_NOT_COHERENT_CACHE
+			   /*
+			    * For systems with incoherent cache the
+			    * buffer must be cache line aligned to be
+			    * considered for this case
+			    */
+			   ((sgl->offset & CACHE_LINE_MASK) == 0) &&
+			   (((sgl->offset + buf_size) &
+			     CACHE_LINE_MASK) == 0) &&
+#endif
+			   is_sgl_phys_contig(sgl, &sgl_phys_contig_size)) {
+			pr_debug(
+				      "Mapping sgl buffer (0x%08lX B) to DLLI directly\n",
+				      buf_size);
+			main_buf_size = buf_size;
+			/* Leave 0 for tail_buf_size
+			 * 0 for the tail buffer indicates that we use this
+			 * optimization, because in any other case there must
+			 * be some data in the tail buffer (if buf_size>0)
+			 */
+		} else if (buf_size <= DLLI_AUX_BUF_LIMIT) {
+			pr_debug(
+				      "Mapping sgl buffer (0x%08lX B) to DLLI via aux. buffer\n",
+				      buf_size);
+			tail_buf_size = buf_size;
+			/* All data goes to "tail" in order to piggy-back
+			 * over the aux. buffers logic for copying data
+			 * in/out of the temp. DLLI DMA buffer
+			 */
+		}
+	}
+	if ((main_buf_size + tail_buf_size) == 0) {
+		/* If none of the optimizations was applied... */
+		/* Use first SG entry for start alignment */
+		calc_aux_bufs_size((unsigned long)sgl->offset, buf_size,
+				   dma_direction, &head_buf_size,
+				   &tail_buf_size);
+		main_buf_size = buf_size - head_buf_size - tail_buf_size;
+	}
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	if (head_buf_size > 0) {
+		client_dma_buf_p->sg_head = sgl;
+		rc = split_sg_list(client_dma_buf_p->sg_head,
+				   &client_dma_buf_p->sg_main, head_buf_size);
+		if (unlikely(rc != 0)) {
+			pr_err("Failed splitting sg_head-sg_main\n");
+			cleanup_client_dma_buf(llimgr_p, client_dma_buf_p);
+			return rc;
+		}
+		last_sgl_size = head_buf_size;
+		last_sgl = client_dma_buf_p->sg_head;
+	} else
+#endif
+		/* Initialize sg_main to given sgl */
+		client_dma_buf_p->sg_main = sgl;
+
+	if (tail_buf_size > 0) {
+		if (main_buf_size > 0) {
+			rc = split_sg_list(client_dma_buf_p->sg_main,
+					   &client_dma_buf_p->sg_tail,
+					   main_buf_size);
+			if (unlikely(rc != 0)) {
+				pr_err("Fail:splitting sg_main-sg_tail\n");
+				cleanup_client_dma_buf(llimgr_p,
+						       client_dma_buf_p);
+				return rc;
+			}
+		} else {	/* All data moved to sg_tail */
+			client_dma_buf_p->sg_tail = client_dma_buf_p->sg_main;
+			client_dma_buf_p->sg_main = NULL;
+		}
+		last_sgl_size = tail_buf_size;
+		last_sgl = client_dma_buf_p->sg_tail;
+	} else if (main_buf_size > 0) {	/* main only */
+		last_sgl_size = main_buf_size;
+		last_sgl = client_dma_buf_p->sg_main;
+	}
+
+	/* Save head/tail sizes */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	client_dma_buf_p->buf_start_aux_buf_size = head_buf_size;
+#endif
+	client_dma_buf_p->buf_end_aux_buf_size = tail_buf_size;
+
+	if (client_dma_buf_p->save4next_size > 0) {
+		if (last_sgl != NULL) {
+			rc = split_sg_list(last_sgl,
+					   &client_dma_buf_p->sg_save4next,
+					   last_sgl_size);
+			if (unlikely(rc != 0)) {
+				pr_err("Failed splitting sg_save4next\n");
+				cleanup_client_dma_buf(llimgr_p,
+						       client_dma_buf_p);
+				return rc;
+			}
+		} else {	/* Whole buffer goes to save4next */
+			client_dma_buf_p->sg_save4next = sgl;
+		}
+	}
+	return 0;
+}
+
+/**
+ * llimgr_register_client_dma_buf() - Register given client buffer for DMA
+ *					operation.
+ * @llimgr:	 The LLI manager object handle
+ * @user_buf_ptr:	 Pointer in user space of the user buffer
+ * @sgl:	Client provided s/g list. user_buf_ptr is assumed NULL if this
+ *		list is given (!NULL).
+ * @buf_size:	 The user buffer size in bytes (incl. save4next). May be 0.
+ * @save4next_size:	Amount from buffer end to save for next op.
+ *			(split into separate sgl). May be 0.
+ * @dma_direction:	The DMA direction this buffer would be used for
+ * @client_dma_buf_p:	Pointer to the user DMA buffer "object"
+ *
+ * Register given client buffer for DMA operation.
+ * If user_buf_ptr!=NULL and sgl==NULL it locks the user pages and creates
+ * head/main/tail s/g lists. If sgl!=NULL it splits it into head/main/tail
+ * s/g lists.
+ * Returns 0 for success
+ */
+int llimgr_register_client_dma_buf(void *llimgr,
+				   u8 __user *user_buf_ptr,
+				   struct scatterlist *sgl,
+				   const unsigned long buf_size,
+				   const unsigned long save4next_size,
+				   const enum dma_data_direction dma_direction,
+				   struct client_dma_buffer *client_dma_buf_p)
+{
+	struct llimgr_obj *llimgr_p = (struct llimgr_obj *)llimgr;
+	int rc, tmp;
+
+	CLEAN_DMA_BUFFER_INFO(client_dma_buf_p);
+
+	if (buf_size == 0) {	/* Handle empty buffer */
+		pr_debug("buf_size == 0\n");
+		return 0;
+	}
+
+	if ((user_buf_ptr == NULL) && (sgl == NULL)) {
+		pr_err("NULL user_buf_ptr/sgl\n");
+		return -EINVAL;
+	}
+	if ((user_buf_ptr != NULL) && (sgl != NULL)) {
+		pr_err("Provided with dual buffer info (both user+sgl)\n");
+		return -EINVAL;
+	}
+
+	/* Init. basic/common attributes */
+	client_dma_buf_p->user_buf_ptr = user_buf_ptr;
+	client_dma_buf_p->buf_size = buf_size;
+	client_dma_buf_p->save4next_size = save4next_size;
+	client_dma_buf_p->dma_direction = dma_direction;
+
+	if (user_buf_ptr != NULL) {
+		rc = user_buf_to_client_dma_buf(llimgr_p, client_dma_buf_p);
+	} else {
+		rc = client_sgl_to_client_dma_buf(llimgr_p, sgl,
+						  client_dma_buf_p);
+	}
+	if (unlikely(rc != 0))
+		return rc;
+	/* Since sg_main may be large and we need its nents for each
+	 * dma_map_sg/dma_unmap_sg operation, we count its nents once and
+	 * save the result.
+	 * (for the other sgl's in the object we can count when accessed) */
+	client_dma_buf_p->sg_main_nents =
+	    get_sgl_nents(client_dma_buf_p->sg_main);
+
+	/* Allocate auxiliary buffers for sg_head/sg_tail copies */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	if ((likely(rc == 0)) &&
+	    (client_dma_buf_p->buf_start_aux_buf_size > 0)) {
+		client_dma_buf_p->buf_start_aux_buf_va =
+		    dma_pool_alloc(llimgr_p->edge_bufs_pool, GFP_KERNEL,
+				   &client_dma_buf_p->buf_start_aux_buf_dma);
+		if (unlikely(client_dma_buf_p->buf_start_aux_buf_va == NULL)) {
+			pr_err("Fail alloc from edge_bufs_pool, head\n");
+			rc = -ENOMEM;
+		} else {
+			pr_debug("start_aux: va=%p dma=0x%08llX\n",
+				      client_dma_buf_p->buf_start_aux_buf_va,
+				      client_dma_buf_p->buf_start_aux_buf_dma);
+		}
+	}
+#endif
+	if ((likely(rc == 0)) && (client_dma_buf_p->buf_end_aux_buf_size > 0)) {
+#ifdef DEBUG
+		if (client_dma_buf_p->buf_end_aux_buf_size >
+				DLLI_AUX_BUF_LIMIT) {
+			pr_err("end_aux_buf size too large = 0x%08lX\n",
+				    client_dma_buf_p->buf_end_aux_buf_size);
+			/* Unwind locked pages/S-G lists before bailing out */
+			cleanup_client_dma_buf(llimgr_p, client_dma_buf_p);
+			return -EINVAL;
+		}
+#endif
+		if (client_dma_buf_p->buf_end_aux_buf_size <=
+		    EDGE_BUFS_POOL_ITEM_SIZE) {
+			client_dma_buf_p->buf_end_aux_buf_va =
+			    dma_pool_alloc(llimgr_p->edge_bufs_pool, GFP_KERNEL,
+					   &client_dma_buf_p->
+					   buf_end_aux_buf_dma);
+		} else {
+			/* Allocate from the dedicated DLLI buffers pool */
+			client_dma_buf_p->buf_end_aux_buf_va =
+			    dma_pool_alloc(llimgr_p->dlli_bufs_pool, GFP_KERNEL,
+					   &client_dma_buf_p->
+					   buf_end_aux_buf_dma);
+		}
+		if (unlikely(client_dma_buf_p->buf_end_aux_buf_va == NULL)) {
+			pr_err("Fail:allocating from aux. buf for tail\n");
+			rc = -ENOMEM;
+		} else {
+			pr_debug("end_aux: va=%p dma=0x%08llX\n",
+				client_dma_buf_p->buf_end_aux_buf_va,
+				(long long unsigned int)
+				client_dma_buf_p->buf_end_aux_buf_dma);
+		}
+	}
+
+	/* Map the main sglist (head+tail would not be used for DMA) */
+	if (likely(rc == 0) && (client_dma_buf_p->sg_main != NULL)) {
+		tmp = dma_map_sg(llimgr_p->dev, client_dma_buf_p->sg_main,
+				 client_dma_buf_p->sg_main_nents,
+				 dma_direction);
+		if (unlikely(tmp == 0)) {
+			pr_err("dma_map_sg failed\n");
+			rc = -ENOMEM;
+		}
+	}
+
+#ifdef DEBUG
+	if (likely(rc == 0))
+		dump_client_buf_pages(client_dma_buf_p);
+#endif
+
+	if (unlikely(rc != 0)) {	/* Error cases cleanup */
+		cleanup_client_dma_buf(llimgr_p, client_dma_buf_p);
+	}
+
+	return rc;
+}
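+
+/*
+ * Usage sketch (illustrative only; error handling elided and the llimgr
+ * handle, user_ptr and nbytes are assumed to come from the caller):
+ *
+ *	struct client_dma_buffer dma_buf;
+ *	int rc;
+ *
+ *	rc = llimgr_register_client_dma_buf(llimgr, user_ptr, NULL,
+ *					    nbytes, 0, DMA_TO_DEVICE,
+ *					    &dma_buf);
+ *	if (rc == 0) {
+ *		... build MLLI via llimgr_create_mlli(), queue the op ...
+ *		llimgr_deregister_client_dma_buf(llimgr, &dma_buf);
+ *	}
+ */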
+
+/**
+ * llimgr_deregister_client_dma_buf() - Unmap given user DMA buffer
+ * @llimgr:		The LLI manager object handle
+ * @client_dma_buf_p:	 User DMA buffer object
+ *
+ * Unmap given user DMA buffer (flush and unlock pages)
+ * (this function can handle client_dma_buffer of size 0)
+ */
+void llimgr_deregister_client_dma_buf(void *llimgr,
+				      struct client_dma_buffer
+				      *client_dma_buf_p)
+{
+	struct llimgr_obj *llimgr_p = (struct llimgr_obj *)llimgr;
+
+	/* Cleanup DMA mappings */
+	if (client_dma_buf_p->sg_main != NULL) {
+		dma_unmap_sg(llimgr_p->dev, client_dma_buf_p->sg_main,
+			     client_dma_buf_p->sg_main_nents,
+			     client_dma_buf_p->dma_direction);
+	}
+	cleanup_client_dma_buf(llimgr_p, client_dma_buf_p);
+}
+
+/**
+ * get_sgl_nents() - Get (count) the number of entries in given s/g list
+ *	(used in order to be able to invoke sg_copy_to/from_buffer)
+ * @sgl:	Counted s/g list entries
+ */
+static inline unsigned int get_sgl_nents(struct scatterlist *sgl)
+{
+	int cnt = 0;
+	struct scatterlist *cur_sge;
+	for_each_valid_sg(sgl, cur_sge)
+	    cnt++;
+	return cnt;
+}
+
+/**
+ * copy_to_from_aux_buf() - Copy to/from given aux. buffer from/to given s/g list
+ * @to_buf:	 "true" to copy from sgl into the given buffer
+ * @sgl:	 The S/G list data source/target
+ * @buf_p:	 Target/source buffer
+ * @buf_len:	 Buffer length
+ *
+ * Returns int
+ */
+static inline int copy_to_from_aux_buf(bool to_buf,
+				       struct scatterlist *sgl, void *buf_p,
+				       size_t buf_len)
+{
+	size_t copied_cnt;
+	unsigned int nents = get_sgl_nents(sgl);
+
+	if (to_buf)
+		copied_cnt = sg_copy_to_buffer(sgl, nents, buf_p, buf_len);
+	else
+		copied_cnt = sg_copy_from_buffer(sgl, nents, buf_p, buf_len);
+
+	if (copied_cnt < buf_len) {
+		pr_err("Failed copying %s buf of %zu B\n",
+			    to_buf ? "to" : "from", buf_len);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * sync_client_dma_buf() - Sync. client buffer pages before/after device DMA
+ * @dev:	 Associated device structure
+ * @client_dma_buf_p:	 The user DMA buffer object
+ * @for_device:	 Set to "true" to sync before device DMA op.
+ * @dma_direction:	 DMA direction for sync.
+ *
+ * Returns int 0 for success
+ */
+static int sync_client_dma_buf(struct device *dev,
+			       struct client_dma_buffer *client_dma_buf_p,
+			       const bool for_device,
+			       const enum dma_data_direction dma_direction)
+{
+	const bool is_from_device = ((dma_direction == DMA_BIDIRECTIONAL) ||
+				     (dma_direction == DMA_FROM_DEVICE));
+	const bool is_to_device = ((dma_direction == DMA_BIDIRECTIONAL) ||
+				   (dma_direction == DMA_TO_DEVICE));
+	int rc;
+
+	pr_debug("DMA buf %p (0x%08lX B) for %s\n",
+		      client_dma_buf_p, client_dma_buf_p->buf_size,
+		      for_device ? "device" : "cpu");
+
+	if (for_device) {
+		/* Copy out aux. buffers if required */
+		/* We should copy before dma_sync_sg_for_device */
+		if (is_to_device) {
+#ifdef CONFIG_NOT_COHERENT_CACHE
+			if (client_dma_buf_p->sg_head != NULL) {
+				rc = copy_to_from_aux_buf(true,
+						  client_dma_buf_p->sg_head,
+						  client_dma_buf_p->
+						  buf_start_aux_buf_va,
+						  client_dma_buf_p->
+						  buf_start_aux_buf_size);
+				if (rc != 0)
+					return rc;
+			}
+#endif
+			if (client_dma_buf_p->sg_tail != NULL) {
+				rc = copy_to_from_aux_buf(true,
+						  client_dma_buf_p->sg_tail,
+						  client_dma_buf_p->
+						  buf_end_aux_buf_va,
+						  client_dma_buf_p->
+						  buf_end_aux_buf_size);
+				if (rc != 0)
+					return rc;
+			}
+		}
+		if (client_dma_buf_p->sg_main != NULL) {
+			dma_sync_sg_for_device(dev, client_dma_buf_p->sg_main,
+					       client_dma_buf_p->sg_main_nents,
+					       dma_direction);
+		}
+
+	} else {		/* for CPU */
+		if (client_dma_buf_p->sg_main != NULL) {
+			dma_sync_sg_for_cpu(dev, client_dma_buf_p->sg_main,
+					    client_dma_buf_p->sg_main_nents,
+					    dma_direction);
+		}
+		/* Copy back from aux. buffers */
+		/* We should copy after dma_sync_sg_for_cpu */
+		if (is_from_device) {
+#ifdef CONFIG_NOT_COHERENT_CACHE
+			if (client_dma_buf_p->sg_head != NULL) {
+				rc = copy_to_from_aux_buf(false,
+						  client_dma_buf_p->sg_head,
+						  client_dma_buf_p->
+						  buf_start_aux_buf_va,
+						  client_dma_buf_p->
+						  buf_start_aux_buf_size);
+				if (rc != 0)
+					return rc;
+			}
+#endif
+			if (client_dma_buf_p->sg_tail != NULL) {
+				rc = copy_to_from_aux_buf(false,
+						  client_dma_buf_p->sg_tail,
+						  client_dma_buf_p->
+						  buf_end_aux_buf_va,
+						  client_dma_buf_p->
+						  buf_end_aux_buf_size);
+				if (rc != 0)
+					return rc;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * llimgr_copy_from_client_buf_save4next() - Copy from the sg_save4next chunk
+ *	of the client DMA buffer to given buffer.
+ *	Used to save hash block remainder.
+ *
+ * @client_dma_buf_p:	The client DMA buffer with the save4next chunk
+ * @to_buf:		Target buffer to copy to.
+ * @buf_len:		Given buffer length (to avoid buffer overflow)
+ *
+ * Returns number of bytes copied or -ENOMEM if given buffer is too small
+ */
+int llimgr_copy_from_client_buf_save4next(struct client_dma_buffer
+					  *client_dma_buf_p, u8 *to_buf,
+					  unsigned long buf_len)
+{
+	int copied_cnt;
+	struct scatterlist *sgl = client_dma_buf_p->sg_save4next;
+	unsigned int nents;
+
+	if (buf_len < client_dma_buf_p->save4next_size) {
+		pr_err("Invoked for copying %lu B to a buffer of %lu B\n",
+			    client_dma_buf_p->save4next_size, buf_len);
+		copied_cnt = -ENOMEM;
+	} else {
+		nents = get_sgl_nents(sgl);
+		if (nents > 0)
+			copied_cnt = sg_copy_to_buffer(sgl, nents,
+						       to_buf, buf_len);
+		else		/* empty */
+			copied_cnt = 0;
+	}
+	return copied_cnt;
+}
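+
+/*
+ * Example (illustrative): a 100 B hash update registered with
+ * save4next_size = 100 % 64 = 36 (64 B being the SHA-256 block size in
+ * this example) DMA-processes the leading 64 B via sg_main/sg_tail while
+ * the trailing 36 B stay in sg_save4next; the caller pulls them out with
+ * this function into its block-remainder buffer for the next update.
+ */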
+
+#ifdef DEBUG
+/**
+ * dump_mlli_table() - Dump given MLLI table
+ * @table_start_p:	 Pointer to allocated table buffer
+ * @dma_addr:	 The table's DMA address as given to SeP
+ * @table_size:	 The table size in bytes
+ *
+ */
+static void dump_mlli_table(u32 *table_start_p, dma_addr_t dma_addr,
+			    unsigned long table_size)
+{
+	u32 lli_spad[SEP_LLI_ENTRY_WORD_SIZE];
+	int i;
+	u32 *cur_entry_p;
+	unsigned int num_of_entries = table_size / SEP_LLI_ENTRY_BYTE_SIZE;
+
+	pr_debug("MLLI table at %p (dma_addr=0x%08X) with %u ent.:\n",
+		      table_start_p, (unsigned int)dma_addr, num_of_entries);
+
+	for (i = 0, cur_entry_p = table_start_p + SEP_LLI_ENTRY_WORD_SIZE;
+	     i < num_of_entries; cur_entry_p += SEP_LLI_ENTRY_WORD_SIZE, i++) {
+		/* LE to BE... */
+		SEP_LLI_COPY_FROM_SEP(lli_spad, cur_entry_p);
+		/*
+		 * pr_debug("%02d: addr=0x%08lX , size=0x%08lX\n  %s\n", i,
+		 * SEP_LLI_GET(lli_spad, ADDR),
+		 * SEP_LLI_GET(lli_spad, SIZE),
+		 */
+		pr_debug("%02d: [0x%08X,0x%08X] %s\n", i,
+			      lli_spad[0], lli_spad[1],
+			      i == 0 ? "(next table)" : "");
+	}
+}
+
+/**
+ * llimgr_dump_mlli_tables_list() - Dump all the MLLI tables in a given tables
+ *					list
+ * @mlli_tables_list_p:	 Pointer to tables list structure
+ *
+ */
+void llimgr_dump_mlli_tables_list(struct mlli_tables_list *mlli_tables_list_p)
+{
+	u32 lli_spad[SEP_LLI_ENTRY_WORD_SIZE];
+	u32 *cur_table_p;
+	u32 *next_table_p;
+	u32 *link_entry_p;
+	unsigned long next_table_size;
+	dma_addr_t next_table_dma;
+	u16 table_count = 0;
+
+	/* This loop uses "cur_table_p" as the previous table that
+	 * was already dumped */
+	for (cur_table_p = mlli_tables_list_p->link_to_first_table;
+	     cur_table_p != NULL; cur_table_p = next_table_p, table_count++) {
+
+		if (table_count > mlli_tables_list_p->table_count) {
+			pr_err(
+				"MLLI tables list has more tables than table_cnt=%u. Stopping dump.\n",
+				mlli_tables_list_p->table_count);
+			break;
+		}
+
+		/* The SeP link entry is second in the buffer */
+		link_entry_p = cur_table_p + SEP_LLI_ENTRY_WORD_SIZE;
+		SEP_LLI_COPY_FROM_SEP(lli_spad, link_entry_p);/* LE to BE... */
+		next_table_p = SEP_MLLI_GET_NEXT_VA(cur_table_p);
+		next_table_dma = SEP_LLI_GET(lli_spad, ADDR);
+		next_table_size = SEP_LLI_GET(lli_spad, SIZE);
+		if (next_table_p != NULL)
+			dump_mlli_table(next_table_p,
+					next_table_dma, next_table_size);
+
+	}
+
+}
+#endif /*DEBUG*/
+/*****************************************/
+/* MLLI tables construction functions    */
+/*****************************************/
+/**
+ * set_dlli() - Set given mlli object to function as DLLI descriptor
+ *
+ * @mlli_tables_list_p:	The associated "MLLI" tables list
+ * @dlli_addr:	The DMA address for the DLLI
+ * @dlli_size:	The size in bytes of referenced data
+ *
+ * The MLLI tables list object represents DLLI when its table_count is 0
+ * and the "link_to_first_table" actually points to the data.
+ */
+static inline void set_dlli(struct mlli_tables_list *mlli_tables_list_p,
+			    u32 dlli_addr, u16 dlli_size)
+{
+	u32 lli_spad[SEP_LLI_ENTRY_WORD_SIZE];	/* LLI scratchpad */
+
+	SEP_LLI_INIT(lli_spad);
+	SEP_LLI_SET(lli_spad, ADDR, dlli_addr);
+	SEP_LLI_SET(lli_spad, SIZE, dlli_size);
+	SEP_LLI_COPY_TO_SEP(mlli_tables_list_p->link_to_first_table +
+			    SEP_LLI_ENTRY_WORD_SIZE, lli_spad);
+	mlli_tables_list_p->table_count = 0;
+}
+
+/**
+ * set_last_lli() - Set "Last" bit on last LLI data entry
+ * @last_lli_p:
+ *
+ */
+static inline void set_last_lli(u32 *last_lli_p)
+{
+	u32 lli_spad[SEP_LLI_ENTRY_WORD_SIZE];	/* LLI scratchpad */
+
+	SEP_LLI_COPY_FROM_SEP(lli_spad, last_lli_p);
+	SEP_LLI_SET(lli_spad, LAST, 1);
+	SEP_LLI_COPY_TO_SEP(last_lli_p, lli_spad);
+}
+
+/**
+ * set_last_table() - Set table link to next as NULL (last table).
+ * @mlli_table_p:
+ *
+ */
+static inline void set_last_table(u32 *mlli_table_p)
+{
+	u32 lli_spad[SEP_LLI_ENTRY_WORD_SIZE];	/* LLI scratchpad */
+
+	/* Set SeP link entry */
+	SEP_LLI_INIT(lli_spad);
+	SEP_LLI_SET(lli_spad, FIRST, 1);
+	SEP_LLI_SET(lli_spad, LAST, 1);
+	/* The rest of the field are zero from SEP_LLI_INIT */
+	SEP_LLI_COPY_TO_SEP(mlli_table_p, lli_spad);
+	/* Set NULL for next VA */
+	SEP_MLLI_SET_NEXT_VA_NULL(mlli_table_p);
+}
+
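+/**
+ * link_to_prev_mlli() - Link the previous MLLI table to the iterator's
+ *				current table
+ * @mlli_iter_p:	MLLI tables list iterator
+ *
+ * Writes the current table's DMA address and accumulated size into the
+ * previous table's SeP link entry and records the host VA link to the
+ * current table.
+ */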
+static inline void link_to_prev_mlli(struct mlli_tables_list_iterator
+				     *mlli_iter_p)
+{
+	u32 lli_spad[SEP_LLI_ENTRY_WORD_SIZE];	/* LLI scratchpad */
+	const u32 cur_mlli_table_size =
+	    (mlli_iter_p->next_lli_idx + 1) * SEP_LLI_ENTRY_BYTE_SIZE;
+	/* +1 for link entry at table start */
+
+	SEP_LLI_INIT(lli_spad);
+	SEP_LLI_SET(lli_spad, ADDR, mlli_iter_p->cur_mlli_dma_addr);
+	SEP_LLI_SET(lli_spad, SIZE, cur_mlli_table_size);
+	SEP_LLI_SET(lli_spad, FIRST, 1);
+	SEP_LLI_SET(lli_spad, LAST, 1);
+	SEP_LLI_COPY_TO_SEP(mlli_iter_p->prev_mlli_table_p +
+			    SEP_LLI_ENTRY_WORD_SIZE, lli_spad);
+	SEP_MLLI_SET_NEXT_VA(mlli_iter_p->prev_mlli_table_p,
+			     mlli_iter_p->cur_mlli_table_p);
+}
+
+/**
+ * terminate_mlli_tables_list() - "NULL" terminate the MLLI tables list and link
+ *				to previous table if any.
+ * @mlli_iter_p:	 MLLI tables list iterator
+ *
+ */
+static inline void terminate_mlli_tables_list(struct mlli_tables_list_iterator
+					      *mlli_iter_p)
+{
+	u32 *last_lli_p = mlli_iter_p->cur_mlli_table_p +
+	    ((FIRST_DATA_LLI_INDEX + mlli_iter_p->next_lli_idx - 1) *
+	     SEP_LLI_ENTRY_WORD_SIZE);
+
+	if (mlli_iter_p->prev_mlli_table_p != NULL)
+		link_to_prev_mlli(mlli_iter_p);
+
+	if (mlli_iter_p->cur_mlli_table_p != NULL) {
+		set_last_lli(last_lli_p);
+		set_last_table(mlli_iter_p->cur_mlli_table_p);
+	}
+
+}
+
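+/**
+ * alloc_next_mlli() - Close the current MLLI table and allocate the next one
+ * @llimgr_p:		The LLI manager object
+ * @mlli_tables_list_p:	The MLLI tables list being built
+ * @mlli_iter_p:	MLLI tables list iterator
+ *
+ * If the list already holds a completed table, it is linked to the current
+ * table and the current table's last LLI is marked as LAST. A fresh table is
+ * then allocated from the MLLI pool and becomes the iterator's current table.
+ * Returns int 0 for success
+ */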
+static int alloc_next_mlli(struct llimgr_obj *llimgr_p,
+			   struct mlli_tables_list *mlli_tables_list_p,
+			   struct mlli_tables_list_iterator *mlli_iter_p)
+{
+	u32 *last_lli_p = mlli_iter_p->cur_mlli_table_p +
+	    ((FIRST_DATA_LLI_INDEX + mlli_iter_p->next_lli_idx - 1) *
+	     SEP_LLI_ENTRY_WORD_SIZE);
+
+	if (mlli_iter_p->prev_mlli_table_p != NULL) {
+		/* "prev == NULL" means that we are on the stub link entry. */
+		/* If we have "prev" it means that we already have one table */
+		link_to_prev_mlli(mlli_iter_p);
+		set_last_lli(last_lli_p);
+	}
+
+	mlli_iter_p->prev_mlli_table_p = mlli_iter_p->cur_mlli_table_p;
+
+	/* Allocate MLLI table buffer from the pool */
+	mlli_iter_p->cur_mlli_table_p =
+	    dma_pool_alloc(llimgr_p->mlli_cache, GFP_KERNEL,
+			   &mlli_iter_p->cur_mlli_dma_addr);
+	if (mlli_iter_p->cur_mlli_table_p == NULL) {
+		pr_err("Failed allocating MLLI table\n");
+		return -ENOMEM;
+	}
+
+	/* Set DMA addr to the table start from SeP perspective */
+	mlli_iter_p->cur_mlli_dma_addr += SEP_LLI_ENTRY_BYTE_SIZE;
+	mlli_iter_p->next_lli_idx = 0;
+	mlli_iter_p->cur_mlli_accum_data = 0;
+	/* Set as last until linked to next (for keeping a valid tables list) */
+	set_last_table(mlli_iter_p->cur_mlli_table_p);
+
+	mlli_tables_list_p->table_count++;
+
+	return 0;
+}
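+
+/*
+ * Table buffer layout sketch (as the code above implies): word entry 0 of
+ * each allocation holds the host-side VA link to the next table, so the
+ * DMA address handed to SeP is bumped one LLI entry forward; from SeP's
+ * view the table then starts with its link entry, followed by the data
+ * LLIs from FIRST_DATA_LLI_INDEX onward.
+ */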
+
+/**
+ * append_lli_to_mlli() - Set current LLI info and progress to next entry
+ * @mlli_iter_p:	MLLI tables list iterator
+ * @dma_addr:		LLI entry ADDR
+ * @data_size:		LLI entry SIZE
+ *
+ * Returns void
+ */
+static inline void append_lli_to_mlli(struct mlli_tables_list_iterator
+				      *mlli_iter_p, dma_addr_t dma_addr,
+				      u32 data_size)
+{
+	u32 lli_spad[SEP_LLI_ENTRY_WORD_SIZE];	/* LLI scratchpad */
+	u32 *next_lli_p;
+
+	next_lli_p = mlli_iter_p->cur_mlli_table_p +
+	    ((FIRST_DATA_LLI_INDEX + mlli_iter_p->next_lli_idx) *
+	     SEP_LLI_ENTRY_WORD_SIZE);
+	/* calc. includes first link entry */
+	/* Create LLI entry */
+	SEP_LLI_INIT(lli_spad);
+	SEP_LLI_SET(lli_spad, ADDR, dma_addr);
+	SEP_LLI_SET(lli_spad, SIZE, data_size);
+	SEP_LLI_COPY_TO_SEP(next_lli_p, lli_spad);
+
+	mlli_iter_p->next_lli_idx++;
+	mlli_iter_p->cur_mlli_accum_data += data_size;
+}
+
+/**
+ * append_data_to_mlli() - Append given DMA data chunk to given MLLI tables list
+ * @llimgr_p:		The LLI manager object
+ * @mlli_tables_list_p:	The MLLI tables list being built
+ * @mlli_iter_p:	MLLI tables list iterator
+ * @data_dma_addr:	 LLI entry ADDR
+ * @data_size:	 LLI entry SIZE
+ *
+ * Append given DMA data chunk to given MLLI tables list.
+ * Based on given iterator the LLI entry may be added in the current table
+ * or if table end reached, a new table would be allocated.
+ * In the latter case the previous MLLI table would be linked to current.
+ * If not all the data fits within the current table limit, it is split into
+ * a last LLI in this table and a first LLI in the next table (assuming the
+ * overall DMA data is not more than max_data_per_mlli)
+ * Returns int 0 for success
+ */
+static int append_data_to_mlli(struct llimgr_obj *llimgr_p,
+			       struct mlli_tables_list *mlli_tables_list_p,
+			       struct mlli_tables_list_iterator *mlli_iter_p,
+			       dma_addr_t data_dma_addr, u32 data_size)
+{
+	u32 remaining_data_for_mlli;
+	int rc;
+
+#ifdef DEBUG
+	if (data_size > llimgr_p->max_data_per_mlli) {
+		pr_err(
+			    "Given data size (%uB) is too large for MLLI (%luB)\n",
+			    data_size, llimgr_p->max_data_per_mlli);
+		return -EINVAL;
+	}
+#endif
+
+	if (mlli_iter_p->next_lli_idx >= llimgr_p->max_lli_num) {
+		/* Reached end of current MLLI table */
+		rc = alloc_next_mlli(llimgr_p, mlli_tables_list_p, mlli_iter_p);
+		if (rc != 0)
+			return rc;
+	}
+
+	remaining_data_for_mlli =
+	    llimgr_p->max_data_per_mlli - mlli_iter_p->cur_mlli_accum_data;
+
+	if (data_size > remaining_data_for_mlli) {
+		/* This chunk does not fit in this table */
+		if (remaining_data_for_mlli > 0) {/* Space left in this MLLI */
+			/* Add to this table first "half" of the chunk */
+			append_lli_to_mlli(mlli_iter_p, data_dma_addr,
+					   remaining_data_for_mlli);
+			pr_debug("Splitting SG of %uB to %uB+%uB\n",
+				      data_size, remaining_data_for_mlli,
+				      data_size - remaining_data_for_mlli);
+			/* Set the remainder to be pushed in the new table */
+			data_dma_addr += remaining_data_for_mlli;
+			data_size -= remaining_data_for_mlli;
+		}
+		rc = alloc_next_mlli(llimgr_p, mlli_tables_list_p, mlli_iter_p);
+		if (rc != 0)
+			return rc;
+	}
+
+	append_lli_to_mlli(mlli_iter_p, data_dma_addr, data_size);
+
+	return 0;
+}
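+
+/*
+ * Worked example (illustrative numbers): with max_data_per_mlli = 64 KiB
+ * and cur_mlli_accum_data = 60 KiB, appending an 8 KiB chunk first adds
+ * a 4 KiB LLI that tops off the current table, allocates the next table
+ * via alloc_next_mlli(), and then appends the remaining 4 KiB as the new
+ * table's first data LLI.
+ */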
+
+/**
+ * init_mlli_tables_list() - Initialize MLLI tables list object with client
+ *				buffer information
+ * @llimgr_p:		The LLI manager object
+ * @mlli_tables_list_p:	The MLLI tables list to initialize
+ * @client_memref:	Registered client DMA buffer
+ * @dma_direction:	DMA direction for this operation
+ *
+ * Returns int 0 on success
+ */
+static int init_mlli_tables_list(struct llimgr_obj *llimgr_p,
+				 struct mlli_tables_list *mlli_tables_list_p,
+				 struct client_dma_buffer *client_memref,
+				 enum dma_data_direction dma_direction)
+{
+	const bool is_inbuf = (dma_direction == DMA_TO_DEVICE) ||
+	    (dma_direction == DMA_BIDIRECTIONAL);
+	const bool is_outbuf = (dma_direction == DMA_FROM_DEVICE) ||
+	    (dma_direction == DMA_BIDIRECTIONAL);
+	const bool is_memref_inbuf =
+	    (client_memref->dma_direction == DMA_TO_DEVICE) ||
+	    (client_memref->dma_direction == DMA_BIDIRECTIONAL);
+	const bool is_memref_outbuf =
+	    (client_memref->dma_direction == DMA_FROM_DEVICE) ||
+	    (client_memref->dma_direction == DMA_BIDIRECTIONAL);
+	int rc;
+
+#ifdef DEBUG
+	/* Verify that given MLLI tables list is "clean" */
+	if (mlli_tables_list_p->user_memref != NULL) {
+		pr_err("Got \"dirty\" MLLI tables list!\n");
+		return -EINVAL;
+	}
+#endif /*DEBUG*/
+	MLLI_TABLES_LIST_INIT(mlli_tables_list_p);
+	if (client_memref->buf_size > 0) {
+		/* Validate buffer access permissions */
+		if (is_inbuf && !is_memref_inbuf) {
+			pr_err("No read access (%d) to user buffer @ %p\n",
+				    client_memref->dma_direction,
+				    client_memref->user_buf_ptr);
+			return -EFAULT;
+		}
+		if (is_outbuf && !is_memref_outbuf) {
+			pr_err("No write access (%d), data buffer @ %p\n",
+				    client_memref->dma_direction,
+				    client_memref->user_buf_ptr);
+			return -EFAULT;
+		}
+
+	}
+	rc = sync_client_dma_buf(llimgr_p->dev,
+				 client_memref, true /* for device */,
+				 dma_direction);
+	if (likely(rc == 0)) {
+		/* Init. these fields only if the operations above succeeded */
+		mlli_tables_list_p->user_memref = client_memref;
+		mlli_tables_list_p->data_direction = dma_direction;
+	}
+	return rc;
+}
+
+/**
+ * cleanup_mlli_tables_list() - Cleanup MLLI tables resources
+ * @llimgr_p:	 LLI-manager pointer
+ * @mlli_table_p:	The MLLI tables list object
+ * @is_data_dirty:	If true (!0) the (output) data pages are marked as dirty
+ *
+ * Cleanup MLLI tables resources.
+ * This function may be invoked for a partially constructed MLLI tables list,
+ * as it tests for the existence of each resource before trying to release it.
+ */
+static void cleanup_mlli_tables_list(struct llimgr_obj *llimgr_p,
+				     struct mlli_tables_list
+				     *mlli_tables_list_p, int is_data_dirty)
+{
+	dma_addr_t cur_mlli_dma_addr;
+	dma_addr_t next_mlli_dma_addr;
+	u32 *cur_mlli_p;
+	u32 *next_mlli_p;
+	u32 *link_entry_p;
+	u32 lli_spad[SEP_LLI_ENTRY_WORD_SIZE];
+
+	pr_debug("mlli_tables_list_p=%p user_memref=%p table_count=%u\n",
+		      mlli_tables_list_p, mlli_tables_list_p->user_memref,
+		      mlli_tables_list_p->table_count);
+	/* Initialize to the first MLLI table */
+	if (mlli_tables_list_p->table_count > 0) {
+		cur_mlli_p =
+		    SEP_MLLI_GET_NEXT_VA(mlli_tables_list_p->
+					 link_to_first_table);
+		link_entry_p =
+		    mlli_tables_list_p->link_to_first_table +
+		    SEP_LLI_ENTRY_WORD_SIZE;
+		/* LE to BE... */
+		SEP_LLI_COPY_FROM_SEP(lli_spad, link_entry_p);
+		/* Actual allocation DMA address is one entry before
+		 * saved address */
+		cur_mlli_dma_addr =
+		    SEP_LLI_GET(lli_spad, ADDR) - SEP_LLI_ENTRY_BYTE_SIZE;
+	} else {
+		/* DLLI: no tables to free - skip the cleanup loop below */
+		cur_mlli_p = NULL;
+	}
+
+	/* Cleanup MLLI tables */
+	while (cur_mlli_p != NULL) {
+		pr_debug("Freeing MLLI table buffer at %p (%08llX)\n",
+			cur_mlli_p, (long long unsigned int)cur_mlli_dma_addr);
+		/* The link entry follows the first entry that holds next VA */
+		link_entry_p = cur_mlli_p + SEP_LLI_ENTRY_WORD_SIZE;
+		SEP_LLI_COPY_FROM_SEP(lli_spad, link_entry_p);/* LE to BE... */
+		/* Save link pointers before freeing the table */
+		next_mlli_p = SEP_MLLI_GET_NEXT_VA(cur_mlli_p);
+		/* Actual allocation DMA address is one entry before
+		 * saved address */
+		next_mlli_dma_addr =
+		    SEP_LLI_GET(lli_spad, ADDR) - SEP_LLI_ENTRY_BYTE_SIZE;
+		dma_pool_free(llimgr_p->mlli_cache,
+			      cur_mlli_p, cur_mlli_dma_addr);
+
+		cur_mlli_p = next_mlli_p;
+		cur_mlli_dma_addr = next_mlli_dma_addr;
+	}
+
+	if ((is_data_dirty) && (mlli_tables_list_p->user_memref != NULL))
+		sync_client_dma_buf(llimgr_p->dev,
+				    mlli_tables_list_p->user_memref,
+				    false /* for CPU */,
+				    mlli_tables_list_p->data_direction);
+
+	/* Clear traces (pointers) of released resources */
+	MLLI_TABLES_LIST_INIT(mlli_tables_list_p);
+
+}
+
+/**
+ * process_as_dlli() - Consider given MLLI request as DLLI and update the MLLI
+ *			object if possible. Otherwise return error.
+ *
+ * @mlli_tables_p:	Associated MLLI object with client memref set.
+ * @prepend_data:	Optional prepend data
+ * @prepend_data_size:	Optional prepend data size
+ */
+static int process_as_dlli(struct mlli_tables_list *mlli_tables_p,
+			   dma_addr_t prepend_data,
+			   unsigned long prepend_data_size)
+{
+	struct client_dma_buffer *memref = mlli_tables_p->user_memref;
+	u32 dma_size = memref->buf_size - memref->save4next_size;
+
+	/* Prepend data only (or 0 data) case */
+	if (memref->buf_size == 0) {
+		/* Handle 0-sized buffer or prepend_data only */
+		set_dlli(mlli_tables_p, prepend_data, prepend_data_size);
+		return 0;
+	}
+
+	/* Cannot concatenate prepend_data to client data with DLLI */
+	if (prepend_data_size > 0)
+		return -EINVAL;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	/* None of the DLLI cases is possible with cache line alignment buf. */
+	if (memref->sg_head != NULL)
+		return -EINVAL;
+#endif
+
+	/* Physically contiguous buffer case */
+	if (memref->sg_tail == NULL) {
+		/* If no sg_tail it is an indication that sg_main is phys.
+		 * contiguous - DLLI directly to client buffer */
+		set_dlli(mlli_tables_p, sg_dma_address(memref->sg_main),
+			 dma_size);
+		return 0;
+	}
+
+	/* Small buffer copied to aux. buffer */
+	if (memref->sg_main == NULL) {
+		/* If not sg_main (i.e., only sg_tail) we can
+		 * DLLI to the aux. buf. */
+		set_dlli(mlli_tables_p, memref->buf_end_aux_buf_dma, dma_size);
+		return 0;
+	}
+
+	return -EINVAL;		/* Not suitable for DLLI */
+}
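+
+/*
+ * Example (illustrative, assuming 16 B fits within DLLI_BUF_LIMIT and
+ * DLLI_AUX_BUF_LIMIT): a 16 B physically contiguous user buffer ends up
+ * with sg_tail == NULL and is DLLI'd directly at its DMA address, while
+ * the same 16 B scattered across non-adjacent pages is copied through
+ * the end aux. buffer (sg_main == NULL) and DLLI'd at
+ * buf_end_aux_buf_dma instead.
+ */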
+
+/**
+ * llimgr_create_mlli() - Create MLLI tables list for given user buffer
+ * @llimgr:		The LLI manager object handle
+ * @mlli_tables_p:	 A pointer to MLLI tables list object
+ * @dma_direction:	 The DMA direction of data flow
+ * @client_memref:	 Client DMA memory reference (locked pages, etc.)
+ * @prepend_data:	 DMA address of data buffer to prepend before user data
+ * @prepend_data_size:	 Size of prepend_data (0 if none)
+ *
+ * Returns int 0 on success
+ */
+int llimgr_create_mlli(void *llimgr,
+		       struct mlli_tables_list *mlli_tables_p,
+		       enum dma_data_direction dma_direction,
+		       struct client_dma_buffer *client_memref,
+		       dma_addr_t prepend_data, unsigned long prepend_data_size)
+{
+	struct llimgr_obj *llimgr_p = (struct llimgr_obj *)llimgr;
+	unsigned long remaining_main_data;
+	unsigned int remaining_main_sg_ents;
+	struct mlli_tables_list_iterator mlli_iter;
+	unsigned long client_dma_size;
+	unsigned long cur_sge_len = 0;
+	dma_addr_t cur_sge_addr;
+	/* (mlli_iter.next_lli_idx is initialized below to the end of the
+	 * "virtual" link table) */
+	struct scatterlist *cur_sg_entry;
+	int rc;
+
+	/* client_memref must exist even if no user data (buf_size == 0), i.e.,
+	 * just prepend_data. */
+	if (client_memref == NULL) {
+		pr_err("Client memref is NULL.\n");
+		return -EINVAL;
+	}
+
+	client_dma_size =
+		client_memref->buf_size - client_memref->save4next_size;
+
+	SEP_LOG_TRACE(
+		      "buf @ 0x%08lX, size=0x%08lX B, prepend_size=0x%08lX B, dma_dir=%d\n",
+		      (unsigned long)client_memref->user_buf_ptr,
+		      client_memref->buf_size, prepend_data_size,
+		      dma_direction);
+
+	rc = init_mlli_tables_list(llimgr_p,
+				   mlli_tables_p, client_memref, dma_direction);
+	if (unlikely(rc != 0))
+		return rc;	/* No resources to cleanup */
+
+	rc = process_as_dlli(mlli_tables_p, prepend_data, prepend_data_size);
+	if (rc == 0)		/* Mapped as DLLI */
+		return 0;
+	rc = 0;			/* Not DLLI-able; build MLLI tables below */
+
+	/* Initialize local state to empty list */
+	mlli_iter.prev_mlli_table_p = NULL;
+	mlli_iter.cur_mlli_table_p = mlli_tables_p->link_to_first_table;
+	/* "First" table is the stub in struct mlli_tables_list, so we
+	 * mark it as "full" by setting next_lli_idx to maximum */
+	mlli_iter.next_lli_idx = llimgr_p->max_lli_num;
+	mlli_iter.cur_mlli_accum_data = 0;
+
+	if (prepend_data_size > 0) {
+		rc = append_data_to_mlli(llimgr_p, mlli_tables_p, &mlli_iter,
+					 prepend_data, prepend_data_size);
+		if (unlikely(rc != 0)) {
+			pr_err("Fail: add LLI entry for prepend_data\n");
+			goto mlli_create_exit;	/* do cleanup */
+		}
+	}
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	if (client_memref->buf_start_aux_buf_size > 0) {
+		rc = append_data_to_mlli(llimgr_p, mlli_tables_p, &mlli_iter,
+					 client_memref->buf_start_aux_buf_dma,
+					 client_memref->buf_start_aux_buf_size);
+		if (unlikely(rc != 0)) {
+			pr_err("Fail: add LLI entry for start_aux_buf\n");
+			goto mlli_create_exit;	/* do cleanup */
+		}
+	}
+#endif
+
+	/* Calculate amount of "main" data before the last LLI
+	 * (round down to a crypto-block multiple so that no table other
+	 * than the last one holds a non-crypto-block-multiple of data) */
+	remaining_main_data =
+	    (prepend_data_size + client_dma_size -
+	     client_memref->buf_end_aux_buf_size) & ~MAX_CRYPTO_BLOCK_MASK;
+	/* Now remove the data outside of the main buffer */
+	remaining_main_data -= prepend_data_size;
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	remaining_main_data -= client_memref->buf_start_aux_buf_size;
+#endif
+
+	/* construct MLLI tables for sg_main list */
+	for (cur_sg_entry = client_memref->sg_main,
+	     remaining_main_sg_ents = client_memref->sg_main_nents;
+	     cur_sg_entry != NULL;
+	     cur_sg_entry = sg_next(cur_sg_entry), remaining_main_sg_ents--) {
+		/* Get current S/G entry length */
+		cur_sge_len = sg_dma_len(cur_sg_entry);
+		cur_sge_addr = sg_dma_address(cur_sg_entry);
+
+		/* Reached end of "main" data which is multiple of largest
+		 * crypto block? (consider split to next/last table).
+		 * (Check if needs to skip to next table for 2nd half) */
+		if ((remaining_main_data > 0) &&
+		    (cur_sge_len >=
+		     remaining_main_data) /*last "main" data */ &&
+		    /* NOT at end of table (i.e., starting a new table anyway) */
+		    (llimgr_p->max_lli_num > mlli_iter.next_lli_idx) &&
+		    /* Check if remaining entries don't fit into this table */
+		    (remaining_main_sg_ents -
+		     ((cur_sge_len == remaining_main_data) ? 1 : 0) +
+		     ((client_memref->buf_end_aux_buf_size > 0) ? 1 : 0)) >
+		    (llimgr_p->max_lli_num - mlli_iter.next_lli_idx)) {
+			/* "tail" would be in next/last table */
+			/* Add last LLI for "main" data */
+			rc = append_data_to_mlli(llimgr_p, mlli_tables_p,
+						 &mlli_iter, cur_sge_addr,
+						 remaining_main_data);
+			if (unlikely(rc != 0)) {
+				pr_err(
+					    "Failed adding LLI entry for sg_main (last).\n");
+				goto mlli_create_exit;	/* do cleanup */
+			}
+			cur_sge_len -= remaining_main_data;
+			cur_sge_addr += remaining_main_data;
+			/* Skip to next MLLI for tail data */
+			rc = alloc_next_mlli(llimgr_p, mlli_tables_p,
+					     &mlli_iter);
+			if (unlikely(rc != 0)) {
+				pr_err("Fail add MLLI table for tail.\n");
+				goto mlli_create_exit;	/* do cleanup */
+			}
+		}
+
+		if (likely(cur_sge_len > 0)) {
+			/* When entry is split to next table, this would append
+			 * the second half of it. */
+			rc = append_data_to_mlli(llimgr_p, mlli_tables_p,
+						 &mlli_iter, cur_sge_addr,
+						 cur_sge_len);
+			if (unlikely(rc != 0)) {
+				pr_err("Fail add LLI entry for sg_main\n");
+				goto mlli_create_exit;	/* do cleanup */
+			}
+		}
+	}			/*for */
+
+	if (remaining_main_sg_ents > 0) {
+		pr_err("Remaining sg_ents>0 after end of S/G list!\n");
+		rc = -EINVAL;
+		goto mlli_create_exit;	/* do cleanup */
+	}
+
+	/* Append end aux. buffer */
+	if (client_memref->buf_end_aux_buf_size > 0) {
+		rc = append_data_to_mlli(llimgr_p, mlli_tables_p, &mlli_iter,
+					 client_memref->buf_end_aux_buf_dma,
+					 client_memref->buf_end_aux_buf_size);
+		if (unlikely(rc != 0)) {
+			pr_err("Fail: add LLI entry for end_aux_buf\n");
+			goto mlli_create_exit;	/* do cleanup */
+		}
+	}
+
+	terminate_mlli_tables_list(&mlli_iter);
+	pr_debug("MLLI %u tables (rc=%d):\n",
+		      mlli_tables_p->table_count, rc);
+	llimgr_dump_mlli_tables_list(mlli_tables_p);
+
+ mlli_create_exit:
+	if (rc != 0) {
+		/* The MLLI tables list is always consistent at bail-out
+		 * points, so we can use the simple cleanup function. */
+		cleanup_mlli_tables_list(llimgr_p, mlli_tables_p, 0);
+	}
+
+	return rc;
+
+}
+
+/**
+ * llimgr_destroy_mlli() - Clean up resources of the given MLLI tables list
+ *				object (if it has any tables)
+ * @llimgr:		The LLI manager object handle
+ * @mlli_tables_p:	The MLLI tables list object
+ *
+ */
+void llimgr_destroy_mlli(void *llimgr,
+			 struct mlli_tables_list *mlli_tables_p)
+{
+	struct llimgr_obj *llimgr_p = (struct llimgr_obj *)llimgr;
+	const bool is_dirty =
+	    (mlli_tables_p->data_direction == DMA_BIDIRECTIONAL) ||
+	    (mlli_tables_p->data_direction == DMA_FROM_DEVICE);
+
+	cleanup_mlli_tables_list(llimgr_p, mlli_tables_p, is_dirty);
+}
+
+/**
+ * llimgr_mlli_to_seprpc_memref() - Convert given MLLI tables list into a SeP
+ *					RPC memory reference format
+ * @mlli_tables_p:	 The source MLLI table
+ * @memref_p:	 The destination RPC memory reference
+ *
+ */
+void llimgr_mlli_to_seprpc_memref(struct mlli_tables_list *mlli_tables_p,
+				  struct seprpc_memref *memref_p)
+{
+	u32 xlli_addr;
+	u16 xlli_size;
+	u16 table_count;
+
+	llimgr_get_mlli_desc_info(mlli_tables_p,
+				  &xlli_addr, &xlli_size, &table_count);
+
+	memref_p->ref_type = cpu_to_le32(table_count > 0 ?
+					 SEPRPC_MEMREF_MLLI :
+					 SEPRPC_MEMREF_DLLI);
+	memref_p->location = cpu_to_le32(xlli_addr);
+	memref_p->size = cpu_to_le32(xlli_size);
+	memref_p->count = cpu_to_le32(table_count);
+}
+
+/**
+ * llimgr_get_mlli_desc_info() - Get the MLLI info required for a descriptor.
+ *
+ * @mlli_tables_p:	The source MLLI table
+ * @first_table_addr_p:	First table DMA address or data DMA address for DLLI
+ * @first_table_size_p:	First table size in bytes or data size for DLLI
+ * @num_of_tables_p:	Number of MLLI tables in the list (0 for DLLI)
+ *
+ * In case of DLLI, first_table_* refers to the client DMA buffer (DLLI info.)
+ */
+void llimgr_get_mlli_desc_info(struct mlli_tables_list *mlli_tables_p,
+			       u32 *first_table_addr_p,
+			       u16 *first_table_size_p,
+			       u16 *num_of_tables_p)
+{
+	u32 link_lli_spad[SEP_LLI_ENTRY_WORD_SIZE];
+	u32 *first_mlli_link_p;
+
+	first_mlli_link_p = mlli_tables_p->link_to_first_table +
+	    SEP_LLI_ENTRY_WORD_SIZE;
+	SEP_LLI_COPY_FROM_SEP(link_lli_spad, first_mlli_link_p);
+	/* Descriptors are read by direct access, which takes care of
+	 * swapping from host endianness to SeP endianness, so we need
+	 * to revert the endianness in the link LLI entry */
+	*first_table_addr_p = SEP_LLI_GET(link_lli_spad, ADDR);
+	*first_table_size_p = SEP_LLI_GET(link_lli_spad, SIZE);
+	*num_of_tables_p = mlli_tables_p->table_count;
+}
diff --git a/drivers/staging/sep54/lli_mgr.h b/drivers/staging/sep54/lli_mgr.h
new file mode 100644
index 0000000..ab4bec9
--- /dev/null
+++ b/drivers/staging/sep54/lli_mgr.h
@@ -0,0 +1,291 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+/*!                                                                           *
+ * \file lli_mgr.h                                                            *
+ * \brief LLI logic: API definition                                           *
+ *                                                                            */
+
+#ifndef __LLI_MGR_H__
+#define __LLI_MGR_H__
+
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include "sep_lli.h"
+
+#define LLIMGR_NULL_HANDLE NULL
+
+/* Using this macro ensures correct initialization in case of future changes */
+#define MLLI_TABLES_LIST_INIT(mlli_tables_list_ptr) do {		\
+	memset((mlli_tables_list_ptr), 0,				\
+	       sizeof(struct mlli_tables_list));			\
+} while (0)		/* Executed once */
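+
+/*
+ * Usage sketch (illustrative, not part of this patch): zero-initialize a
+ * tables list before handing it to llimgr_create_mlli() so table_count and
+ * user_memref start in a known-clean state:
+ *
+ *	struct mlli_tables_list tables;
+ *
+ *	MLLI_TABLES_LIST_INIT(&tables);
+ */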
+
+/**
+ * llimgr_is_same_mlli_tables() - Tell whether two tables list objects point
+ *					to the same tables list
+ * @llimgr:		The LLI manager object handle
+ * @mlli_tables1:	First MLLI tables list object
+ * @mlli_tables2:	Second MLLI tables list object
+ *
+ * Tell whether the given tables list objects point to the same tables list
+ * (used to identify an in-place operation)
+ */
+#define llimgr_is_same_mlli_tables(llimgr, mlli_tables1, mlli_tables2)   \
+	((mlli_tables1)->user_memref == (mlli_tables2)->user_memref)
+
+/* Clean struct client_dma_buffer buffer info */
+#define CLEAN_DMA_BUFFER_INFO(_client_dma_buf_p) \
+	memset(_client_dma_buf_p, 0, sizeof(struct client_dma_buffer))
+
+/**
+ * struct client_dma_buffer - Client DMA buffer object
+ * @buf_size: buffer size in bytes
+ * @user_buf_ptr:	Pointer to start of buffer in user space. May be NULL
+ *			for buf_size==0 or if the user is the kernel (given
+ *			scatterlist)
+ * @num_of_pages:	Number of pages in user_pages array
+ * @user_pages:		Locked user pages (for user space buffer)
+ * @dma_direction:	DMA direction over given buffer mapping
+ * @sg_head:		S/G list of buffer header (memcopied)
+ * @sg_main:		S/G list of buffer body (for DMA)
+ * @sg_tail:		S/G list of buffer tail (memcopied)
+ * @sg_main_nents:	Num. of S/G entries for sg_main
+ * @sg_save4next:	S/G list of buffer chunk past "tail" for copying to
+ *			side buffer for next operation (for hash block remains)
+ * @sg_save4next_nents:	Num. of S/G entries for sg_save4next
+ * @save4next_size:	Size of data in sg_save4next
+ * @buf_end_aux_buf_va:	Tail aux. buffer virtual address
+ * @buf_end_aux_buf_dma:	DMA address of buf_end_aux_buf_va
+ * @buf_end_aux_buf_size: Number of bytes copied in tail aux. buffer
+ * @buf_start_aux_buf_va:	Header aux. buffer virtual address
+ * @buf_start_aux_buf_dma:	DMA address of buf_start_aux_buf_va
+ * @buf_start_aux_buf_size: Number of bytes copied in header aux. buffer
+ */
+struct client_dma_buffer {
+	unsigned long buf_size;
+	/* User buffer info. */
+	u8 __user *user_buf_ptr;
+	int num_of_pages;
+	struct page **user_pages;
+
+	/*
+	 * DMA mapping info (either created for user space pages
+	 * or retrieved from kernel client)
+	 */
+	enum dma_data_direction dma_direction;
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	struct scatterlist *sg_head;
+#endif
+	struct scatterlist *sg_main;
+	struct scatterlist *sg_tail;
+	struct scatterlist *sg_save4next;
+
+	unsigned int sg_main_nents;
+	unsigned long save4next_size;
+	/* Auxiliary driver buffer for sg_head and sg_tail copies */
+	void *buf_end_aux_buf_va;
+	dma_addr_t buf_end_aux_buf_dma;
+	unsigned long buf_end_aux_buf_size;
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	/* Currently only the cache line at the buffer start requires an aux. buffer */
+	void *buf_start_aux_buf_va;
+	dma_addr_t buf_start_aux_buf_dma;
+	unsigned long buf_start_aux_buf_size;
+#endif				/*CONFIG_NOT_COHERENT_CACHE */
+
+};
+
+/**
+ * struct mlli_tables_list - MLLI tables list object
+ * @link_to_first_table:	"Link" to the first table. One extra LLI entry
+ *				implements the list head using the same data
+ *				structure used to link subsequent tables; this
+ *				avoids special-case code for linking the first
+ *				table.
+ * @table_count:	The total number of fragmented tables. table_count is
+ *			only 16 bits wide, matching the size allocated for it
+ *			in the SW descriptor.
+ * @user_memref:	Referenced client DMA buffer
+ * @data_direction:	This operation DMA direction
+ *
+ */
+struct mlli_tables_list {
+	u32 link_to_first_table[2 * SEP_LLI_ENTRY_WORD_SIZE];
+	u16 table_count;
+	struct client_dma_buffer *user_memref;
+	enum dma_data_direction data_direction;
+};
+
+/**
+ * llimgr_mlli_is_dlli() - Check whether the given MLLI tables list object
+ *				points to a Direct LLI (DLLI) buffer
+ * @mlli_tables_p:	A pointer to a tables list
+ *
+ * Returns true for a DLLI buffer, false otherwise. If mlli_tables_p points
+ * to an empty data buffer, this function returns true.
+ */
+static inline bool llimgr_mlli_is_dlli(struct mlli_tables_list *mlli_tables_p)
+{
+	return (mlli_tables_p->user_memref != NULL) &&
+	    (mlli_tables_p->table_count == 0);
+}
+
+/**
+ * llimgr_create() - Create LLI-manager object
+ * @dev:	 Device context
+ * @mlli_table_size:	 The maximum size of an MLLI table in bytes
+ *
+ * Returns llimgr_h Created object handle or LLIMGR_NULL_HANDLE if failed
+ */
+void *llimgr_create(struct device *dev, unsigned long mlli_table_size);
+
+/**
+ * llimgr_destroy() - Destroy (free resources of) given LLI-manager object
+ * @llimgr:	 LLI-manager object handle
+ *
+ */
+void llimgr_destroy(void *llimgr);
+
+/**
+ * llimgr_register_client_dma_buf() - Register given client buffer for DMA
+ *					operation.
+ * @llimgr:	 The LLI manager object handle
+ * @user_buf_ptr:	 Pointer in user space of the user buffer
+ * @sgl:	Client provided s/g list. user_buf_ptr is assumed NULL if this
+ *		list is given (!NULL).
+ * @buf_size:	 The user buffer size in bytes (incl. save4next). May be 0.
+ * @save4next_size:	Amount from buffer end to save for next op.
+ *			(split into a separate sgl). May be 0.
+ * @dma_direction:	The DMA direction this buffer would be used for
+ * @client_dma_buf_p:	Pointer to the user DMA buffer "object"
+ *
+ * Register given client buffer for DMA operation.
+ * If user_buf_ptr!=NULL and sgl==NULL it locks the user pages and creates
+ * head/main/tail s/g lists. If sgl!=NULL, it splits it into head/main/tail
+ * s/g lists.
+ * Returns 0 for success
+ */
+int llimgr_register_client_dma_buf(void *llimgr,
+				   u8 __user *user_buf_ptr,
+				   struct scatterlist *sgl,
+				   const unsigned long buf_size,
+				   const unsigned long save4next_size,
+				   const enum dma_data_direction dma_direction,
+				   struct client_dma_buffer *client_dma_buf_p);
+
+/**
+ * llimgr_deregister_client_dma_buf() - Unmap given user DMA buffer
+ *					(flush and unlock pages)
+ * @llimgr:		The LLI manager object handle
+ * @client_dma_buf_p:	User DMA buffer object
+ *
+ */
+void llimgr_deregister_client_dma_buf(void *llimgr,
+				      struct client_dma_buffer
+				      *client_dma_buf_p);
+
+/**
+ * llimgr_copy_from_client_buf_save4next() - Copy from the sg_save4next chunk
+ *	of the client DMA buffer to given buffer.
+ *	Used to save hash block remainder.
+ *
+ * @client_dma_buf_p:	The client DMA buffer with the save4next chunk
+ * @to_buf:		Target buffer to copy to.
+ * @buf_len:		Given buffer length (to avoid buffer overflow)
+ *
+ * Returns the number of bytes copied, or -ENOMEM if the given buffer is too small
+ */
+int llimgr_copy_from_client_buf_save4next(struct client_dma_buffer
+					  *client_dma_buf_p, u8 *to_buf,
+					  unsigned long buf_len);
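+
+/*
+ * Intended-use sketch (an assumption based on the description above;
+ * dma_buf is a placeholder): after a hash update, the non-block-multiple
+ * tail split into sg_save4next is copied into a driver-side buffer for the
+ * next operation:
+ *
+ *	u8 remainder[SEP_HASH_BLOCK_SIZE_MAX];
+ *	int copied = llimgr_copy_from_client_buf_save4next(dma_buf, remainder,
+ *							   sizeof(remainder));
+ *	if (copied < 0)
+ *		goto error;
+ */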
+
+/**
+ * llimgr_create_mlli() - Create MLLI tables list for given user buffer
+ * @llimgr:		The LLI manager object handle
+ * @mlli_tables_p:	 A pointer to MLLI tables list object
+ * @dma_direction:	 The DMA direction of data flow
+ * @user_memref:	 User DMA memory reference (locked pages, etc.)
+ * @prepend_data:	 DMA address of data buffer to prepend before user data
+ * @prepend_data_size:	 Size of prepend_data (0 if none)
+ *
+ * Returns 0 on success
+ */
+int llimgr_create_mlli(void *llimgr,
+		       struct mlli_tables_list *mlli_tables_p,
+		       enum dma_data_direction dma_direction,
+		       struct client_dma_buffer *user_memref,
+		       dma_addr_t prepend_data,
+		       unsigned long prepend_data_size);
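+
+/*
+ * End-to-end lifecycle sketch (assumed caller flow, error handling trimmed;
+ * user_ptr, size and llimgr are placeholders):
+ *
+ *	struct client_dma_buffer buf;
+ *	struct mlli_tables_list tables;
+ *
+ *	CLEAN_DMA_BUFFER_INFO(&buf);
+ *	MLLI_TABLES_LIST_INIT(&tables);
+ *	rc = llimgr_register_client_dma_buf(llimgr, user_ptr, NULL, size, 0,
+ *					    DMA_TO_DEVICE, &buf);
+ *	rc = llimgr_create_mlli(llimgr, &tables, DMA_TO_DEVICE, &buf, 0, 0);
+ *	... dispatch a SeP descriptor that references the tables ...
+ *	llimgr_destroy_mlli(llimgr, &tables);
+ *	llimgr_deregister_client_dma_buf(llimgr, &buf);
+ */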
+
+/**
+ * llimgr_destroy_mlli() - Clean up resources of the given MLLI tables list
+ *				object (if it has any tables)
+ * @llimgr:		The LLI manager object handle
+ * @mlli_tables_p:	The MLLI tables list object
+ *
+ */
+void llimgr_destroy_mlli(void *llimgr,
+			 struct mlli_tables_list *mlli_tables_p);
+
+/**
+ * llimgr_mlli_to_seprpc_memref() - Convert given MLLI tables list into a
+ *					SeP RPC memory reference format
+ * @mlli_tables_p:	 The source MLLI table
+ * @memref_p:	 The destination RPC memory reference
+ *
+ */
+void llimgr_mlli_to_seprpc_memref(struct mlli_tables_list *mlli_tables_p,
+				  struct seprpc_memref *memref_p);
+
+/**
+ * llimgr_get_mlli_desc_info() - Get the MLLI info required for a descriptor.
+ *
+ * @mlli_tables_p:	The source MLLI table
+ * @first_table_addr_p:	First table DMA address or data DMA address for DLLI
+ * @first_table_size_p:	First table size in bytes or data size for DLLI
+ * @num_of_tables_p:	Number of MLLI tables in the list (0 for DLLI)
+ *
+ * In case of DLLI, first_table_* refers to the client DMA buffer (DLLI info.)
+ */
+void llimgr_get_mlli_desc_info(struct mlli_tables_list *mlli_tables_p,
+			       u32 *first_table_addr_p,
+			       u16 *first_table_size_p,
+			       u16 *num_of_tables_p);
+
+#ifdef DEBUG
+/**
+ * llimgr_dump_mlli_tables_list() - Dump all the MLLI tables in a given tables
+ *					list
+ * @mlli_tables_list_p:	 Pointer to tables list structure
+ *
+ */
+void llimgr_dump_mlli_tables_list(struct mlli_tables_list *mlli_tables_list_p);
+#else
+#define llimgr_dump_mlli_tables_list(mlli_tables_list_p) do {} while (0)
+#endif /*DEBUG*/
+#endif /*__LLI_MGR_H__*/
diff --git a/drivers/staging/sep54/sep_applets.h b/drivers/staging/sep54/sep_applets.h
new file mode 100644
index 0000000..0b65a9f
--- /dev/null
+++ b/drivers/staging/sep54/sep_applets.h
@@ -0,0 +1,37 @@
+/*
+ *  sep_applets.h - Security Processor applet definitions
+ *
+ *  Copyright(c) 2012-2013 Intel Corporation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ *
+ *  You should have received a copy of the GNU General Public License along with
+ *  this program; if not, write to the Free Software Foundation, Inc., 59
+ *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+#ifndef _SEP_APPLETS_H
+#define _SEP_APPLETS_H
+
+/* Kernel side threads (Agents as DX calls them) */
+#define RPMB_AGENT_ID            0
+
+/* Applet UUIDs */
+#define DEFAULT_APP_UUID { 0x00, 0xDE, 0xFA, 0x01, 0xDE, 0xFA, 0x02, 0xDE, \
+	0xFA, 0x03, 0xDE, 0xFA, 0x04, 0xDE, 0xFA, 0xFF }
+
+#define HDCP_APP_UUID { 0x10, 0x21, 0x32, 0x43, 0x54, 0x65, 0x76, 0x87,     \
+	0x98, 0xA9, 0xBA, 0xCB, 0xDC, 0xED, 0xFE, 0x0F }
+
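+/*
+ * Usage note (illustrative): the UUID macros expand to 16-byte initializer
+ * lists, e.g.:
+ *
+ *	static const u8 hdcp_uuid[16] = HDCP_APP_UUID;
+ *
+ * and the resulting array is passed when opening an applet session.
+ */
+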
+#define CMD_RPMB_ENABLE          1
+#define CMD_IMAGE_VERIFY         3
+#define CMD_KEYPOLICY_CHECK      7
+#define HDCP_RX_HDMI_STATUS 0x80000080
+
+#endif /* _SEP_APPLETS_H */
diff --git a/drivers/staging/sep54/sep_compat_ioctl.c b/drivers/staging/sep54/sep_compat_ioctl.c
new file mode 100644
index 0000000..26fbb7e
--- /dev/null
+++ b/drivers/staging/sep54/sep_compat_ioctl.c
@@ -0,0 +1,1068 @@
+/*
+ * Copyright (C) 2013 Intel Finland Oy
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/compat.h>
+#include <linux/compiler.h>
+#include <linux/uaccess.h>
+#include <linux/printk.h>
+#include "sep_compat_ioctl.h"
+#include "dx_driver_abi.h"
+#include "dx_dev_defs.h"
+#include "sepapp.h"
+
+typedef int sep_ioctl_compat_t(struct file *filp, unsigned int cmd,
+			       unsigned long arg);
+
+static int compat_sep_ioctl_get_ver_major(struct file *filp, unsigned int cmd,
+					  unsigned long arg)
+{
+	return sep_ioctl(filp, cmd, arg);
+}
+
+static int compat_sep_ioctl_get_ver_minor(struct file *filp, unsigned int cmd,
+					  unsigned long arg)
+{
+	return sep_ioctl(filp, cmd, arg);
+}
+
+static int compat_sep_ioctl_get_sym_cipher_ctx_size(struct file *filp,
+						    unsigned int cmd,
+						    unsigned long arg)
+{
+	return sep_ioctl(filp, cmd, arg);
+}
+
+static int compat_sep_ioctl_get_auth_enc_ctx_size(struct file *filp,
+						  unsigned int cmd,
+						  unsigned long arg)
+{
+	return sep_ioctl(filp, cmd, arg);
+}
+
+static int compat_sep_ioctl_get_mac_ctx_size(struct file *filp,
+					     unsigned int cmd,
+					     unsigned long arg)
+{
+	return sep_ioctl(filp, cmd, arg);
+}
+
+static int compat_sep_ioctl_get_hash_ctx_size(struct file *filp,
+					      unsigned int cmd,
+					      unsigned long arg)
+{
+	return sep_ioctl(filp, cmd, arg);
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct sym_cipher_init_params_32 {
+	u32 context_buf;	/*[in] */
+	struct dxdi_sym_cipher_props props;	/*[in] */
+	u32 error_info;	/*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_sym_cipher_init(struct file *filp, unsigned int cmd,
+					    unsigned long arg)
+{
+	struct sym_cipher_init_params_32 init_32;
+	struct dxdi_sym_cipher_init_params __user *init_params;
+	int ret;
+
+	if (copy_from_user(&init_32, (void __user *)arg, sizeof(init_32)))
+		return -EFAULT;
+
+	init_params = compat_alloc_user_space(sizeof(*init_params));
+	if (!access_ok(VERIFY_WRITE, init_params, sizeof(*init_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)init_32.context_buf,
+		       &init_params->context_buf)
+	    || copy_to_user(&init_params->props, &init_32.props,
+			    sizeof(init_32.props)))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)init_params);
+
+	if (__get_user(init_32.error_info, &init_params->error_info))
+		return -EFAULT;
+
+	if (put_user(init_32.error_info,
+		       &((struct sym_cipher_init_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return ret;
+}
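+
+/*
+ * Note: the conversion handlers below all follow the same thunk pattern as
+ * compat_sep_ioctl_sym_cipher_init() above (sketch only, field names
+ * illustrative): copy the packed 32-bit layout in, rebuild the native
+ * structure in compat-allocated user space, call sep_ioctl(), then copy
+ * the [out] members back:
+ *
+ *	copy_from_user(&up32, (void __user *)arg, sizeof(up32));
+ *	native = compat_alloc_user_space(sizeof(*native));
+ *	__put_user((void __user *)(unsigned long)up32.ptr32, &native->ptr);
+ *	ret = sep_ioctl(filp, cmd, (unsigned long)native);
+ *	__get_user(up32.error_info, &native->error_info);
+ *	copy_to_user((void __user *)arg, &up32, sizeof(up32));
+ */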
+
+#pragma pack(push)
+#pragma pack(4)
+struct auth_enc_init_params_32 {
+	u32 context_buf; /*[in] */
+	struct dxdi_auth_enc_props props; /*[in] */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_auth_enc_init(struct file *filp, unsigned int cmd,
+					  unsigned long arg)
+{
+	struct auth_enc_init_params_32 up32;
+	struct dxdi_auth_enc_init_params __user *init_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	init_params = compat_alloc_user_space(sizeof(*init_params));
+	if (!access_ok(VERIFY_WRITE, init_params, sizeof(*init_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &init_params->context_buf)
+	    || copy_to_user(&init_params->props, &up32.props,
+			    sizeof(up32.props)))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)init_params);
+
+	if (__get_user(up32.error_info, &init_params->error_info))
+		return -EFAULT;
+
+	if (put_user(up32.error_info,
+		       &((struct auth_enc_init_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct mac_init_params_32 {
+	u32 context_buf; /*[in] */
+	struct dxdi_mac_props props;  /*[in] */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_mac_init(struct file *filp, unsigned int cmd,
+				     unsigned long arg)
+{
+	struct mac_init_params_32 up32;
+	struct dxdi_mac_init_params __user *init_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	init_params = compat_alloc_user_space(sizeof(*init_params));
+	if (!access_ok(VERIFY_WRITE, init_params, sizeof(*init_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &init_params->context_buf)
+	    || copy_to_user(&init_params->props, &up32.props,
+			    sizeof(up32.props)))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)init_params);
+
+	if (__get_user(up32.error_info, &init_params->error_info))
+		return -EFAULT;
+
+	if (put_user(up32.error_info,
+		       &((struct mac_init_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct hash_init_params_32 {
+	u32 context_buf; /*[in] */
+	enum dxdi_hash_type hash_type;  /*[in] */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_hash_init(struct file *filp, unsigned int cmd,
+				      unsigned long arg)
+{
+	struct hash_init_params_32 up32;
+	struct dxdi_hash_init_params __user *init_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	init_params = compat_alloc_user_space(sizeof(*init_params));
+	if (!access_ok(VERIFY_WRITE, init_params, sizeof(*init_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &init_params->context_buf)
+	    || __put_user(up32.hash_type, &init_params->hash_type))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)init_params);
+
+	if (__get_user(up32.error_info, &init_params->error_info))
+		return -EFAULT;
+
+	if (put_user(up32.error_info,
+		       &((struct hash_init_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct process_dblk_params_32 {
+	u32 context_buf; /*[in] */
+	u32 data_in;  /*[in] */
+	u32 data_out; /*[in] */
+	enum dxdi_data_block_type data_block_type;  /*[in] */
+	u32 data_in_size; /*[in] */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_proc_dblk(struct file *filp, unsigned int cmd,
+				      unsigned long arg)
+{
+	struct process_dblk_params_32 up32;
+	struct dxdi_process_dblk_params __user *dblk_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	dblk_params = compat_alloc_user_space(sizeof(*dblk_params));
+	if (!access_ok(VERIFY_WRITE, dblk_params, sizeof(*dblk_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &dblk_params->context_buf)
+	    || __put_user((void __user *)(unsigned long)up32.data_in,
+			  &dblk_params->data_in)
+	    || __put_user((void __user *)(unsigned long)up32.data_out,
+			  &dblk_params->data_out)
+	    || __put_user(up32.data_block_type, &dblk_params->data_block_type)
+	    || __put_user(up32.data_in_size, &dblk_params->data_in_size))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)dblk_params);
+
+	if (__get_user(up32.error_info, &dblk_params->error_info))
+		return -EFAULT;
+
+	if (put_user(up32.error_info,
+		       &((struct process_dblk_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct fin_process_params_32 {
+	u32 context_buf; /*[in] */
+	u32 data_in;  /*[in] */
+	u32 data_out; /*[in] */
+	u32 data_in_size; /*[in] (octets) */
+	u8 digest_or_mac[DXDI_DIGEST_SIZE_MAX]; /*[out] */
+	u8 digest_or_mac_size;  /*[out] (octets) */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_fin_proc(struct file *filp, unsigned int cmd,
+				     unsigned long arg)
+{
+	struct fin_process_params_32 up32;
+	struct dxdi_fin_process_params __user *fin_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	fin_params = compat_alloc_user_space(sizeof(*fin_params));
+	if (!access_ok(VERIFY_WRITE, fin_params, sizeof(*fin_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &fin_params->context_buf)
+	    || __put_user((void __user *)(unsigned long)up32.data_in,
+			  &fin_params->data_in)
+	    || __put_user((void __user *)(unsigned long)up32.data_out,
+			  &fin_params->data_out)
+	    || __put_user(up32.data_in_size, &fin_params->data_in_size))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)fin_params);
+
+	if (copy_from_user(up32.digest_or_mac, fin_params->digest_or_mac,
+			   DXDI_DIGEST_SIZE_MAX)
+	    || __get_user(up32.digest_or_mac_size,
+			  &fin_params->digest_or_mac_size)
+	    || __get_user(up32.error_info, &fin_params->error_info))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct combined_init_params_32 {
+	struct dxdi_combined_props props; /*[in] */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_combined_init(struct file *filp, unsigned int cmd,
+					  unsigned long arg)
+{
+	struct combined_init_params_32 up32;
+	struct dxdi_combined_init_params __user *init_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	init_params = compat_alloc_user_space(sizeof(*init_params));
+	if (!access_ok(VERIFY_WRITE, init_params, sizeof(*init_params)))
+		return -EFAULT;
+
+	if (copy_to_user(&init_params->props, &up32.props, sizeof(up32.props)))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)init_params);
+
+	if (__get_user(up32.error_info, &init_params->error_info))
+		return -EFAULT;
+
+	if (put_user(up32.error_info,
+		       &((struct combined_init_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct combined_proc_dblk_params_32 {
+	struct dxdi_combined_props props; /*[in] */
+	u32 data_in;  /*[in] */
+	u32 data_out; /*[out] */
+	u32 data_in_size; /*[in] (octets) */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_combined_proc_dblk(struct file *filp,
+					       unsigned int cmd,
+					       unsigned long arg)
+{
+	struct combined_proc_dblk_params_32 up32;
+	struct dxdi_combined_proc_dblk_params __user *blk_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	blk_params = compat_alloc_user_space(sizeof(*blk_params));
+	if (!access_ok(VERIFY_WRITE, blk_params, sizeof(*blk_params)))
+		return -EFAULT;
+
+	if (copy_to_user(&blk_params->props, &up32.props,
+			 sizeof(up32.props))
+	    || __put_user((void __user *)(unsigned long)up32.data_in,
+			  &blk_params->data_in)
+	    || __put_user((void __user *)(unsigned long)up32.data_out,
+			  &blk_params->data_out)
+	    || __put_user(up32.data_in_size, &blk_params->data_in_size))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)blk_params);
+
+	if (__get_user(up32.error_info, &blk_params->error_info))
+		return -EFAULT;
+
+	if (put_user(up32.error_info,
+		       &((struct combined_proc_dblk_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct combined_proc_params_32 {
+	struct dxdi_combined_props props; /*[in] */
+	u32 data_in;  /*[in] */
+	u32 data_out; /*[out] */
+	u32 data_in_size; /*[in] (octets) */
+	u8 auth_data[DXDI_DIGEST_SIZE_MAX]; /*[out] */
+	u8 auth_data_size;  /*[out] (octets) */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_combined_fin_proc(struct file *filp,
+					      unsigned int cmd,
+					      unsigned long arg)
+{
+	struct combined_proc_params_32 up32;
+	struct dxdi_combined_proc_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (copy_to_user(&user_params->props, &up32.props,
+			 sizeof(up32.props))
+	    || __put_user((void __user *)(unsigned long)up32.data_in,
+			  &user_params->data_in)
+	    || __put_user((void __user *)(unsigned long)up32.data_out,
+			  &user_params->data_out)
+	    || __put_user(up32.data_in_size, &user_params->data_in_size))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	if (copy_from_user(up32.auth_data, user_params->auth_data,
+			   DXDI_DIGEST_SIZE_MAX)
+	    || __get_user(up32.auth_data_size,
+			  &user_params->auth_data_size)
+	    || __get_user(up32.error_info, &user_params->error_info))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return ret;
+}
+
+static int compat_sep_ioctl_combined_proc(struct file *filp,
+					  unsigned int cmd, unsigned long arg)
+{
+	return compat_sep_ioctl_combined_fin_proc(filp, cmd, arg);
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct sym_cipher_proc_params_32 {
+	u32 context_buf; /*[in] */
+	struct dxdi_sym_cipher_props props; /*[in] */
+	u32 data_in;  /*[in] */
+	u32 data_out; /*[in] */
+	u32 data_in_size; /*[in] (octets) */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_sym_cipher_proc(struct file *filp, unsigned int cmd,
+					    unsigned long arg)
+{
+	struct sym_cipher_proc_params_32 up32;
+	struct dxdi_sym_cipher_proc_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &user_params->context_buf)
+	    || copy_to_user(&user_params->props, &up32.props,
+			    sizeof(up32.props))
+	    || __put_user((void __user *)(unsigned long)up32.data_in,
+			  &user_params->data_in)
+	    || __put_user((void __user *)(unsigned long)up32.data_out,
+			  &user_params->data_out)
+	    || __put_user(up32.data_in_size, &user_params->data_in_size))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	if (__get_user(up32.error_info, &user_params->error_info))
+		return -EFAULT;
+
+	if (put_user(up32.error_info,
+		       &((struct sym_cipher_proc_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct auth_enc_proc_params_32 {
+	u32 context_buf; /*[in] */
+	struct dxdi_auth_enc_props props; /*[in] */
+	u32 adata;    /*[in] */
+	u32 text_data;  /*[in] */
+	u32 data_out; /*[in] */
+	u8 tag[DXDI_DIGEST_SIZE_MAX]; /*[out] */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_auth_enc_proc(struct file *filp, unsigned int cmd,
+					  unsigned long arg)
+{
+	struct auth_enc_proc_params_32 up32;
+	struct dxdi_auth_enc_proc_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &user_params->context_buf)
+	    || copy_to_user(&user_params->props, &up32.props,
+			    sizeof(up32.props))
+	    || __put_user((void __user *)(unsigned long)up32.adata,
+			  &user_params->adata)
+	    || __put_user((void __user *)(unsigned long)up32.text_data,
+			  &user_params->text_data)
+	    || __put_user((void __user *)(unsigned long)up32.data_out,
+			  &user_params->data_out))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	if (copy_from_user(up32.tag, user_params->tag,
+			   DXDI_DIGEST_SIZE_MAX)
+	    || __get_user(up32.error_info, &user_params->error_info))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct mac_proc_params_32 {
+	u32 context_buf; /*[in] */
+	struct dxdi_mac_props props;  /*[in] */
+	u32 data_in;  /*[in] */
+	u32 data_in_size; /*[in] (octets) */
+	u8 mac[DXDI_DIGEST_SIZE_MAX]; /*[out] */
+	u8 mac_size;  /*[out] (octets) */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_mac_proc(struct file *filp, unsigned int cmd,
+				     unsigned long arg)
+{
+	struct mac_proc_params_32 up32;
+	struct dxdi_mac_proc_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &user_params->context_buf)
+	    || copy_to_user(&user_params->props, &up32.props,
+			 sizeof(up32.props))
+	    || __put_user((void __user *)(unsigned long)up32.data_in,
+			  &user_params->data_in)
+	    || __put_user(up32.data_in_size, &user_params->data_in_size))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	if (copy_from_user(up32.mac, user_params->mac, DXDI_DIGEST_SIZE_MAX)
+	    || __get_user(up32.mac_size, &user_params->mac_size)
+	    || __get_user(up32.error_info, &user_params->error_info))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct hash_proc_params_32 {
+	u32 context_buf; /*[in] */
+	enum dxdi_hash_type hash_type;  /*[in] */
+	u32 data_in;  /*[in] */
+	u32 data_in_size; /*[in] (octets) */
+	u8 digest[DXDI_DIGEST_SIZE_MAX];  /*[out] */
+	u8 digest_size; /*[out] (octets) */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_hash_proc(struct file *filp, unsigned int cmd,
+				      unsigned long arg)
+{
+	struct hash_proc_params_32 up32;
+	struct dxdi_hash_proc_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &user_params->context_buf)
+	    || __put_user(up32.hash_type, &user_params->hash_type)
+	    || __put_user((void __user *)(unsigned long)up32.data_in,
+			  &user_params->data_in)
+	    || __put_user(up32.data_in_size, &user_params->data_in_size))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	if (copy_from_user(up32.digest, user_params->digest,
+			   DXDI_DIGEST_SIZE_MAX)
+	    || __get_user(up32.digest_size, &user_params->digest_size)
+	    || __get_user(up32.error_info, &user_params->error_info))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct sep_rpc_params_32 {
+	u16 agent_id; /*[in] */
+	u16 func_id;  /*[in] */
+	struct dxdi_memref mem_refs[SEP_RPC_MAX_MEMREF_PER_FUNC]; /*[in] */
+	u32 rpc_params_size;  /*[in] */
+	u32 rpc_params; /*[in] */
+	/* rpc_params to be copied into kernel DMA buffer */
+	enum seprpc_retcode error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_sep_rpc(struct file *filp, unsigned int cmd,
+				    unsigned long arg)
+{
+	struct sep_rpc_params_32 up32;
+	struct dxdi_sep_rpc_params __user *user_params;
+
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user(up32.agent_id, &user_params->agent_id)
+	    || __put_user(up32.func_id, &user_params->func_id)
+	    || copy_to_user(user_params->mem_refs, up32.mem_refs,
+			    sizeof(struct dxdi_memref)
+			    * SEP_RPC_MAX_MEMREF_PER_FUNC)
+	    || __put_user(up32.rpc_params_size, &user_params->rpc_params_size)
+	    || __put_user((void __user *)(unsigned long)up32.rpc_params,
+			 &user_params->rpc_params))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+	if (ret)
+		return ret;
+
+	if (__get_user(up32.error_info, &user_params->error_info))
+		return -EFAULT;
+
+	if (put_user(up32.error_info,
+		       &((struct sep_rpc_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return 0;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct register_mem4dma_params_32 {
+	struct dxdi_memref memref;  /*[in] */
+	int memref_id;  /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_register_mem4dma(struct file *filp,
+					     unsigned int cmd,
+					     unsigned long arg)
+{
+	struct register_mem4dma_params_32 up32;
+	struct dxdi_register_mem4dma_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (copy_to_user(&user_params->memref, &up32.memref,
+		sizeof(struct dxdi_memref)))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+	if (ret)
+		return ret;
+
+	if (__get_user(up32.memref_id, &user_params->memref_id))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct alloc_mem4dma_params_32 {
+	u32 size; /*[in] */
+	int memref_id;  /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_alloc_mem4dma(struct file *filp, unsigned int cmd,
+					  unsigned long arg)
+{
+	struct alloc_mem4dma_params_32 up32;
+	struct dxdi_alloc_mem4dma_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user(up32.size, &user_params->size))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	if (__get_user(up32.memref_id, &user_params->memref_id))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct free_mem4dma_params_32 {
+	int memref_id;  /*[in] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_free_mem4dma(struct file *filp, unsigned int cmd,
+					 unsigned long arg)
+{
+	return sep_ioctl(filp, cmd, arg);
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct aes_iv_params_32 {
+	u32 context_buf; /*[in] */
+	u8 iv_ptr[DXDI_AES_IV_SIZE];  /*[in]/[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_set_iv(struct file *filp, unsigned int cmd,
+				   unsigned long arg)
+{
+	struct aes_iv_params_32 up32;
+	struct dxdi_aes_iv_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		      &user_params->context_buf)
+	    || __copy_to_user(user_params->iv_ptr, up32.iv_ptr,
+			      DXDI_AES_IV_SIZE))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	return ret;
+}
+
+static int compat_sep_ioctl_get_iv(struct file *filp, unsigned int cmd,
+				   unsigned long arg)
+{
+	struct aes_iv_params_32 up32;
+	struct dxdi_aes_iv_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &user_params->context_buf)
+	    || __copy_to_user(user_params->iv_ptr, up32.iv_ptr,
+			      DXDI_AES_IV_SIZE))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	if (ret)
+		return ret;
+
+	if (__copy_from_user(up32.iv_ptr, user_params->iv_ptr,
+			     DXDI_AES_IV_SIZE))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return 0;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct sepapp_session_open_params_32 {
+	u8 app_uuid[DXDI_SEPAPP_UUID_SIZE]; /*[in] */
+	u32 auth_method;  /*[in] */
+	u32 auth_data[3]; /*[in] */
+	struct dxdi_sepapp_params app_auth_data;  /*[in/out] */
+	int session_id; /*[out] */
+	enum dxdi_sep_module sep_ret_origin;  /*[out] */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_sepapp_session_open(struct file *filp,
+						unsigned int cmd,
+						unsigned long arg)
+{
+	struct sepapp_session_open_params_32 up32;
+	struct dxdi_sepapp_session_open_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (copy_to_user(&user_params->app_uuid[0], &up32.app_uuid[0],
+			 DXDI_SEPAPP_UUID_SIZE)
+	    || __put_user(up32.auth_method, &user_params->auth_method)
+	    || copy_to_user(user_params->auth_data, &up32.auth_data,
+			    3 * sizeof(u32))
+	    || copy_to_user(&user_params->app_auth_data, &up32.app_auth_data,
+			    sizeof(struct dxdi_sepapp_params)))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+	if (ret)
+		return ret;
+
+	if (__copy_from_user(&up32.app_auth_data, &user_params->app_auth_data,
+			     sizeof(struct dxdi_sepapp_params))
+	    || __get_user(up32.session_id, &user_params->session_id)
+	    || __get_user(up32.sep_ret_origin, &user_params->sep_ret_origin)
+	    || __get_user(up32.error_info, &user_params->error_info))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return 0;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct sepapp_session_close_params_32 {
+	int session_id; /*[in] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_sepapp_session_close(struct file *filp,
+						 unsigned int cmd,
+						 unsigned long arg)
+{
+	return sep_ioctl(filp, cmd, arg);
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct sepapp_command_invoke_params_32 {
+	int session_id; /*[in] */
+	u32 command_id; /*[in] */
+	struct dxdi_sepapp_params command_params; /*[in/out] */
+	enum dxdi_sep_module sep_ret_origin;  /*[out] */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_sepapp_command_invoke(struct file *filp,
+						  unsigned int cmd,
+						  unsigned long arg)
+{
+	struct sepapp_command_invoke_params_32 up32;
+	struct dxdi_sepapp_command_invoke_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user(up32.session_id, &user_params->session_id)
+	    || __put_user(up32.command_id, &user_params->command_id)
+	    || copy_to_user(&user_params->command_params,
+			    &up32.command_params,
+			    sizeof(struct dxdi_sepapp_params)))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	if (__get_user(up32.sep_ret_origin, &user_params->sep_ret_origin)
+	    || __copy_from_user(&up32.command_params,
+				&user_params->command_params,
+				sizeof(struct dxdi_sepapp_params))
+	    || __get_user(up32.error_info, &user_params->error_info))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return ret;
+}
+
+static sep_ioctl_compat_t *sep_compat_ioctls[] = {
+	/* Version info. commands */
+	[DXDI_IOC_NR_GET_VER_MAJOR] = compat_sep_ioctl_get_ver_major,
+	[DXDI_IOC_NR_GET_VER_MINOR] = compat_sep_ioctl_get_ver_minor,
+	/* Context size queries */
+	[DXDI_IOC_NR_GET_SYMCIPHER_CTX_SIZE] = compat_sep_ioctl_get_sym_cipher_ctx_size,
+	[DXDI_IOC_NR_GET_AUTH_ENC_CTX_SIZE] = compat_sep_ioctl_get_auth_enc_ctx_size,
+	[DXDI_IOC_NR_GET_MAC_CTX_SIZE] = compat_sep_ioctl_get_mac_ctx_size,
+	[DXDI_IOC_NR_GET_HASH_CTX_SIZE] = compat_sep_ioctl_get_hash_ctx_size,
+	/* Init context commands */
+	[DXDI_IOC_NR_SYMCIPHER_INIT] = compat_sep_ioctl_sym_cipher_init,
+	[DXDI_IOC_NR_AUTH_ENC_INIT] = compat_sep_ioctl_auth_enc_init,
+	[DXDI_IOC_NR_MAC_INIT] = compat_sep_ioctl_mac_init,
+	[DXDI_IOC_NR_HASH_INIT] = compat_sep_ioctl_hash_init,
+	/* Processing commands */
+	[DXDI_IOC_NR_PROC_DBLK] = compat_sep_ioctl_proc_dblk,
+	[DXDI_IOC_NR_FIN_PROC] = compat_sep_ioctl_fin_proc,
+	/* "Integrated" processing operations */
+	[DXDI_IOC_NR_SYMCIPHER_PROC] = compat_sep_ioctl_sym_cipher_proc,
+	[DXDI_IOC_NR_AUTH_ENC_PROC] = compat_sep_ioctl_auth_enc_proc,
+	[DXDI_IOC_NR_MAC_PROC] = compat_sep_ioctl_mac_proc,
+	[DXDI_IOC_NR_HASH_PROC] = compat_sep_ioctl_hash_proc,
+	/* SeP RPC */
+	[DXDI_IOC_NR_SEP_RPC] = compat_sep_ioctl_sep_rpc,
+	/* Memory registration */
+	[DXDI_IOC_NR_REGISTER_MEM4DMA] = compat_sep_ioctl_register_mem4dma,
+	[DXDI_IOC_NR_ALLOC_MEM4DMA] = compat_sep_ioctl_alloc_mem4dma,
+	[DXDI_IOC_NR_FREE_MEM4DMA] = compat_sep_ioctl_free_mem4dma,
+	/* SeP Applets API */
+	[DXDI_IOC_NR_SEPAPP_SESSION_OPEN] = compat_sep_ioctl_sepapp_session_open,
+	[DXDI_IOC_NR_SEPAPP_SESSION_CLOSE] = compat_sep_ioctl_sepapp_session_close,
+	[DXDI_IOC_NR_SEPAPP_COMMAND_INVOKE] = compat_sep_ioctl_sepapp_command_invoke,
+	/* Combined mode */
+	[DXDI_IOC_NR_COMBINED_INIT] = compat_sep_ioctl_combined_init,
+	[DXDI_IOC_NR_COMBINED_PROC_DBLK] = compat_sep_ioctl_combined_proc_dblk,
+	[DXDI_IOC_NR_COMBINED_PROC_FIN] = compat_sep_ioctl_combined_fin_proc,
+	[DXDI_IOC_NR_COMBINED_PROC] = compat_sep_ioctl_combined_proc,
+
+	/* AES IV set/get API */
+	[DXDI_IOC_NR_SET_IV] = compat_sep_ioctl_set_iv,
+	[DXDI_IOC_NR_GET_IV] = compat_sep_ioctl_get_iv,
+};
+
+long sep_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	unsigned int nr = _IOC_NR(cmd);
+	sep_ioctl_compat_t *callback = NULL;
+	int ret;
+
+	pr_debug("Calling the IOCTL compat %d\n", nr);
+
+	if (nr < ARRAY_SIZE(sep_compat_ioctls))
+		callback = sep_compat_ioctls[nr];
+
+	if (callback != NULL)
+		ret = callback(filp, cmd, arg);
+	else
+		ret = sep_ioctl(filp, cmd, arg);
+
+	return ret;
+}
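+
+/*
+ * Wiring sketch (an assumption; the actual registration lives in the main
+ * driver file, not in this hunk): this entry point is intended to be
+ * plugged into the character device's file_operations, e.g.:
+ *
+ *	static const struct file_operations sep_fops = {
+ *		.unlocked_ioctl	= sep_ioctl,
+ *		.compat_ioctl	= sep_compat_ioctl,
+ *	};
+ */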
diff --git a/drivers/staging/sep54/sep_compat_ioctl.h b/drivers/staging/sep54/sep_compat_ioctl.h
new file mode 100644
index 0000000..5188269
--- /dev/null
+++ b/drivers/staging/sep54/sep_compat_ioctl.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2013 Intel Finland Oy
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SEP_COMPAT_IOCTL_H__
+#define __SEP_COMPAT_IOCTL_H__
+
+#include <linux/fs.h>
+
+/**
+ * \brief sep_compat_ioctl - 32-bit compat ioctl entry point
+ *
+ * \param filp	File pointer
+ * \param cmd	Ioctl command
+ * \param arg	Ioctl argument (32-bit user pointer)
+ * \return 0 on success or a negative number on failure
+ */
+long sep_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
+#endif
diff --git a/drivers/staging/sep54/sep_ctx.h b/drivers/staging/sep54/sep_ctx.h
new file mode 100644
index 0000000..0fa0259
--- /dev/null
+++ b/drivers/staging/sep54/sep_ctx.h
@@ -0,0 +1,297 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _SEP_CTX_H_
+#define _SEP_CTX_H_
+#ifdef __KERNEL__
+#include <linux/types.h>
+#define INT32_MAX 0x7FFFFFFFL
+#else
+#include <stdint.h>
+#endif
+
+#include "dx_cc_defs.h"
+
+#ifndef max
+#define max(a, b) ((a) > (b) ? (a) : (b))
+#define min(a, b) ((a) < (b) ? (a) : (b))
+#endif
+
+/* SeP context size */
+#define SEP_CTX_SIZE_LOG2 7
+#define SEP_CTX_SIZE (1<<SEP_CTX_SIZE_LOG2)
+#define SEP_CTX_SIZE_WORDS (SEP_CTX_SIZE >> 2)
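+/*
+ * Worked out for clarity: SEP_CTX_SIZE = 1 << 7 = 128 bytes, so
+ * SEP_CTX_SIZE_WORDS = 128 >> 2 = 32 32-bit words per context.
+ */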
+
+#define SEP_DES_IV_SIZE 8
+#define SEP_DES_BLOCK_SIZE 8
+
+#define SEP_DES_ONE_KEY_SIZE 8
+#define SEP_DES_DOUBLE_KEY_SIZE 16
+#define SEP_DES_TRIPLE_KEY_SIZE 24
+#define SEP_DES_KEY_SIZE_MAX SEP_DES_TRIPLE_KEY_SIZE
+
+#define SEP_AES_IV_SIZE 16
+#define SEP_AES_IV_SIZE_WORDS (SEP_AES_IV_SIZE >> 2)
+
+#define SEP_AES_BLOCK_SIZE 16
+#define SEP_AES_BLOCK_SIZE_WORDS 4
+
+#define SEP_AES_128_BIT_KEY_SIZE 16
+#define SEP_AES_128_BIT_KEY_SIZE_WORDS	(SEP_AES_128_BIT_KEY_SIZE >> 2)
+#define SEP_AES_192_BIT_KEY_SIZE 24
+#define SEP_AES_192_BIT_KEY_SIZE_WORDS	(SEP_AES_192_BIT_KEY_SIZE >> 2)
+#define SEP_AES_256_BIT_KEY_SIZE 32
+#define SEP_AES_256_BIT_KEY_SIZE_WORDS	(SEP_AES_256_BIT_KEY_SIZE >> 2)
+#define SEP_AES_KEY_SIZE_MAX			SEP_AES_256_BIT_KEY_SIZE
+#define SEP_AES_KEY_SIZE_WORDS_MAX		(SEP_AES_KEY_SIZE_MAX >> 2)
+
+#define SEP_SHA1_DIGEST_SIZE 20
+#define SEP_SHA224_DIGEST_SIZE 28
+#define SEP_SHA256_DIGEST_SIZE 32
+#define SEP_SHA384_DIGEST_SIZE 48
+#define SEP_SHA512_DIGEST_SIZE 64
+#define SEP_SHA1024_DIGEST_SIZE 128
+
+#define SEP_SHA1_BLOCK_SIZE 64
+#define SEP_SHA224_BLOCK_SIZE 64
+#define SEP_SHA256_BLOCK_SIZE 64
+#define SEP_SHA1_224_256_BLOCK_SIZE 64
+#define SEP_SHA384_BLOCK_SIZE 128
+#define SEP_SHA512_BLOCK_SIZE 128
+#define SEP_SHA1024_BLOCK_SIZE 128
+
+#if (SEP_SUPPORT_SHA > 256)
+#define SEP_DIGEST_SIZE_MAX SEP_SHA512_DIGEST_SIZE
+#define SEP_HASH_BLOCK_SIZE_MAX SEP_SHA512_BLOCK_SIZE	/* 128 octets (1024 bits) */
+#else				/* Only up to SHA256 */
+#define SEP_DIGEST_SIZE_MAX SEP_SHA256_DIGEST_SIZE
+#define SEP_HASH_BLOCK_SIZE_MAX SEP_SHA256_BLOCK_SIZE	/* 64 octets (512 bits) */
+#endif
+
+#define SEP_HMAC_BLOCK_SIZE_MAX SEP_HASH_BLOCK_SIZE_MAX
+
+#define SEP_RC4_KEY_SIZE_MIN 1
+#define SEP_RC4_KEY_SIZE_MAX 20
+#define SEP_RC4_STATE_SIZE 264
+
+#define SEP_C2_KEY_SIZE_MAX 16
+#define SEP_C2_BLOCK_SIZE 8
+
+#define SEP_ALG_MAX_BLOCK_SIZE SEP_HASH_BLOCK_SIZE_MAX
+
+#define SEP_MAX_COMBINED_ENGINES 4
+
+#define SEP_MAX_CTX_SIZE (max(sizeof(struct sep_ctx_rc4), \
+				sizeof(struct sep_ctx_cache_entry)))
+enum sep_engine_type {
+	SEP_ENGINE_NULL = 0,
+	SEP_ENGINE_AES = 1,
+	SEP_ENGINE_DES = 2,
+	SEP_ENGINE_HASH = 3,
+	SEP_ENGINE_RC4 = 4,
+	SEP_ENGINE_DOUT = 5,
+	SEP_ENGINE_RESERVE32B = INT32_MAX,
+};
+
+enum sep_crypto_alg {
+	SEP_CRYPTO_ALG_NULL = -1,
+	SEP_CRYPTO_ALG_AES = 0,
+	SEP_CRYPTO_ALG_DES = 1,
+	SEP_CRYPTO_ALG_HASH = 2,
+	SEP_CRYPTO_ALG_RC4 = 3,
+	SEP_CRYPTO_ALG_C2 = 4,
+	SEP_CRYPTO_ALG_HMAC = 5,
+	SEP_CRYPTO_ALG_AEAD = 6,
+	SEP_CRYPTO_ALG_BYPASS = 7,
+	SEP_CRYPTO_ALG_COMBINED = 8,
+	SEP_CRYPTO_ALG_NUM = 9,
+	SEP_CRYPTO_ALG_RESERVE32B = INT32_MAX
+};
+
+enum sep_crypto_direction {
+	SEP_CRYPTO_DIRECTION_NULL = -1,
+	SEP_CRYPTO_DIRECTION_ENCRYPT = 0,
+	SEP_CRYPTO_DIRECTION_DECRYPT = 1,
+	SEP_CRYPTO_DIRECTION_DECRYPT_ENCRYPT = 3,
+	SEP_CRYPTO_DIRECTION_RESERVE32B = INT32_MAX
+};
+
+enum sep_cipher_mode {
+	SEP_CIPHER_NULL_MODE = -1,
+	SEP_CIPHER_ECB = 0,
+	SEP_CIPHER_CBC = 1,
+	SEP_CIPHER_CTR = 2,
+	SEP_CIPHER_CBC_MAC = 3,
+	SEP_CIPHER_XTS = 4,
+	SEP_CIPHER_XCBC_MAC = 5,
+	SEP_CIPHER_CMAC = 7,
+	SEP_CIPHER_CCM = 8,
+	SEP_CIPHER_RESERVE32B = INT32_MAX
+};
+
+enum sep_hash_mode {
+	SEP_HASH_NULL = -1,
+	SEP_HASH_SHA1 = 0,
+	SEP_HASH_SHA256 = 1,
+	SEP_HASH_SHA224 = 2,
+	SEP_HASH_MODE_NUM = 3,
+
+	/* Unsupported */
+	SEP_HASH_SHA512 = 3,
+	SEP_HASH_SHA384 = 4,
+	SEP_HASH_RESERVE32B = INT32_MAX
+};
+
+enum sep_hash_hw_mode {
+	SEP_HASH_HW_SHA1 = 1,
+	SEP_HASH_HW_SHA256 = 2,
+	SEP_HASH_HW_SHA224 = 10,
+	SEP_HASH_HW_SHA512 = 4,
+	SEP_HASH_HW_SHA384 = 12,
+	SEP_HASH_HW_RESERVE32B = INT32_MAX
+};
+
+enum sep_c2_mode {
+	SEP_C2_NULL = -1,
+	SEP_C2_ECB = 0,
+	SEP_C2_CBC = 1,
+	SEP_C2_RESERVE32B = INT32_MAX
+};
+
+/*******************************************************************/
+/***************** DESCRIPTOR BASED CONTEXTS ***********************/
+/*******************************************************************/
+
+ /* Generic context ("super-class") */
+struct sep_ctx_generic {
+	enum sep_crypto_alg alg;
+} __attribute__ ((__may_alias__));
+
+/* Cache context entry ("sub-class") */
+struct sep_ctx_cache_entry {
+	enum sep_crypto_alg alg;
+	u32 reserved[SEP_CTX_SIZE_WORDS - 1];
+};
+
+struct sep_ctx_c2 {
+	enum sep_crypto_alg alg;	/* SEP_CRYPTO_ALG_C2 */
+	enum sep_c2_mode mode;
+	enum sep_crypto_direction direction;
+	/* reserve to end of allocated context size */
+	u32 key_size;	/* numeric value in bytes */
+	u8 key[SEP_C2_KEY_SIZE_MAX];
+	u8 reserved[SEP_CTX_SIZE - 4 * sizeof(u32) -
+			 SEP_C2_KEY_SIZE_MAX];
+};
+
+struct sep_ctx_hash {
+	enum sep_crypto_alg alg;	/* SEP_CRYPTO_ALG_HASH */
+	enum sep_hash_mode mode;
+	u8 digest[SEP_DIGEST_SIZE_MAX];
+	/* reserve to end of allocated context size */
+	u8 reserved[SEP_CTX_SIZE - 2 * sizeof(u32) -
+			 SEP_DIGEST_SIZE_MAX];
+};
+
+/* NOTE: sep_ctx_hmac must have the same layout as sep_ctx_hash, with the
+   additional k0 and k0_size fields appended after the digest */
+struct sep_ctx_hmac {
+	enum sep_crypto_alg alg;	/* SEP_CRYPTO_ALG_HMAC */
+	enum sep_hash_mode mode;
+	u8 digest[SEP_DIGEST_SIZE_MAX];
+	u8 k0[SEP_HMAC_BLOCK_SIZE_MAX];
+	u32 k0_size;
+	/* reserve to end of allocated context size */
+	u8 reserved[SEP_CTX_SIZE - 3 * sizeof(u32) -
+			 SEP_DIGEST_SIZE_MAX - SEP_HMAC_BLOCK_SIZE_MAX];
+};
+
+struct sep_ctx_cipher {
+	enum sep_crypto_alg alg;	/* SEP_CRYPTO_ALG_AES */
+	enum sep_cipher_mode mode;
+	enum sep_crypto_direction direction;
+	enum dx_crypto_key_type crypto_key_type;
+	u32 key_size;	/* numeric value in bytes   */
+	u32 data_unit_size;	/* required for XTS */
+	/* block_state is the AES engine block state.
+	 *  It is used by the host to pass IV or counter at initialization.
+	 *  It is used by SeP for intermediate block chaining state and for
+	 *  returning MAC algorithms results.           */
+	u8 block_state[SEP_AES_BLOCK_SIZE];
+	u8 key[SEP_AES_KEY_SIZE_MAX];
+	u8 xex_key[SEP_AES_KEY_SIZE_MAX];
+	/* reserve to end of allocated context size */
+	u32 reserved[SEP_CTX_SIZE_WORDS - 6 -
+			  SEP_AES_BLOCK_SIZE / sizeof(u32) - 2 *
+			  (SEP_AES_KEY_SIZE_MAX / sizeof(u32))];
+};
+
+/* authentication and encryption with associated data class */
+struct sep_ctx_aead {
+	enum sep_crypto_alg alg;	/* SEP_CRYPTO_ALG_AES */
+	enum sep_cipher_mode mode;
+	enum sep_crypto_direction direction;
+	enum dx_crypto_key_type crypto_key_type;
+	u32 key_size;	/* numeric value in bytes   */
+	u32 nonce_size;	/* nonce size (octets) */
+	u32 header_size;	/* finite additional data size (octets) */
+	u32 text_size;	/* finite text data size (octets) */
+	/* mac size, element of {4, 6, 8, 10, 12, 14, 16} */
+	u32 tag_size;
+	/* block_state1/2 is the AES engine block state */
+	u8 block_state[SEP_AES_BLOCK_SIZE];
+	u8 mac_state[SEP_AES_BLOCK_SIZE];	/* MAC result */
+	u8 nonce[SEP_AES_BLOCK_SIZE];	/* nonce buffer */
+	u8 key[SEP_AES_KEY_SIZE_MAX];
+	/* reserve to end of allocated context size */
+	u32 reserved[SEP_CTX_SIZE_WORDS - 9 -
+			  3 * (SEP_AES_BLOCK_SIZE / sizeof(u32)) -
+			  SEP_AES_KEY_SIZE_MAX / sizeof(u32)];
+};
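+
+/*
+ * Layout note: each fixed-size context above pads itself with a trailing
+ * "reserved" array so that its total size equals SEP_CTX_SIZE, which lets
+ * any of them occupy a sep_ctx_cache_entry slot. A build-time sanity check
+ * along these lines (a sketch, not part of the original code) would catch
+ * accidental layout drift:
+ *
+ *	BUILD_BUG_ON(sizeof(struct sep_ctx_hash) != SEP_CTX_SIZE);
+ *	BUILD_BUG_ON(sizeof(struct sep_ctx_hmac) != SEP_CTX_SIZE);
+ *	BUILD_BUG_ON(sizeof(struct sep_ctx_cipher) != SEP_CTX_SIZE);
+ *	BUILD_BUG_ON(sizeof(struct sep_ctx_aead) != SEP_CTX_SIZE);
+ */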
+
+/* crys combined context */
+struct sep_ctx_combined {
+	enum sep_crypto_alg alg;
+	u32 mode;
+	/* array of sub contexts used for the combined operation      *
+	 *  according to the given mode                               */
+	struct sep_ctx_cache_entry *sub_ctx[SEP_MAX_COMBINED_ENGINES];
+	/* store the host contexts addresses (optimization) */
+	u32 host_addr[SEP_MAX_COMBINED_ENGINES];
+};
+
+/*******************************************************************/
+/***************** MESSAGE BASED CONTEXTS **************************/
+/*******************************************************************/
+
+struct sep_ctx_rc4 {
+	enum sep_crypto_alg alg;	/* SEP_CRYPTO_ALG_RC4 */
+	u32 key_size;	/* numeric value in bytes */
+	u8 key[SEP_RC4_KEY_SIZE_MAX];
+	u8 state[SEP_RC4_STATE_SIZE];
+};
+
+#endif				/* _SEP_CTX_H_ */
diff --git a/drivers/staging/sep54/sep_init.c b/drivers/staging/sep54/sep_init.c
new file mode 100644
index 0000000..c513512
--- /dev/null
+++ b/drivers/staging/sep54/sep_init.c
@@ -0,0 +1,848 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+/* \file
+   This file implements the SeP FW initialization sequence.
+   This is part of the Discretix CC initialization specifications.          */
+
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_SEP_INIT
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+/* Registers definitions from shared/hw/include */
+#include "dx_reg_base_host.h"
+#include "dx_host.h"
+#define DX_CC_HOST_VIRT	/* must be defined before including dx_cc_regs.h */
+#include "dx_cc_regs.h"
+#include "dx_bitops.h"
+#include "sep_log.h"
+#include "dx_driver.h"
+#include "dx_init_cc_abi.h"
+#include "dx_init_cc_defs.h"
+#include "sep_sram_map.h"
+#include "sep_init.h"
+#include "sep_request_mgr.h"
+#include "sep_power.h"
+
+#ifdef DEBUG
+#define FW_INIT_TIMEOUT_SEC     10
+#define COLD_BOOT_TIMEOUT_SEC	10
+#else
+#define FW_INIT_TIMEOUT_SEC     3
+#define COLD_BOOT_TIMEOUT_SEC	3
+#endif
+#define FW_INIT_TIMEOUT_MSEC	(FW_INIT_TIMEOUT_SEC * 1000)
+#define COLD_BOOT_TIMEOUT_MSEC  (COLD_BOOT_TIMEOUT_SEC * 1000)
+
+#define FW_INIT_PARAMS_BUF_LEN		1024
+
+/*** CC_INIT handlers ***/
+
+/**
+ * struct cc_init_ctx - CC init. context
+ * @drvdata:		Associated device driver
+ * @resident_p:		Pointer to resident image buffer
+ * @resident_dma_addr:	DMA address of resident image buffer
+ * @resident_size:	Size in bytes of the resident image
+ * @cache_p:	Pointer to (i)cache image buffer
+ * @cache_dma_addr:	DMA address of the (i)cache image
+ * @cache_size:		Size in bytes of the (i)cache image
+ * @vrl_p:		Pointer to VRL image buffer
+ * @vrl_dma_addr:	DMA address of the VRL
+ * @vrl_size:		Size in bytes of the VRL
+ * @msg_buf:		A buffer for building the CC-Init message
+ */
+struct cc_init_ctx {
+	struct sep_drvdata *drvdata;
+	void *resident_p;
+	dma_addr_t resident_dma_addr;
+	size_t resident_size;
+	void *cache_p;
+	dma_addr_t cache_dma_addr;
+	size_t cache_size;
+	void *vrl_p;
+	dma_addr_t vrl_dma_addr;
+	size_t vrl_size;
+	u32 msg_buf[DX_CC_INIT_MSG_LENGTH];
+};
+
+static void destroy_cc_init_ctx(struct cc_init_ctx *ctx)
+{
+	struct device *mydev = ctx->drvdata->dev;
+
+	if (ctx->vrl_p != NULL)
+		dma_free_coherent(mydev, ctx->vrl_size,
+				  ctx->vrl_p, ctx->vrl_dma_addr);
+	if (ctx->cache_p != NULL)
+		dma_free_coherent(mydev, ctx->cache_size,
+				  ctx->cache_p, ctx->cache_dma_addr);
+	if (ctx->resident_p != NULL)
+		dma_free_coherent(mydev, ctx->resident_size,
+				  ctx->resident_p, ctx->resident_dma_addr);
+	kfree(ctx);
+}
+
+/**
+ * fetch_image() - Fetch CC image using request_firmware mechanism and
+ *	locate it in a DMA coherent buffer.
+ *
+ * @mydev:		Device context
+ * @image_name:		Image file name (from /lib/firmware/)
+ * @image_pp:		Allocated image buffer
+ * @image_dma_addr_p:	Allocated image DMA addr
+ * @image_size_p:	Loaded image size
+ */
+static int fetch_image(struct device *mydev, const char *image_name,
+		       void **image_pp, dma_addr_t *image_dma_addr_p,
+		       size_t *image_size_p)
+{
+	const struct firmware *image;
+	int rc;
+
+	rc = request_firmware(&image, image_name, mydev);
+	if (unlikely(rc != 0)) {
+		pr_err("Failed loading image %s (%d)\n", image_name, rc);
+		return -ENODEV;
+	}
+	*image_pp = dma_alloc_coherent(mydev,
+				       image->size, image_dma_addr_p,
+				       GFP_KERNEL);
+	if (unlikely(*image_pp == NULL)) {
+		pr_err("Failed allocating DMA mem. for resident image\n");
+		rc = -ENOMEM;
+	} else {
+		memcpy(*image_pp, image->data, image->size);
+		*image_size_p = image->size;
+	}
+	/* Image copied into the DMA coherent buffer. No need for "firmware" */
+	release_firmware(image);
+	if (likely(rc == 0))
+		pr_debug("%s: %zu Bytes\n", image_name, *image_size_p);
+	return rc;
+}
+
+#ifdef CACHE_IMAGE_NAME
+static enum dx_cc_init_msg_icache_size
+icache_size_to_enum(u8 icache_size_log2)
+{
+	int i;
+	const int icache_sizes_enum2log[] = DX_CC_ICACHE_SIZE_ENUM2LOG;
+
+	for (i = 0; i < sizeof(icache_sizes_enum2log) / sizeof(int); i++)
+		if ((icache_size_log2 == icache_sizes_enum2log[i]) &&
+		    (icache_sizes_enum2log[i] >= 0))
+			return (enum dx_cc_init_msg_icache_size)i;
+	pr_err("Requested Icache size (%uKB) is invalid\n",
+		    1 << (icache_size_log2 - 10));
+	return DX_CC_INIT_MSG_ICACHE_SCR_INVALID_SIZE;
+}
+#endif
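+
+/*
+ * The mapping above converts a log2 byte size into the message enum; e.g.
+ * a hypothetical 32KB icache is described by icache_size_log2 = 15, since
+ * 1 << (15 - 10) = 32 (KB), provided 15 appears in the ENUM2LOG table.
+ */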
+
+/**
+ * get_cc_init_checksum() - Calculate CC_INIT message checksum
+ *
+ * @msg_p:	Pointer to the message buffer; the message length is fixed
+ *		at DX_CC_INIT_MSG_LENGTH words
+ */
+static u32 get_cc_init_checksum(u32 *msg_p)
+{
+	int bytes_remain;
+	u32 sum = 0;
+	u16 *tdata = (u16 *)msg_p;
+
+	for (bytes_remain = DX_CC_INIT_MSG_LENGTH * sizeof(u32);
+	     bytes_remain > 1; bytes_remain -= 2)
+		sum += *tdata++;
+	/*  Add left-over byte, if any */
+	if (bytes_remain > 0)
+		sum += *(u8 *)tdata;
+	/*  Fold 32-bit sum to 16 bits */
+	while ((sum >> 16) != 0)
+		sum = (sum & 0xFFFF) + (sum >> 16);
+	return ~sum & 0xFFFF;
+}
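+
+/*
+ * This is the classic 16-bit ones'-complement checksum (the same scheme as
+ * the RFC 1071 Internet checksum): 16-bit words are summed into a 32-bit
+ * accumulator, the carries are folded back into the low 16 bits, and the
+ * result is inverted. As a worked example, a partial sum of 0x0002FFFD
+ * folds as (0xFFFD + 0x0002) = 0xFFFF and inverts to 0x0000.
+ */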
+
+static void build_cc_init_msg(struct cc_init_ctx *init_ctx)
+{
+	u32 *const msg_p = init_ctx->msg_buf;
+	struct sep_drvdata *drvdata = init_ctx->drvdata;
+#ifndef VRL_KEY_INDEX
+	/* Verify VRL key against this truncated hash value */
+	u32 const vrl_key_hash[] = VRL_KEY_HASH;
+#endif
+#ifdef CACHE_IMAGE_NAME
+	enum dx_cc_init_msg_icache_size icache_size_code;
+#endif
+
+	memset(msg_p, 0, DX_CC_INIT_MSG_LENGTH * sizeof(u32));
+	msg_p[DX_CC_INIT_MSG_TOKEN_OFFSET] = DX_CC_INIT_HEAD_MSG_TOKEN;
+	msg_p[DX_CC_INIT_MSG_LENGTH_OFFSET] = DX_CC_INIT_MSG_LENGTH;
+	msg_p[DX_CC_INIT_MSG_OP_CODE_OFFSET] = DX_HOST_REQ_CC_INIT;
+	msg_p[DX_CC_INIT_MSG_FLAGS_OFFSET] =
+	    DX_CC_INIT_FLAGS_RESIDENT_ADDR_FLAG;
+	msg_p[DX_CC_INIT_MSG_RESIDENT_IMAGE_OFFSET] =
+	    init_ctx->resident_dma_addr;
+
+#ifdef CACHE_IMAGE_NAME
+	icache_size_code = icache_size_to_enum(drvdata->icache_size_log2);
+	msg_p[DX_CC_INIT_MSG_FLAGS_OFFSET] |=
+	    DX_CC_INIT_FLAGS_I_CACHE_ADDR_FLAG |
+	    DX_CC_INIT_FLAGS_D_CACHE_EXIST_FLAG |
+	    DX_CC_INIT_FLAGS_CACHE_ENC_FLAG | DX_CC_INIT_FLAGS_CACHE_COPY_FLAG;
+#ifdef DX_CC_INIT_FLAGS_CACHE_SCRAMBLE_FLAG
+	/* Enable scrambling if available */
+	msg_p[DX_CC_INIT_MSG_FLAGS_OFFSET] |=
+	    DX_CC_INIT_FLAGS_CACHE_SCRAMBLE_FLAG;
+#endif
+	msg_p[DX_CC_INIT_MSG_I_CACHE_IMAGE_OFFSET] = init_ctx->cache_dma_addr;
+	msg_p[DX_CC_INIT_MSG_I_CACHE_DEST_OFFSET] =
+	    page_to_phys(drvdata->icache_pages);
+	msg_p[DX_CC_INIT_MSG_I_CACHE_SIZE_OFFSET] = icache_size_code;
+	msg_p[DX_CC_INIT_MSG_D_CACHE_ADDR_OFFSET] =
+	    page_to_phys(drvdata->dcache_pages);
+	msg_p[DX_CC_INIT_MSG_D_CACHE_SIZE_OFFSET] =
+	    1 << drvdata->dcache_size_log2;
+#elif defined(SEP_BACKUP_BUF_SIZE)
+	/* Declare SEP backup buffer resources */
+	msg_p[DX_CC_INIT_MSG_HOST_BUFF_ADDR_OFFSET] =
+	    virt_to_phys(drvdata->sep_backup_buf);
+	msg_p[DX_CC_INIT_MSG_HOST_BUFF_SIZE_OFFSET] =
+	    drvdata->sep_backup_buf_size;
+#endif				/*CACHE_IMAGE_NAME */
+
+	msg_p[DX_CC_INIT_MSG_VRL_ADDR_OFFSET] = init_ctx->vrl_dma_addr;
+	/* Handle VRL key hash */
+#ifdef VRL_KEY_INDEX
+	msg_p[DX_CC_INIT_MSG_KEY_INDEX_OFFSET] = VRL_KEY_INDEX;
+#else	/* Key should be validated against VRL_KEY_HASH */
+	msg_p[DX_CC_INIT_MSG_KEY_INDEX_OFFSET] =
+	    DX_CC_INIT_MSG_VRL_KEY_INDEX_INVALID;
+	msg_p[DX_CC_INIT_MSG_KEY_HASH_0_OFFSET] = vrl_key_hash[0];
+	msg_p[DX_CC_INIT_MSG_KEY_HASH_1_OFFSET] = vrl_key_hash[1];
+	msg_p[DX_CC_INIT_MSG_KEY_HASH_2_OFFSET] = vrl_key_hash[2];
+	msg_p[DX_CC_INIT_MSG_KEY_HASH_3_OFFSET] = vrl_key_hash[3];
+#endif
+
+	msg_p[DX_CC_INIT_MSG_CHECK_SUM_OFFSET] = get_cc_init_checksum(msg_p);
+
+	dump_word_array("CC_INIT", msg_p, DX_CC_INIT_MSG_LENGTH);
+}
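+
+/*
+ * The assembled message is a fixed-length word array: token, length and
+ * op-code first, then a flags word, the DMA addresses of the resident
+ * image, the (i)cache/backup resources and the VRL, the VRL key index or
+ * its truncated hash, and finally the ones'-complement checksum computed
+ * by get_cc_init_checksum() over the whole buffer (with the checksum word
+ * still zero at that point).
+ */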
+
+/**
+ * create_cc_init_ctx() - Create CC-INIT message and allocate associated
+ *	resources (load FW images, etc.)
+ *
+ * @drvdata:		Device context
+ *
+ * Returns the allocated message context or NULL on failure.
+ */
+static struct cc_init_ctx *create_cc_init_ctx(struct sep_drvdata *drvdata)
+{
+	struct cc_init_ctx *init_ctx;
+	struct device *const mydev = drvdata->dev;
+	int rc;
+
+	init_ctx = kzalloc(sizeof(struct cc_init_ctx), GFP_KERNEL);
+	if (unlikely(init_ctx == NULL)) {
+		pr_err("Failed allocating CC-Init. context\n");
+		rc = -ENOMEM;
+		goto create_err;
+	}
+	init_ctx->drvdata = drvdata;
+	rc = fetch_image(mydev, RESIDENT_IMAGE_NAME, &init_ctx->resident_p,
+			 &init_ctx->resident_dma_addr,
+			 &init_ctx->resident_size);
+	if (unlikely(rc != 0))
+		goto create_err;
+#ifdef CACHE_IMAGE_NAME
+	rc = fetch_image(mydev, CACHE_IMAGE_NAME, &init_ctx->cache_p,
+			 &init_ctx->cache_dma_addr, &init_ctx->cache_size);
+	if (unlikely(rc != 0))
+		goto create_err;
+#endif				/*CACHE_IMAGE_NAME */
+	rc = fetch_image(mydev, VRL_IMAGE_NAME, &init_ctx->vrl_p,
+			 &init_ctx->vrl_dma_addr, &init_ctx->vrl_size);
+	if (unlikely(rc != 0))
+		goto create_err;
+	build_cc_init_msg(init_ctx);
+	return init_ctx;
+
+ create_err:
+
+	if (init_ctx != NULL)
+		destroy_cc_init_ctx(init_ctx);
+	return NULL;
+}
+
+/**
+ * sepinit_wait_for_cold_boot_finish() - Wait for SeP to reach cold-boot-finish
+ *					state (i.e., ready for driver-init)
+ * @drvdata:
+ *
+ * Returns int 0 for success, !0 on timeout while waiting for cold-boot-finish
+ */
+static int sepinit_wait_for_cold_boot_finish(struct sep_drvdata *drvdata)
+{
+	enum dx_sep_state cur_state;
+	u32 cur_status;
+	int rc = 0;
+
+	cur_state =
+	    dx_sep_wait_for_state(DX_SEP_STATE_DONE_COLD_BOOT |
+				  DX_SEP_STATE_FATAL_ERROR,
+				  COLD_BOOT_TIMEOUT_MSEC);
+	if (cur_state != DX_SEP_STATE_DONE_COLD_BOOT) {
+		rc = -EIO;
+		cur_status =
+		    READ_REGISTER(drvdata->cc_base + SEP_STATUS_GPR_OFFSET);
+		pr_err(
+			    "Failed waiting for DONE_COLD_BOOT from SeP (state=0x%08X status=0x%08X)\n",
+			    cur_state, cur_status);
+	}
+
+	return rc;
+}
+
+/**
+ * dispatch_cc_init_msg() - Push given CC_INIT message into SRAM and signal
+ *	SeP to start cold boot sequence
+ *
+ * @drvdata:
+ * @cc_init_ctx_p:	A pointer to the CC-init message context
+ */
+static int dispatch_cc_init_msg(struct sep_drvdata *drvdata,
+				struct cc_init_ctx *cc_init_ctx_p)
+{
+	int i;
+	u32 is_data_ready;
+	/*
+	 * get the base address of the SRAM and add the offset
+	 * for the CC_Init message
+	 */
+	const u32 msg_target_addr =
+	    READ_REGISTER(DX_CC_REG_ADDR(drvdata->cc_base,
+					 HOST, HOST_SEP_SRAM_THRESHOLD)) +
+	    DX_CC_INIT_MSG_OFFSET_IN_SRAM;
+
+	/* Initialize SRAM access address register for message location */
+	WRITE_REGISTER(DX_CC_REG_ADDR(drvdata->cc_base, SRAM, SRAM_ADDR),
+		       msg_target_addr);
+	/* Write the message word by word to the SeP internal SRAM offset */
+	for (i = 0; i < sizeof(cc_init_ctx_p->msg_buf) / sizeof(u32);
+		i++) {
+		/* write data to SRAM */
+		WRITE_REGISTER(DX_CC_REG_ADDR(drvdata->cc_base,
+					      SRAM, SRAM_DATA),
+			       cc_init_ctx_p->msg_buf[i]);
+		/* wait for write complete */
+		do {
+			is_data_ready = 1 &
+			    READ_REGISTER(DX_CC_REG_ADDR(drvdata->cc_base,
+							 SRAM,
+							 SRAM_DATA_READY));
+		} while (!is_data_ready);
+		/* TODO: Timeout in case something gets broken */
+	}
+	/* Signal SeP: Request CC_INIT */
+	WRITE_REGISTER(drvdata->cc_base +
+		       HOST_SEP_GPR_REG_OFFSET(DX_HOST_REQ_GPR_IDX),
+		       DX_HOST_REQ_CC_INIT);
+	return 0;
+}
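+
+/*
+ * A note on the SRAM handshake above: SRAM_ADDR is programmed once with
+ * the target offset and, judging from this usage, the hardware
+ * auto-increments the address on every SRAM_DATA write, with bit 0 of
+ * SRAM_DATA_READY acting as a per-word write-complete flag. The access
+ * pattern is essentially:
+ *
+ *	write SRAM_ADDR = msg_target_addr
+ *	for each word:  write SRAM_DATA = word; poll SRAM_DATA_READY & 1
+ *	write GPR[DX_HOST_REQ_GPR_IDX] = DX_HOST_REQ_CC_INIT
+ */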
+
+/**
+ * sepinit_do_cc_init() - Initiate SeP cold boot sequence and wait for
+ *	its completion.
+ *
+ * @drvdata:
+ *
+ * This function loads the CC firmware and dispatches a CC_INIT request message
+ * Returns int 0 for success
+ */
+int sepinit_do_cc_init(struct sep_drvdata *drvdata)
+{
+	u32 cur_state;
+	struct cc_init_ctx *cc_init_ctx_p;
+	int rc;
+
+	/* Sep was already initialized */
+	if (GET_SEP_STATE(drvdata) == DX_SEP_STATE_DONE_FW_INIT)
+		return 0;
+
+	cur_state = dx_sep_wait_for_state(DX_SEP_STATE_START_SECURE_BOOT,
+					  COLD_BOOT_TIMEOUT_MSEC);
+	if (cur_state != DX_SEP_STATE_START_SECURE_BOOT) {
+		pr_err("Bad SeP state = 0x%08X\n", cur_state);
+		return -EIO;
+	}
+#ifdef __BIG_ENDIAN
+	/* Enable byte swapping in DMA operations */
+	WRITE_REGISTER(DX_CC_REG_ADDR(drvdata->cc_base, HOST, HOST_HOST_ENDIAN),
+		       0xCCUL);
+	/* TODO: Define value in device specific header files? */
+#endif
+	cc_init_ctx_p = create_cc_init_ctx(drvdata);
+	if (likely(cc_init_ctx_p != NULL))
+		rc = dispatch_cc_init_msg(drvdata, cc_init_ctx_p);
+	else
+		rc = -ENOMEM;
+	if (likely(rc == 0))
+		rc = sepinit_wait_for_cold_boot_finish(drvdata);
+	if (cc_init_ctx_p != NULL)
+		destroy_cc_init_ctx(cc_init_ctx_p);
+	return rc;
+}
+
+/*** FW_INIT handlers ***/
+
+#ifdef DEBUG
+#define ENUM_CASE_RETURN_STR(enum_name) case enum_name: return #enum_name
+
+static const char *param2str(enum dx_fw_init_tlv_params param_type)
+{
+	switch (param_type) {
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_NULL);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_FIRST);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_LAST);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_DISABLE_MODULES);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_HOST_AXI_CONFIG);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_HOST_DEF_APPLET_CONFIG);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_NUM_OF_DESC_QS);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_DESC_QS_ADDR);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_DESC_QS_SIZE);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_CTX_CACHE_PART);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_SEP_REQUEST_PARAMS);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_SEP_FREQ);
+	default:
+		return "(unknown param.)";
+	}
+}
+
+static void dump_fwinit_params(struct sep_drvdata *drvdata,
+			       u32 *fw_init_params_buf_p)
+{
+#define LINE_BUF_LEN 90		/* increased to hold values for 2 queues */
+	const u32 last_tl_word = DX_TL_WORD(DX_FW_INIT_PARAM_LAST, 1);
+	u32 tl_word;
+	u16 type, len;
+	u32 *cur_buf_p;
+	unsigned int i = 0;
+	char line_buf[LINE_BUF_LEN];
+	unsigned int line_offset;
+
+	pr_debug("Dx SeP fw_init params dump:\n");
+	cur_buf_p = fw_init_params_buf_p;
+	do {
+		tl_word = le32_to_cpu(*cur_buf_p);
+		cur_buf_p++;
+		type = DX_TL_GET_TYPE(tl_word);
+		len = DX_TL_GET_LENGTH(tl_word);
+
+		if ((cur_buf_p + len - fw_init_params_buf_p) >
+		    (FW_INIT_PARAMS_BUF_LEN / sizeof(u32))) {
+			pr_err("LAST parameter not found up to buffer end\n");
+			break;
+		}
+
+		line_offset = snprintf(line_buf, LINE_BUF_LEN,
+				       "Type=0x%04X (%s), Length=%u , Val={",
+				       type, param2str(type), len);
+		for (i = 0; i < len; i++) {
+			/*
+			 * 11 is the length of a printed value
+			 * (formatted as "0x%08X," in the next
+			 * call to snprintf)
+			 */
+			if (line_offset + 11 >= LINE_BUF_LEN) {
+				pr_debug("%s\n", line_buf);
+				line_offset = 0;
+			}
+			line_offset += snprintf(line_buf + line_offset,
+						LINE_BUF_LEN - line_offset,
+						"0x%08X,",
+						le32_to_cpu(*cur_buf_p));
+			cur_buf_p++;
+		}
+		pr_debug("%s}\n", line_buf);
+	} while (tl_word != last_tl_word);
+}
+#else
+#define dump_fwinit_params(drvdata, fw_init_params_buf_p) do {} while (0)
+#endif /*DEBUG*/
+
+/**
+ * add_fwinit_param() - Add TLV parameter for FW-init.
+ * @tlv_buf:	 The base of the TLV parameters buffers
+ * @idx_p:	 (in/out): Current tlv_buf word index
+ * @checksum_p:	 (in/out): 32bit checksum for TLV array
+ * @type:	 Parameter type
+ * @length:	 Parameter length (size in words of "values")
+ * @values:	 Values array ("length" values)
+ *
+ * Returns void
+ */
+static void add_fwinit_param(u32 *tlv_buf, u32 *idx_p,
+			     u32 *checksum_p,
+			     enum dx_fw_init_tlv_params type, u16 length,
+			     const u32 *values)
+{
+	const u32 tl_word = DX_TL_WORD(type, length);
+	int i;
+
+#ifdef DEBUG
+	/* Verify that we have enough space for LAST param. after this param. */
+	if ((*idx_p + 1 + length + 2) > (FW_INIT_PARAMS_BUF_LEN / 4)) {
+		pr_err("tlv_buf size limit reached!\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	/* Add type-length word */
+	tlv_buf[(*idx_p)++] = cpu_to_le32(tl_word);
+	*checksum_p += tl_word;
+
+	/* Add values */
+	for (i = 0; i < length; i++) {
+		/* Value words (if any) follow the TL word */
+		tlv_buf[(*idx_p)++] = cpu_to_le32(values[i]);
+		*checksum_p += values[i];
+	}
+}
+
+/**
+ * init_fwinit_param_list() - Initialize TLV parameters list
+ * @tlv_buf:	The pointer to the TLV list array buffer
+ * @idx_p:	The pointer to the variable that would maintain current
+ *		position in the tlv_array
+ * @checksum_p:	The pointer to the variable that would accumulate the
+ *		TLV array checksum
+ *
+ * Returns void
+ */
+static void init_fwinit_param_list(u32 *tlv_buf, u32 *idx_p,
+				   u32 *checksum_p)
+{
+	const u32 magic = DX_FW_INIT_PARAM_FIRST_MAGIC;
+	/* Initialize index and checksum variables */
+	*idx_p = 0;
+	*checksum_p = 0;
+	/* Start with FIRST_MAGIC parameter */
+	add_fwinit_param(tlv_buf, idx_p, checksum_p,
+			 DX_FW_INIT_PARAM_FIRST, 1, &magic);
+}
+
+/**
+ * terminate_fwinit_param_list() - Terminate the TLV parameters list with
+ *				LAST/checksum parameter
+ * @tlv_buf:	The pointer to the TLV list array buffer
+ * @idx_p:	The pointer to the variable that would maintain current
+ *		position in the tlv_array
+ * @checksum_p:	The pointer to the variable that would accumulate the
+ *		TLV array checksum
+ *
+ * Returns void
+ */
+static void terminate_fwinit_param_list(u32 *tlv_buf, u32 *idx_p,
+					u32 *checksum_p)
+{
+	const u32 tl_word = DX_TL_WORD(DX_FW_INIT_PARAM_LAST, 1);
+
+	tlv_buf[(*idx_p)++] = cpu_to_le32(tl_word);
+	*checksum_p += tl_word;	/* Last TL word is included in checksum */
+	tlv_buf[(*idx_p)++] = cpu_to_le32(~(*checksum_p));
+}
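+
+/*
+ * Putting the three helpers together, an encoded parameter list (a sketch
+ * with a single hypothetical SEP_FREQ parameter) is built as:
+ *
+ *	init_fwinit_param_list(buf, &idx, &checksum);
+ *	add_fwinit_param(buf, &idx, &checksum,
+ *			 DX_FW_INIT_PARAM_SEP_FREQ, 1, &freq);
+ *	terminate_fwinit_param_list(buf, &idx, &checksum);
+ *
+ * which yields, word by word (all little-endian):
+ *	TL(FIRST,1), FIRST_MAGIC, TL(SEP_FREQ,1), freq, TL(LAST,1),
+ *	~checksum
+ * where the checksum covers every word except the inverted checksum
+ * itself.
+ */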
+
+static int create_fwinit_command(struct sep_drvdata *drvdata,
+				 u32 **fw_init_params_buf_pp,
+				 dma_addr_t *fw_init_params_dma_p)
+{
+	u32 idx;
+	u32 checksum = 0;
+#ifdef SEP_FREQ_MHZ
+	u32 sep_freq = SEP_FREQ_MHZ;
+#endif
+	dma_addr_t q_base_dma;
+	unsigned long q_size;
+	/* arrays for queue parameters values */
+	u32 qs_base_dma[SEP_MAX_NUM_OF_DESC_Q];
+	u32 qs_size[SEP_MAX_NUM_OF_DESC_Q];
+	u32 qs_ctx_size[SEP_MAX_NUM_OF_DESC_Q];
+	u32 qs_ctx_size_total;
+	u32 qs_num = drvdata->num_of_desc_queues;
+	u32 sep_request_params[DX_SEP_REQUEST_PARAM_MSG_LEN];
+	int i;
+	int rc;
+
+	/* For klocwork, add extra check */
+	if (qs_num > SEP_MAX_NUM_OF_DESC_Q) {
+		pr_err("Max number of desc queues (%d) exceeded (%d)\n");
+		return -EINVAL;
+	}
+
+	/* allocate coherent working buffer */
+	*fw_init_params_buf_pp = dma_alloc_coherent(drvdata->dev,
+						    FW_INIT_PARAMS_BUF_LEN,
+						    fw_init_params_dma_p,
+						    GFP_KERNEL);
+	pr_debug("fw_init_params_dma=0x%08lX fw_init_params_va=0x%p\n",
+		      (unsigned long)*fw_init_params_dma_p,
+		      *fw_init_params_buf_pp);
+	if (*fw_init_params_buf_pp == NULL) {
+		pr_err("Unable to allocate coherent workspace buffer\n");
+		return -ENOMEM;
+	}
+
+	init_fwinit_param_list(*fw_init_params_buf_pp, &idx, &checksum);
+
+#ifdef SEP_FREQ_MHZ
+	add_fwinit_param(*fw_init_params_buf_pp, &idx, &checksum,
+			 DX_FW_INIT_PARAM_SEP_FREQ, 1, &sep_freq);
+#endif
+
+	/* No need to validate number of queues - already validated in
+	 * sep_setup() */
+
+	add_fwinit_param(*fw_init_params_buf_pp, &idx, &checksum,
+			 DX_FW_INIT_PARAM_NUM_OF_DESC_QS, 1, &qs_num);
+
+	/* Fetch per-queue information */
+	qs_ctx_size_total = 0;
+	for (i = 0; i < qs_num; i++) {
+		desc_q_get_info4sep(drvdata->queue[i].desc_queue,
+				    &q_base_dma, &q_size);
+		/* Data is first fetched into q_base_dma and q_size because
+		 * return value is of different type than u32 */
+		qs_base_dma[i] = q_base_dma;
+		qs_size[i] = q_size;
+		qs_ctx_size[i] =
+		    ctxmgr_sep_cache_get_size(drvdata->queue[i].sep_cache);
+		if ((qs_base_dma[i] == 0) || (qs_size[i] == 0) ||
+		    (qs_ctx_size[i] == 0)) {
+			pr_err(
+				    "Invalid queue %d resources: base=0x%08X size=%u ctx_cache_size=%u\n",
+				    i, qs_base_dma[i], qs_size[i],
+				    qs_ctx_size[i]);
+			rc = -EINVAL;
+			goto fwinit_error;
+		}
+		qs_ctx_size_total += qs_ctx_size[i];
+	}
+
+	if (qs_ctx_size_total > drvdata->num_of_sep_cache_entries) {
+		pr_err("Too many context cache entries allocated(%u>%u)\n",
+			    qs_ctx_size_total,
+			    drvdata->num_of_sep_cache_entries);
+		rc = -EINVAL;
+		goto fwinit_error;
+	}
+
+	add_fwinit_param(*fw_init_params_buf_pp, &idx, &checksum,
+			 DX_FW_INIT_PARAM_DESC_QS_ADDR, qs_num, qs_base_dma);
+	add_fwinit_param(*fw_init_params_buf_pp, &idx, &checksum,
+			 DX_FW_INIT_PARAM_DESC_QS_SIZE, qs_num, qs_size);
+	add_fwinit_param(*fw_init_params_buf_pp, &idx, &checksum,
+			 DX_FW_INIT_PARAM_CTX_CACHE_PART, qs_num, qs_ctx_size);
+
+	/* Prepare sep request params */
+	dx_sep_req_get_sep_init_params(sep_request_params);
+	add_fwinit_param(*fw_init_params_buf_pp, &idx, &checksum,
+			 DX_FW_INIT_PARAM_SEP_REQUEST_PARAMS,
+			 DX_SEP_REQUEST_PARAM_MSG_LEN, sep_request_params);
+
+	terminate_fwinit_param_list(*fw_init_params_buf_pp, &idx, &checksum);
+
+	return 0;
+
+ fwinit_error:
+	dma_free_coherent(drvdata->dev, FW_INIT_PARAMS_BUF_LEN,
+			  *fw_init_params_buf_pp, *fw_init_params_dma_p);
+	return rc;
+}
+
+static void destroy_fwinit_command(struct sep_drvdata *drvdata,
+				   u32 *fw_init_params_buf_p,
+				   dma_addr_t fw_init_params_dma)
+{
+	/* release TLV parameters buffer */
+	dma_free_coherent(drvdata->dev, FW_INIT_PARAMS_BUF_LEN,
+			  fw_init_params_buf_p, fw_init_params_dma);
+}
+
+/**
+ * sepinit_get_fw_props() - Get the FW properties (version, cache size,
+ *	etc.) as given in the respective GPRs
+ * @drvdata:	 Context where to fill retrieved data
+ *
+ * Get the FW properties (version, cache size, etc.) as given in the
+ * respective GPRs.
+ * This function should be called only after sepinit_wait_for_cold_boot_finish
+ */
+void sepinit_get_fw_props(struct sep_drvdata *drvdata)
+{
+	u32 init_fw_props;
+	/* SeP ROM version */
+	drvdata->rom_ver = READ_REGISTER(drvdata->cc_base +
+					 SEP_HOST_GPR_REG_OFFSET
+					 (DX_SEP_INIT_ROM_VER_GPR_IDX));
+	drvdata->fw_ver =
+	    READ_REGISTER(drvdata->cc_base +
+			  SEP_HOST_GPR_REG_OFFSET(DX_SEP_INIT_FW_VER_GPR_IDX));
+	mdelay(100);		/* TODO for kernel hang bug */
+	init_fw_props = READ_REGISTER(drvdata->cc_base +
+				      SEP_HOST_GPR_REG_OFFSET
+				      (DX_SEP_INIT_FW_PROPS_GPR_IDX));
+
+	drvdata->num_of_desc_queues =
+	    BITFIELD_GET(init_fw_props,
+			 DX_SEP_INIT_NUM_OF_QUEUES_BIT_OFFSET,
+			 DX_SEP_INIT_NUM_OF_QUEUES_BIT_SIZE);
+	drvdata->num_of_sep_cache_entries =
+	    BITFIELD_GET(init_fw_props,
+			 DX_SEP_INIT_CACHE_CTX_SIZE_BIT_OFFSET,
+			 DX_SEP_INIT_CACHE_CTX_SIZE_BIT_SIZE);
+	drvdata->mlli_table_size =
+	    BITFIELD_GET(init_fw_props,
+			 DX_SEP_INIT_MLLI_TBL_SIZE_BIT_OFFSET,
+			 DX_SEP_INIT_MLLI_TBL_SIZE_BIT_SIZE);
+
+	pr_info("ROM Ver.=0x%08X , FW Ver.=0x%08X\n"
+		     "SEP queues=%u, Ctx.Cache#ent.=%u , MLLIsize=%lu B\n",
+		     drvdata->rom_ver, drvdata->fw_ver,
+		     drvdata->num_of_desc_queues,
+		     drvdata->num_of_sep_cache_entries,
+		     drvdata->mlli_table_size);
+}
+
+/**
+ * sepinit_wait_for_fw_init_done() - Wait for FW initialization to complete
+ * @drvdata:
+ *
+ * Wait for FW initialization to complete
+ * This function should be invoked after sepinit_set_fw_init_params
+ * Returns int
+ */
+static int sepinit_wait_for_fw_init_done(struct sep_drvdata *drvdata)
+{
+	enum dx_sep_state cur_state;
+	u32 cur_status;
+	int rc = 0;
+
+	cur_state =
+	    dx_sep_wait_for_state(DX_SEP_STATE_DONE_FW_INIT |
+				  DX_SEP_STATE_FATAL_ERROR,
+				  FW_INIT_TIMEOUT_MSEC);
+	if (cur_state != DX_SEP_STATE_DONE_FW_INIT) {
+		rc = -EIO;
+		cur_status =
+		    READ_REGISTER(drvdata->cc_base + SEP_STATUS_GPR_OFFSET);
+		pr_err(
+			    "Failed waiting for DONE_FW_INIT from SeP (state=0x%08X status=0x%08X)\n",
+			    cur_state, cur_status);
+	} else {
+		pr_info("DONE_FW_INIT\n");
+	}
+
+	return rc;
+}
+
+/**
+ * sepinit_reload_driver_state() - Request SeP FW to switch to the
+ *	RELOAD_DRIVER state and wait for the transition to complete.
+ * @drvdata:
+ *
+ * Returns int 0 on success
+ */
+int sepinit_reload_driver_state(struct sep_drvdata *drvdata)
+{
+	int rc = 0;
+	enum dx_sep_state cur_state;
+	uint32_t cur_status;
+
+	WRITE_REGISTER(drvdata->cc_base +
+			HOST_SEP_GPR_REG_OFFSET(DX_HOST_REQ_GPR_IDX),
+			DX_HOST_REQ_CHANGE_TO_RELOAD_DRIVER_STATE);
+
+	cur_state = dx_sep_wait_for_state(
+		DX_SEP_STATE_RELOAD_DRIVER | DX_SEP_STATE_FATAL_ERROR,
+		FW_INIT_TIMEOUT_MSEC);
+	pr_debug("cur_state is 0x%x", cur_state);
+
+	if (cur_state != DX_SEP_STATE_RELOAD_DRIVER) {
+		rc = -EIO;
+		cur_status =
+			READ_REGISTER(drvdata->cc_base + SEP_STATUS_GPR_OFFSET);
+		pr_err("Failed waiting for DX_SEP_STATE_RELOAD_DRIVER from SeP (state=0x%08X status=0x%08X)\n",
+			cur_state, cur_status);
+	} else {
+		pr_info("DONE_FW_INIT\n");
+	}
+
+	return rc;
+}
+
+/**
+ * sepinit_do_fw_init() - Initialize SeP FW
+ * @drvdata:
+ *
+ * Provide SeP FW with initialization parameters and wait for DONE_FW_INIT.
+ *
+ * Returns int 0 on success
+ */
+int sepinit_do_fw_init(struct sep_drvdata *drvdata, int init_flag)
+{
+	int rc;
+	u32 *fw_init_params_buf_p;
+	dma_addr_t fw_init_params_dma;
+
+	rc = create_fwinit_command(drvdata,
+				   &fw_init_params_buf_p, &fw_init_params_dma);
+	if (rc != 0)
+		return rc;
+	dump_fwinit_params(drvdata, fw_init_params_buf_p);
+	/* Write the physical address of the FW init parameters buffer */
+	WRITE_REGISTER(drvdata->cc_base +
+		       HOST_SEP_GPR_REG_OFFSET(DX_HOST_REQ_PARAM_GPR_IDX),
+		       fw_init_params_dma);
+	/* Initiate FW-init */
+	if (init_flag == INIT_FW_FLAG)
+		WRITE_REGISTER(drvdata->cc_base +
+			HOST_SEP_GPR_REG_OFFSET(DX_HOST_REQ_GPR_IDX),
+			DX_HOST_REQ_FW_INIT);
+	else
+		WRITE_REGISTER(drvdata->cc_base +
+			HOST_SEP_GPR_REG_OFFSET(DX_HOST_REQ_GPR_IDX),
+			DX_HOST_REQ_UPDATE_SWQ_ADDR);
+
+	rc = sepinit_wait_for_fw_init_done(drvdata);
+	destroy_fwinit_command(drvdata,
+		fw_init_params_buf_p, fw_init_params_dma);
+
+	WRITE_REGISTER(drvdata->cc_base +
+			HOST_SEP_GPR_REG_OFFSET(DX_HOST_REQ_GPR_IDX),
+			0);
+
+	return rc;
+}
diff --git a/drivers/staging/sep54/sep_init.h b/drivers/staging/sep54/sep_init.h
new file mode 100644
index 0000000..1e16e19
--- /dev/null
+++ b/drivers/staging/sep54/sep_init.h
@@ -0,0 +1,72 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef __SEP_INIT_H__
+#define __SEP_INIT_H__
+
+#include "dx_driver.h"
+
+/**
+ * sepinit_do_cc_init() - Initiate SeP cold boot sequence and wait for
+ *	its completion.
+ *
+ * @drvdata:
+ *
+ * This function loads the CC firmware and dispatches a CC_INIT request message.
+ * Returns int 0 for success
+ */
+int sepinit_do_cc_init(struct sep_drvdata *drvdata);
+
+/**
+ * sepinit_get_fw_props() - Get the FW properties (version, cache size, etc.)
+ *	after completing cold boot
+ * @drvdata:	 Context where to fill retrieved data
+ *
+ * This function should be called only after sepinit_do_cc_init completes
+ * successfully.
+ */
+void sepinit_get_fw_props(struct sep_drvdata *drvdata);
+
+/**
+ * sepinit_do_fw_init() - Initialize SeP FW
+ * @drvdata:
+ *
+ * Provide SeP FW with initialization parameters and wait for DONE_FW_INIT.
+ *
+ * Returns int 0 on success
+ */
+int sepinit_do_fw_init(struct sep_drvdata *drvdata, int init_flag);
+
+/**
+ * sepinit_reload_driver_state() - Request SeP FW to switch to the
+ *	RELOAD_DRIVER state and wait for the transition to complete.
+ * @drvdata:
+ *
+ * Returns int
+ */
+int sepinit_reload_driver_state(struct sep_drvdata *drvdata);
+
+#endif /*__SEP_INIT_H__*/
diff --git a/drivers/staging/sep54/sep_init_cc_errors.h b/drivers/staging/sep54/sep_init_cc_errors.h
new file mode 100644
index 0000000..3c16d16
--- /dev/null
+++ b/drivers/staging/sep54/sep_init_cc_errors.h
@@ -0,0 +1,84 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef SEP_INIT_CC_ERROR_H
+#define SEP_INIT_CC_ERROR_H
+
+#include "sep_error.h"
+
+/*! \file sep_init_cc_errors.h
+ * \brief CC-init error code definitions
+ */
+#define DX_INIT_CC_OK DX_SEP_OK
+/* DX_INIT_CC_MODULE_ERROR_BASE - 0xE0004000 */
+#define DX_CC_INIT_MSG_CS_ERROR	\
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x1)
+#define DX_CC_INIT_MSG_WRONG_TOKEN_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x2)
+#define DX_CC_INIT_MSG_WRONG_OP_CODE_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x3)
+#define DX_CC_INIT_MSG_WRONG_RESIDENT_ADDR_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x4)
+#define DX_CC_INIT_MSG_WRONG_I_CACHE_ADDR_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x5)
+#define DX_CC_INIT_MSG_WRONG_I_CACHE_DEST_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x6)
+#define DX_CC_INIT_MSG_WRONG_D_CACHE_ADDR_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x7)
+#define DX_CC_INIT_MSG_WRONG_D_CACHE_SIZE_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x8)
+#define DX_CC_INIT_MSG_WRONG_INIT_EXT_ADDR_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x9)
+#define DX_CC_INIT_MSG_WRONG_VRL_ADDR_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0xA)
+#define DX_CC_INIT_MSG_WRONG_MAGIC_NUM_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0xB)
+#define DX_CC_INIT_MSG_WRONG_OUTPUT_BUFF_ADDR_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0xC)
+#define DX_CC_INIT_MSG_WRONG_OUTPUT_BUFF_SIZE_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0xD)
+#define DX_RESERVED_0_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0xE)
+
+/* DX_INIT_CC_EXT_MODULE_ERROR_BASE - 0xE0005000 */
+#define DX_CC_INIT_EXT_FIRST_PARAM_ERRR \
+	(DX_INIT_CC_EXT_MODULE_ERROR_BASE + 0x1)
+#define DX_CC_INIT_EXT_WRONG_LAST_PARAM_LENGTH_ERRR \
+	(DX_INIT_CC_EXT_MODULE_ERROR_BASE + 0x2)
+#define DX_CC_INIT_EXT_WRONG_CHECKSUM_VALUE_ERRR \
+	(DX_INIT_CC_EXT_MODULE_ERROR_BASE + 0x3)
+#define DX_CC_INIT_EXT_WRONG_DISABLE_MODULE_LENGTH_ERRR \
+	(DX_INIT_CC_EXT_MODULE_ERROR_BASE + 0x4)
+#define DX_CC_INIT_EXT_WRONG_AXI_CONFIG_LENGTH_ERRR \
+	(DX_INIT_CC_EXT_MODULE_ERROR_BASE + 0x5)
+#define DX_CC_INIT_EXT_WRONG_PARAM_ERRR \
+	(DX_INIT_CC_EXT_MODULE_ERROR_BASE + 0x6)
+#define DX_CC_INIT_EXT_EXCEED_MAX_PARAM_PARAM_ERRR \
+	(DX_INIT_CC_EXT_MODULE_ERROR_BASE + 0x7)
+#define DX_CC_INIT_EXT_WRONG_SEP_FREQ_LENGTH_ERRR \
+	(DX_INIT_CC_EXT_MODULE_ERROR_BASE + 0x8)
+
+#endif
diff --git a/drivers/staging/sep54/sep_lli.h b/drivers/staging/sep54/sep_lli.h
new file mode 100644
index 0000000..b778f4f
--- /dev/null
+++ b/drivers/staging/sep54/sep_lli.h
@@ -0,0 +1,86 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _SEP_LLI_H_
+#define _SEP_LLI_H_
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+#include "dx_bitops.h"
+
+#define SEP_LLI_GET(lli_p, lli_field) BITFIELD_GET(                            \
+		((u32 *)(lli_p))[SEP_LLI_ ## lli_field ## _WORD_OFFSET],  \
+		SEP_LLI_ ## lli_field ## _BIT_OFFSET,			       \
+		SEP_LLI_ ## lli_field ## _BIT_SIZE)
+#define SEP_LLI_SET(lli_p, lli_field, new_val) BITFIELD_SET(                   \
+		((u32 *)(lli_p))[SEP_LLI_ ## lli_field ## _WORD_OFFSET],  \
+		SEP_LLI_ ## lli_field ## _BIT_OFFSET,			       \
+		SEP_LLI_ ## lli_field ## _BIT_SIZE,			       \
+		new_val)
+
+#define SEP_LLI_INIT(lli_p)  do { \
+	((u32 *)(lli_p))[0] = 0; \
+	((u32 *)(lli_p))[1] = 0; \
+} while (0)
+
+/* Copy local LLI scratchpad to SeP LLI buffer */
+#define SEP_LLI_COPY_TO_SEP(sep_lli_p, host_lli_p) do {             \
+	int i;                                                      \
+	for (i = 0; i < SEP_LLI_ENTRY_WORD_SIZE; i++)               \
+		((u32 *)(sep_lli_p))[i] =                      \
+			cpu_to_le32(((u32 *)(host_lli_p))[i]); \
+} while (0)
+/* and vice-versa */
+#define SEP_LLI_COPY_FROM_SEP(host_lli_p, sep_lli_p) do {                 \
+	int i;                                                            \
+		for (i = 0; i < SEP_LLI_ENTRY_WORD_SIZE; i++)             \
+			((u32 *)(host_lli_p))[i] =                   \
+				le32_to_cpu(((u32 *)(sep_lli_p))[i]);\
+} while (0)
+
+/* Size of entry */
+#define SEP_LLI_ENTRY_WORD_SIZE 2
+#define SEP_LLI_ENTRY_BYTE_SIZE (SEP_LLI_ENTRY_WORD_SIZE * sizeof(u32))
+
+/* (DMA) Address: ADDR */
+#define SEP_LLI_ADDR_WORD_OFFSET 0
+#define SEP_LLI_ADDR_BIT_OFFSET 0
+#define SEP_LLI_ADDR_BIT_SIZE 32
+/* Size: SIZE */
+#define SEP_LLI_SIZE_WORD_OFFSET 1
+#define SEP_LLI_SIZE_BIT_OFFSET 0
+#define SEP_LLI_SIZE_BIT_SIZE 30
+/* First/Last LLI entries bit marks: FIRST, LAST */
+#define SEP_LLI_FIRST_WORD_OFFSET 1
+#define SEP_LLI_FIRST_BIT_OFFSET 30
+#define SEP_LLI_FIRST_BIT_SIZE 1
+#define SEP_LLI_LAST_WORD_OFFSET 1
+#define SEP_LLI_LAST_BIT_OFFSET 31
+#define SEP_LLI_LAST_BIT_SIZE 1
+
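+/*
+ * Usage sketch (dma_addr and seg_size are hypothetical values): building
+ * one LLI entry for a DMA segment and flagging it as the first in its
+ * table, then copying it out to the SeP buffer endian-safely:
+ *
+ *	u32 lli[SEP_LLI_ENTRY_WORD_SIZE];
+ *
+ *	SEP_LLI_INIT(lli);
+ *	SEP_LLI_SET(lli, ADDR, dma_addr);	// word 0, bits 0..31
+ *	SEP_LLI_SET(lli, SIZE, seg_size);	// word 1, bits 0..29
+ *	SEP_LLI_SET(lli, FIRST, 1);		// word 1, bit 30
+ *	SEP_LLI_COPY_TO_SEP(sep_lli_p, lli);
+ */
+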
+#endif /*_SEP_LLI_H_*/
diff --git a/drivers/staging/sep54/sep_log.h b/drivers/staging/sep54/sep_log.h
new file mode 100644
index 0000000..fd7a9ae
--- /dev/null
+++ b/drivers/staging/sep54/sep_log.h
@@ -0,0 +1,131 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+
+
+#ifndef _SEP_LOG__H_
+#define _SEP_LOG__H_
+
+/* Define different "BUG()" behavior in DEBUG mode */
+#ifdef DEBUG
+/* It is easier to attach a debugger without causing the exception of "BUG()" */
+#define SEP_DRIVER_BUG() do {dump_stack(); while (1); } while (0)
+#else
+#define SEP_DRIVER_BUG() BUG()
+#endif
+
+/* SeP log levels (to be used in sep_log_level and SEP_BASE_LOG_LEVEL) */
+#define SEP_LOG_LEVEL_ERR       0
+#define SEP_LOG_LEVEL_WARN      1
+#define SEP_LOG_LEVEL_INFO      2
+#define SEP_LOG_LEVEL_DEBUG     3
+#define SEP_LOG_LEVEL_TRACE     4
+
+/* SeP log components (to be used in sep_log_mask and SEP_LOG_CUR_COMPONENT) */
+#define SEP_LOG_MASK_MAIN        1 /* dx_driver.c */
+#define SEP_LOG_MASK_LLI_MGR     (1<<1)
+#define SEP_LOG_MASK_CTX_MGR     (1<<2)
+#define SEP_LOG_MASK_DESC_MGR    (1<<3)
+#define SEP_LOG_MASK_SYSFS       (1<<4)
+#define SEP_LOG_MASK_SEP_INIT    (1<<5)
+#define SEP_LOG_MASK_CRYPTO_API  (1<<6)
+#define SEP_LOG_MASK_SEP_REQUEST (1<<7)
+#define SEP_LOG_MASK_SEP_POWER   (1<<8)
+#define SEP_LOG_MASK_SEP_APP     (1<<9)
+#define SEP_LOG_MASK_SEP_PRINTF  (1<<31)
+#define SEP_LOG_MASK_ALL        (SEP_LOG_MASK_MAIN | SEP_LOG_MASK_SEP_INIT |\
+	SEP_LOG_MASK_LLI_MGR | SEP_LOG_MASK_CTX_MGR | SEP_LOG_MASK_DESC_MGR |\
+	SEP_LOG_MASK_SYSFS | SEP_LOG_MASK_CRYPTO_API |\
+	SEP_LOG_MASK_SEP_REQUEST | SEP_LOG_MASK_SEP_POWER |\
+	SEP_LOG_MASK_SEP_APP | SEP_LOG_MASK_SEP_PRINTF)
+
+
+/* This printk wrapper maps the log level to KERN_* levels and masks prints *
+ * from specific components at run time, based on SEP_LOG_CUR_COMPONENT and *
+ * sep_log_mask.                                                            */
+#define MODULE_PRINTK(level, format, ...) do {			\
+	if (sep_log_mask & SEP_LOG_CUR_COMPONENT)		\
+		printk(level MODULE_NAME ":%s: " format,	\
+			__func__, ##__VA_ARGS__);		\
+} while (0)
+
+extern int sep_log_level;
+extern int sep_log_mask;
+
+
+/* change this to set the preferred log level */
+#ifdef DEBUG
+#define SEP_BASE_LOG_LEVEL SEP_LOG_LEVEL_TRACE
+#else
+#define SEP_BASE_LOG_LEVEL SEP_LOG_LEVEL_WARN
+#endif
+
+#define SEP_LOG_ERR(format, ...) \
+	MODULE_PRINTK(KERN_ERR, format, ##__VA_ARGS__)
+
+#if (SEP_BASE_LOG_LEVEL >= SEP_LOG_LEVEL_WARN)
+#define SEP_LOG_WARN(format, ...) do {				\
+	if (sep_log_level >= SEP_LOG_LEVEL_WARN)		\
+		MODULE_PRINTK(KERN_WARNING, format,		\
+		##__VA_ARGS__);					\
+} while (0)
+#else
+#define SEP_LOG_WARN(format, arg...) do {} while (0)
+#endif
+
+#if (SEP_BASE_LOG_LEVEL >= SEP_LOG_LEVEL_INFO)
+#define SEP_LOG_INFO(format, ...) do {				\
+	if (sep_log_level >= SEP_LOG_LEVEL_INFO)		\
+		MODULE_PRINTK(KERN_INFO, format, ##__VA_ARGS__); \
+} while (0)
+#else
+#define SEP_LOG_INFO(format, arg...) do {} while (0)
+#endif
+
+#if (SEP_BASE_LOG_LEVEL >= SEP_LOG_LEVEL_DEBUG)
+#define SEP_LOG_DEBUG(format, ...) do {				\
+	if (sep_log_level >= SEP_LOG_LEVEL_DEBUG)		\
+		MODULE_PRINTK(KERN_DEBUG, format, ##__VA_ARGS__);\
+} while (0)
+#else
+#define SEP_LOG_DEBUG(format, arg...) do {} while (0)
+#endif
+
+#if (SEP_BASE_LOG_LEVEL >= SEP_LOG_LEVEL_TRACE)
+#define SEP_LOG_TRACE(format, ...) do {				\
+	if (sep_log_level >= SEP_LOG_LEVEL_TRACE)		\
+		MODULE_PRINTK(KERN_DEBUG, "<trace> " format,	\
+			      ##__VA_ARGS__);			\
+} while (0)
+#else
+#define SEP_LOG_TRACE(format, arg...) do {} while (0)
+#endif
+
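+/*
+ * Usage sketch: a component source defines SEP_LOG_CUR_COMPONENT before
+ * including this header (e.g., SEP_LOG_MASK_SEP_INIT in sep_init.c); a
+ * message is then emitted only if its level passes both the compile-time
+ * SEP_BASE_LOG_LEVEL and the run-time sep_log_level, and the component's
+ * bit is set in sep_log_mask:
+ *
+ *	#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_LLI_MGR
+ *	#include "sep_log.h"
+ *	...
+ *	SEP_LOG_DEBUG("mapped %u LLI entries\n", num_entries);
+ */
+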
+#undef pr_fmt
+#define pr_fmt(fmt)     KBUILD_MODNAME ": %s:%d: " fmt, __func__, __LINE__
+
+#endif
+
diff --git a/drivers/staging/sep54/sep_power.c b/drivers/staging/sep54/sep_power.c
new file mode 100644
index 0000000..d007907
--- /dev/null
+++ b/drivers/staging/sep54/sep_power.c
@@ -0,0 +1,431 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+/**
+ * This file implements the power state control functions for SeP/CC
+ */
+
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_SEP_POWER
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+/*#include <linux/export.h>*/
+#define DX_CC_HOST_VIRT	/* must be defined before including dx_cc_regs.h */
+#include "dx_driver.h"
+#include "dx_cc_regs.h"
+#include "dx_init_cc_abi.h"
+#include "sep_sw_desc.h"
+#include "dx_sep_kapi.h"
+#include <linux/delay.h>
+
+#define SEP_STATE_CHANGE_TIMEOUT_MSEC 2500
+/**
+ * struct sep_power_control - Control data for power state change operations
+ * @drvdata:		The associated driver context
+ * @state_changed:	Completion object to signal state change
+ * @last_state:		Recorded last state
+ * @state_jiffies:	jiffies at recorded last state
+ *
+ * last_state and state_jiffies are volatile because they may be updated in
+ * interrupt context while being tested in the _state_get function.
+ */
+struct sep_power_control {
+	struct sep_drvdata *drvdata;
+	struct completion state_changed;
+	volatile enum dx_sep_state last_state;
+	volatile unsigned long state_jiffies;
+};
+
+/* Global context for power management */
+static struct sep_power_control power_control;
+
+static const char *power_state_str(enum dx_sep_power_state pstate)
+{
+	switch (pstate) {
+	case DX_SEP_POWER_INVALID:
+		return "INVALID";
+	case DX_SEP_POWER_OFF:
+		return "OFF";
+	case DX_SEP_POWER_BOOT:
+		return "BOOT";
+	case DX_SEP_POWER_IDLE:
+		return "IDLE";
+	case DX_SEP_POWER_ACTIVE:
+		return "ACTIVE";
+	case DX_SEP_POWER_HIBERNATED:
+		return "HIBERNATED";
+	}
+	return "(unknown)";
+}
+
+/**
+ * dx_sep_state_change_handler() - Interrupt handler for SeP state changes
+ * @drvdata:	Associated driver context
+ */
+void dx_sep_state_change_handler(struct sep_drvdata *drvdata)
+{
+	pr_warn("State=0x%08X Status/RetCode=0x%08X\n",
+		     READ_REGISTER(drvdata->cc_base + SEP_STATE_GPR_OFFSET),
+		     READ_REGISTER(drvdata->cc_base + SEP_STATUS_GPR_OFFSET));
+	power_control.state_jiffies = jiffies;
+	power_control.last_state = GET_SEP_STATE(drvdata);
+	complete(&power_control.state_changed);
+}
+
+/**
+ * dx_sep_wait_for_state() - Wait for SeP to reach one of the states reflected
+ *				with given state mask
+ * @state_mask:		The OR of expected SeP states
+ * @timeout_msec:	Timeout of waiting for the state (in millisec.)
+ *
+ * Returns the state reached. In case of wait timeout the returned state
+ * may not be one of the expected states.
+ */
+enum dx_sep_state dx_sep_wait_for_state(u32 state_mask, int timeout_msec)
+{
+	int wait_jiffies = msecs_to_jiffies(timeout_msec);
+	enum dx_sep_state sep_state;
+
+	do {
+		/* Poll for state transition completion or failure */
+		/* Arm for next state change before reading current state */
+		INIT_COMPLETION(power_control.state_changed);
+		sep_state = GET_SEP_STATE(power_control.drvdata);
+		if ((sep_state & state_mask) || (wait_jiffies == 0))
+			/* It's a match or wait timed out */
+			break;
+		wait_jiffies =
+		    wait_for_completion_timeout(&power_control.state_changed,
+						wait_jiffies);
+	} while (1);
+
+	return sep_state;
+}
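+
+/*
+ * Callers typically pass an OR-mask of acceptable terminal states and must
+ * check which state was actually reached, since a timeout returns whatever
+ * state SeP happens to be in. A sketch mirroring the sep_init.c usage:
+ *
+ *	state = dx_sep_wait_for_state(DX_SEP_STATE_DONE_FW_INIT |
+ *				      DX_SEP_STATE_FATAL_ERROR,
+ *				      FW_INIT_TIMEOUT_MSEC);
+ *	if (state != DX_SEP_STATE_DONE_FW_INIT)
+ *		rc = -EIO;
+ */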
+
+void dx_sep_pm_runtime_get(void)
+{
+	pm_runtime_get_sync(power_control.drvdata->dev);
+}
+
+void dx_sep_pm_runtime_put(void)
+{
+	pm_runtime_mark_last_busy(power_control.drvdata->dev);
+	pm_runtime_put_autosuspend(power_control.drvdata->dev);
+}
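+
+/*
+ * These two helpers are meant to bracket any burst of SeP activity:
+ * dx_sep_pm_runtime_get() resumes the device (or keeps it awake) before
+ * work is queued, and dx_sep_pm_runtime_put() marks it busy so the
+ * runtime-PM autosuspend timer can power it down afterwards. A sketch of
+ * the intended calling pattern (do_sep_work is hypothetical):
+ *
+ *	dx_sep_pm_runtime_get();
+ *	rc = do_sep_work(...);
+ *	dx_sep_pm_runtime_put();
+ */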
+
+/**
+ * set_desc_qs_state() - Modify states of all Desc. queues
+ *
+ * @state:	Requested new state
+ */
+static int set_desc_qs_state(enum desc_q_state state)
+{
+	int i, rc;
+
+	for (i = 0, rc = 0;
+	     (i < power_control.drvdata->num_of_desc_queues) && (rc == 0); i++)
+		rc = desc_q_set_state(power_control.drvdata->queue[i].
+				      desc_queue, state);
+	if (unlikely(rc != 0))
+		/* Error - revert state of queues that were already changed */
+		for (i--; i >= 0; i--)
+			desc_q_set_state(power_control.drvdata->queue[i].
+					 desc_queue,
+					 (state ==
+					  DESC_Q_ASLEEP) ? DESC_Q_ACTIVE :
+					 DESC_Q_ASLEEP);
+	return rc;
+}
+
+static bool is_desc_qs_active(void)
+{
+	int i;
+	enum desc_q_state qstate;
+	bool is_all_qs_active = true;
+
+	for (i = 0; i < power_control.drvdata->num_of_desc_queues; i++) {
+		qstate =
+		    desc_q_get_state(power_control.drvdata->queue[i].
+				     desc_queue);
+		if (qstate != DESC_Q_ACTIVE) {
+			is_all_qs_active = false;
+			break;
+		}
+	}
+	return is_all_qs_active;
+}
+
+static bool is_desc_qs_idle(unsigned long *idle_jiffies_p)
+{
+	int i;
+	unsigned long this_q_idle_jiffies;
+	bool is_all_qs_idle = true;
+
+	*idle_jiffies_p = 0;	/* Will hold max. over all queues if all idle */
+
+	for (i = 0; i < power_control.drvdata->num_of_desc_queues; i++) {
+		if (!desc_q_is_idle(power_control.drvdata->queue[i].desc_queue,
+				    &this_q_idle_jiffies)) {
+			is_all_qs_idle = false;
+			break;
+		}
+		if (this_q_idle_jiffies > *idle_jiffies_p)
+			*idle_jiffies_p = this_q_idle_jiffies;
+	}
+	return is_all_qs_idle;
+}
+
+/**
+ * reset_desc_qs() - Initiate clearing of desc. queue counters
+ * This function must be called only when the transition to hibernation
+ * state has completed successfully, i.e., the desc. queues are empty and
+ * asleep
+ */
+static void reset_desc_qs(void)
+{
+	int i;
+
+	for (i = 0; i < power_control.drvdata->num_of_desc_queues; i++)
+		(void)desc_q_reset(power_control.drvdata->queue[i].desc_queue);
+}
+
+static int process_hibernation_req(void)
+{
+	enum dx_sep_state sep_state;
+	int rc;
+
+	sep_state = GET_SEP_STATE(power_control.drvdata);
+	/* Already off, no need to send the sleep descriptor */
+	if (sep_state == DX_SEP_STATE_OFF ||
+		sep_state == DX_SEP_STATE_DONE_SLEEP_MODE)
+		return 0;
+
+	if (sep_state != DX_SEP_STATE_DONE_FW_INIT ||
+		!(is_desc_qs_active())) {
+		pr_err("Requested hibernation while SeP state=0x%08X\n",
+			    sep_state);
+		return -EINVAL;
+	}
+	rc = set_desc_qs_state(DESC_Q_ASLEEP);
+	if (unlikely(rc != 0)) {
+		pr_err("Failed moving queues to SLEEP state (%d)\n", rc);
+		return rc;
+	}
+	/* Write the SEP_SLEEP_ENABLE command to GPR7 to initiate the
+	   sleep sequence */
+	WRITE_REGISTER(power_control.drvdata->cc_base +
+			DX_CC_REG_OFFSET(HOST, HOST_SEP_GPR7),
+			SEP_SLEEP_ENABLE);
+	/* Process state change */
+	sep_state = dx_sep_wait_for_state(DX_SEP_STATE_DONE_SLEEP_MODE,
+					SEP_STATE_CHANGE_TIMEOUT_MSEC);
+	switch (sep_state) {
+	case DX_SEP_STATE_DONE_SLEEP_MODE:
+		break;
+	case DX_SEP_STATE_DONE_FW_INIT:
+		pr_err("Transition to SLEEP mode aborted.\n");
+		rc = -EBUSY;
+		break;
+	case DX_SEP_STATE_PROC_SLEEP_MODE:
+		pr_err("Stuck in processing of SLEEP req.\n");
+		rc = -ETIME;
+		break;
+	default:
+		pr_err(
+			"Unexpected SeP state after SLEEP request: 0x%08X\n",
+			sep_state);
+		rc = -EINVAL;
+	}
+	if (unlikely(rc != 0)) {
+		sep_state = GET_SEP_STATE(power_control.drvdata);
+		if (sep_state == DX_SEP_STATE_DONE_FW_INIT)
+			/* Revert queues state on failure */
+			/* (if remained on active state)  */
+			set_desc_qs_state(DESC_Q_ACTIVE);
+	} else {
+		reset_desc_qs();
+	}
+
+	return rc;
+}
+
+static int process_activate_req(void)
+{
+	enum dx_sep_state sep_state;
+	int rc;
+	int count = 0;
+
+	sep_state = GET_SEP_STATE(power_control.drvdata);
+	if ((sep_state == DX_SEP_STATE_DONE_FW_INIT) && is_desc_qs_active()) {
+		pr_info("Requested activation when in active state\n");
+		return 0;	/* Already in this state */
+	}
+
+	/* Make sure SeP is not off before restoring the IMR */
+	if (sep_state == DX_SEP_STATE_OFF) {
+		while (count < SEP_POWERON_TIMEOUT) {
+			sep_state = GET_SEP_STATE(power_control.drvdata);
+			if (sep_state != DX_SEP_STATE_OFF)
+				break;
+			usleep_range(50, 150);
+			count++;
+		}
+		if (count >= SEP_POWERON_TIMEOUT) {
+			pr_info("Timeout while waiting for SEP power-on\n");
+			return -ETIME;
+		}
+
+	}
+	/* SeP may have been reset - restore IMR if SeP is not off */
+	/* This must be done before dx_sep_wait_for_state() */
+	WRITE_REGISTER(power_control.drvdata->cc_base +
+		       DX_CC_REG_OFFSET(HOST, IMR),
+		       ~power_control.drvdata->irq_mask);
+	/* Nothing to initiate - just wait for FW_INIT_DONE */
+	sep_state = dx_sep_wait_for_state(DX_SEP_STATE_DONE_FW_INIT,
+					  SEP_STATE_CHANGE_TIMEOUT_MSEC);
+	if (sep_state == DX_SEP_STATE_DONE_FW_INIT)
+		rc = set_desc_qs_state(DESC_Q_ACTIVE);
+	else {
+		pr_info("Timeout while waiting SEP wakeup\n");
+		rc = -ETIME;	/* Timed out waiting */
+	}
+
+	return rc;
+}
+
+/**
+ * dx_sep_power_state_set() - Change power state of SeP (CC)
+ *
+ * @req_state:	The requested power state (_HIBERNATED, _IDLE or _ACTIVE)
+ *
+ * Request changing of power state to given state and block until transition
+ * is completed.
+ * Requesting _HIBERNATED is allowed only from _ACTIVE state.
+ * Requesting _ACTIVE is allowed only after CC was powered back on (warm boot).
+ * Return codes:
+ * 0 -	Power state change completed.
+ * -EINVAL -	This request is not allowed in current SeP state or req_state
+ *		value is invalid.
+ * -EBUSY -	State change request ignored because SeP is busy (primarily,
+ *		when requesting hibernation while SeP is processing something).
+ * -ETIME -	Request timed out (primarily, when asking for _ACTIVE)
+ */
+int dx_sep_power_state_set(enum dx_sep_power_state req_state)
+{
+	int rc = 0;
+
+	switch (req_state) {
+	case DX_SEP_POWER_HIBERNATED:
+		rc = process_hibernation_req();
+		break;
+	case DX_SEP_POWER_IDLE:
+	case DX_SEP_POWER_ACTIVE:
+		rc = process_activate_req();
+		break;
+	default:
+		pr_err("Invalid state to request (%s)\n",
+			    power_state_str(req_state));
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(dx_sep_power_state_set);
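+
+/*
+ * Illustrative usage sketch (not part of the original driver): a platform
+ * suspend/resume pair could drive this API as follows; the function names
+ * sep_plat_suspend()/sep_plat_resume() are hypothetical.
+ *
+ *	static int sep_plat_suspend(struct device *dev)
+ *	{
+ *		return dx_sep_power_state_set(DX_SEP_POWER_HIBERNATED);
+ *	}
+ *
+ *	static int sep_plat_resume(struct device *dev)
+ *	{
+ *		return dx_sep_power_state_set(DX_SEP_POWER_ACTIVE);
+ *	}
+ */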
+
+/**
+ * dx_sep_power_state_get() - Get the current power state of SeP (CC)
+ * @state_jiffies_p:	The "jiffies" value at which given state was detected.
+ */
+enum dx_sep_power_state dx_sep_power_state_get(unsigned long *state_jiffies_p)
+{
+	enum dx_sep_state sep_state;
+	enum dx_sep_power_state rc;
+	unsigned long idle_jiffies;
+
+	sep_state = GET_SEP_STATE(power_control.drvdata);
+	if (sep_state != power_control.last_state) {
+		/* Probably off or after warm-boot with lost IMR */
+		/* Recover last_state */
+		power_control.last_state = sep_state;
+		power_control.state_jiffies = jiffies;
+	}
+	if (state_jiffies_p != NULL)
+		*state_jiffies_p = power_control.state_jiffies;
+	switch (sep_state) {
+	case DX_SEP_STATE_PROC_WARM_BOOT:
+		/* FALLTHROUGH */
+	case DX_SEP_STATE_DONE_WARM_BOOT:
+		rc = DX_SEP_POWER_BOOT;
+		break;
+	case DX_SEP_STATE_DONE_FW_INIT:
+		if (is_desc_qs_active()) {
+			if (is_desc_qs_idle(&idle_jiffies)) {
+				rc = DX_SEP_POWER_IDLE;
+				if (state_jiffies_p != NULL)
+					*state_jiffies_p = idle_jiffies;
+			} else {
+				rc = DX_SEP_POWER_ACTIVE;
+			}
+		} else {
+			/* SeP was woken up but dx_sep_power_state_set was not
+			 * invoked to activate the queues */
+			rc = DX_SEP_POWER_BOOT;
+		}
+		break;
+	case DX_SEP_STATE_PROC_SLEEP_MODE:
+		/* Report as active until actually asleep */
+		rc = DX_SEP_POWER_ACTIVE;
+		break;
+	case DX_SEP_STATE_DONE_SLEEP_MODE:
+		rc = DX_SEP_POWER_HIBERNATED;
+		break;
+	case DX_SEP_STATE_OFF:
+		rc = DX_SEP_POWER_OFF;
+		break;
+	case DX_SEP_STATE_FATAL_ERROR:
+	default:	/* Any state the driver is not supposed to observe */
+		rc = DX_SEP_POWER_INVALID;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(dx_sep_power_state_get);
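+
+/*
+ * Illustrative sketch (an assumption, not original code): a runtime-PM idle
+ * check could combine the reported state with the idle timestamp to decide
+ * whether SeP has been quiescent long enough to hibernate; the 100 ms
+ * threshold below is an arbitrary placeholder.
+ *
+ *	unsigned long since;
+ *
+ *	if ((dx_sep_power_state_get(&since) == DX_SEP_POWER_IDLE) &&
+ *	    time_after(jiffies, since + msecs_to_jiffies(100)))
+ *		(void)dx_sep_power_state_set(DX_SEP_POWER_HIBERNATED);
+ */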
+
+/**
+ * dx_sep_power_init() - Init resources for this module
+ */
+void dx_sep_power_init(struct sep_drvdata *drvdata)
+{
+	power_control.drvdata = drvdata;
+	init_completion(&power_control.state_changed);
+	/* Init. recorded last state */
+	power_control.last_state = GET_SEP_STATE(drvdata);
+	power_control.state_jiffies = jiffies;
+}
+
+/**
+ * dx_sep_power_exit() - Cleanup resources for this module
+ */
+void dx_sep_power_exit(void)
+{
+}
diff --git a/drivers/staging/sep54/sep_power.h b/drivers/staging/sep54/sep_power.h
new file mode 100644
index 0000000..b92bd65
--- /dev/null
+++ b/drivers/staging/sep54/sep_power.h
@@ -0,0 +1,60 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+#ifndef __SEP_POWER_H__
+#define __SEP_POWER_H__
+
+/**
+ * dx_sep_power_init() - Init resources for this module
+ */
+void dx_sep_power_init(struct sep_drvdata *drvdata);
+
+/**
+ * dx_sep_power_exit() - Cleanup resources for this module
+ */
+void dx_sep_power_exit(void);
+
+/**
+ * dx_sep_state_change_handler() - Interrupt handler for SeP state changes
+ * @drvdata:	Associated driver context
+ */
+void dx_sep_state_change_handler(struct sep_drvdata *drvdata);
+
+/**
+ * dx_sep_wait_for_state() - Wait for SeP to reach one of the states
+ *				reflected by the given state mask
+ * @state_mask:		The OR of expected SeP states
+ * @timeout_msec:	Timeout of waiting for the state (in millisec.)
+ *
+ * Returns the state reached. In case of wait timeout the returned state
+ * may not be one of the expected states.
+ */
+u32 dx_sep_wait_for_state(u32 state_mask, int timeout_msec);
+
+void dx_sep_pm_runtime_get(void);
+
+void dx_sep_pm_runtime_put(void);
+
+#endif				/* __SEP_POWER_H__ */
diff --git a/drivers/staging/sep54/sep_request.h b/drivers/staging/sep54/sep_request.h
new file mode 100644
index 0000000..dc1f5f0
--- /dev/null
+++ b/drivers/staging/sep54/sep_request.h
@@ -0,0 +1,96 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _SEP_REQUEST_H_
+#define _SEP_REQUEST_H_
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+#include "dx_bitops.h"
+
+#define DX_SEP_REQUEST_GPR_IDX 3
+
+#define DX_SEP_REQUEST_4KB_MASK 0xFFF
+#define DX_SEP_REQUEST_MIN_BUF_SIZE (4*1024)
+#define DX_SEP_REQUEST_MAX_BUF_SIZE (32*1024)
+
+/* Protocol error codes */
+#define DX_SEP_REQUEST_SUCCESS 0x00
+#define DX_SEP_REQUEST_OUT_OF_SYNC_ERR 0x01
+#define DX_SEP_REQUEST_INVALID_REQ_SIZE_ERR 0x02
+#define DX_SEP_REQUEST_INVALID_AGENT_ID_ERR 0x03
+
+/* Sep Request GPR3 format (Sep to Host) */
+#define DX_SEP_REQUEST_AGENT_ID_BIT_OFFSET 0
+#define DX_SEP_REQUEST_AGENT_ID_BIT_SIZE 8
+#define DX_SEP_REQUEST_COUNTER_BIT_OFFSET 8
+#define DX_SEP_REQUEST_COUNTER_BIT_SIZE 8
+#define DX_SEP_REQUEST_REQ_LEN_BIT_OFFSET 16
+#define DX_SEP_REQUEST_REQ_LEN_BIT_SIZE 16
+
+/* Sep Request GPR3 format (Host to Sep) */
+#define DX_SEP_REQUEST_RETURN_CODE_BIT_OFFSET 0
+#define DX_SEP_REQUEST_RETURN_CODE_BIT_SIZE 8
+/* #define DX_SEP_REQUEST_COUNTER_BIT_OFFSET 8 */
+/* #define DX_SEP_REQUEST_COUNTER_BIT_SIZE 8 */
+#define DX_SEP_REQUEST_RESP_LEN_BIT_OFFSET 16
+#define DX_SEP_REQUEST_RESP_LEN_BIT_SIZE 16
+
+/* Get/Set macros */
+#define SEP_REQUEST_GET_AGENT_ID(gpr) BITFIELD_GET(                           \
+	(gpr), DX_SEP_REQUEST_AGENT_ID_BIT_OFFSET,                            \
+	DX_SEP_REQUEST_AGENT_ID_BIT_SIZE)
+#define SEP_REQUEST_SET_AGENT_ID(gpr, val) BITFIELD_SET(                      \
+	(gpr), DX_SEP_REQUEST_AGENT_ID_BIT_OFFSET,                            \
+	DX_SEP_REQUEST_AGENT_ID_BIT_SIZE, (val))
+#define SEP_REQUEST_GET_RETURN_CODE(gpr) BITFIELD_GET(                        \
+	(gpr), DX_SEP_REQUEST_RETURN_CODE_BIT_OFFSET,                         \
+	DX_SEP_REQUEST_RETURN_CODE_BIT_SIZE)
+#define SEP_REQUEST_SET_RETURN_CODE(gpr, val) BITFIELD_SET(                   \
+	(gpr), DX_SEP_REQUEST_RETURN_CODE_BIT_OFFSET,                         \
+	DX_SEP_REQUEST_RETURN_CODE_BIT_SIZE, (val))
+#define SEP_REQUEST_GET_COUNTER(gpr) BITFIELD_GET(                            \
+	(gpr), DX_SEP_REQUEST_COUNTER_BIT_OFFSET,                             \
+	DX_SEP_REQUEST_COUNTER_BIT_SIZE)
+#define SEP_REQUEST_SET_COUNTER(gpr, val) BITFIELD_SET(                       \
+	(gpr), DX_SEP_REQUEST_COUNTER_BIT_OFFSET,                             \
+	DX_SEP_REQUEST_COUNTER_BIT_SIZE, (val))
+#define SEP_REQUEST_GET_REQ_LEN(gpr) BITFIELD_GET(                            \
+	(gpr), DX_SEP_REQUEST_REQ_LEN_BIT_OFFSET,                             \
+	DX_SEP_REQUEST_REQ_LEN_BIT_SIZE)
+#define SEP_REQUEST_SET_REQ_LEN(gpr, val) BITFIELD_SET(                       \
+	(gpr), DX_SEP_REQUEST_REQ_LEN_BIT_OFFSET,                             \
+	DX_SEP_REQUEST_REQ_LEN_BIT_SIZE, (val))
+#define SEP_REQUEST_GET_RESP_LEN(gpr) BITFIELD_GET(                           \
+	(gpr), DX_SEP_REQUEST_RESP_LEN_BIT_OFFSET,                            \
+	DX_SEP_REQUEST_RESP_LEN_BIT_SIZE)
+#define SEP_REQUEST_SET_RESP_LEN(gpr, val) BITFIELD_SET(                      \
+	(gpr), DX_SEP_REQUEST_RESP_LEN_BIT_OFFSET,                            \
+	DX_SEP_REQUEST_RESP_LEN_BIT_SIZE, (val))
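+
+/*
+ * Example (illustrative only): decoding an incoming SeP-to-Host GPR3 word
+ * with the accessors above, and building the Host-to-SeP reply word.
+ * "gpr_in" and "resp_len" are hypothetical local variables.
+ *
+ *	u32 gpr_out = 0;
+ *	u8 agent = SEP_REQUEST_GET_AGENT_ID(gpr_in);
+ *	u8 count = SEP_REQUEST_GET_COUNTER(gpr_in);
+ *	u32 req_len = SEP_REQUEST_GET_REQ_LEN(gpr_in);
+ *
+ *	SEP_REQUEST_SET_COUNTER(gpr_out, count);
+ *	SEP_REQUEST_SET_RESP_LEN(gpr_out, resp_len);
+ *	SEP_REQUEST_SET_RETURN_CODE(gpr_out, DX_SEP_REQUEST_SUCCESS);
+ */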
+
+#endif /*_SEP_REQUEST_H_*/
diff --git a/drivers/staging/sep54/sep_request_mgr.c b/drivers/staging/sep54/sep_request_mgr.c
new file mode 100644
index 0000000..681ddd0
--- /dev/null
+++ b/drivers/staging/sep54/sep_request_mgr.c
@@ -0,0 +1,515 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_SEP_REQUEST
+
+#include <linux/sched.h>
+/*#include <linux/export.h>*/
+#include "dx_driver.h"
+#include "dx_bitops.h"
+#include "sep_log.h"
+#include "dx_reg_base_host.h"
+#include "dx_host.h"
+#define DX_CC_HOST_VIRT	/* must be defined before including dx_cc_regs.h */
+#include "dx_cc_regs.h"
+#include "sep_request.h"
+#include "sep_request_mgr.h"
+#include "dx_sep_kapi.h"
+
+/* The request/response coherent buffer size */
+#define DX_SEP_REQUEST_BUF_SIZE (4*1024)
+#if (DX_SEP_REQUEST_BUF_SIZE < DX_SEP_REQUEST_MIN_BUF_SIZE)
+#error DX_SEP_REQUEST_BUF_SIZE too small
+#endif
+#if (DX_SEP_REQUEST_BUF_SIZE > DX_SEP_REQUEST_MAX_BUF_SIZE)
+#error DX_SEP_REQUEST_BUF_SIZE too big
+#endif
+#if (DX_SEP_REQUEST_BUF_SIZE & DX_SEP_REQUEST_4KB_MASK)
+#error DX_SEP_REQUEST_BUF_SIZE must be a 4KB multiple
+#endif
+
+/* The maximum number of sep request agents */
+/* Valid IDs are 0 to (DX_SEP_REQUEST_MAX_AGENTS-1) */
+#define DX_SEP_REQUEST_MAX_AGENTS 4
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+/* Sep Request state object */
+static struct {
+	u8 *sep_req_buf_p;
+	dma_addr_t sep_req_buf_dma;
+	u8 *host_resp_buf_p;
+	dma_addr_t host_resp_buf_dma;
+	u8 req_counter;
+	wait_queue_head_t agent_event[DX_SEP_REQUEST_MAX_AGENTS];
+	bool agent_valid[DX_SEP_REQUEST_MAX_AGENTS];
+	bool agent_busy[DX_SEP_REQUEST_MAX_AGENTS];
+	bool request_pending;
+	u32 *sep_host_gpr_adr;
+	u32 *host_sep_gpr_adr;
+} sep_req_state;
+
+/* TODO:
+   1) request_pending should use the agent ID instead of a global flag
+   2) Agent ID 0 should be changed to non-valid
+   3) Change the sep request params for sep init to a [] array instead of a
+      pointer
+   4) Consider using a mutex to synchronize all access to the state
+*/
+
+/**
+ * dx_sep_req_handler() - SeP request interrupt handler
+ * @drvdata: The driver private info
+ */
+void dx_sep_req_handler(struct sep_drvdata *drvdata)
+{
+	u8 agent_id;
+	u32 gpr_val;
+	u32 sep_req_error = DX_SEP_REQUEST_SUCCESS;
+	u32 counter_val;
+	u32 req_len;
+
+	/* Read GPR3 value */
+	gpr_val = READ_REGISTER(drvdata->cc_base +
+				SEP_HOST_GPR_REG_OFFSET
+				(DX_SEP_REQUEST_GPR_IDX));
+
+	/* Parse the new gpr value */
+	counter_val = SEP_REQUEST_GET_COUNTER(gpr_val);
+	agent_id = SEP_REQUEST_GET_AGENT_ID(gpr_val);
+	req_len = SEP_REQUEST_GET_REQ_LEN(gpr_val);
+
+	/* Increase the req_counter value in the state structure */
+	sep_req_state.req_counter++;
+
+	if (unlikely(counter_val != sep_req_state.req_counter))
+		/* Verify new req_counter value is equal to the req_counter
+		 * value from the state. If not, proceed to critical error flow
+		 * below with error code SEP_REQUEST_OUT_OF_SYNC_ERR. */
+		sep_req_error = DX_SEP_REQUEST_OUT_OF_SYNC_ERR;
+	else if (unlikely((agent_id >= DX_SEP_REQUEST_MAX_AGENTS) ||
+			  (!sep_req_state.agent_valid[agent_id])))
+		/* Verify that the SeP Req Agent ID is registered in the LUT, if
+		 * not proceed to critical error flow below with error code
+		 * SEP_REQUEST_INVALID_AGENT_ID_ERR. */
+		sep_req_error = DX_SEP_REQUEST_INVALID_AGENT_ID_ERR;
+	else if (unlikely(req_len > DX_SEP_REQUEST_BUF_SIZE))
+		/* Verify the request length is not bigger than the maximum
+		 * allocated request buffer size. If bigger, proceed to the
+		 * critical error flow below with the
+		 * SEP_REQUEST_INVALID_REQ_SIZE_ERR error code. */
+		sep_req_error = DX_SEP_REQUEST_INVALID_REQ_SIZE_ERR;
+
+	if (likely(sep_req_error == DX_SEP_REQUEST_SUCCESS)) {
+		/* Signal the wake up event according to the LUT */
+		sep_req_state.agent_busy[agent_id] = true;
+		wake_up_interruptible(&sep_req_state.agent_event[agent_id]);
+	} else {
+		/* Critical error flow */
+
+		/* Build the new GPR3 value out of the req_counter from the
+		 * state, the error condition and zero response length value */
+		gpr_val = 0;
+		SEP_REQUEST_SET_COUNTER(gpr_val, sep_req_state.req_counter);
+		SEP_REQUEST_SET_RESP_LEN(gpr_val, 0);
+		SEP_REQUEST_SET_RETURN_CODE(gpr_val, sep_req_error);
+		WRITE_REGISTER(drvdata->cc_base +
+			       HOST_SEP_GPR_REG_OFFSET(DX_SEP_REQUEST_GPR_IDX),
+			       gpr_val);
+	}
+}
+
+/**
+ * dx_sep_req_register_agent() - Register an agent
+ * @agent_id: The agent ID
+ * @max_buf_size: A pointer to the max buffer size
+ *
+ * Returns int 0 on success
+ */
+int dx_sep_req_register_agent(u8 agent_id, u32 *max_buf_size)
+{
+	pr_debug("Regsiter SeP Request agent (id=%d)\n", agent_id);
+
+	/* Validate agent ID is in range */
+	if (agent_id >= DX_SEP_REQUEST_MAX_AGENTS) {
+		pr_err("Invalid agent ID\n");
+		return -EINVAL;
+	}
+
+	/* Verify the agent ID is not already registered */
+	if (sep_req_state.agent_valid[agent_id]) {
+		pr_err("Agent already registered\n");
+		return -EINVAL;
+	}
+
+	/* Verify max_buf_size pointer is not NULL */
+	if (max_buf_size == NULL) {
+		pr_err("max_buf_size is NULL\n");
+		return -EINVAL;
+	}
+
+	/* Set "agent_valid" field to TRUE */
+	sep_req_state.agent_valid[agent_id] = true;
+
+	/* Return the request/response max buffer size */
+	*max_buf_size = DX_SEP_REQUEST_BUF_SIZE;
+
+	return 0;
+}
+EXPORT_SYMBOL(dx_sep_req_register_agent);
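+
+/*
+ * Illustrative registration sketch (MY_AGENT_ID is hypothetical): a kernel
+ * client registers once at init time and records the buffer bound for its
+ * later wait/response calls.
+ *
+ *	u32 max_buf;
+ *	int rc = dx_sep_req_register_agent(MY_AGENT_ID, &max_buf);
+ *
+ *	if (rc != 0)
+ *		return rc;
+ *
+ * Requests and responses may then be up to max_buf bytes long.
+ */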
+
+/**
+ * dx_sep_req_unregister_agent() - Unregister an agent
+ * @agent_id: The agent ID
+ *
+ * Returns int 0 on success
+ */
+int dx_sep_req_unregister_agent(u8 agent_id)
+{
+	pr_debug("Unregsiter SeP Request agent (id=%d)\n", agent_id);
+
+	/* Validate agent ID is in range */
+	if (agent_id >= DX_SEP_REQUEST_MAX_AGENTS) {
+		pr_err("Invalid agent ID\n");
+		return -EINVAL;
+	}
+
+	/* Verify the agent ID is registered */
+	if (!sep_req_state.agent_valid[agent_id]) {
+		pr_err("Agent not registered\n");
+		return -EINVAL;
+	}
+
+	/* Verify the SeP agent is not busy */
+	if (sep_req_state.agent_busy[agent_id]) {
+		pr_err("Agent is busy\n");
+		return -EBUSY;
+	}
+
+	/* Set "agent_valid" field to FALSE */
+	sep_req_state.agent_valid[agent_id] = false;
+
+	return 0;
+}
+EXPORT_SYMBOL(dx_sep_req_unregister_agent);
+
+/**
+ * dx_sep_req_wait_for_request() - Wait for an incoming SeP request
+ * @agent_id: The agent ID
+ * @sep_req_buf_p: Pointer to the incoming request buffer
+ * @req_buf_size: Pointer to the incoming request size
+ * @timeout: Time to wait for an incoming request in jiffies
+ *
+ * Returns int 0 on success
+ */
+int dx_sep_req_wait_for_request(u8 agent_id, u8 *sep_req_buf_p,
+				u32 *req_buf_size, u32 timeout)
+{
+	u32 gpr_val;
+
+	pr_debug("Wait for sep request\n");
+	pr_debug("agent_id=%d sep_req_buf_p=0x%p timeout=%d\n",
+		 agent_id, sep_req_buf_p, timeout);
+
+	/* Validate agent ID is in range */
+	if (agent_id >= DX_SEP_REQUEST_MAX_AGENTS) {
+		pr_err("Invalid agent ID\n");
+		return -EINVAL;
+	}
+
+	/* Verify the agent ID is registered */
+	if (!sep_req_state.agent_valid[agent_id]) {
+		pr_err("Agent not registered\n");
+		return -EINVAL;
+	}
+
+	/* Verify the SeP agent is not busy */
+	if (sep_req_state.agent_busy[agent_id]) {
+		pr_err("Agent is busy\n");
+		return -EBUSY;
+	}
+
+	/* Verify that another sep request is not pending */
+	if (sep_req_state.request_pending) {
+		pr_err("Another request is already pending\n");
+		return -EBUSY;
+	}
+
+	/* Verify sep_req_buf_p pointer is not NULL */
+	if (sep_req_buf_p == NULL) {
+		pr_err("sep_req_buf_p is NULL\n");
+		return -EINVAL;
+	}
+
+	/* Verify req_buf_size pointer is not NULL */
+	if (req_buf_size == NULL) {
+		pr_err("req_buf_size is NULL\n");
+		return -EINVAL;
+	}
+
+	/* Verify *req_buf_size is not zero and not bigger than the
+	 * allocated request buffer */
+	if ((*req_buf_size == 0) || (*req_buf_size > DX_SEP_REQUEST_BUF_SIZE)) {
+		pr_err("Invalid request buffer size\n");
+		return -EINVAL;
+	}
+
+	/* Wait for an incoming request */
+	if (wait_event_interruptible_timeout(
+			sep_req_state.agent_event[agent_id],
+			sep_req_state.agent_busy[agent_id],
+			timeout) <= 0) {
+		/* Interrupted or timed out */
+		sep_req_state.agent_busy[agent_id] = false;
+
+		pr_err("Request wait timed out\n");
+		return -EAGAIN;
+	}
+
+	/* Set "request_pending" field to TRUE */
+	sep_req_state.request_pending = true;
+
+	gpr_val = READ_REGISTER(sep_req_state.sep_host_gpr_adr);
+
+	/* If the request length is bigger than the caller's specified
+	 * buffer size, the request is only partially copied to the caller's
+	 * buffer (the first relevant bytes) and the caller is not notified
+	 * of this condition; the remaining bytes in the caller's request
+	 * buffer are left as is, without clearing.
+	 * If the request length is smaller than the caller's specified buffer
+	 * size, only the relevant bytes from the allocated kernel request
+	 * buffer are copied to the caller's request buffer */
+	memcpy(sep_req_buf_p, sep_req_state.sep_req_buf_p,
+	       MIN(*req_buf_size, SEP_REQUEST_GET_REQ_LEN(gpr_val)));
+
+	return 0;
+}
+EXPORT_SYMBOL(dx_sep_req_wait_for_request);
+
+/**
+ * dx_sep_req_send_response() - Send a response to the sep
+ * @agent_id: The agent ID
+ * @host_resp_buf_p: Pointer to the outgoing response buffer
+ * @resp_buf_size: The outgoing response size in bytes
+ *
+ * Returns int 0 on success
+ */
+int dx_sep_req_send_response(u8 agent_id, u8 *host_resp_buf_p,
+			     u32 resp_buf_size)
+{
+	u32 gpr_val;
+
+	pr_debug("Send host response\n");
+	pr_debug("agent_id=%d host_resp_buf_p=0x%p resp_buf_size=%d\n",
+		 agent_id, host_resp_buf_p, resp_buf_size);
+
+	/* Validate agent ID is in range */
+	if (agent_id >= DX_SEP_REQUEST_MAX_AGENTS) {
+		pr_err("Invalid agent ID\n");
+		return -EINVAL;
+	}
+
+	/* Verify the agent ID is registered */
+	if (!sep_req_state.agent_valid[agent_id]) {
+		pr_err("Agent not registered\n");
+		return -EINVAL;
+	}
+
+	/* Verify the SeP agent is busy, i.e., owns a pending request */
+	if (!sep_req_state.agent_busy[agent_id]) {
+		pr_err("Agent is not busy\n");
+		return -EBUSY;
+	}
+
+	/* Verify that a sep request is pending */
+	if (!sep_req_state.request_pending) {
+		pr_err("No requests are pending\n");
+		return -EBUSY;
+	}
+
+	/* Verify host_resp_buf_p pointer is not NULL */
+	if (host_resp_buf_p == NULL) {
+		pr_err("host_resp_buf_p is NULL\n");
+		return -EINVAL;
+	}
+
+	/* Verify resp_buf_size is not zero and not bigger than the allocated
+	 * request buffer */
+	if ((resp_buf_size == 0) || (resp_buf_size > DX_SEP_REQUEST_BUF_SIZE)) {
+		pr_err("Invalid response buffer size\n");
+		return -EINVAL;
+	}
+
+	/* The response is copied from the caller's buffer to the global
+	 * response buffer, only up to the caller's response length */
+	memcpy(sep_req_state.host_resp_buf_p, host_resp_buf_p, resp_buf_size);
+
+	/* Clear the request message buffer */
+	memset(sep_req_state.sep_req_buf_p, 0, DX_SEP_REQUEST_BUF_SIZE);
+
+	/* Clear the response message buffer for all remaining bytes
+	 * beyond the response buffer actual size */
+	memset(sep_req_state.host_resp_buf_p + resp_buf_size, 0,
+	       DX_SEP_REQUEST_BUF_SIZE - resp_buf_size);
+
+	/* Build the new GPR3 value out of the req_counter from the state,
+	 * the response length and the DX_SEP_REQUEST_SUCCESS return code.
+	 * Place the value in GPR3. */
+	gpr_val = 0;
+	SEP_REQUEST_SET_COUNTER(gpr_val, sep_req_state.req_counter);
+	SEP_REQUEST_SET_RESP_LEN(gpr_val, resp_buf_size);
+	SEP_REQUEST_SET_RETURN_CODE(gpr_val, DX_SEP_REQUEST_SUCCESS);
+	WRITE_REGISTER(sep_req_state.host_sep_gpr_adr, gpr_val);
+
+	/* Set "agent_busy" field to TRUE */
+	sep_req_state.agent_busy[agent_id] = false;
+
+	/* Set "request_pending" field to TRUE */
+	sep_req_state.request_pending = false;
+
+	return 0;
+}
+EXPORT_SYMBOL(dx_sep_req_send_response);
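+
+/*
+ * Illustrative agent service loop (a sketch under assumptions: MY_AGENT_ID,
+ * the req/resp buffers and build_response() are all hypothetical). The
+ * protocol is strictly request/response: every successful wait must be
+ * answered by exactly one dx_sep_req_send_response() to release the agent,
+ * while a wait that fails (e.g., times out) simply polls again.
+ *
+ *	for (;;) {
+ *		u32 req_size = sizeof(req);
+ *		u32 resp_size;
+ *
+ *		if (dx_sep_req_wait_for_request(MY_AGENT_ID, req, &req_size,
+ *						msecs_to_jiffies(1000)) != 0)
+ *			continue;
+ *		resp_size = build_response(req, resp);
+ *		dx_sep_req_send_response(MY_AGENT_ID, resp, resp_size);
+ *	}
+ */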
+
+/**
+ * dx_sep_req_get_sep_init_params() - Setup sep init params
+ * @sep_request_params: The sep init parameters array
+ */
+void dx_sep_req_get_sep_init_params(u32 *sep_request_params)
+{
+	sep_request_params[0] = (u32) sep_req_state.sep_req_buf_dma;
+	sep_request_params[1] = (u32) sep_req_state.host_resp_buf_dma;
+	sep_request_params[2] = DX_SEP_REQUEST_BUF_SIZE;
+}
+
+/**
+ * dx_sep_req_enable() - Enable the sep request interrupt handling
+ * @drvdata: Driver private data
+ */
+void dx_sep_req_enable(struct sep_drvdata *drvdata)
+{
+	/* Clear pending interrupts in the SeP request GPR
+	 * (leftovers from init writes to the GPRs) */
+	WRITE_REGISTER(drvdata->cc_base + DX_CC_REG_OFFSET(HOST, ICR),
+		       SEP_HOST_GPR_IRQ_MASK(DX_SEP_REQUEST_GPR_IDX));
+
+	/* set IMR register */
+	drvdata->irq_mask |= SEP_HOST_GPR_IRQ_MASK(DX_SEP_REQUEST_GPR_IDX);
+	WRITE_REGISTER(drvdata->cc_base + DX_CC_REG_OFFSET(HOST, IMR),
+		       ~drvdata->irq_mask);
+}
+
+/**
+ * dx_sep_req_init() - Initialize the sep request state
+ * @drvdata: Driver private data
+ */
+int dx_sep_req_init(struct sep_drvdata *drvdata)
+{
+	int i;
+
+	pr_debug("Initialize SeP Request state\n");
+
+	sep_req_state.request_pending = false;
+	sep_req_state.req_counter = 0;
+
+	for (i = 0; i < DX_SEP_REQUEST_MAX_AGENTS; i++) {
+		sep_req_state.agent_valid[i] = false;
+		sep_req_state.agent_busy[i] = false;
+		init_waitqueue_head(&sep_req_state.agent_event[i]);
+	}
+
+	/* allocate coherent request buffer */
+	sep_req_state.sep_req_buf_p =
+	    dma_alloc_coherent(drvdata->dev, DX_SEP_REQUEST_BUF_SIZE,
+			       &sep_req_state.sep_req_buf_dma, GFP_KERNEL);
+	pr_debug("sep_req_buf_dma=0x%08X sep_req_buf_p=0x%p size=0x%08X\n",
+		      (u32)sep_req_state.sep_req_buf_dma,
+		      sep_req_state.sep_req_buf_p, DX_SEP_REQUEST_BUF_SIZE);
+
+	if (sep_req_state.sep_req_buf_p == NULL) {
+		pr_err("Unable to allocate coherent request buffer\n");
+		return -ENOMEM;
+	}
+
+	/* Clear the request buffer */
+	memset(sep_req_state.sep_req_buf_p, 0, DX_SEP_REQUEST_BUF_SIZE);
+
+	/* allocate coherent response buffer */
+	sep_req_state.host_resp_buf_p =
+	    dma_alloc_coherent(drvdata->dev, DX_SEP_REQUEST_BUF_SIZE,
+			       &sep_req_state.host_resp_buf_dma, GFP_KERNEL);
+	pr_debug(
+		      "host_resp_buf_dma=0x%08X host_resp_buf_p=0x%p size=0x%08X\n",
+		      (u32)sep_req_state.host_resp_buf_dma,
+		      sep_req_state.host_resp_buf_p, DX_SEP_REQUEST_BUF_SIZE);
+
+	if (sep_req_state.host_resp_buf_p == NULL) {
+		pr_err("Unable to allocate coherent response buffer\n");
+		/* Avoid leaking the request buffer on this error path */
+		dma_free_coherent(drvdata->dev, DX_SEP_REQUEST_BUF_SIZE,
+				  sep_req_state.sep_req_buf_p,
+				  sep_req_state.sep_req_buf_dma);
+		return -ENOMEM;
+	}
+
+	/* Clear the response buffer */
+	memset(sep_req_state.host_resp_buf_p, 0, DX_SEP_REQUEST_BUF_SIZE);
+
+	/* Setup the GPR address */
+	sep_req_state.sep_host_gpr_adr = drvdata->cc_base +
+	    SEP_HOST_GPR_REG_OFFSET(DX_SEP_REQUEST_GPR_IDX);
+
+	sep_req_state.host_sep_gpr_adr = drvdata->cc_base +
+	    HOST_SEP_GPR_REG_OFFSET(DX_SEP_REQUEST_GPR_IDX);
+
+	return 0;
+}
+
+/**
+ * dx_sep_req_fini() - Finalize the sep request state
+ * @drvdata: Driver private data
+ */
+void dx_sep_req_fini(struct sep_drvdata *drvdata)
+{
+	int i;
+
+	pr_debug("Finalize SeP Request state\n");
+
+	sep_req_state.request_pending = false;
+	sep_req_state.req_counter = 0;
+	for (i = 0; i < DX_SEP_REQUEST_MAX_AGENTS; i++) {
+		sep_req_state.agent_valid[i] = false;
+		sep_req_state.agent_busy[i] = false;
+	}
+
+	dma_free_coherent(drvdata->dev, DX_SEP_REQUEST_BUF_SIZE,
+			  sep_req_state.sep_req_buf_p,
+			  sep_req_state.sep_req_buf_dma);
+
+	dma_free_coherent(drvdata->dev, DX_SEP_REQUEST_BUF_SIZE,
+			  sep_req_state.host_resp_buf_p,
+			  sep_req_state.host_resp_buf_dma);
+}
diff --git a/drivers/staging/sep54/sep_request_mgr.h b/drivers/staging/sep54/sep_request_mgr.h
new file mode 100644
index 0000000..6d79651
--- /dev/null
+++ b/drivers/staging/sep54/sep_request_mgr.h
@@ -0,0 +1,63 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _SEP_REQUEST_KERNEL_API_H_
+#define _SEP_REQUEST_KERNEL_API_H_
+
+#include <linux/types.h>
+#include "dx_driver.h"
+
+/**
+ * dx_sep_req_handler() - SeP request interrupt handler
+ * @drvdata: The driver private info
+ */
+void dx_sep_req_handler(struct sep_drvdata *drvdata);
+
+/**
+ * dx_sep_req_get_sep_init_params() - Setup sep init params
+ * @sep_request_params: The sep init parameters array
+ */
+void dx_sep_req_get_sep_init_params(u32 *sep_request_params);
+
+/**
+ * dx_sep_req_enable() - Enable the sep request interrupt handling
+ * @drvdata: Driver private data
+ */
+void dx_sep_req_enable(struct sep_drvdata *drvdata);
+
+/**
+ * dx_sep_req_init() - Initialize the sep request state
+ * @drvdata: Driver private data
+ */
+int dx_sep_req_init(struct sep_drvdata *drvdata);
+
+/**
+ * dx_sep_req_fini() - Finalize the sep request state
+ * @drvdata: Driver private data
+ */
+void dx_sep_req_fini(struct sep_drvdata *drvdata);
+
+#endif /*_SEP_REQUEST_KERNEL_API_H_*/
diff --git a/drivers/staging/sep54/sep_rpc.h b/drivers/staging/sep54/sep_rpc.h
new file mode 100644
index 0000000..3199a14
--- /dev/null
+++ b/drivers/staging/sep54/sep_rpc.h
@@ -0,0 +1,109 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef __SEP_RPC_H__
+#define __SEP_RPC_H__
+
+/* SeP RPC infrastructure API */
+#ifdef __KERNEL__
+#include <linux/types.h>
+
+#else
+#include "dx_pal_types.h"
+
+#endif /*__KERNEL__*/
+
+/* Maximum size of SeP RPC message in bytes */
+#define SEP_RPC_MAX_MSG_SIZE 8191
+#define SEP_RPC_MAX_WORKSPACE_SIZE 8191
+
+/* The maximum allowed user memory references per function
+   (CRYS requires only 2, but GPAPI/TEE needs up to 4) */
+#define SEP_RPC_MAX_MEMREF_PER_FUNC 4
+
+/* If this macro is not provided by the includer of this file,
+   the log message would be dropped */
+#ifndef SEP_RPC_LOG
+#define SEP_RPC_LOG(format, ...) do {} while (0)
+#endif
+
+#define SEP_RPC_ASSERT(cond, inval_param_retcode) do {\
+	if (!(cond)) {\
+		SEP_RPC_LOG("SEP_RPC_ASSERT: %s\n", #cond);\
+		return inval_param_retcode;\
+	} \
+} while (0)
+
+/* NOTE:
+   All data must be in little-endian (SeP) byte order */
+
+enum seprpc_retcode {
+	SEPRPC_RET_OK = 0,
+	SEPRPC_RET_ERROR,	/*Generic error code (not one of the others) */
+	SEPRPC_RET_EINVAL_AGENT,	/* Unknown agent ID */
+	SEPRPC_RET_EINVAL_FUNC,	/* Unknown function ID for given agent */
+	SEPRPC_RET_EINVAL,	/* Invalid parameter */
+	SEPRPC_RET_ENORSC,	/* Not enough resources to complete request */
+	SEPRPC_RET_RESERVE32 = 0x7FFFFFFF	/* assure this enum is 32b */
+};
+
+enum seprpc_memref_type {
+	SEPRPC_MEMREF_NULL = 0,	/* Invalid memory reference */
+	SEPRPC_MEMREF_EMBED = 1,/* Data embedded in parameters message */
+	SEPRPC_MEMREF_DLLI = 2,
+	SEPRPC_MEMREF_MLLI = 3,
+	SEPRPC_MEMREF_MAX = SEPRPC_MEMREF_MLLI,
+	SEPRPC_MEMREF_RESERVE32 = 0x7FFFFFFF	/* assure this enum is 32b */
+};
+
+#pragma pack(push)
+#pragma pack(4)
+/* A structure to pass a host memory reference */
+struct seprpc_memref {
+	enum seprpc_memref_type ref_type;
+	u32 location;
+	u32 size;
+	u32 count;
+	/* SEPRPC_MEMREF_EMBED: location= offset in SepRpc_Params .
+	 * size= data size in bytes. count= N/A */
+	/* SEPRPC_MEMREF_DLLI: location= DMA address of data in host memory.
+	 * size= data size in bytes. count= N/A. */
+	/* SEPRPC_MEMREF_MLLI: location= DMA address of first MLLI table.
+	 * size= size in bytes of first table.
+	 * count= Num. of MLLI tables. */
+};
+
+struct seprpc_params {
+	u32 num_of_memrefs;/* Number of elements in the memref array */
+	struct seprpc_memref memref[1];
+	/* This array is actually sized to num_of_memrefs entries
+	 * (i.e., memref[1] is just a placeholder that may be void) */
+	/* Following this array come the function-specific parameters */
+} __attribute__ ((__may_alias__));
+
+#pragma pack(pop)
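+
+/* Sizing note (illustrative, not from the original sources): a parameters
+ * message carrying N memory references followed by "extra" bytes of
+ * function-specific data occupies
+ *
+ *	sizeof(struct seprpc_params) +
+ *		(N - 1) * sizeof(struct seprpc_memref) + extra
+ *
+ * bytes, since the declared memref[1] array is only a placeholder for the
+ * actual num_of_memrefs entries.
+ */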
+
+#endif /*__SEP_RPC_H__*/
diff --git a/drivers/staging/sep54/sep_sram_map.h b/drivers/staging/sep54/sep_sram_map.h
new file mode 100644
index 0000000..2c7db45
--- /dev/null
+++ b/drivers/staging/sep54/sep_sram_map.h
@@ -0,0 +1,43 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+/* This file contains the definitions of the OTP data that the SEP copies
+ * into the SRAM during the first boot process */
+
+#ifndef _SEP_SRAM_MAP_
+#define _SEP_SRAM_MAP_
+
+#define DX_FIRST_OEM_KEY_OFFSET_IN_SRAM         0x0
+#define DX_SECOND_OEM_KEY_OFFSET_IN_SRAM        0x4
+#define DX_THIRD_OEM_KEY_OFFSET_IN_SRAM         0x8
+#define DX_LCS_OFFSET_IN_SRAM                   0xC
+#define DX_MISC_OFFSET_IN_SRAM                  0xD
+#define DX_CC_INIT_MSG_OFFSET_IN_SRAM		0x100
+#define DX_PKA_MEMORY_OFFSET_IN_SRAM		0x200
+
+#endif /*_SEP_SRAM_MAP_*/
diff --git a/drivers/staging/sep54/sep_sw_desc.h b/drivers/staging/sep54/sep_sw_desc.h
new file mode 100644
index 0000000..8538c4f
--- /dev/null
+++ b/drivers/staging/sep54/sep_sw_desc.h
@@ -0,0 +1,468 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _SEP_SW_DESC_H_
+#define _SEP_SW_DESC_H_
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+#include "dx_bitops.h"
+#include "sep_rpc.h"
+
+/* Common descriptor fields access (type independent) */
+/* To be used with fields: TYPE, RET_CODE, COOKIE     */
+#define SEP_SW_DESC_GET(desc_p, desc_field) BITFIELD_GET(                     \
+	((u32 *)(desc_p))[SEP_SW_DESC_ ## desc_field ## _WORD_OFFSET],   \
+	SEP_SW_DESC_ ## desc_field ## _BIT_OFFSET,			      \
+	SEP_SW_DESC_ ## desc_field ## _BIT_SIZE)
+#define SEP_SW_DESC_SET(desc_p, desc_field, new_val) BITFIELD_SET(            \
+	((u32 *)(desc_p))[SEP_SW_DESC_ ## desc_field ## _WORD_OFFSET],   \
+	SEP_SW_DESC_ ## desc_field ## _BIT_OFFSET,			      \
+	SEP_SW_DESC_ ## desc_field ## _BIT_SIZE,			      \
+	new_val)
+
+/* Type specific descriptor fields access */
+#define SEP_SW_DESC_GET4TYPE(desc_p, desc_type, desc_field) BITFIELD_GET(     \
+	((u32 *)(desc_p))                                                \
+	[SEP_SW_DESC_ ## desc_type ## _ ## desc_field ## _WORD_OFFSET],	      \
+	SEP_SW_DESC_ ## desc_type ## _ ## desc_field ##  _BIT_OFFSET,	      \
+	SEP_SW_DESC_ ## desc_type ## _ ## desc_field ## _BIT_SIZE)
+#define SEP_SW_DESC_SET4TYPE(desc_p, desc_type, desc_field, new_val)          \
+	BITFIELD_SET(							      \
+	((u32 *)(desc_p))                                                \
+	[SEP_SW_DESC_ ## desc_type ## _ ## desc_field ## _WORD_OFFSET],       \
+	SEP_SW_DESC_ ## desc_type ## _ ## desc_field ##  _BIT_OFFSET,	      \
+	SEP_SW_DESC_ ## desc_type ## _ ## desc_field ## _BIT_SIZE, new_val)
+
+#define SEP_SW_DESC_INIT(desc_p) \
+	memset(desc_p, 0, SEP_SW_DESC_WORD_SIZE * sizeof(u32))
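+
+/*
+ * Usage sketch (illustrative): initializing a descriptor scratchpad and
+ * filling it with the common and CRYPTO_OP-specific accessors; the field
+ * values and "hcb_dma_addr" are arbitrary placeholders.
+ *
+ *	u32 desc[SEP_SW_DESC_WORD_SIZE];
+ *
+ *	SEP_SW_DESC_INIT(desc);
+ *	SEP_SW_DESC_SET(desc, TYPE, SEP_SW_DESC_TYPE_CRYPTO_OP);
+ *	SEP_SW_DESC_SET4TYPE(desc, CRYPTO_OP, PROC_MODE, SEP_PROC_MODE_FIN);
+ *	SEP_SW_DESC_SET4TYPE(desc, CRYPTO_OP, HCB_ADDR, hcb_dma_addr);
+ */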
+
+/* Total descriptor size in 32b words */
+#define SEP_SW_DESC_WORD_SIZE 8
+#define SEP_SW_DESC_WORD_SIZE_LOG 3
+
+/***********************************/
+/* Common bit fields definitions   */
+/***********************************/
+ /* Descriptor type: TYPE */
+#define SEP_SW_DESC_TYPE_WORD_OFFSET 0
+#define SEP_SW_DESC_TYPE_BIT_OFFSET 0
+#define SEP_SW_DESC_TYPE_BIT_SIZE 4
+/* Descriptor type encoding */
+enum sep_sw_desc_type {
+	SEP_SW_DESC_TYPE_NULL = 0,
+	SEP_SW_DESC_TYPE_CRYPTO_OP = 0x1,
+	SEP_SW_DESC_TYPE_RPC_MSG = 0x2,
+	SEP_SW_DESC_TYPE_APP_REQ = 0x3,
+	SEP_SW_DESC_TYPE_LOAD_OP = 0x4,
+	SEP_SW_DESC_TYPE_COMBINED_OP = 0x5,
+	SEP_SW_DESC_TYPE_SLEEP_REQ = 0x6,
+	SEP_SW_DESC_TYPE_DEBUG = 0xF
+	    /* Only 4 bits - do not extend to 32b */
+};
+
+enum sep_sw_desc_retcode {
+	SEP_SW_DESC_RET_OK = 0,
+	SEP_SW_DESC_RET_EINVAL_DESC_TYPE	/* Invalid descriptor type */
+};
+
+/* Return code: RET_CODE */
+#define SEP_SW_DESC_RET_CODE_WORD_OFFSET 6
+#define SEP_SW_DESC_RET_CODE_BIT_OFFSET 0
+#define SEP_SW_DESC_RET_CODE_BIT_SIZE 32
+
+/* Descriptor cookie: COOKIE */
+#define SEP_SW_DESC_COOKIE_WORD_OFFSET 7
+#define SEP_SW_DESC_COOKIE_BIT_OFFSET 0
+#define SEP_SW_DESC_COOKIE_BIT_SIZE 32
+
+/****************************************/
+/* Crypto-Op descriptor type: CRYPTO_OP */
+/****************************************/
+
+/* L bit: Load cache: L */
+#define SEP_SW_DESC_CRYPTO_OP_L_WORD_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_L_BIT_OFFSET 31
+#define SEP_SW_DESC_CRYPTO_OP_L_BIT_SIZE 1
+
+/* I bit: Initialize context: I */
+#define SEP_SW_DESC_CRYPTO_OP_I_WORD_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_I_BIT_OFFSET 30
+#define SEP_SW_DESC_CRYPTO_OP_I_BIT_SIZE 1
+
+/* Process mode: PROC_MODE */
+#define SEP_SW_DESC_CRYPTO_OP_PROC_MODE_WORD_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_PROC_MODE_BIT_OFFSET 28
+#define SEP_SW_DESC_CRYPTO_OP_PROC_MODE_BIT_SIZE 2
+/* Process mode field options */
+enum sep_proc_mode {
+	SEP_PROC_MODE_NOP = 0,
+	SEP_PROC_MODE_PROC_T = 1,	/* Process (Text data) */
+	SEP_PROC_MODE_FIN = 2,	/* Finalize (optional: with text data) */
+	SEP_PROC_MODE_PROC_A = 3	/* Process (Additional/Auth. data) */
+	    /* Only 2b - do not extend to 32b */
+};
+
+/* SeP/FW Cache Index: FW_CACHE_IDX */
+#define SEP_SW_DESC_CRYPTO_OP_FW_CACHE_IDX_WORD_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_FW_CACHE_IDX_BIT_OFFSET 4
+#define SEP_SW_DESC_CRYPTO_OP_FW_CACHE_IDX_BIT_SIZE 8
+
+/* HCB address: HCB_ADDR */
+#define SEP_SW_DESC_CRYPTO_OP_HCB_ADDR_WORD_OFFSET 3
+#define SEP_SW_DESC_CRYPTO_OP_HCB_ADDR_BIT_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_HCB_ADDR_BIT_SIZE 32
+
+/* IFT: IFT_ADDR, IFT_SIZE, IFT_NUM */
+#define SEP_SW_DESC_CRYPTO_OP_IFT_ADDR_WORD_OFFSET 1
+#define SEP_SW_DESC_CRYPTO_OP_IFT_ADDR_BIT_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_IFT_ADDR_BIT_SIZE 32
+#define SEP_SW_DESC_CRYPTO_OP_IFT_SIZE_WORD_OFFSET 4
+#define SEP_SW_DESC_CRYPTO_OP_IFT_SIZE_BIT_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_IFT_SIZE_BIT_SIZE 16
+#define SEP_SW_DESC_CRYPTO_OP_IFT_NUM_WORD_OFFSET 5
+#define SEP_SW_DESC_CRYPTO_OP_IFT_NUM_BIT_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_IFT_NUM_BIT_SIZE 16
+
+/* OFT: OFT_ADDR, OFT_SIZE, OFT_NUM */
+#define SEP_SW_DESC_CRYPTO_OP_OFT_ADDR_WORD_OFFSET 2
+#define SEP_SW_DESC_CRYPTO_OP_OFT_ADDR_BIT_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_OFT_ADDR_BIT_SIZE 32
+#define SEP_SW_DESC_CRYPTO_OP_OFT_SIZE_WORD_OFFSET 4
+#define SEP_SW_DESC_CRYPTO_OP_OFT_SIZE_BIT_OFFSET 16
+#define SEP_SW_DESC_CRYPTO_OP_OFT_SIZE_BIT_SIZE 16
+#define SEP_SW_DESC_CRYPTO_OP_OFT_NUM_WORD_OFFSET 5
+#define SEP_SW_DESC_CRYPTO_OP_OFT_NUM_BIT_OFFSET 16
+#define SEP_SW_DESC_CRYPTO_OP_OFT_NUM_BIT_SIZE 16
+
+/********************************************/
+/* Combined-Op descriptor type: COMBINED_OP */
+/********************************************/
+
+/* L bit: Load cache: L */
+#define SEP_SW_DESC_COMBINED_OP_L_WORD_OFFSET \
+	SEP_SW_DESC_CRYPTO_OP_L_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_L_BIT_OFFSET SEP_SW_DESC_CRYPTO_OP_L_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_L_BIT_SIZE SEP_SW_DESC_CRYPTO_OP_L_BIT_SIZE
+
+/* I bit: Initialize context: I */
+#define SEP_SW_DESC_COMBINED_OP_I_WORD_OFFSET \
+	SEP_SW_DESC_CRYPTO_OP_I_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_I_BIT_OFFSET SEP_SW_DESC_CRYPTO_OP_I_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_I_BIT_SIZE SEP_SW_DESC_CRYPTO_OP_I_BIT_SIZE
+
+/* Process mode: PROC_MODE */
+#define SEP_SW_DESC_COMBINED_OP_PROC_MODE_WORD_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_PROC_MODE_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_PROC_MODE_BIT_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_PROC_MODE_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_PROC_MODE_BIT_SIZE \
+			SEP_SW_DESC_CRYPTO_OP_PROC_MODE_BIT_SIZE
+
+/* Configuration scheme: CONFIG_SCHEME */
+#define SEP_SW_DESC_COMBINED_OP_CONFIG_SCHEME_WORD_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_HCB_ADDR_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_CONFIG_SCHEME_BIT_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_HCB_ADDR_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_CONFIG_SCHEME_BIT_SIZE \
+			SEP_SW_DESC_CRYPTO_OP_HCB_ADDR_BIT_SIZE
+
+/* IFT: IFT_ADDR, IFT_SIZE, IFT_NUM */
+#define SEP_SW_DESC_COMBINED_OP_IFT_ADDR_WORD_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_IFT_ADDR_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_IFT_ADDR_BIT_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_IFT_ADDR_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_IFT_ADDR_BIT_SIZE \
+			SEP_SW_DESC_CRYPTO_OP_IFT_ADDR_BIT_SIZE
+#define SEP_SW_DESC_COMBINED_OP_IFT_SIZE_WORD_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_IFT_SIZE_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_IFT_SIZE_BIT_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_IFT_SIZE_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_IFT_SIZE_BIT_SIZE \
+			SEP_SW_DESC_CRYPTO_OP_IFT_SIZE_BIT_SIZE
+#define SEP_SW_DESC_COMBINED_OP_IFT_NUM_WORD_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_IFT_NUM_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_IFT_NUM_BIT_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_IFT_NUM_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_IFT_NUM_BIT_SIZE \
+			SEP_SW_DESC_CRYPTO_OP_IFT_NUM_BIT_SIZE
+
+/* OFT: OFT_ADDR, OFT_SIZE, OFT_NUM */
+#define SEP_SW_DESC_COMBINED_OP_OFT_ADDR_WORD_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_OFT_ADDR_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_OFT_ADDR_BIT_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_OFT_ADDR_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_OFT_ADDR_BIT_SIZE \
+			SEP_SW_DESC_CRYPTO_OP_OFT_ADDR_BIT_SIZE
+#define SEP_SW_DESC_COMBINED_OP_OFT_SIZE_WORD_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_OFT_SIZE_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_OFT_SIZE_BIT_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_OFT_SIZE_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_OFT_SIZE_BIT_SIZE \
+			SEP_SW_DESC_CRYPTO_OP_OFT_SIZE_BIT_SIZE
+#define SEP_SW_DESC_COMBINED_OP_OFT_NUM_WORD_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_OFT_NUM_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_OFT_NUM_BIT_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_OFT_NUM_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_OFT_NUM_BIT_SIZE \
+			SEP_SW_DESC_CRYPTO_OP_OFT_NUM_BIT_SIZE
+
+/* Combined scheme macros:
+   This set of macros is meant for configuration scheme encoding
+   from the user-level interface to the SEP combined driver.
+*/
+#define SEP_ENGINE_TYPE_BIT_SHIFT 0
+#define SEP_ENGINE_TYPE_BIT_SIZE 4
+#define SEP_ENGINE_SRC_BIT_SHIFT 4
+#define SEP_ENGINE_SRC_BIT_SIZE 4
+#define SEP_ENGINE_SLOT_BIT_SIZE \
+		(SEP_ENGINE_SRC_BIT_SIZE + SEP_ENGINE_TYPE_BIT_SIZE)
+
+/******************************* MACROS ***********************************/
+#define _sep_comb_eng_pack_item(eng_src, eng_type) \
+		(((eng_src) << SEP_ENGINE_SRC_BIT_SHIFT) | \
+		 ((eng_type) << SEP_ENGINE_TYPE_BIT_SHIFT))
+
+#define _sep_comb_eng_pack_n_shift(src, type, slot) \
+		(_sep_comb_eng_pack_item(src, type) << \
+		(slot * SEP_ENGINE_SLOT_BIT_SIZE))
+
+#define sep_comb_eng_props_set(cfg_p, eng_idx, eng_src, eng_type) do { \
+	BITFIELD_SET(*cfg_p, \
+		(eng_idx * SEP_ENGINE_SLOT_BIT_SIZE), \
+		SEP_ENGINE_SLOT_BIT_SIZE, 0); \
+	BITFIELD_SET(*cfg_p, \
+		(eng_idx * SEP_ENGINE_SLOT_BIT_SIZE), \
+		SEP_ENGINE_SLOT_BIT_SIZE, \
+		_sep_comb_eng_pack_item(eng_src, eng_type)); \
+} while (0)
+
+#define sep_comb_eng_props_get(cfg_p, eng_idx, eng_src, eng_type) do { \
+	*(eng_type) = BITFIELD_GET(*cfg_p, \
+				(eng_idx * SEP_ENGINE_SLOT_BIT_SIZE),\
+				SEP_ENGINE_TYPE_BIT_SIZE); \
+	*(eng_src) = BITFIELD_GET(*cfg_p, \
+				(eng_idx * SEP_ENGINE_SLOT_BIT_SIZE) \
+				+ SEP_ENGINE_TYPE_BIT_SIZE, \
+				SEP_ENGINE_SRC_BIT_SIZE); \
+} while (0)
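+
+/*
+ * Example (illustrative): encoding a two-slot combined scheme where slot 0
+ * takes its input from DIN and slot 1 chains from the engine in slot 0.
+ * The ENG_SRC_xxx / ENG_TYPE_xxx constants are placeholders for the values
+ * defined by the combined-driver interface.
+ *
+ *	u32 cfg = 0;
+ *
+ *	sep_comb_eng_props_set(&cfg, 0, ENG_SRC_DIN, ENG_TYPE_AES);
+ *	sep_comb_eng_props_set(&cfg, 1, ENG_SRC_ENGINE0, ENG_TYPE_HASH);
+ */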
+
+/******************************************/
+/* Message-Op descriptor type: RPC_MSG    */
+/******************************************/
+
+/* Agent ID: AGENT_ID */
+#define SEP_SW_DESC_RPC_MSG_AGENT_ID_WORD_OFFSET 1
+#define SEP_SW_DESC_RPC_MSG_AGENT_ID_BIT_OFFSET 0
+#define SEP_SW_DESC_RPC_MSG_AGENT_ID_BIT_SIZE 8
+
+/* Function ID: FUNC_ID */
+#define SEP_SW_DESC_RPC_MSG_FUNC_ID_WORD_OFFSET 1
+#define SEP_SW_DESC_RPC_MSG_FUNC_ID_BIT_OFFSET 16
+#define SEP_SW_DESC_RPC_MSG_FUNC_ID_BIT_SIZE 16
+
+/* HMB: HMB_ADDR , HMB_SIZE */
+#define SEP_SW_DESC_RPC_MSG_HMB_ADDR_WORD_OFFSET 2
+#define SEP_SW_DESC_RPC_MSG_HMB_ADDR_BIT_OFFSET 0
+#define SEP_SW_DESC_RPC_MSG_HMB_ADDR_BIT_SIZE 32
+#define SEP_SW_DESC_RPC_MSG_HMB_SIZE_WORD_OFFSET 3
+#define SEP_SW_DESC_RPC_MSG_HMB_SIZE_BIT_OFFSET 0
+#define SEP_SW_DESC_RPC_MSG_HMB_SIZE_BIT_SIZE 13
+
+/************************************************/
+/* SeP Applet Request descriptor type: APP_REQ  */
+/************************************************/
+
+/* Request Type: REQ_TYPE */
+#define SEP_SW_DESC_APP_REQ_REQ_TYPE_WORD_OFFSET 0
+#define SEP_SW_DESC_APP_REQ_REQ_TYPE_BIT_OFFSET 4
+#define SEP_SW_DESC_APP_REQ_REQ_TYPE_BIT_SIZE 2
+
+/* Session ID: SESSION_ID */
+#define SEP_SW_DESC_APP_REQ_SESSION_ID_WORD_OFFSET 0
+#define SEP_SW_DESC_APP_REQ_SESSION_ID_BIT_OFFSET 16
+#define SEP_SW_DESC_APP_REQ_SESSION_ID_BIT_SIZE 12
+
+/* Internal error: INTERNAL_ERR */
+#define SEP_SW_DESC_APP_REQ_INTERNAL_ERR_WORD_OFFSET 0
+#define SEP_SW_DESC_APP_REQ_INTERNAL_ERR_BIT_OFFSET 31
+#define SEP_SW_DESC_APP_REQ_INTERNAL_ERR_BIT_SIZE 1
+
+/* In-Params. Buffer Address: IN_PARAMS_ADDR */
+#define SEP_SW_DESC_APP_REQ_IN_PARAMS_ADDR_WORD_OFFSET 1
+#define SEP_SW_DESC_APP_REQ_IN_PARAMS_ADDR_BIT_OFFSET 0
+#define SEP_SW_DESC_APP_REQ_IN_PARAMS_ADDR_BIT_SIZE 32
+
+/* Return codes for APP_REQ descriptor */
+enum sepapp_retcode {
+	SEPAPP_RET_OK = 0,
+	SEPAPP_RET_EINVAL_QUEUE,	/* Request sent on the wrong SW queue */
+	SEPAPP_RET_EINVAL,	/* Invalid parameters in descriptor */
+};
+
+/* REQ_TYPE field encoding */
+enum sepapp_req_type {
+	SEPAPP_REQ_TYPE_SESSION_OPEN = 1,
+	SEPAPP_REQ_TYPE_SESSION_CLOSE = 2,
+	SEPAPP_REQ_TYPE_COMMAND_INVOKE = 3
+};
+
+/** in-params. data types **/
+
+#define SEPAPP_UUID_SIZE 16
+#define SEPAPP_MAX_PARAMS 4
+#define SEPAPP_MAX_AUTH_DATA_SIZE 16	/* For Application UUID case */
+
+enum sepapp_param_type {
+	SEPAPP_PARAM_TYPE_NULL = 0,
+	SEPAPP_PARAM_TYPE_VAL = 1,
+	SEPAPP_PARAM_TYPE_MEMREF = 2
+};
+
+enum sepapp_data_direction {
+	SEPAPP_DIR_NULL = 0,
+	SEPAPP_DIR_IN = 1,
+	SEPAPP_DIR_OUT = (1 << 1),
+	SEPAPP_DIR_INOUT = SEPAPP_DIR_IN | SEPAPP_DIR_OUT
+};
+
+/* Descriptor for "by value" parameter */
+struct sepapp_val_param {
+	/*enum sepapp_data_direction */ u8 dir;
+	u32 data[2];
+};
+
+/* Depends on seprpc data type defined in sep_rpc.h */
+union sepapp_client_param {
+	struct sepapp_val_param val;
+	struct seprpc_memref memref;
+};
+
+struct sepapp_client_params {
+	u8 params_types[SEPAPP_MAX_PARAMS];
+	union sepapp_client_param params[SEPAPP_MAX_PARAMS];
+};
+
+/* In-params. for SESSION_OPEN request type */
+struct sepapp_in_params_session_open {
+	u8 app_uuid[SEPAPP_UUID_SIZE];
+	u32 auth_method;
+	u8 auth_data[SEPAPP_MAX_AUTH_DATA_SIZE];
+	struct sepapp_client_params client_params;
+};
+
+struct sepapp_in_params_command_invoke {
+	u32 command_id;
+	struct sepapp_client_params client_params;
+};
+
+/* Return codes for SLEEP_REQ descriptor */
+enum sepslp_mode_req_retcode {
+	SEPSLP_MODE_REQ_RET_OK = 0,
+	SEPSLP_MODE_REQ_EGEN,	/* general error */
+	SEPSLP_MODE_REQ_EINVAL_QUEUE,	/* Request sent on the wrong SW queue */
+	SEPSLP_MODE_REQ_EBUSY,/* Request sent while desc. queue is not empty */
+	SEPSLP_MODE_REQ_EABORT	/* Applet requested aborting this request */
+};
+
+/****************************************/
+/* Load-Op descriptor type: LOAD_OP */
+/****************************************/
+
+/* SeP/FW Cache Index: FW_CACHE_IDX */
+#define SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_WORD_OFFSET 1
+#define SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_BIT_OFFSET(slot) ((slot) * 8)
+#define SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_BIT_SIZE 8
+
+/* HCB address: HCB_ADDR */
+#define SEP_SW_DESC_LOAD_OP_HCB_ADDR_WORD_OFFSET(slot) ((slot) + 2)
+#define SEP_SW_DESC_LOAD_OP_HCB_ADDR_BIT_OFFSET 1
+#define SEP_SW_DESC_LOAD_OP_HCB_ADDR_BIT_SIZE 31
+
+/* L bit: Load cache: L */
+#define SEP_SW_DESC_LOAD_OP_L_WORD_OFFSET(slot) ((slot) + 2)
+#define SEP_SW_DESC_LOAD_OP_L_BIT_OFFSET 0
+#define SEP_SW_DESC_LOAD_OP_L_BIT_SIZE 1
+
+/*****************************/
+/*** Descriptor copy flows ***/
+/*****************************/
+/* Copy host descriptor scratchpad to descriptor queue buffer */
+#ifdef __BIG_ENDIAN
+
+/* Verify descriptor copy flows assumptions at compile time:
+   assumes "retcode" and "cookie" are the last words */
+#if (SEP_SW_DESC_RET_CODE_WORD_OFFSET != 6)
+#error SW_DESC_RET_CODE location assumption is broken!
+#endif
+#if (SEP_SW_DESC_COOKIE_WORD_OFFSET != 7)
+#error SW_DESC_COOKIE location assumption is broken!
+#endif
+
+#define SEP_SW_DESC_COPY_TO_SEP(queue_desc_p, spad_desc_p) do {	               \
+	u32 *cur_q_desc_word_p = (u32 *)queue_desc_p;                \
+	u32 *cur_spad_desc_word_p = (u32 *)spad_desc_p;              \
+	int i;	                                                               \
+	/* First 6 words are input data to SeP-FW; must be in SeP endianness */ \
+	/* Copy 7th word too in order to init./clear retcode field	    */ \
+	for (i = 0; i <= SEP_SW_DESC_RET_CODE_WORD_OFFSET; i++) {              \
+		*cur_q_desc_word_p = cpu_to_le32(*cur_spad_desc_word_p);       \
+		cur_spad_desc_word_p++;                                        \
+		cur_q_desc_word_p++;                                           \
+	}                                                                      \
+	/* Word 8 is the cookie which is referenced only by the host */        \
+	/* No need to swap endianness */                                       \
+	*cur_q_desc_word_p = *cur_spad_desc_word_p;                            \
+} while (0)
+
+/* and vice-versa */
+#define SEP_SW_DESC_COPY_FROM_SEP(spad_desc_p, queue_desc_p) do {              \
+	u32 *cur_q_desc_word_p = (u32 *)queue_desc_p;                \
+	u32 *cur_spad_desc_word_p = (u32 *)spad_desc_p;              \
+	int i;	                                                               \
+	/* First 6 words are input data to SeP-FW, in SeP endianness */        \
+	/* Copy 7th word too in order to get retcode field	   */          \
+	for (i = 0; i <= SEP_SW_DESC_RET_CODE_WORD_OFFSET; i++) {              \
+		*cur_spad_desc_word_p = le32_to_cpu(*cur_q_desc_word_p);       \
+		cur_spad_desc_word_p++;                                        \
+		cur_q_desc_word_p++;                                           \
+	}                                                                      \
+	/* Word 8 is the cookie which is referenced only by the host */        \
+	/* No need to swap endianness */                                       \
+	*cur_spad_desc_word_p = *cur_q_desc_word_p;                            \
+} while (0)
+
+#else				/* __LITTLE_ENDIAN - simple memcpy */
+#define SEP_SW_DESC_COPY_TO_SEP(queue_desc_p, spad_desc_p)                     \
+	memcpy(queue_desc_p, spad_desc_p, SEP_SW_DESC_WORD_SIZE<<2)
+
+#define SEP_SW_DESC_COPY_FROM_SEP(spad_desc_p, queue_desc_p)                 \
+	memcpy(spad_desc_p, queue_desc_p, SEP_SW_DESC_WORD_SIZE<<2)
+#endif
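+
+/*
+ * Usage sketch (illustrative; "queue_slot_p" and "spad_desc" are
+ * hypothetical): the enqueue path builds a descriptor in a host
+ * scratchpad and commits it to the shared queue buffer in SeP
+ * (little-endian) byte order; the completion path copies it back so
+ * fields can be read in host order:
+ *
+ *	SEP_SW_DESC_COPY_TO_SEP(queue_slot_p, &spad_desc);
+ *	...
+ *	SEP_SW_DESC_COPY_FROM_SEP(&spad_desc, queue_slot_p);
+ */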
+
+#endif /*_SEP_SW_DESC_H_*/
diff --git a/drivers/staging/sep54/sep_sysfs.c b/drivers/staging/sep54/sep_sysfs.c
new file mode 100644
index 0000000..9e6448b
--- /dev/null
+++ b/drivers/staging/sep54/sep_sysfs.c
@@ -0,0 +1,468 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_SYSFS
+
+#include "dx_driver.h"
+#include "dx_driver_abi.h"
+#include "desc_mgr.h"
+#include "sep_log.h"
+#include "sep_sysfs.h"
+
+#define MAX_QUEUE_NAME_LEN 50
+
+struct sep_stats {
+	spinlock_t stat_lock;
+	unsigned long samples_cnt;	/* Total number of samples */
+	unsigned long long accu_time;/* Accum. samples time (for avg. calc.) */
+	unsigned long long min_time;
+	unsigned long long max_time;
+	/* all times in nano-sec. */
+};
+
+#define DESC_TYPE_NUM (1<<SEP_SW_DESC_TYPE_BIT_SIZE)
+static const char *desc_names[DESC_TYPE_NUM] = {
+	"NULL",			/*SEP_SW_DESC_TYPE_NULL */
+	"CRYPTO",		/*SEP_SW_DESC_TYPE_CRYPTO_OP */
+	"MSG",			/*SEP_SW_DESC_TYPE_MSG_OP */
+	/* Next 12 types are invalid */
+	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+	"DEBUG"			/*SEP_SW_DESC_TYPE_DEBUG */
+};
+
+struct kobj_attribute *queue_size[SEP_MAX_NUM_OF_DESC_Q];
+struct attribute *queue_attrs[SEP_MAX_NUM_OF_DESC_Q];
+
+struct sep_stats drv_lat_stats[SEP_MAX_NUM_OF_DESC_Q][DXDI_IOC_NR_MAX + 1];
+struct sep_stats sep_lat_stats[SEP_MAX_NUM_OF_DESC_Q][DESC_TYPE_NUM];
+
+/*
+ * Structure used to create a directory and its attributes in sysfs
+ */
+struct sys_dir {
+	struct kobject *sys_dir_kobj;
+	struct attribute_group sys_dir_attr_group;
+	struct attribute **sys_dir_attr_list;
+	int num_of_attrs;
+	struct sep_drvdata *drvdata;	/* Associated driver context */
+};
+
+/* directory initialization*/
+static int sys_init_dir(struct sys_dir *sys_dir, struct sep_drvdata *drvdata,
+			struct kobject *parent_dir_kobj,
+			const char *dir_name,
+			struct kobj_attribute *attrs, int num_of_attrs);
+
+/* directory deinitialization */
+static void sys_free_dir(struct sys_dir *sys_dir);
+
+/* top level directory structure */
+struct sys_dir sys_top_dir;
+
+/* queue level directory structures array */
+struct sys_dir sys_queue_dirs[SEP_MAX_NUM_OF_DESC_Q];
+
+/**************************************
+ * Statistics functions section       *
+ **************************************/
+
+static void update_stats(struct sep_stats *stats,
+			 unsigned long long start_ns, unsigned long long end_ns)
+{
+	unsigned long long delta;
+	unsigned long flags;
+
+	spin_lock_irqsave(&(stats->stat_lock), flags);
+
+	delta = end_ns - start_ns;
+	stats->samples_cnt++;
+	stats->accu_time += delta;
+	stats->min_time = min(delta, stats->min_time);
+	stats->max_time = max(delta, stats->max_time);
+
+	spin_unlock_irqrestore(&(stats->stat_lock), flags);
+}
+
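+/*
+ * Usage sketch (illustrative): callers bracket an operation with two
+ * nanosecond timestamps from a monotonic source, e.g.
+ * ktime_to_ns(ktime_get()), and hand the pair to the helpers below:
+ *
+ *	u64 start_ns = ktime_to_ns(ktime_get());
+ *	... dispatch the IOCTL / wait for descriptor completion ...
+ *	sysfs_update_drv_stats(qid, cmd, start_ns, ktime_to_ns(ktime_get()));
+ */
+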
+void sysfs_update_drv_stats(unsigned int qid, unsigned int ioctl_cmd_type,
+			    unsigned long long start_ns,
+			    unsigned long long end_ns)
+{
+	if ((qid >= SEP_MAX_NUM_OF_DESC_Q) ||
+	    (ioctl_cmd_type > DXDI_IOC_NR_MAX)) {
+		pr_err("IDs out of range: qid=%d , ioctl_cmd=%d\n",
+			    qid, ioctl_cmd_type);
+		return;
+	}
+
+	update_stats(&(drv_lat_stats[qid][ioctl_cmd_type]), start_ns, end_ns);
+}
+
+void sysfs_update_sep_stats(unsigned int qid, enum sep_sw_desc_type desc_type,
+			    unsigned long long start_ns,
+			    unsigned long long end_ns)
+{
+	if ((qid >= SEP_MAX_NUM_OF_DESC_Q) || (desc_type >= DESC_TYPE_NUM)) {
+		pr_err("IDs out of range: qid=%d , descriptor_type=%d\n",
+			    qid, desc_type);
+		return;
+	}
+	update_stats(&(sep_lat_stats[qid][desc_type]), start_ns, end_ns);
+}
+
+/* compute queue number based on the kobject passed to the show function */
+static int sys_get_queue_num(struct kobject *kobj, struct sys_dir *dirs)
+{
+	int i;
+
+	for (i = 0; i < SEP_MAX_NUM_OF_DESC_Q; ++i) {
+		if (dirs[i].sys_dir_kobj == kobj)
+			break;
+	}
+
+	return i;
+}
+
+static struct sep_drvdata *sys_get_drvdata(struct kobject *kobj)
+{
+	/* TODO: supporting multiple SeP devices would require avoiding the
+	 * global "top_dir" and finding the associated "top_dir" by traversing
+	 * up the tree to the kobject which matches one of the top_dirs */
+	return sys_top_dir.drvdata;
+}
+
+/**************************************
+ * Attributes show functions section  *
+ **************************************/
+
+static ssize_t sys_fw_ver_show(struct kobject *kobj,
+			       struct kobj_attribute *attr, char *buf)
+{
+	struct sep_drvdata *drvdata = sys_get_drvdata(kobj);
+	return sprintf(buf,
+		       "ROM_VER=0x%08X\nFW_VER=0x%08X\n",
+		       drvdata->rom_ver, drvdata->fw_ver);
+}
+
+static ssize_t sys_queue_size_show(struct kobject *kobj,
+				   struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "<not supported>\n");
+}
+
+static ssize_t sys_queue_dump_show(struct kobject *kobj,
+				   struct kobj_attribute *attr, char *buf)
+{
+#ifdef DESCQ_DUMP_SUPPORT
+	int i;
+
+	i = sys_get_queue_num(kobj, (struct sys_dir *)&sys_queue_dirs);
+#endif
+
+	return sprintf(buf, "DescQ dump not supported, yet.\n");
+}
+
+/* time from write to read is measured */
+static ssize_t sys_queue_stats_drv_lat_show(struct kobject *kobj,
+					    struct kobj_attribute *attr,
+					    char *buf)
+{
+	u64 min_usec, max_usec, avg_usec;
+	int qid, i;
+	char *cur_buf_pos = buf;
+
+	qid = sys_get_queue_num(kobj, (struct sys_dir *)&sys_queue_dirs);
+
+	cur_buf_pos += sprintf(cur_buf_pos,
+			       "ioctl#\tmin[us]\tavg[us]\tmax[us]\t#samples\n");
+
+	if (qid >= SEP_MAX_NUM_OF_DESC_Q) {
+		pr_err("ID out of range: qid=%d\n", qid);
+		return 0;
+	}
+
+	for (i = 0; i < DXDI_IOC_NR_MAX + 1; i++) {
+		/* Because we are doing 64-bit (long long) division we
+		 * need to explicitly invoke do_div() */
+		if (drv_lat_stats[qid][i].samples_cnt > 0) {
+			min_usec = drv_lat_stats[qid][i].min_time;
+			do_div(min_usec, 1000);	/* result goes into dividend */
+			max_usec = drv_lat_stats[qid][i].max_time;
+			do_div(max_usec, 1000);
+			avg_usec = drv_lat_stats[qid][i].accu_time;
+			do_div(avg_usec, drv_lat_stats[qid][i].samples_cnt);
+			do_div(avg_usec, 1000);
+		} else {
+			min_usec = 0;
+			max_usec = 0;
+			avg_usec = 0;
+		}
+
+		cur_buf_pos += sprintf(cur_buf_pos,
+				       "%u:\t%6llu\t%6llu\t%6llu\t%7lu\n", i,
+				       min_usec, avg_usec, max_usec,
+				       drv_lat_stats[qid][i].samples_cnt);
+	}
+	return cur_buf_pos - buf;
+}
+
+/* time from descriptor enqueue to interrupt is measured */
+static ssize_t sys_queue_stats_sep_lat_show(struct kobject *kobj,
+					    struct kobj_attribute *attr,
+					    char *buf)
+{
+	u64 min_usec, max_usec, avg_usec;
+	int qid, i, buf_len;
+	char *line;
+
+	buf_len = sprintf(buf,
+			  "desc-type\tmin[us]\tavg[us]\tmax[us]\t#samples\n");
+
+	qid = sys_get_queue_num(kobj, (struct sys_dir *)&sys_queue_dirs);
+
+	if (qid >= SEP_MAX_NUM_OF_DESC_Q) {
+		pr_err("ID out of range: qid=%d\n", qid);
+		return 0;
+	}
+
+	line = kzalloc(256, GFP_KERNEL);
+
+	if (line == NULL) {
+		pr_err("Memory allocation failed\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < DESC_TYPE_NUM; ++i) {
+		if (desc_names[i] != NULL) {	/*Only if valid desc. type */
+			/* Because we are doing 64-bit (long long) division
+			 * we need to explicitly invoke do_div() */
+			if (sep_lat_stats[qid][i].samples_cnt > 0) {
+				min_usec = sep_lat_stats[qid][i].min_time;
+				/* result goes into dividend */
+				do_div(min_usec, 1000);
+				max_usec = sep_lat_stats[qid][i].max_time;
+				do_div(max_usec, 1000);
+				avg_usec = sep_lat_stats[qid][i].accu_time;
+				do_div(avg_usec,
+				       sep_lat_stats[qid][i].samples_cnt);
+				do_div(avg_usec, 1000);
+			} else {
+				min_usec = 0;
+				max_usec = 0;
+				avg_usec = 0;
+			}
+
+			buf_len += sprintf(line,
+					   "%s\t\t%6llu\t%6llu\t%6llu\t%7lu\n",
+					   desc_names[i], min_usec, avg_usec,
+					   max_usec,
+					   sep_lat_stats[qid][i].samples_cnt);
+			strcat(buf, line);
+		}
+	}
+
+	kfree(line);
+
+	return buf_len;
+}
+
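+/*
+ * Example "sep_lat" read (values are illustrative only):
+ *
+ *	desc-type	min[us]	avg[us]	max[us]	#samples
+ *	CRYPTO		    14	    52	   470	   1024
+ *	MSG		     9	    11	    23	     12
+ */
+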
+/********************************************************
+ *		SYSFS objects				*
+ ********************************************************/
+
+/* TOP LEVEL ATTRIBUTES */
+
+static struct kobj_attribute sys_top_level_attrs[] = {
+	__ATTR(fw_ver, 0444, sys_fw_ver_show, NULL),
+#ifdef SEP_HWK_UNIT_TEST
+	__ATTR(hwk_self_test, 0664, sys_hwk_st_show, sys_hwk_st_start)
+#endif
+};
+
+struct kobj_attribute sys_queue_level_attrs[] = {
+
+	__ATTR(size, 0444, sys_queue_size_show, NULL),
+	__ATTR(dump, 0444, sys_queue_dump_show, NULL),
+	__ATTR(drv_lat, 0444, sys_queue_stats_drv_lat_show, NULL),
+	__ATTR(sep_lat, 0444, sys_queue_stats_sep_lat_show, NULL)
+};
+
+int sys_init_dir(struct sys_dir *sys_dir, struct sep_drvdata *drvdata,
+		 struct kobject *parent_dir_kobj, const char *dir_name,
+		 struct kobj_attribute *attrs, int num_of_attrs)
+{
+	int i;
+
+	memset(sys_dir, 0, sizeof(struct sys_dir));
+
+	sys_dir->drvdata = drvdata;
+
+	/* initialize directory kobject */
+	sys_dir->sys_dir_kobj =
+	    kobject_create_and_add(dir_name, parent_dir_kobj);
+
+	if (!(sys_dir->sys_dir_kobj))
+		return -ENOMEM;
+	/* allocate memory for directory's attributes list */
+	sys_dir->sys_dir_attr_list =
+	    kzalloc(sizeof(struct attribute *)*(num_of_attrs + 1),
+		    GFP_KERNEL);
+
+	if (!(sys_dir->sys_dir_attr_list)) {
+		kobject_put(sys_dir->sys_dir_kobj);
+		return -ENOMEM;
+	}
+
+	sys_dir->num_of_attrs = num_of_attrs;
+
+	/* initialize attributes list */
+	for (i = 0; i < num_of_attrs; ++i)
+		sys_dir->sys_dir_attr_list[i] = &(attrs[i].attr);
+
+	/* last list entry should be NULL */
+	sys_dir->sys_dir_attr_list[num_of_attrs] = NULL;
+
+	sys_dir->sys_dir_attr_group.attrs = sys_dir->sys_dir_attr_list;
+
+	return sysfs_create_group(sys_dir->sys_dir_kobj,
+				  &(sys_dir->sys_dir_attr_group));
+}
+
+void sys_free_dir(struct sys_dir *sys_dir)
+{
+	if (!sys_dir)
+		return;
+
+	kfree(sys_dir->sys_dir_attr_list);
+
+	if (sys_dir->sys_dir_kobj)
+		kobject_put(sys_dir->sys_dir_kobj);
+}
+
+/* free sysfs directory structures */
+void sep_free_sysfs(void)
+{
+	int j;
+
+	for (j = 0; (j < SEP_MAX_NUM_OF_DESC_Q) &&
+	     (sys_queue_dirs[j].sys_dir_kobj != NULL); ++j) {
+		sys_free_dir(&(sys_queue_dirs[j]));
+	}
+
+	if (sys_top_dir.sys_dir_kobj != NULL)
+		sys_free_dir(&sys_top_dir);
+
+}
+
+/* initialize sysfs directories structures */
+int sep_setup_sysfs(struct kobject *sys_dev_kobj, struct sep_drvdata *drvdata)
+{
+	int retval = 0, i, j;
+	char queue_name[MAX_QUEUE_NAME_LEN];
+
+	pr_debug("setup sysfs under %s\n", sys_dev_kobj->name);
+	/* reset statistics */
+	memset(drv_lat_stats, 0, sizeof(drv_lat_stats));
+	memset(sep_lat_stats, 0, sizeof(sep_lat_stats));
+	for (i = 0; i < SEP_MAX_NUM_OF_DESC_Q; i++) {
+		for (j = 0; j < DXDI_IOC_NR_MAX + 1; j++) {
+			spin_lock_init(&drv_lat_stats[i][j].stat_lock);
+			/* set min_time to largest ULL value so first sample
+			 * becomes the minimum. */
+			drv_lat_stats[i][j].min_time = (unsigned long long)-1;
+		}
+		for (j = 0; j < DESC_TYPE_NUM; j++) {
+			spin_lock_init(&sep_lat_stats[i][j].stat_lock);
+			/* set min_time to largest ULL value so first sample
+			 * becomes the minimum. */
+			sep_lat_stats[i][j].min_time = (unsigned long long)-1;
+		}
+	}
+
+	/* zero all directories structures */
+	memset(&sys_top_dir, 0, sizeof(struct sys_dir));
+	memset(&sys_queue_dirs, 0,
+	       sizeof(struct sys_dir) * SEP_MAX_NUM_OF_DESC_Q);
+
+	/* initialize the top directory */
+	retval = sys_init_dir(&sys_top_dir, drvdata, sys_dev_kobj,
+			      "sep_info", sys_top_level_attrs,
+			      ARRAY_SIZE(sys_top_level_attrs));
+
+	if (retval)
+		return retval;
+
+	/* initialize descriptor queue directory structures */
+	for (i = 0; i < SEP_MAX_NUM_OF_DESC_Q; ++i) {
+
+		sprintf(queue_name, "queue%d", i);
+
+		retval = sys_init_dir(&(sys_queue_dirs[i]), drvdata,
+				      sys_top_dir.sys_dir_kobj, queue_name,
+				      sys_queue_level_attrs,
+				      ARRAY_SIZE(sys_queue_level_attrs));
+
+		if (retval)
+			break;
+
+	}
+
+	if (retval)
+		sep_free_sysfs();
+
+	return retval;
+}
+
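+/*
+ * Resulting sysfs layout (illustrative, assuming SEP_MAX_NUM_OF_DESC_Q == 2):
+ *
+ *	<sys_dev_kobj>/sep_info/fw_ver
+ *	<sys_dev_kobj>/sep_info/queue0/{size,dump,drv_lat,sep_lat}
+ *	<sys_dev_kobj>/sep_info/queue1/{size,dump,drv_lat,sep_lat}
+ */
+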
+#ifdef SEP_SYSFS_UNIT_TEST
+
+static int __init sep_init(void)
+{
+	int retval;
+
+	pr_info("i am loading...\n");
+
+	/* No drvdata in unit-test mode */
+	retval = sep_setup_sysfs(kernel_kobj, NULL);
+
+	return retval;
+}
+
+static void __exit sep_exit(void)
+{
+	sep_free_sysfs();
+	pr_info("i am unloading...\n");
+}
+
+module_init(sep_init);
+module_exit(sep_exit);
+
+#endif
diff --git a/drivers/staging/sep54/sep_sysfs.h b/drivers/staging/sep54/sep_sysfs.h
new file mode 100644
index 0000000..f8e1959
--- /dev/null
+++ b/drivers/staging/sep54/sep_sysfs.h
@@ -0,0 +1,49 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _SEP_SYSFS_H_
+#define _SEP_SYSFS_H_
+
+int sep_setup_sysfs(struct kobject *sys_dev_kobj, struct sep_drvdata *drvdata);
+void sep_free_sysfs(void);
+
+void sysfs_update_drv_stats(unsigned int qid, unsigned int ioctl_cmd_type,
+			    unsigned long long start_ns,
+			    unsigned long long end_ns);
+
+void sysfs_update_sep_stats(unsigned int qid, enum sep_sw_desc_type desc_type,
+			    unsigned long long start_ns,
+			    unsigned long long end_ns);
+
+#ifdef SEP_HWK_UNIT_TEST
+ssize_t sys_hwk_st_show(struct kobject *kobj, struct kobj_attribute *attr,
+			char *buf);
+ssize_t sys_hwk_st_start(struct kobject *kobj, struct kobj_attribute *attr,
+			 const char *buf, size_t count);
+#endif
+
+
+#endif /*_SEP_SYSFS_H_*/
diff --git a/drivers/staging/sep54/sepapp.c b/drivers/staging/sep54/sepapp.c
new file mode 100644
index 0000000..f30a646
--- /dev/null
+++ b/drivers/staging/sep54/sepapp.c
@@ -0,0 +1,1160 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_SEP_APP
+
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+/*#include <linux/export.h>*/
+#include "dx_driver.h"
+#include "dx_sepapp_kapi.h"
+#include "sep_applets.h"
+#include "sep_power.h"
+#include "crypto_api.h"
+
+/* Global drvdata to be used by kernel clients via dx_sepapp_ API */
+static struct sep_drvdata *kapps_drvdata;
+
+/**
+ * sepapp_params_cleanup() - Clean resources allocated for SeP Applet parameters
+ * @client_ctx:	The associated client context for this operation
+ * @dxdi_params:	The user-space client parameters as passed from the
+ *			DriverInterface (NULL for kernel clients)
+ * @dxdi_kparams:	The kernel client parameters (NULL for user-space clients)
+ * @sw_desc_params:	The client parameters DMA information for SeP
+ *			(required for value copy back)
+ * @local_dma_objs:	Array of SEPAPP_MAX_PARAMS DMA buffer objects,
+ *			set for parameters of MEMREF type
+ * @mlli_tables:	Array of SEPAPP_MAX_PARAMS MLLI tables objects
+ *			used for MEMREF parameters
+ *
+ * Clean resources allocated for SeP Applet parameters
+ * (primarily, MLLI tables and temporarily registered user memory).
+ * Returns void
+ */
+static void sepapp_params_cleanup(struct sep_client_ctx *client_ctx,
+				  struct dxdi_sepapp_params *dxdi_params,
+				  struct dxdi_sepapp_kparams *dxdi_kparams,
+				  struct sepapp_client_params *sw_desc_params,
+				  struct client_dma_buffer *local_dma_objs[],
+				  struct mlli_tables_list mlli_tables[])
+{
+	int i;
+	int memref_idx;
+	struct queue_drvdata *drvdata = client_ctx->drv_data;
+	enum dxdi_sepapp_param_type *params_types;
+	struct dxdi_val_param *cur_val;
+
+	if (dxdi_params != NULL)
+		params_types = dxdi_params->params_types;
+	else if (dxdi_kparams != NULL)	/* kernel parameters */
+		params_types = dxdi_kparams->params_types;
+	else			/* No parameters - nothing to clean */
+		return;
+
+	for (i = 0; (i < SEPAPP_MAX_PARAMS); i++) {
+		if (params_types[i] == DXDI_SEPAPP_PARAM_MEMREF) {
+			/* Can be called for all (uninitialized MLLI ignored) */
+			llimgr_destroy_mlli(drvdata->sep_data->llimgr,
+					    mlli_tables + i);
+			memref_idx =
+			    DMA_OBJ_TO_MEMREF_IDX(client_ctx,
+						  local_dma_objs[i]);
+			release_dma_obj(client_ctx, local_dma_objs[i]);
+			if ((local_dma_objs[i] != NULL) &&
+			    (((dxdi_params != NULL) && (memref_idx !=
+							dxdi_params->params[i].
+							memref.ref_id)) ||
+			     (dxdi_kparams != NULL))) {
+				/* There is a DMA object to free
+				 * (either user params with temp. registration,
+				 * or kernel params, which are always temp.) */
+				(void)free_client_memref(client_ctx,
+							 memref_idx);
+			}
+			local_dma_objs[i] = NULL;
+		} else if (params_types[i] == DXDI_SEPAPP_PARAM_VAL) {
+			if (dxdi_params != NULL)
+				cur_val = &dxdi_params->params[i].val;
+			else	/* kernel parameters */
+				cur_val = &dxdi_kparams->params[i].val;
+			if (cur_val->copy_dir & DXDI_DATA_FROM_DEVICE) {
+				/* Copy back output values */
+				cur_val->data[0] =
+				    sw_desc_params->params[i].val.data[0];
+				cur_val->data[1] =
+				    sw_desc_params->params[i].val.data[1];
+			}
+		}
+	}
+}
+
+static int kernel_memref_to_sw_desc_memref(struct dxdi_kmemref *cur_memref,
+					   struct sep_client_ctx *client_ctx,
+					   u8 *sep_memref_type_p,
+					   struct seprpc_memref *sep_memref_p,
+					   struct client_dma_buffer
+					   **local_dma_obj_pp,
+					   struct mlli_tables_list
+					   *mlli_table_p)
+{
+	void *llimgr = client_ctx->drv_data->sep_data->llimgr;
+	enum dma_data_direction dma_dir;
+	int memref_idx;
+	int rc = 0;
+
+	/* convert DMA direction to enum dma_data_direction */
+	dma_dir = dxdi_data_dir_to_dma_data_dir(cur_memref->dma_direction);
+	if (unlikely(dma_dir == DMA_NONE)) {
+		pr_err("Invalid DMA direction (%d) for param.\n",
+			    cur_memref->dma_direction);
+		return -EINVAL;
+	}
+
+	/* For kernel parameters always temp. registration */
+	memref_idx = register_client_memref(client_ctx,
+					    NULL, cur_memref->sgl,
+					    cur_memref->nbytes, dma_dir);
+	if (unlikely(!IS_VALID_MEMREF_IDX(memref_idx))) {
+		pr_err("Failed temp. memory registration (rc=%d)\n",
+			    memref_idx);
+		return -ENOMEM;
+	}
+	*local_dma_obj_pp = acquire_dma_obj(client_ctx, memref_idx);
+	if (*local_dma_obj_pp == NULL)
+		rc = -EINVAL;
+	else
+		/* MLLI table creation */
+		rc = llimgr_create_mlli(llimgr,
+				mlli_table_p, dma_dir, *local_dma_obj_pp, 0, 0);
+
+	if (likely(rc == 0)) {
+		llimgr_mlli_to_seprpc_memref(mlli_table_p, sep_memref_p);
+		*sep_memref_type_p = SEPAPP_PARAM_TYPE_MEMREF;
+	}
+
+	return rc;		/* Cleanup on error in caller */
+}
+
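+/*
+ * Caller-side sketch (illustrative; "buf" and "len" are hypothetical): a
+ * kernel client typically wraps an existing buffer in a single-entry
+ * scatterlist before handing it over as a dxdi_kmemref:
+ *
+ *	struct scatterlist sgl;
+ *	struct dxdi_kmemref kmemref;
+ *
+ *	sg_init_one(&sgl, buf, len);
+ *	kmemref.dma_direction = DXDI_DATA_TO_DEVICE;
+ *	kmemref.sgl = &sgl;
+ *	kmemref.nbytes = len;
+ */
+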
+static int user_memref_to_sw_desc_memref(struct dxdi_memref *cur_memref,
+					 struct sep_client_ctx *client_ctx,
+					 u8 *sep_memref_type_p,
+					 struct seprpc_memref *sep_memref_p,
+					 struct client_dma_buffer
+					 **local_dma_obj_pp,
+					 struct mlli_tables_list *mlli_table_p)
+{
+	void *llimgr = client_ctx->drv_data->sep_data->llimgr;
+	enum dma_data_direction dma_dir;
+	int memref_idx;
+	int rc = 0;
+
+	/* convert DMA direction to enum dma_data_direction */
+	dma_dir = dxdi_data_dir_to_dma_data_dir(cur_memref->dma_direction);
+	if (unlikely(dma_dir == DMA_NONE)) {
+		pr_err("Invalid DMA direction (%d) for param.\n",
+			    cur_memref->dma_direction);
+		return -EINVAL;
+	}
+
+	if (IS_VALID_MEMREF_IDX(cur_memref->ref_id)) {
+		/* Registered mem. */
+		*local_dma_obj_pp =
+		    acquire_dma_obj(client_ctx, cur_memref->ref_id);
+		if (unlikely(*local_dma_obj_pp == NULL)) {
+			pr_err("Failed to acquire DMA obj. at ref_id=%d\n",
+				    cur_memref->ref_id);
+			return -EINVAL;
+		}
+		if ((cur_memref->start_or_offset == 0) &&
+		    (cur_memref->size == (*local_dma_obj_pp)->buf_size)) {
+			/* Whole registered mem. */
+			memref_idx = cur_memref->ref_id;
+		} else {	/* Partial reference */
+			/* Handle as unregistered memory at
+			 * different address/len. */
+			INVALIDATE_MEMREF_IDX(cur_memref->ref_id);
+			cur_memref->start_or_offset += (unsigned long)
+			    (*local_dma_obj_pp)->user_buf_ptr;
+			/* Release base memref - not used */
+			release_dma_obj(client_ctx, *local_dma_obj_pp);
+			*local_dma_obj_pp = NULL;
+		}
+	}
+	/* The following is not an "else" of the previous block, because
+	 * that block may invalidate ref_id for a partial reference,
+	 * forcing this temp. registration. */
+	if (!IS_VALID_MEMREF_IDX(cur_memref->ref_id)) {
+		/* Temp. registration */
+		memref_idx =
+		    register_client_memref(client_ctx,
+					   (u8 __user *)cur_memref->
+					   start_or_offset, NULL,
+					   cur_memref->size, dma_dir);
+		if (unlikely(!IS_VALID_MEMREF_IDX(memref_idx))) {
+			pr_err("Failed temp. memory " "registration\n");
+			return -ENOMEM;
+		}
+		*local_dma_obj_pp = acquire_dma_obj(client_ctx, memref_idx);
+	}
+
+	if (*local_dma_obj_pp == NULL)
+		rc = -EINVAL;
+	else
+		/* MLLI table creation */
+		rc = llimgr_create_mlli(llimgr,
+				mlli_table_p, dma_dir, *local_dma_obj_pp, 0, 0);
+
+	if (likely(rc == 0)) {
+		llimgr_mlli_to_seprpc_memref(mlli_table_p, sep_memref_p);
+		*sep_memref_type_p = SEPAPP_PARAM_TYPE_MEMREF;
+	}
+
+	return rc;		/* Cleanup on error in caller */
+}
+
+/**
+ * dxdi_sepapp_params_to_sw_desc_params() - Convert the client input parameters
+ * @client_ctx:	The associated client context for this operation
+ * @dxdi_params:	The user-space client parameters as passed from the
+ *			DriverInterface (NULL for kernel clients)
+ * @dxdi_kparams:	The kernel client parameters (NULL for user-space clients)
+ * @sw_desc_params:	The returned client parameters DMA information for SeP
+ * @local_dma_objs:	Array of SEPAPP_MAX_PARAMS DMA buffer objects,
+ *			set for parameters of MEMREF type
+ * @mlli_tables:	Array of SEPAPP_MAX_PARAMS MLLI tables objects
+ *			to be used for MEMREF parameters
+ *
+ * Convert the client input parameters array from dxdi format to the
+ * SW descriptor format while creating the required MLLI tables
+ * Returns int
+ */
+static int dxdi_sepapp_params_to_sw_desc_params(struct sep_client_ctx
+						*client_ctx,
+						struct dxdi_sepapp_params
+						*dxdi_params,
+						struct dxdi_sepapp_kparams
+						*dxdi_kparams,
+						struct sepapp_client_params
+						*sw_desc_params,
+						struct client_dma_buffer
+						*local_dma_objs[],
+						struct mlli_tables_list
+						mlli_tables[])
+{
+	enum dxdi_sepapp_param_type *params_types;
+	struct dxdi_val_param *cur_val;
+	int i;
+	int rc = 0;
+
+	/* Init./clean arrays for proper cleanup in case of failure */
+	for (i = 0; i < SEPAPP_MAX_PARAMS; i++) {
+		MLLI_TABLES_LIST_INIT(mlli_tables + i);
+		local_dma_objs[i] = NULL;
+		sw_desc_params->params_types[i] = SEPAPP_PARAM_TYPE_NULL;
+	}
+
+	if (dxdi_params != NULL)
+		params_types = dxdi_params->params_types;
+	else if (dxdi_kparams != NULL)	/* kernel parameters */
+		params_types = dxdi_kparams->params_types;
+	else	/* No parameters - nothing to do beyond the init. above */
+		return 0;
+
+	/* Convert each parameter based on its type */
+	for (i = 0; (i < SEPAPP_MAX_PARAMS) && (rc == 0); i++) {
+		switch (params_types[i]) {
+
+		case DXDI_SEPAPP_PARAM_MEMREF:
+			if (dxdi_params != NULL)
+				rc = user_memref_to_sw_desc_memref
+				    (&dxdi_params->params[i].memref, client_ctx,
+				     &sw_desc_params->params_types[i],
+				     &(sw_desc_params->params[i].memref),
+				     local_dma_objs + i, mlli_tables + i);
+			else
+				rc = kernel_memref_to_sw_desc_memref
+				    (&dxdi_kparams->params[i].kmemref,
+				     client_ctx,
+				     &sw_desc_params->params_types[i],
+				     &(sw_desc_params->params[i].memref),
+				     local_dma_objs + i, mlli_tables + i);
+			break;	/* from switch */
+
+		case DXDI_SEPAPP_PARAM_VAL:
+			if (dxdi_params != NULL)
+				cur_val = &dxdi_params->params[i].val;
+			else	/* kernel parameters */
+				cur_val = &dxdi_kparams->params[i].val;
+
+			sw_desc_params->params[i].val.dir = SEPAPP_DIR_NULL;
+			if (cur_val->copy_dir & DXDI_DATA_TO_DEVICE) {
+				sw_desc_params->params[i].val.dir |=
+				    SEPAPP_DIR_IN;
+				sw_desc_params->params[i].val.data[0] =
+				    cur_val->data[0];
+				sw_desc_params->params[i].val.data[1] =
+				    cur_val->data[1];
+			}
+			if (cur_val->copy_dir & DXDI_DATA_FROM_DEVICE) {
+				sw_desc_params->params[i].val.dir |=
+				    SEPAPP_DIR_OUT;
+			}
+			sw_desc_params->params_types[i] = SEPAPP_PARAM_TYPE_VAL;
+			break;	/* from switch */
+
+		case DXDI_SEPAPP_PARAM_NULL:
+			sw_desc_params->params_types[i] =
+			    SEPAPP_PARAM_TYPE_NULL;
+			break;
+
+		default:
+			pr_err(
+				"Invalid parameter type (%d) for #%d\n",
+				params_types[i], i);
+			rc = -EINVAL;
+		}		/*switch */
+	}			/*for parameters */
+
+	/* Cleanup in case of error */
+	if (rc != 0)
+		sepapp_params_cleanup(client_ctx, dxdi_params, dxdi_kparams,
+				      sw_desc_params, local_dma_objs,
+				      mlli_tables);
+	return rc;
+}
+
+/**
+ * sepapp_session_open() - Open a session with given SeP Applet
+ * @op_ctx:
+ * @sepapp_uuid:	 Applet UUID
+ * @auth_method:	 Client authentication method
+ * @auth_data:	 Authentication data
+ * @app_auth_data:	 Applet specific authentication data
+ * @session_id:	 Returned allocated session ID
+ * @sep_ret_origin:	 Origin in SeP of error code
+ *
+ * Returns int
+ */
+static int sepapp_session_open(struct sep_op_ctx *op_ctx,
+			       u8 *sepapp_uuid,
+			       u32 auth_method,
+			       void *auth_data,
+			       struct dxdi_sepapp_params *app_auth_data,
+			       struct dxdi_sepapp_kparams *kapp_auth_data,
+			       int *session_id,
+			       enum dxdi_sep_module *sep_ret_origin)
+{
+	int rc;
+	struct sep_app_session *new_session;
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct client_dma_buffer *local_dma_objs[SEPAPP_MAX_PARAMS];
+	struct mlli_tables_list mlli_tables[SEPAPP_MAX_PARAMS];
+	struct sep_sw_desc desc;
+	struct sepapp_in_params_session_open *sepapp_msg_p;
+
+	/* Verify that given spad_buf size can accommodate the in_params */
+	BUILD_BUG_ON(sizeof(struct sepapp_in_params_session_open) >
+		     USER_SPAD_SIZE);
+
+	op_ctx->op_type = SEP_OP_APP;
+	*sep_ret_origin = DXDI_SEP_MODULE_HOST_DRIVER;
+
+	op_ctx->spad_buf_p = dma_pool_alloc(drvdata->sep_data->spad_buf_pool,
+					    GFP_KERNEL,
+					    &op_ctx->spad_buf_dma_addr);
+	if (unlikely(op_ctx->spad_buf_p == NULL)) {
+		pr_err(
+			    "Failed allocating from spad_buf_pool for SeP Applet Request message\n");
+		INVALIDATE_SESSION_IDX(*session_id);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+		return -ENOMEM;
+	}
+	sepapp_msg_p =
+	    (struct sepapp_in_params_session_open *)op_ctx->spad_buf_p;
+
+	/* Find free session entry */
+	for ((*session_id) = 0,
+	     new_session = &op_ctx->client_ctx->sepapp_sessions[0];
+	     (*session_id < MAX_SEPAPP_SESSION_PER_CLIENT_CTX);
+	     new_session++, (*session_id)++) {
+		mutex_lock(&new_session->session_lock);
+		if (new_session->ref_cnt == 0)
+			break;
+		mutex_unlock(&new_session->session_lock);
+	}
+	if (*session_id == MAX_SEPAPP_SESSION_PER_CLIENT_CTX) {
+		pr_err(
+			    "Could not allocate session entry. all %u are in use.\n",
+			    MAX_SEPAPP_SESSION_PER_CLIENT_CTX);
+		INVALIDATE_SESSION_IDX(*session_id);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+		return -ENOMEM;
+	}
+
+	new_session->ref_cnt = 1;	/* To be decremented by close_session */
+	/* Invalidate the session ID so it cannot be used until the session
+	 * is actually opened */
+	new_session->sep_session_id = SEP_SESSION_ID_INVALID;
+	mutex_unlock(&new_session->session_lock);
+
+	/* Convert parameters to SeP Applet format */
+	rc = dxdi_sepapp_params_to_sw_desc_params(op_ctx->client_ctx,
+						  app_auth_data, kapp_auth_data,
+						  &sepapp_msg_p->client_params,
+						  local_dma_objs, mlli_tables);
+
+	if (likely(rc == 0)) {
+		memcpy(&sepapp_msg_p->app_uuid, sepapp_uuid, SEPAPP_UUID_SIZE);
+		sepapp_msg_p->auth_method = cpu_to_le32(auth_method);
+		/* TODO: Fill msg.auth_data as required for supported methods,
+		 * e.g. client application ID */
+
+		/* Pack SW descriptor */
+		/* Set invalid session ID so in case of error the ID set
+		 * in the session context remains invalid. */
+		desc_q_pack_app_req_desc(&desc, op_ctx,
+					 SEPAPP_REQ_TYPE_SESSION_OPEN,
+					 SEP_SESSION_ID_INVALID,
+					 op_ctx->spad_buf_dma_addr);
+		/* Associate operation with the session */
+		op_ctx->session_ctx = new_session;
+		op_ctx->internal_error = false;
+		rc = desc_q_enqueue(drvdata->desc_queue, &desc, true);
+		if (likely(!IS_DESCQ_ENQUEUE_ERR(rc)))
+			rc = 0;
+	}
+
+	if (likely(rc == 0)) {
+		rc = wait_for_sep_op_result(op_ctx);
+		/* Process descriptor completion */
+		if (likely(rc == 0)) {
+			if ((op_ctx->error_info != 0) &&
+			    (op_ctx->internal_error)) {
+				*sep_ret_origin = DXDI_SEP_MODULE_APP_MGR;
+			} else {	/* Success or error from applet */
+				*sep_ret_origin = DXDI_SEP_MODULE_APP;
+			}
+		} else {	/* Descriptor processing failed */
+			*sep_ret_origin = DXDI_SEP_MODULE_SW_QUEUE;
+			op_ctx->error_info = DXDI_ERROR_INTERNAL;
+		}
+	}
+
+	if (unlikely((rc != 0) || (op_ctx->error_info != 0))) {
+		mutex_lock(&new_session->session_lock);
+		new_session->ref_cnt = 0;
+		mutex_unlock(&new_session->session_lock);
+		INVALIDATE_SESSION_IDX(*session_id);
+	}
+	op_ctx->op_state = USER_OP_NOP;
+	sepapp_params_cleanup(op_ctx->client_ctx, app_auth_data, kapp_auth_data,
+			      &sepapp_msg_p->client_params, local_dma_objs,
+			      mlli_tables);
+
+	return rc;
+}
+
+/**
+ * sepapp_session_close() - Close given SeP Applet context
+ * @op_ctx:
+ * @session_id:
+ *
+ * Returns int
+ */
+int sepapp_session_close(struct sep_op_ctx *op_ctx, int session_id)
+{
+	struct sep_client_ctx *client_ctx = op_ctx->client_ctx;
+	struct sep_app_session *session_ctx =
+	    &client_ctx->sepapp_sessions[session_id];
+	struct queue_drvdata *drvdata = client_ctx->drv_data;
+	struct sep_sw_desc desc;
+	int rc;
+	u16 sep_session_id;
+
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_get();
+#endif
+	if (!IS_VALID_SESSION_IDX(session_id)) {
+		pr_err("Invalid session_id=%d\n", session_id);
+		rc = -EINVAL;
+		goto end;
+	}
+	op_ctx->op_type = SEP_OP_APP;
+
+	mutex_lock(&session_ctx->session_lock);
+
+	if (!IS_VALID_SESSION_CTX(session_ctx)) {
+		mutex_unlock(&session_ctx->session_lock);
+		pr_err("Invalid session ID %d for user %p\n",
+			    session_id, client_ctx);
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (session_ctx->ref_cnt > 1) {
+		mutex_unlock(&session_ctx->session_lock);
+		pr_err("Invoked while still has pending commands!\n");
+		op_ctx->error_info = DXDI_ERROR_FATAL;
+		rc = -EBUSY;
+		goto end;
+	}
+
+	sep_session_id = session_ctx->sep_session_id;/* save before release */
+	/* Release host resources anyway... */
+	INVALIDATE_SESSION_CTX(session_ctx);
+	mutex_unlock(&session_ctx->session_lock);
+
+	/* Now release session resources on SeP */
+	/* Pack SW descriptor */
+	desc_q_pack_app_req_desc(&desc, op_ctx,
+				 SEPAPP_REQ_TYPE_SESSION_CLOSE, sep_session_id,
+				 0);
+	/* Associate operation with the session */
+	op_ctx->session_ctx = session_ctx;
+	op_ctx->internal_error = false;
+	rc = desc_q_enqueue(drvdata->desc_queue, &desc, true);
+	if (likely(!IS_DESCQ_ENQUEUE_ERR(rc)))
+		rc = wait_for_sep_op_result(op_ctx);
+
+	if (unlikely(rc != 0)) {	/* Not supposed to happen */
+		pr_err(
+			    "Failure in SESSION_CLOSE operation for sep_session_id=%u\n",
+			    sep_session_id);
+		op_ctx->error_info = DXDI_ERROR_FATAL;
+	}
+
+end:
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_put();
+#endif
+	return rc;
+}
+
+static int sepapp_command_invoke(struct sep_op_ctx *op_ctx,
+				 int session_id,
+				 u32 command_id,
+				 struct dxdi_sepapp_params *command_params,
+				 struct dxdi_sepapp_kparams *command_kparams,
+				 enum dxdi_sep_module *sep_ret_origin,
+				 int async)
+{
+	int rc;
+	struct sep_app_session *session_ctx =
+	    &op_ctx->client_ctx->sepapp_sessions[session_id];
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct sep_sw_desc desc;
+	struct sepapp_in_params_command_invoke *sepapp_msg_p;
+
+	op_ctx->op_type = SEP_OP_APP;
+	/* Verify that given spad_buf size can accommodate the in_params */
+	BUILD_BUG_ON(sizeof(struct sepapp_in_params_command_invoke) >
+		     USER_SPAD_SIZE);
+
+	if (!IS_VALID_SESSION_IDX(session_id)) {
+		pr_err("Invalid session_id=%d\n", session_id);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+		return -EINVAL;
+	}
+	mutex_lock(&session_ctx->session_lock);
+	if (!IS_VALID_SESSION_CTX(session_ctx)) {
+		mutex_unlock(&session_ctx->session_lock);
+		pr_err("Invalid session ID %d for user %p\n",
+			    session_id, op_ctx->client_ctx);
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		return -EINVAL;
+	}
+	session_ctx->ref_cnt++;	/* Prevent deletion while in use */
+	/* Unlock to allow concurrent session use from different threads */
+	mutex_unlock(&session_ctx->session_lock);
+
+	op_ctx->spad_buf_p = dma_pool_alloc(drvdata->sep_data->spad_buf_pool,
+					    GFP_KERNEL,
+					    &op_ctx->spad_buf_dma_addr);
+	if (unlikely(op_ctx->spad_buf_p == NULL)) {
+		pr_err(
+			    "Failed allocating from spad_buf_pool for SeP Applet Request message\n");
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+		rc = -ENOMEM;
+		goto sepapp_command_exit;
+	}
+	sepapp_msg_p =
+	    (struct sepapp_in_params_command_invoke *)op_ctx->spad_buf_p;
+
+	op_ctx->async_info.dxdi_params = command_params;
+	op_ctx->async_info.dxdi_kparams = command_kparams;
+	op_ctx->async_info.sw_desc_params = &sepapp_msg_p->client_params;
+	op_ctx->async_info.session_id = session_id;
+
+	if (async) {
+		wait_event_interruptible(op_ctx->client_ctx->memref_wq,
+			op_ctx->client_ctx->memref_cnt < 1);
+		mutex_lock(&session_ctx->session_lock);
+		op_ctx->client_ctx->memref_cnt++;
+		mutex_unlock(&session_ctx->session_lock);
+	}
+
+	mutex_lock(&drvdata->desc_queue_sequencer);
+	/* Convert parameters to SeP Applet format */
+	rc = dxdi_sepapp_params_to_sw_desc_params(op_ctx->client_ctx,
+					  command_params,
+					  command_kparams,
+					  &sepapp_msg_p->client_params,
+					  op_ctx->async_info.local_dma_objs,
+					  op_ctx->async_info.mlli_tables);
+
+	if (likely(rc == 0)) {
+		sepapp_msg_p->command_id = cpu_to_le32(command_id);
+		desc_q_pack_app_req_desc(&desc, op_ctx,
+					 SEPAPP_REQ_TYPE_COMMAND_INVOKE,
+					 session_ctx->sep_session_id,
+					 op_ctx->spad_buf_dma_addr);
+		/* Associate operation with the session */
+		op_ctx->session_ctx = session_ctx;
+		op_ctx->internal_error = false;
+		op_ctx->op_state = USER_OP_INPROC;
+		rc = desc_q_enqueue(drvdata->desc_queue, &desc, true);
+		if (likely(!IS_DESCQ_ENQUEUE_ERR(rc)))
+			rc = 0;
+
+		if (async && rc != 0) {
+			mutex_lock(&session_ctx->session_lock);
+			op_ctx->client_ctx->memref_cnt--;
+			mutex_unlock(&session_ctx->session_lock);
+		}
+	}
+	mutex_unlock(&drvdata->desc_queue_sequencer);
+
+	if (likely(rc == 0) && !async)
+		rc = wait_for_sep_op_result(op_ctx);
+
+	/* Process descriptor completion */
+	if (likely(rc == 0)) {
+		if ((op_ctx->error_info != 0) && (op_ctx->internal_error)) {
+			*sep_ret_origin = DXDI_SEP_MODULE_APP_MGR;
+		} else {	/* Success or error from applet */
+			*sep_ret_origin = DXDI_SEP_MODULE_APP;
+		}
+	} else {		/* Descriptor processing failed */
+		*sep_ret_origin = DXDI_SEP_MODULE_SW_QUEUE;
+		op_ctx->error_info = DXDI_ERROR_INTERNAL;
+	}
+	if (!async) {
+		op_ctx->op_state = USER_OP_NOP;
+		sepapp_params_cleanup(op_ctx->client_ctx,
+				command_params, command_kparams,
+				&sepapp_msg_p->client_params,
+				op_ctx->async_info.local_dma_objs,
+				op_ctx->async_info.mlli_tables);
+	}
+
+sepapp_command_exit:
+	if (!async) {
+		/* Release session */
+		mutex_lock(&session_ctx->session_lock);
+		session_ctx->ref_cnt--;
+		mutex_unlock(&session_ctx->session_lock);
+	}
+
+	return rc;
+
+}
+
+int sep_ioctl_sepapp_session_open(struct sep_client_ctx *client_ctx,
+				  unsigned long arg)
+{
+	struct dxdi_sepapp_session_open_params __user *user_params =
+	    (struct dxdi_sepapp_session_open_params __user *)arg;
+	struct dxdi_sepapp_session_open_params params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_sepapp_session_open_params, session_id);
+	int rc;
+
+	/* access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sepapp_session_open(&op_ctx,
+				 params.app_uuid, params.auth_method,
+				 &params.auth_data, &params.app_auth_data, NULL,
+				 &params.session_id, &params.sep_ret_origin);
+
+	/* Copy back app_auth_data in case of "by value" output parameters */
+	if (copy_to_user(&user_params->app_auth_data, &params.app_auth_data,
+			   sizeof(struct dxdi_sepapp_params))
+	    || put_user(params.session_id, &user_params->session_id)
+	    || put_user(params.sep_ret_origin,
+			  &user_params->sep_ret_origin)) {
+		pr_err("Failed writing input parameters");
+		return -EFAULT;
+	}
+
+	put_user(op_ctx.error_info, &(user_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+/**
+ * dx_sepapp_session_open() - Open a session with a SeP applet
+ *
+ * @ctx:		SeP client context
+ * @sepapp_uuid:	Target applet UUID
+ * @auth_method:	Session connection authentication method
+ *			(Currently only 0/Public is supported)
+ * @auth_data:		Pointer to authentication data - Should be NULL
+ * @open_params:	Parameters for session opening
+ * @session_id:		Returned session ID (on success)
+ * @ret_origin:		Return code origin
+ *
+ * If ret_origin is not DXDI_SEP_MODULE_APP (i.e., the error originated above
+ * the applet), the return code must be 0 on success. For DXDI_SEP_MODULE_APP
+ * it is an applet-specific return code.
+ */
+int dx_sepapp_session_open(void *ctx,
+			   u8 *sepapp_uuid,
+			   u32 auth_method,
+			   void *auth_data,
+			   struct dxdi_sepapp_kparams *open_params,
+			   int *session_id, enum dxdi_sep_module *ret_origin)
+{
+	struct sep_client_ctx *client_ctx = (struct sep_client_ctx *)ctx;
+	struct sep_op_ctx op_ctx;
+	int rc;
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sepapp_session_open(&op_ctx,
+				 sepapp_uuid, auth_method, auth_data,
+				 NULL, open_params, session_id, ret_origin);
+	/* If the request operation succeeded, return the return code from SeP */
+	if (likely(rc == 0))
+		rc = op_ctx.error_info;
+	op_ctx_fini(&op_ctx);
+	return rc;
+}
+EXPORT_SYMBOL(dx_sepapp_session_open);
+
+int sep_ioctl_sepapp_session_close(struct sep_client_ctx *client_ctx,
+				   unsigned long arg)
+{
+	struct dxdi_sepapp_session_close_params __user *user_params =
+	    (struct dxdi_sepapp_session_close_params __user *)arg;
+	int session_id;
+	struct sep_op_ctx op_ctx;
+	int rc;
+	pr_debug("sep_ioctl_sepapp_session_close user_params->session_id:%ld", arg);
+	/* access permissions to arg were already checked in sep_ioctl */
+	rc = __get_user(session_id, &user_params->session_id);
+	if (rc) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	pr_debug("sep_ioctl_sepapp_session_close session_id:%d", session_id);
+	rc = sepapp_session_close(&op_ctx, session_id);
+	pr_debug("sep_ioctl_sepapp_session_close return:%d", rc);
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+/**
+ * dx_sepapp_session_close() - Close a session with an applet
+ *
+ * @ctx:	SeP client context
+ * @session_id: Session ID as returned from dx_sepapp_open_session()
+ *
+ * Return code would be 0 on success
+ */
+int dx_sepapp_session_close(void *ctx, int session_id)
+{
+	struct sep_client_ctx *client_ctx = (struct sep_client_ctx *)ctx;
+	struct sep_op_ctx op_ctx;
+	int rc;
+
+	op_ctx_init(&op_ctx, client_ctx);
+	pr_debug("dx_sepapp_session_close session_id:%d", session_id);
+	rc = sepapp_session_close(&op_ctx, session_id);
+	pr_debug("dx_sepapp_session_close rc:%d", rc);
+	op_ctx_fini(&op_ctx);
+	return rc;
+}
+EXPORT_SYMBOL(dx_sepapp_session_close);
+
+int sep_ioctl_sepapp_command_invoke(struct sep_client_ctx *client_ctx,
+				    unsigned long arg)
+{
+	struct dxdi_sepapp_command_invoke_params __user *user_params =
+	    (struct dxdi_sepapp_command_invoke_params __user *)arg;
+	struct dxdi_sepapp_command_invoke_params params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_sepapp_command_invoke_params,
+		     sep_ret_origin);
+	int rc;
+
+	/* access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sepapp_command_invoke(&op_ctx,
+				   params.session_id, params.command_id,
+				   &params.command_params, NULL,
+				   &params.sep_ret_origin, 0);
+
+	/* Copy back command_params in case of "by value" output parameters */
+	if (copy_to_user(&user_params->command_params,
+			   &params.command_params,
+			   sizeof(struct dxdi_sepapp_params))
+	    || put_user(params.sep_ret_origin,
+			  &user_params->sep_ret_origin)) {
+		pr_err("Failed writing input parameters");
+		return -EFAULT;
+	}
+
+	put_user(op_ctx.error_info, &(user_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+	return rc;
+}
+
+/**
+ * dx_sepapp_command_invoke() - Initiate command in the applet associated with
+ *				given session ID
+ *
+ * @ctx:	SeP client context
+ * @session_id:	The target session ID
+ * @command_id:	The ID of the command to initiate (applet-specific)
+ * @command_params:	The command parameters
+ * @ret_origin:	The origin of the return code
+ */
+int dx_sepapp_command_invoke(void *ctx,
+			     int session_id,
+			     u32 command_id,
+			     struct dxdi_sepapp_kparams *command_params,
+			     enum dxdi_sep_module *ret_origin)
+{
+	struct sep_client_ctx *client_ctx = (struct sep_client_ctx *)ctx;
+	struct sep_op_ctx op_ctx;
+	int rc;
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sepapp_command_invoke(&op_ctx, session_id, command_id,
+				   NULL, command_params, ret_origin, 0);
+	/* If the request operation succeeded, return the return code from SeP */
+	if (likely(rc == 0))
+		rc = op_ctx.error_info;
+	op_ctx_fini(&op_ctx);
+	return rc;
+}
+EXPORT_SYMBOL(dx_sepapp_command_invoke);
+
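+/*
+ * Usage sketch for kernel clients (illustrative; "my_uuid", "MY_CMD" and
+ * "kparams" are hypothetical, error handling elided). execute_sep() below
+ * is the in-file reference for the same flow:
+ *
+ *	void *ctx = dx_sepapp_context_alloc();
+ *	int session_id;
+ *	enum dxdi_sep_module origin;
+ *
+ *	dx_sepapp_session_open(ctx, my_uuid, 0, NULL, NULL,
+ *			       &session_id, &origin);
+ *	dx_sepapp_command_invoke(ctx, session_id, MY_CMD, &kparams, &origin);
+ *	dx_sepapp_session_close(ctx, session_id);
+ *	dx_sepapp_context_free(ctx);
+ */
+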
+static void async_app_handle_op_completion(struct work_struct *work)
+{
+	struct async_req_ctx *areq_ctx =
+	    container_of(work, struct async_req_ctx, comp_work);
+	struct sep_op_ctx *op_ctx = &areq_ctx->op_ctx;
+	struct crypto_async_request *initiating_req = areq_ctx->initiating_req;
+	int err = 0;
+	struct sep_app_session *session_ctx =
+	  &op_ctx->client_ctx->sepapp_sessions[op_ctx->async_info.session_id];
+
+	SEP_LOG_DEBUG("req=%p op_ctx=%p\n", initiating_req, op_ctx);
+	if (op_ctx == NULL) {
+		SEP_LOG_ERR("Invalid work context (%p)\n", work);
+		return;
+	}
+
+	if (op_ctx->op_state == USER_OP_COMPLETED) {
+
+		if (unlikely(op_ctx->error_info != 0)) {
+			SEP_LOG_ERR("SeP crypto-op failed (sep_rc=0x%08X)\n",
+				    op_ctx->error_info);
+		}
+		/* Save ret_code info before cleaning op_ctx */
+		err = -(op_ctx->error_info);
+		if (unlikely(err == -EINPROGRESS)) {
+			/* SeP error code collides with EINPROGRESS */
+			SEP_LOG_ERR("Invalid SeP error code 0x%08X\n",
+				    op_ctx->error_info);
+			err = -EINVAL;	/* fallback */
+		}
+		sepapp_params_cleanup(op_ctx->client_ctx,
+					op_ctx->async_info.dxdi_params,
+					op_ctx->async_info.dxdi_kparams,
+					op_ctx->async_info.sw_desc_params,
+					op_ctx->async_info.local_dma_objs,
+					op_ctx->async_info.mlli_tables);
+
+		mutex_lock(&session_ctx->session_lock);
+		session_ctx->ref_cnt--;
+		mutex_unlock(&session_ctx->session_lock);
+
+		op_ctx->client_ctx->memref_cnt--;
+		wake_up_interruptible(&op_ctx->client_ctx->memref_wq);
+
+		if (op_ctx->async_info.dxdi_kparams != NULL)
+			kfree(op_ctx->async_info.dxdi_kparams);
+		op_ctx_fini(op_ctx);
+	} else if (op_ctx->op_state == USER_OP_INPROC) {
+		/* Report with the callback the dispatch from backlog to
+		   the actual processing in the SW descriptors queue
+		   (Returned -EBUSY when the request was dispatched) */
+		err = -EINPROGRESS;
+	} else {
+		SEP_LOG_ERR("Invalid state (%d) for op_ctx %p\n",
+			    op_ctx->op_state, op_ctx);
+		BUG();
+	}
+
+	if (likely(initiating_req->complete != NULL))
+		initiating_req->complete(initiating_req, err);
+	else
+		SEP_LOG_ERR("Async. operation has no completion callback.\n");
+}
+
+int async_sepapp_command_invoke(void *ctx,
+			     int session_id,
+			     u32 command_id,
+			     struct dxdi_sepapp_kparams *command_params,
+			     enum dxdi_sep_module *ret_origin,
+			     struct async_req_ctx *areq_ctx)
+{
+	struct sep_client_ctx *client_ctx = (struct sep_client_ctx *)ctx;
+	struct sep_op_ctx *op_ctx = &areq_ctx->op_ctx;
+	int rc;
+
+	INIT_WORK(&areq_ctx->comp_work, async_app_handle_op_completion);
+	op_ctx_init(op_ctx, client_ctx);
+	op_ctx->comp_work = &areq_ctx->comp_work;
+	rc = sepapp_command_invoke(op_ctx, session_id, command_id,
+				   NULL, command_params, ret_origin, 1);
+
+	if (rc == 0)
+		return -EINPROGRESS;
+	else
+		return rc;
+}
+
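+/*
+ * Note: following the Linux crypto API convention, a successful
+ * asynchronous dispatch returns -EINPROGRESS; the final status is
+ * delivered later through areq_ctx->initiating_req->complete() from the
+ * completion work item above.
+ */
+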
+/**
+ * dx_sepapp_context_alloc() - Allocate client context for SeP applets ops.
+ * Returns DX_SEPAPP_CLIENT_CTX_NULL on failure.
+ */
+void *dx_sepapp_context_alloc(void)
+{
+	struct sep_client_ctx *client_ctx;
+
+	client_ctx = kzalloc(sizeof(struct sep_client_ctx), GFP_KERNEL);
+	if (client_ctx == NULL)
+		return DX_SEPAPP_CLIENT_CTX_NULL;
+
+	/* Always use queue 0 */
+	init_client_ctx(&kapps_drvdata->queue[0], client_ctx);
+
+	return (void *)client_ctx;
+}
+EXPORT_SYMBOL(dx_sepapp_context_alloc);
+
+/**
+ * dx_sepapp_context_free() - Free client context.
+ *
+ * @ctx: Client context to free.
+ *
+ * Returns void (the context and any remaining resources are released)
+ */
+void dx_sepapp_context_free(void *ctx)
+{
+	struct sep_client_ctx *client_ctx = (struct sep_client_ctx *)ctx;
+	struct queue_drvdata *drvdata = client_ctx->drv_data;
+
+	cleanup_client_ctx(drvdata, client_ctx);
+	kfree(client_ctx);
+}
+EXPORT_SYMBOL(dx_sepapp_context_free);
+
+void dx_sepapp_init(struct sep_drvdata *drvdata)
+{
+	kapps_drvdata = drvdata;	/* Save for dx_sepapp_ API */
+}
+
+/**
+ * execute_sep() - Execute command in SEP.
+ *
+ * @command: Command ID to execute in SEP.
+ * @addr: Address of buffer to be passed to SEP.
+ * @size: Size of the buffer.
+ * @data1: Arbitrary data to be passed to SEP.
+ * @data2: Arbitrary data to be passed to SEP.
+ *
+ * Returns 0 on success, nonzero error code otherwise
+ */
+static int execute_sep(u32 command, u8 *addr, u32 size, u32 data1, u32 data2)
+{
+	int sess_id = 0;
+	enum dxdi_sep_module ret_origin;
+	struct sep_client_ctx *sctx = NULL;
+	u8 uuid[16] = DEFAULT_APP_UUID;
+	struct dxdi_sepapp_kparams cmd_params;
+	int rc = 0;
+
+	cmd_params.params_types[0] = DXDI_SEPAPP_PARAM_VAL;
+	/* addr is already a physical address, so this works on
+	 * a system with <= 4GB RAM.
+	 * TODO revisit this if the physical address of IMR can be higher
+	 */
+	cmd_params.params[0].val.data[0] = (unsigned long)addr & (DMA_BIT_MASK(32));
+	cmd_params.params[0].val.data[1] = 0;
+	cmd_params.params[0].val.copy_dir = DXDI_DATA_TO_DEVICE;
+
+	cmd_params.params_types[1] = DXDI_SEPAPP_PARAM_VAL;
+	cmd_params.params[1].val.data[0] = (u32)size;
+	cmd_params.params[1].val.data[1] = 0;
+	cmd_params.params[1].val.copy_dir = DXDI_DATA_TO_DEVICE;
+
+	cmd_params.params_types[2] = DXDI_SEPAPP_PARAM_VAL;
+	cmd_params.params[2].val.data[0] = data1;
+	cmd_params.params[2].val.data[1] = 0;
+	cmd_params.params[2].val.copy_dir = DXDI_DATA_TO_DEVICE;
+
+	cmd_params.params_types[3] = DXDI_SEPAPP_PARAM_VAL;
+	cmd_params.params[3].val.data[0] = data2;
+	cmd_params.params[3].val.data[1] = 0;
+	cmd_params.params[3].val.copy_dir = DXDI_DATA_TO_DEVICE;
+
+	sctx = dx_sepapp_context_alloc();
+	if (unlikely(!sctx))
+		return -ENOMEM;
+
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_get();
+#endif
+
+	rc = dx_sepapp_session_open(sctx, uuid, 0, NULL, NULL, &sess_id,
+					&ret_origin);
+	if (unlikely(rc != 0))
+		goto failed;
+
+	rc = dx_sepapp_command_invoke(sctx, sess_id, command,
+					&cmd_params, &ret_origin);
+
+	dx_sepapp_session_close(sctx, sess_id);
+
+failed:
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_put();
+#endif
+
+	dx_sepapp_context_free(sctx);
+	return rc;
+}
+
+int sepapp_image_verify(u8 *addr, ssize_t size, u32 key_index, u32 magic_num)
+{
+	pr_info("image verify: addr 0x%p size: %zd key_index: 0x%08X magic_num: 0x%08X\n",
+		addr, size, key_index, magic_num);
+	return execute_sep(CMD_IMAGE_VERIFY, addr, size, key_index, magic_num);
+}
+EXPORT_SYMBOL(sepapp_image_verify);
+
+/**
+ * sepapp_key_validity_check() - Check VRL header against active key policy.
+ *
+ * @addr: Address of buffer containing the VRL header.
+ * @size: Size of the buffer. (normal VRL 728 bytes)
+ * @flags: VRL_ALLOW_ALL_KEY_INDEXES
+ *          - Ignore key policy and allow all keys found in the device.
+ *
+ * Returns 0 on success
+ */
+int sepapp_key_validity_check(u8 *addr, ssize_t size, u32 flags)
+{
+	pr_info("validity: addr 0x%p size: %zd flags: 0x%08X\n",
+		addr, size, flags);
+	return execute_sep(CMD_KEYPOLICY_CHECK, addr, size, flags, 0);
+}
+EXPORT_SYMBOL(sepapp_key_validity_check);
+
+int sepapp_hdmi_status(u8 status, u8 bksv[5])
+{
+	int sess_id = 0;
+	enum dxdi_sep_module ret_origin;
+	struct sep_client_ctx *sctx = NULL;
+	u8 uuid[16] = HDCP_APP_UUID;
+	struct dxdi_sepapp_kparams cmd_params;
+	int rc = 0;
+
+	pr_info("Hdmi status: status 0x%02x\n", status);
+
+	memset(&cmd_params, 0, sizeof(struct dxdi_sepapp_kparams));
+
+	cmd_params.params_types[0] = DXDI_SEPAPP_PARAM_VAL;
+	cmd_params.params[0].val.data[0] = status;
+	cmd_params.params[0].val.data[1] = 0;
+	cmd_params.params[0].val.copy_dir = DXDI_DATA_TO_DEVICE;
+
+	cmd_params.params_types[1] = DXDI_SEPAPP_PARAM_VAL;
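+	/* The 5-byte BKSV is split across two 32-bit value words: bytes 0-3
+	 * land in data[0] and byte 4 in the low byte of data[1]; the rest of
+	 * data[1] stays zero from the memset above.
+	 */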
+	memcpy(&cmd_params.params[1].val.data[0], bksv, sizeof(u32));
+	memcpy((u8 *)&cmd_params.params[1].val.data[1], &bksv[4], sizeof(u8));
+	cmd_params.params[1].val.copy_dir = DXDI_DATA_TO_DEVICE;
+
+	sctx = dx_sepapp_context_alloc();
+	if (unlikely(!sctx))
+		return -ENOMEM;
+
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_get();
+#endif
+
+	rc = dx_sepapp_session_open(sctx, uuid, 0, NULL, NULL, &sess_id,
+				    &ret_origin);
+	if (unlikely(rc != 0))
+		goto failed;
+
+	rc = dx_sepapp_command_invoke(sctx, sess_id, HDCP_RX_HDMI_STATUS,
+				      &cmd_params, &ret_origin);
+
+	dx_sepapp_session_close(sctx, sess_id);
+
+failed:
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_put();
+#endif
+
+	dx_sepapp_context_free(sctx);
+	return rc;
+}
+EXPORT_SYMBOL(sepapp_hdmi_status);
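+
+/*
+ * Illustrative usage (hypothetical caller and helper): an HDMI/HDCP driver
+ * would report the link status together with the receiver's 5-byte BKSV
+ * after reading it from the attached receiver:
+ *
+ *	u8 bksv[5];
+ *
+ *	read_receiver_bksv(bksv);
+ *	rc = sepapp_hdmi_status(status, bksv);
+ */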
diff --git a/drivers/staging/sep54/sepapp.h b/drivers/staging/sep54/sepapp.h
new file mode 100644
index 0000000..ebc24d7
--- /dev/null
+++ b/drivers/staging/sep54/sepapp.h
@@ -0,0 +1,52 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+/** SeP Applets support module */
+#ifndef _SEPAPP_H_
+#define _SEPAPP_H_
+
+#include "dx_driver_abi.h"
+#include "dx_driver.h"
+
+/* IOCTL handlers for user-space access to SeP applet sessions */
+int sep_ioctl_sepapp_session_open(struct sep_client_ctx *client_ctx,
+				  unsigned long arg);
+
+int sep_ioctl_sepapp_session_close(struct sep_client_ctx *client_ctx,
+				   unsigned long arg);
+
+int sep_ioctl_sepapp_command_invoke(struct sep_client_ctx *client_ctx,
+				    unsigned long arg);
+
+void dx_sepapp_init(struct sep_drvdata *drvdata);
+
+int sepapp_session_close(struct sep_op_ctx *op_ctx, int session_id);
+
+int sepapp_image_verify(u8 *addr, ssize_t size, u32 key_index, u32 magic_num);
+
+int sepapp_key_validity_check(u8 *addr, ssize_t size, u32 flags);
+
+int sepapp_hdmi_status(u8 status, u8 bksv[5]);
+#endif /* _SEPAPP_H_ */