/*
* Driver for /dev/crypto device (aka CryptoDev)
*
* Copyright (c) 2004 Michal Ludvig <mludvig@logix.net.nz>, SuSE Labs
* Copyright (c) 2009,2010,2011 Nikos Mavrogiannopoulos <nmav@gnutls.org>
* Copyright (c) 2010 Phil Sutter
*
* This file is part of linux cryptodev.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/*
* Device /dev/crypto provides an interface for
* accessing kernel CryptoAPI algorithms (ciphers,
* hashes) from userspace programs.
*
 * The /dev/crypto interface was originally introduced in
 * OpenBSD, and this module aims to stay compatible with that API.
*
*/
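/*
 * A minimal userspace sketch of the synchronous API (illustrative only,
 * error handling omitted; the constants and structures are those
 * declared in <crypto/cryptodev.h>):
 *
 *	int cfd = open("/dev/crypto", O_RDWR);
 *
 *	struct session_op sop = {
 *		.cipher = CRYPTO_AES_CBC,
 *		.key    = key,			// 16-byte AES key
 *		.keylen = 16,
 *	};
 *	ioctl(cfd, CIOCGSESSION, &sop);		// sop.ses holds the session ID
 *
 *	struct crypt_op cop = {
 *		.ses = sop.ses,
 *		.op  = COP_ENCRYPT,
 *		.len = len,			// multiple of the block size
 *		.src = plaintext,
 *		.dst = ciphertext,
 *		.iv  = iv,			// 16-byte IV
 *	};
 *	ioctl(cfd, CIOCCRYPT, &cop);
 *
 *	ioctl(cfd, CIOCFSESSION, &sop.ses);
 *	close(cfd);
 */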
#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/ioctl.h>
#include <linux/random.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <crypto/cryptodev.h>
#include <linux/scatterlist.h>
#include "cryptodev_int.h"
#include "zc.h"
#include "version.h"
MODULE_AUTHOR("Nikos Mavrogiannopoulos <nmav@gnutls.org>");
MODULE_DESCRIPTION("CryptoDev driver");
MODULE_LICENSE("GPL");
/* ====== Compile-time config ====== */
/* Default (pre-allocated) and maximum size of the job queue.
 * The count covers free, pending and done items together. */
#define DEF_COP_RINGSIZE 16
#define MAX_COP_RINGSIZE 64
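/* The free list is seeded with DEF_COP_RINGSIZE items at open() time;
 * crypto_async_run() then grows it on demand until itemcount reaches
 * MAX_COP_RINGSIZE, at which point new jobs get -EBUSY. */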
/* ====== Module parameters ====== */
int cryptodev_verbosity;
module_param(cryptodev_verbosity, int, 0644);
MODULE_PARM_DESC(cryptodev_verbosity, "0: normal, 1: verbose, 2: debug");
/* ====== CryptoAPI ====== */
struct todo_list_item {
struct list_head __hook;
struct kernel_crypt_op kcop;
int result;
};
struct locked_list {
struct list_head list;
struct mutex lock;
};
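/* Per-open-file state: the session list (fcrypt), the free/todo/done
 * job queues used by the asynchronous interface, the work item that
 * drains the todo queue, and the waitqueue that poll() and the fetch
 * path sleep on. */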
struct crypt_priv {
struct fcrypt fcrypt;
struct locked_list free, todo, done;
int itemcount;
struct work_struct cryptask;
wait_queue_head_t user_waiter;
};
#define FILL_SG(sg, ptr, len) \
do { \
(sg)->page = virt_to_page(ptr); \
(sg)->offset = offset_in_page(ptr); \
(sg)->length = len; \
(sg)->dma_address = 0; \
} while (0)
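/* Note: FILL_SG predates the scatterlist accessor API and appears to be
 * unused here; on current kernels the equivalent after sg_init_table()
 * would simply be sg_set_buf(sg, ptr, len). */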
/* cryptodev's own workqueue, keeps crypto tasks from disturbing the force */
static struct workqueue_struct *cryptodev_wq;
/* Prepare session for future use. */
static int
crypto_create_session(struct fcrypt *fcr, struct session_op *sop)
{
struct csession *ses_new = NULL, *ses_ptr;
int ret = 0;
const char *alg_name = NULL;
const char *hash_name = NULL;
int hmac_mode = 1, stream = 0, aead = 0;
/* Does the request make sense? */
if (unlikely(!sop->cipher && !sop->mac)) {
dprintk(1, KERN_DEBUG, "Both 'cipher' and 'mac' unset.\n");
return -EINVAL;
}
switch (sop->cipher) {
case 0:
break;
case CRYPTO_DES_CBC:
alg_name = "cbc(des)";
break;
case CRYPTO_3DES_CBC:
alg_name = "cbc(des3_ede)";
break;
case CRYPTO_BLF_CBC:
alg_name = "cbc(blowfish)";
break;
case CRYPTO_AES_CBC:
alg_name = "cbc(aes)";
break;
case CRYPTO_AES_ECB:
alg_name = "ecb(aes)";
break;
case CRYPTO_CAMELLIA_CBC:
alg_name = "cbc(camellia)";
break;
case CRYPTO_AES_CTR:
alg_name = "ctr(aes)";
stream = 1;
break;
case CRYPTO_AES_GCM:
alg_name = "gcm(aes)";
stream = 1;
aead = 1;
break;
case CRYPTO_NULL:
alg_name = "ecb(cipher_null)";
stream = 1;
break;
default:
dprintk(1, KERN_DEBUG, "bad cipher: %d\n", sop->cipher);
return -EINVAL;
}
switch (sop->mac) {
case 0:
break;
case CRYPTO_MD5_HMAC:
hash_name = "hmac(md5)";
break;
case CRYPTO_RIPEMD160_HMAC:
hash_name = "hmac(rmd160)";
break;
case CRYPTO_SHA1_HMAC:
hash_name = "hmac(sha1)";
break;
case CRYPTO_SHA2_224_HMAC:
hash_name = "hmac(sha224)";
break;
case CRYPTO_SHA2_256_HMAC:
hash_name = "hmac(sha256)";
break;
case CRYPTO_SHA2_384_HMAC:
hash_name = "hmac(sha384)";
break;
case CRYPTO_SHA2_512_HMAC:
hash_name = "hmac(sha512)";
break;
/* non-hmac cases */
case CRYPTO_MD5:
hash_name = "md5";
hmac_mode = 0;
break;
case CRYPTO_RIPEMD160:
hash_name = "rmd160";
hmac_mode = 0;
break;
case CRYPTO_SHA1:
hash_name = "sha1";
hmac_mode = 0;
break;
case CRYPTO_SHA2_224:
hash_name = "sha224";
hmac_mode = 0;
break;
case CRYPTO_SHA2_256:
hash_name = "sha256";
hmac_mode = 0;
break;
case CRYPTO_SHA2_384:
hash_name = "sha384";
hmac_mode = 0;
break;
case CRYPTO_SHA2_512:
hash_name = "sha512";
hmac_mode = 0;
break;
default:
dprintk(1, KERN_DEBUG, "bad mac: %d\n", sop->mac);
return -EINVAL;
}
/* Create a session and put it to the list. */
ses_new = kzalloc(sizeof(*ses_new), GFP_KERNEL);
if (!ses_new)
return -ENOMEM;
/* Set-up crypto transform. */
if (alg_name) {
uint8_t keyp[CRYPTO_CIPHER_MAX_KEY_LEN];
if (unlikely(sop->keylen > CRYPTO_CIPHER_MAX_KEY_LEN)) {
dprintk(1, KERN_DEBUG,
"Setting key failed for %s-%zu.\n",
alg_name, (size_t)sop->keylen*8);
ret = -EINVAL;
goto error_cipher;
}
if (unlikely(copy_from_user(keyp, sop->key, sop->keylen))) {
ret = -EFAULT;
goto error_cipher;
}
ret = cryptodev_cipher_init(&ses_new->cdata, alg_name, keyp,
sop->keylen, stream, aead);
if (ret < 0) {
dprintk(1, KERN_DEBUG,
"Failed to load cipher for %s\n", alg_name);
ret = -EINVAL;
goto error_cipher;
}
}
if (hash_name && aead == 0) {
uint8_t keyp[CRYPTO_HMAC_MAX_KEY_LEN];
if (unlikely(sop->mackeylen > CRYPTO_HMAC_MAX_KEY_LEN)) {
dprintk(1, KERN_DEBUG,
"Setting key failed for %s-%zu.\n",
hash_name, (size_t)sop->mackeylen*8);
ret = -EINVAL;
goto error_hash;
}
if (sop->mackey && unlikely(copy_from_user(keyp, sop->mackey,
sop->mackeylen))) {
ret = -EFAULT;
goto error_hash;
}
ret = cryptodev_hash_init(&ses_new->hdata, hash_name, hmac_mode,
keyp, sop->mackeylen);
if (ret != 0) {
dprintk(1, KERN_DEBUG, "Failed to load hash for %s\n", hash_name);
ret = -EINVAL;
goto error_hash;
}
}
ses_new->alignmask = max(ses_new->cdata.alignmask,
ses_new->hdata.alignmask);
dprintk(2, KERN_DEBUG, "got alignmask %d\n", ses_new->alignmask);
ses_new->array_size = DEFAULT_PREALLOC_PAGES;
dprintk(2, KERN_DEBUG, "preallocating for %d user pages\n",
ses_new->array_size);
	ses_new->pages = kcalloc(ses_new->array_size,
				 sizeof(struct page *), GFP_KERNEL);
	ses_new->sg = kcalloc(ses_new->array_size,
			      sizeof(struct scatterlist), GFP_KERNEL);
if (ses_new->sg == NULL || ses_new->pages == NULL) {
dprintk(0, KERN_DEBUG, "Memory error\n");
ret = -ENOMEM;
goto error_hash;
}
/* put the new session to the list */
get_random_bytes(&ses_new->sid, sizeof(ses_new->sid));
mutex_init(&ses_new->sem);
mutex_lock(&fcr->sem);
restart:
list_for_each_entry(ses_ptr, &fcr->list, entry) {
/* Check for duplicate SID */
if (unlikely(ses_new->sid == ses_ptr->sid)) {
get_random_bytes(&ses_new->sid, sizeof(ses_new->sid));
/* Unless we have a broken RNG this
shouldn't loop forever... ;-) */
goto restart;
}
}
list_add(&ses_new->entry, &fcr->list);
mutex_unlock(&fcr->sem);
/* Fill in some values for the user. */
sop->ses = ses_new->sid;
return 0;
error_hash:
	cryptodev_cipher_deinit(&ses_new->cdata);
	/* safe on an uninitialised hdata (guarded by its init flag), and
	 * required when the page/sg allocation fails after hash init */
	cryptodev_hash_deinit(&ses_new->hdata);
	kfree(ses_new->sg);
	kfree(ses_new->pages);
error_cipher:
kfree(ses_new);
return ret;
}
/* Everything that needs to be done when removing a session. */
static inline void
crypto_destroy_session(struct csession *ses_ptr)
{
if (!mutex_trylock(&ses_ptr->sem)) {
dprintk(2, KERN_DEBUG, "Waiting for semaphore of sid=0x%08X\n",
ses_ptr->sid);
mutex_lock(&ses_ptr->sem);
}
dprintk(2, KERN_DEBUG, "Removed session 0x%08X\n", ses_ptr->sid);
cryptodev_cipher_deinit(&ses_ptr->cdata);
cryptodev_hash_deinit(&ses_ptr->hdata);
dprintk(2, KERN_DEBUG, "freeing space for %d user pages\n",
ses_ptr->array_size);
kfree(ses_ptr->pages);
kfree(ses_ptr->sg);
mutex_unlock(&ses_ptr->sem);
kfree(ses_ptr);
}
/* Look up a session by ID and remove. */
static int
crypto_finish_session(struct fcrypt *fcr, uint32_t sid)
{
struct csession *tmp, *ses_ptr;
struct list_head *head;
	int ret = -ENOENT;

	mutex_lock(&fcr->sem);
	head = &fcr->list;
	list_for_each_entry_safe(ses_ptr, tmp, head, entry) {
		if (ses_ptr->sid == sid) {
			list_del(&ses_ptr->entry);
			crypto_destroy_session(ses_ptr);
			ret = 0;
			break;
		}
	}

	/* After a completed list_for_each_entry_safe() the cursor points at
	 * the list head container and is never NULL, so checking ses_ptr
	 * cannot detect a miss; track the result explicitly instead. */
	if (unlikely(ret))
		dprintk(1, KERN_ERR, "Session with sid=0x%08X not found!\n",
			sid);
mutex_unlock(&fcr->sem);
return ret;
}
/* Remove all sessions when closing the file */
static int
crypto_finish_all_sessions(struct fcrypt *fcr)
{
struct csession *tmp, *ses_ptr;
struct list_head *head;
mutex_lock(&fcr->sem);
head = &fcr->list;
list_for_each_entry_safe(ses_ptr, tmp, head, entry) {
list_del(&ses_ptr->entry);
crypto_destroy_session(ses_ptr);
}
mutex_unlock(&fcr->sem);
return 0;
}
/* Look up session by session ID. The returned session is locked; the
 * caller must release it with crypto_put_session(). */
struct csession *
crypto_get_session_by_sid(struct fcrypt *fcr, uint32_t sid)
{
struct csession *ses_ptr, *retval = NULL;
if (unlikely(fcr == NULL))
return NULL;
mutex_lock(&fcr->sem);
list_for_each_entry(ses_ptr, &fcr->list, entry) {
if (ses_ptr->sid == sid) {
mutex_lock(&ses_ptr->sem);
retval = ses_ptr;
break;
}
}
mutex_unlock(&fcr->sem);
return retval;
}
static void cryptask_routine(struct work_struct *work)
{
struct crypt_priv *pcr = container_of(work, struct crypt_priv, cryptask);
struct todo_list_item *item;
LIST_HEAD(tmp);
/* fetch all pending jobs into the temporary list */
mutex_lock(&pcr->todo.lock);
list_cut_position(&tmp, &pcr->todo.list, pcr->todo.list.prev);
mutex_unlock(&pcr->todo.lock);
/* handle each job locklessly */
list_for_each_entry(item, &tmp, __hook) {
item->result = crypto_run(&pcr->fcrypt, &item->kcop);
if (unlikely(item->result))
dprintk(0, KERN_ERR, "crypto_run() failed: %d\n",
item->result);
}
/* push all handled jobs to the done list at once */
mutex_lock(&pcr->done.lock);
list_splice_tail(&tmp, &pcr->done.list);
mutex_unlock(&pcr->done.lock);
/* wake for POLLIN */
wake_up_interruptible(&pcr->user_waiter);
}
/* ====== /dev/crypto ====== */
static int
cryptodev_open(struct inode *inode, struct file *filp)
{
	struct todo_list_item *tmp, *tmp_next;
	struct crypt_priv *pcr;
	int i;

	pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
	if (!pcr)
		return -ENOMEM;
mutex_init(&pcr->fcrypt.sem);
INIT_LIST_HEAD(&pcr->fcrypt.list);
INIT_LIST_HEAD(&pcr->free.list);
INIT_LIST_HEAD(&pcr->todo.list);
INIT_LIST_HEAD(&pcr->done.list);
INIT_WORK(&pcr->cryptask, cryptask_routine);
mutex_init(&pcr->free.lock);
mutex_init(&pcr->todo.lock);
mutex_init(&pcr->done.lock);
init_waitqueue_head(&pcr->user_waiter);
	for (i = 0; i < DEF_COP_RINGSIZE; i++) {
		tmp = kzalloc(sizeof(struct todo_list_item), GFP_KERNEL);
		if (!tmp)
			goto err_ringalloc;
		pcr->itemcount++;
		dprintk(2, KERN_DEBUG, "allocated new item at %lx\n",
			(unsigned long)tmp);
		list_add(&tmp->__hook, &pcr->free.list);
	}

	filp->private_data = pcr;
	dprintk(2, KERN_DEBUG,
		"Cryptodev handle initialised, %d elements in queue\n",
		DEF_COP_RINGSIZE);
	return 0;

	/* On allocation failure, release everything allocated so far
	 * instead of leaking pcr and the partially filled free list. */
err_ringalloc:
	list_for_each_entry_safe(tmp, tmp_next, &pcr->free.list, __hook) {
		list_del(&tmp->__hook);
		kfree(tmp);
	}
	kfree(pcr);
	return -ENOMEM;
}
static int
cryptodev_release(struct inode *inode, struct file *filp)
{
struct crypt_priv *pcr = filp->private_data;
struct todo_list_item *item, *item_safe;
int items_freed = 0;
if (!pcr)
return 0;
cancel_work_sync(&pcr->cryptask);
mutex_destroy(&pcr->todo.lock);
mutex_destroy(&pcr->done.lock);
mutex_destroy(&pcr->free.lock);
list_splice_tail(&pcr->todo.list, &pcr->free.list);
list_splice_tail(&pcr->done.list, &pcr->free.list);
list_for_each_entry_safe(item, item_safe, &pcr->free.list, __hook) {
dprintk(2, KERN_DEBUG, "freeing item at %lx\n",
(unsigned long)item);
list_del(&item->__hook);
kfree(item);
items_freed++;
}
if (items_freed != pcr->itemcount) {
dprintk(0, KERN_ERR,
"freed %d items, but %d should exist!\n",
items_freed, pcr->itemcount);
}
crypto_finish_all_sessions(&pcr->fcrypt);
kfree(pcr);
filp->private_data = NULL;
dprintk(2, KERN_DEBUG,
"Cryptodev handle deinitialised, %d elements freed\n",
items_freed);
return 0;
}
static int
clonefd(struct file *filp)
{
int ret;
ret = get_unused_fd();
if (ret >= 0) {
get_file(filp);
fd_install(ret, filp);
}
return ret;
}
#ifdef ENABLE_ASYNC
/* enqueue a job for asynchronous completion
*
* returns:
* -EBUSY when there are no free queue slots left
 * (and the number of slots has reached its MAX_COP_RINGSIZE limit)
* -EFAULT when there was a memory allocation error
* 0 on success */
static int crypto_async_run(struct crypt_priv *pcr, struct kernel_crypt_op *kcop)
{
struct todo_list_item *item = NULL;
if (unlikely(kcop->cop.flags & COP_FLAG_NO_ZC))
return -EINVAL;
mutex_lock(&pcr->free.lock);
if (likely(!list_empty(&pcr->free.list))) {
item = list_first_entry(&pcr->free.list,
struct todo_list_item, __hook);
list_del(&item->__hook);
} else if (pcr->itemcount < MAX_COP_RINGSIZE) {
pcr->itemcount++;
} else {
mutex_unlock(&pcr->free.lock);
return -EBUSY;
}
mutex_unlock(&pcr->free.lock);
	if (unlikely(!item)) {
		item = kzalloc(sizeof(struct todo_list_item), GFP_KERNEL);
		if (unlikely(!item)) {
			/* Undo the itemcount bump above so the accounting
			 * check in cryptodev_release() stays correct. */
			mutex_lock(&pcr->free.lock);
			pcr->itemcount--;
			mutex_unlock(&pcr->free.lock);
			return -EFAULT;
		}
		dprintk(1, KERN_INFO, "increased item count to %d\n",
			pcr->itemcount);
	}
memcpy(&item->kcop, kcop, sizeof(struct kernel_crypt_op));
mutex_lock(&pcr->todo.lock);
list_add_tail(&item->__hook, &pcr->todo.list);
mutex_unlock(&pcr->todo.lock);
queue_work(cryptodev_wq, &pcr->cryptask);
return 0;
}
/* get the first completed job from the "done" queue
*
* returns:
* -EBUSY if no completed jobs are ready (yet)
* the return value of crypto_run() otherwise */
static int crypto_async_fetch(struct crypt_priv *pcr,
struct kernel_crypt_op *kcop)
{
struct todo_list_item *item;
int retval;
mutex_lock(&pcr->done.lock);
if (list_empty(&pcr->done.list)) {
mutex_unlock(&pcr->done.lock);
return -EBUSY;
}
item = list_first_entry(&pcr->done.list, struct todo_list_item, __hook);
list_del(&item->__hook);
mutex_unlock(&pcr->done.lock);
memcpy(kcop, &item->kcop, sizeof(struct kernel_crypt_op));
retval = item->result;
mutex_lock(&pcr->free.lock);
list_add_tail(&item->__hook, &pcr->free.list);
mutex_unlock(&pcr->free.lock);
/* wake for POLLOUT */
wake_up_interruptible(&pcr->user_waiter);
return retval;
}
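/*
 * A minimal userspace sketch of the asynchronous path (illustrative
 * only, error handling omitted): CIOCASYNCCRYPT enqueues a filled-in
 * struct crypt_op, poll() waits until a completed job raises POLLIN,
 * and CIOCASYNCFETCH collects the result:
 *
 *	ioctl(cfd, CIOCASYNCCRYPT, &cop);
 *
 *	struct pollfd pfd = { .fd = cfd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *
 *	ioctl(cfd, CIOCASYNCFETCH, &cop);	// crypto_run()'s result
 */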
#endif
/* this function has to be called from process context */
static int fill_kcop_from_cop(struct kernel_crypt_op *kcop, struct fcrypt *fcr)
{
struct crypt_op *cop = &kcop->cop;
struct csession *ses_ptr;
int rc;
/* this also enters ses_ptr->sem */
ses_ptr = crypto_get_session_by_sid(fcr, cop->ses);
if (unlikely(!ses_ptr)) {
dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", cop->ses);
return -EINVAL;
}
kcop->ivlen = cop->iv ? ses_ptr->cdata.ivsize : 0;
kcop->digestsize = 0; /* will be updated during operation */
crypto_put_session(ses_ptr);
kcop->task = current;
kcop->mm = current->mm;
if (cop->iv) {
rc = copy_from_user(kcop->iv, cop->iv, kcop->ivlen);
if (unlikely(rc)) {
dprintk(1, KERN_ERR,
"error copying IV (%d bytes), copy_from_user returned %d for address %lx\n",
kcop->ivlen, rc, (unsigned long)cop->iv);
return -EFAULT;
}
}
return 0;
}
/* this function has to be called from process context */
static int fill_cop_from_kcop(struct kernel_crypt_op *kcop, struct fcrypt *fcr)
{
int ret;
if (kcop->digestsize) {
ret = copy_to_user(kcop->cop.mac,
kcop->hash_output, kcop->digestsize);
if (unlikely(ret))
return -EFAULT;
}
	if (kcop->ivlen && (kcop->cop.flags & COP_FLAG_WRITE_IV)) {
ret = copy_to_user(kcop->cop.iv,
kcop->iv, kcop->ivlen);
if (unlikely(ret))
return -EFAULT;
}
return 0;
}
static int kcop_from_user(struct kernel_crypt_op *kcop,
struct fcrypt *fcr, void __user *arg)
{
if (unlikely(copy_from_user(&kcop->cop, arg, sizeof(kcop->cop))))
return -EFAULT;
return fill_kcop_from_cop(kcop, fcr);
}
static int kcop_to_user(struct kernel_crypt_op *kcop,
struct fcrypt *fcr, void __user *arg)
{
int ret;
ret = fill_cop_from_kcop(kcop, fcr);
if (unlikely(ret)) {
dprintk(1, KERN_ERR, "Error in fill_cop_from_kcop\n");
return ret;
}
if (unlikely(copy_to_user(arg, &kcop->cop, sizeof(kcop->cop)))) {
dprintk(1, KERN_ERR, "Cannot copy to userspace\n");
return -EFAULT;
}
return 0;
}
static inline void tfm_info_to_alg_info(struct alg_info *dst, struct crypto_tfm *tfm)
{
snprintf(dst->cra_name, CRYPTODEV_MAX_ALG_NAME,
"%s", crypto_tfm_alg_name(tfm));
snprintf(dst->cra_driver_name, CRYPTODEV_MAX_ALG_NAME,
"%s", crypto_tfm_alg_driver_name(tfm));
}
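/* Heuristic fallback for kernels whose crypto API lacks
 * CRYPTO_ALG_KERN_DRIVER_ONLY: guess from the driver name whether the
 * transform is backed by a hardware driver. */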
static unsigned int is_known_accelerated(struct crypto_tfm *tfm)
{
const char* name = crypto_tfm_alg_driver_name(tfm);
if (name == NULL)
return 1; /* assume accelerated */
if (strstr(name, "-talitos"))
return 1;
else if (strncmp(name, "mv-", 3) == 0)
return 1;
else if (strstr(name, "geode"))
return 1;
else if (strstr(name, "hifn"))
return 1;
else if (strstr(name, "-ixp4xx"))
return 1;
else if (strstr(name, "-omap"))
return 1;
else if (strstr(name, "-picoxcell"))
return 1;
else if (strstr(name, "-s5p"))
return 1;
else if (strstr(name, "-ppc4xx"))
return 1;
else if (strstr(name, "-caam"))
return 1;
else if (strstr(name, "-n2"))
return 1;
return 0;
}
static int get_session_info(struct fcrypt *fcr, struct session_info_op *siop)
{
struct csession *ses_ptr;
struct crypto_tfm *tfm;
/* this also enters ses_ptr->sem */
ses_ptr = crypto_get_session_by_sid(fcr, siop->ses);
if (unlikely(!ses_ptr)) {
dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", siop->ses);
return -EINVAL;
}
siop->flags = 0;
if (ses_ptr->cdata.init) {
if (ses_ptr->cdata.aead == 0) {
tfm = crypto_ablkcipher_tfm(ses_ptr->cdata.async.s);
} else {
tfm = crypto_aead_tfm(ses_ptr->cdata.async.as);
}
tfm_info_to_alg_info(&siop->cipher_info, tfm);
#ifdef CRYPTO_ALG_KERN_DRIVER_ONLY
if (tfm->__crt_alg->cra_flags & CRYPTO_ALG_KERN_DRIVER_ONLY)
siop->flags |= SIOP_FLAG_KERNEL_DRIVER_ONLY;
#else
if (is_known_accelerated(tfm))
siop->flags |= SIOP_FLAG_KERNEL_DRIVER_ONLY;
#endif
}
if (ses_ptr->hdata.init) {
tfm = crypto_ahash_tfm(ses_ptr->hdata.async.s);
tfm_info_to_alg_info(&siop->hash_info, tfm);
#ifdef CRYPTO_ALG_KERN_DRIVER_ONLY
if (tfm->__crt_alg->cra_flags & CRYPTO_ALG_KERN_DRIVER_ONLY)
siop->flags |= SIOP_FLAG_KERNEL_DRIVER_ONLY;
#else
if (is_known_accelerated(tfm))
siop->flags |= SIOP_FLAG_KERNEL_DRIVER_ONLY;
#endif
}
siop->alignmask = ses_ptr->alignmask;
crypto_put_session(ses_ptr);
return 0;
}
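/*
 * A minimal userspace sketch for CIOCGSESSINFO (illustrative only):
 * query an open session to see which driver backs it and whether it is
 * provided by a kernel/hardware driver only:
 *
 *	struct session_info_op siop = { .ses = sop.ses };
 *	ioctl(cfd, CIOCGSESSINFO, &siop);
 *	printf("cipher: %s%s\n", siop.cipher_info.cra_driver_name,
 *	       (siop.flags & SIOP_FLAG_KERNEL_DRIVER_ONLY) ?
 *		" (kernel driver only)" : "");
 */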
static long
cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
{
void __user *arg = (void __user *)arg_;
int __user *p = arg;
struct session_op sop;
struct kernel_crypt_op kcop;
struct kernel_crypt_auth_op kcaop;
struct crypt_priv *pcr = filp->private_data;
struct fcrypt *fcr;
struct session_info_op siop;
uint32_t ses;
int ret, fd;
	BUG_ON(!pcr);
fcr = &pcr->fcrypt;
switch (cmd) {
case CIOCASYMFEAT:
return put_user(0, p);
	case CRIOGET:
		fd = clonefd(filp);
		/* Do not hand a negative "fd" to userspace as success. */
		if (unlikely(fd < 0))
			return fd;
		ret = put_user(fd, p);
		if (unlikely(ret)) {
			sys_close(fd);
			return ret;
		}
		return ret;
case CIOCGSESSION:
if (unlikely(copy_from_user(&sop, arg, sizeof(sop))))
return -EFAULT;
ret = crypto_create_session(fcr, &sop);
if (unlikely(ret))
return ret;
ret = copy_to_user(arg, &sop, sizeof(sop));
if (unlikely(ret)) {
crypto_finish_session(fcr, sop.ses);
return -EFAULT;
}
return ret;
case CIOCFSESSION:
ret = get_user(ses, (uint32_t __user *)arg);
if (unlikely(ret))
return ret;
ret = crypto_finish_session(fcr, ses);
return ret;
case CIOCGSESSINFO:
if (unlikely(copy_from_user(&siop, arg, sizeof(siop))))
return -EFAULT;
ret = get_session_info(fcr, &siop);
if (unlikely(ret))
return ret;
		/* copy_to_user() returns bytes-not-copied, not an errno */
		if (unlikely(copy_to_user(arg, &siop, sizeof(siop))))
			return -EFAULT;
		return 0;
	case CIOCCRYPT:
		ret = kcop_from_user(&kcop, fcr, arg);
		if (unlikely(ret)) {
			dprintk(1, KERN_WARNING, "Error copying from user\n");
			return ret;
		}
ret = crypto_run(fcr, &kcop);
if (unlikely(ret)) {
dprintk(1, KERN_WARNING, "Error in crypto_run\n");
return ret;
}
return kcop_to_user(&kcop, fcr, arg);
	case CIOCAUTHCRYPT:
		ret = kcaop_from_user(&kcaop, fcr, arg);
		if (unlikely(ret)) {
			dprintk(1, KERN_WARNING, "Error copying from user\n");
			return ret;
		}
ret = crypto_auth_run(fcr, &kcaop);
if (unlikely(ret)) {
dprintk(1, KERN_WARNING, "Error in crypto_auth_run\n");
return ret;
}
return kcaop_to_user(&kcaop, fcr, arg);
#ifdef ENABLE_ASYNC
	case CIOCASYNCCRYPT:
		ret = kcop_from_user(&kcop, fcr, arg);
		if (unlikely(ret))
			return ret;
return crypto_async_run(pcr, &kcop);
case CIOCASYNCFETCH:
ret = crypto_async_fetch(pcr, &kcop);
if (unlikely(ret))
return ret;
return kcop_to_user(&kcop, fcr, arg);
#endif
default:
return -EINVAL;
}
}
/* compatibility code for 32-bit userlands */
#ifdef CONFIG_COMPAT
static inline void
compat_to_session_op(struct compat_session_op *compat, struct session_op *sop)
{
sop->cipher = compat->cipher;
sop->mac = compat->mac;
sop->keylen = compat->keylen;
sop->key = compat_ptr(compat->key);
sop->mackeylen = compat->mackeylen;
sop->mackey = compat_ptr(compat->mackey);
sop->ses = compat->ses;
}
static inline void
session_op_to_compat(struct session_op *sop, struct compat_session_op *compat)
{
compat->cipher = sop->cipher;
compat->mac = sop->mac;
compat->keylen = sop->keylen;
compat->key = ptr_to_compat(sop->key);
compat->mackeylen = sop->mackeylen;
compat->mackey = ptr_to_compat(sop->mackey);
compat->ses = sop->ses;
}
static inline void
compat_to_crypt_op(struct compat_crypt_op *compat, struct crypt_op *cop)
{
cop->ses = compat->ses;
cop->op = compat->op;
cop->flags = compat->flags;
cop->len = compat->len;
cop->src = compat_ptr(compat->src);
cop->dst = compat_ptr(compat->dst);
cop->mac = compat_ptr(compat->mac);
cop->iv = compat_ptr(compat->iv);
}
static inline void
crypt_op_to_compat(struct crypt_op *cop, struct compat_crypt_op *compat)
{
compat->ses = cop->ses;
compat->op = cop->op;
compat->flags = cop->flags;
compat->len = cop->len;
compat->src = ptr_to_compat(cop->src);
compat->dst = ptr_to_compat(cop->dst);
compat->mac = ptr_to_compat(cop->mac);
compat->iv = ptr_to_compat(cop->iv);
}
static int compat_kcop_from_user(struct kernel_crypt_op *kcop,
struct fcrypt *fcr, void __user *arg)
{
struct compat_crypt_op compat_cop;
if (unlikely(copy_from_user(&compat_cop, arg, sizeof(compat_cop))))
return -EFAULT;
compat_to_crypt_op(&compat_cop, &kcop->cop);
return fill_kcop_from_cop(kcop, fcr);
}
static int compat_kcop_to_user(struct kernel_crypt_op *kcop,
struct fcrypt *fcr, void __user *arg)
{
int ret;
struct compat_crypt_op compat_cop;
ret = fill_cop_from_kcop(kcop, fcr);
if (unlikely(ret)) {
dprintk(1, KERN_WARNING, "Error in fill_cop_from_kcop\n");
return ret;
}
crypt_op_to_compat(&kcop->cop, &compat_cop);
if (unlikely(copy_to_user(arg, &compat_cop, sizeof(compat_cop)))) {
dprintk(1, KERN_WARNING, "Error copying to user\n");
return -EFAULT;
}
return 0;
}
static long
cryptodev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg_)
{
void __user *arg = (void __user *)arg_;
struct crypt_priv *pcr = file->private_data;
struct fcrypt *fcr;
struct session_op sop;
struct compat_session_op compat_sop;
struct kernel_crypt_op kcop;
int ret;
	BUG_ON(!pcr);
fcr = &pcr->fcrypt;
switch (cmd) {
case CIOCASYMFEAT:
case CRIOGET:
case CIOCFSESSION:
case CIOCGSESSINFO:
return cryptodev_ioctl(file, cmd, arg_);
case COMPAT_CIOCGSESSION:
if (unlikely(copy_from_user(&compat_sop, arg,
sizeof(compat_sop))))
return -EFAULT;
compat_to_session_op(&compat_sop, &sop);
ret = crypto_create_session(fcr, &sop);
if (unlikely(ret))
return ret;
session_op_to_compat(&sop, &compat_sop);
ret = copy_to_user(arg, &compat_sop, sizeof(compat_sop));
if (unlikely(ret)) {
crypto_finish_session(fcr, sop.ses);
return -EFAULT;
}
return ret;
case COMPAT_CIOCCRYPT:
ret = compat_kcop_from_user(&kcop, fcr, arg);
if (unlikely(ret))
return ret;
ret = crypto_run(fcr, &kcop);
if (unlikely(ret))
return ret;
return compat_kcop_to_user(&kcop, fcr, arg);
#ifdef ENABLE_ASYNC
	case COMPAT_CIOCASYNCCRYPT:
		ret = compat_kcop_from_user(&kcop, fcr, arg);
		if (unlikely(ret))
			return ret;
return crypto_async_run(pcr, &kcop);
case COMPAT_CIOCASYNCFETCH:
ret = crypto_async_fetch(pcr, &kcop);
if (unlikely(ret))
return ret;
return compat_kcop_to_user(&kcop, fcr, arg);
#endif
default:
return -EINVAL;
}
}
#endif /* CONFIG_COMPAT */
static unsigned int cryptodev_poll(struct file *file, poll_table *wait)
{
struct crypt_priv *pcr = file->private_data;
int ret = 0;
poll_wait(file, &pcr->user_waiter, wait);
if (!list_empty_careful(&pcr->done.list))
ret |= POLLIN | POLLRDNORM;
if (!list_empty_careful(&pcr->free.list) || pcr->itemcount < MAX_COP_RINGSIZE)
ret |= POLLOUT | POLLWRNORM;
return ret;
}
static const struct file_operations cryptodev_fops = {
.owner = THIS_MODULE,
.open = cryptodev_open,
.release = cryptodev_release,
.unlocked_ioctl = cryptodev_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = cryptodev_compat_ioctl,
#endif /* CONFIG_COMPAT */
.poll = cryptodev_poll,
};
static struct miscdevice cryptodev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "crypto",
.fops = &cryptodev_fops,
.mode = S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH,
};
static int __init
cryptodev_register(void)
{
int rc;
rc = misc_register(&cryptodev);
if (unlikely(rc)) {
printk(KERN_ERR PFX "registration of /dev/crypto failed\n");
return rc;
}
return 0;
}
static void __exit
cryptodev_deregister(void)
{
misc_deregister(&cryptodev);
}
/* ====== Module init/exit ====== */
static int __init init_cryptodev(void)
{
int rc;
cryptodev_wq = create_workqueue("cryptodev_queue");
if (unlikely(!cryptodev_wq)) {
printk(KERN_ERR PFX "failed to allocate the cryptodev workqueue\n");
		return -ENOMEM;
}
rc = cryptodev_register();
if (unlikely(rc)) {
destroy_workqueue(cryptodev_wq);
return rc;
}
printk(KERN_INFO PFX "driver %s loaded.\n", VERSION);
return 0;
}
static void __exit exit_cryptodev(void)
{
flush_workqueue(cryptodev_wq);
destroy_workqueue(cryptodev_wq);
cryptodev_deregister();
printk(KERN_INFO PFX "driver unloaded.\n");
}
module_init(init_cryptodev);
module_exit(exit_cryptodev);