crypto: tegra-aes: dual-core support
Sanjay Singh Rawat [Thu, 28 Apr 2011 13:25:57 +0000 (18:25 +0530)]
* add BSEA engine support for encryption and decryption
* add arbitration semaphore ID for BSEA

Bug 803932

Original change: http://git-master/r/#change,29672
(cherry picked from commit 0008cdb0f38d0cd0c074671fc067c4321f340b06)

Original-Change-Id: I59fcaab29c47a8b42e7470b30486851cfe90848f
Signed-off-by: Sanjay Singh Rawat <srawat@nvidia.com>
Reviewed-on: http://git-master/r/30190
Tested-by: Varun Wadekar <vwadekar@nvidia.com>
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>

Rebase-Id: R6f1bf287860a24d0a535e49f516581b31092d182

drivers/crypto/tegra-aes.c
drivers/crypto/tegra-aes.h
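
Background for reviewers: with this change a single shared crypto queue feeds two
independent BSE engines. Each engine carries its own FLAGS_BUSY bit and its own
workqueue, so an enqueue kicks whichever engine is idle. The sketch below condenses
tegra_aes_crypt() and the dispatch path from the diff; it assumes the driver's own
types and macros (struct tegra_aes_dev, FLAGS_BUSY, bsev_wq/bsea_wq) and elides the
debug print and rctx/mode bookkeeping, so read it as a paraphrase rather than a
verbatim excerpt.

    /* Condensed dispatch path: one request queue, two engines. */
    static int tegra_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
    {
            struct tegra_aes_dev *dd = aes_dev;
            unsigned long flags;
            int bsev_busy, bsea_busy, err;

            spin_lock_irqsave(&dd->lock, flags);
            err = ablkcipher_enqueue_request(&dd->queue, req);
            /* each engine tracks busy state in its own flag word */
            bsev_busy = test_and_set_bit(FLAGS_BUSY, &dd->bsev.busy);
            bsea_busy = test_and_set_bit(FLAGS_BUSY, &dd->bsea.busy);
            spin_unlock_irqrestore(&dd->lock, flags);

            /* an idle engine drains the shared queue from its workqueue */
            if (!bsev_busy)
                    queue_work(bsev_wq, &bsev_work);
            if (!bsea_busy)
                    queue_work(bsea_wq, &bsea_work);

            return err;
    }

Each workqueue handler then loops tegra_aes_handle_req() for its engine until the
shared queue is empty, taking the engine's arbitration semaphore (TEGRA_ARB_BSEV or
TEGRA_ARB_BSEA) around each request.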

index 285c851..c691ee5 100644
@@ -36,6 +36,7 @@
 
 #include <mach/arb_sema.h>
 #include <mach/clk.h>
+#include "../video/tegra/nvmap/nvmap.h"
 
 #include <crypto/scatterwalk.h>
 #include <crypto/aes.h>
 
 #define FLAGS_MODE_MASK                0x00ff
 #define FLAGS_ENCRYPT          BIT(0)
-#define FLAGS_CBC                      BIT(1)
-#define FLAGS_GIV                      BIT(2)
-#define FLAGS_RNG                      BIT(3)
-#define FLAGS_OFB                      BIT(4)
-#define FLAGS_NEW_KEY                  BIT(5)
-#define FLAGS_NEW_IV                   BIT(6)
-#define FLAGS_INIT                     BIT(7)
-#define FLAGS_FAST                     BIT(8)
-#define FLAGS_BUSY                     9
+#define FLAGS_CBC              BIT(1)
+#define FLAGS_GIV              BIT(2)
+#define FLAGS_RNG              BIT(3)
+#define FLAGS_OFB              BIT(4)
+#define FLAGS_INIT             BIT(5)
+#define FLAGS_BUSY             1
 
 /*
  * Defines AES engine Max process bytes size in one go, which takes 1 msec.
  * cycles of AVP/BSE is 216K. In this duration, AES can process 216/11 ~= 19KB
  * Based on this AES_HW_DMA_BUFFER_SIZE_BYTES is configured to 16KB.
  */
-#define AES_HW_DMA_BUFFER_SIZE_BYTES 0x4000
+#define AES_HW_DMA_BUFFER_SIZE_BYTES   0x4000
 
 /*
  * The key table length is 64 bytes
  * (This includes first upto 32 bytes key + 16 bytes original initial vector
  * and 16 bytes updated initial vector)
  */
-#define AES_HW_KEY_TABLE_LENGTH_BYTES 64
+#define AES_HW_KEY_TABLE_LENGTH_BYTES  64
 
-#define AES_HW_IV_SIZE 16
-#define AES_HW_KEYSCHEDULE_LEN 256
-#define ARB_SEMA_TIMEOUT 500
+#define AES_HW_IV_SIZE 16
+#define AES_HW_KEYSCHEDULE_LEN 256
+#define ARB_SEMA_TIMEOUT       500
 
 /*
  * The memory being used is divides as follows:
  */
 #define AES_IVKEY_SIZE (AES_HW_KEY_TABLE_LENGTH_BYTES + AES_HW_KEYSCHEDULE_LEN)
 
-#define DEFAULT_RNG_BLK_SZ 16
+#define DEFAULT_RNG_BLK_SZ     16
 
 /* As of now only 5 commands are USED for AES encryption/Decryption */
-#define AES_HW_MAX_ICQ_LENGTH 5
+#define AES_HW_MAX_ICQ_LENGTH  4
 
-#define ICQBITSHIFT_BLKCNT 0
+#define ICQBITSHIFT_BLKCNT     0
 
 /* memdma_vd command */
 #define MEMDMA_DIR_DTOVRAM     0
@@ -136,120 +134,196 @@ struct tegra_aes_slot {
        bool available;
 };
 
-static struct tegra_aes_slot ssk = {
-       .slot_num = SSK_SLOT_NUM,
-       .available = true,
-};
-
 struct tegra_aes_reqctx {
        unsigned long mode;
 };
 
-#define TEGRA_AES_QUEUE_LENGTH 50
+#define TEGRA_AES_QUEUE_LENGTH 50
 
-struct tegra_aes_dev {
-       struct device *dev;
-       phys_addr_t phys_base;
-       void __iomem *io_base;
-       dma_addr_t ivkey_phys_base;
-       void __iomem *ivkey_base;
+struct tegra_aes_engine {
+       struct tegra_aes_dev *dd;
        struct clk *iclk;
        struct clk *pclk;
-       struct tegra_aes_ctx *ctx;
-       unsigned long flags;
+       struct ablkcipher_request *req;
+       struct scatterlist *in_sg;
        struct completion op_complete;
-       u32 *buf_in;
+       struct scatterlist *out_sg;
+       void __iomem *io_base;
+       void __iomem *ivkey_base;
+       unsigned long phys_base;
+       dma_addr_t ivkey_phys_base;
        dma_addr_t dma_buf_in;
-       u32 *buf_out;
        dma_addr_t dma_buf_out;
-       u8 *iv;
-       u8 dt[DEFAULT_RNG_BLK_SZ];
-       int ivlen;
-       u64 ctr;
-       int res_id;
-       spinlock_t lock;
-       struct crypto_queue queue;
-       struct tegra_aes_slot *slots;
-       struct ablkcipher_request *req;
        size_t total;
-       struct scatterlist *in_sg;
        size_t in_offset;
-       struct scatterlist *out_sg;
        size_t out_offset;
+       u32 engine_offset;
+       u32 *buf_in;
+       u32 *buf_out;
+       int res_id;
+       int slot_num;
+       int keylen;
+       unsigned long busy;
+       u8 irq;
+       bool new_key;
+       bool use_ssk;
+};
+
+struct tegra_aes_dev {
+       struct device *dev;
+       struct tegra_aes_slot *slots;
+       struct tegra_aes_engine bsev;
+       struct tegra_aes_engine bsea;
+       struct nvmap_client *client;
+       struct nvmap_handle_ref *h_ref;
+       unsigned long bsea_iram_address;
+       void *bsea_iram_base;
+       struct tegra_aes_ctx *ctx;
+       struct crypto_queue queue;
+       spinlock_t lock;
+       u64 ctr;
+       unsigned long flags;
+       u8 dt[DEFAULT_RNG_BLK_SZ];
 };
 
 static struct tegra_aes_dev *aes_dev;
 
 struct tegra_aes_ctx {
        struct tegra_aes_dev *dd;
-       unsigned long flags;
        struct tegra_aes_slot *slot;
-       u8 key[AES_MAX_KEY_SIZE];
-       int keylen;
 };
 
-static struct tegra_aes_ctx rng_ctx = {
-       .flags = FLAGS_NEW_KEY,
-       .keylen = AES_KEYSIZE_128,
-};
+static struct tegra_aes_ctx rng_ctx;
 
 /* keep registered devices data here */
-static LIST_HEAD(dev_list);
+static LIST_HEAD(slot_list);
 static DEFINE_SPINLOCK(list_lock);
 static DEFINE_MUTEX(aes_lock);
 
-static void aes_workqueue_handler(struct work_struct *work);
-static DECLARE_WORK(aes_work, aes_workqueue_handler);
-static struct workqueue_struct *aes_wq;
+/* Engine specific work queues */
+static void bsev_workqueue_handler(struct work_struct *work);
+static void bsea_workqueue_handler(struct work_struct *work);
+
+static DECLARE_WORK(bsev_work, bsev_workqueue_handler);
+static DECLARE_WORK(bsea_work, bsea_workqueue_handler);
+
+static struct workqueue_struct *bsev_wq;
+static struct workqueue_struct *bsea_wq;
 
 extern unsigned long long tegra_chip_uid(void);
 
-static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset)
+static inline u32 aes_readl(struct tegra_aes_engine *engine, u32 offset)
 {
-       return readl(dd->io_base + offset);
+       return readl(engine->io_base + offset);
 }
 
-static inline void aes_writel(struct tegra_aes_dev *dd, u32 val, u32 offset)
+static inline void aes_writel(struct tegra_aes_engine *engine,
+       u32 val, u32 offset)
 {
-       writel(val, dd->io_base + offset);
+       writel(val, engine->io_base + offset);
 }
 
-static int aes_hw_init(struct tegra_aes_dev *dd)
+static int bsea_alloc_iram(struct tegra_aes_dev *dd)
 {
-       int ret = 0;
+       size_t size, align;
+       int err;
+
+       dd->h_ref = NULL;
+       /* [key + iv + updated iv = 64B] * 8 slots = 512 bytes */
+       size = align = (AES_HW_KEY_TABLE_LENGTH_BYTES * AES_NR_KEYSLOTS);
+       dd->client = nvmap_create_client(nvmap_dev, "aes_bsea");
+       if (IS_ERR(dd->client)) {
+               dev_err(dd->dev, "nvmap_create_client failed\n");
+               goto out;
+       }
 
-       ret = clk_enable(dd->pclk);
-       if (ret < 0) {
-               dev_err(dd->dev, "%s: pclock enable fail(%d)\n", __func__, ret);
-               return ret;
+       dd->h_ref = nvmap_create_handle(dd->client, size);
+       if (IS_ERR(dd->h_ref)) {
+               dev_err(dd->dev, "nvmap_create_handle failed\n");
+               goto out;
        }
 
-       ret = clk_enable(dd->iclk);
-       if (ret < 0) {
-               dev_err(dd->dev, "%s: iclock enable fail(%d)\n", __func__, ret);
-               clk_disable(dd->pclk);
-               return ret;
+       /* Allocate memory in the iram */
+       err = nvmap_alloc_handle_id(dd->client, nvmap_ref_to_id(dd->h_ref),
+               NVMAP_HEAP_CARVEOUT_IRAM, align, 0);
+       if (err) {
+               dev_err(dd->dev, "nvmap_alloc_handle_id failed\n");
+               nvmap_free_handle_id(dd->client, nvmap_ref_to_id(dd->h_ref));
+               goto out;
+       }
+       dd->bsea_iram_address = nvmap_handle_address(dd->client,
+                               nvmap_ref_to_id(dd->h_ref));
+
+       dd->bsea_iram_base = nvmap_mmap(dd->h_ref);     /* get virtual address */
+       if (!dd->bsea_iram_base) {
+               dev_err(dd->dev, "%s: no mem, BSEA IRAM alloc failure\n",
+               __func__);
+               goto out;
+       }
+       memset(dd->bsea_iram_base, 0, dd->h_ref->handle->size);
+
+       return 0;
+out:
+       if (dd->bsea_iram_base)
+               nvmap_munmap(dd->h_ref, dd->bsea_iram_base);
+       if (dd->client)
+               nvmap_client_put(dd->client);
+       return -ENOMEM;
+}
+
+static void bsea_free_iram(struct tegra_aes_dev *dd)
+{
+       if (dd->bsea_iram_base)
+               nvmap_munmap(dd->h_ref, dd->bsea_iram_base);
+       if (dd->client)
+               nvmap_client_put(dd->client);
+}
+
+static int aes_hw_init(struct tegra_aes_engine *engine)
+{
+       struct tegra_aes_dev *dd = aes_dev;
+       int ret = 0;
+
+       if (engine->pclk) {
+               ret = clk_enable(engine->pclk);
+               if (ret < 0) {
+                       dev_err(dd->dev, "%s: pclock enable fail(%d)\n",
+                       __func__, ret);
+                       return ret;
+               }
+       }
+       if (engine->iclk) {
+               ret = clk_enable(engine->iclk);
+               if (ret < 0) {
+                       dev_err(dd->dev, "%s: iclock enable fail(%d)\n",
+                       __func__, ret);
+                       clk_disable(engine->pclk);
+                       return ret;
+               }
        }
 
        return ret;
 }
 
-static void aes_hw_deinit(struct tegra_aes_dev *dd)
+static void aes_hw_deinit(struct tegra_aes_engine *engine)
 {
-       clk_disable(dd->iclk);
-       clk_disable(dd->pclk);
+       if (engine->pclk)
+               clk_disable(engine->pclk);
+
+       if (engine->iclk)
+               clk_disable(engine->iclk);
 }
 
-static int aes_start_crypt(struct tegra_aes_dev *dd, u32 in_addr, u32 out_addr,
-       int nblocks, int mode, bool upd_iv)
+static int aes_start_crypt(struct tegra_aes_engine *eng, u32 in_addr,
+       u32 out_addr, int nblocks, int mode, bool upd_iv)
 {
        u32 cmdq[AES_HW_MAX_ICQ_LENGTH];
        int qlen = 0, i, eng_busy, icq_empty, ret;
        u32 value;
 
        /* error, dma xfer complete */
-       aes_writel(dd, 0x33, INT_ENB);
-       enable_irq(INT_VDE_BSE_V);
+       aes_writel(eng, 0x33, INT_ENB);
+       enable_irq(eng->irq);
 
        cmdq[qlen++] = UCQOPCODE_DMASETUP << ICQBITSHIFT_OPCODE;
        cmdq[qlen++] = in_addr;
@@ -257,19 +331,17 @@ static int aes_start_crypt(struct tegra_aes_dev *dd, u32 in_addr, u32 out_addr,
                (nblocks-1) << ICQBITSHIFT_BLKCNT;
        cmdq[qlen++] = UCQOPCODE_DMACOMPLETE << ICQBITSHIFT_OPCODE;
 
-       value = aes_readl(dd, CMDQUE_CONTROL);
+       value = aes_readl(eng, CMDQUE_CONTROL);
        /* access SDRAM through AHB */
-       value &= ~CMDQ_CTRL_SRC_STM_SEL_FIELD;
-       value &= ~CMDQ_CTRL_DST_STM_SEL_FIELD;
+       value &= (~CMDQ_CTRL_SRC_STM_SEL_FIELD & ~CMDQ_CTRL_DST_STM_SEL_FIELD);
        value |= (CMDQ_CTRL_SRC_STM_SEL_FIELD | CMDQ_CTRL_DST_STM_SEL_FIELD |
                CMDQ_CTRL_ICMDQEN_FIELD);
-       aes_writel(dd, value, CMDQUE_CONTROL);
-       dev_dbg(dd->dev, "cmd_q_ctrl=0x%x", value);
+       aes_writel(eng, value, CMDQUE_CONTROL);
 
        value = 0;
        if (mode & FLAGS_CBC) {
                value = ((0x1 << SECURE_INPUT_ALG_SEL_SHIFT) |
-                       ((dd->ctx->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
+                       ((eng->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
                        ((u32)upd_iv << SECURE_IV_SELECT_SHIFT) |
                        (((mode & FLAGS_ENCRYPT) ? 2 : 3)
                                << SECURE_XOR_POS_SHIFT) |
@@ -282,7 +354,7 @@ static int aes_start_crypt(struct tegra_aes_dev *dd, u32 in_addr, u32 out_addr,
                        (0 << SECURE_HASH_ENB_SHIFT));
        } else if (mode & FLAGS_OFB) {
                value = ((0x1 << SECURE_INPUT_ALG_SEL_SHIFT) |
-                       ((dd->ctx->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
+                       ((eng->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
                        ((u32)upd_iv << SECURE_IV_SELECT_SHIFT) |
                        ((u32)0 << SECURE_IV_SELECT_SHIFT) |
                        (SECURE_XOR_POS_FIELD) |
@@ -293,7 +365,7 @@ static int aes_start_crypt(struct tegra_aes_dev *dd, u32 in_addr, u32 out_addr,
                        (0 << SECURE_HASH_ENB_SHIFT));
        } else if (mode & FLAGS_RNG){
                value = ((0x1 << SECURE_INPUT_ALG_SEL_SHIFT) |
-                       ((dd->ctx->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
+                       ((eng->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
                        ((u32)upd_iv << SECURE_IV_SELECT_SHIFT) |
                        (0 << SECURE_XOR_POS_SHIFT) |
                        (0 << SECURE_INPUT_SEL_SHIFT) |
@@ -303,7 +375,7 @@ static int aes_start_crypt(struct tegra_aes_dev *dd, u32 in_addr, u32 out_addr,
                        (0 << SECURE_HASH_ENB_SHIFT));
        } else {
                value = ((0x1 << SECURE_INPUT_ALG_SEL_SHIFT) |
-                       ((dd->ctx->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
+                       ((eng->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
                        ((u32)upd_iv << SECURE_IV_SELECT_SHIFT) |
                        (0 << SECURE_XOR_POS_SHIFT) |
                        (0 << SECURE_INPUT_SEL_SHIFT) |
@@ -312,31 +384,30 @@ static int aes_start_crypt(struct tegra_aes_dev *dd, u32 in_addr, u32 out_addr,
                        (0 << SECURE_RNG_ENB_SHIFT) |
                                (0 << SECURE_HASH_ENB_SHIFT));
        }
-       dev_dbg(dd->dev, "secure_in_sel=0x%x", value);
-       aes_writel(dd, value, SECURE_INPUT_SELECT);
+       aes_writel(eng, value, SECURE_INPUT_SELECT);
 
-       aes_writel(dd, out_addr, SECURE_DEST_ADDR);
-       INIT_COMPLETION(dd->op_complete);
+       aes_writel(eng, out_addr, SECURE_DEST_ADDR);
+       INIT_COMPLETION(eng->op_complete);
 
        for (i = 0; i < qlen - 1; i++) {
                do {
-                       value = aes_readl(dd, INTR_STATUS);
+                       value = aes_readl(eng, INTR_STATUS);
                        eng_busy = value & BIT(0);
                        icq_empty = value & BIT(3);
-               } while (eng_busy & (!icq_empty));
-               aes_writel(dd, cmdq[i], ICMDQUE_WR);
+               } while (eng_busy || (!icq_empty));
+               aes_writel(eng, cmdq[i], ICMDQUE_WR);
        }
 
-       ret = wait_for_completion_timeout(&dd->op_complete, msecs_to_jiffies(150));
+       ret = wait_for_completion_timeout(&eng->op_complete,
+               msecs_to_jiffies(150));
        if (ret == 0) {
-               dev_err(dd->dev, "timed out (0x%x)\n",
-                       aes_readl(dd, INTR_STATUS));
-               disable_irq(INT_VDE_BSE_V);
+               dev_err(aes_dev->dev, "engine%d timed out (0x%x)\n",
+                       eng->res_id, aes_readl(eng, INTR_STATUS));
+               disable_irq(eng->irq);
                return -ETIMEDOUT;
        }
-
-       disable_irq(INT_VDE_BSE_V);
-       aes_writel(dd, cmdq[qlen - 1], ICMDQUE_WR);
+       disable_irq(eng->irq);
+       aes_writel(eng, cmdq[qlen - 1], ICMDQUE_WR);
        return 0;
 }
 
@@ -351,174 +422,169 @@ static void aes_release_key_slot(struct tegra_aes_ctx *ctx)
 static struct tegra_aes_slot *aes_find_key_slot(struct tegra_aes_dev *dd)
 {
        struct tegra_aes_slot *slot = NULL;
-       bool found = false;
+       bool found = 0;
 
        spin_lock(&list_lock);
-       list_for_each_entry(slot, &dev_list, node) {
+       list_for_each_entry(slot, &slot_list, node) {
                dev_dbg(dd->dev, "empty:%d, num:%d\n", slot->available,
                        slot->slot_num);
                if (slot->available) {
                        slot->available = false;
-                       found = true;
+                       found = 1;
                        break;
                }
        }
+
        spin_unlock(&list_lock);
        return found ? slot : NULL;
 }
 
-static int aes_set_key(struct tegra_aes_dev *dd)
+static int aes_set_key(struct tegra_aes_engine *eng)
 {
+       struct tegra_aes_dev *dd = aes_dev;
        u32 value, cmdq[2];
-       struct tegra_aes_ctx *ctx = dd->ctx;
        int i, eng_busy, icq_empty, dma_busy;
-       bool use_ssk = false;
 
-       if (!ctx) {
+       if (!eng) {
                dev_err(dd->dev, "%s: context invalid\n", __func__);
                return -EINVAL;
        }
 
-       /* use ssk? */
-       if (!dd->ctx->slot) {
-               dev_dbg(dd->dev, "using ssk");
-               dd->ctx->slot = &ssk;
-               use_ssk = true;
-       }
-
        /* enable key schedule generation in hardware */
-       value = aes_readl(dd, SECURE_CONFIG_EXT);
+       value = aes_readl(eng, SECURE_CONFIG_EXT);
        value &= ~SECURE_KEY_SCH_DIS_FIELD;
-       aes_writel(dd, value, SECURE_CONFIG_EXT);
+       aes_writel(eng, value, SECURE_CONFIG_EXT);
 
        /* select the key slot */
-       value = aes_readl(dd, SECURE_CONFIG);
+       value = aes_readl(eng, SECURE_CONFIG);
        value &= ~SECURE_KEY_INDEX_FIELD;
-       value |= (ctx->slot->slot_num << SECURE_KEY_INDEX_SHIFT);
-       aes_writel(dd, value, SECURE_CONFIG);
+       value |= (eng->slot_num << SECURE_KEY_INDEX_SHIFT);
+       aes_writel(eng, value, SECURE_CONFIG);
 
-       if (use_ssk)
-               goto out;
-
-       /* copy the key table from sdram to vram */
-       cmdq[0] = 0;
-       cmdq[0] = UCQOPCODE_MEMDMAVD << ICQBITSHIFT_OPCODE |
-               (MEMDMA_DIR_DTOVRAM << MEMDMABITSHIFT_DIR) |
-               (AES_HW_KEY_TABLE_LENGTH_BYTES/sizeof(u32))
-                       << MEMDMABITSHIFT_NUM_WORDS;
-       cmdq[1] = (u32)dd->ivkey_phys_base;
+       if (eng->res_id == TEGRA_ARB_BSEV) {
 
-       for (i = 0; i < ARRAY_SIZE(cmdq); i++)
-               aes_writel(dd, cmdq[i], ICMDQUE_WR);
-
-       do {
-               value = aes_readl(dd, INTR_STATUS);
-               eng_busy = value & BIT(0);
-               icq_empty = value & BIT(3);
-               dma_busy = value & BIT(23);
-       } while (eng_busy & (!icq_empty) & dma_busy);
+               if (eng->use_ssk)
+                       goto out;
 
-       /* settable command to get key into internal registers */
-       value = 0;
-       value = UCQOPCODE_SETTABLE << ICQBITSHIFT_OPCODE |
-               UCQCMD_CRYPTO_TABLESEL << ICQBITSHIFT_TABLESEL |
-               UCQCMD_VRAM_SEL << ICQBITSHIFT_VRAMSEL |
-               (UCQCMD_KEYTABLESEL | ctx->slot->slot_num)
+               /* copy the key table from sdram to vram */
+               cmdq[0] = 0;
+               cmdq[0] = UCQOPCODE_MEMDMAVD << ICQBITSHIFT_OPCODE |
+                               (MEMDMA_DIR_DTOVRAM << MEMDMABITSHIFT_DIR) |
+                       (AES_HW_KEY_TABLE_LENGTH_BYTES/sizeof(u32))
+                       << MEMDMABITSHIFT_NUM_WORDS;
+               cmdq[1] = (u32)eng->ivkey_phys_base;
+               for (i = 0; i < ARRAY_SIZE(cmdq); i++)
+                       aes_writel(eng, cmdq[i], ICMDQUE_WR);
+               do {
+                       value = aes_readl(eng, INTR_STATUS);
+                       eng_busy = value & BIT(0);
+                       icq_empty = value & BIT(3);
+                       dma_busy = value & BIT(23);
+               } while (eng_busy && !icq_empty && dma_busy);
+
+               /* settable command to get key into internal registers */
+               value = 0;
+               value = UCQOPCODE_SETTABLE << ICQBITSHIFT_OPCODE |
+                       UCQCMD_CRYPTO_TABLESEL << ICQBITSHIFT_TABLESEL |
+                       UCQCMD_VRAM_SEL << ICQBITSHIFT_VRAMSEL |
+                       (UCQCMD_KEYTABLESEL | eng->slot_num)
                        << ICQBITSHIFT_KEYTABLEID;
-       aes_writel(dd, value, ICMDQUE_WR);
-       do {
-               value = aes_readl(dd, INTR_STATUS);
-               eng_busy = value & BIT(0);
-               icq_empty = value & BIT(3);
-       } while (eng_busy & (!icq_empty));
+               aes_writel(eng, value, ICMDQUE_WR);
+               do {
+                       value = aes_readl(eng, INTR_STATUS);
+                       eng_busy = value & BIT(0);
+                       icq_empty = value & BIT(3);
+               } while (eng_busy & (!icq_empty));
+       } else {
+               if (eng->use_ssk)
+                       goto out;
 
+               /* settable command to get key into internal registers */
+               value = 0;
+               value = UCQOPCODE_SETTABLE << ICQBITSHIFT_OPCODE |
+                       UCQCMD_CRYPTO_TABLESEL << ICQBITSHIFT_TABLESEL |
+                       (UCQCMD_KEYTABLESEL | eng->slot_num)
+                       << ICQBITSHIFT_KEYTABLEID |
+                       dd->bsea_iram_address >> 2;
+               aes_writel(eng, value, ICMDQUE_WR);
+               do {
+                       value = aes_readl(eng, INTR_STATUS);
+                       eng_busy = value & BIT(0);
+                       icq_empty = value & BIT(3);
+               } while (eng_busy & (!icq_empty));
+       }
 out:
        return 0;
 }
 
-static int tegra_aes_handle_req(struct tegra_aes_dev *dd)
+static int tegra_aes_handle_req(struct tegra_aes_engine *eng)
 {
-       struct crypto_async_request *async_req, *backlog;
+       struct tegra_aes_dev *dd = aes_dev;
        struct tegra_aes_ctx *ctx;
+       struct crypto_async_request *async_req, *backlog;
        struct tegra_aes_reqctx *rctx;
        struct ablkcipher_request *req;
-       unsigned long flags;
+       unsigned long irq_flags;
        int dma_max = AES_HW_DMA_BUFFER_SIZE_BYTES;
-       int ret = 0, nblocks, total;
-       int count = 0;
+       int nblocks, total, ret = 0, count = 0;
        dma_addr_t addr_in, addr_out;
        struct scatterlist *in_sg, *out_sg;
 
-       if (!dd)
-               return -EINVAL;
-
-       spin_lock_irqsave(&dd->lock, flags);
+       spin_lock_irqsave(&dd->lock, irq_flags);
        backlog = crypto_get_backlog(&dd->queue);
        async_req = crypto_dequeue_request(&dd->queue);
        if (!async_req)
-               clear_bit(FLAGS_BUSY, &dd->flags);
-       spin_unlock_irqrestore(&dd->lock, flags);
+               clear_bit(FLAGS_BUSY, &eng->busy);
 
+       spin_unlock_irqrestore(&dd->lock, irq_flags);
        if (!async_req)
                return -ENODATA;
-
        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);
 
        req = ablkcipher_request_cast(async_req);
-
-       dev_dbg(dd->dev, "%s: get new req\n", __func__);
-
+       dev_dbg(dd->dev, "%s: get new req (engine #%d)\n", __func__,
+               eng->res_id);
        if (!req->src || !req->dst)
                return -EINVAL;
 
        /* take the hardware semaphore */
-       if (tegra_arb_mutex_lock_timeout(dd->res_id, ARB_SEMA_TIMEOUT) < 0) {
-               dev_err(dd->dev, "aes hardware not available\n");
+       if (tegra_arb_mutex_lock_timeout(eng->res_id, ARB_SEMA_TIMEOUT) < 0) {
+               dev_err(dd->dev, "aes hardware (%d) not available\n",
+               eng->res_id);
                return -EBUSY;
        }
-
        /* assign new request to device */
-       dd->req = req;
-       dd->total = req->nbytes;
-       dd->in_offset = 0;
-       dd->in_sg = req->src;
-       dd->out_offset = 0;
-       dd->out_sg = req->dst;
+       eng->req = req;
+       eng->total = req->nbytes;
+       eng->in_offset = 0;
+       eng->in_sg = req->src;
+       eng->out_offset = 0;
+       eng->out_sg = req->dst;
 
-       in_sg = dd->in_sg;
-       out_sg = dd->out_sg;
+       in_sg = eng->in_sg;
+       out_sg = eng->out_sg;
+       total = eng->total;
 
-       total = dd->total;
        rctx = ablkcipher_request_ctx(req);
        ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
        rctx->mode &= FLAGS_MODE_MASK;
        dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
 
-       dd->iv = (u8 *)req->info;
-       dd->ivlen = AES_BLOCK_SIZE;
-
-       /* assign new context to device */
-       ctx->dd = dd;
-       dd->ctx = ctx;
-
-       if (ctx->flags & FLAGS_NEW_KEY) {
-               /* copy the key */
-               memset(dd->ivkey_base, 0, AES_HW_KEY_TABLE_LENGTH_BYTES);
-               memcpy(dd->ivkey_base, ctx->key, ctx->keylen);
-               aes_set_key(dd);
-               ctx->flags &= ~FLAGS_NEW_KEY;
+       if (eng->new_key) {
+               aes_set_key(eng);
+               eng->new_key = false;
        }
 
-       if (((dd->flags & FLAGS_CBC) || (dd->flags & FLAGS_OFB)) && dd->iv) {
+       if (((dd->flags & FLAGS_CBC) || (dd->flags & FLAGS_OFB)) && req->info) {
                /* set iv to the aes hw slot
                 * Hw generates updated iv only after iv is set in slot.
                 * So key and iv is passed asynchronously.
                 */
-               memcpy(dd->buf_in, dd->iv, dd->ivlen);
+               memcpy(eng->buf_in, (u8 *)req->info, AES_BLOCK_SIZE);
 
-               ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
-                 (u32)dd->dma_buf_out, 1, FLAGS_CBC, false);
+               ret = aes_start_crypt(eng, (u32)eng->dma_buf_in,
+                       (u32)eng->dma_buf_out, 1, FLAGS_CBC, false);
                if (ret < 0) {
                        dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
                        goto out;
@@ -536,29 +602,25 @@ static int tegra_aes_handle_req(struct tegra_aes_dev *dd)
                ret = dma_map_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE);
                if (!ret) {
                        dev_err(dd->dev, "dma_map_sg() error\n");
-                       dma_unmap_sg(dd->dev, dd->in_sg,
+                       dma_unmap_sg(dd->dev, eng->in_sg,
                                1, DMA_TO_DEVICE);
                        goto out;
                }
 
                addr_in = sg_dma_address(in_sg);
                addr_out = sg_dma_address(out_sg);
-               dd->flags |= FLAGS_FAST;
                count = min((int)sg_dma_len(in_sg), (int)dma_max);
                WARN_ON(sg_dma_len(in_sg) != sg_dma_len(out_sg));
                nblocks = DIV_ROUND_UP(count, AES_BLOCK_SIZE);
-
-               ret = aes_start_crypt(dd, addr_in, addr_out, nblocks,
+               ret = aes_start_crypt(eng, addr_in, addr_out, nblocks,
                        dd->flags, true);
 
                dma_unmap_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE);
                dma_unmap_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE);
-
                if (ret < 0) {
                        dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
                        goto out;
                }
-               dd->flags &= ~FLAGS_FAST;
 
                dev_dbg(dd->dev, "out: copied %d\n", count);
                total -= count;
@@ -569,12 +631,11 @@ static int tegra_aes_handle_req(struct tegra_aes_dev *dd)
 
 out:
        /* release the hardware semaphore */
-       tegra_arb_mutex_unlock(dd->res_id);
-
-       dd->total = total;
+       tegra_arb_mutex_unlock(eng->res_id);
+       eng->total = total;
 
-       if (dd->req->base.complete)
-               dd->req->base.complete(&dd->req->base, ret);
+       if (eng->req->base.complete)
+               eng->req->base.complete(&eng->req->base, ret);
 
        dev_dbg(dd->dev, "%s: exit\n", __func__);
        return ret;
@@ -610,43 +671,95 @@ static int tegra_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                                dev_err(dd->dev, "no empty slot\n");
                                return -ENOMEM;
                        }
-
                        ctx->slot = key_slot;
                }
 
-               memcpy(ctx->key, key, keylen);
-               ctx->keylen = keylen;
+               /* copy the key */
+               memset(dd->bsev.ivkey_base, 0, AES_HW_KEY_TABLE_LENGTH_BYTES);
+               memset(dd->bsea_iram_base, 0, AES_HW_KEY_TABLE_LENGTH_BYTES);
+               memcpy(dd->bsev.ivkey_base, key, keylen);
+               memcpy(dd->bsea_iram_base, key, keylen);
+       } else {
+               dd->bsev.slot_num = SSK_SLOT_NUM;
+               dd->bsea.slot_num = SSK_SLOT_NUM;
+               dd->bsev.use_ssk = true;
+               dd->bsea.use_ssk = true;
+               keylen = AES_KEYSIZE_128;
        }
 
-       ctx->flags |= FLAGS_NEW_KEY;
+       dd->bsev.keylen = keylen;
+       dd->bsea.keylen = keylen;
+       dd->bsev.new_key = true;
+       dd->bsea.new_key = true;
        dev_dbg(dd->dev, "done\n");
        return 0;
 }
 
-static void aes_workqueue_handler(struct work_struct *work)
+static void bsev_workqueue_handler(struct work_struct *work)
 {
        struct tegra_aes_dev *dd = aes_dev;
        int ret;
 
-       aes_hw_init(dd);
+       aes_hw_init(&dd->bsev);
 
        /* empty the crypto queue and then return */
        do {
-               ret = tegra_aes_handle_req(dd);
+               ret = tegra_aes_handle_req(&dd->bsev);
        } while (!ret);
 
-       aes_hw_deinit(dd);
+       aes_hw_deinit(&dd->bsev);
 }
 
-static irqreturn_t aes_irq(int irq, void *dev_id)
+static void bsea_workqueue_handler(struct work_struct *work)
+{
+       struct tegra_aes_dev *dd = aes_dev;
+       int ret;
+
+       aes_hw_init(&dd->bsea);
+
+       /* empty the crypto queue and then return */
+       do {
+               ret = tegra_aes_handle_req(&dd->bsea);
+       } while (!ret);
+
+       aes_hw_deinit(&dd->bsea);
+}
+
+static irqreturn_t aes_bsev_irq(int irq, void *dev_id)
+{
+       struct tegra_aes_dev *dd = (struct tegra_aes_dev *)dev_id;
+       u32 value = aes_readl(&dd->bsev, INTR_STATUS);
+
+       if (!(value & ENGINE_BUSY_FIELD))
+               complete(&dd->bsev.op_complete);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t aes_bsea_irq(int irq, void *dev_id)
 {
        struct tegra_aes_dev *dd = (struct tegra_aes_dev *)dev_id;
-       u32 value = aes_readl(dd, INTR_STATUS);
+       u32 value = aes_readl(&dd->bsea, INTR_STATUS);
+       u32 intr_err_mask = (value & (BIT(19) | BIT(20)));
+       u32 cmdq_ctrl = 0;
 
        dev_dbg(dd->dev, "irq_stat: 0x%x", value);
-       if (!((value & ENGINE_BUSY_FIELD) & !(value & ICQ_EMPTY_FIELD)))
-               complete(&dd->op_complete);
+       if (intr_err_mask) {
+               dev_err(dd->dev, "BSEA Error Interrupt: INTR_STATUS=0x%x\n",
+               value);
+               if (value & BIT(2)) {
+                       cmdq_ctrl = aes_readl(&dd->bsea, CMDQUE_CONTROL);
+                       cmdq_ctrl |= BIT(2);
+                       aes_writel(&dd->bsea, cmdq_ctrl, CMDQUE_CONTROL);
+                       aes_writel(&dd->bsea, intr_err_mask, INTR_STATUS);
+               } else
+                       aes_writel(&dd->bsea, intr_err_mask, INTR_STATUS);
+               goto done;
+       }
+       if (!(value & ENGINE_BUSY_FIELD))
+               complete(&dd->bsea.op_complete);
 
+done:
        return IRQ_HANDLED;
 }
 
@@ -656,7 +769,8 @@ static int tegra_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
        struct tegra_aes_dev *dd = aes_dev;
        unsigned long flags;
        int err = 0;
-       int busy;
+       int bsev_busy;
+       int bsea_busy;
 
        dev_dbg(dd->dev, "nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
                !!(mode & FLAGS_ENCRYPT),
@@ -666,11 +780,14 @@ static int tegra_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 
        spin_lock_irqsave(&dd->lock, flags);
        err = ablkcipher_enqueue_request(&dd->queue, req);
-       busy = test_and_set_bit(FLAGS_BUSY, &dd->flags);
+       bsev_busy = test_and_set_bit(FLAGS_BUSY, &dd->bsev.busy);
+       bsea_busy = test_and_set_bit(FLAGS_BUSY, &dd->bsea.busy);
        spin_unlock_irqrestore(&dd->lock, flags);
 
-       if (!busy)
-               queue_work(aes_wq, &aes_work);
+       if (!bsev_busy)
+               queue_work(bsev_wq, &bsev_work);
+       if (!bsea_busy)
+               queue_work(bsea_wq, &bsea_work);
 
        return err;
 }
@@ -707,7 +824,7 @@ static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata,
        unsigned int dlen)
 {
        struct tegra_aes_dev *dd = aes_dev;
-       struct tegra_aes_ctx *ctx = &rng_ctx;
+       struct tegra_aes_engine *eng = &dd->bsev;
        int ret, i;
        u8 *dest = rdata, *dt = dd->dt;
 
@@ -715,34 +832,31 @@ static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata,
        mutex_lock(&aes_lock);
 
        /* take the hardware semaphore */
-       if (tegra_arb_mutex_lock_timeout(dd->res_id, ARB_SEMA_TIMEOUT) < 0) {
-               dev_err(dd->dev, "aes hardware not available\n");
+       if (tegra_arb_mutex_lock_timeout(eng->res_id, ARB_SEMA_TIMEOUT) < 0) {
+               dev_err(dd->dev, "aes hardware (%d) not available\n",
+               eng->res_id);
                mutex_unlock(&aes_lock);
                return -EBUSY;
        }
 
-       ret = aes_hw_init(dd);
+       ret = aes_hw_init(eng);
        if (ret < 0) {
                dev_err(dd->dev, "%s: hw init fail(%d)\n", __func__, ret);
                dlen = ret;
                goto fail;
        }
 
-       ctx->dd = dd;
-       dd->ctx = ctx;
-       dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;
-
-       memset(dd->buf_in, 0, AES_BLOCK_SIZE);
-       memcpy(dd->buf_in, dt, DEFAULT_RNG_BLK_SZ);
+       memset(eng->buf_in, 0, AES_BLOCK_SIZE);
+       memcpy(eng->buf_in, dt, DEFAULT_RNG_BLK_SZ);
 
-       ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
-               (u32)dd->dma_buf_out, 1, dd->flags, true);
+       ret = aes_start_crypt(eng, (u32)eng->dma_buf_in, (u32)eng->dma_buf_out,
+               1, FLAGS_ENCRYPT | FLAGS_RNG, true);
        if (ret < 0) {
                dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
                dlen = ret;
                goto out;
        }
-       memcpy(dest, dd->buf_out, dlen);
+       memcpy(dest, eng->buf_out, dlen);
 
        /* update the DT */
        for (i = DEFAULT_RNG_BLK_SZ - 1; i >= 0; i--) {
@@ -752,11 +866,11 @@ static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata,
        }
 
 out:
-       aes_hw_deinit(dd);
+       aes_hw_deinit(eng);
 
 fail:
        /* release the hardware semaphore */
-       tegra_arb_mutex_unlock(dd->res_id);
+       tegra_arb_mutex_unlock(eng->res_id);
        mutex_unlock(&aes_lock);
        dev_dbg(dd->dev, "%s: done\n", __func__);
        return dlen;
@@ -767,23 +881,25 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
 {
        struct tegra_aes_dev *dd = aes_dev;
        struct tegra_aes_ctx *ctx = &rng_ctx;
+       struct tegra_aes_engine *eng = &dd->bsev;
        struct tegra_aes_slot *key_slot;
        struct timespec ts;
        int ret = 0;
        u64 nsec, tmp[2];
        u8 *dt;
 
-       if (!ctx || !dd) {
-               dev_err(dd->dev, "ctx=0x%x, dd=0x%x\n",
-                       (unsigned int)ctx, (unsigned int)dd);
+       if (!eng || !dd) {
+               dev_err(dd->dev, "eng=0x%x, dd=0x%x\n",
+                       (unsigned int)eng, (unsigned int)dd);
                return -EINVAL;
        }
 
        if (slen < (DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
-               dev_err(dd->dev, "seed size invalid");
                return -ENOMEM;
        }
 
+       dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;
+
        /* take mutex to access the aes hw */
        mutex_lock(&aes_lock);
 
@@ -797,49 +913,41 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
                ctx->slot = key_slot;
        }
 
-       ctx->dd = dd;
-       dd->ctx = ctx;
-       dd->ctr = 0;
-
-       ctx->keylen = AES_KEYSIZE_128;
-       ctx->flags |= FLAGS_NEW_KEY;
-
        /* copy the key to the key slot */
-       memset(dd->ivkey_base, 0, AES_HW_KEY_TABLE_LENGTH_BYTES);
-       memcpy(dd->ivkey_base, seed + DEFAULT_RNG_BLK_SZ, AES_KEYSIZE_128);
-
-       dd->iv = seed;
-       dd->ivlen = slen;
-
-       dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;
+       memset(eng->ivkey_base, 0, AES_HW_KEY_TABLE_LENGTH_BYTES);
+       memcpy(eng->ivkey_base, seed + DEFAULT_RNG_BLK_SZ, AES_KEYSIZE_128);
 
        /* take the hardware semaphore */
-       if (tegra_arb_mutex_lock_timeout(dd->res_id, ARB_SEMA_TIMEOUT) < 0) {
-               dev_err(dd->dev, "aes hardware not available\n");
+       if (tegra_arb_mutex_lock_timeout(eng->res_id, ARB_SEMA_TIMEOUT) < 0) {
+               dev_err(dd->dev, "aes hardware (%d) not available\n",
+               eng->res_id);
                mutex_unlock(&aes_lock);
                return -EBUSY;
        }
 
-       ret = aes_hw_init(dd);
+       ret = aes_hw_init(eng);
        if (ret < 0) {
                dev_err(dd->dev, "%s: hw init fail(%d)\n", __func__, ret);
                goto fail;
        }
 
-       aes_set_key(dd);
+       dd->ctx = ctx;
+       eng->slot_num = ctx->slot->slot_num;
+       eng->keylen = AES_KEYSIZE_128;
+       aes_set_key(eng);
 
        /* set seed to the aes hw slot */
-       memset(dd->buf_in, 0, AES_BLOCK_SIZE);
-       memcpy(dd->buf_in, dd->iv, DEFAULT_RNG_BLK_SZ);
-       ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
-         (u32)dd->dma_buf_out, 1, FLAGS_CBC, false);
+       memset(eng->buf_in, 0, AES_BLOCK_SIZE);
+       memcpy(eng->buf_in, seed, DEFAULT_RNG_BLK_SZ);
+       ret = aes_start_crypt(eng, (u32)eng->dma_buf_in,
+         (u32)eng->dma_buf_out, 1, FLAGS_CBC, false);
        if (ret < 0) {
                dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
                goto out;
        }
 
-       if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
-               dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128;
+       if (slen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
+               dt = seed + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128;
        } else {
                getnstimeofday(&ts);
                nsec = timespec_to_ns(&ts);
@@ -853,11 +961,11 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
        memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ);
 
 out:
-       aes_hw_deinit(dd);
+       aes_hw_deinit(eng);
 
 fail:
        /* release the hardware semaphore */
-       tegra_arb_mutex_unlock(dd->res_id);
+       tegra_arb_mutex_unlock(eng->res_id);
        mutex_unlock(&aes_lock);
 
        dev_dbg(dd->dev, "%s: done\n", __func__);
@@ -867,7 +975,6 @@ fail:
 static int tegra_aes_cra_init(struct crypto_tfm *tfm)
 {
        tfm->crt_ablkcipher.reqsize = sizeof(struct tegra_aes_reqctx);
-
        return 0;
 }
 
@@ -961,7 +1068,7 @@ static int tegra_aes_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct tegra_aes_dev *dd;
-       struct resource *res;
+       struct resource *res[2];
        int err = -ENOMEM, i = 0, j;
 
        if (aes_dev)
@@ -986,42 +1093,57 @@ static int tegra_aes_probe(struct platform_device *pdev)
        crypto_init_queue(&dd->queue, TEGRA_AES_QUEUE_LENGTH);
 
        /* Get the module base address */
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
+       res[0] = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       res[1] = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       if (!res[0] || !res[1]) {
                dev_err(dev, "invalid resource type: base\n");
                err = -ENODEV;
                goto out;
        }
-       dd->phys_base = res->start;
+       dd->bsev.phys_base = res[0]->start;
+       dd->bsev.io_base = ioremap(dd->bsev.phys_base, resource_size(res[0]));
+       dd->bsea.phys_base = res[1]->start;
+       dd->bsea.io_base = ioremap(dd->bsea.phys_base, resource_size(res[1]));
 
-       dd->io_base = ioremap(dd->phys_base, resource_size(res));
-       if (!dd->io_base) {
+       if (!dd->bsev.io_base || !dd->bsea.io_base) {
                dev_err(dev, "can't ioremap phys_base\n");
                err = -ENOMEM;
                goto out;
        }
 
-       dd->res_id = TEGRA_ARB_AES;
+       err = bsea_alloc_iram(dd);
+       if (err < 0) {
+               dev_err(dev, "Failed to allocate IRAM for BSEA\n");
+               goto out;
+       }
+
+       dd->bsev.res_id = TEGRA_ARB_BSEV;
+       dd->bsea.res_id = TEGRA_ARB_BSEA;
 
-       /* Initialise the master bsev clock */
-       dd->pclk = clk_get(dev, "bsev");
-       if (!dd->pclk) {
+       dd->bsev.pclk = clk_get(dev, "bsev");
+       if (!dd->bsev.pclk) {
                dev_err(dev, "pclock intialization failed.\n");
                err = -ENODEV;
                goto out;
        }
 
-       /* Initialize the vde clock */
-       dd->iclk = clk_get(dev, "vde");
-       if (!dd->iclk) {
+       dd->bsev.iclk = clk_get(dev, "vde");
+       if (!dd->bsev.iclk) {
                dev_err(dev, "iclock intialization failed.\n");
                err = -ENODEV;
                goto out;
        }
 
-       err = clk_set_rate(dd->iclk, ULONG_MAX);
+       dd->bsea.pclk = clk_get(dev, "bsea");
+       if (!dd->bsea.pclk) {
+               dev_err(dev, "pclock initialization failed.\n");
+               err = -ENODEV;
+               goto out;
+       }
+
+       err = clk_set_rate(dd->bsev.iclk, ULONG_MAX);
        if (err) {
-               dev_err(dd->dev, "iclk set_rate fail(%d)\n", err);
+               dev_err(dd->dev, "bsev iclk set_rate fail(%d)\n", err);
                goto out;
        }
 
@@ -1030,45 +1152,64 @@ static int tegra_aes_probe(struct platform_device *pdev)
         * - hardware key table
         * - key schedule
         */
-       dd->ivkey_base = dma_alloc_coherent(dev, SZ_512, &dd->ivkey_phys_base,
-               GFP_KERNEL);
-       if (!dd->ivkey_base) {
-               dev_err(dev, "can not allocate iv/key buffer\n");
+       dd->bsev.ivkey_base = dma_alloc_coherent(dev, SZ_512,
+               &dd->bsev.ivkey_phys_base, GFP_KERNEL);
+       /* BSEA keeps its key table in the IRAM allocated above, not DMA memory */
+       dd->bsea.ivkey_base = NULL;
+       if (!dd->bsev.ivkey_base) {
+               dev_err(dev, "can not allocate iv/key buffer for BSEV\n");
                err = -ENOMEM;
                goto out;
        }
 
-       dd->buf_in = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
-               &dd->dma_buf_in, GFP_KERNEL);
-       if (!dd->buf_in) {
+       dd->bsev.buf_in = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+               &dd->bsev.dma_buf_in, GFP_KERNEL);
+       dd->bsea.buf_in = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+               &dd->bsea.dma_buf_in, GFP_KERNEL);
+       if (!dd->bsev.buf_in || !dd->bsea.buf_in) {
                dev_err(dev, "can not allocate dma-in buffer\n");
                err = -ENOMEM;
                goto out;
        }
 
-       dd->buf_out = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
-               &dd->dma_buf_out, GFP_KERNEL);
-       if (!dd->buf_out) {
+       dd->bsev.buf_out = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+               &dd->bsev.dma_buf_out, GFP_KERNEL);
+       dd->bsea.buf_out = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+               &dd->bsea.dma_buf_out, GFP_KERNEL);
+       if (!dd->bsev.buf_out || !dd->bsea.buf_out) {
                dev_err(dev, "can not allocate dma-out buffer\n");
                err = -ENOMEM;
                goto out;
        }
 
-       init_completion(&dd->op_complete);
-       aes_wq = alloc_workqueue("aes_wq", WQ_HIGHPRI, 16);
-       if (!aes_wq) {
+       init_completion(&dd->bsev.op_complete);
+       init_completion(&dd->bsea.op_complete);
+
+       bsev_wq = alloc_workqueue("bsev_wq", WQ_HIGHPRI, 16);
+       bsea_wq = alloc_workqueue("bsea_wq", WQ_HIGHPRI, 16);
+       if (!bsev_wq || !bsea_wq) {
                dev_err(dev, "alloc_workqueue failed\n");
                goto out;
        }
 
        /* get the irq */
-       err = request_irq(INT_VDE_BSE_V, aes_irq, IRQF_TRIGGER_HIGH,
+       dd->bsev.irq = INT_VDE_BSE_V;
+       err = request_irq(dd->bsev.irq, aes_bsev_irq, IRQF_TRIGGER_HIGH,
                "tegra-aes", dd);
        if (err) {
-               dev_err(dev, "request_irq failed\n");
+               dev_err(dev, "request_irq failed for BSEV Engine\n");
                goto out;
        }
-       disable_irq(INT_VDE_BSE_V);
+       disable_irq(dd->bsev.irq);
+
+       dd->bsea.irq = INT_VDE_BSE_A;
+       err = request_irq(dd->bsea.irq, aes_bsea_irq, IRQF_TRIGGER_HIGH,
+               "tegra-aes", dd);
+       if (err) {
+               dev_err(dev, "request_irq failed for BSEA Engine\n");
+               goto out;
+       }
+       disable_irq(dd->bsea.irq);
 
        spin_lock_init(&list_lock);
        spin_lock(&list_lock);
@@ -1078,11 +1219,12 @@ static int tegra_aes_probe(struct platform_device *pdev)
                dd->slots[i].available = true;
                dd->slots[i].slot_num = i;
                INIT_LIST_HEAD(&dd->slots[i].node);
-               list_add_tail(&dd->slots[i].node, &dev_list);
+               list_add_tail(&dd->slots[i].node, &slot_list);
        }
        spin_unlock(&list_lock);
 
        aes_dev = dd;
+
        for (i = 0; i < ARRAY_SIZE(algs); i++) {
                INIT_LIST_HEAD(&algs[i].cra_list);
                err = crypto_register_alg(&algs[i]);
@@ -1096,31 +1238,61 @@ static int tegra_aes_probe(struct platform_device *pdev)
 out:
        for (j = 0; j < i; j++)
                crypto_unregister_alg(&algs[j]);
-       if (dd->ivkey_base)
-               dma_free_coherent(dev, SZ_512, dd->ivkey_base,
-                       dd->ivkey_phys_base);
-       if (dd->buf_in)
+
+       bsea_free_iram(dd);
+       if (dd->bsev.ivkey_base) {
+               dma_free_coherent(dev, SZ_512, dd->bsev.ivkey_base,
+                       dd->bsev.ivkey_phys_base);
+       }
+
+       if (dd->bsev.buf_in && dd->bsea.buf_in) {
+               dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+                       dd->bsev.buf_in, dd->bsev.dma_buf_in);
+               dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+                       dd->bsea.buf_in, dd->bsea.dma_buf_in);
+       }
+
+       if (dd->bsev.buf_out && dd->bsea.buf_out) {
                dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
-                       dd->buf_in, dd->dma_buf_in);
-       if (dd->buf_out)
+                       dd->bsev.buf_out, dd->bsev.dma_buf_out);
                dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
-                       dd->buf_out, dd->dma_buf_out);
-       if (dd->io_base)
-               iounmap(dd->io_base);
-       if (dd->iclk)
-               clk_put(dd->iclk);
-       if (dd->pclk)
-               clk_put(dd->pclk);
-       if (aes_wq)
-               destroy_workqueue(aes_wq);
-       free_irq(INT_VDE_BSE_V, dd);
+                       dd->bsea.buf_out, dd->bsea.dma_buf_out);
+       }
+
+       if (dd->bsev.io_base && dd->bsea.io_base) {
+               iounmap(dd->bsev.io_base);
+               iounmap(dd->bsea.io_base);
+       }
+
+       if (dd->bsev.pclk)
+               clk_put(dd->bsev.pclk);
+
+       if (dd->bsev.iclk)
+               clk_put(dd->bsev.iclk);
+
+       if (dd->bsea.pclk)
+               clk_put(dd->bsea.pclk);
+
+       if (bsev_wq)
+               destroy_workqueue(bsev_wq);
+
+       if (bsea_wq)
+               destroy_workqueue(bsea_wq);
+
+       if (dd->bsev.irq)
+               free_irq(dd->bsev.irq, dd);
+
+       if (dd->bsea.irq)
+               free_irq(dd->bsea.irq, dd);
+
        spin_lock(&list_lock);
-       list_del(&dev_list);
+       list_del(&slot_list);
        spin_unlock(&list_lock);
 
        kfree(dd->slots);
        kfree(dd);
        aes_dev = NULL;
+
        dev_err(dev, "%s: initialization failed.\n", __func__);
        return err;
 }
@@ -1134,25 +1306,36 @@ static int __devexit tegra_aes_remove(struct platform_device *pdev)
        if (!dd)
                return -ENODEV;
 
-       cancel_work_sync(&aes_work);
-       destroy_workqueue(aes_wq);
-       free_irq(INT_VDE_BSE_V, dd);
+       cancel_work_sync(&bsev_work);
+       cancel_work_sync(&bsea_work);
+       destroy_workqueue(bsev_wq);
+       destroy_workqueue(bsea_wq);
+       free_irq(dd->bsev.irq, dd);
+       free_irq(dd->bsea.irq, dd);
        spin_lock(&list_lock);
-       list_del(&dev_list);
+       list_del(&slot_list);
        spin_unlock(&list_lock);
 
        for (i = 0; i < ARRAY_SIZE(algs); i++)
                crypto_unregister_alg(&algs[i]);
 
-       dma_free_coherent(dev, SZ_512, dd->ivkey_base,
-               dd->ivkey_phys_base);
-       dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
-               dd->buf_in, dd->dma_buf_in);
-       dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
-               dd->buf_out, dd->dma_buf_out);
-       iounmap(dd->io_base);
-       clk_put(dd->iclk);
-       clk_put(dd->pclk);
+       bsea_free_iram(dd);
+       dma_free_coherent(dev, SZ_512, dd->bsev.ivkey_base,
+               dd->bsev.ivkey_phys_base);
+       dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, dd->bsev.buf_in,
+               dd->bsev.dma_buf_in);
+       dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, dd->bsea.buf_in,
+               dd->bsea.dma_buf_in);
+       dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, dd->bsev.buf_out,
+               dd->bsev.dma_buf_out);
+       dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, dd->bsea.buf_out,
+               dd->bsea.dma_buf_out);
+
+       iounmap(dd->bsev.io_base);
+       iounmap(dd->bsea.io_base);
+       clk_put(dd->bsev.iclk);
+       clk_put(dd->bsev.pclk);
+       clk_put(dd->bsea.pclk);
        kfree(dd->slots);
        kfree(dd);
        aes_dev = NULL;
@@ -1172,7 +1355,7 @@ static struct platform_driver tegra_aes_driver = {
 static int __init tegra_aes_mod_init(void)
 {
        mutex_init(&aes_lock);
-       INIT_LIST_HEAD(&dev_list);
+       INIT_LIST_HEAD(&slot_list);
        return  platform_driver_register(&tegra_aes_driver);
 }
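
A note on the key-load asymmetry between the two engines, which is the subtlest
part of this patch: BSEV stages the key table through VRAM (a MEMDMAVD copy from
the coherent SDRAM buffer, then a SETTABLE command with UCQCMD_VRAM_SEL), while
BSEA's SETTABLE command carries the IRAM word address of the key table directly,
so no staging copy is needed. The sketch below condenses aes_set_key() from the
diff; sketch_load_key is a hypothetical name, the macros and fields are the
driver's, and the MEMDMA step plus INTR_STATUS poll loops are collapsed into
comments.

    /* Hypothetical condensation of aes_set_key(): per-engine key-table select. */
    static void sketch_load_key(struct tegra_aes_engine *eng)
    {
            struct tegra_aes_dev *dd = aes_dev;
            u32 value;

            if (eng->res_id == TEGRA_ARB_BSEV) {
                    /* BSEV: MEMDMAVD has already copied the 64-byte key table
                     * from eng->ivkey_phys_base (SDRAM) into VRAM; SETTABLE
                     * then points the key slot at the VRAM copy. */
                    value = UCQOPCODE_SETTABLE << ICQBITSHIFT_OPCODE |
                            UCQCMD_CRYPTO_TABLESEL << ICQBITSHIFT_TABLESEL |
                            UCQCMD_VRAM_SEL << ICQBITSHIFT_VRAMSEL |
                            (UCQCMD_KEYTABLESEL | eng->slot_num)
                                    << ICQBITSHIFT_KEYTABLEID;
            } else {
                    /* BSEA: SETTABLE embeds the IRAM word address (byte
                     * address >> 2) of the key table that tegra_aes_setkey()
                     * wrote through nvmap, so no staging copy is required. */
                    value = UCQOPCODE_SETTABLE << ICQBITSHIFT_OPCODE |
                            UCQCMD_CRYPTO_TABLESEL << ICQBITSHIFT_TABLESEL |
                            (UCQCMD_KEYTABLESEL | eng->slot_num)
                                    << ICQBITSHIFT_KEYTABLEID |
                            dd->bsea_iram_address >> 2;
            }

            aes_writel(eng, value, ICMDQUE_WR);
            /* the driver then polls INTR_STATUS until the engine is idle and
             * the internal command queue is empty (see aes_set_key above) */
    }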
 
index bb311bb..9c53fe4 100644
@@ -22,7 +22,7 @@
 #define ICMDQUE_WR             0x1000
 #define CMDQUE_CONTROL         0x1008
 #define INTR_STATUS            0x1018
-#define INT_ENB                0x1040
+#define INT_ENB                        0x1040
 #define CONFIG                 0x1044
 #define IRAM_ACCESS_CFG                0x10A0
 #define SECURE_DEST_ADDR       0x1100
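
Endnote on the DMA buffer sizing referenced in the tegra-aes.c comment above:
taking the comment's own figures (the AVP/BSE runs ~216K cycles per millisecond,
i.e. 216 MHz, and AES costs roughly 11 cycles per byte), one millisecond of engine
time covers 216000 / 11 ~= 19.6 KB. AES_HW_DMA_BUFFER_SIZE_BYTES rounds that down
to the 16 KB (0x4000) processed per DMA pass, so each pass stays within roughly
1 msec of engine time.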