* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
+#include <linux/pm_runtime.h>
+
#include "tegra-se.h"
-static const char sg_driver_name[] = "tegra-se";
+#define DRIVER_NAME "tegra-se"
+
+/* Security Engine operation modes */
+enum tegra_se_aes_op_mode {
+ SE_AES_OP_MODE_CBC, /* Cipher Block Chaining (CBC) mode */
+ SE_AES_OP_MODE_ECB, /* Electronic Codebook (ECB) mode */
+ SE_AES_OP_MODE_CTR, /* Counter (CTR) mode */
+ SE_AES_OP_MODE_OFB, /* Output feedback (OFB) mode */
+ SE_AES_OP_MODE_RNG_X931, /* Random number generator (RNG) mode */
+ SE_AES_OP_MODE_RNG_DRBG, /* Deterministic Random Bit Generator mode */
+ SE_AES_OP_MODE_CMAC, /* Cipher-based MAC (CMAC) mode */
+ SE_AES_OP_MODE_SHA1, /* Secure Hash Algorithm-1 (SHA1) mode */
+ SE_AES_OP_MODE_SHA224, /* Secure Hash Algorithm-224 (SHA224) mode */
+ SE_AES_OP_MODE_SHA256, /* Secure Hash Algorithm-256 (SHA256) mode */
+ SE_AES_OP_MODE_SHA384, /* Secure Hash Algorithm-384 (SHA384) mode */
+ SE_AES_OP_MODE_SHA512 /* Secure Hash Algorithm-512 (SHA512) mode */
+};
+
+/* Security Engine key table type */
+enum tegra_se_key_table_type {
+ SE_KEY_TABLE_TYPE_KEY, /* Key */
+ SE_KEY_TABLE_TYPE_ORGIV, /* Original IV */
+ SE_KEY_TABLE_TYPE_UPDTDIV /* Updated IV */
+};
+
+/* Security Engine request context */
+struct tegra_se_req_context {
+ enum tegra_se_aes_op_mode op_mode; /* Security Engine operation mode */
+ bool encrypt; /* Operation type */
+};
+
+struct tegra_se_chipdata {
+ bool cprng_supported;
+ bool drbg_supported;
+ bool rsa_supported;
+};
+
+struct tegra_se_dev {
+ struct device *dev;
+ void __iomem *io_reg; /* se device memory/io */
+ void __iomem *pmc_io_reg; /* pmc device memory/io */
+ int irq; /* irq allocated */
+ spinlock_t lock; /* spin lock */
+ struct clk *pclk; /* Security Engine clock */
+ struct crypto_queue queue; /* Security Engine crypto queue */
+ struct tegra_se_slot *slot_list; /* pointer to key slots */
+ struct tegra_se_rsa_slot *rsa_slot_list; /* rsa key slot pointer */
+ u64 ctr;
+ u32 *src_ll_buf; /* pointer to source linked list buffer */
+ dma_addr_t src_ll_buf_adr; /* Source linked list buffer dma address */
+ u32 src_ll_size; /* Size of source linked list buffer */
+ u32 *dst_ll_buf; /* pointer to destination linked list buffer */
+ dma_addr_t dst_ll_buf_adr; /* Destination linked list dma address */
+ u32 dst_ll_size; /* Size of destination linked list buffer */
+ u32 *ctx_save_buf; /* LP context buffer pointer*/
+ dma_addr_t ctx_save_buf_adr; /* LP context buffer dma address*/
+ struct completion complete; /* Tells the task completion */
+ bool work_q_busy; /* Work queue busy status */
+ struct tegra_se_chipdata *chipdata; /* chip specific data */
+};
static struct tegra_se_dev *sg_tegra_se_dev;
+/* Security Engine AES context */
+struct tegra_se_aes_context {
+ struct tegra_se_dev *se_dev; /* Security Engine device */
+ struct tegra_se_slot *slot; /* Security Engine key slot */
+ u32 keylen; /* key length in bits */
+ u32 op_mode; /* AES operation mode */
+};
+
+/* Security Engine random number generator context */
+struct tegra_se_rng_context {
+ struct tegra_se_dev *se_dev; /* Security Engine device */
+ struct tegra_se_slot *slot; /* Security Engine key slot */
+ u32 *dt_buf; /* Destination buffer pointer */
+ dma_addr_t dt_buf_adr; /* Destination buffer dma address */
+ u32 *rng_buf; /* RNG buffer pointer */
+ dma_addr_t rng_buf_adr; /* RNG buffer dma address */
+ bool use_org_iv; /* Tells whether the original IV is to be used;
+ if false, the updated IV is used */
+};
+
+/* Security Engine SHA context */
+struct tegra_se_sha_context {
+ struct tegra_se_dev *se_dev; /* Security Engine device */
+ u32 op_mode; /* SHA operation mode */
+};
+
+/* Security Engine AES CMAC context */
+struct tegra_se_aes_cmac_context {
+ struct tegra_se_dev *se_dev; /* Security Engine device */
+ struct tegra_se_slot *slot; /* Security Engine key slot */
+ u32 keylen; /* key length in bits */
+ u8 K1[TEGRA_SE_KEY_128_SIZE]; /* Key1 */
+ u8 K2[TEGRA_SE_KEY_128_SIZE]; /* Key2 */
+ dma_addr_t dma_addr; /* DMA address of local buffer */
+ u32 buflen; /* local buffer length */
+ u8 *buffer; /* local buffer pointer */
+};
+
+/* Security Engine key slot */
+struct tegra_se_slot {
+ struct list_head node;
+ u8 slot_num; /* Key slot number */
+ bool available; /* Tells whether key slot is free to use */
+};
+
+static struct tegra_se_slot ssk_slot = {
+ .slot_num = 15,
+ .available = false,
+};
+
+static struct tegra_se_slot srk_slot = {
+ .slot_num = 0,
+ .available = false,
+};
+
+/* Security Engine Linked List */
+struct tegra_se_ll {
+ dma_addr_t addr; /* DMA buffer address */
+ u32 data_len; /* Data length in DMA buffer */
+};
+
static LIST_HEAD(key_slot);
+static LIST_HEAD(rsa_key_slot);
+static DEFINE_SPINLOCK(rsa_key_slot_lock);
+
+#define RSA_MIN_SIZE 64
+#define RSA_MAX_SIZE 256
+#define RNG_RESEED_INTERVAL 100
+
static DEFINE_SPINLOCK(key_slot_lock);
static DEFINE_MUTEX(se_hw_lock);
/* create a work for handling the async transfers */
static void tegra_se_work_handler(struct work_struct *work);
-
static DECLARE_WORK(se_work, tegra_se_work_handler);
static struct workqueue_struct *se_work_q;
-
+#define PMC_SCRATCH43_REG_OFFSET 0x22c
#define GET_MSB(x) ((x) >> (8*sizeof(x)-1))
static void tegra_se_leftshift_onebit(u8 *in_buf, u32 size, u8 *org_msb)
{
}
}
+extern unsigned long long tegra_chip_uid(void);
+
static inline void se_writel(struct tegra_se_dev *se_dev,
unsigned int val, unsigned int reg_offset)
{
static void tegra_se_free_key_slot(struct tegra_se_slot *slot)
{
- spin_lock(&key_slot_lock);
- slot->available = true;
- spin_unlock(&key_slot_lock);
+ /* NULL-safe: callers may pass a context whose slot was never set */
+ if (slot) {
+ spin_lock(&key_slot_lock);
+ slot->available = true;
+ spin_unlock(&key_slot_lock);
+ }
}
static struct tegra_se_slot *tegra_se_alloc_key_slot(void)
spin_lock_init(&key_slot_lock);
spin_lock(&key_slot_lock);
for (i = 0; i < TEGRA_SE_KEYSLOT_COUNT; i++) {
+ /*
+ * Slot 0 and 15 are reserved and will not be added to the
+ * free slots pool. Slot 0 is used for SRK generation and
+ * Slot 15 is used for SSK operation
+ */
+ if ((i == srk_slot.slot_num) || (i == ssk_slot.slot_num))
+ continue;
se_dev->slot_list[i].available = true;
se_dev->slot_list[i].slot_num = i;
INIT_LIST_HEAD(&se_dev->slot_list[i].node);
return 0;
}
+/*
+ * Disable software read access to one key slot via a read-modify-write
+ * of its KEY_TABLE_ACCESS register.
+ * NOTE(review): this clears the SE_KEY_READ_DISABLE bit — assumes a
+ * cleared bit means "reads disabled" for this slot; confirm against
+ * the SE TRM register description.
+ */
+static void tegra_se_key_read_disable(u8 slot_num)
+{
+ struct tegra_se_dev *se_dev = sg_tegra_se_dev;
+ u32 val;
+
+ val = se_readl(se_dev,
+ (SE_KEY_TABLE_ACCESS_REG_OFFSET + (slot_num * 4)));
+ val &= ~(1 << SE_KEY_READ_DISABLE_SHIFT);
+ se_writel(se_dev,
+ val, (SE_KEY_TABLE_ACCESS_REG_OFFSET + (slot_num * 4)));
+}
+
+/*
+ * Disable software read access to every AES key slot. Takes the hw
+ * mutex and a runtime-PM reference for the duration of the register
+ * writes so the engine is powered and access is serialized.
+ */
+static void tegra_se_key_read_disable_all(void)
+{
+ struct tegra_se_dev *se_dev = sg_tegra_se_dev;
+ u8 slot_num;
+
+ mutex_lock(&se_hw_lock);
+ pm_runtime_get_sync(se_dev->dev);
+
+ for (slot_num = 0; slot_num < TEGRA_SE_KEYSLOT_COUNT; slot_num++)
+ tegra_se_key_read_disable(slot_num);
+
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+}
+
static void tegra_se_config_algo(struct tegra_se_dev *se_dev,
enum tegra_se_aes_op_mode mode, bool encrypt, u32 key_len)
{
val |= SE_CONFIG_ENC_MODE(MODE_KEY192);
else
val |= SE_CONFIG_ENC_MODE(MODE_KEY128);
+ val |= SE_CONFIG_DEC_ALG(ALG_NOP);
} else {
val = SE_CONFIG_DEC_ALG(ALG_AES_DEC);
if (key_len == TEGRA_SE_KEY_256_SIZE)
val |= SE_CONFIG_DST(DST_MEMORY);
break;
case SE_AES_OP_MODE_RNG_X931:
+ case SE_AES_OP_MODE_RNG_DRBG:
val = SE_CONFIG_ENC_ALG(ALG_RNG) |
SE_CONFIG_ENC_MODE(MODE_KEY128) |
SE_CONFIG_DST(DST_MEMORY);
u8 pkt = 0, quad = 0;
u32 val = 0, i;
+ if ((type == SE_KEY_TABLE_TYPE_KEY) && (slot_num == ssk_slot.slot_num))
+ return;
+
if (type == SE_KEY_TABLE_TYPE_ORGIV)
quad = QUAD_ORG_IV;
else if (type == SE_KEY_TABLE_TYPE_UPDTDIV)
SE_CRYPTO_XOR_POS(XOR_BYPASS) |
SE_CRYPTO_CORE_SEL(CORE_ENCRYPT);
break;
+ case SE_AES_OP_MODE_RNG_DRBG:
+ val = SE_CRYPTO_INPUT_SEL(INPUT_RANDOM) |
+ SE_CRYPTO_XOR_POS(XOR_BYPASS) |
+ SE_CRYPTO_CORE_SEL(CORE_ENCRYPT)|
+ SE_CRYPTO_KEY_INDEX(slot_num);
+ break;
case SE_AES_OP_MODE_ECB:
if (encrypt) {
val = SE_CRYPTO_INPUT_SEL(INPUT_AHB) |
se_writel(se_dev, val, SE_CRYPTO_REG_OFFSET);
+ if (mode == SE_AES_OP_MODE_RNG_DRBG) {
+ se_writel(se_dev, SE_RNG_CONFIG_MODE(DRBG_MODE_FORCE_RESEED)|
+ SE_RNG_CONFIG_SRC(DRBG_SRC_LFSR), SE_RNG_CONFIG_REG_OFFSET);
+ se_writel(se_dev, RNG_RESEED_INTERVAL, SE_RNG_RESEED_INTERVAL_REG_OFFSET);
+ }
+
if (mode == SE_AES_OP_MODE_CTR)
se_writel(se_dev, 1, SE_SPARE_0_REG_OFFSET);
se_writel(se_dev, SHA_ENABLE, SE_SHA_CONFIG_REG_OFFSET);
}
-static int tegra_se_start_operation(struct tegra_se_dev *se_dev, u32 nbytes)
+static int tegra_se_start_operation(struct tegra_se_dev *se_dev, u32 nbytes,
+ bool context_save)
{
u32 nblocks = nbytes / TEGRA_SE_AES_BLOCK_SIZE;
int ret = 0;
INIT_COMPLETION(se_dev->complete);
- se_writel(se_dev, SE_OPERATION(OP_SRART), SE_OPERATION_REG_OFFSET);
+ if (context_save)
+ se_writel(se_dev, SE_OPERATION(OP_CTX_SAVE),
+ SE_OPERATION_REG_OFFSET);
+ else
+ se_writel(se_dev, SE_OPERATION(OP_SRART),
+ SE_OPERATION_REG_OFFSET);
ret = wait_for_completion_timeout(&se_dev->complete,
msecs_to_jiffies(1000));
return 0;
}
-
static void tegra_se_read_hash_result(struct tegra_se_dev *se_dev,
u8 *pdata, u32 nbytes, bool swap32)
{
return 0;
do {
- total_bytes -= sl[i].length;
+ if (!sl->length)
+ return 0;
+ total_bytes -= min(sl->length, total_bytes);
i++;
- } while (total_bytes > 0);
+ sl = sg_next(sl);
+ } while (total_bytes && sl);
return i;
}
return 0;
}
-
static void tegra_se_free_ll_buf(struct tegra_se_dev *se_dev)
{
if (se_dev->src_ll_buf) {
}
}
-
static int tegra_se_setup_ablk_req(struct tegra_se_dev *se_dev,
struct ablkcipher_request *req)
{
WARN_ON(src_sg->length != dst_sg->length);
src_ll->addr = sg_dma_address(src_sg);
- src_ll->data_len = src_sg->length;
+ src_ll->data_len = min(src_sg->length, total);
dst_ll->addr = sg_dma_address(dst_sg);
- dst_ll->data_len = dst_sg->length;
+ dst_ll->data_len = min(dst_sg->length, total);
+ total -= min(src_sg->length, total);
- total -= src_sg->length;
src_sg = sg_next(src_sg);
dst_sg = sg_next(dst_sg);
dst_ll++;
return ret;
}
-
static void tegra_se_dequeue_complete_req(struct tegra_se_dev *se_dev,
struct ablkcipher_request *req)
{
while (total) {
dma_unmap_sg(se_dev->dev, dst_sg, 1, DMA_FROM_DEVICE);
dma_unmap_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE);
- total -= src_sg->length;
+ total -= min(src_sg->length, total);
src_sg = sg_next(src_sg);
dst_sg = sg_next(dst_sg);
}
aes_ctx->keylen);
tegra_se_config_crypto(se_dev, req_ctx->op_mode, req_ctx->encrypt,
aes_ctx->slot->slot_num, req->info ? true : false);
- ret = tegra_se_start_operation(se_dev, req->nbytes);
+ ret = tegra_se_start_operation(se_dev, req->nbytes, false);
tegra_se_dequeue_complete_req(se_dev, req);
mutex_unlock(&se_hw_lock);
req->base.complete(&req->base, ret);
}
-
static irqreturn_t tegra_se_irq(int irq, void *dev)
{
struct tegra_se_dev *se_dev = dev;
struct crypto_async_request *async_req = NULL;
struct crypto_async_request *backlog = NULL;
- clk_enable(se_dev->pclk);
+ pm_runtime_get_sync(se_dev->dev);
do {
spin_lock_irq(&se_dev->lock);
async_req = NULL;
}
} while (se_dev->work_q_busy);
- clk_disable(se_dev->pclk);
+ pm_runtime_put(se_dev->dev);
}
static int tegra_se_aes_queue_req(struct ablkcipher_request *req)
bool idle = true;
int err = 0;
+ if (!tegra_se_count_sgs(req->src, req->nbytes))
+ return -EINVAL;
+
spin_lock_irqsave(&se_dev->lock, flags);
err = ablkcipher_enqueue_request(&se_dev->queue, req);
if (se_dev->work_q_busy)
return tegra_se_aes_queue_req(req);
}
-
static int tegra_se_aes_setkey(struct crypto_ablkcipher *tfm,
const u8 *key, u32 keylen)
{
struct tegra_se_aes_context *ctx = crypto_ablkcipher_ctx(tfm);
struct tegra_se_dev *se_dev = ctx->se_dev;
+ struct tegra_se_slot *pslot;
u8 *pdata = (u8 *)key;
if (!ctx) {
return -EINVAL;
}
- if (!key) {
- dev_err(se_dev->dev, "invalid argument key");
+ if ((keylen != TEGRA_SE_KEY_128_SIZE) &&
+ (keylen != TEGRA_SE_KEY_192_SIZE) &&
+ (keylen != TEGRA_SE_KEY_256_SIZE)) {
+ dev_err(se_dev->dev, "invalid key size");
return -EINVAL;
}
- ctx->keylen = keylen;
+ if (key) {
+ if (!ctx->slot || (ctx->slot &&
+ ctx->slot->slot_num == ssk_slot.slot_num)) {
+ pslot = tegra_se_alloc_key_slot();
+ if (!pslot) {
+ dev_err(se_dev->dev, "no free key slot\n");
+ return -ENOMEM;
+ }
+ ctx->slot = pslot;
+ }
+ ctx->keylen = keylen;
+ } else {
+ tegra_se_free_key_slot(ctx->slot);
+ ctx->slot = &ssk_slot;
+ ctx->keylen = AES_KEYSIZE_128;
+ }
/* take access to the hw */
mutex_lock(&se_hw_lock);
- clk_enable(se_dev->pclk);
+ pm_runtime_get_sync(se_dev->dev);
/* load the key */
tegra_se_write_key_table(pdata, keylen, ctx->slot->slot_num,
SE_KEY_TABLE_TYPE_KEY);
- clk_disable(se_dev->pclk);
+ pm_runtime_put(se_dev->dev);
mutex_unlock(&se_hw_lock);
return 0;
}
-
static int tegra_se_aes_cra_init(struct crypto_tfm *tfm)
{
- struct tegra_se_dev *se_dev = sg_tegra_se_dev;
struct tegra_se_aes_context *ctx = crypto_tfm_ctx(tfm);
- struct tegra_se_slot *pslot;
-
- if (!ctx->slot) {
- pslot = tegra_se_alloc_key_slot();
- if (!pslot) {
- dev_err(se_dev->dev, "no free key slot\n");
- return -ENOMEM;
- }
- ctx->slot = pslot;
- }
ctx->se_dev = sg_tegra_se_dev;
tfm->crt_ablkcipher.reqsize = sizeof(struct tegra_se_req_context);
return 0;
}
+
static void tegra_se_aes_cra_exit(struct crypto_tfm *tfm)
{
struct tegra_se_aes_context *ctx = crypto_tfm_ctx(tfm);
ctx->slot = NULL;
}
-
static int tegra_se_rng_init(struct crypto_tfm *tfm)
{
struct tegra_se_rng_context *rng_ctx = crypto_tfm_ctx(tfm);
struct tegra_se_dev *se_dev = rng_ctx->se_dev;
struct tegra_se_ll *src_ll, *dst_ll;
unsigned char *dt_buf = (unsigned char *)rng_ctx->dt_buf;
- int ret = 0, i;
+ u8 *rdata_addr;
+ int ret = 0, i, j, num_blocks, data_len = 0;
- if (dlen > TEGRA_SE_RNG_DT_SIZE)
- return -EINVAL;
+ num_blocks = (dlen / TEGRA_SE_RNG_DT_SIZE);
+
+ data_len = (dlen % TEGRA_SE_RNG_DT_SIZE);
+ if (data_len == 0)
+ num_blocks = num_blocks - 1;
/* take access to the hw */
mutex_lock(&se_hw_lock);
- clk_enable(se_dev->pclk);
+ pm_runtime_get_sync(se_dev->dev);
*se_dev->src_ll_buf = 0;
*se_dev->dst_ll_buf = 0;
src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1);
dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1);
src_ll->addr = rng_ctx->dt_buf_adr;
- src_ll->data_len = dlen;
+ src_ll->data_len = TEGRA_SE_RNG_DT_SIZE;
dst_ll->addr = rng_ctx->rng_buf_adr;
- dst_ll->data_len = dlen;
+ dst_ll->data_len = TEGRA_SE_RNG_DT_SIZE;
tegra_se_config_algo(se_dev, SE_AES_OP_MODE_RNG_X931, true,
TEGRA_SE_KEY_128_SIZE);
tegra_se_config_crypto(se_dev, SE_AES_OP_MODE_RNG_X931, true,
rng_ctx->slot->slot_num, rng_ctx->use_org_iv);
- ret = tegra_se_start_operation(se_dev, dlen);
-
- if (!ret) {
- memcpy(rdata, rng_ctx->rng_buf, dlen);
- /* update DT vector */
- for (i = TEGRA_SE_RNG_DT_SIZE - 1; i >= 0; i--) {
- dt_buf[i] += 1;
- if (dt_buf[i] != 0)
- break;
+ for (j = 0; j <= num_blocks; j++) {
+ ret = tegra_se_start_operation(se_dev,
+ TEGRA_SE_RNG_DT_SIZE, false);
+
+ if (!ret) {
+ rdata_addr = (rdata + (j * TEGRA_SE_RNG_DT_SIZE));
+
+ if (data_len && num_blocks == j) {
+ memcpy(rdata_addr, rng_ctx->rng_buf, data_len);
+ } else {
+ memcpy(rdata_addr,
+ rng_ctx->rng_buf, TEGRA_SE_RNG_DT_SIZE);
+ }
+
+ /* update DT vector */
+ for (i = TEGRA_SE_RNG_DT_SIZE - 1; i >= 0; i--) {
+ dt_buf[i] += 1;
+ if (dt_buf[i] != 0)
+ break;
+ }
+ } else {
+ dlen = 0;
+ }
+ if (rng_ctx->use_org_iv) {
+ rng_ctx->use_org_iv = false;
+ tegra_se_config_crypto(se_dev,
+ SE_AES_OP_MODE_RNG_X931, true,
+ rng_ctx->slot->slot_num, rng_ctx->use_org_iv);
}
- } else {
- dlen = 0;
}
- rng_ctx->use_org_iv = false;
- clk_disable(se_dev->pclk);
+ pm_runtime_put(se_dev->dev);
mutex_unlock(&se_hw_lock);
return dlen;
u8 *iv = seed;
u8 *key = (u8 *)(seed + TEGRA_SE_RNG_IV_SIZE);
u8 *dt = key + TEGRA_SE_RNG_KEY_SIZE;
+ struct timespec ts;
+ u64 nsec, tmp[2];
BUG_ON(!seed);
- if (slen < TEGRA_SE_RNG_SEED_SIZE)
- return -EINVAL;
-
/* take access to the hw */
mutex_lock(&se_hw_lock);
- clk_enable(se_dev->pclk);
+ pm_runtime_get_sync(se_dev->dev);
tegra_se_write_key_table(key, TEGRA_SE_RNG_KEY_SIZE,
rng_ctx->slot->slot_num, SE_KEY_TABLE_TYPE_KEY);
tegra_se_write_key_table(iv, TEGRA_SE_RNG_IV_SIZE,
rng_ctx->slot->slot_num, SE_KEY_TABLE_TYPE_ORGIV);
- clk_disable(se_dev->pclk);
+ pm_runtime_put(se_dev->dev);
mutex_unlock(&se_hw_lock);
- memcpy(rng_ctx->dt_buf, dt, TEGRA_SE_RNG_DT_SIZE);
+ if (slen < TEGRA_SE_RNG_SEED_SIZE) {
+ getnstimeofday(&ts);
+ nsec = timespec_to_ns(&ts);
+ do_div(nsec, 1000);
+ nsec ^= se_dev->ctr << 56;
+ se_dev->ctr++;
+ tmp[0] = nsec;
+ tmp[1] = tegra_chip_uid();
+ memcpy(rng_ctx->dt_buf, (u8 *)tmp, TEGRA_SE_RNG_DT_SIZE);
+ } else {
+ memcpy(rng_ctx->dt_buf, dt, TEGRA_SE_RNG_DT_SIZE);
+ }
+
rng_ctx->use_org_iv = true;
return 0;
}
+/*
+ * Allocate the two DMA-coherent buffers (DT input and RNG output) used
+ * by the DRBG. Returns 0 on success or -ENOMEM; on partial failure the
+ * already-allocated dt_buf is freed before returning.
+ */
+static int tegra_se_rng_drbg_init(struct crypto_tfm *tfm)
+{
+ struct tegra_se_rng_context *rng_ctx = crypto_tfm_ctx(tfm);
+ struct tegra_se_dev *se_dev = sg_tegra_se_dev;
+
+ rng_ctx->se_dev = se_dev;
+ rng_ctx->dt_buf = dma_alloc_coherent(se_dev->dev, TEGRA_SE_RNG_DT_SIZE,
+ &rng_ctx->dt_buf_adr, GFP_KERNEL);
+ if (!rng_ctx->dt_buf) {
+ dev_err(se_dev->dev, "can not allocate rng dma buffer");
+ return -ENOMEM;
+ }
+
+ rng_ctx->rng_buf = dma_alloc_coherent(rng_ctx->se_dev->dev,
+ TEGRA_SE_RNG_DT_SIZE, &rng_ctx->rng_buf_adr, GFP_KERNEL);
+ if (!rng_ctx->rng_buf) {
+ dev_err(se_dev->dev, "can not allocate rng dma buffer");
+ /* undo the first allocation so nothing leaks */
+ dma_free_coherent(rng_ctx->se_dev->dev, TEGRA_SE_RNG_DT_SIZE,
+ rng_ctx->dt_buf, rng_ctx->dt_buf_adr);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * Produce dlen bytes of DRBG output. The engine generates one
+ * TEGRA_SE_RNG_DT_SIZE block per operation, so the request is split
+ * into num_blocks full blocks plus an optional data_len-byte tail.
+ * Returns the number of bytes delivered (dlen), or 0 if any hardware
+ * operation failed.
+ */
+static int tegra_se_rng_drbg_get_random(struct crypto_rng *tfm, u8 *rdata, u32 dlen)
+{
+ struct tegra_se_rng_context *rng_ctx = crypto_rng_ctx(tfm);
+ struct tegra_se_dev *se_dev = rng_ctx->se_dev;
+ struct tegra_se_ll *src_ll, *dst_ll;
+ u8 *rdata_addr;
+ int ret = 0, j, num_blocks, data_len = 0;
+
+ num_blocks = (dlen / TEGRA_SE_RNG_DT_SIZE);
+
+ data_len = (dlen % TEGRA_SE_RNG_DT_SIZE);
+ /* loop below runs j = 0..num_blocks inclusive, so when dlen is an
+ * exact multiple there is no partial tail and the count drops by 1 */
+ if (data_len == 0)
+ num_blocks = num_blocks - 1;
+
+ /* take access to the hw */
+ mutex_lock(&se_hw_lock);
+ pm_runtime_get_sync(se_dev->dev);
+
+ *se_dev->src_ll_buf = 0;
+ *se_dev->dst_ll_buf = 0;
+ src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1);
+ dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1);
+ src_ll->addr = rng_ctx->dt_buf_adr;
+ src_ll->data_len = TEGRA_SE_RNG_DT_SIZE;
+ dst_ll->addr = rng_ctx->rng_buf_adr;
+ dst_ll->data_len = TEGRA_SE_RNG_DT_SIZE;
+
+ tegra_se_config_algo(se_dev, SE_AES_OP_MODE_RNG_DRBG, true,
+ TEGRA_SE_KEY_128_SIZE);
+ tegra_se_config_crypto(se_dev, SE_AES_OP_MODE_RNG_DRBG, true,
+ 0, true);
+ for (j = 0; j <= num_blocks; j++) {
+ ret = tegra_se_start_operation(se_dev,
+ TEGRA_SE_RNG_DT_SIZE, false);
+
+ if (!ret) {
+ rdata_addr = (rdata + (j * TEGRA_SE_RNG_DT_SIZE));
+
+ /* last iteration copies only the partial tail */
+ if (data_len && num_blocks == j) {
+ memcpy(rdata_addr, rng_ctx->rng_buf, data_len);
+ } else {
+ memcpy(rdata_addr,
+ rng_ctx->rng_buf, TEGRA_SE_RNG_DT_SIZE);
+ }
+ } else {
+ /* report failure to the caller via a 0 byte count */
+ dlen = 0;
+ }
+ }
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+
+ return dlen;
+}
+
+/* No-op: the hardware DRBG reseeds itself (see RNG_RESEED_INTERVAL). */
+static int tegra_se_rng_drbg_reset(struct crypto_rng *tfm, u8 *seed, u32 slen)
+{
+ return 0;
+}
+
+/* Release the DMA buffers allocated by tegra_se_rng_drbg_init(). */
+static void tegra_se_rng_drbg_exit(struct crypto_tfm *tfm)
+{
+ struct tegra_se_rng_context *rng_ctx = crypto_tfm_ctx(tfm);
+
+ if (rng_ctx->dt_buf) {
+ dma_free_coherent(rng_ctx->se_dev->dev, TEGRA_SE_RNG_DT_SIZE,
+ rng_ctx->dt_buf, rng_ctx->dt_buf_adr);
+ }
+
+ if (rng_ctx->rng_buf) {
+ dma_free_coherent(rng_ctx->se_dev->dev, TEGRA_SE_RNG_DT_SIZE,
+ rng_ctx->rng_buf, rng_ctx->rng_buf_adr);
+ }
+ rng_ctx->se_dev = NULL;
+}
int tegra_se_sha_init(struct ahash_request *req)
{
return 0;
}
-
int tegra_se_sha_final(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
u32 total, num_sgs;
int err = 0;
+ if (!req->nbytes)
+ return -EINVAL;
+
if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
sha_ctx->op_mode = SE_AES_OP_MODE_SHA1;
/* take access to the hw */
mutex_lock(&se_hw_lock);
- clk_enable(se_dev->pclk);
+ pm_runtime_get_sync(se_dev->dev);
num_sgs = tegra_se_count_sgs(req->src, req->nbytes);
if ((num_sgs > SE_MAX_SRC_SG_COUNT)) {
dev_err(se_dev->dev, "num of SG buffers are more\n");
- clk_disable(se_dev->pclk);
+ pm_runtime_put(se_dev->dev);
mutex_unlock(&se_hw_lock);
return -EINVAL;
}
err = dma_map_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE);
if (!err) {
dev_err(se_dev->dev, "dma_map_sg() error\n");
- clk_disable(se_dev->pclk);
+ pm_runtime_put(se_dev->dev);
mutex_unlock(&se_hw_lock);
return -EINVAL;
}
tegra_se_config_algo(se_dev, sha_ctx->op_mode, false, 0);
tegra_se_config_sha(se_dev, req->nbytes);
- err = tegra_se_start_operation(se_dev, 0);
+ err = tegra_se_start_operation(se_dev, 0, false);
if (!err) {
tegra_se_read_hash_result(se_dev, req->result,
crypto_ahash_digestsize(tfm), true);
total -= src_sg->length;
src_sg = sg_next(src_sg);
}
- clk_disable(se_dev->pclk);
+ pm_runtime_put(se_dev->dev);
mutex_unlock(&se_hw_lock);
return err;
int tegra_se_sha_cra_init(struct crypto_tfm *tfm)
{
- int err = 0;
-
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct tegra_se_sha_context));
- return err;
+ return 0;
}
void tegra_se_sha_cra_exit(struct crypto_tfm *tfm)
{
+ /* do nothing */
}
-
int tegra_se_aes_cmac_init(struct ahash_request *req)
{
u8 *temp_buffer = NULL;
bool use_orig_iv = true;
-
/* take access to the hw */
mutex_lock(&se_hw_lock);
- clk_enable(se_dev->pclk);
-
+ pm_runtime_get_sync(se_dev->dev);
blocks_to_process = req->nbytes / TEGRA_SE_AES_BLOCK_SIZE;
/* num of bytes less than block size */
tegra_se_config_crypto(se_dev, SE_AES_OP_MODE_CMAC, true,
cmac_ctx->slot->slot_num, true);
tegra_se_start_operation(se_dev,
- blocks_to_process * TEGRA_SE_AES_BLOCK_SIZE);
+ blocks_to_process * TEGRA_SE_AES_BLOCK_SIZE, false);
src_sg = req->src;
while (mapped_sg_count--) {
dma_unmap_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE);
cmac_ctx->keylen);
tegra_se_config_crypto(se_dev, SE_AES_OP_MODE_CMAC, true,
cmac_ctx->slot->slot_num, use_orig_iv);
- tegra_se_start_operation(se_dev, TEGRA_SE_AES_BLOCK_SIZE);
+ tegra_se_start_operation(se_dev, TEGRA_SE_AES_BLOCK_SIZE, false);
tegra_se_read_hash_result(se_dev, req->result,
TEGRA_SE_AES_CMAC_DIGEST_SIZE, false);
out:
- clk_disable(se_dev->pclk);
+ pm_runtime_put(se_dev->dev);
mutex_unlock(&se_hw_lock);
if (cmac_ctx->buffer)
struct tegra_se_aes_cmac_context *ctx = crypto_ahash_ctx(tfm);
struct tegra_se_dev *se_dev = sg_tegra_se_dev;
struct tegra_se_ll *src_ll, *dst_ll;
+ struct tegra_se_slot *pslot;
u8 piv[TEGRA_SE_AES_IV_SIZE];
u32 *pbuf;
dma_addr_t pbuf_adr;
return -EINVAL;
}
- if (!key) {
- dev_err(se_dev->dev, "invalid argument key");
+ if ((keylen != TEGRA_SE_KEY_128_SIZE) &&
+ (keylen != TEGRA_SE_KEY_192_SIZE) &&
+ (keylen != TEGRA_SE_KEY_256_SIZE)) {
+ dev_err(se_dev->dev, "invalid key size");
return -EINVAL;
}
- ctx->keylen = keylen;
+ if (key) {
+ if (!ctx->slot || (ctx->slot &&
+ ctx->slot->slot_num == ssk_slot.slot_num)) {
+ pslot = tegra_se_alloc_key_slot();
+ if (!pslot) {
+ dev_err(se_dev->dev, "no free key slot\n");
+ return -ENOMEM;
+ }
+ ctx->slot = pslot;
+ }
+ ctx->keylen = keylen;
+ } else {
+ tegra_se_free_key_slot(ctx->slot);
+ ctx->slot = &ssk_slot;
+ ctx->keylen = AES_KEYSIZE_128;
+ }
pbuf = dma_alloc_coherent(se_dev->dev, TEGRA_SE_AES_BLOCK_SIZE,
&pbuf_adr, GFP_KERNEL);
/* take access to the hw */
mutex_lock(&se_hw_lock);
- clk_enable(se_dev->pclk);
+ pm_runtime_get_sync(se_dev->dev);
*se_dev->src_ll_buf = 0;
*se_dev->dst_ll_buf = 0;
tegra_se_config_crypto(se_dev, SE_AES_OP_MODE_CBC, true,
ctx->slot->slot_num, true);
- ret = tegra_se_start_operation(se_dev, TEGRA_SE_AES_BLOCK_SIZE);
+ ret = tegra_se_start_operation(se_dev, TEGRA_SE_AES_BLOCK_SIZE, false);
if (ret) {
dev_err(se_dev->dev, "tegra_se_aes_cmac_setkey:: start op failed\n");
goto out;
ctx->K2[TEGRA_SE_AES_BLOCK_SIZE - 1] ^= rb;
out:
- clk_disable(se_dev->pclk);
+ pm_runtime_put(se_dev->dev);
mutex_unlock(&se_hw_lock);
if (pbuf) {
pbuf, pbuf_adr);
}
-
return 0;
}
int tegra_se_aes_cmac_cra_init(struct crypto_tfm *tfm)
{
- struct tegra_se_dev *se_dev = sg_tegra_se_dev;
- struct tegra_se_aes_cmac_context *ctx = crypto_tfm_ctx(tfm);
- struct tegra_se_slot *pslot;
-
- if (!ctx->slot) {
- pslot = tegra_se_alloc_key_slot();
- if (!pslot) {
- dev_err(se_dev->dev, "no free key slot\n");
- return -ENOMEM;
- }
- ctx->slot = pslot;
- }
-
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct tegra_se_aes_cmac_context));
ctx->slot = NULL;
}
+/* Security Engine rsa key slot */
+struct tegra_se_rsa_slot {
+ struct list_head node;
+ u8 slot_num; /* Key slot number */
+ bool available; /* Tells whether key slot is free to use */
+};
-static struct crypto_alg aes_algs[] = {
- {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "tegra-se-aes-cbc",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct tegra_se_aes_context),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = tegra_se_aes_cra_init,
- .cra_exit = tegra_se_aes_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = TEGRA_SE_AES_MIN_KEY_SIZE,
- .max_keysize = TEGRA_SE_AES_MAX_KEY_SIZE,
- .ivsize = TEGRA_SE_AES_IV_SIZE,
- .setkey = tegra_se_aes_setkey,
- .encrypt = tegra_se_aes_cbc_encrypt,
- .decrypt = tegra_se_aes_cbc_decrypt,
- }
- }, {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "tegra-se-aes-ecb",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct tegra_se_aes_context),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = tegra_se_aes_cra_init,
- .cra_exit = tegra_se_aes_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = TEGRA_SE_AES_MIN_KEY_SIZE,
- .max_keysize = TEGRA_SE_AES_MAX_KEY_SIZE,
- .ivsize = TEGRA_SE_AES_IV_SIZE,
- .setkey = tegra_se_aes_setkey,
- .encrypt = tegra_se_aes_ecb_encrypt,
- .decrypt = tegra_se_aes_ecb_decrypt,
+
+/* Security Engine AES RSA context */
+struct tegra_se_aes_rsa_context {
+ struct tegra_se_dev *se_dev; /* Security Engine device */
+ struct tegra_se_rsa_slot *slot; /* Security Engine rsa key slot */
+ u32 keylen; /* key length in bits */
+};
+
+/* Return an RSA key slot to the free pool. NULL-safe. */
+static void tegra_se_rsa_free_key_slot(struct tegra_se_rsa_slot *slot)
+{
+ if (slot) {
+ spin_lock(&rsa_key_slot_lock);
+ slot->available = true;
+ spin_unlock(&rsa_key_slot_lock);
+ }
+}
+
+/*
+ * Allocate a free RSA key slot from the pool built by
+ * tegra_init_rsa_key_slot(). Returns NULL when every slot is in use.
+ */
+static struct tegra_se_rsa_slot *tegra_se_alloc_rsa_key_slot(void)
+{
+ struct tegra_se_rsa_slot *slot = NULL;
+ bool found = false;
+
+ spin_lock(&rsa_key_slot_lock);
+ /* scan the RSA slot list — not the AES key_slot list, whose nodes
+ * are struct tegra_se_slot and must never be handed out here */
+ list_for_each_entry(slot, &rsa_key_slot, node) {
+ if (slot->available) {
+ slot->available = false;
+ found = true;
+ break;
 }
- }, {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "tegra-se-aes-ctr",
+ }
+ spin_unlock(&rsa_key_slot_lock);
+ return found ? slot : NULL;
+}
+
+/*
+ * Build the pool of free RSA key slots. Each slot is marked available
+ * and linked onto the rsa_key_slot list, which the allocator scans.
+ * Returns 0 on success or -ENOMEM.
+ */
+static int tegra_init_rsa_key_slot(struct tegra_se_dev *se_dev)
+{
+ int i;
+
+ /* kcalloc: zeroed and overflow-checked array allocation */
+ se_dev->rsa_slot_list = kcalloc(TEGRA_SE_RSA_KEYSLOT_COUNT,
+ sizeof(struct tegra_se_rsa_slot), GFP_KERNEL);
+ if (se_dev->rsa_slot_list == NULL) {
+ dev_err(se_dev->dev, "rsa slot list memory allocation failed\n");
+ return -ENOMEM;
+ }
+ spin_lock_init(&rsa_key_slot_lock);
+ spin_lock(&rsa_key_slot_lock);
+ for (i = 0; i < TEGRA_SE_RSA_KEYSLOT_COUNT; i++) {
+ se_dev->rsa_slot_list[i].available = true;
+ se_dev->rsa_slot_list[i].slot_num = i;
+ INIT_LIST_HEAD(&se_dev->rsa_slot_list[i].node);
+ /* add to the RSA list, not the AES key_slot list — mixing
+ * the two corrupts the AES slot pool with foreign nodes */
+ list_add_tail(&se_dev->rsa_slot_list[i].node, &rsa_key_slot);
+ }
+ spin_unlock(&rsa_key_slot_lock);
+
+ return 0;
+}
+
+/* ahash init stub: all RSA work happens in tegra_se_rsa_digest(). */
+int tegra_se_rsa_init(struct ahash_request *req)
+{
+ return 0;
+}
+
+/* ahash update stub: all RSA work happens in tegra_se_rsa_digest(). */
+int tegra_se_rsa_update(struct ahash_request *req)
+{
+ return 0;
+}
+
+/* ahash final stub: all RSA work happens in tegra_se_rsa_digest(). */
+int tegra_se_rsa_final(struct ahash_request *req)
+{
+ return 0;
+}
+
+/*
+ * Load an RSA key into a hardware key slot. keylen packs two lengths:
+ * modulus byte-length in the upper 16 bits, exponent byte-length in
+ * the lower 16 bits. The exponent is written first, then the modulus,
+ * each word-reversed into the slot's key table.
+ * Returns 0, -EINVAL on bad arguments, or -ENOMEM when no slot is free.
+ */
+int tegra_se_rsa_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct tegra_se_aes_rsa_context *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se_dev *se_dev = sg_tegra_se_dev;
+ u32 module_key_length = 0;
+ u32 exponent_key_length = 0;
+ u32 pkt, val;
+ u32 key_size_words;
+ u32 key_word_size = 4;
+ u32 *pkeydata = (u32 *)key;
+ s32 i = 0;
+ struct tegra_se_rsa_slot *pslot;
+
+ if (!ctx || !key)
+ return -EINVAL;
+
+ module_key_length = (keylen >> 16);
+ exponent_key_length = (keylen & (0xFFFF));
+
+ /* validate before taking a slot so an invalid length (modulus must
+ * be 512..2048 bits) cannot leak an allocated key slot */
+ if (!(((module_key_length / 64) >= 1) &&
+ ((module_key_length / 64) <= 4)))
+ return -EINVAL;
+
+ /* Allocate an rsa key slot; reuse the one from a previous setkey
+ * instead of leaking it */
+ if (!ctx->slot) {
+ pslot = tegra_se_alloc_rsa_key_slot();
+ if (!pslot) {
+ dev_err(se_dev->dev, "no free key slot\n");
+ return -ENOMEM;
+ }
+ ctx->slot = pslot;
+ }
+ ctx->keylen = keylen;
+
+ /* take access to the hw */
+ mutex_lock(&se_hw_lock);
+ pm_runtime_get_sync(se_dev->dev);
+
+ /* Write key length (in 64-byte units, zero based) */
+ se_writel(se_dev, ((module_key_length / 64) - 1),
+ SE_RSA_KEY_SIZE_REG_OFFSET);
+
+ /* Write exponent size in 32-bit words */
+ se_writel(se_dev, (exponent_key_length / 4),
+ SE_RSA_EXP_SIZE_REG_OFFSET);
+
+ if (exponent_key_length) {
+ key_size_words = (exponent_key_length / key_word_size);
+ /* Write exponent, highest word address first */
+ for (i = (key_size_words - 1); i >= 0; i--) {
+ se_writel(se_dev, *pkeydata++, SE_RSA_KEYTABLE_DATA);
+ pkt = RSA_KEY_INPUT_MODE(RSA_KEY_INPUT_MODE_REG) |
+ RSA_KEY_NUM(ctx->slot->slot_num) |
+ RSA_KEY_TYPE(RSA_KEY_TYPE_EXP) |
+ RSA_KEY_PKT_WORD_ADDR(i);
+ val = SE_RSA_KEY_OP(RSA_KEY_WRITE) |
+ SE_RSA_KEYTABLE_PKT(pkt);
+
+ se_writel(se_dev, val, SE_RSA_KEYTABLE_ADDR);
+ }
+ }
+
+ if (module_key_length) {
+ key_size_words = (module_key_length / key_word_size);
+ /* Write modulus, highest word address first */
+ for (i = (key_size_words - 1); i >= 0; i--) {
+ se_writel(se_dev, *pkeydata++, SE_RSA_KEYTABLE_DATA);
+ pkt = RSA_KEY_INPUT_MODE(RSA_KEY_INPUT_MODE_REG) |
+ RSA_KEY_NUM(ctx->slot->slot_num) |
+ RSA_KEY_TYPE(RSA_KEY_TYPE_MOD) |
+ RSA_KEY_PKT_WORD_ADDR(i);
+ val = SE_RSA_KEY_OP(RSA_KEY_WRITE) |
+ SE_RSA_KEYTABLE_PKT(pkt);
+
+ se_writel(se_dev, val, SE_RSA_KEYTABLE_ADDR);
+ }
+ }
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+ return 0;
+}
+
+/*
+ * Copy nbytes of the RSA result out of the SE_RSA_OUTPUT registers
+ * into pdata, one 32-bit word at a time. nbytes is assumed to be a
+ * multiple of 4 (callers pass digest sizes).
+ */
+static void tegra_se_read_rsa_result(struct tegra_se_dev *se_dev,
+ u8 *pdata, unsigned int nbytes)
+{
+ u32 *result = (u32 *)pdata;
+ u32 i;
+
+ for (i = 0; i < nbytes / 4; i++)
+ result[i] = se_readl(se_dev, SE_RSA_OUTPUT +
+ (i * sizeof(u32)));
+}
+
+/*
+ * Run one RSA operation on req->src (512..2048-bit input) using the
+ * key previously loaded by tegra_se_rsa_setkey(), and read the result
+ * from the RSA output registers into req->result.
+ * Returns 0 on success or a negative errno. All scatterlist DMA
+ * mappings taken here are released before returning on every path.
+ */
+int tegra_se_rsa_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_se_aes_rsa_context *rsa_ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se_dev *se_dev = sg_tegra_se_dev;
+ struct scatterlist *src_sg;
+ struct tegra_se_ll *src_ll;
+ u32 num_sgs;
+ int total, ret = 0;
+ int mapped_sg_count = 0;
+ u32 val = 0;
+
+ if (!req)
+ return -EINVAL;
+
+ if (!req->nbytes)
+ return -EINVAL;
+
+ if ((req->nbytes < TEGRA_SE_RSA512_DIGEST_SIZE) ||
+ (req->nbytes > TEGRA_SE_RSA2048_DIGEST_SIZE))
+ return -EINVAL;
+
+ num_sgs = tegra_se_count_sgs(req->src, req->nbytes);
+ if (num_sgs > SE_MAX_SRC_SG_COUNT) {
+ dev_err(se_dev->dev, "num of SG buffers are more\n");
+ return -EINVAL;
+ }
+
+ *se_dev->src_ll_buf = num_sgs - 1;
+ src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1);
+ src_sg = req->src;
+ total = req->nbytes;
+
+ while (total > 0) {
+ ret = dma_map_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE);
+ if (!ret) {
+ dev_err(se_dev->dev, "dma_map_sg() error\n");
+ /* dma_map_sg() returns 0 on failure — report an
+ * error instead of returning 0 (success) */
+ ret = -EINVAL;
+ goto unmap;
+ }
+ mapped_sg_count++;
+ src_ll->addr = sg_dma_address(src_sg);
+ src_ll->data_len = src_sg->length;
+
+ total -= src_sg->length;
+ if (total > 0) {
+ src_sg = sg_next(src_sg);
+ src_ll++;
+ }
+ WARN_ON(((total != 0) && (!src_sg)));
+ }
+ ret = 0;
+
+ /* take access to the hw */
+ mutex_lock(&se_hw_lock);
+ pm_runtime_get_sync(se_dev->dev);
+
+ val = SE_CONFIG_ENC_ALG(ALG_RSA) |
+ SE_CONFIG_DEC_ALG(ALG_NOP) |
+ SE_CONFIG_DST(DST_RSAREG);
+ se_writel(se_dev, val, SE_CONFIG_REG_OFFSET);
+ se_writel(se_dev, RSA_KEY_SLOT(rsa_ctx->slot->slot_num), SE_RSA_CONFIG);
+ se_writel(se_dev, SE_CRYPTO_INPUT_SEL(INPUT_AHB), SE_CRYPTO_REG_OFFSET);
+
+ ret = tegra_se_start_operation(se_dev, 256, false);
+ if (ret)
+ dev_err(se_dev->dev, "tegra_se_aes_rsa_digest:: start op failed\n");
+ else
+ tegra_se_read_rsa_result(se_dev, req->result, req->nbytes);
+
+ pm_runtime_put_sync(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+
+unmap:
+ /* release every scatterlist entry mapped above */
+ src_sg = req->src;
+ while (mapped_sg_count--) {
+ dma_unmap_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE);
+ src_sg = sg_next(src_sg);
+ }
+ return ret;
+}
+
+/* .finup hook: no-op — all work happens in tegra_se_rsa_digest(). */
+int tegra_se_rsa_finup(struct ahash_request *req)
+{
+ return 0;
+}
+
+/* tfm init hook: nothing to allocate; the key slot is claimed in setkey. */
+int tegra_se_rsa_cra_init(struct crypto_tfm *tfm)
+{
+ return 0;
+}
+
+/*
+ * tfm exit hook: release the RSA key slot held by this transform.
+ * NOTE(review): ctx->slot may be NULL if setkey was never called —
+ * confirm tegra_se_rsa_free_key_slot() tolerates a NULL slot.
+ */
+void tegra_se_rsa_cra_exit(struct crypto_tfm *tfm)
+{
+ struct tegra_se_aes_rsa_context *ctx = crypto_tfm_ctx(tfm);
+
+ tegra_se_rsa_free_key_slot(ctx->slot);
+ ctx->slot = NULL;
+}
+
+/*
+ * AES-based algorithms offered by the Security Engine. Registration is
+ * gated per-chip through isAlgoSupported() at probe time.
+ */
+static struct crypto_alg aes_algs[] = {
+ {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-tegra",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_se_aes_context),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_se_aes_cra_init,
+ .cra_exit = tegra_se_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = TEGRA_SE_AES_MIN_KEY_SIZE,
+ .max_keysize = TEGRA_SE_AES_MAX_KEY_SIZE,
+ .ivsize = TEGRA_SE_AES_IV_SIZE,
+ .setkey = tegra_se_aes_setkey,
+ .encrypt = tegra_se_aes_cbc_encrypt,
+ .decrypt = tegra_se_aes_cbc_decrypt,
+ }
+ }, {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-tegra",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_se_aes_context),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_se_aes_cra_init,
+ .cra_exit = tegra_se_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = TEGRA_SE_AES_MIN_KEY_SIZE,
+ .max_keysize = TEGRA_SE_AES_MAX_KEY_SIZE,
+ .ivsize = TEGRA_SE_AES_IV_SIZE,
+ .setkey = tegra_se_aes_setkey,
+ .encrypt = tegra_se_aes_ecb_encrypt,
+ .decrypt = tegra_se_aes_ecb_decrypt,
+ }
+ }, {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-tegra",
 .cra_priority = 300,
 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 .cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE,
 }
 }, {
 .cra_name = "ofb(aes)",
- .cra_driver_name = "tegra-se-aes-ofb",
+ .cra_driver_name = "ofb-aes-tegra",
 .cra_priority = 300,
 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 .cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE,
 }
 }, {
 .cra_name = "ansi_cprng",
- .cra_driver_name = "tegra_ansi_cprng",
+ .cra_driver_name = "rng-aes-tegra",
 .cra_priority = 100,
 .cra_flags = CRYPTO_ALG_TYPE_RNG,
 .cra_ctxsize = sizeof(struct tegra_se_rng_context),
 .seedsize = TEGRA_SE_RNG_SEED_SIZE,
 }
 }
+ }, {
+ /* DRBG-based RNG — only registered on chips with drbg_supported. */
+ .cra_name = "rng_drbg",
+ .cra_driver_name = "rng_drbg-aes-tegra",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_RNG,
+ .cra_ctxsize = sizeof(struct tegra_se_rng_context),
+ .cra_type = &crypto_rng_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_se_rng_drbg_init,
+ .cra_exit = tegra_se_rng_drbg_exit,
+ .cra_u = {
+ .rng = {
+ .rng_make_random = tegra_se_rng_drbg_get_random,
+ .rng_reset = tegra_se_rng_drbg_reset,
+ .seedsize = TEGRA_SE_RNG_SEED_SIZE,
+ }
+ }
 }
};
.cra_init = tegra_se_sha_cra_init,
.cra_exit = tegra_se_sha_cra_exit,
}
+ }, {
+ /*
+ * RSA exposed through the ahash interface. The ctx accessed by
+ * tegra_se_rsa_digest()/tegra_se_rsa_cra_exit() is a
+ * struct tegra_se_aes_rsa_context, so size the ctx accordingly
+ * (previously mis-sized as the CMAC context — copy-paste bug).
+ */
+ .init = tegra_se_rsa_init,
+ .update = tegra_se_rsa_update,
+ .final = tegra_se_rsa_final,
+ .finup = tegra_se_rsa_finup,
+ .digest = tegra_se_rsa_digest,
+ .setkey = tegra_se_rsa_setkey,
+ .halg.digestsize = TEGRA_SE_RSA512_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "rsa512",
+ .cra_driver_name = "tegra-se-rsa512",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = TEGRA_SE_RSA512_DIGEST_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_se_aes_rsa_context),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_se_rsa_cra_init,
+ .cra_exit = tegra_se_rsa_cra_exit,
+ }
+ }, {
+ .init = tegra_se_rsa_init,
+ .update = tegra_se_rsa_update,
+ .final = tegra_se_rsa_final,
+ .finup = tegra_se_rsa_finup,
+ .digest = tegra_se_rsa_digest,
+ .setkey = tegra_se_rsa_setkey,
+ .halg.digestsize = TEGRA_SE_RSA1024_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "rsa1024",
+ .cra_driver_name = "tegra-se-rsa1024",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = TEGRA_SE_RSA1024_DIGEST_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_se_aes_rsa_context),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_se_rsa_cra_init,
+ .cra_exit = tegra_se_rsa_cra_exit,
+ }
+ }, {
+ .init = tegra_se_rsa_init,
+ .update = tegra_se_rsa_update,
+ .final = tegra_se_rsa_final,
+ .finup = tegra_se_rsa_finup,
+ .digest = tegra_se_rsa_digest,
+ .setkey = tegra_se_rsa_setkey,
+ .halg.digestsize = TEGRA_SE_RSA1536_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "rsa1536",
+ .cra_driver_name = "tegra-se-rsa1536",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = TEGRA_SE_RSA1536_DIGEST_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_se_aes_rsa_context),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_se_rsa_cra_init,
+ .cra_exit = tegra_se_rsa_cra_exit,
+ }
+ }, {
+ .init = tegra_se_rsa_init,
+ .update = tegra_se_rsa_update,
+ .final = tegra_se_rsa_final,
+ .finup = tegra_se_rsa_finup,
+ .digest = tegra_se_rsa_digest,
+ .setkey = tegra_se_rsa_setkey,
+ .halg.digestsize = TEGRA_SE_RSA2048_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "rsa2048",
+ .cra_driver_name = "tegra-se-rsa2048",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = TEGRA_SE_RSA2048_DIGEST_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_se_aes_rsa_context),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_se_rsa_cra_init,
+ .cra_exit = tegra_se_rsa_cra_exit,
+ }
 }
};
+/*
+ * Return true if @algo (a cra_name) is usable on this chip, according to
+ * the per-chip feature flags. Unknown algorithm names are allowed.
+ *
+ * Fix: the DRBG check compared against "drbg", but the algorithm is
+ * registered as "rng_drbg" (see aes_algs[]), so the drbg_supported gate
+ * never took effect and the alg was registered on unsupported chips.
+ */
+bool isAlgoSupported(struct tegra_se_dev *se_dev, const char *algo)
+{
+ if (!strcmp(algo, "ansi_cprng"))
+ return se_dev->chipdata->cprng_supported;
+
+ if (!strcmp(algo, "rng_drbg"))
+ return se_dev->chipdata->drbg_supported;
+
+ if (!strcmp(algo, "rsa512") || !strcmp(algo, "rsa1024") ||
+ !strcmp(algo, "rsa1536") || !strcmp(algo, "rsa2048"))
+ return se_dev->chipdata->rsa_supported;
+
+ return true;
+}
+
static int tegra_se_probe(struct platform_device *pdev)
{
 struct tegra_se_dev *se_dev = NULL;
 struct resource *res = NULL;
 int err = 0, i = 0, j = 0, k = 0;
- dev_info(&pdev->dev, "tegra_se_probe START ");
-
 se_dev = kzalloc(sizeof(struct tegra_se_dev), GFP_KERNEL);
 if (!se_dev) {
 dev_err(&pdev->dev, "memory allocation failed\n");
 goto fail;
 }
+ /*
+ * Map the PMC aperture (second MEM resource); tegra_se_suspend() uses
+ * it to pass the context-save buffer address via a scratch register.
+ * NOTE(review): se_dev->dev is not assigned yet in this hunk — the
+ * dev_err() calls below likely print against a NULL device; &pdev->dev
+ * would be safer. Verify against the full file.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ err = -ENXIO;
+ dev_err(se_dev->dev, "platform_get_resource failed\n");
+ goto err_pmc;
+ }
+
+ se_dev->pmc_io_reg = ioremap(res->start, resource_size(res));
+ if (!se_dev->pmc_io_reg) {
+ err = -ENOMEM;
+ dev_err(se_dev->dev, "pmc ioremap failed\n");
+ goto err_pmc;
+ }
+
 se_dev->irq = platform_get_irq(pdev, 0);
 if (!se_dev->irq) {
 err = -ENODEV;
 goto err_irq;
 }
- err = request_irq(se_dev->irq, tegra_se_irq, IRQF_DISABLED,
- sg_driver_name, se_dev);
- if (err) {
- dev_err(se_dev->dev, "request_irq failed - irq[%d] err[%d]\n",
- se_dev->irq, err);
- goto err_irq;
- }
+ /* Per-chip feature flags come from the matched platform_device_id. */
+ se_dev->chipdata =
+ (struct tegra_se_chipdata *)pdev->id_entry->driver_data;
 /* Initialize the clock */
- se_dev->pclk = clk_get_sys("tegra-se", NULL);
- if (!se_dev->pclk) {
- dev_err(se_dev->dev, "clock intialization failed.\n");
+ se_dev->pclk = clk_get(se_dev->dev, "se");
+ if (IS_ERR(se_dev->pclk)) {
+ /*
+ * NOTE(review): prints the error pointer with %d via a cast —
+ * use PTR_ERR()/%ld, and propagate PTR_ERR() instead of -ENODEV.
+ */
+ dev_err(se_dev->dev, "clock intialization failed (%d)\n",
+ (int)se_dev->pclk);
 err = -ENODEV;
 goto clean;
 }
 goto clean;
 }
+ /* Pre-populate the RSA key-slot free list. */
+ err = tegra_init_rsa_key_slot(se_dev);
+ if (err) {
+ dev_err(se_dev->dev, "init_rsa_key_slot failed\n");
+ goto clean;
+ }
+
 init_completion(&se_dev->complete);
- se_work_q = alloc_workqueue("se_work_q", WQ_HIGHPRI, 16);
+ se_work_q = alloc_workqueue("se_work_q", WQ_HIGHPRI | WQ_UNBOUND, 16);
 if (!se_work_q) {
 dev_err(se_dev->dev, "alloc_workqueue failed\n");
 goto clean;
 }
 sg_tegra_se_dev = se_dev;
+ pm_runtime_enable(se_dev->dev);
+ tegra_se_key_read_disable_all();
+
+ /* IRQ is requested only after runtime PM is enabled. */
+ err = request_irq(se_dev->irq, tegra_se_irq, IRQF_DISABLED,
+ DRIVER_NAME, se_dev);
+ if (err) {
+ dev_err(se_dev->dev, "request_irq failed - irq[%d] err[%d]\n",
+ se_dev->irq, err);
+ goto err_irq;
+ }
 err = tegra_se_alloc_ll_buf(se_dev, SE_MAX_SRC_SG_COUNT,
 SE_MAX_DST_SG_COUNT);
 }
 for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
- INIT_LIST_HEAD(&aes_algs[i].cra_list);
- err = crypto_register_alg(&aes_algs[i]);
- if (err) {
- dev_err(se_dev->dev,
+ if (isAlgoSupported(se_dev, aes_algs[i].cra_name)) {
+ INIT_LIST_HEAD(&aes_algs[i].cra_list);
+ err = crypto_register_alg(&aes_algs[i]);
+ if (err) {
+ dev_err(se_dev->dev,
 "crypto_register_alg failed index[%d]\n", i);
- goto clean;
+ goto clean;
+ }
 }
 }
-
 for (j = 0; j < ARRAY_SIZE(hash_algs); j++) {
- err = crypto_register_ahash(&hash_algs[j]);
- if (err) {
- dev_err(se_dev->dev,
- "crypto_register_sha alg failed index[%d]\n", i);
- goto clean;
+ if (isAlgoSupported(se_dev, hash_algs[j].halg.base.cra_name)) {
+ err = crypto_register_ahash(&hash_algs[j]);
+ if (err) {
+ dev_err(se_dev->dev,
+ "crypto_register_sha alg failed index[%d]\n", i);
+ goto clean;
+ }
 }
 }
- dev_info(se_dev->dev, "tegra_se_probe end");
+#if defined(CONFIG_PM)
+ /* Buffer tegra_se_suspend() fills with the encrypted SE context. */
+ se_dev->ctx_save_buf = dma_alloc_coherent(se_dev->dev,
+ SE_CONTEXT_BUFER_SIZE, &se_dev->ctx_save_buf_adr, GFP_KERNEL);
+ if (!se_dev->ctx_save_buf) {
+ dev_err(se_dev->dev, "Context save buffer alloc filed\n");
+ goto clean;
+ }
+#endif
+
+ dev_info(se_dev->dev, "%s: complete", __func__);
 return 0;
 clean:
+ pm_runtime_disable(se_dev->dev);
 for (k = 0; k < i; k++)
 crypto_unregister_alg(&aes_algs[k]);
 clk_put(se_dev->pclk);
 free_irq(se_dev->irq, &pdev->dev);
+
 err_irq:
+ iounmap(se_dev->pmc_io_reg);
+err_pmc:
 iounmap(se_dev->io_reg);
+
 fail:
 platform_set_drvdata(pdev, NULL);
 kfree(se_dev);
 if (!se_dev)
 return -ENODEV;
+ /*
+ * NOTE(review): this hunk appears to splice into tegra_se_remove();
+ * se_dev was kfree'd just above in the probe fail path, yet se_dev->dev
+ * is used below — verify the final file ordering is correct.
+ */
+ pm_runtime_disable(se_dev->dev);
+
 cancel_work_sync(&se_work);
 if (se_work_q)
 destroy_workqueue(se_work_q);
 if (se_dev->pclk)
 clk_put(se_dev->pclk);
 tegra_se_free_ll_buf(se_dev);
+ if (se_dev->ctx_save_buf) {
+ dma_free_coherent(se_dev->dev, SE_CONTEXT_BUFER_SIZE,
+ se_dev->ctx_save_buf, se_dev->ctx_save_buf_adr);
+ se_dev->ctx_save_buf = NULL;
+ }
 iounmap(se_dev->io_reg);
+ iounmap(se_dev->pmc_io_reg);
 kfree(se_dev);
 sg_tegra_se_dev = NULL;
 }
#if defined(CONFIG_PM)
-static int tegra_se_resume(struct platform_device *pdev)
+/* System resume: stub — nothing to restore from the driver side here. */
+static int tegra_se_resume(struct device *dev)
 {
 return 0;
 }
-static int tegra_se_suspend(struct platform_device *pdev, pm_message_t state)
+
+/*
+ * Fill the SRK key slot with 128 random bits, using the SSK slot as the
+ * RNG key. Caller (tegra_se_generate_srk) holds se_hw_lock and an active
+ * runtime-PM reference.
+ */
+static int tegra_se_generate_rng_key(struct tegra_se_dev *se_dev)
 {
- return 0;
+ int ret = 0;
+ u32 val = 0;
+
+ *se_dev->src_ll_buf = 0;
+ *se_dev->dst_ll_buf = 0;
+
+ /* Configure algorithm */
+ val = SE_CONFIG_ENC_ALG(ALG_RNG) | SE_CONFIG_ENC_MODE(MODE_KEY128) |
+ SE_CONFIG_DST(DST_KEYTAB);
+ se_writel(se_dev, val, SE_CONFIG_REG_OFFSET);
+
+ /* Configure destination key index number */
+ val = SE_CRYPTO_KEYTABLE_DST_KEY_INDEX(srk_slot.slot_num) |
+ SE_CRYPTO_KEYTABLE_DST_WORD_QUAD(KEYS_0_3);
+ se_writel(se_dev, val, SE_CRYPTO_KEYTABLE_DST_REG_OFFSET);
+
+ /* Configure crypto */
+ val = SE_CRYPTO_INPUT_SEL(INPUT_RANDOM) | SE_CRYPTO_XOR_POS(XOR_BYPASS) |
+ SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) |
+ SE_CRYPTO_HASH(HASH_DISABLE) |
+ SE_CRYPTO_KEY_INDEX(ssk_slot.slot_num) |
+ SE_CRYPTO_IV_SEL(IV_ORIGINAL);
+ se_writel(se_dev, val, SE_CRYPTO_REG_OFFSET);
+
+ ret = tegra_se_start_operation(se_dev, TEGRA_SE_KEY_128_SIZE, false);
+
+ return ret;
+}
+
+/*
+ * Generate the SRK: first load the SRK key slot with random bits
+ * (tegra_se_generate_rng_key), then run an RNG operation with DST_SRK so
+ * the engine latches the SRK internally. Takes se_hw_lock itself.
+ */
+static int tegra_se_generate_srk(struct tegra_se_dev *se_dev)
+{
+ int ret = 0;
+ u32 val = 0;
+
+ mutex_lock(&se_hw_lock);
+ pm_runtime_get_sync(se_dev->dev);
+
+ ret = tegra_se_generate_rng_key(se_dev);
+ if (ret) {
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+ return ret;
+ }
+
+ *se_dev->src_ll_buf = 0;
+ *se_dev->dst_ll_buf = 0;
+
+ val = SE_CONFIG_ENC_ALG(ALG_RNG) | SE_CONFIG_ENC_MODE(MODE_KEY128) |
+ SE_CONFIG_DEC_ALG(ALG_NOP) | SE_CONFIG_DST(DST_SRK);
+
+ se_writel(se_dev, val, SE_CONFIG_REG_OFFSET);
+
+ val = SE_CRYPTO_XOR_POS(XOR_BYPASS) |
+ SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) |
+ SE_CRYPTO_HASH(HASH_DISABLE) |
+ SE_CRYPTO_KEY_INDEX(srk_slot.slot_num) |
+ SE_CRYPTO_IV_SEL(IV_UPDATED);
+
+ se_writel(se_dev, val, SE_CRYPTO_REG_OFFSET);
+ ret = tegra_se_start_operation(se_dev, TEGRA_SE_KEY_128_SIZE, false);
+
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+
+ return ret;
+}
+
+/*
+ * Generate SE_CONTEXT_SAVE_RANDOM_DATA_SIZE bytes of random data in place
+ * at the start of the context-save buffer (src and dst both point at
+ * ctx_save_buf_adr). Takes se_hw_lock itself.
+ */
+static int tegra_se_lp_generate_random_data(struct tegra_se_dev *se_dev)
+{
+ struct tegra_se_ll *src_ll, *dst_ll;
+ int ret = 0;
+ u32 val;
+
+ mutex_lock(&se_hw_lock);
+ pm_runtime_get_sync(se_dev->dev);
+
+ *se_dev->src_ll_buf = 0;
+ *se_dev->dst_ll_buf = 0;
+ src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1);
+ dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1);
+ src_ll->addr = se_dev->ctx_save_buf_adr;
+ src_ll->data_len = SE_CONTEXT_SAVE_RANDOM_DATA_SIZE;
+ dst_ll->addr = se_dev->ctx_save_buf_adr;
+ dst_ll->data_len = SE_CONTEXT_SAVE_RANDOM_DATA_SIZE;
+
+ tegra_se_config_algo(se_dev, SE_AES_OP_MODE_RNG_X931, true,
+ TEGRA_SE_KEY_128_SIZE);
+
+ /* Configure crypto */
+ val = SE_CRYPTO_INPUT_SEL(INPUT_RANDOM) | SE_CRYPTO_XOR_POS(XOR_BYPASS) |
+ SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) |
+ SE_CRYPTO_HASH(HASH_DISABLE) |
+ SE_CRYPTO_KEY_INDEX(srk_slot.slot_num) |
+ SE_CRYPTO_IV_SEL(IV_ORIGINAL);
+
+ se_writel(se_dev, val, SE_CRYPTO_REG_OFFSET);
+ ret = tegra_se_start_operation(se_dev,
+ SE_CONTEXT_SAVE_RANDOM_DATA_SIZE, false);
+
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+
+ return ret;
+
+}
+
+/*
+ * Encrypt @data_size bytes in place at @context_offset inside the
+ * context-save buffer, using the engine's context-save operation
+ * (SE_CONTEXT_SAVE_SRC = MEM). Takes se_hw_lock itself.
+ */
+static int tegra_se_lp_encrypt_context_data(struct tegra_se_dev *se_dev,
+ u32 context_offset, u32 data_size)
+{
+ struct tegra_se_ll *src_ll, *dst_ll;
+ int ret = 0;
+
+ mutex_lock(&se_hw_lock);
+ pm_runtime_get_sync(se_dev->dev);
+
+ *se_dev->src_ll_buf = 0;
+ *se_dev->dst_ll_buf = 0;
+ src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1);
+ dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1);
+ src_ll->addr = se_dev->ctx_save_buf_adr + context_offset;
+ src_ll->data_len = data_size;
+ dst_ll->addr = se_dev->ctx_save_buf_adr + context_offset;
+ dst_ll->data_len = data_size;
+
+ se_writel(se_dev, SE_CONTEXT_SAVE_SRC(MEM),
+ SE_CONTEXT_SAVE_CONFIG_REG_OFFSET);
+
+ ret = tegra_se_start_operation(se_dev, data_size, true);
+
+ pm_runtime_put(se_dev->dev);
+
+ mutex_unlock(&se_hw_lock);
+
+ return ret;
+}
+
+/*
+ * Save the engine's sticky bits into the context-save buffer at
+ * SE_CONTEXT_SAVE_STICKY_BITS_OFFSET. Takes se_hw_lock itself.
+ */
+static int tegra_se_lp_sticky_bits_context_save(struct tegra_se_dev *se_dev)
+{
+ struct tegra_se_ll *dst_ll;
+ int ret = 0;
+
+ mutex_lock(&se_hw_lock);
+ pm_runtime_get_sync(se_dev->dev);
+
+ *se_dev->src_ll_buf = 0;
+ *se_dev->dst_ll_buf = 0;
+ dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1);
+ dst_ll->addr = (se_dev->ctx_save_buf_adr +
+ SE_CONTEXT_SAVE_STICKY_BITS_OFFSET);
+ dst_ll->data_len = SE_CONTEXT_SAVE_STICKY_BITS_SIZE;
+
+ se_writel(se_dev, SE_CONTEXT_SAVE_SRC(STICKY_BITS),
+ SE_CONTEXT_SAVE_CONFIG_REG_OFFSET);
+
+ ret = tegra_se_start_operation(se_dev,
+ SE_CONTEXT_SAVE_STICKY_BITS_SIZE, true);
+
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+
+ return ret;
+}
+
+/*
+ * Save every key slot (two word-quads per slot) into the context-save
+ * buffer at SE_CONTEXT_SAVE_KEYS_OFFSET. Takes se_hw_lock itself.
+ *
+ * Fix: the inner-loop "break" on failure left the outer loop running;
+ * later successful iterations then overwrote ret, masking the error.
+ */
+static int tegra_se_lp_keytable_context_save(struct tegra_se_dev *se_dev)
+{
+ struct tegra_se_ll *dst_ll;
+ int ret = 0, i, j;
+ u32 val = 0;
+
+ /* take access to the hw */
+ mutex_lock(&se_hw_lock);
+ pm_runtime_get_sync(se_dev->dev);
+
+ *se_dev->dst_ll_buf = 0;
+ dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1);
+ dst_ll->addr = (se_dev->ctx_save_buf_adr + SE_CONTEXT_SAVE_KEYS_OFFSET);
+ dst_ll->data_len = TEGRA_SE_KEY_128_SIZE;
+
+ for (i = 0; i < TEGRA_SE_KEYSLOT_COUNT; i++) {
+ for (j = 0; j < 2; j++) {
+ val = SE_CONTEXT_SAVE_SRC(KEYTABLE) |
+ SE_CONTEXT_SAVE_KEY_INDEX(i) |
+ SE_CONTEXT_SAVE_WORD_QUAD(j);
+ se_writel(se_dev,
+ val, SE_CONTEXT_SAVE_CONFIG_REG_OFFSET);
+ ret = tegra_se_start_operation(se_dev,
+ TEGRA_SE_KEY_128_SIZE, true);
+ if (ret)
+ break;
+ dst_ll->addr += TEGRA_SE_KEY_128_SIZE;
+ }
+ /* Propagate the failure out of the outer loop as well. */
+ if (ret)
+ break;
+ }
+
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+
+ return ret;
+}
+
+/*
+ * Save the original (@org_iv true) or updated IVs of all key slots into
+ * the context-save buffer at @context_offset. Takes se_hw_lock itself.
+ */
+static int tegra_se_lp_iv_context_save(struct tegra_se_dev *se_dev,
+ bool org_iv, u32 context_offset)
+{
+ struct tegra_se_ll *dst_ll;
+ int ret = 0, i;
+ u32 val = 0;
+
+ mutex_lock(&se_hw_lock);
+ pm_runtime_get_sync(se_dev->dev);
+
+ *se_dev->dst_ll_buf = 0;
+ dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1);
+ dst_ll->addr = (se_dev->ctx_save_buf_adr + context_offset);
+ dst_ll->data_len = TEGRA_SE_AES_IV_SIZE;
+
+ for (i = 0; i < TEGRA_SE_KEYSLOT_COUNT; i++) {
+ val = SE_CONTEXT_SAVE_SRC(KEYTABLE) |
+ SE_CONTEXT_SAVE_KEY_INDEX(i) |
+ (org_iv ? SE_CONTEXT_SAVE_WORD_QUAD(ORIG_IV) :
+ SE_CONTEXT_SAVE_WORD_QUAD(UPD_IV));
+ se_writel(se_dev, val, SE_CONTEXT_SAVE_CONFIG_REG_OFFSET);
+ ret = tegra_se_start_operation(se_dev,
+ TEGRA_SE_AES_IV_SIZE, true);
+ if (ret)
+ break;
+ dst_ll->addr += TEGRA_SE_AES_IV_SIZE;
+ }
+
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+
+ return ret;
+}
+
+/*
+ * Latch the SRK into secure scratch (zero-length context-save operation
+ * with SRC = SRK; the engine performs the store internally).
+ * Takes se_hw_lock itself.
+ */
+static int tegra_se_save_SRK(struct tegra_se_dev *se_dev)
+{
+ int ret = 0;
+
+ mutex_lock(&se_hw_lock);
+ pm_runtime_get_sync(se_dev->dev);
+
+ se_writel(se_dev, SE_CONTEXT_SAVE_SRC(SRK),
+ SE_CONTEXT_SAVE_CONFIG_REG_OFFSET);
+ ret = tegra_se_start_operation(se_dev, 0, true);
+
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+
+ return ret;
+}
+
+/*
+ * System suspend: build the encrypted SE context image in the context-save
+ * buffer (SRK generation, random data, sticky bits, key table, IVs, known
+ * pattern), publish its address via the PMC scratch register, and latch
+ * the SRK into secure scratch. Returns 0 or the first failing step's errno.
+ */
+static int tegra_se_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct tegra_se_dev *se_dev = platform_get_drvdata(pdev);
+ int err = 0;
+ unsigned char *dt_buf = NULL;
+ u8 pdata[SE_CONTEXT_KNOWN_PATTERN_SIZE] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f};
+
+ if (!se_dev)
+ return -ENODEV;
+
+ /* Generate SRK */
+ err = tegra_se_generate_srk(se_dev);
+ if (err) {
+ dev_err(se_dev->dev, "\n LP SRK genration failed\n");
+ goto out;
+ }
+
+ /* Generate random data*/
+ err = tegra_se_lp_generate_random_data(se_dev);
+ if (err) {
+ dev_err(se_dev->dev, "\n LP random pattern generation failed\n");
+ goto out;
+ }
+
+ /* Encrypt random data */
+ err = tegra_se_lp_encrypt_context_data(se_dev,
+ SE_CONTEXT_SAVE_RANDOM_DATA_OFFSET,
+ SE_CONTEXT_SAVE_RANDOM_DATA_SIZE);
+ if (err) {
+ dev_err(se_dev->dev, "\n LP random pattern encryption failed\n");
+ goto out;
+ }
+
+ /* Sticky bits context save*/
+ err = tegra_se_lp_sticky_bits_context_save(se_dev);
+ if (err) {
+ dev_err(se_dev->dev, "\n LP sticky bits context save failure\n");
+ goto out;
+ }
+
+ /* Key table context save*/
+ err = tegra_se_lp_keytable_context_save(se_dev);
+ if (err) {
+ dev_err(se_dev->dev, "\n LP key table save failure\n");
+ goto out;
+ }
+
+ /* Original iv context save*/
+ err = tegra_se_lp_iv_context_save(se_dev,
+ true, SE_CONTEXT_ORIGINAL_IV_OFFSET);
+ if (err) {
+ dev_err(se_dev->dev, "\n LP original iv save failure\n");
+ goto out;
+ }
+
+ /* UPdated iv context save*/
+ err = tegra_se_lp_iv_context_save(se_dev,
+ false, SE_CONTEXT_UPDATED_IV_OFFSET);
+ if (err) {
+ dev_err(se_dev->dev, "\n LP updated iv save failure\n");
+ goto out;
+ }
+
+ /* Encrypt known pattern (memcpy replaces the manual byte loop) */
+ dt_buf = (unsigned char *)se_dev->ctx_save_buf;
+ dt_buf += SE_CONTEXT_KNOWN_PATTERN_OFFSET;
+ memcpy(dt_buf, pdata, SE_CONTEXT_KNOWN_PATTERN_SIZE);
+ err = tegra_se_lp_encrypt_context_data(se_dev,
+ SE_CONTEXT_KNOWN_PATTERN_OFFSET, SE_CONTEXT_KNOWN_PATTERN_SIZE);
+ if (err) {
+ dev_err(se_dev->dev, "LP known pattern save failure\n");
+ goto out;
+ }
+
+ /* Write lp context buffer address into PMC scratch register */
+ writel(se_dev->ctx_save_buf_adr,
+ se_dev->pmc_io_reg + PMC_SCRATCH43_REG_OFFSET);
+
+ /* Saves SRK in secure scratch */
+ err = tegra_se_save_SRK(se_dev);
+ if (err) {
+ dev_err(se_dev->dev, "LP SRK save failure\n");
+ goto out;
+ }
+
+out:
+ return err;
 }
#endif
+#if defined(CONFIG_PM_RUNTIME)
+/* Runtime PM: gate the SE clock while the engine is idle. */
+static int tegra_se_runtime_suspend(struct device *dev)
+{
+ /*
+ * do a dummy read, to avoid scenarios where you have unposted writes
+ * still on the bus, before disabling clocks
+ */
+ se_readl(sg_tegra_se_dev, SE_CONFIG_REG_OFFSET);
-static struct platform_driver tegra_se_driver = {
- .probe = tegra_se_probe,
- .remove = __devexit_p(tegra_se_remove),
+ clk_disable(sg_tegra_se_dev->pclk);
+ return 0;
+}
+
+/* Runtime PM: re-enable the SE clock before the engine is used again. */
+static int tegra_se_runtime_resume(struct device *dev)
+{
+ clk_enable(sg_tegra_se_dev->pclk);
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_se_dev_pm_ops = {
+ .runtime_suspend = tegra_se_runtime_suspend,
+ .runtime_resume = tegra_se_runtime_resume,
 #if defined(CONFIG_PM)
 .suspend = tegra_se_suspend,
 .resume = tegra_se_resume,
 #endif
+};
+#endif
+
+/* Feature flags for the "tegra-se" platform device id. */
+static struct tegra_se_chipdata tegra_se_chipdata = {
+ .rsa_supported = false,
+ .cprng_supported = true,
+ .drbg_supported = false,
+};
+
+/* Feature flags for the "tegra11-se" platform device id. */
+static struct tegra_se_chipdata tegra11_se_chipdata = {
+ .rsa_supported = true,
+ .cprng_supported = false,
+ .drbg_supported = true,
+
+};
+
+/*
+ * Device-id table; driver_data selects the per-chip feature flags.
+ * Fix: platform_device_id tables must be zero-terminated — the platform
+ * bus match loop iterates until it finds an entry with an empty name, so
+ * the missing sentinel caused an out-of-bounds read.
+ */
+static struct platform_device_id tegra_dev_se_devtype[] = {
+ {
+ .name = "tegra-se",
+ .driver_data = (unsigned long)&tegra_se_chipdata,
+ },
+ {
+ .name = "tegra11-se",
+ .driver_data = (unsigned long)&tegra11_se_chipdata,
+ },
+ {
+ /* sentinel */
+ }
+};
+
+/*
+ * Platform driver registration.
+ * NOTE(review): the pm ops are only hooked up under CONFIG_PM_RUNTIME; a
+ * CONFIG_PM-only build defines tegra_se_suspend()/tegra_se_resume() but
+ * never references them — confirm the intended config combinations.
+ */
+static struct platform_driver tegra_se_driver = {
+ .probe = tegra_se_probe,
+ .remove = __devexit_p(tegra_se_remove),
+ .id_table = tegra_dev_se_devtype,
 .driver = {
- .name = sg_driver_name,
+ .name = "tegra-se",
 .owner = THIS_MODULE,
+#if defined(CONFIG_PM_RUNTIME)
+ .pm = &tegra_se_dev_pm_ops,
+#endif
 },
 };