/**
 * AMCC SoC PPC4xx Crypto Driver
 *
 * Copyright (c) 2008 Applied Micro Circuits Corporation.
 * All rights reserved. James Hsiao <jhsiao@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * This file implements the AMCC crypto offload Linux device driver for
 * use with the Linux CryptoAPI.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/of_platform.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/cacheflush.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"

#define PPC4XX_SEC_VERSION_STR                  "0.5"

/**
 * PPC4xx Crypto Engine Initialization Routine
 */
static void crypto4xx_hw_init(struct crypto4xx_device *dev)
{
        union ce_ring_size ring_size;
        union ce_ring_contol ring_ctrl; /* sic: matches the union's (misspelled) name in crypto4xx_reg_def.h */
        union ce_part_ring_size part_ring_size;
        union ce_io_threshold io_threshold;
        u32 rand_num;
        union ce_pe_dma_cfg pe_dma_cfg;

        writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
        /* setup pe dma, including reset of sg, pdr and pe, then release reset */
        pe_dma_cfg.w = 0;
        pe_dma_cfg.bf.bo_sgpd_en = 1;
        pe_dma_cfg.bf.bo_data_en = 0;
        pe_dma_cfg.bf.bo_sa_en = 1;
        pe_dma_cfg.bf.bo_pd_en = 1;
        pe_dma_cfg.bf.dynamic_sa_en = 1;
        pe_dma_cfg.bf.reset_sg = 1;
        pe_dma_cfg.bf.reset_pdr = 1;
        pe_dma_cfg.bf.reset_pe = 1;
        writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
        /* release reset of pe, sg and pdr */
        pe_dma_cfg.bf.pe_mode = 0;
        pe_dma_cfg.bf.reset_sg = 0;
        pe_dma_cfg.bf.reset_pdr = 0;
        pe_dma_cfg.bf.reset_pe = 0;
        pe_dma_cfg.bf.bo_td_en = 0;
        writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
        writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
        /* the result ring shares the packet ring's memory */
        writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
        writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
        get_random_bytes(&rand_num, sizeof(rand_num));
        writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
        get_random_bytes(&rand_num, sizeof(rand_num));
        writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
        ring_size.w = 0;
        ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
        ring_size.bf.ring_size   = PPC4XX_NUM_PD;
        writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
        ring_ctrl.w = 0;
        writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
        writel(PPC4XX_DC_3DES_EN, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
        writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
        writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
        part_ring_size.w = 0;
        part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
        part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
        writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
        writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
        io_threshold.w = 0;
        io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
        io_threshold.bf.input_threshold  = PPC4XX_INPUT_THRESHOLD;
        writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
        writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
        writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
        writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
        writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
        writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
        writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
        writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
        /* resets stay released; now enable the packet engine */
        pe_dma_cfg.bf.pe_mode = 1;
        pe_dma_cfg.bf.reset_sg = 0;
        pe_dma_cfg.bf.reset_pdr = 0;
        pe_dma_cfg.bf.reset_pe = 0;
        pe_dma_cfg.bf.bo_td_en = 0;
        writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
        /* clear all pending interrupts */
        writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
        writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
        writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
        writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
        writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
}

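/*
 * SA (security association) buffers: 'size' is in 32-bit words, so each
 * of the inbound and outbound SAs below occupies size * 4 bytes of
 * coherent DMA memory.
 */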
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
{
        ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
                                        &ctx->sa_in_dma_addr, GFP_ATOMIC);
        if (ctx->sa_in == NULL)
                return -ENOMEM;

        ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
                                         &ctx->sa_out_dma_addr, GFP_ATOMIC);
        if (ctx->sa_out == NULL) {
                /* free with the size just allocated, not the stale sa_len */
                dma_free_coherent(ctx->dev->core_dev->device,
                                  size * 4,
                                  ctx->sa_in, ctx->sa_in_dma_addr);
                return -ENOMEM;
        }

        memset(ctx->sa_in, 0, size * 4);
        memset(ctx->sa_out, 0, size * 4);
        ctx->sa_len = size;

        return 0;
}

void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
{
        if (ctx->sa_in != NULL)
                dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
                                  ctx->sa_in, ctx->sa_in_dma_addr);
        if (ctx->sa_out != NULL)
                dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
                                  ctx->sa_out, ctx->sa_out_dma_addr);

        /* clear the pointers too so a second call is a harmless no-op */
        ctx->sa_in = NULL;
        ctx->sa_out = NULL;
        ctx->sa_in_dma_addr = 0;
        ctx->sa_out_dma_addr = 0;
        ctx->sa_len = 0;
}

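/*
 * The state record is a small per-context chunk of engine-owned memory
 * that carries running state between operations, e.g. the intermediate
 * digest that crypto4xx_copy_digest_to_dst() reads back below.
 */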
u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx)
{
        ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device,
                                sizeof(struct sa_state_record),
                                &ctx->state_record_dma_addr, GFP_ATOMIC);
        if (!ctx->state_record)
                return -ENOMEM;
        memset(ctx->state_record, 0, sizeof(struct sa_state_record));

        return 0;
}

void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
{
        if (ctx->state_record != NULL)
                dma_free_coherent(ctx->dev->core_dev->device,
                                  sizeof(struct sa_state_record),
                                  ctx->state_record,
                                  ctx->state_record_dma_addr);
        ctx->state_record = NULL;
        ctx->state_record_dma_addr = 0;
}

/**
 * alloc memory for the packet descriptor ring
 * also carves out the per-PD shadow SA and state record pools;
 * pdr_head and pdr_tail start at zero since the device struct is zeroed
 */
static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
{
        int i;
        struct pd_uinfo *pd_uinfo;

        dev->pdr = dma_alloc_coherent(dev->core_dev->device,
                                      sizeof(struct ce_pd) * PPC4XX_NUM_PD,
                                      &dev->pdr_pa, GFP_ATOMIC);
        if (!dev->pdr)
                return -ENOMEM;

        dev->pdr_uinfo = kzalloc(sizeof(struct pd_uinfo) * PPC4XX_NUM_PD,
                                GFP_KERNEL);
        if (!dev->pdr_uinfo) {
                dma_free_coherent(dev->core_dev->device,
                                  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
                                  dev->pdr,
                                  dev->pdr_pa);
                return -ENOMEM;
        }
        memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
        dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
                                   256 * PPC4XX_NUM_PD,
                                   &dev->shadow_sa_pool_pa,
                                   GFP_ATOMIC);
        if (!dev->shadow_sa_pool)
                return -ENOMEM;

        dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
                         sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
                         &dev->shadow_sr_pool_pa, GFP_ATOMIC);
        if (!dev->shadow_sr_pool)
                return -ENOMEM;
        for (i = 0; i < PPC4XX_NUM_PD; i++) {
                pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo +
                                                sizeof(struct pd_uinfo) * i);

                /* alloc 256 bytes which is enough for any kind of dynamic sa */
                pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i;
                pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i;

                /* alloc state record */
                pd_uinfo->sr_va = dev->shadow_sr_pool +
                    sizeof(struct sa_state_record) * i;
                pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
                    sizeof(struct sa_state_record) * i;
        }

        return 0;
}

static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
{
        if (dev->pdr != NULL)
                dma_free_coherent(dev->core_dev->device,
                                  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
                                  dev->pdr, dev->pdr_pa);
        if (dev->shadow_sa_pool)
                dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
                                  dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
        if (dev->shadow_sr_pool)
                dma_free_coherent(dev->core_dev->device,
                        sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
                        dev->shadow_sr_pool, dev->shadow_sr_pool_pa);

        kfree(dev->pdr_uinfo);
}

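/*
 * Ring indexing convention, shared by the PD, GD and SD rings: head is
 * where the next free entry is claimed, tail is the oldest in-flight
 * entry.  A ring counts as full when advancing head would make it equal
 * tail, so one slot always stays unused; e.g. a 256-entry ring can have
 * at most 255 descriptors outstanding.
 */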
static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
{
        u32 retval;
        u32 tmp;

        retval = dev->pdr_head;
        tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;

        if (tmp == dev->pdr_tail)
                return ERING_WAS_FULL;

        dev->pdr_head = tmp;

        return retval;
}

static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
{
        struct pd_uinfo *pd_uinfo;
        unsigned long flags;

        pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
                                       sizeof(struct pd_uinfo) * idx);
        spin_lock_irqsave(&dev->core_dev->lock, flags);
        if (dev->pdr_tail != PPC4XX_LAST_PD)
                dev->pdr_tail++;
        else
                dev->pdr_tail = 0;
        pd_uinfo->state = PD_ENTRY_FREE;
        spin_unlock_irqrestore(&dev->core_dev->lock, flags);

        return 0;
}

static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev,
                                       dma_addr_t *pd_dma, u32 idx)
{
        *pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx;

        return dev->pdr + sizeof(struct ce_pd) * idx;
}

/**
 * alloc memory for the gather ring
 * no need to alloc buf for the ring
 * gdr_tail, gdr_head and gdr_count are initialized by this function
 */
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
        dev->gdr = dma_alloc_coherent(dev->core_dev->device,
                                      sizeof(struct ce_gd) * PPC4XX_NUM_GD,
                                      &dev->gdr_pa, GFP_ATOMIC);
        if (!dev->gdr)
                return -ENOMEM;

        memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD);

        return 0;
}

static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
{
        dma_free_coherent(dev->core_dev->device,
                          sizeof(struct ce_gd) * PPC4XX_NUM_GD,
                          dev->gdr, dev->gdr_pa);
}

/*
 * When this function is called, preemption or interrupts
 * must already be disabled.
 */
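/*
 * Claiming n entries at once: tmp = (head + n) % ring_size.  If the
 * span (head, tmp] would cross tail, n consecutive entries do not fit
 * and the ring reports full.  head == tail means empty, in which case
 * n entries always fit because n < PPC4XX_NUM_GD is checked first.
 */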
u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
{
        u32 retval;
        u32 tmp;

        if (n >= PPC4XX_NUM_GD)
                return ERING_WAS_FULL;

        retval = dev->gdr_head;
        tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
        if (dev->gdr_head > dev->gdr_tail) {
                if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
                        return ERING_WAS_FULL;
        } else if (dev->gdr_head < dev->gdr_tail) {
                if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
                        return ERING_WAS_FULL;
        }
        dev->gdr_head = tmp;

        return retval;
}

static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->core_dev->lock, flags);
        if (dev->gdr_tail == dev->gdr_head) {
                spin_unlock_irqrestore(&dev->core_dev->lock, flags);
                return 0;
        }

        if (dev->gdr_tail != PPC4XX_LAST_GD)
                dev->gdr_tail++;
        else
                dev->gdr_tail = 0;

        spin_unlock_irqrestore(&dev->core_dev->lock, flags);

        return 0;
}

static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
                                              dma_addr_t *gd_dma, u32 idx)
{
        *gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;

        return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx);
}

/**
 * alloc memory for the scatter ring
 * need to alloc buf for the ring
 * sdr_tail, sdr_head and sdr_count are initialized by this function
 */
static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
{
        int i;
        struct ce_sd *sd_array;

        /* alloc memory for scatter descriptor ring */
        dev->sdr = dma_alloc_coherent(dev->core_dev->device,
                                      sizeof(struct ce_sd) * PPC4XX_NUM_SD,
                                      &dev->sdr_pa, GFP_ATOMIC);
        if (!dev->sdr)
                return -ENOMEM;

        dev->scatter_buffer_size = PPC4XX_SD_BUFFER_SIZE;
        dev->scatter_buffer_va =
                dma_alloc_coherent(dev->core_dev->device,
                        dev->scatter_buffer_size * PPC4XX_NUM_SD,
                        &dev->scatter_buffer_pa, GFP_ATOMIC);
        if (!dev->scatter_buffer_va) {
                dma_free_coherent(dev->core_dev->device,
                                  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
                                  dev->sdr, dev->sdr_pa);
                return -ENOMEM;
        }

        sd_array = dev->sdr;

        for (i = 0; i < PPC4XX_NUM_SD; i++) {
                sd_array[i].ptr = dev->scatter_buffer_pa +
                                  dev->scatter_buffer_size * i;
        }

        return 0;
}

static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
{
        if (dev->sdr != NULL)
                dma_free_coherent(dev->core_dev->device,
                                  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
                                  dev->sdr, dev->sdr_pa);

        if (dev->scatter_buffer_va != NULL)
                dma_free_coherent(dev->core_dev->device,
                                  dev->scatter_buffer_size * PPC4XX_NUM_SD,
                                  dev->scatter_buffer_va,
                                  dev->scatter_buffer_pa);
}

/*
 * When this function is called, preemption or interrupts must already
 * be disabled; same claim logic as crypto4xx_get_n_gd().
 */
static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
{
        u32 retval;
        u32 tmp;

        if (n >= PPC4XX_NUM_SD)
                return ERING_WAS_FULL;

        retval = dev->sdr_head;
        tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
        if (dev->sdr_head > dev->sdr_tail) {
                if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
                        return ERING_WAS_FULL;
        } else if (dev->sdr_head < dev->sdr_tail) {
                if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
                        return ERING_WAS_FULL;
        } /* the head == tail (empty) case was already taken care of */
        dev->sdr_head = tmp;

        return retval;
}

static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->core_dev->lock, flags);
        if (dev->sdr_tail == dev->sdr_head) {
                spin_unlock_irqrestore(&dev->core_dev->lock, flags);
                return 0;
        }
        if (dev->sdr_tail != PPC4XX_LAST_SD)
                dev->sdr_tail++;
        else
                dev->sdr_tail = 0;
        spin_unlock_irqrestore(&dev->core_dev->lock, flags);

        return 0;
}

static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
                                              dma_addr_t *sd_dma, u32 idx)
{
        *sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;

        return (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx);
}

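/*
 * Drain one chunk from the engine's scatter buffers into the destination
 * at *addr.  Returns 1 while the remaining length still spans more than
 * one scatter buffer (the caller keeps looping), 0 once the current
 * destination segment has been satisfied.
 */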
static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev,
                                   dma_addr_t *addr, u32 *length,
                                   u32 *idx, u32 *offset, u32 *nbytes)
{
        u32 len;

        if (*length > dev->scatter_buffer_size) {
                memcpy(phys_to_virt(*addr),
                        dev->scatter_buffer_va +
                        *idx * dev->scatter_buffer_size + *offset,
                        dev->scatter_buffer_size);
                *offset = 0;
                *length -= dev->scatter_buffer_size;
                *nbytes -= dev->scatter_buffer_size;
                if (*idx == PPC4XX_LAST_SD)
                        *idx = 0;
                else
                        (*idx)++;
                *addr = *addr + dev->scatter_buffer_size;
                return 1;
        } else if (*length < dev->scatter_buffer_size) {
                memcpy(phys_to_virt(*addr),
                        dev->scatter_buffer_va +
                        *idx * dev->scatter_buffer_size + *offset, *length);
                if ((*offset + *length) == dev->scatter_buffer_size) {
                        if (*idx == PPC4XX_LAST_SD)
                                *idx = 0;
                        else
                                (*idx)++;
                        *nbytes -= *length;
                        *offset = 0;
                } else {
                        *nbytes -= *length;
                        *offset += *length;
                }

                return 0;
        } else {
                len = (*nbytes <= dev->scatter_buffer_size) ?
                                (*nbytes) : dev->scatter_buffer_size;
                memcpy(phys_to_virt(*addr),
                        dev->scatter_buffer_va +
                        *idx * dev->scatter_buffer_size + *offset,
                        len);
                *offset = 0;
                *nbytes -= len;

                if (*idx == PPC4XX_LAST_SD)
                        *idx = 0;
                else
                        (*idx)++;

                return 0;
        }
}

static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
                                      struct ce_pd *pd,
                                      struct pd_uinfo *pd_uinfo,
                                      u32 nbytes,
                                      struct scatterlist *dst)
{
        dma_addr_t addr;
        u32 this_sd;
        u32 offset;
        u32 len;
        u32 i;
        u32 sg_len;
        struct scatterlist *sg;

        this_sd = pd_uinfo->first_sd;
        offset = 0;
        i = 0;

        while (nbytes) {
                sg = &dst[i];
                sg_len = sg->length;
                addr = dma_map_page(dev->core_dev->device, sg_page(sg),
                                sg->offset, sg->length, DMA_TO_DEVICE);

                if (offset == 0) {
                        len = (nbytes <= sg->length) ? nbytes : sg->length;
                        while (crypto4xx_fill_one_page(dev, &addr, &len,
                                &this_sd, &offset, &nbytes))
                                ;
                        if (!nbytes)
                                return;
                        i++;
                } else {
                        len = (nbytes <= (dev->scatter_buffer_size - offset)) ?
                                nbytes : (dev->scatter_buffer_size - offset);
                        len = (sg->length < len) ? sg->length : len;
                        while (crypto4xx_fill_one_page(dev, &addr, &len,
                                               &this_sd, &offset, &nbytes))
                                ;
                        if (!nbytes)
                                return;
                        sg_len -= len;
                        if (sg_len) {
                                addr += len;
                                while (crypto4xx_fill_one_page(dev, &addr,
                                        &sg_len, &this_sd, &offset, &nbytes))
                                        ;
                        }
                        i++;
                }
        }
}

static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo,
                                        struct crypto4xx_ctx *ctx)
{
        struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
        struct sa_state_record *state_record =
                                (struct sa_state_record *) pd_uinfo->sr_va;

        if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
                memcpy((void *) pd_uinfo->dest_va, state_record->save_digest,
                       SA_HASH_ALG_SHA1_DIGEST_SIZE);
        }

        return 0;
}

static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
                                  struct pd_uinfo *pd_uinfo)
{
        int i;

        if (pd_uinfo->num_gd) {
                for (i = 0; i < pd_uinfo->num_gd; i++)
                        crypto4xx_put_gd_to_gdr(dev);
                pd_uinfo->first_gd = 0xffffffff;
                pd_uinfo->num_gd = 0;
        }
        if (pd_uinfo->num_sd) {
                for (i = 0; i < pd_uinfo->num_sd; i++)
                        crypto4xx_put_sd_to_sdr(dev);

                pd_uinfo->first_sd = 0xffffffff;
                pd_uinfo->num_sd = 0;
        }
}

static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
                                     struct pd_uinfo *pd_uinfo,
                                     struct ce_pd *pd)
{
        struct crypto4xx_ctx *ctx;
        struct ablkcipher_request *ablk_req;
        struct scatterlist *dst;
        dma_addr_t addr;

        ablk_req = ablkcipher_request_cast(pd_uinfo->async_req);
        ctx = crypto_tfm_ctx(ablk_req->base.tfm);

        if (pd_uinfo->using_sd) {
                crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, ablk_req->nbytes,
                                          ablk_req->dst);
        } else {
                dst = pd_uinfo->dest_va;
                addr = dma_map_page(dev->core_dev->device, sg_page(dst),
                                    dst->offset, dst->length, DMA_FROM_DEVICE);
        }
        crypto4xx_ret_sg_desc(dev, pd_uinfo);
        if (ablk_req->base.complete != NULL)
                ablk_req->base.complete(&ablk_req->base, 0);

        return 0;
}

static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
                                struct pd_uinfo *pd_uinfo)
{
        struct crypto4xx_ctx *ctx;
        struct ahash_request *ahash_req;

        ahash_req = ahash_request_cast(pd_uinfo->async_req);
        ctx = crypto_tfm_ctx(ahash_req->base.tfm);

        crypto4xx_copy_digest_to_dst(pd_uinfo, ctx);
        crypto4xx_ret_sg_desc(dev, pd_uinfo);
        /* call the user-provided completion callback */
        if (ahash_req->base.complete != NULL)
                ahash_req->base.complete(&ahash_req->base, 0);

        return 0;
}

static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
{
        struct ce_pd *pd;
        struct pd_uinfo *pd_uinfo;

        pd = dev->pdr + sizeof(struct ce_pd) * idx;
        pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo) * idx;
        if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
                        CRYPTO_ALG_TYPE_ABLKCIPHER)
                return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
        else
                return crypto4xx_ahash_done(dev, pd_uinfo);
}

/**
 * Note: Only use this function to copy items that are word-aligned.
 */
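/*
 * Example: with len == 6 the loop below copies one full word as
 * little-endian, then 'case 2' writes two zero pad bytes followed by the
 * remaining two bytes in reversed order, so the destination is always
 * written in whole 4-byte words.
 */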
void crypto4xx_memcpy_le(unsigned int *dst,
                         const unsigned char *buf,
                         int len)
{
        u8 *tmp;

        for (; len >= 4; buf += 4, len -= 4)
                *dst++ = cpu_to_le32(*(unsigned int *) buf);

        tmp = (u8 *)dst;
        switch (len) {
        case 3:
                *tmp++ = 0;
                *tmp++ = *(buf+2);
                *tmp++ = *(buf+1);
                *tmp++ = *buf;
                break;
        case 2:
                *tmp++ = 0;
                *tmp++ = 0;
                *tmp++ = *(buf+1);
                *tmp++ = *buf;
                break;
        case 1:
                *tmp++ = 0;
                *tmp++ = 0;
                *tmp++ = 0;
                *tmp++ = *buf;
                break;
        default:
                break;
        }
}

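/*
 * Tear down everything the probe routine set up; called from
 * crypto4xx_remove() once the algorithms have been unregistered.
 */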
static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
{
        crypto4xx_destroy_pdr(core_dev->dev);
        crypto4xx_destroy_gdr(core_dev->dev);
        crypto4xx_destroy_sdr(core_dev->dev);
        dev_set_drvdata(core_dev->device, NULL);
        iounmap(core_dev->dev->ce_base);
        kfree(core_dev->dev);
        kfree(core_dev);
}

void crypto4xx_return_pd(struct crypto4xx_device *dev,
                         u32 pd_entry, struct ce_pd *pd,
                         struct pd_uinfo *pd_uinfo)
{
        /* irq should be already disabled */
        dev->pdr_head = pd_entry;
        pd->pd_ctl.w = 0;
        pd->pd_ctl_len.w = 0;
        pd_uinfo->state = PD_ENTRY_FREE;
}

/*
 * derive number of elements in scatterlist
 * Shamelessly copied from talitos.c
 */
static int get_sg_count(struct scatterlist *sg_list, int nbytes)
{
        struct scatterlist *sg = sg_list;
        int sg_nents = 0;

        while (nbytes) {
                sg_nents++;
                if (sg->length > nbytes)
                        break;
                nbytes -= sg->length;
                sg = sg_next(sg);
        }

        return sg_nents;
}

/* parameter named 'cur' rather than 'current', which the kernel reserves
 * for the running-task macro */
static u32 get_next_gd(u32 cur)
{
        if (cur != PPC4XX_LAST_GD)
                return cur + 1;
        else
                return 0;
}

static u32 get_next_sd(u32 cur)
{
        if (cur != PPC4XX_LAST_SD)
                return cur + 1;
        else
                return 0;
}

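/*
 * Build one packet descriptor for a request: atomically claim the needed
 * gather/scatter/PD ring entries, point the PD at the SA (copied into the
 * per-PD shadow SA when an IV must be patched in), then kick the engine.
 * Returns -EINPROGRESS on success or -EAGAIN when a ring is full.
 */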
u32 crypto4xx_build_pd(struct crypto_async_request *req,
                       struct crypto4xx_ctx *ctx,
                       struct scatterlist *src,
                       struct scatterlist *dst,
                       unsigned int datalen,
                       void *iv, u32 iv_len)
{
        struct crypto4xx_device *dev = ctx->dev;
        dma_addr_t addr, pd_dma, sd_dma, gd_dma;
        struct dynamic_sa_ctl *sa;
        struct scatterlist *sg;
        struct ce_gd *gd;
        struct ce_pd *pd;
        u32 num_gd, num_sd;
        u32 fst_gd = 0xffffffff;
        u32 fst_sd = 0xffffffff;
        u32 pd_entry;
        unsigned long flags;
        struct pd_uinfo *pd_uinfo = NULL;
        unsigned int nbytes = datalen, idx;
        unsigned int ivlen = 0;
        u32 gd_idx = 0;

        /* figure out how many gd entries are needed */
        num_gd = get_sg_count(src, datalen);
        if (num_gd == 1)
                num_gd = 0;

        /* figure out how many sd entries are needed */
        if (sg_is_last(dst) || ctx->is_hash) {
                num_sd = 0;
        } else {
                if (datalen > PPC4XX_SD_BUFFER_SIZE) {
                        num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
                        if (datalen % PPC4XX_SD_BUFFER_SIZE)
                                num_sd++;
                } else {
                        num_sd = 1;
                }
        }

        /*
         * The following section must run under the lock: the gather and
         * scatter ring entries have to be consecutive, and if we run out
         * of any kind of descriptor, the ones already claimed must be
         * returned to their original place.
         */
        spin_lock_irqsave(&dev->core_dev->lock, flags);
        if (num_gd) {
                fst_gd = crypto4xx_get_n_gd(dev, num_gd);
                if (fst_gd == ERING_WAS_FULL) {
                        spin_unlock_irqrestore(&dev->core_dev->lock, flags);
                        return -EAGAIN;
                }
        }
        if (num_sd) {
                fst_sd = crypto4xx_get_n_sd(dev, num_sd);
                if (fst_sd == ERING_WAS_FULL) {
                        if (num_gd)
                                dev->gdr_head = fst_gd;
                        spin_unlock_irqrestore(&dev->core_dev->lock, flags);
                        return -EAGAIN;
                }
        }
        pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
        if (pd_entry == ERING_WAS_FULL) {
                if (num_gd)
                        dev->gdr_head = fst_gd;
                if (num_sd)
                        dev->sdr_head = fst_sd;
                spin_unlock_irqrestore(&dev->core_dev->lock, flags);
                return -EAGAIN;
        }
        spin_unlock_irqrestore(&dev->core_dev->lock, flags);

        pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
                                       sizeof(struct pd_uinfo) * pd_entry);
        pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry);
        pd_uinfo->async_req = req;
        pd_uinfo->num_gd = num_gd;
        pd_uinfo->num_sd = num_sd;

        if (iv_len || ctx->is_hash) {
                ivlen = iv_len;
                pd->sa = pd_uinfo->sa_pa;
                sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va;
                if (ctx->direction == DIR_INBOUND)
                        memcpy(sa, ctx->sa_in, ctx->sa_len * 4);
                else
                        memcpy(sa, ctx->sa_out, ctx->sa_len * 4);

                memcpy((void *) sa + ctx->offset_to_sr_ptr,
                        &pd_uinfo->sr_pa, 4);

                if (iv_len)
                        crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len);
        } else {
                if (ctx->direction == DIR_INBOUND) {
                        pd->sa = ctx->sa_in_dma_addr;
                        sa = (struct dynamic_sa_ctl *) ctx->sa_in;
                } else {
                        pd->sa = ctx->sa_out_dma_addr;
                        sa = (struct dynamic_sa_ctl *) ctx->sa_out;
                }
        }
        pd->sa_len = ctx->sa_len;
        if (num_gd) {
                /* get first gd we are going to use */
                gd_idx = fst_gd;
                pd_uinfo->first_gd = fst_gd;
                pd_uinfo->num_gd = num_gd;
                gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
                pd->src = gd_dma;
                /* enable gather */
                sa->sa_command_0.bf.gather = 1;
                idx = 0;
                /* walk the sg, and setup gather array */
                while (nbytes) {
                        sg = &src[idx];
                        addr = dma_map_page(dev->core_dev->device, sg_page(sg),
                                    sg->offset, sg->length, DMA_TO_DEVICE);
                        gd->ptr = addr;
                        gd->ctl_len.len = sg->length;
                        gd->ctl_len.done = 0;
                        gd->ctl_len.ready = 1;
                        if (sg->length >= nbytes)
                                break;
                        nbytes -= sg->length;
                        gd_idx = get_next_gd(gd_idx);
                        gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
                        idx++;
                }
        } else {
                pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
                                src->offset, src->length, DMA_TO_DEVICE);
                /*
                 * Disable gather in sa command
                 */
                sa->sa_command_0.bf.gather = 0;
                /*
                 * Indicate gather array is not used
                 */
                pd_uinfo->first_gd = 0xffffffff;
                pd_uinfo->num_gd = 0;
        }
        if (ctx->is_hash || sg_is_last(dst)) {
                /*
                 * We know the application gave us a dst that is one whole
                 * piece of memory, so there is no need to use the scatter
                 * ring.  In the is_hash case, the icv is always at the end
                 * of the src data.
                 */
                pd_uinfo->using_sd = 0;
                pd_uinfo->first_sd = 0xffffffff;
                pd_uinfo->num_sd = 0;
                pd_uinfo->dest_va = dst;
                sa->sa_command_0.bf.scatter = 0;
                if (ctx->is_hash)
                        pd->dest = virt_to_phys((void *)dst);
                else
                        pd->dest = (u32)dma_map_page(dev->core_dev->device,
                                        sg_page(dst), dst->offset,
                                        dst->length, DMA_TO_DEVICE);
        } else {
                struct ce_sd *sd = NULL;
                u32 sd_idx = fst_sd;

                nbytes = datalen;
                sa->sa_command_0.bf.scatter = 1;
                pd_uinfo->using_sd = 1;
                pd_uinfo->dest_va = dst;
                pd_uinfo->first_sd = fst_sd;
                pd_uinfo->num_sd = num_sd;
                sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
                pd->dest = sd_dma;
                /* setup scatter descriptor */
                sd->ctl.done = 0;
                sd->ctl.rdy = 1;
                /* sd->ptr should be setup by sd_init routine */
                idx = 0;
                if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
                        nbytes -= PPC4XX_SD_BUFFER_SIZE;
                else
                        nbytes = 0;
                while (nbytes) {
                        sd_idx = get_next_sd(sd_idx);
                        sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
                        /* setup scatter descriptor */
                        sd->ctl.done = 0;
                        sd->ctl.rdy = 1;
                        if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
                                nbytes -= PPC4XX_SD_BUFFER_SIZE;
                        else
                                /*
                                 * SD entry can hold PPC4XX_SD_BUFFER_SIZE,
                                 * which is more than nbytes, so done.
                                 */
                                nbytes = 0;
                }
        }

        sa->sa_command_1.bf.hash_crypto_offset = 0;
        pd->pd_ctl.w = ctx->pd_ctl;
        pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen;
        pd_uinfo->state = PD_ENTRY_INUSE;
        wmb();
        /* write any value to push engine to read a pd */
        writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
        return -EINPROGRESS;
}

/**
 * Algorithm Registration Functions
 */
static int crypto4xx_alg_init(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct crypto4xx_alg *amcc_alg = crypto_alg_to_crypto4xx_alg(alg);
        struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->dev = amcc_alg->dev;
        ctx->sa_in = NULL;
        ctx->sa_out = NULL;
        ctx->sa_in_dma_addr = 0;
        ctx->sa_out_dma_addr = 0;
        ctx->sa_len = 0;

        switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
        default:
                tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
                break;
        case CRYPTO_ALG_TYPE_AHASH:
                crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                         sizeof(struct crypto4xx_ctx));
                break;
        }

        return 0;
}

static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
{
        struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto4xx_free_sa(ctx);
        crypto4xx_free_state_record(ctx);
}

int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
                           struct crypto4xx_alg_common *crypto_alg,
                           int array_size)
{
        struct crypto4xx_alg *alg;
        int i;
        int rc = 0;

        for (i = 0; i < array_size; i++) {
                alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
                if (!alg)
                        return -ENOMEM;

                alg->alg = crypto_alg[i];
                alg->dev = sec_dev;

                switch (alg->alg.type) {
                case CRYPTO_ALG_TYPE_AHASH:
                        rc = crypto_register_ahash(&alg->alg.u.hash);
                        break;

                default:
                        rc = crypto_register_alg(&alg->alg.u.cipher);
                        break;
                }

                if (rc) {
                        /* never added to alg_list, so no list_del() here */
                        kfree(alg);
                } else {
                        list_add_tail(&alg->entry, &sec_dev->alg_list);
                }
        }

        return 0;
}

static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
{
        struct crypto4xx_alg *alg, *tmp;

        list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
                list_del(&alg->entry);
                switch (alg->alg.type) {
                case CRYPTO_ALG_TYPE_AHASH:
                        crypto_unregister_ahash(&alg->alg.u.hash);
                        break;

                default:
                        crypto_unregister_alg(&alg->alg.u.cipher);
                }
                kfree(alg);
        }
}

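/*
 * Bottom half: walk the PD ring from tail, completing every descriptor
 * the engine has marked done and stopping at the first still-pending one.
 */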
static void crypto4xx_bh_tasklet_cb(unsigned long data)
{
        struct device *dev = (struct device *)data;
        struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
        struct pd_uinfo *pd_uinfo;
        struct ce_pd *pd;
        u32 tail;

        while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) {
                tail = core_dev->dev->pdr_tail;
                pd_uinfo = core_dev->dev->pdr_uinfo +
                        sizeof(struct pd_uinfo) * tail;
                pd = core_dev->dev->pdr + sizeof(struct ce_pd) * tail;
                if ((pd_uinfo->state == PD_ENTRY_INUSE) &&
                                   pd->pd_ctl.bf.pe_done &&
                                   !pd->pd_ctl.bf.host_ready) {
                        pd->pd_ctl.bf.pe_done = 0;
                        crypto4xx_pd_done(core_dev->dev, tail);
                        crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
                        pd_uinfo->state = PD_ENTRY_FREE;
                } else {
                        /* if tail not done, break */
                        break;
                }
        }
}

/**
 * Top half of the ISR: acknowledge the interrupt and defer the
 * completion work to the tasklet.
 */
static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
{
        struct device *dev = (struct device *)data;
        struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

        if (core_dev->dev->ce_base == 0)
                return IRQ_NONE;

        writel(PPC4XX_INTERRUPT_CLR,
               core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
        tasklet_schedule(&core_dev->tasklet);

        return IRQ_HANDLED;
}

/**
 * Supported Crypto Algorithms
 */
struct crypto4xx_alg_common crypto4xx_alg[] = {
        /* Crypto AES modes */
        { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
                .cra_name       = "cbc(aes)",
                .cra_driver_name = "cbc-aes-ppc4xx",
                .cra_priority   = CRYPTO4XX_CRYPTO_PRIORITY,
                .cra_flags      = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
                .cra_blocksize  = AES_BLOCK_SIZE,
                .cra_ctxsize    = sizeof(struct crypto4xx_ctx),
                .cra_type       = &crypto_ablkcipher_type,
                .cra_init       = crypto4xx_alg_init,
                .cra_exit       = crypto4xx_alg_exit,
                .cra_module     = THIS_MODULE,
                .cra_u          = {
                        .ablkcipher = {
                                .min_keysize    = AES_MIN_KEY_SIZE,
                                .max_keysize    = AES_MAX_KEY_SIZE,
                                .ivsize         = AES_IV_SIZE,
                                .setkey         = crypto4xx_setkey_aes_cbc,
                                .encrypt        = crypto4xx_encrypt,
                                .decrypt        = crypto4xx_decrypt,
                        }
                }
        }},
};
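
/*
 * A minimal sketch of how a kernel user would reach this driver through
 * the CryptoAPI (error handling omitted; "cbc(aes)" resolves to the table
 * entry above whenever cra_priority wins over competing implementations):
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, keylen);
 */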

/**
 * Device Probe and Initialization Routine
 */
static int __init crypto4xx_probe(struct of_device *ofdev,
                                  const struct of_device_id *match)
{
        int rc;
        struct resource res;
        struct device *dev = &ofdev->dev;
        struct crypto4xx_core_device *core_dev;

        rc = of_address_to_resource(ofdev->node, 0, &res);
        if (rc)
                return -ENODEV;

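        /*
         * Pulse the crypto engine soft-reset bit in SDR0_SRST; the bit
         * layout differs between SoCs, hence the per-compatible branches.
         */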
        if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) {
                mtdcri(SDR0, PPC460EX_SDR0_SRST,
                       mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
                mtdcri(SDR0, PPC460EX_SDR0_SRST,
                       mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
        } else if (of_find_compatible_node(NULL, NULL,
                        "amcc,ppc405ex-crypto")) {
                mtdcri(SDR0, PPC405EX_SDR0_SRST,
                       mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
                mtdcri(SDR0, PPC405EX_SDR0_SRST,
                       mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
        } else if (of_find_compatible_node(NULL, NULL,
                        "amcc,ppc460sx-crypto")) {
                mtdcri(SDR0, PPC460SX_SDR0_SRST,
                       mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
                mtdcri(SDR0, PPC460SX_SDR0_SRST,
                       mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
        } else {
                printk(KERN_ERR "Crypto Function Not supported!\n");
                return -EINVAL;
        }

        core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
        if (!core_dev)
                return -ENOMEM;

        dev_set_drvdata(dev, core_dev);
        core_dev->ofdev = ofdev;
        core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
        if (!core_dev->dev) {
                rc = -ENOMEM;	/* don't fall through with rc == 0 */
                goto err_alloc_dev;
        }

        core_dev->dev->core_dev = core_dev;
        core_dev->device = dev;
        spin_lock_init(&core_dev->lock);
        INIT_LIST_HEAD(&core_dev->dev->alg_list);
        rc = crypto4xx_build_pdr(core_dev->dev);
        if (rc)
                goto err_build_pdr;

        rc = crypto4xx_build_gdr(core_dev->dev);
        if (rc)
                goto err_build_gdr;

        rc = crypto4xx_build_sdr(core_dev->dev);
        if (rc)
                goto err_build_sdr;

        /* Init tasklet for bottom half processing */
        tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
                     (unsigned long) dev);

        /* Register for Crypto isr, Crypto Engine IRQ */
        core_dev->irq = irq_of_parse_and_map(ofdev->node, 0);
        rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
                         core_dev->dev->name, dev);
        if (rc)
                goto err_request_irq;

        core_dev->dev->ce_base = of_iomap(ofdev->node, 0);
        if (!core_dev->dev->ce_base) {
                dev_err(dev, "failed to of_iomap\n");
                rc = -ENOMEM;	/* don't fall through with rc == 0 */
                goto err_iomap;
        }

        /* need to setup pdr, rdr, gdr and sdr before this */
        crypto4xx_hw_init(core_dev->dev);

        /* Register security algorithms with Linux CryptoAPI */
        rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
                               ARRAY_SIZE(crypto4xx_alg));
        if (rc)
                goto err_start_dev;

        return 0;

err_start_dev:
        iounmap(core_dev->dev->ce_base);
err_iomap:
        free_irq(core_dev->irq, dev);
        irq_dispose_mapping(core_dev->irq);
        tasklet_kill(&core_dev->tasklet);
err_request_irq:
        crypto4xx_destroy_sdr(core_dev->dev);
err_build_sdr:
        crypto4xx_destroy_gdr(core_dev->dev);
err_build_gdr:
        crypto4xx_destroy_pdr(core_dev->dev);
err_build_pdr:
        kfree(core_dev->dev);
err_alloc_dev:
        kfree(core_dev);

        return rc;
}

static int __exit crypto4xx_remove(struct of_device *ofdev)
{
        struct device *dev = &ofdev->dev;
        struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

        free_irq(core_dev->irq, dev);
        irq_dispose_mapping(core_dev->irq);

        tasklet_kill(&core_dev->tasklet);
        /* Un-register with Linux CryptoAPI */
        crypto4xx_unregister_alg(core_dev->dev);
        /* Free all allocated memory */
        crypto4xx_stop_all(core_dev);

        return 0;
}

static struct of_device_id crypto4xx_match[] = {
        { .compatible      = "amcc,ppc4xx-crypto",},
        { },
};

static struct of_platform_driver crypto4xx_driver = {
        .name           = "crypto4xx",
        .match_table    = crypto4xx_match,
        .probe          = crypto4xx_probe,
        .remove         = crypto4xx_remove,
};

static int __init crypto4xx_init(void)
{
        return of_register_platform_driver(&crypto4xx_driver);
}

static void __exit crypto4xx_exit(void)
{
        of_unregister_platform_driver(&crypto4xx_driver);
}

module_init(crypto4xx_init);
module_exit(crypto4xx_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");