sahara.c revision 9e95275cf351ebbb02316addfa2d8d87173a4cd7
/*
 * Cryptographic API.
 *
 * Support for SAHARA cryptographic accelerator.
 *
 * Copyright (c) 2013 Vista Silicon S.L.
 * Author: Javier Martin <javier.martin@vista-silicon.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Based on omap-aes.c and tegra-aes.c
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#define SAHARA_NAME "sahara"
#define SAHARA_VERSION_3	3
#define SAHARA_TIMEOUT_MS	1000
#define SAHARA_MAX_HW_DESC	2
#define SAHARA_MAX_HW_LINK	20

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_NEW_KEY		BIT(3)
#define FLAGS_BUSY		4

#define SAHARA_HDR_BASE			0x00800000
#define SAHARA_HDR_SKHA_ALG_AES		0
#define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
#define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
#define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
#define SAHARA_HDR_FORM_DATA		(5 << 16)
#define SAHARA_HDR_FORM_KEY		(8 << 16)
#define SAHARA_HDR_LLO			(1 << 24)
#define SAHARA_HDR_CHA_SKHA		(1 << 28)
#define SAHARA_HDR_CHA_MDHA		(2 << 28)
#define SAHARA_HDR_PARITY_BIT		(1 << 31)

/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH	1

#define SAHARA_REG_VERSION	0x00
#define SAHARA_REG_DAR		0x04
#define SAHARA_REG_CONTROL	0x08
#define		SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
#define		SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
#define		SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
#define		SAHARA_CONTROL_ENABLE_INT	(1 << 4)
#define SAHARA_REG_CMD		0x0C
#define		SAHARA_CMD_RESET		(1 << 0)
#define		SAHARA_CMD_CLEAR_INT		(1 << 8)
#define		SAHARA_CMD_CLEAR_ERR		(1 << 9)
#define		SAHARA_CMD_SINGLE_STEP		(1 << 10)
#define		SAHARA_CMD_MODE_BATCH		(1 << 16)
#define		SAHARA_CMD_MODE_DEBUG		(1 << 18)
#define	SAHARA_REG_STATUS	0x10
#define		SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
#define			SAHARA_STATE_IDLE	0
#define			SAHARA_STATE_BUSY	1
#define			SAHARA_STATE_ERR	2
#define			SAHARA_STATE_FAULT	3
#define			SAHARA_STATE_COMPLETE	4
#define			SAHARA_STATE_COMP_FLAG	(1 << 2)
#define		SAHARA_STATUS_DAR_FULL		(1 << 3)
#define		SAHARA_STATUS_ERROR		(1 << 4)
#define		SAHARA_STATUS_SECURE		(1 << 5)
#define		SAHARA_STATUS_FAIL		(1 << 6)
#define		SAHARA_STATUS_INIT		(1 << 7)
#define		SAHARA_STATUS_RNG_RESEED	(1 << 8)
#define		SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
#define		SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
#define		SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
#define		SAHARA_STATUS_MODE_BATCH	(1 << 16)
#define		SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
#define		SAHARA_STATUS_MODE_DEBUG	(1 << 18)
#define		SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
#define SAHARA_REG_ERRSTATUS	0x14
#define		SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
#define			SAHARA_ERRSOURCE_CHA	14
#define			SAHARA_ERRSOURCE_DMA	15
#define		SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
#define		SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
#define		SAHARA_ERRSTATUS_GET_DMASRC(x)	(((x) >> 13) & 0x7)
#define		SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
#define		SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
#define SAHARA_REG_FADDR	0x18
#define SAHARA_REG_CDAR		0x1C
#define SAHARA_REG_IDAR		0x20

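/*
 * Descriptor and link structures are laid out exactly as the SAHARA
 * DMA engine expects to find them in memory, so they are allocated
 * from DMA coherent memory and chained via physical addresses; a
 * 'next' pointer of 0 terminates the chain.
 */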
struct sahara_hw_desc {
	u32		hdr;
	u32		len1;
	dma_addr_t	p1;
	u32		len2;
	dma_addr_t	p2;
	dma_addr_t	next;
};

struct sahara_hw_link {
	u32		len;
	dma_addr_t	p;
	dma_addr_t	next;
};

struct sahara_ctx {
	struct sahara_dev *dev;
	unsigned long flags;
	int keylen;
	u8 key[AES_KEYSIZE_128];
	struct crypto_ablkcipher *fallback;
};

struct sahara_aes_reqctx {
	unsigned long mode;
};

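/*
 * Per-device state. Only a single SAHARA instance is supported: probe
 * stores the device in the global dev_ptr below, which the algorithm
 * entry points use to reach the hardware.
 */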
struct sahara_dev {
	struct device		*device;
	void __iomem		*regs_base;
	struct clk		*clk_ipg;
	struct clk		*clk_ahb;

	struct sahara_ctx	*ctx;
	spinlock_t		lock;
	struct crypto_queue	queue;
	unsigned long		flags;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];

	u8			*key_base;
	dma_addr_t		key_phys_base;

	u8			*iv_base;
	dma_addr_t		iv_phys_base;

	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];

	struct ablkcipher_request *req;
	size_t			total;
	struct scatterlist	*in_sg;
	unsigned int		nb_in_sg;
	struct scatterlist	*out_sg;
	unsigned int		nb_out_sg;

	u32			error;
	struct timer_list	watchdog;
};

static struct sahara_dev *dev_ptr;

static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->regs_base + reg);
}

static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
	return readl(dev->regs_base + reg);
}

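/*
 * Build the header word for the key/IV descriptor. The hardware checks
 * the parity of the header, so every optional bit set below is
 * compensated by toggling SAHARA_HDR_PARITY_BIT.
 */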
static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
{
	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;

	if (dev->flags & FLAGS_CBC) {
		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	if (dev->flags & FLAGS_ENCRYPT) {
		hdr |= SAHARA_HDR_SKHA_OP_ENC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	return hdr;
}

static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
{
	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
}

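/*
 * Count how many scatterlist entries are needed to cover 'total' bytes,
 * i.e. how many hardware links the transfer will consume.
 */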
static int sahara_sg_length(struct scatterlist *sg,
			    unsigned int total)
{
	int sg_nb;
	unsigned int len;
	struct scatterlist *sg_list;

	sg_nb = 0;
	sg_list = sg;

	while (total) {
		len = min(sg_list->length, total);

		sg_nb++;
		total -= len;

		sg_list = sg_next(sg_list);
		if (!sg_list)
			total = 0;
	}

	return sg_nb;
}

static char *sahara_err_src[16] = {
	"No error",
	"Header error",
	"Descriptor length error",
	"Descriptor length or pointer error",
	"Link length error",
	"Link pointer error",
	"Input buffer error",
	"Output buffer error",
	"Output buffer starvation",
	"Internal state fault",
	"General descriptor problem",
	"Reserved",
	"Descriptor address error",
	"Link address error",
	"CHA error",
	"DMA error"
};

static char *sahara_err_dmasize[4] = {
	"Byte transfer",
	"Half-word transfer",
	"Word transfer",
	"Reserved"
};

static char *sahara_err_dmasrc[8] = {
	"No error",
	"AHB bus error",
	"Internal IP bus error",
	"Parity error",
	"DMA crosses 256 byte boundary",
	"DMA is busy",
	"Reserved",
	"DMA HW error"
};

static char *sahara_cha_errsrc[12] = {
	"Input buffer non-empty",
	"Illegal address",
	"Illegal mode",
	"Illegal data size",
	"Illegal key size",
	"Write during processing",
	"CTX read during processing",
	"HW error",
	"Input buffer disabled/underflow",
	"Output buffer disabled/overflow",
	"DES key parity error",
	"Reserved"
};

static char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };

static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
{
	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));

	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);

	dev_err(dev->device, "	- %s.\n", sahara_err_src[source]);

	if (source == SAHARA_ERRSOURCE_DMA) {
		if (error & SAHARA_ERRSTATUS_DMA_DIR)
			dev_err(dev->device, "		* DMA read.\n");
		else
			dev_err(dev->device, "		* DMA write.\n");

		dev_err(dev->device, "		* %s.\n",
		       sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
		dev_err(dev->device, "		* %s.\n",
		       sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
	} else if (source == SAHARA_ERRSOURCE_CHA) {
		dev_err(dev->device, "		* %s.\n",
			sahara_cha_errsrc[chasrc]);
		dev_err(dev->device, "		* %s.\n",
		       sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
	}
	dev_err(dev->device, "\n");
}

static char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };

static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
	u8 state;

	if (!IS_ENABLED(DEBUG))
		return;

	state = SAHARA_STATUS_GET_STATE(status);

	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
		__func__, status);

	dev_dbg(dev->device, "	- State = %d:\n", state);
	if (state & SAHARA_STATE_COMP_FLAG)
		dev_dbg(dev->device, "		* Descriptor completed. IRQ pending.\n");

	dev_dbg(dev->device, "		* %s.\n",
	       sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

	if (status & SAHARA_STATUS_DAR_FULL)
		dev_dbg(dev->device, "	- DAR Full.\n");
	if (status & SAHARA_STATUS_ERROR)
		dev_dbg(dev->device, "	- Error.\n");
	if (status & SAHARA_STATUS_SECURE)
		dev_dbg(dev->device, "	- Secure.\n");
	if (status & SAHARA_STATUS_FAIL)
		dev_dbg(dev->device, "	- Fail.\n");
	if (status & SAHARA_STATUS_RNG_RESEED)
		dev_dbg(dev->device, "	- RNG Reseed Request.\n");
	if (status & SAHARA_STATUS_ACTIVE_RNG)
		dev_dbg(dev->device, "	- RNG Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_MDHA)
		dev_dbg(dev->device, "	- MDHA Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_SKHA)
		dev_dbg(dev->device, "	- SKHA Active.\n");

	if (status & SAHARA_STATUS_MODE_BATCH)
		dev_dbg(dev->device, "	- Batch Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEDICATED)
		dev_dbg(dev->device, "	- Dedicated Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEBUG)
		dev_dbg(dev->device, "	- Debug Mode.\n");

	dev_dbg(dev->device, "	- Internal state = 0x%02x\n",
	       SAHARA_STATUS_GET_ISTATE(status));

	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
		sahara_read(dev, SAHARA_REG_CDAR));
	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
		sahara_read(dev, SAHARA_REG_IDAR));
}

static void sahara_dump_descriptors(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
		dev_dbg(dev->device, "Descriptor (%d) (0x%08x):\n",
			i, dev->hw_phys_desc[i]);
		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_desc[i]->next);
	}
	dev_dbg(dev->device, "\n");
}

static void sahara_dump_links(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
		dev_dbg(dev->device, "Link (%d) (0x%08x):\n",
			i, dev->hw_phys_link[i]);
		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_link[i]->next);
	}
	dev_dbg(dev->device, "\n");
}

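/*
 * Completion tasklet: tear down the DMA mappings, mark the device idle
 * and complete the request with the status recorded by the IRQ handler
 * (or by the watchdog on timeout).
 */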
static void sahara_aes_done_task(unsigned long data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;

	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		DMA_FROM_DEVICE);
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		DMA_TO_DEVICE);

	spin_lock(&dev->lock);
	clear_bit(FLAGS_BUSY, &dev->flags);
	spin_unlock(&dev->lock);

	dev->req->base.complete(&dev->req->base, dev->error);
}

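/*
 * Watchdog timer, armed each time a descriptor chain is submitted. If
 * it fires, the completion interrupt never arrived: dump the status and
 * error registers and fail the request with -ETIMEDOUT.
 */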
static void sahara_watchdog(unsigned long data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);

	sahara_decode_status(dev, stat);
	sahara_decode_error(dev, err);
	dev->error = -ETIMEDOUT;
	sahara_aes_done_task(data);
}

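/*
 * Build the two-descriptor chain for the current request: hw_desc[0]
 * loads the key (and the IV in CBC mode), hw_desc[1] describes the
 * payload through the input (p1) and output (p2) link lists. Writing
 * the physical address of hw_desc[0] to the DAR register kicks off
 * processing.
 */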
static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
	struct sahara_ctx *ctx = dev->ctx;
	struct scatterlist *sg;
	int ret;
	int i, j;

	/* Copy new key if necessary */
	if (ctx->flags & FLAGS_NEW_KEY) {
		memcpy(dev->key_base, ctx->key, ctx->keylen);
		ctx->flags &= ~FLAGS_NEW_KEY;

		if (dev->flags & FLAGS_CBC) {
			dev->hw_desc[0]->len1 = AES_BLOCK_SIZE;
			dev->hw_desc[0]->p1 = dev->iv_phys_base;
		} else {
			dev->hw_desc[0]->len1 = 0;
			dev->hw_desc[0]->p1 = 0;
		}
		dev->hw_desc[0]->len2 = ctx->keylen;
		dev->hw_desc[0]->p2 = dev->key_phys_base;
		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
	}
	dev->hw_desc[0]->hdr = sahara_aes_key_hdr(dev);

	dev->nb_in_sg = sahara_sg_length(dev->in_sg, dev->total);
	dev->nb_out_sg = sahara_sg_length(dev->out_sg, dev->total);
	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			 DMA_TO_DEVICE);
	if (ret != dev->nb_in_sg) {
		dev_err(dev->device, "couldn't map in sg\n");
		return -EINVAL;
	}
	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
			 DMA_FROM_DEVICE);
	if (ret != dev->nb_out_sg) {
		dev_err(dev->device, "couldn't map out sg\n");
		goto unmap_in;
	}

	/* Create input links */
	dev->hw_desc[1]->p1 = dev->hw_phys_link[0];
	sg = dev->in_sg;
	for (i = 0; i < dev->nb_in_sg; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	/* Create output links */
	dev->hw_desc[1]->p2 = dev->hw_phys_link[i];
	sg = dev->out_sg;
	for (j = i; j < dev->nb_out_sg + i; j++) {
		dev->hw_link[j]->len = sg->length;
		dev->hw_link[j]->p = sg->dma_address;
		if (j == (dev->nb_out_sg + i - 1)) {
			dev->hw_link[j]->next = 0;
		} else {
			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
			sg = sg_next(sg);
		}
	}

	/* Fill remaining fields of hw_desc[1] */
	dev->hw_desc[1]->hdr = sahara_aes_data_link_hdr(dev);
	dev->hw_desc[1]->len1 = dev->total;
	dev->hw_desc[1]->len2 = dev->total;
	dev->hw_desc[1]->next = 0;

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	/* Start processing descriptor chain. */
	mod_timer(&dev->watchdog,
		  jiffies + msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	return 0;

unmap_in:
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		DMA_TO_DEVICE);

	return -EINVAL;
}

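/*
 * Queue tasklet: take the next request off the queue, program the
 * device state (mode flags, IV, scatterlists) and start descriptor
 * processing. On setup failure the request is completed immediately
 * with the error code.
 */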
static void sahara_aes_queue_task(unsigned long data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	struct crypto_async_request *async_req, *backlog;
	struct sahara_ctx *ctx;
	struct sahara_aes_reqctx *rctx;
	struct ablkcipher_request *req;
	int ret;

	spin_lock(&dev->lock);
	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);
	if (!async_req)
		clear_bit(FLAGS_BUSY, &dev->flags);
	spin_unlock(&dev->lock);

	if (!async_req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	/* Request is ready to be dispatched by the device */
	dev_dbg(dev->device,
		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
		req->nbytes, req->src, req->dst);

	/* assign new request to device */
	dev->req = req;
	dev->total = req->nbytes;
	dev->in_sg = req->src;
	dev->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	if ((dev->flags & FLAGS_CBC) && req->info)
		memcpy(dev->iv_base, req->info, AES_BLOCK_SIZE);

	/* assign new context to device */
	ctx->dev = dev;
	dev->ctx = ctx;

	ret = sahara_hw_descriptor_create(dev);
	if (ret < 0) {
		spin_lock(&dev->lock);
		clear_bit(FLAGS_BUSY, &dev->flags);
		spin_unlock(&dev->lock);
		dev->req->base.complete(&dev->req->base, ret);
	}
}

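/*
 * The hardware only handles 128-bit AES keys. 192- and 256-bit keys
 * are not rejected but are programmed into the software fallback
 * transform, which the encrypt/decrypt entry points then use.
 */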
static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	int ret;

	ctx->keylen = keylen;

	/* SAHARA only supports 128-bit keys */
	if (keylen == AES_KEYSIZE_128) {
		memcpy(ctx->key, key, keylen);
		ctx->flags |= FLAGS_NEW_KEY;
		return 0;
	}

	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
		return -EINVAL;

	/*
	 * The requested key size is not supported by HW, do a fallback.
	 */
	ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	ctx->fallback->base.crt_flags |=
		(tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
	if (ret) {
		struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);

		tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm_aux->crt_flags |=
			(ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

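/*
 * Enqueue a request and, if the device was idle, schedule the queue
 * tasklet to dispatch it; the device handles one request at a time.
 */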
static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int err = 0;
	int busy;

	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->device,
			"request size is not an exact number of AES blocks\n");
		return -EINVAL;
	}

	ctx->dev = dev;

	rctx->mode = mode;
	spin_lock_bh(&dev->lock);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	busy = test_and_set_bit(FLAGS_BUSY, &dev->flags);
	spin_unlock_bh(&dev->lock);

	if (!busy)
		tasklet_schedule(&dev->queue_task);

	return err;
}

static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_encrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}

static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, 0);
}

static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_encrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_CBC);
}

static int sahara_aes_cra_init(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->fallback = crypto_alloc_ablkcipher(name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->fallback);
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);

	return 0;
}

static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
{
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->fallback)
		crypto_free_ablkcipher(ctx->fallback);
	ctx->fallback = NULL;
}

static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "sahara-ecb-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_ecb_encrypt,
		.decrypt	= sahara_aes_ecb_decrypt,
	}
}, {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "sahara-cbc-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_cbc_encrypt,
		.decrypt	= sahara_aes_cbc_decrypt,
	}
}
};

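/*
 * Interrupt handler: acknowledge the interrupt, translate the status
 * register into a request status and defer completion to the done
 * tasklet.
 */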
static irqreturn_t sahara_irq_handler(int irq, void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

	del_timer(&dev->watchdog);

	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
		     SAHARA_REG_CMD);

	sahara_decode_status(dev, stat);

	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
		return IRQ_NONE;
	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
		dev->error = 0;
	} else {
		sahara_decode_error(dev, err);
		dev->error = -EINVAL;
	}

	tasklet_schedule(&dev->done_task);

	return IRQ_HANDLED;
}

static int sahara_register_algs(struct sahara_dev *dev)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		INIT_LIST_HEAD(&aes_algs[i].cra_list);
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	return 0;

err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}

static void sahara_unregister_algs(struct sahara_dev *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);
}

static struct platform_device_id sahara_platform_ids[] = {
	{ .name = "sahara-imx27" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);

static struct of_device_id sahara_dt_ids[] = {
	{ .compatible = "fsl,imx27-sahara" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);

static int sahara_probe(struct platform_device *pdev)
{
	struct sahara_dev *dev;
	struct resource *res;
	u32 version;
	int irq;
	int err;
	int i;

	dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
	if (dev == NULL) {
		dev_err(&pdev->dev, "unable to alloc data struct.\n");
		return -ENOMEM;
	}

	dev->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);

	/* Get the base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dev->regs_base))
		return PTR_ERR(dev->regs_base);

	/* Get the IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq resource\n");
		return irq;
	}

	if (devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
		0, SAHARA_NAME, dev) < 0) {
		dev_err(&pdev->dev, "failed to request irq\n");
		return -ENOENT;
	}

	/* clocks */
	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(dev->clk_ipg)) {
		dev_err(&pdev->dev, "Could not get ipg clock\n");
		return PTR_ERR(dev->clk_ipg);
	}

	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(dev->clk_ahb)) {
		dev_err(&pdev->dev, "Could not get ahb clock\n");
		return PTR_ERR(dev->clk_ahb);
	}

	/* Allocate HW descriptors */
	dev->hw_desc[0] = dma_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			&dev->hw_phys_desc[0], GFP_KERNEL);
	if (!dev->hw_desc[0]) {
		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
		return -ENOMEM;
	}
	dev->hw_desc[1] = dev->hw_desc[0] + 1;
	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
				sizeof(struct sahara_hw_desc);

	/* Allocate space for iv and key */
	dev->key_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
				&dev->key_phys_base, GFP_KERNEL);
	if (!dev->key_base) {
		dev_err(&pdev->dev, "Could not allocate memory for key\n");
		err = -ENOMEM;
		goto err_key;
	}
	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

	/* Allocate space for HW links */
	dev->hw_link[0] = dma_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			&dev->hw_phys_link[0], GFP_KERNEL);
	if (!dev->hw_link[0]) {
		dev_err(&pdev->dev, "Could not allocate hw links\n");
		err = -ENOMEM;
		goto err_link;
	}
	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
					sizeof(struct sahara_hw_link);
		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
	}

	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);

	dev_ptr = dev;

	tasklet_init(&dev->queue_task, sahara_aes_queue_task,
		     (unsigned long)dev);
	tasklet_init(&dev->done_task, sahara_aes_done_task,
		     (unsigned long)dev);

	init_timer(&dev->watchdog);
	dev->watchdog.function = &sahara_watchdog;
	dev->watchdog.data = (unsigned long)dev;

	clk_prepare_enable(dev->clk_ipg);
	clk_prepare_enable(dev->clk_ahb);

	version = sahara_read(dev, SAHARA_REG_VERSION);
	if (version != SAHARA_VERSION_3) {
		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
			version);
		err = -ENODEV;
		goto err_algs;
	}

	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
		     SAHARA_REG_CMD);
	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
			SAHARA_CONTROL_SET_MAXBURST(8) |
			SAHARA_CONTROL_RNG_AUTORSD |
			SAHARA_CONTROL_ENABLE_INT,
			SAHARA_REG_CONTROL);

	err = sahara_register_algs(dev);
	if (err)
		goto err_algs;

	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

	return 0;

err_algs:
	dma_free_coherent(&pdev->dev,
			  SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			  dev->hw_link[0], dev->hw_phys_link[0]);
	clk_disable_unprepare(dev->clk_ipg);
	clk_disable_unprepare(dev->clk_ahb);
	dev_ptr = NULL;
err_link:
	dma_free_coherent(&pdev->dev,
			  2 * AES_KEYSIZE_128,
			  dev->key_base, dev->key_phys_base);
err_key:
	dma_free_coherent(&pdev->dev,
			  SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			  dev->hw_desc[0], dev->hw_phys_desc[0]);

	return err;
}

static int sahara_remove(struct platform_device *pdev)
{
	struct sahara_dev *dev = platform_get_drvdata(pdev);

	/* Unregister the algorithms first so no new requests can arrive */
	sahara_unregister_algs(dev);

	tasklet_kill(&dev->done_task);
	tasklet_kill(&dev->queue_task);

	dma_free_coherent(&pdev->dev,
			  SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			  dev->hw_link[0], dev->hw_phys_link[0]);
	dma_free_coherent(&pdev->dev,
			  2 * AES_KEYSIZE_128,
			  dev->key_base, dev->key_phys_base);
	dma_free_coherent(&pdev->dev,
			  SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			  dev->hw_desc[0], dev->hw_phys_desc[0]);

	clk_disable_unprepare(dev->clk_ipg);
	clk_disable_unprepare(dev->clk_ahb);

	dev_ptr = NULL;

	return 0;
}

static struct platform_driver sahara_driver = {
	.probe		= sahara_probe,
	.remove		= sahara_remove,
	.driver		= {
		.name	= SAHARA_NAME,
		.owner	= THIS_MODULE,
		.of_match_table = sahara_dt_ids,
	},
	.id_table = sahara_platform_ids,
};

module_platform_driver(sahara_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");