Lines matching refs:dd (lines in the Atmel AES driver, atmel-aes.c, that reference the identifier 'dd')

83 	struct atmel_aes_dev *dd;
215 static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
217 return readl_relaxed(dd->io_base + offset);
220 static inline void atmel_aes_write(struct atmel_aes_dev *dd,
223 writel_relaxed(value, dd->io_base + offset);
226 static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
230 *value = atmel_aes_read(dd, offset);
233 static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
237 atmel_aes_write(dd, offset, *value);
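
The multi-word helpers at lines 226-237 appear to walk a window of consecutive 32-bit registers, advancing the offset by 4 bytes per word. A minimal userspace sketch of that access pattern, using a plain array in place of the memory-mapped dd->io_base window (all names below are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the AES register window; the real code uses
 * readl_relaxed()/writel_relaxed() on dd->io_base + offset. */
static uint32_t fake_regs[64];

static uint32_t reg_read(uint32_t offset)
{
	return fake_regs[offset / 4];
}

static void reg_write(uint32_t offset, uint32_t value)
{
	fake_regs[offset / 4] = value;
}

/* Write 'count' 32-bit words into consecutive registers starting at
 * 'offset', mirroring the loop shape of atmel_aes_write_n(). */
static void reg_write_n(uint32_t offset, const uint32_t *value, int count)
{
	for (; count--; value++, offset += 4)
		reg_write(offset, *value);
}

/* Read 'count' consecutive words back, as atmel_aes_read_n() does. */
static void reg_read_n(uint32_t offset, uint32_t *value, int count)
{
	for (; count--; value++, offset += 4)
		*value = reg_read(offset);
}

int main(void)
{
	uint32_t key[4] = { 1, 2, 3, 4 }, back[4];

	reg_write_n(0x20, key, 4);   /* e.g. a 128-bit key into four key registers */
	reg_read_n(0x20, back, 4);
	printf("0x%x\n", (unsigned)back[3]);
	return 0;
}
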
246 if (!ctx->dd) {
251 ctx->dd = aes_dd;
253 aes_dd = ctx->dd;
261 static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
263 clk_prepare_enable(dd->iclk);
265 if (!(dd->flags & AES_FLAGS_INIT)) {
266 atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
267 atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
268 dd->flags |= AES_FLAGS_INIT;
269 dd->err = 0;
275 static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
277 return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
280 static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
282 atmel_aes_hw_init(dd);
284 dd->hw_version = atmel_aes_get_version(dd);
286 dev_info(dd->dev,
287 "version: 0x%x\n", dd->hw_version);
289 clk_disable_unprepare(dd->iclk);
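
The version probe at lines 275-289 keeps only the low 12 bits of AES_HW_VERSION before logging it. A tiny standalone sketch of that masking (the raw readout below is made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t raw = 0x00000135;               /* pretend AES_HW_VERSION readout */
	uint32_t hw_version = raw & 0x00000fff;  /* keep the low 12 bits, as in
	                                          * atmel_aes_get_version() */

	printf("version: 0x%x\n", (unsigned)hw_version);  /* same format as the dev_info */
	return 0;
}
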
292 static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err)
294 struct ablkcipher_request *req = dd->req;
296 clk_disable_unprepare(dd->iclk);
297 dd->flags &= ~AES_FLAGS_BUSY;
304 struct atmel_aes_dev *dd = data;
307 tasklet_schedule(&dd->done_task);
310 static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd,
316 dd->dma_size = length;
318 if (!(dd->flags & AES_FLAGS_FAST)) {
319 dma_sync_single_for_device(dd->dev, dma_addr_in, length,
323 if (dd->flags & AES_FLAGS_CFB8) {
324 dd->dma_lch_in.dma_conf.dst_addr_width =
326 dd->dma_lch_out.dma_conf.src_addr_width =
328 } else if (dd->flags & AES_FLAGS_CFB16) {
329 dd->dma_lch_in.dma_conf.dst_addr_width =
331 dd->dma_lch_out.dma_conf.src_addr_width =
334 dd->dma_lch_in.dma_conf.dst_addr_width =
336 dd->dma_lch_out.dma_conf.src_addr_width =
340 if (dd->flags & (AES_FLAGS_CFB8 | AES_FLAGS_CFB16 |
342 dd->dma_lch_in.dma_conf.src_maxburst = 1;
343 dd->dma_lch_in.dma_conf.dst_maxburst = 1;
344 dd->dma_lch_out.dma_conf.src_maxburst = 1;
345 dd->dma_lch_out.dma_conf.dst_maxburst = 1;
347 dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
348 dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
349 dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
350 dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
353 dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
354 dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
356 dd->flags |= AES_FLAGS_DMA;
366 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
372 out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
379 out_desc->callback_param = dd;
382 dma_async_issue_pending(dd->dma_lch_out.chan);
385 dma_async_issue_pending(dd->dma_lch_in.chan);
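
Lines 323-350 suggest that atmel_aes_crypt_dma() narrows the DMA bus width for the CFB8/CFB16 sub-modes and drops maxburst to 1 for the narrow CFB modes, otherwise using the hardware's maximum burst size. A simplified, self-contained version of that selection logic (the flag values and the config struct here are stand-ins, not the driver's flags or the dmaengine types):

#include <stdio.h>

/* Illustrative mode flags; the driver's AES_FLAGS_* values are not
 * visible in the matched lines. */
#define MODE_CFB8   (1u << 0)
#define MODE_CFB16  (1u << 1)
#define MODE_CFB32  (1u << 2)

struct xfer_conf {
	unsigned int addr_width_bytes;  /* dst width inbound, src width outbound */
	unsigned int maxburst;
};

/* Mirror the shape of the width/burst selection: byte-wide for CFB8,
 * half-word for CFB16, word otherwise; single-beat bursts for the
 * sub-word CFB modes, the hardware maximum otherwise. */
static struct xfer_conf pick_conf(unsigned int flags, unsigned int max_burst)
{
	struct xfer_conf c;

	if (flags & MODE_CFB8)
		c.addr_width_bytes = 1;
	else if (flags & MODE_CFB16)
		c.addr_width_bytes = 2;
	else
		c.addr_width_bytes = 4;

	if (flags & (MODE_CFB8 | MODE_CFB16 | MODE_CFB32))
		c.maxburst = 1;
	else
		c.maxburst = max_burst;

	return c;
}

int main(void)
{
	struct xfer_conf c = pick_conf(MODE_CFB16, 4);

	printf("width=%u burst=%u\n", c.addr_width_bytes, c.maxburst);
	return 0;
}
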
390 static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
392 dd->flags &= ~AES_FLAGS_DMA;
395 dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
396 if (!dd->nb_in_sg)
399 dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
400 if (!dd->nb_out_sg)
403 dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg,
404 dd->buf_in, dd->total);
406 if (!dd->bufcnt)
409 dd->total -= dd->bufcnt;
411 atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
412 atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in,
413 dd->bufcnt >> 2);
418 static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
424 if ((!dd->in_offset) && (!dd->out_offset)) {
426 in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
427 IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
428 out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
429 IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
432 if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
438 count = min(dd->total, sg_dma_len(dd->in_sg));
439 count = min(count, sg_dma_len(dd->out_sg));
441 err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
443 dev_err(dd->dev, "dma_map_sg() error\n");
447 err = dma_map_sg(dd->dev, dd->out_sg, 1,
450 dev_err(dd->dev, "dma_map_sg() error\n");
451 dma_unmap_sg(dd->dev, dd->in_sg, 1,
456 addr_in = sg_dma_address(dd->in_sg);
457 addr_out = sg_dma_address(dd->out_sg);
459 dd->flags |= AES_FLAGS_FAST;
463 count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset,
464 dd->buf_in, dd->buflen, dd->total, 0);
466 addr_in = dd->dma_addr_in;
467 addr_out = dd->dma_addr_out;
469 dd->flags &= ~AES_FLAGS_FAST;
472 dd->total -= count;
474 err = atmel_aes_crypt_dma(dd, addr_in, addr_out, count);
476 if (err && (dd->flags & AES_FLAGS_FAST)) {
477 dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
478 dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
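
The checks around lines 424-439 decide whether the request can be DMA-mapped directly (the "fast" path): both scatterlist entries must start word-aligned, both lengths must be multiples of the cipher block size, and the mapped lengths must match; otherwise the data is bounced through the driver's own buffers. A standalone predicate with the same structure (the alignment macro is re-derived here rather than taken from the kernel headers):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same idea as the kernel's IS_ALIGNED(): 'align' must be a power of two. */
#define ALIGNED(x, align) (((x) & ((align) - 1)) == 0)

/* Decide whether source and destination can be handed to DMA as-is,
 * mirroring the shape of the check in atmel_aes_crypt_dma_start(). */
static bool can_use_fast_path(size_t in_off, size_t in_len,
			      size_t out_off, size_t out_len,
			      size_t block_size)
{
	bool in_ok  = ALIGNED(in_off,  sizeof(uint32_t)) &&
		      ALIGNED(in_len,  block_size);
	bool out_ok = ALIGNED(out_off, sizeof(uint32_t)) &&
		      ALIGNED(out_len, block_size);

	return in_ok && out_ok && in_len == out_len;
}

int main(void)
{
	/* Word-aligned offsets, 64 bytes, 16-byte AES blocks: fast path. */
	printf("%d\n", can_use_fast_path(0, 64, 0, 64, 16));
	/* Unaligned source offset: fall back to the bounce buffers. */
	printf("%d\n", can_use_fast_path(2, 64, 0, 64, 16));
	return 0;
}
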
484 static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
489 err = atmel_aes_hw_init(dd);
495 if (dd->ctx->keylen == AES_KEYSIZE_128)
497 else if (dd->ctx->keylen == AES_KEYSIZE_192)
502 if (dd->flags & AES_FLAGS_CBC) {
504 } else if (dd->flags & AES_FLAGS_CFB) {
506 if (dd->flags & AES_FLAGS_CFB8)
508 else if (dd->flags & AES_FLAGS_CFB16)
510 else if (dd->flags & AES_FLAGS_CFB32)
512 else if (dd->flags & AES_FLAGS_CFB64)
514 else if (dd->flags & AES_FLAGS_CFB128)
516 } else if (dd->flags & AES_FLAGS_OFB) {
518 } else if (dd->flags & AES_FLAGS_CTR) {
524 if (dd->flags & AES_FLAGS_ENCRYPT)
527 if (dd->total > ATMEL_AES_DMA_THRESHOLD) {
529 if (dd->caps.has_dualbuff)
535 atmel_aes_write(dd, AES_CR, valcr);
536 atmel_aes_write(dd, AES_MR, valmr);
538 atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
539 dd->ctx->keylen >> 2);
541 if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) ||
542 (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) &&
543 dd->req->info) {
544 atmel_aes_write_n(dd, AES_IVR(0), dd->req->info, 4);
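
Lines 495-497 select a key-size field from ctx->keylen before the mode register is composed. A minimal sketch of that selection by key length in bytes (the field encodings below are placeholders, not the hardware's register values):

#include <stdio.h>

enum keysize_field { KEYSZ_128, KEYSZ_192, KEYSZ_256 };

/* AES-128/192/256 keys are 16/24/32 bytes long; anything else is invalid. */
static int keylen_to_field(unsigned int keylen, enum keysize_field *out)
{
	switch (keylen) {
	case 16: *out = KEYSZ_128; return 0;
	case 24: *out = KEYSZ_192; return 0;
	case 32: *out = KEYSZ_256; return 0;
	default: return -1;
	}
}

int main(void)
{
	enum keysize_field f;

	if (!keylen_to_field(24, &f))
		printf("field=%d\n", (int)f);
	return 0;
}
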
550 static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
559 spin_lock_irqsave(&dd->lock, flags);
561 ret = ablkcipher_enqueue_request(&dd->queue, req);
562 if (dd->flags & AES_FLAGS_BUSY) {
563 spin_unlock_irqrestore(&dd->lock, flags);
566 backlog = crypto_get_backlog(&dd->queue);
567 async_req = crypto_dequeue_request(&dd->queue);
569 dd->flags |= AES_FLAGS_BUSY;
570 spin_unlock_irqrestore(&dd->lock, flags);
581 dd->req = req;
582 dd->total = req->nbytes;
583 dd->in_offset = 0;
584 dd->in_sg = req->src;
585 dd->out_offset = 0;
586 dd->out_sg = req->dst;
591 dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode;
592 dd->ctx = ctx;
593 ctx->dd = dd;
595 err = atmel_aes_write_ctrl(dd);
597 if (dd->total > ATMEL_AES_DMA_THRESHOLD)
598 err = atmel_aes_crypt_dma_start(dd);
600 err = atmel_aes_crypt_cpu_start(dd);
604 atmel_aes_finish_req(dd, err);
605 tasklet_schedule(&dd->queue_task);
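
At lines 595-600 the dequeued request is started over DMA only when the remaining byte count exceeds ATMEL_AES_DMA_THRESHOLD; otherwise the CPU/PIO path is used. The dispatch boils down to the following shape (the threshold value is not visible in the matched lines, so a placeholder is used):

#include <stddef.h>
#include <stdio.h>

#define DMA_THRESHOLD 16U   /* placeholder; the driver defines its own value */

/* Stand-ins for the two transfer paths started by atmel_aes_handle_queue(). */
static int start_dma(size_t total) { printf("DMA: %zu bytes\n", total); return 0; }
static int start_pio(size_t total) { printf("PIO: %zu bytes\n", total); return 0; }

static int start_transfer(size_t total)
{
	return total > DMA_THRESHOLD ? start_dma(total) : start_pio(total);
}

int main(void)
{
	start_transfer(8);     /* small request: programmed I/O */
	start_transfer(4096);  /* large request: DMA */
	return 0;
}
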
611 static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
616 if (dd->flags & AES_FLAGS_DMA) {
618 if (dd->flags & AES_FLAGS_FAST) {
619 dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
620 dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
622 dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
623 dd->dma_size, DMA_FROM_DEVICE);
626 count = atmel_aes_sg_copy(&dd->out_sg, &dd->out_offset,
627 dd->buf_out, dd->buflen, dd->dma_size, 1);
628 if (count != dd->dma_size) {
639 static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
643 dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
644 dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
645 dd->buflen = PAGE_SIZE;
646 dd->buflen &= ~(AES_BLOCK_SIZE - 1);
648 if (!dd->buf_in || !dd->buf_out) {
649 dev_err(dd->dev, "unable to alloc pages.\n");
654 dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
655 dd->buflen, DMA_TO_DEVICE);
656 if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
657 dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
662 dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
663 dd->buflen, DMA_FROM_DEVICE);
664 if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
665 dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
673 dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
676 free_page((unsigned long)dd->buf_out);
677 free_page((unsigned long)dd->buf_in);
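
Lines 643-646 allocate one page per direction and then round the usable length down to a whole number of AES blocks with a mask. The round-down trick works because the block size is a power of two; a standalone sketch:

#include <stdio.h>

#define AES_BLOCK_SIZE 16U   /* AES always uses 16-byte blocks */

/* Round 'len' down to a multiple of AES_BLOCK_SIZE, as done for dd->buflen:
 * clearing the low bits is equivalent to len - (len % AES_BLOCK_SIZE). */
static unsigned long round_down_to_block(unsigned long len)
{
	return len & ~(unsigned long)(AES_BLOCK_SIZE - 1);
}

int main(void)
{
	printf("%lu\n", round_down_to_block(4096));  /* already a multiple: 4096 */
	printf("%lu\n", round_down_to_block(4100));  /* trimmed back to 4096 */
	return 0;
}
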
684 static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
686 dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
688 dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
690 free_page((unsigned long)dd->buf_out);
691 free_page((unsigned long)dd->buf_in);
699 struct atmel_aes_dev *dd;
733 dd = atmel_aes_find_dev(ctx);
734 if (!dd)
739 return atmel_aes_handle_queue(dd, req);
754 static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
764 dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
765 atmel_aes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
766 if (!dd->dma_lch_in.chan)
769 dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
770 dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
772 dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
773 dd->dma_lch_in.dma_conf.src_addr_width =
775 dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
776 dd->dma_lch_in.dma_conf.dst_addr_width =
778 dd->dma_lch_in.dma_conf.device_fc = false;
780 dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
781 atmel_aes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
782 if (!dd->dma_lch_out.chan)
785 dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
786 dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
788 dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
789 dd->dma_lch_out.dma_conf.src_addr_width =
791 dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
792 dd->dma_lch_out.dma_conf.dst_addr_width =
794 dd->dma_lch_out.dma_conf.device_fc = false;
799 dma_release_channel(dd->dma_lch_in.chan);
801 dev_warn(dd->dev, "no DMA channel available\n");
805 static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
807 dma_release_channel(dd->dma_lch_in.chan);
808 dma_release_channel(dd->dma_lch_out.chan);
1141 struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
1143 atmel_aes_handle_queue(dd, NULL);
1148 struct atmel_aes_dev *dd = (struct atmel_aes_dev *) data;
1151 if (!(dd->flags & AES_FLAGS_DMA)) {
1152 atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out,
1153 dd->bufcnt >> 2);
1155 if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg,
1156 dd->buf_out, dd->bufcnt))
1164 err = atmel_aes_crypt_dma_stop(dd);
1166 err = dd->err ? : err;
1168 if (dd->total && !err) {
1169 if (dd->flags & AES_FLAGS_FAST) {
1170 dd->in_sg = sg_next(dd->in_sg);
1171 dd->out_sg = sg_next(dd->out_sg);
1172 if (!dd->in_sg || !dd->out_sg)
1176 err = atmel_aes_crypt_dma_start(dd);
1182 atmel_aes_finish_req(dd, err);
1183 atmel_aes_handle_queue(dd, NULL);
1204 static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
1210 if (dd->caps.has_cfb64)
1214 static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
1224 if (dd->caps.has_cfb64) {
1241 static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
1243 dd->caps.has_dualbuff = 0;
1244 dd->caps.has_cfb64 = 0;
1245 dd->caps.max_burst_size = 1;
1248 switch (dd->hw_version & 0xff0) {
1250 dd->caps.has_dualbuff = 1;
1251 dd->caps.has_cfb64 = 1;
1252 dd->caps.max_burst_size = 4;
1257 dev_warn(dd->dev,
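
atmel_aes_get_cap() (lines 1241-1257) starts from conservative defaults and upgrades the capability flags based on the upper bits of the probed hardware version (hw_version & 0xff0), warning on unknown revisions. The case labels themselves are not part of the matched lines, so the revision values below are placeholders; a simplified standalone version of the pattern:

#include <stdint.h>
#include <stdio.h>

struct aes_caps {
	int has_dualbuff;
	int has_cfb64;
	int max_burst_size;
};

/* Begin with the most conservative assumptions, then enable features for
 * revisions known to support them, keyed on bits [11:4] of the version. */
static struct aes_caps get_caps(uint32_t hw_version)
{
	struct aes_caps caps = {
		.has_dualbuff = 0,
		.has_cfb64 = 0,
		.max_burst_size = 1,
	};

	switch (hw_version & 0xff0) {
	case 0x130:	/* placeholder revision with the full feature set */
		caps.has_dualbuff = 1;
		caps.has_cfb64 = 1;
		caps.max_burst_size = 4;
		break;
	default:	/* unknown revision: keep the defaults and warn */
		fprintf(stderr, "unexpected AES hw version, using defaults\n");
		break;
	}

	return caps;
}

int main(void)
{
	struct aes_caps caps = get_caps(0x135);

	printf("dualbuff=%d cfb64=%d burst=%d\n",
	       caps.has_dualbuff, caps.has_cfb64, caps.max_burst_size);
	return 0;
}
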