/*
 * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/rtnetlink.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timer.h>

#include "picoxcell_crypto_regs.h"

/*
 * The threshold for the number of entries in the CMD FIFO available before
 * the CMD0_CNT interrupt is raised. Increasing this value will reduce the
 * number of interrupts raised to the CPU.
 */
#define CMD0_IRQ_THRESHOLD	1

/*
 * The timeout period (in jiffies) for a PDU. When the number of PDUs in
 * flight is greater than the STAT_IRQ_THRESHOLD or 0 the timer is disabled.
 * When there are packets in flight but lower than the threshold, we enable
 * the timer and at expiry, attempt to remove any processed packets from the
 * queue and if there are still packets left, schedule the timer again.
 */
#define PACKET_TIMEOUT		1

/* The priority to register each algorithm with. */
#define SPACC_CRYPTO_ALG_PRIORITY	10000

#define SPACC_CRYPTO_KASUMI_F8_KEY_LEN	16
#define SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ	64
#define SPACC_CRYPTO_IPSEC_HASH_PG_SZ	64
#define SPACC_CRYPTO_IPSEC_MAX_CTXS	32
#define SPACC_CRYPTO_IPSEC_FIFO_SZ	32
#define SPACC_CRYPTO_L2_CIPHER_PG_SZ	64
#define SPACC_CRYPTO_L2_HASH_PG_SZ	64
#define SPACC_CRYPTO_L2_MAX_CTXS	128
#define SPACC_CRYPTO_L2_FIFO_SZ		128

#define MAX_DDT_LEN			16

/* DDT format. This must match the hardware DDT format exactly. */
struct spacc_ddt {
	dma_addr_t	p;
	u32		len;
};

/*
 * Asynchronous crypto request structure.
 *
 * This structure defines a request that is either queued for processing or
 * being processed.
 */
struct spacc_req {
	struct list_head		list;
	struct spacc_engine		*engine;
	struct crypto_async_request	*req;
	int				result;
	bool				is_encrypt;
	unsigned			ctx_id;
	dma_addr_t			src_addr, dst_addr;
	struct spacc_ddt		*src_ddt, *dst_ddt;
	void				(*complete)(struct spacc_req *req);

	/* AEAD specific bits.
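	 *
	 * For a givencrypt request, giv points at the buffer that receives
	 * the generated IV, giv_len is its length and giv_pa holds the DMA
	 * mapping of the IV while the request is in flight.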
*/ 103 u8 *giv; 104 size_t giv_len; 105 dma_addr_t giv_pa; 106}; 107 108struct spacc_engine { 109 void __iomem *regs; 110 struct list_head pending; 111 int next_ctx; 112 spinlock_t hw_lock; 113 int in_flight; 114 struct list_head completed; 115 struct list_head in_progress; 116 struct tasklet_struct complete; 117 unsigned long fifo_sz; 118 void __iomem *cipher_ctx_base; 119 void __iomem *hash_key_base; 120 struct spacc_alg *algs; 121 unsigned num_algs; 122 struct list_head registered_algs; 123 size_t cipher_pg_sz; 124 size_t hash_pg_sz; 125 const char *name; 126 struct clk *clk; 127 struct device *dev; 128 unsigned max_ctxs; 129 struct timer_list packet_timeout; 130 unsigned stat_irq_thresh; 131 struct dma_pool *req_pool; 132}; 133 134/* Algorithm type mask. */ 135#define SPACC_CRYPTO_ALG_MASK 0x7 136 137/* SPACC definition of a crypto algorithm. */ 138struct spacc_alg { 139 unsigned long ctrl_default; 140 unsigned long type; 141 struct crypto_alg alg; 142 struct spacc_engine *engine; 143 struct list_head entry; 144 int key_offs; 145 int iv_offs; 146}; 147 148/* Generic context structure for any algorithm type. */ 149struct spacc_generic_ctx { 150 struct spacc_engine *engine; 151 int flags; 152 int key_offs; 153 int iv_offs; 154}; 155 156/* Block cipher context. */ 157struct spacc_ablk_ctx { 158 struct spacc_generic_ctx generic; 159 u8 key[AES_MAX_KEY_SIZE]; 160 u8 key_len; 161 /* 162 * The fallback cipher. If the operation can't be done in hardware, 163 * fallback to a software version. 164 */ 165 struct crypto_ablkcipher *sw_cipher; 166}; 167 168/* AEAD cipher context. */ 169struct spacc_aead_ctx { 170 struct spacc_generic_ctx generic; 171 u8 cipher_key[AES_MAX_KEY_SIZE]; 172 u8 hash_ctx[SPACC_CRYPTO_IPSEC_HASH_PG_SZ]; 173 u8 cipher_key_len; 174 u8 hash_key_len; 175 struct crypto_aead *sw_cipher; 176 size_t auth_size; 177 u8 salt[AES_BLOCK_SIZE]; 178}; 179 180static int spacc_ablk_submit(struct spacc_req *req); 181 182static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg) 183{ 184 return alg ? container_of(alg, struct spacc_alg, alg) : NULL; 185} 186 187static inline int spacc_fifo_cmd_full(struct spacc_engine *engine) 188{ 189 u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET); 190 191 return fifo_stat & SPA_FIFO_CMD_FULL; 192} 193 194/* 195 * Given a cipher context, and a context number, get the base address of the 196 * context page. 197 * 198 * Returns the address of the context page where the key/context may 199 * be written. 200 */ 201static inline void __iomem *spacc_ctx_page_addr(struct spacc_generic_ctx *ctx, 202 unsigned indx, 203 bool is_cipher_ctx) 204{ 205 return is_cipher_ctx ? ctx->engine->cipher_ctx_base + 206 (indx * ctx->engine->cipher_pg_sz) : 207 ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz); 208} 209 210/* The context pages can only be written with 32-bit accesses. */ 211static inline void memcpy_toio32(u32 __iomem *dst, const void *src, 212 unsigned count) 213{ 214 const u32 *src32 = (const u32 *) src; 215 216 while (count--) 217 writel(*src32++, dst++); 218} 219 220static void spacc_cipher_write_ctx(struct spacc_generic_ctx *ctx, 221 void __iomem *page_addr, const u8 *key, 222 size_t key_len, const u8 *iv, size_t iv_len) 223{ 224 void __iomem *key_ptr = page_addr + ctx->key_offs; 225 void __iomem *iv_ptr = page_addr + ctx->iv_offs; 226 227 memcpy_toio32(key_ptr, key, key_len / 4); 228 memcpy_toio32(iv_ptr, iv, iv_len / 4); 229} 230 231/* 232 * Load a context into the engines context memory. 
233 * 234 * Returns the index of the context page where the context was loaded. 235 */ 236static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx, 237 const u8 *ciph_key, size_t ciph_len, 238 const u8 *iv, size_t ivlen, const u8 *hash_key, 239 size_t hash_len) 240{ 241 unsigned indx = ctx->engine->next_ctx++; 242 void __iomem *ciph_page_addr, *hash_page_addr; 243 244 ciph_page_addr = spacc_ctx_page_addr(ctx, indx, 1); 245 hash_page_addr = spacc_ctx_page_addr(ctx, indx, 0); 246 247 ctx->engine->next_ctx &= ctx->engine->fifo_sz - 1; 248 spacc_cipher_write_ctx(ctx, ciph_page_addr, ciph_key, ciph_len, iv, 249 ivlen); 250 writel(ciph_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET) | 251 (1 << SPA_KEY_SZ_CIPHER_OFFSET), 252 ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET); 253 254 if (hash_key) { 255 memcpy_toio32(hash_page_addr, hash_key, hash_len / 4); 256 writel(hash_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET), 257 ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET); 258 } 259 260 return indx; 261} 262 263/* Count the number of scatterlist entries in a scatterlist. */ 264static int sg_count(struct scatterlist *sg_list, int nbytes) 265{ 266 struct scatterlist *sg = sg_list; 267 int sg_nents = 0; 268 269 while (nbytes > 0) { 270 ++sg_nents; 271 nbytes -= sg->length; 272 sg = sg_next(sg); 273 } 274 275 return sg_nents; 276} 277 278static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len) 279{ 280 ddt->p = phys; 281 ddt->len = len; 282} 283 284/* 285 * Take a crypto request and scatterlists for the data and turn them into DDTs 286 * for passing to the crypto engines. This also DMA maps the data so that the 287 * crypto engines can DMA to/from them. 288 */ 289static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine, 290 struct scatterlist *payload, 291 unsigned nbytes, 292 enum dma_data_direction dir, 293 dma_addr_t *ddt_phys) 294{ 295 unsigned nents, mapped_ents; 296 struct scatterlist *cur; 297 struct spacc_ddt *ddt; 298 int i; 299 300 nents = sg_count(payload, nbytes); 301 mapped_ents = dma_map_sg(engine->dev, payload, nents, dir); 302 303 if (mapped_ents + 1 > MAX_DDT_LEN) 304 goto out; 305 306 ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, ddt_phys); 307 if (!ddt) 308 goto out; 309 310 for_each_sg(payload, cur, mapped_ents, i) 311 ddt_set(&ddt[i], sg_dma_address(cur), sg_dma_len(cur)); 312 ddt_set(&ddt[mapped_ents], 0, 0); 313 314 return ddt; 315 316out: 317 dma_unmap_sg(engine->dev, payload, nents, dir); 318 return NULL; 319} 320 321static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv) 322{ 323 struct aead_request *areq = container_of(req->req, struct aead_request, 324 base); 325 struct spacc_engine *engine = req->engine; 326 struct spacc_ddt *src_ddt, *dst_ddt; 327 unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq)); 328 unsigned nents = sg_count(areq->src, areq->cryptlen); 329 dma_addr_t iv_addr; 330 struct scatterlist *cur; 331 int i, dst_ents, src_ents, assoc_ents; 332 u8 *iv = giv ? 
giv : areq->iv; 333 334 src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr); 335 if (!src_ddt) 336 return -ENOMEM; 337 338 dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr); 339 if (!dst_ddt) { 340 dma_pool_free(engine->req_pool, src_ddt, req->src_addr); 341 return -ENOMEM; 342 } 343 344 req->src_ddt = src_ddt; 345 req->dst_ddt = dst_ddt; 346 347 assoc_ents = dma_map_sg(engine->dev, areq->assoc, 348 sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE); 349 if (areq->src != areq->dst) { 350 src_ents = dma_map_sg(engine->dev, areq->src, nents, 351 DMA_TO_DEVICE); 352 dst_ents = dma_map_sg(engine->dev, areq->dst, nents, 353 DMA_FROM_DEVICE); 354 } else { 355 src_ents = dma_map_sg(engine->dev, areq->src, nents, 356 DMA_BIDIRECTIONAL); 357 dst_ents = 0; 358 } 359 360 /* 361 * Map the IV/GIV. For the GIV it needs to be bidirectional as it is 362 * formed by the crypto block and sent as the ESP IV for IPSEC. 363 */ 364 iv_addr = dma_map_single(engine->dev, iv, ivsize, 365 giv ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); 366 req->giv_pa = iv_addr; 367 368 /* 369 * Map the associated data. For decryption we don't copy the 370 * associated data. 371 */ 372 for_each_sg(areq->assoc, cur, assoc_ents, i) { 373 ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur)); 374 if (req->is_encrypt) 375 ddt_set(dst_ddt++, sg_dma_address(cur), 376 sg_dma_len(cur)); 377 } 378 ddt_set(src_ddt++, iv_addr, ivsize); 379 380 if (giv || req->is_encrypt) 381 ddt_set(dst_ddt++, iv_addr, ivsize); 382 383 /* 384 * Now map in the payload for the source and destination and terminate 385 * with the NULL pointers. 386 */ 387 for_each_sg(areq->src, cur, src_ents, i) { 388 ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur)); 389 if (areq->src == areq->dst) 390 ddt_set(dst_ddt++, sg_dma_address(cur), 391 sg_dma_len(cur)); 392 } 393 394 for_each_sg(areq->dst, cur, dst_ents, i) 395 ddt_set(dst_ddt++, sg_dma_address(cur), 396 sg_dma_len(cur)); 397 398 ddt_set(src_ddt, 0, 0); 399 ddt_set(dst_ddt, 0, 0); 400 401 return 0; 402} 403 404static void spacc_aead_free_ddts(struct spacc_req *req) 405{ 406 struct aead_request *areq = container_of(req->req, struct aead_request, 407 base); 408 struct spacc_alg *alg = to_spacc_alg(req->req->tfm->__crt_alg); 409 struct spacc_ablk_ctx *aead_ctx = crypto_tfm_ctx(req->req->tfm); 410 struct spacc_engine *engine = aead_ctx->generic.engine; 411 unsigned ivsize = alg->alg.cra_aead.ivsize; 412 unsigned nents = sg_count(areq->src, areq->cryptlen); 413 414 if (areq->src != areq->dst) { 415 dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE); 416 dma_unmap_sg(engine->dev, areq->dst, 417 sg_count(areq->dst, areq->cryptlen), 418 DMA_FROM_DEVICE); 419 } else 420 dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL); 421 422 dma_unmap_sg(engine->dev, areq->assoc, 423 sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE); 424 425 dma_unmap_single(engine->dev, req->giv_pa, ivsize, DMA_BIDIRECTIONAL); 426 427 dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr); 428 dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr); 429} 430 431static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt, 432 dma_addr_t ddt_addr, struct scatterlist *payload, 433 unsigned nbytes, enum dma_data_direction dir) 434{ 435 unsigned nents = sg_count(payload, nbytes); 436 437 dma_unmap_sg(req->engine->dev, payload, nents, dir); 438 dma_pool_free(req->engine->req_pool, ddt, ddt_addr); 439} 440 441/* 442 * Set key for a DES operation in an AEAD cipher. 
This also performs weak key 443 * checking if required. 444 */ 445static int spacc_aead_des_setkey(struct crypto_aead *aead, const u8 *key, 446 unsigned int len) 447{ 448 struct crypto_tfm *tfm = crypto_aead_tfm(aead); 449 struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); 450 u32 tmp[DES_EXPKEY_WORDS]; 451 452 if (unlikely(!des_ekey(tmp, key)) && 453 (crypto_aead_get_flags(aead)) & CRYPTO_TFM_REQ_WEAK_KEY) { 454 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; 455 return -EINVAL; 456 } 457 458 memcpy(ctx->cipher_key, key, len); 459 ctx->cipher_key_len = len; 460 461 return 0; 462} 463 464/* Set the key for the AES block cipher component of the AEAD transform. */ 465static int spacc_aead_aes_setkey(struct crypto_aead *aead, const u8 *key, 466 unsigned int len) 467{ 468 struct crypto_tfm *tfm = crypto_aead_tfm(aead); 469 struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); 470 471 /* 472 * IPSec engine only supports 128 and 256 bit AES keys. If we get a 473 * request for any other size (192 bits) then we need to do a software 474 * fallback. 475 */ 476 if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) { 477 /* 478 * Set the fallback transform to use the same request flags as 479 * the hardware transform. 480 */ 481 ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; 482 ctx->sw_cipher->base.crt_flags |= 483 tfm->crt_flags & CRYPTO_TFM_REQ_MASK; 484 return crypto_aead_setkey(ctx->sw_cipher, key, len); 485 } 486 487 memcpy(ctx->cipher_key, key, len); 488 ctx->cipher_key_len = len; 489 490 return 0; 491} 492 493static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key, 494 unsigned int keylen) 495{ 496 struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm); 497 struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg); 498 struct crypto_authenc_keys keys; 499 int err = -EINVAL; 500 501 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) 502 goto badkey; 503 504 if (keys.enckeylen > AES_MAX_KEY_SIZE) 505 goto badkey; 506 507 if (keys.authkeylen > sizeof(ctx->hash_ctx)) 508 goto badkey; 509 510 if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) == 511 SPA_CTRL_CIPH_ALG_AES) 512 err = spacc_aead_aes_setkey(tfm, keys.enckey, keys.enckeylen); 513 else 514 err = spacc_aead_des_setkey(tfm, keys.enckey, keys.enckeylen); 515 516 if (err) 517 goto badkey; 518 519 memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen); 520 ctx->hash_key_len = keys.authkeylen; 521 522 return 0; 523 524badkey: 525 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 526 return -EINVAL; 527} 528 529static int spacc_aead_setauthsize(struct crypto_aead *tfm, 530 unsigned int authsize) 531{ 532 struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm)); 533 534 ctx->auth_size = authsize; 535 536 return 0; 537} 538 539/* 540 * Check if an AEAD request requires a fallback operation. Some requests can't 541 * be completed in hardware because the hardware may not support certain key 542 * sizes. In these cases we need to complete the request in software. 543 */ 544static int spacc_aead_need_fallback(struct spacc_req *req) 545{ 546 struct aead_request *aead_req; 547 struct crypto_tfm *tfm = req->req->tfm; 548 struct crypto_alg *alg = req->req->tfm->__crt_alg; 549 struct spacc_alg *spacc_alg = to_spacc_alg(alg); 550 struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); 551 552 aead_req = container_of(req->req, struct aead_request, base); 553 /* 554 * If we have a non-supported key-length, then we need to do a 555 * software fallback. 
556 */ 557 if ((spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) == 558 SPA_CTRL_CIPH_ALG_AES && 559 ctx->cipher_key_len != AES_KEYSIZE_128 && 560 ctx->cipher_key_len != AES_KEYSIZE_256) 561 return 1; 562 563 return 0; 564} 565 566static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type, 567 bool is_encrypt) 568{ 569 struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req)); 570 struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm); 571 int err; 572 573 if (ctx->sw_cipher) { 574 /* 575 * Change the request to use the software fallback transform, 576 * and once the ciphering has completed, put the old transform 577 * back into the request. 578 */ 579 aead_request_set_tfm(req, ctx->sw_cipher); 580 err = is_encrypt ? crypto_aead_encrypt(req) : 581 crypto_aead_decrypt(req); 582 aead_request_set_tfm(req, __crypto_aead_cast(old_tfm)); 583 } else 584 err = -EINVAL; 585 586 return err; 587} 588 589static void spacc_aead_complete(struct spacc_req *req) 590{ 591 spacc_aead_free_ddts(req); 592 req->req->complete(req->req, req->result); 593} 594 595static int spacc_aead_submit(struct spacc_req *req) 596{ 597 struct crypto_tfm *tfm = req->req->tfm; 598 struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); 599 struct crypto_alg *alg = req->req->tfm->__crt_alg; 600 struct spacc_alg *spacc_alg = to_spacc_alg(alg); 601 struct spacc_engine *engine = ctx->generic.engine; 602 u32 ctrl, proc_len, assoc_len; 603 struct aead_request *aead_req = 604 container_of(req->req, struct aead_request, base); 605 606 req->result = -EINPROGRESS; 607 req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key, 608 ctx->cipher_key_len, aead_req->iv, alg->cra_aead.ivsize, 609 ctx->hash_ctx, ctx->hash_key_len); 610 611 /* Set the source and destination DDT pointers. */ 612 writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET); 613 writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET); 614 writel(0, engine->regs + SPA_OFFSET_REG_OFFSET); 615 616 assoc_len = aead_req->assoclen; 617 proc_len = aead_req->cryptlen + assoc_len; 618 619 /* 620 * If we aren't generating an IV, then we need to include the IV in the 621 * associated data so that it is included in the hash. 622 */ 623 if (!req->giv) { 624 assoc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req)); 625 proc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req)); 626 } else 627 proc_len += req->giv_len; 628 629 /* 630 * If we are decrypting, we need to take the length of the ICV out of 631 * the processing length. 
	 */
	if (!req->is_encrypt)
		proc_len -= ctx->auth_size;

	writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
	writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
	writel(ctx->auth_size, engine->regs + SPA_ICV_LEN_REG_OFFSET);
	writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
	writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);

	ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
	       (1 << SPA_CTRL_ICV_APPEND);
	if (req->is_encrypt)
		ctrl |= (1 << SPA_CTRL_ENCRYPT_IDX) | (1 << SPA_CTRL_AAD_COPY);
	else
		ctrl |= (1 << SPA_CTRL_KEY_EXP);

	mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

	writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);

	return -EINPROGRESS;
}

static int spacc_req_submit(struct spacc_req *req);

static void spacc_push(struct spacc_engine *engine)
{
	struct spacc_req *req;

	while (!list_empty(&engine->pending) &&
	       engine->in_flight + 1 <= engine->fifo_sz) {

		++engine->in_flight;
		req = list_first_entry(&engine->pending, struct spacc_req,
				       list);
		list_move_tail(&req->list, &engine->in_progress);

		req->result = spacc_req_submit(req);
	}
}

/*
 * Set up an AEAD request for processing. This will configure the engine, load
 * the context and then start the packet processing.
 *
 * @giv Pointer to destination address for a generated IV. If the
 *	request does not need to generate an IV then this should be set to
 *	NULL.
 */
static int spacc_aead_setup(struct aead_request *req, u8 *giv,
			    unsigned alg_type, bool is_encrypt)
{
	struct crypto_alg *alg = req->base.tfm->__crt_alg;
	struct spacc_engine *engine = to_spacc_alg(alg)->engine;
	struct spacc_req *dev_req = aead_request_ctx(req);
	int err = -EINPROGRESS;
	unsigned long flags;
	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));

	dev_req->giv = giv;
	dev_req->giv_len = ivsize;
	dev_req->req = &req->base;
	dev_req->is_encrypt = is_encrypt;
	dev_req->result = -EBUSY;
	dev_req->engine = engine;
	dev_req->complete = spacc_aead_complete;

	if (unlikely(spacc_aead_need_fallback(dev_req)))
		return spacc_aead_do_fallback(req, alg_type, is_encrypt);

	/* Building the DDTs may fail with -ENOMEM, so check the result. */
	err = spacc_aead_make_ddts(dev_req, dev_req->giv);
	if (err)
		goto out;

	err = -EINPROGRESS;
	spin_lock_irqsave(&engine->hw_lock, flags);
	if (unlikely(spacc_fifo_cmd_full(engine)) ||
	    engine->in_flight + 1 > engine->fifo_sz) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			err = -EBUSY;
			spin_unlock_irqrestore(&engine->hw_lock, flags);
			goto out_free_ddts;
		}
		list_add_tail(&dev_req->list, &engine->pending);
	} else {
		list_add_tail(&dev_req->list, &engine->pending);
		spacc_push(engine);
	}
	spin_unlock_irqrestore(&engine->hw_lock, flags);

	goto out;

out_free_ddts:
	spacc_aead_free_ddts(dev_req);
out:
	return err;
}

static int spacc_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

	return spacc_aead_setup(req, NULL, alg->type, 1);
}

static int spacc_aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	size_t ivsize = crypto_aead_ivsize(tfm);
	struct spacc_alg *alg
= to_spacc_alg(tfm->base.__crt_alg); 743 unsigned len; 744 __be64 seq; 745 746 memcpy(req->areq.iv, ctx->salt, ivsize); 747 len = ivsize; 748 if (ivsize > sizeof(u64)) { 749 memset(req->giv, 0, ivsize - sizeof(u64)); 750 len = sizeof(u64); 751 } 752 seq = cpu_to_be64(req->seq); 753 memcpy(req->giv + ivsize - len, &seq, len); 754 755 return spacc_aead_setup(&req->areq, req->giv, alg->type, 1); 756} 757 758static int spacc_aead_decrypt(struct aead_request *req) 759{ 760 struct crypto_aead *aead = crypto_aead_reqtfm(req); 761 struct crypto_tfm *tfm = crypto_aead_tfm(aead); 762 struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg); 763 764 return spacc_aead_setup(req, NULL, alg->type, 0); 765} 766 767/* 768 * Initialise a new AEAD context. This is responsible for allocating the 769 * fallback cipher and initialising the context. 770 */ 771static int spacc_aead_cra_init(struct crypto_tfm *tfm) 772{ 773 struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); 774 struct crypto_alg *alg = tfm->__crt_alg; 775 struct spacc_alg *spacc_alg = to_spacc_alg(alg); 776 struct spacc_engine *engine = spacc_alg->engine; 777 778 ctx->generic.flags = spacc_alg->type; 779 ctx->generic.engine = engine; 780 ctx->sw_cipher = crypto_alloc_aead(alg->cra_name, 0, 781 CRYPTO_ALG_ASYNC | 782 CRYPTO_ALG_NEED_FALLBACK); 783 if (IS_ERR(ctx->sw_cipher)) { 784 dev_warn(engine->dev, "failed to allocate fallback for %s\n", 785 alg->cra_name); 786 ctx->sw_cipher = NULL; 787 } 788 ctx->generic.key_offs = spacc_alg->key_offs; 789 ctx->generic.iv_offs = spacc_alg->iv_offs; 790 791 get_random_bytes(ctx->salt, sizeof(ctx->salt)); 792 793 tfm->crt_aead.reqsize = sizeof(struct spacc_req); 794 795 return 0; 796} 797 798/* 799 * Destructor for an AEAD context. This is called when the transform is freed 800 * and must free the fallback cipher. 801 */ 802static void spacc_aead_cra_exit(struct crypto_tfm *tfm) 803{ 804 struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); 805 806 if (ctx->sw_cipher) 807 crypto_free_aead(ctx->sw_cipher); 808 ctx->sw_cipher = NULL; 809} 810 811/* 812 * Set the DES key for a block cipher transform. This also performs weak key 813 * checking if the transform has requested it. 814 */ 815static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 816 unsigned int len) 817{ 818 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 819 struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); 820 u32 tmp[DES_EXPKEY_WORDS]; 821 822 if (len > DES3_EDE_KEY_SIZE) { 823 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 824 return -EINVAL; 825 } 826 827 if (unlikely(!des_ekey(tmp, key)) && 828 (crypto_ablkcipher_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY)) { 829 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; 830 return -EINVAL; 831 } 832 833 memcpy(ctx->key, key, len); 834 ctx->key_len = len; 835 836 return 0; 837} 838 839/* 840 * Set the key for an AES block cipher. Some key lengths are not supported in 841 * hardware so this must also check whether a fallback is needed. 842 */ 843static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 844 unsigned int len) 845{ 846 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 847 struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); 848 int err = 0; 849 850 if (len > AES_MAX_KEY_SIZE) { 851 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 852 return -EINVAL; 853 } 854 855 /* 856 * IPSec engine only supports 128 and 256 bit AES keys. 
If we get a 857 * request for any other size (192 bits) then we need to do a software 858 * fallback. 859 */ 860 if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 && 861 ctx->sw_cipher) { 862 /* 863 * Set the fallback transform to use the same request flags as 864 * the hardware transform. 865 */ 866 ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; 867 ctx->sw_cipher->base.crt_flags |= 868 cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK; 869 870 err = crypto_ablkcipher_setkey(ctx->sw_cipher, key, len); 871 if (err) 872 goto sw_setkey_failed; 873 } else if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 && 874 !ctx->sw_cipher) 875 err = -EINVAL; 876 877 memcpy(ctx->key, key, len); 878 ctx->key_len = len; 879 880sw_setkey_failed: 881 if (err && ctx->sw_cipher) { 882 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; 883 tfm->crt_flags |= 884 ctx->sw_cipher->base.crt_flags & CRYPTO_TFM_RES_MASK; 885 } 886 887 return err; 888} 889 890static int spacc_kasumi_f8_setkey(struct crypto_ablkcipher *cipher, 891 const u8 *key, unsigned int len) 892{ 893 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 894 struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); 895 int err = 0; 896 897 if (len > AES_MAX_KEY_SIZE) { 898 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 899 err = -EINVAL; 900 goto out; 901 } 902 903 memcpy(ctx->key, key, len); 904 ctx->key_len = len; 905 906out: 907 return err; 908} 909 910static int spacc_ablk_need_fallback(struct spacc_req *req) 911{ 912 struct spacc_ablk_ctx *ctx; 913 struct crypto_tfm *tfm = req->req->tfm; 914 struct crypto_alg *alg = req->req->tfm->__crt_alg; 915 struct spacc_alg *spacc_alg = to_spacc_alg(alg); 916 917 ctx = crypto_tfm_ctx(tfm); 918 919 return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) == 920 SPA_CTRL_CIPH_ALG_AES && 921 ctx->key_len != AES_KEYSIZE_128 && 922 ctx->key_len != AES_KEYSIZE_256; 923} 924 925static void spacc_ablk_complete(struct spacc_req *req) 926{ 927 struct ablkcipher_request *ablk_req = 928 container_of(req->req, struct ablkcipher_request, base); 929 930 if (ablk_req->src != ablk_req->dst) { 931 spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src, 932 ablk_req->nbytes, DMA_TO_DEVICE); 933 spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst, 934 ablk_req->nbytes, DMA_FROM_DEVICE); 935 } else 936 spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst, 937 ablk_req->nbytes, DMA_BIDIRECTIONAL); 938 939 req->req->complete(req->req, req->result); 940} 941 942static int spacc_ablk_submit(struct spacc_req *req) 943{ 944 struct crypto_tfm *tfm = req->req->tfm; 945 struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); 946 struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req); 947 struct crypto_alg *alg = req->req->tfm->__crt_alg; 948 struct spacc_alg *spacc_alg = to_spacc_alg(alg); 949 struct spacc_engine *engine = ctx->generic.engine; 950 u32 ctrl; 951 952 req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key, 953 ctx->key_len, ablk_req->info, alg->cra_ablkcipher.ivsize, 954 NULL, 0); 955 956 writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET); 957 writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET); 958 writel(0, engine->regs + SPA_OFFSET_REG_OFFSET); 959 960 writel(ablk_req->nbytes, engine->regs + SPA_PROC_LEN_REG_OFFSET); 961 writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET); 962 writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET); 963 writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET); 964 965 ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) 
| 966 (req->is_encrypt ? (1 << SPA_CTRL_ENCRYPT_IDX) : 967 (1 << SPA_CTRL_KEY_EXP)); 968 969 mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT); 970 971 writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET); 972 973 return -EINPROGRESS; 974} 975 976static int spacc_ablk_do_fallback(struct ablkcipher_request *req, 977 unsigned alg_type, bool is_encrypt) 978{ 979 struct crypto_tfm *old_tfm = 980 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); 981 struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm); 982 int err; 983 984 if (!ctx->sw_cipher) 985 return -EINVAL; 986 987 /* 988 * Change the request to use the software fallback transform, and once 989 * the ciphering has completed, put the old transform back into the 990 * request. 991 */ 992 ablkcipher_request_set_tfm(req, ctx->sw_cipher); 993 err = is_encrypt ? crypto_ablkcipher_encrypt(req) : 994 crypto_ablkcipher_decrypt(req); 995 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(old_tfm)); 996 997 return err; 998} 999 1000static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type, 1001 bool is_encrypt) 1002{ 1003 struct crypto_alg *alg = req->base.tfm->__crt_alg; 1004 struct spacc_engine *engine = to_spacc_alg(alg)->engine; 1005 struct spacc_req *dev_req = ablkcipher_request_ctx(req); 1006 unsigned long flags; 1007 int err = -ENOMEM; 1008 1009 dev_req->req = &req->base; 1010 dev_req->is_encrypt = is_encrypt; 1011 dev_req->engine = engine; 1012 dev_req->complete = spacc_ablk_complete; 1013 dev_req->result = -EINPROGRESS; 1014 1015 if (unlikely(spacc_ablk_need_fallback(dev_req))) 1016 return spacc_ablk_do_fallback(req, alg_type, is_encrypt); 1017 1018 /* 1019 * Create the DDT's for the engine. If we share the same source and 1020 * destination then we can optimize by reusing the DDT's. 1021 */ 1022 if (req->src != req->dst) { 1023 dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src, 1024 req->nbytes, DMA_TO_DEVICE, &dev_req->src_addr); 1025 if (!dev_req->src_ddt) 1026 goto out; 1027 1028 dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst, 1029 req->nbytes, DMA_FROM_DEVICE, &dev_req->dst_addr); 1030 if (!dev_req->dst_ddt) 1031 goto out_free_src; 1032 } else { 1033 dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst, 1034 req->nbytes, DMA_BIDIRECTIONAL, &dev_req->dst_addr); 1035 if (!dev_req->dst_ddt) 1036 goto out; 1037 1038 dev_req->src_ddt = NULL; 1039 dev_req->src_addr = dev_req->dst_addr; 1040 } 1041 1042 err = -EINPROGRESS; 1043 spin_lock_irqsave(&engine->hw_lock, flags); 1044 /* 1045 * Check if the engine will accept the operation now. If it won't then 1046 * we either stick it on the end of a pending list if we can backlog, 1047 * or bailout with an error if not. 1048 */ 1049 if (unlikely(spacc_fifo_cmd_full(engine)) || 1050 engine->in_flight + 1 > engine->fifo_sz) { 1051 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { 1052 err = -EBUSY; 1053 spin_unlock_irqrestore(&engine->hw_lock, flags); 1054 goto out_free_ddts; 1055 } 1056 list_add_tail(&dev_req->list, &engine->pending); 1057 } else { 1058 list_add_tail(&dev_req->list, &engine->pending); 1059 spacc_push(engine); 1060 } 1061 spin_unlock_irqrestore(&engine->hw_lock, flags); 1062 1063 goto out; 1064 1065out_free_ddts: 1066 spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst, 1067 req->nbytes, req->src == req->dst ? 
1068 DMA_BIDIRECTIONAL : DMA_FROM_DEVICE); 1069out_free_src: 1070 if (req->src != req->dst) 1071 spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr, 1072 req->src, req->nbytes, DMA_TO_DEVICE); 1073out: 1074 return err; 1075} 1076 1077static int spacc_ablk_cra_init(struct crypto_tfm *tfm) 1078{ 1079 struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); 1080 struct crypto_alg *alg = tfm->__crt_alg; 1081 struct spacc_alg *spacc_alg = to_spacc_alg(alg); 1082 struct spacc_engine *engine = spacc_alg->engine; 1083 1084 ctx->generic.flags = spacc_alg->type; 1085 ctx->generic.engine = engine; 1086 if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) { 1087 ctx->sw_cipher = crypto_alloc_ablkcipher(alg->cra_name, 0, 1088 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); 1089 if (IS_ERR(ctx->sw_cipher)) { 1090 dev_warn(engine->dev, "failed to allocate fallback for %s\n", 1091 alg->cra_name); 1092 ctx->sw_cipher = NULL; 1093 } 1094 } 1095 ctx->generic.key_offs = spacc_alg->key_offs; 1096 ctx->generic.iv_offs = spacc_alg->iv_offs; 1097 1098 tfm->crt_ablkcipher.reqsize = sizeof(struct spacc_req); 1099 1100 return 0; 1101} 1102 1103static void spacc_ablk_cra_exit(struct crypto_tfm *tfm) 1104{ 1105 struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); 1106 1107 if (ctx->sw_cipher) 1108 crypto_free_ablkcipher(ctx->sw_cipher); 1109 ctx->sw_cipher = NULL; 1110} 1111 1112static int spacc_ablk_encrypt(struct ablkcipher_request *req) 1113{ 1114 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req); 1115 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 1116 struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg); 1117 1118 return spacc_ablk_setup(req, alg->type, 1); 1119} 1120 1121static int spacc_ablk_decrypt(struct ablkcipher_request *req) 1122{ 1123 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req); 1124 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 1125 struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg); 1126 1127 return spacc_ablk_setup(req, alg->type, 0); 1128} 1129 1130static inline int spacc_fifo_stat_empty(struct spacc_engine *engine) 1131{ 1132 return readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET) & 1133 SPA_FIFO_STAT_EMPTY; 1134} 1135 1136static void spacc_process_done(struct spacc_engine *engine) 1137{ 1138 struct spacc_req *req; 1139 unsigned long flags; 1140 1141 spin_lock_irqsave(&engine->hw_lock, flags); 1142 1143 while (!spacc_fifo_stat_empty(engine)) { 1144 req = list_first_entry(&engine->in_progress, struct spacc_req, 1145 list); 1146 list_move_tail(&req->list, &engine->completed); 1147 --engine->in_flight; 1148 1149 /* POP the status register. */ 1150 writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET); 1151 req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) & 1152 SPA_STATUS_RES_CODE_MASK) >> SPA_STATUS_RES_CODE_OFFSET; 1153 1154 /* 1155 * Convert the SPAcc error status into the standard POSIX error 1156 * codes. 
1157 */ 1158 if (unlikely(req->result)) { 1159 switch (req->result) { 1160 case SPA_STATUS_ICV_FAIL: 1161 req->result = -EBADMSG; 1162 break; 1163 1164 case SPA_STATUS_MEMORY_ERROR: 1165 dev_warn(engine->dev, 1166 "memory error triggered\n"); 1167 req->result = -EFAULT; 1168 break; 1169 1170 case SPA_STATUS_BLOCK_ERROR: 1171 dev_warn(engine->dev, 1172 "block error triggered\n"); 1173 req->result = -EIO; 1174 break; 1175 } 1176 } 1177 } 1178 1179 tasklet_schedule(&engine->complete); 1180 1181 spin_unlock_irqrestore(&engine->hw_lock, flags); 1182} 1183 1184static irqreturn_t spacc_spacc_irq(int irq, void *dev) 1185{ 1186 struct spacc_engine *engine = (struct spacc_engine *)dev; 1187 u32 spacc_irq_stat = readl(engine->regs + SPA_IRQ_STAT_REG_OFFSET); 1188 1189 writel(spacc_irq_stat, engine->regs + SPA_IRQ_STAT_REG_OFFSET); 1190 spacc_process_done(engine); 1191 1192 return IRQ_HANDLED; 1193} 1194 1195static void spacc_packet_timeout(unsigned long data) 1196{ 1197 struct spacc_engine *engine = (struct spacc_engine *)data; 1198 1199 spacc_process_done(engine); 1200} 1201 1202static int spacc_req_submit(struct spacc_req *req) 1203{ 1204 struct crypto_alg *alg = req->req->tfm->__crt_alg; 1205 1206 if (CRYPTO_ALG_TYPE_AEAD == (CRYPTO_ALG_TYPE_MASK & alg->cra_flags)) 1207 return spacc_aead_submit(req); 1208 else 1209 return spacc_ablk_submit(req); 1210} 1211 1212static void spacc_spacc_complete(unsigned long data) 1213{ 1214 struct spacc_engine *engine = (struct spacc_engine *)data; 1215 struct spacc_req *req, *tmp; 1216 unsigned long flags; 1217 LIST_HEAD(completed); 1218 1219 spin_lock_irqsave(&engine->hw_lock, flags); 1220 1221 list_splice_init(&engine->completed, &completed); 1222 spacc_push(engine); 1223 if (engine->in_flight) 1224 mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT); 1225 1226 spin_unlock_irqrestore(&engine->hw_lock, flags); 1227 1228 list_for_each_entry_safe(req, tmp, &completed, list) { 1229 list_del(&req->list); 1230 req->complete(req); 1231 } 1232} 1233 1234#ifdef CONFIG_PM 1235static int spacc_suspend(struct device *dev) 1236{ 1237 struct platform_device *pdev = to_platform_device(dev); 1238 struct spacc_engine *engine = platform_get_drvdata(pdev); 1239 1240 /* 1241 * We only support standby mode. All we have to do is gate the clock to 1242 * the spacc. The hardware will preserve state until we turn it back 1243 * on again. 1244 */ 1245 clk_disable(engine->clk); 1246 1247 return 0; 1248} 1249 1250static int spacc_resume(struct device *dev) 1251{ 1252 struct platform_device *pdev = to_platform_device(dev); 1253 struct spacc_engine *engine = platform_get_drvdata(pdev); 1254 1255 return clk_enable(engine->clk); 1256} 1257 1258static const struct dev_pm_ops spacc_pm_ops = { 1259 .suspend = spacc_suspend, 1260 .resume = spacc_resume, 1261}; 1262#endif /* CONFIG_PM */ 1263 1264static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev) 1265{ 1266 return dev ? 
platform_get_drvdata(to_platform_device(dev)) : NULL; 1267} 1268 1269static ssize_t spacc_stat_irq_thresh_show(struct device *dev, 1270 struct device_attribute *attr, 1271 char *buf) 1272{ 1273 struct spacc_engine *engine = spacc_dev_to_engine(dev); 1274 1275 return snprintf(buf, PAGE_SIZE, "%u\n", engine->stat_irq_thresh); 1276} 1277 1278static ssize_t spacc_stat_irq_thresh_store(struct device *dev, 1279 struct device_attribute *attr, 1280 const char *buf, size_t len) 1281{ 1282 struct spacc_engine *engine = spacc_dev_to_engine(dev); 1283 unsigned long thresh; 1284 1285 if (kstrtoul(buf, 0, &thresh)) 1286 return -EINVAL; 1287 1288 thresh = clamp(thresh, 1UL, engine->fifo_sz - 1); 1289 1290 engine->stat_irq_thresh = thresh; 1291 writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET, 1292 engine->regs + SPA_IRQ_CTRL_REG_OFFSET); 1293 1294 return len; 1295} 1296static DEVICE_ATTR(stat_irq_thresh, 0644, spacc_stat_irq_thresh_show, 1297 spacc_stat_irq_thresh_store); 1298 1299static struct spacc_alg ipsec_engine_algs[] = { 1300 { 1301 .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC, 1302 .key_offs = 0, 1303 .iv_offs = AES_MAX_KEY_SIZE, 1304 .alg = { 1305 .cra_name = "cbc(aes)", 1306 .cra_driver_name = "cbc-aes-picoxcell", 1307 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, 1308 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 1309 CRYPTO_ALG_KERN_DRIVER_ONLY | 1310 CRYPTO_ALG_ASYNC | 1311 CRYPTO_ALG_NEED_FALLBACK, 1312 .cra_blocksize = AES_BLOCK_SIZE, 1313 .cra_ctxsize = sizeof(struct spacc_ablk_ctx), 1314 .cra_type = &crypto_ablkcipher_type, 1315 .cra_module = THIS_MODULE, 1316 .cra_ablkcipher = { 1317 .setkey = spacc_aes_setkey, 1318 .encrypt = spacc_ablk_encrypt, 1319 .decrypt = spacc_ablk_decrypt, 1320 .min_keysize = AES_MIN_KEY_SIZE, 1321 .max_keysize = AES_MAX_KEY_SIZE, 1322 .ivsize = AES_BLOCK_SIZE, 1323 }, 1324 .cra_init = spacc_ablk_cra_init, 1325 .cra_exit = spacc_ablk_cra_exit, 1326 }, 1327 }, 1328 { 1329 .key_offs = 0, 1330 .iv_offs = AES_MAX_KEY_SIZE, 1331 .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB, 1332 .alg = { 1333 .cra_name = "ecb(aes)", 1334 .cra_driver_name = "ecb-aes-picoxcell", 1335 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, 1336 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 1337 CRYPTO_ALG_KERN_DRIVER_ONLY | 1338 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, 1339 .cra_blocksize = AES_BLOCK_SIZE, 1340 .cra_ctxsize = sizeof(struct spacc_ablk_ctx), 1341 .cra_type = &crypto_ablkcipher_type, 1342 .cra_module = THIS_MODULE, 1343 .cra_ablkcipher = { 1344 .setkey = spacc_aes_setkey, 1345 .encrypt = spacc_ablk_encrypt, 1346 .decrypt = spacc_ablk_decrypt, 1347 .min_keysize = AES_MIN_KEY_SIZE, 1348 .max_keysize = AES_MAX_KEY_SIZE, 1349 }, 1350 .cra_init = spacc_ablk_cra_init, 1351 .cra_exit = spacc_ablk_cra_exit, 1352 }, 1353 }, 1354 { 1355 .key_offs = DES_BLOCK_SIZE, 1356 .iv_offs = 0, 1357 .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC, 1358 .alg = { 1359 .cra_name = "cbc(des)", 1360 .cra_driver_name = "cbc-des-picoxcell", 1361 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, 1362 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 1363 CRYPTO_ALG_ASYNC | 1364 CRYPTO_ALG_KERN_DRIVER_ONLY, 1365 .cra_blocksize = DES_BLOCK_SIZE, 1366 .cra_ctxsize = sizeof(struct spacc_ablk_ctx), 1367 .cra_type = &crypto_ablkcipher_type, 1368 .cra_module = THIS_MODULE, 1369 .cra_ablkcipher = { 1370 .setkey = spacc_des_setkey, 1371 .encrypt = spacc_ablk_encrypt, 1372 .decrypt = spacc_ablk_decrypt, 1373 .min_keysize = DES_KEY_SIZE, 1374 .max_keysize = DES_KEY_SIZE, 1375 
.ivsize = DES_BLOCK_SIZE, 1376 }, 1377 .cra_init = spacc_ablk_cra_init, 1378 .cra_exit = spacc_ablk_cra_exit, 1379 }, 1380 }, 1381 { 1382 .key_offs = DES_BLOCK_SIZE, 1383 .iv_offs = 0, 1384 .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB, 1385 .alg = { 1386 .cra_name = "ecb(des)", 1387 .cra_driver_name = "ecb-des-picoxcell", 1388 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, 1389 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 1390 CRYPTO_ALG_ASYNC | 1391 CRYPTO_ALG_KERN_DRIVER_ONLY, 1392 .cra_blocksize = DES_BLOCK_SIZE, 1393 .cra_ctxsize = sizeof(struct spacc_ablk_ctx), 1394 .cra_type = &crypto_ablkcipher_type, 1395 .cra_module = THIS_MODULE, 1396 .cra_ablkcipher = { 1397 .setkey = spacc_des_setkey, 1398 .encrypt = spacc_ablk_encrypt, 1399 .decrypt = spacc_ablk_decrypt, 1400 .min_keysize = DES_KEY_SIZE, 1401 .max_keysize = DES_KEY_SIZE, 1402 }, 1403 .cra_init = spacc_ablk_cra_init, 1404 .cra_exit = spacc_ablk_cra_exit, 1405 }, 1406 }, 1407 { 1408 .key_offs = DES_BLOCK_SIZE, 1409 .iv_offs = 0, 1410 .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC, 1411 .alg = { 1412 .cra_name = "cbc(des3_ede)", 1413 .cra_driver_name = "cbc-des3-ede-picoxcell", 1414 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, 1415 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 1416 CRYPTO_ALG_ASYNC | 1417 CRYPTO_ALG_KERN_DRIVER_ONLY, 1418 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1419 .cra_ctxsize = sizeof(struct spacc_ablk_ctx), 1420 .cra_type = &crypto_ablkcipher_type, 1421 .cra_module = THIS_MODULE, 1422 .cra_ablkcipher = { 1423 .setkey = spacc_des_setkey, 1424 .encrypt = spacc_ablk_encrypt, 1425 .decrypt = spacc_ablk_decrypt, 1426 .min_keysize = DES3_EDE_KEY_SIZE, 1427 .max_keysize = DES3_EDE_KEY_SIZE, 1428 .ivsize = DES3_EDE_BLOCK_SIZE, 1429 }, 1430 .cra_init = spacc_ablk_cra_init, 1431 .cra_exit = spacc_ablk_cra_exit, 1432 }, 1433 }, 1434 { 1435 .key_offs = DES_BLOCK_SIZE, 1436 .iv_offs = 0, 1437 .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB, 1438 .alg = { 1439 .cra_name = "ecb(des3_ede)", 1440 .cra_driver_name = "ecb-des3-ede-picoxcell", 1441 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, 1442 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 1443 CRYPTO_ALG_ASYNC | 1444 CRYPTO_ALG_KERN_DRIVER_ONLY, 1445 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1446 .cra_ctxsize = sizeof(struct spacc_ablk_ctx), 1447 .cra_type = &crypto_ablkcipher_type, 1448 .cra_module = THIS_MODULE, 1449 .cra_ablkcipher = { 1450 .setkey = spacc_des_setkey, 1451 .encrypt = spacc_ablk_encrypt, 1452 .decrypt = spacc_ablk_decrypt, 1453 .min_keysize = DES3_EDE_KEY_SIZE, 1454 .max_keysize = DES3_EDE_KEY_SIZE, 1455 }, 1456 .cra_init = spacc_ablk_cra_init, 1457 .cra_exit = spacc_ablk_cra_exit, 1458 }, 1459 }, 1460 { 1461 .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | 1462 SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC, 1463 .key_offs = 0, 1464 .iv_offs = AES_MAX_KEY_SIZE, 1465 .alg = { 1466 .cra_name = "authenc(hmac(sha1),cbc(aes))", 1467 .cra_driver_name = "authenc-hmac-sha1-cbc-aes-picoxcell", 1468 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, 1469 .cra_flags = CRYPTO_ALG_TYPE_AEAD | 1470 CRYPTO_ALG_ASYNC | 1471 CRYPTO_ALG_KERN_DRIVER_ONLY, 1472 .cra_blocksize = AES_BLOCK_SIZE, 1473 .cra_ctxsize = sizeof(struct spacc_aead_ctx), 1474 .cra_type = &crypto_aead_type, 1475 .cra_module = THIS_MODULE, 1476 .cra_aead = { 1477 .setkey = spacc_aead_setkey, 1478 .setauthsize = spacc_aead_setauthsize, 1479 .encrypt = spacc_aead_encrypt, 1480 .decrypt = spacc_aead_decrypt, 1481 .givencrypt = spacc_aead_givencrypt, 1482 .ivsize = 
AES_BLOCK_SIZE, 1483 .maxauthsize = SHA1_DIGEST_SIZE, 1484 }, 1485 .cra_init = spacc_aead_cra_init, 1486 .cra_exit = spacc_aead_cra_exit, 1487 }, 1488 }, 1489 { 1490 .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | 1491 SPA_CTRL_HASH_ALG_SHA256 | 1492 SPA_CTRL_HASH_MODE_HMAC, 1493 .key_offs = 0, 1494 .iv_offs = AES_MAX_KEY_SIZE, 1495 .alg = { 1496 .cra_name = "authenc(hmac(sha256),cbc(aes))", 1497 .cra_driver_name = "authenc-hmac-sha256-cbc-aes-picoxcell", 1498 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, 1499 .cra_flags = CRYPTO_ALG_TYPE_AEAD | 1500 CRYPTO_ALG_ASYNC | 1501 CRYPTO_ALG_KERN_DRIVER_ONLY, 1502 .cra_blocksize = AES_BLOCK_SIZE, 1503 .cra_ctxsize = sizeof(struct spacc_aead_ctx), 1504 .cra_type = &crypto_aead_type, 1505 .cra_module = THIS_MODULE, 1506 .cra_aead = { 1507 .setkey = spacc_aead_setkey, 1508 .setauthsize = spacc_aead_setauthsize, 1509 .encrypt = spacc_aead_encrypt, 1510 .decrypt = spacc_aead_decrypt, 1511 .givencrypt = spacc_aead_givencrypt, 1512 .ivsize = AES_BLOCK_SIZE, 1513 .maxauthsize = SHA256_DIGEST_SIZE, 1514 }, 1515 .cra_init = spacc_aead_cra_init, 1516 .cra_exit = spacc_aead_cra_exit, 1517 }, 1518 }, 1519 { 1520 .key_offs = 0, 1521 .iv_offs = AES_MAX_KEY_SIZE, 1522 .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | 1523 SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC, 1524 .alg = { 1525 .cra_name = "authenc(hmac(md5),cbc(aes))", 1526 .cra_driver_name = "authenc-hmac-md5-cbc-aes-picoxcell", 1527 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, 1528 .cra_flags = CRYPTO_ALG_TYPE_AEAD | 1529 CRYPTO_ALG_ASYNC | 1530 CRYPTO_ALG_KERN_DRIVER_ONLY, 1531 .cra_blocksize = AES_BLOCK_SIZE, 1532 .cra_ctxsize = sizeof(struct spacc_aead_ctx), 1533 .cra_type = &crypto_aead_type, 1534 .cra_module = THIS_MODULE, 1535 .cra_aead = { 1536 .setkey = spacc_aead_setkey, 1537 .setauthsize = spacc_aead_setauthsize, 1538 .encrypt = spacc_aead_encrypt, 1539 .decrypt = spacc_aead_decrypt, 1540 .givencrypt = spacc_aead_givencrypt, 1541 .ivsize = AES_BLOCK_SIZE, 1542 .maxauthsize = MD5_DIGEST_SIZE, 1543 }, 1544 .cra_init = spacc_aead_cra_init, 1545 .cra_exit = spacc_aead_cra_exit, 1546 }, 1547 }, 1548 { 1549 .key_offs = DES_BLOCK_SIZE, 1550 .iv_offs = 0, 1551 .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC | 1552 SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC, 1553 .alg = { 1554 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", 1555 .cra_driver_name = "authenc-hmac-sha1-cbc-3des-picoxcell", 1556 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, 1557 .cra_flags = CRYPTO_ALG_TYPE_AEAD | 1558 CRYPTO_ALG_ASYNC | 1559 CRYPTO_ALG_KERN_DRIVER_ONLY, 1560 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1561 .cra_ctxsize = sizeof(struct spacc_aead_ctx), 1562 .cra_type = &crypto_aead_type, 1563 .cra_module = THIS_MODULE, 1564 .cra_aead = { 1565 .setkey = spacc_aead_setkey, 1566 .setauthsize = spacc_aead_setauthsize, 1567 .encrypt = spacc_aead_encrypt, 1568 .decrypt = spacc_aead_decrypt, 1569 .givencrypt = spacc_aead_givencrypt, 1570 .ivsize = DES3_EDE_BLOCK_SIZE, 1571 .maxauthsize = SHA1_DIGEST_SIZE, 1572 }, 1573 .cra_init = spacc_aead_cra_init, 1574 .cra_exit = spacc_aead_cra_exit, 1575 }, 1576 }, 1577 { 1578 .key_offs = DES_BLOCK_SIZE, 1579 .iv_offs = 0, 1580 .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | 1581 SPA_CTRL_HASH_ALG_SHA256 | 1582 SPA_CTRL_HASH_MODE_HMAC, 1583 .alg = { 1584 .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", 1585 .cra_driver_name = "authenc-hmac-sha256-cbc-3des-picoxcell", 1586 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, 1587 .cra_flags 
= CRYPTO_ALG_TYPE_AEAD | 1588 CRYPTO_ALG_ASYNC | 1589 CRYPTO_ALG_KERN_DRIVER_ONLY, 1590 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1591 .cra_ctxsize = sizeof(struct spacc_aead_ctx), 1592 .cra_type = &crypto_aead_type, 1593 .cra_module = THIS_MODULE, 1594 .cra_aead = { 1595 .setkey = spacc_aead_setkey, 1596 .setauthsize = spacc_aead_setauthsize, 1597 .encrypt = spacc_aead_encrypt, 1598 .decrypt = spacc_aead_decrypt, 1599 .givencrypt = spacc_aead_givencrypt, 1600 .ivsize = DES3_EDE_BLOCK_SIZE, 1601 .maxauthsize = SHA256_DIGEST_SIZE, 1602 }, 1603 .cra_init = spacc_aead_cra_init, 1604 .cra_exit = spacc_aead_cra_exit, 1605 }, 1606 }, 1607 { 1608 .key_offs = DES_BLOCK_SIZE, 1609 .iv_offs = 0, 1610 .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC | 1611 SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC, 1612 .alg = { 1613 .cra_name = "authenc(hmac(md5),cbc(des3_ede))", 1614 .cra_driver_name = "authenc-hmac-md5-cbc-3des-picoxcell", 1615 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, 1616 .cra_flags = CRYPTO_ALG_TYPE_AEAD | 1617 CRYPTO_ALG_ASYNC | 1618 CRYPTO_ALG_KERN_DRIVER_ONLY, 1619 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1620 .cra_ctxsize = sizeof(struct spacc_aead_ctx), 1621 .cra_type = &crypto_aead_type, 1622 .cra_module = THIS_MODULE, 1623 .cra_aead = { 1624 .setkey = spacc_aead_setkey, 1625 .setauthsize = spacc_aead_setauthsize, 1626 .encrypt = spacc_aead_encrypt, 1627 .decrypt = spacc_aead_decrypt, 1628 .givencrypt = spacc_aead_givencrypt, 1629 .ivsize = DES3_EDE_BLOCK_SIZE, 1630 .maxauthsize = MD5_DIGEST_SIZE, 1631 }, 1632 .cra_init = spacc_aead_cra_init, 1633 .cra_exit = spacc_aead_cra_exit, 1634 }, 1635 }, 1636}; 1637 1638static struct spacc_alg l2_engine_algs[] = { 1639 { 1640 .key_offs = 0, 1641 .iv_offs = SPACC_CRYPTO_KASUMI_F8_KEY_LEN, 1642 .ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI | 1643 SPA_CTRL_CIPH_MODE_F8, 1644 .alg = { 1645 .cra_name = "f8(kasumi)", 1646 .cra_driver_name = "f8-kasumi-picoxcell", 1647 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, 1648 .cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | 1649 CRYPTO_ALG_ASYNC | 1650 CRYPTO_ALG_KERN_DRIVER_ONLY, 1651 .cra_blocksize = 8, 1652 .cra_ctxsize = sizeof(struct spacc_ablk_ctx), 1653 .cra_type = &crypto_ablkcipher_type, 1654 .cra_module = THIS_MODULE, 1655 .cra_ablkcipher = { 1656 .setkey = spacc_kasumi_f8_setkey, 1657 .encrypt = spacc_ablk_encrypt, 1658 .decrypt = spacc_ablk_decrypt, 1659 .min_keysize = 16, 1660 .max_keysize = 16, 1661 .ivsize = 8, 1662 }, 1663 .cra_init = spacc_ablk_cra_init, 1664 .cra_exit = spacc_ablk_cra_exit, 1665 }, 1666 }, 1667}; 1668 1669#ifdef CONFIG_OF 1670static const struct of_device_id spacc_of_id_table[] = { 1671 { .compatible = "picochip,spacc-ipsec" }, 1672 { .compatible = "picochip,spacc-l2" }, 1673 {} 1674}; 1675#endif /* CONFIG_OF */ 1676 1677static bool spacc_is_compatible(struct platform_device *pdev, 1678 const char *spacc_type) 1679{ 1680 const struct platform_device_id *platid = platform_get_device_id(pdev); 1681 1682 if (platid && !strcmp(platid->name, spacc_type)) 1683 return true; 1684 1685#ifdef CONFIG_OF 1686 if (of_device_is_compatible(pdev->dev.of_node, spacc_type)) 1687 return true; 1688#endif /* CONFIG_OF */ 1689 1690 return false; 1691} 1692 1693static int spacc_probe(struct platform_device *pdev) 1694{ 1695 int i, err, ret = -EINVAL; 1696 struct resource *mem, *irq; 1697 struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine), 1698 GFP_KERNEL); 1699 if (!engine) 1700 return -ENOMEM; 1701 1702 if (spacc_is_compatible(pdev, "picochip,spacc-ipsec")) { 1703 engine->max_ctxs 
= SPACC_CRYPTO_IPSEC_MAX_CTXS;
		engine->cipher_pg_sz = SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ;
		engine->hash_pg_sz = SPACC_CRYPTO_IPSEC_HASH_PG_SZ;
		engine->fifo_sz = SPACC_CRYPTO_IPSEC_FIFO_SZ;
		engine->algs = ipsec_engine_algs;
		engine->num_algs = ARRAY_SIZE(ipsec_engine_algs);
	} else if (spacc_is_compatible(pdev, "picochip,spacc-l2")) {
		engine->max_ctxs = SPACC_CRYPTO_L2_MAX_CTXS;
		engine->cipher_pg_sz = SPACC_CRYPTO_L2_CIPHER_PG_SZ;
		engine->hash_pg_sz = SPACC_CRYPTO_L2_HASH_PG_SZ;
		engine->fifo_sz = SPACC_CRYPTO_L2_FIFO_SZ;
		engine->algs = l2_engine_algs;
		engine->num_algs = ARRAY_SIZE(l2_engine_algs);
	} else {
		return -EINVAL;
	}

	engine->name = dev_name(&pdev->dev);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	engine->regs = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(engine->regs))
		return PTR_ERR(engine->regs);

	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq) {
		dev_err(&pdev->dev, "no memory/irq resource for engine\n");
		return -ENXIO;
	}

	if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
			     engine->name, engine)) {
		/* engine->dev is not set yet, so report through &pdev->dev. */
		dev_err(&pdev->dev, "failed to request IRQ\n");
		return -EBUSY;
	}

	engine->dev = &pdev->dev;
	engine->cipher_ctx_base = engine->regs + SPA_CIPH_KEY_BASE_REG_OFFSET;
	engine->hash_key_base = engine->regs + SPA_HASH_KEY_BASE_REG_OFFSET;

	engine->req_pool = dmam_pool_create(engine->name, engine->dev,
			MAX_DDT_LEN * sizeof(struct spacc_ddt), 8, SZ_64K);
	if (!engine->req_pool)
		return -ENOMEM;

	spin_lock_init(&engine->hw_lock);

	engine->clk = clk_get(&pdev->dev, "ref");
	if (IS_ERR(engine->clk)) {
		dev_info(&pdev->dev, "clk unavailable\n");
		return PTR_ERR(engine->clk);
	}

	if (clk_enable(engine->clk)) {
		dev_info(&pdev->dev, "unable to enable clk\n");
		clk_put(engine->clk);
		return -EIO;
	}

	err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
	if (err) {
		clk_disable(engine->clk);
		clk_put(engine->clk);
		return err;
	}

	/*
	 * Use an IRQ threshold of 50% as a default. This seems to be a
	 * reasonable trade-off of latency against throughput but can be
	 * changed at runtime.
	 */
	engine->stat_irq_thresh = (engine->fifo_sz / 2);

	/*
	 * Configure the interrupts. We only use the STAT_CNT interrupt as we
	 * only submit a new packet for processing when we complete another in
	 * the queue. This minimizes time spent in the interrupt handler.
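	 * (The STAT_CNT threshold programmed here comes from
	 * engine->stat_irq_thresh and can be changed at runtime via the
	 * stat_irq_thresh sysfs attribute created above.)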
1782 */ 1783 writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET, 1784 engine->regs + SPA_IRQ_CTRL_REG_OFFSET); 1785 writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN, 1786 engine->regs + SPA_IRQ_EN_REG_OFFSET); 1787 1788 setup_timer(&engine->packet_timeout, spacc_packet_timeout, 1789 (unsigned long)engine); 1790 1791 INIT_LIST_HEAD(&engine->pending); 1792 INIT_LIST_HEAD(&engine->completed); 1793 INIT_LIST_HEAD(&engine->in_progress); 1794 engine->in_flight = 0; 1795 tasklet_init(&engine->complete, spacc_spacc_complete, 1796 (unsigned long)engine); 1797 1798 platform_set_drvdata(pdev, engine); 1799 1800 INIT_LIST_HEAD(&engine->registered_algs); 1801 for (i = 0; i < engine->num_algs; ++i) { 1802 engine->algs[i].engine = engine; 1803 err = crypto_register_alg(&engine->algs[i].alg); 1804 if (!err) { 1805 list_add_tail(&engine->algs[i].entry, 1806 &engine->registered_algs); 1807 ret = 0; 1808 } 1809 if (err) 1810 dev_err(engine->dev, "failed to register alg \"%s\"\n", 1811 engine->algs[i].alg.cra_name); 1812 else 1813 dev_dbg(engine->dev, "registered alg \"%s\"\n", 1814 engine->algs[i].alg.cra_name); 1815 } 1816 1817 return ret; 1818} 1819 1820static int spacc_remove(struct platform_device *pdev) 1821{ 1822 struct spacc_alg *alg, *next; 1823 struct spacc_engine *engine = platform_get_drvdata(pdev); 1824 1825 del_timer_sync(&engine->packet_timeout); 1826 device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh); 1827 1828 list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) { 1829 list_del(&alg->entry); 1830 crypto_unregister_alg(&alg->alg); 1831 } 1832 1833 clk_disable(engine->clk); 1834 clk_put(engine->clk); 1835 1836 return 0; 1837} 1838 1839static const struct platform_device_id spacc_id_table[] = { 1840 { "picochip,spacc-ipsec", }, 1841 { "picochip,spacc-l2", }, 1842 { } 1843}; 1844 1845static struct platform_driver spacc_driver = { 1846 .probe = spacc_probe, 1847 .remove = spacc_remove, 1848 .driver = { 1849 .name = "picochip,spacc", 1850#ifdef CONFIG_PM 1851 .pm = &spacc_pm_ops, 1852#endif /* CONFIG_PM */ 1853 .of_match_table = of_match_ptr(spacc_of_id_table), 1854 }, 1855 .id_table = spacc_id_table, 1856}; 1857 1858module_platform_driver(spacc_driver); 1859 1860MODULE_LICENSE("GPL"); 1861MODULE_AUTHOR("Jamie Iles"); 1862
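
/*
 * Illustrative usage sketch (for documentation only, not part of the
 * original driver): once this module has registered its algorithms, a
 * kernel consumer can request the hardware implementation explicitly by
 * driver name, e.g.:
 *
 *	u8 key[AES_KEYSIZE_128] = { 0 };
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc-aes-picoxcell", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		crypto_ablkcipher_setkey(tfm, key, sizeof(key));
 *		crypto_free_ablkcipher(tfm);
 *	}
 */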