/* geode-aes.c — AMD Geode LX hardware AES engine driver */
  1. /* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License as published by
  5. * the Free Software Foundation; either version 2 of the License, or
  6. * (at your option) any later version.
  7. */
  8. #include <linux/module.h>
  9. #include <linux/kernel.h>
  10. #include <linux/pci.h>
  11. #include <linux/pci_ids.h>
  12. #include <linux/crypto.h>
  13. #include <linux/spinlock.h>
  14. #include <crypto/algapi.h>
  15. #include <crypto/aes.h>
  16. #include <linux/io.h>
  17. #include <linux/delay.h>
  18. #include "geode-aes.h"
/* Static structures */
static void __iomem *_iobase;	/* MMIO mapping of the AES engine (pci_iomap of BAR 0) */
static spinlock_t lock;		/* serializes all access to the single hardware engine */
  22. /* Write a 128 bit field (either a writable key or IV) */
  23. static inline void
  24. _writefield(u32 offset, void *value)
  25. {
  26. int i;
  27. for (i = 0; i < 4; i++)
  28. iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
  29. }
  30. /* Read a 128 bit field (either a writable key or IV) */
  31. static inline void
  32. _readfield(u32 offset, void *value)
  33. {
  34. int i;
  35. for (i = 0; i < 4; i++)
  36. ((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
  37. }
/*
 * Program one AES operation into the engine and busy-wait for completion.
 *
 * @src/@dst: virtual addresses of contiguous buffers; translated with
 *            virt_to_phys() for the engine's DMA registers.
 * @len:      byte count for the operation.
 * @flags:    control bits OR'ed with AES_CTRL_START into the control reg.
 *
 * Returns 0 on success, 1 if the completion bit never appeared within
 * AES_OP_TIMEOUT polls.  Must be called with the engine lock held (see
 * geode_aes_crypt()) since the register set is shared.
 */
static int
do_crypt(void *src, void *dst, int len, u32 flags)
{
	u32 status;
	u32 counter = AES_OP_TIMEOUT;

	iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG);
	iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
	iowrite32(len, _iobase + AES_LENA_REG);

	/* Start the operation */
	iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);

	/* Poll the interrupt status register until completion (or timeout) */
	do {
		status = ioread32(_iobase + AES_INTR_REG);
		cpu_relax();
	} while (!(status & AES_INTRA_PENDING) && --counter);

	/* Clear the event */
	iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);

	return counter ? 0 : 1;
}
/*
 * Run the operation described by @op on the hardware engine.
 *
 * Holds the global engine spinlock with interrupts disabled across the
 * whole key/IV setup + do_crypt() sequence, because there is a single
 * set of hardware registers.  For CBC, the IV is written before the
 * operation and read back afterwards so chaining continues correctly
 * across successive calls.
 *
 * Returns the number of bytes processed (op->len), or 0 if op->len == 0.
 */
static unsigned int
geode_aes_crypt(struct geode_aes_op *op)
{
	u32 flags = 0;
	unsigned long iflags;
	int ret;

	if (op->len == 0)
		return 0;

	/* If the source and destination is the same, then
	 * we need to turn on the coherent flags, otherwise
	 * we don't need to worry
	 */
	flags |= (AES_CTRL_DCA | AES_CTRL_SCA);

	if (op->dir == AES_DIR_ENCRYPT)
		flags |= AES_CTRL_ENCRYPT;

	/* Start the critical section */
	spin_lock_irqsave(&lock, iflags);

	if (op->mode == AES_MODE_CBC) {
		flags |= AES_CTRL_CBC;
		_writefield(AES_WRITEIV0_REG, op->iv);
	}

	if (!(op->flags & AES_FLAGS_HIDDENKEY)) {
		flags |= AES_CTRL_WRKEY;
		_writefield(AES_WRITEKEY0_REG, op->key);
	}

	ret = do_crypt(op->src, op->dst, op->len, flags);
	BUG_ON(ret);	/* a hardware timeout is treated as unrecoverable */

	if (op->mode == AES_MODE_CBC)
		_readfield(AES_WRITEIV0_REG, op->iv);	/* read back the chained IV */

	spin_unlock_irqrestore(&lock, iflags);

	return op->len;
}
  88. /* CRYPTO-API Functions */
  89. static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
  90. unsigned int len)
  91. {
  92. struct geode_aes_op *op = crypto_tfm_ctx(tfm);
  93. unsigned int ret;
  94. op->keylen = len;
  95. if (len == AES_KEYSIZE_128) {
  96. memcpy(op->key, key, len);
  97. return 0;
  98. }
  99. if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
  100. /* not supported at all */
  101. tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
  102. return -EINVAL;
  103. }
  104. /*
  105. * The requested key size is not supported by HW, do a fallback
  106. */
  107. op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
  108. op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
  109. ret = crypto_cipher_setkey(op->fallback.cip, key, len);
  110. if (ret) {
  111. tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
  112. tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK);
  113. }
  114. return ret;
  115. }
  116. static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
  117. unsigned int len)
  118. {
  119. struct geode_aes_op *op = crypto_tfm_ctx(tfm);
  120. unsigned int ret;
  121. op->keylen = len;
  122. if (len == AES_KEYSIZE_128) {
  123. memcpy(op->key, key, len);
  124. return 0;
  125. }
  126. if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
  127. /* not supported at all */
  128. tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
  129. return -EINVAL;
  130. }
  131. /*
  132. * The requested key size is not supported by HW, do a fallback
  133. */
  134. op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
  135. op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
  136. ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
  137. if (ret) {
  138. tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
  139. tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
  140. }
  141. return ret;
  142. }
  143. static int fallback_blk_dec(struct blkcipher_desc *desc,
  144. struct scatterlist *dst, struct scatterlist *src,
  145. unsigned int nbytes)
  146. {
  147. unsigned int ret;
  148. struct crypto_blkcipher *tfm;
  149. struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
  150. tfm = desc->tfm;
  151. desc->tfm = op->fallback.blk;
  152. ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
  153. desc->tfm = tfm;
  154. return ret;
  155. }
  156. static int fallback_blk_enc(struct blkcipher_desc *desc,
  157. struct scatterlist *dst, struct scatterlist *src,
  158. unsigned int nbytes)
  159. {
  160. unsigned int ret;
  161. struct crypto_blkcipher *tfm;
  162. struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
  163. tfm = desc->tfm;
  164. desc->tfm = op->fallback.blk;
  165. ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
  166. desc->tfm = tfm;
  167. return ret;
  168. }
  169. static void
  170. geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
  171. {
  172. struct geode_aes_op *op = crypto_tfm_ctx(tfm);
  173. if (unlikely(op->keylen != AES_KEYSIZE_128)) {
  174. crypto_cipher_encrypt_one(op->fallback.cip, out, in);
  175. return;
  176. }
  177. op->src = (void *) in;
  178. op->dst = (void *) out;
  179. op->mode = AES_MODE_ECB;
  180. op->flags = 0;
  181. op->len = AES_MIN_BLOCK_SIZE;
  182. op->dir = AES_DIR_ENCRYPT;
  183. geode_aes_crypt(op);
  184. }
  185. static void
  186. geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
  187. {
  188. struct geode_aes_op *op = crypto_tfm_ctx(tfm);
  189. if (unlikely(op->keylen != AES_KEYSIZE_128)) {
  190. crypto_cipher_decrypt_one(op->fallback.cip, out, in);
  191. return;
  192. }
  193. op->src = (void *) in;
  194. op->dst = (void *) out;
  195. op->mode = AES_MODE_ECB;
  196. op->flags = 0;
  197. op->len = AES_MIN_BLOCK_SIZE;
  198. op->dir = AES_DIR_DECRYPT;
  199. geode_aes_crypt(op);
  200. }
  201. static int fallback_init_cip(struct crypto_tfm *tfm)
  202. {
  203. const char *name = tfm->__crt_alg->cra_name;
  204. struct geode_aes_op *op = crypto_tfm_ctx(tfm);
  205. op->fallback.cip = crypto_alloc_cipher(name, 0,
  206. CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
  207. if (IS_ERR(op->fallback.cip)) {
  208. printk(KERN_ERR "Error allocating fallback algo %s\n", name);
  209. return PTR_ERR(op->fallback.cip);
  210. }
  211. return 0;
  212. }
  213. static void fallback_exit_cip(struct crypto_tfm *tfm)
  214. {
  215. struct geode_aes_op *op = crypto_tfm_ctx(tfm);
  216. crypto_free_cipher(op->fallback.cip);
  217. op->fallback.cip = NULL;
  218. }
/* Plain AES single-block cipher, backed by the hardware engine with a
 * software fallback for unsupported key sizes. */
static struct crypto_alg geode_alg = {
	.cra_name		=	"aes",
	.cra_driver_name	=	"geode-aes",
	.cra_priority		=	300,
	.cra_alignmask		=	15,	/* engine needs 16-byte aligned buffers */
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_init		=	fallback_init_cip,
	.cra_exit		=	fallback_exit_cip,
	.cra_blocksize		=	AES_MIN_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct geode_aes_op),
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(geode_alg.cra_list),
	.cra_u			=	{
		.cipher	=	{
			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
			.cia_setkey		=	geode_setkey_cip,
			.cia_encrypt		=	geode_encrypt,
			.cia_decrypt		=	geode_decrypt
		}
	}
};
  242. static int
  243. geode_cbc_decrypt(struct blkcipher_desc *desc,
  244. struct scatterlist *dst, struct scatterlist *src,
  245. unsigned int nbytes)
  246. {
  247. struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
  248. struct blkcipher_walk walk;
  249. int err, ret;
  250. if (unlikely(op->keylen != AES_KEYSIZE_128))
  251. return fallback_blk_dec(desc, dst, src, nbytes);
  252. blkcipher_walk_init(&walk, dst, src, nbytes);
  253. err = blkcipher_walk_virt(desc, &walk);
  254. op->iv = walk.iv;
  255. while ((nbytes = walk.nbytes)) {
  256. op->src = walk.src.virt.addr,
  257. op->dst = walk.dst.virt.addr;
  258. op->mode = AES_MODE_CBC;
  259. op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
  260. op->dir = AES_DIR_DECRYPT;
  261. ret = geode_aes_crypt(op);
  262. nbytes -= ret;
  263. err = blkcipher_walk_done(desc, &walk, nbytes);
  264. }
  265. return err;
  266. }
  267. static int
  268. geode_cbc_encrypt(struct blkcipher_desc *desc,
  269. struct scatterlist *dst, struct scatterlist *src,
  270. unsigned int nbytes)
  271. {
  272. struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
  273. struct blkcipher_walk walk;
  274. int err, ret;
  275. if (unlikely(op->keylen != AES_KEYSIZE_128))
  276. return fallback_blk_enc(desc, dst, src, nbytes);
  277. blkcipher_walk_init(&walk, dst, src, nbytes);
  278. err = blkcipher_walk_virt(desc, &walk);
  279. op->iv = walk.iv;
  280. while ((nbytes = walk.nbytes)) {
  281. op->src = walk.src.virt.addr,
  282. op->dst = walk.dst.virt.addr;
  283. op->mode = AES_MODE_CBC;
  284. op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
  285. op->dir = AES_DIR_ENCRYPT;
  286. ret = geode_aes_crypt(op);
  287. nbytes -= ret;
  288. err = blkcipher_walk_done(desc, &walk, nbytes);
  289. }
  290. return err;
  291. }
  292. static int fallback_init_blk(struct crypto_tfm *tfm)
  293. {
  294. const char *name = tfm->__crt_alg->cra_name;
  295. struct geode_aes_op *op = crypto_tfm_ctx(tfm);
  296. op->fallback.blk = crypto_alloc_blkcipher(name, 0,
  297. CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
  298. if (IS_ERR(op->fallback.blk)) {
  299. printk(KERN_ERR "Error allocating fallback algo %s\n", name);
  300. return PTR_ERR(op->fallback.blk);
  301. }
  302. return 0;
  303. }
  304. static void fallback_exit_blk(struct crypto_tfm *tfm)
  305. {
  306. struct geode_aes_op *op = crypto_tfm_ctx(tfm);
  307. crypto_free_blkcipher(op->fallback.blk);
  308. op->fallback.blk = NULL;
  309. }
/* CBC(AES) blkcipher backed by the hardware engine, with a software
 * fallback for unsupported key sizes. */
static struct crypto_alg geode_cbc_alg = {
	.cra_name		=	"cbc(aes)",
	.cra_driver_name	=	"cbc-aes-geode",
	.cra_priority		=	400,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_init		=	fallback_init_blk,
	.cra_exit		=	fallback_exit_blk,
	.cra_blocksize		=	AES_MIN_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct geode_aes_op),
	.cra_alignmask		=	15,	/* engine needs 16-byte aligned buffers */
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(geode_cbc_alg.cra_list),
	.cra_u			=	{
		.blkcipher	=	{
			.min_keysize	=	AES_MIN_KEY_SIZE,
			.max_keysize	=	AES_MAX_KEY_SIZE,
			.setkey		=	geode_setkey_blk,
			.encrypt	=	geode_cbc_encrypt,
			.decrypt	=	geode_cbc_decrypt,
			.ivsize		=	AES_IV_LENGTH,
		}
	}
};
  335. static int
  336. geode_ecb_decrypt(struct blkcipher_desc *desc,
  337. struct scatterlist *dst, struct scatterlist *src,
  338. unsigned int nbytes)
  339. {
  340. struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
  341. struct blkcipher_walk walk;
  342. int err, ret;
  343. if (unlikely(op->keylen != AES_KEYSIZE_128))
  344. return fallback_blk_dec(desc, dst, src, nbytes);
  345. blkcipher_walk_init(&walk, dst, src, nbytes);
  346. err = blkcipher_walk_virt(desc, &walk);
  347. while ((nbytes = walk.nbytes)) {
  348. op->src = walk.src.virt.addr,
  349. op->dst = walk.dst.virt.addr;
  350. op->mode = AES_MODE_ECB;
  351. op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
  352. op->dir = AES_DIR_DECRYPT;
  353. ret = geode_aes_crypt(op);
  354. nbytes -= ret;
  355. err = blkcipher_walk_done(desc, &walk, nbytes);
  356. }
  357. return err;
  358. }
  359. static int
  360. geode_ecb_encrypt(struct blkcipher_desc *desc,
  361. struct scatterlist *dst, struct scatterlist *src,
  362. unsigned int nbytes)
  363. {
  364. struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
  365. struct blkcipher_walk walk;
  366. int err, ret;
  367. if (unlikely(op->keylen != AES_KEYSIZE_128))
  368. return fallback_blk_enc(desc, dst, src, nbytes);
  369. blkcipher_walk_init(&walk, dst, src, nbytes);
  370. err = blkcipher_walk_virt(desc, &walk);
  371. while ((nbytes = walk.nbytes)) {
  372. op->src = walk.src.virt.addr,
  373. op->dst = walk.dst.virt.addr;
  374. op->mode = AES_MODE_ECB;
  375. op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
  376. op->dir = AES_DIR_ENCRYPT;
  377. ret = geode_aes_crypt(op);
  378. nbytes -= ret;
  379. ret = blkcipher_walk_done(desc, &walk, nbytes);
  380. }
  381. return err;
  382. }
/* ECB(AES) blkcipher backed by the hardware engine, with a software
 * fallback for unsupported key sizes. */
static struct crypto_alg geode_ecb_alg = {
	.cra_name		=	"ecb(aes)",
	.cra_driver_name	=	"ecb-aes-geode",
	.cra_priority		=	400,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_init		=	fallback_init_blk,
	.cra_exit		=	fallback_exit_blk,
	.cra_blocksize		=	AES_MIN_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct geode_aes_op),
	.cra_alignmask		=	15,	/* engine needs 16-byte aligned buffers */
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(geode_ecb_alg.cra_list),
	.cra_u			=	{
		.blkcipher	=	{
			.min_keysize	=	AES_MIN_KEY_SIZE,
			.max_keysize	=	AES_MAX_KEY_SIZE,
			.setkey		=	geode_setkey_blk,
			.encrypt	=	geode_ecb_encrypt,
			.decrypt	=	geode_ecb_decrypt,
		}
	}
};
/*
 * PCI remove callback: tear down in reverse order of geode_aes_probe() —
 * unregister the crypto algorithms first so no new requests can reach
 * the hardware, then unmap the MMIO region and release the device.
 */
static void __devexit
geode_aes_remove(struct pci_dev *dev)
{
	crypto_unregister_alg(&geode_alg);
	crypto_unregister_alg(&geode_ecb_alg);
	crypto_unregister_alg(&geode_cbc_alg);

	pci_iounmap(dev, _iobase);
	_iobase = NULL;	/* the mapping is gone; don't leave a dangling pointer */

	pci_release_regions(dev);
	pci_disable_device(dev);
}
/*
 * PCI probe callback: enable the device, map the engine's registers
 * (BAR 0), clear any pending interrupt state, and register the three
 * crypto algorithms.  The goto chain unwinds each step in reverse
 * order on failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit
geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int ret;

	ret = pci_enable_device(dev);
	if (ret)
		return ret;

	ret = pci_request_regions(dev, "geode-aes");
	if (ret)
		goto eenable;

	_iobase = pci_iomap(dev, 0, 0);
	if (_iobase == NULL) {
		ret = -ENOMEM;
		goto erequest;
	}

	spin_lock_init(&lock);

	/* Clear any pending activity */
	iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);

	ret = crypto_register_alg(&geode_alg);
	if (ret)
		goto eiomap;

	ret = crypto_register_alg(&geode_ecb_alg);
	if (ret)
		goto ealg;

	ret = crypto_register_alg(&geode_cbc_alg);
	if (ret)
		goto eecb;

	printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
	return 0;

	/* Error unwind: each label undoes the step above it */
 eecb:
	crypto_unregister_alg(&geode_ecb_alg);

 ealg:
	crypto_unregister_alg(&geode_alg);

 eiomap:
	pci_iounmap(dev, _iobase);

 erequest:
	pci_release_regions(dev);

 eenable:
	pci_disable_device(dev);

	printk(KERN_ERR "geode-aes: GEODE AES initialization failed.\n");
	return ret;
}
/* PCI IDs this driver binds to: the AMD Geode LX AES security block. */
static struct pci_device_id geode_aes_tbl[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), } ,
	{ 0, }	/* terminator */
};

MODULE_DEVICE_TABLE(pci, geode_aes_tbl);
/* PCI driver glue tying the device table to probe/remove. */
static struct pci_driver geode_aes_driver = {
	.name = "Geode LX AES",
	.id_table = geode_aes_tbl,
	.probe = geode_aes_probe,
	.remove = __devexit_p(geode_aes_remove)
};
/* Module entry point: register the PCI driver. */
static int __init
geode_aes_init(void)
{
	return pci_register_driver(&geode_aes_driver);
}
/* Module exit point: unregister the PCI driver (invokes remove). */
static void __exit
geode_aes_exit(void)
{
	pci_unregister_driver(&geode_aes_driver);
}
/* Module metadata and init/exit wiring. */
MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");

module_init(geode_aes_init);
module_exit(geode_aes_exit);