  1. /*
  2. * Copyright (C) 2012 IBM Corporation
  3. *
  4. * Author: Ashley Lai <ashleydlai@gmail.com>
  5. *
  6. * Maintained by: <tpmdd-devel@lists.sourceforge.net>
  7. *
  8. * Device driver for TCG/TCPA TPM (trusted platform module).
  9. * Specifications at www.trustedcomputinggroup.org
  10. *
  11. * This program is free software; you can redistribute it and/or
  12. * modify it under the terms of the GNU General Public License as
  13. * published by the Free Software Foundation, version 2 of the
  14. * License.
  15. *
  16. */
  17. #include <linux/dma-mapping.h>
  18. #include <linux/dmapool.h>
  19. #include <linux/slab.h>
  20. #include <asm/vio.h>
  21. #include <asm/irq.h>
  22. #include <linux/types.h>
  23. #include <linux/list.h>
  24. #include <linux/spinlock.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/wait.h>
  27. #include <asm/prom.h>
  28. #include "tpm.h"
  29. #include "tpm_ibmvtpm.h"
/* Name used when registering the vio driver and its interrupt handler. */
static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";

/* Devices matched by this driver: PAPR virtual TPM ("IBM,vtpm") nodes. */
static struct vio_device_id tpm_ibmvtpm_device_table[] = {
	{ "IBM,vtpm", "IBM,vtpm"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
/**
 * ibmvtpm_send_crq - Send a CRQ request
 * @vdev: vio device struct
 * @w1: first word of the CRQ message
 * @w2: second word of the CRQ message
 *
 * Hands the two-word message to the hypervisor via the H_SEND_CRQ hcall.
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure (hcall status code)
 */
static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2)
{
	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, w2);
}
  50. /**
  51. * tpm_ibmvtpm_recv - Receive data after send
  52. * @chip: tpm chip struct
  53. * @buf: buffer to read
  54. * count: size of buffer
  55. *
  56. * Return value:
  57. * Number of bytes read
  58. */
  59. static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
  60. {
  61. struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
  62. u16 len;
  63. int sig;
  64. if (!ibmvtpm->rtce_buf) {
  65. dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
  66. return 0;
  67. }
  68. sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
  69. if (sig)
  70. return -EINTR;
  71. len = ibmvtpm->res_len;
  72. if (count < len) {
  73. dev_err(ibmvtpm->dev,
  74. "Invalid size in recv: count=%zd, crq_size=%d\n",
  75. count, len);
  76. return -EIO;
  77. }
  78. spin_lock(&ibmvtpm->rtce_lock);
  79. memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
  80. memset(ibmvtpm->rtce_buf, 0, len);
  81. ibmvtpm->res_len = 0;
  82. spin_unlock(&ibmvtpm->rtce_lock);
  83. return len;
  84. }
  85. /**
  86. * tpm_ibmvtpm_send - Send tpm request
  87. * @chip: tpm chip struct
  88. * @buf: buffer contains data to send
  89. * count: size of buffer
  90. *
  91. * Return value:
  92. * Number of bytes sent
  93. */
  94. static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
  95. {
  96. struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
  97. struct ibmvtpm_crq crq;
  98. __be64 *word = (__be64 *)&crq;
  99. int rc, sig;
  100. if (!ibmvtpm->rtce_buf) {
  101. dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
  102. return 0;
  103. }
  104. if (count > ibmvtpm->rtce_size) {
  105. dev_err(ibmvtpm->dev,
  106. "Invalid size in send: count=%zd, rtce_size=%d\n",
  107. count, ibmvtpm->rtce_size);
  108. return -EIO;
  109. }
  110. if (ibmvtpm->tpm_processing_cmd) {
  111. dev_info(ibmvtpm->dev,
  112. "Need to wait for TPM to finish\n");
  113. /* wait for previous command to finish */
  114. sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
  115. if (sig)
  116. return -EINTR;
  117. }
  118. spin_lock(&ibmvtpm->rtce_lock);
  119. ibmvtpm->res_len = 0;
  120. memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
  121. crq.valid = (u8)IBMVTPM_VALID_CMD;
  122. crq.msg = (u8)VTPM_TPM_COMMAND;
  123. crq.len = cpu_to_be16(count);
  124. crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle);
  125. /*
  126. * set the processing flag before the Hcall, since we may get the
  127. * result (interrupt) before even being able to check rc.
  128. */
  129. ibmvtpm->tpm_processing_cmd = true;
  130. rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]),
  131. be64_to_cpu(word[1]));
  132. if (rc != H_SUCCESS) {
  133. dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
  134. rc = 0;
  135. ibmvtpm->tpm_processing_cmd = false;
  136. } else
  137. rc = count;
  138. spin_unlock(&ibmvtpm->rtce_lock);
  139. return rc;
  140. }
/*
 * tpm_ibmvtpm_cancel - No-op: the virtual TPM has no cancel mechanism
 * exposed here; commands always run to completion.
 */
static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
{
	return;
}
/*
 * tpm_ibmvtpm_status - Always reports 0; there is no status register for
 * the virtual TPM.  Completion is signalled via the CRQ interrupt instead.
 */
static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
{
	return 0;
}
  149. /**
  150. * ibmvtpm_crq_get_rtce_size - Send a CRQ request to get rtce size
  151. * @ibmvtpm: vtpm device struct
  152. *
  153. * Return value:
  154. * 0 - Success
  155. * Non-zero - Failure
  156. */
  157. static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
  158. {
  159. struct ibmvtpm_crq crq;
  160. u64 *buf = (u64 *) &crq;
  161. int rc;
  162. crq.valid = (u8)IBMVTPM_VALID_CMD;
  163. crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE;
  164. rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
  165. cpu_to_be64(buf[1]));
  166. if (rc != H_SUCCESS)
  167. dev_err(ibmvtpm->dev,
  168. "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);
  169. return rc;
  170. }
  171. /**
  172. * ibmvtpm_crq_get_version - Send a CRQ request to get vtpm version
  173. * - Note that this is vtpm version and not tpm version
  174. * @ibmvtpm: vtpm device struct
  175. *
  176. * Return value:
  177. * 0 - Success
  178. * Non-zero - Failure
  179. */
  180. static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
  181. {
  182. struct ibmvtpm_crq crq;
  183. u64 *buf = (u64 *) &crq;
  184. int rc;
  185. crq.valid = (u8)IBMVTPM_VALID_CMD;
  186. crq.msg = (u8)VTPM_GET_VERSION;
  187. rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
  188. cpu_to_be64(buf[1]));
  189. if (rc != H_SUCCESS)
  190. dev_err(ibmvtpm->dev,
  191. "ibmvtpm_crq_get_version failed rc=%d\n", rc);
  192. return rc;
  193. }
  194. /**
  195. * ibmvtpm_crq_send_init_complete - Send a CRQ initialize complete message
  196. * @ibmvtpm: vtpm device struct
  197. *
  198. * Return value:
  199. * 0 - Success
  200. * Non-zero - Failure
  201. */
  202. static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
  203. {
  204. int rc;
  205. rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_COMP_CMD, 0);
  206. if (rc != H_SUCCESS)
  207. dev_err(ibmvtpm->dev,
  208. "ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);
  209. return rc;
  210. }
  211. /**
  212. * ibmvtpm_crq_send_init - Send a CRQ initialize message
  213. * @ibmvtpm: vtpm device struct
  214. *
  215. * Return value:
  216. * 0 - Success
  217. * Non-zero - Failure
  218. */
  219. static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
  220. {
  221. int rc;
  222. rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_CMD, 0);
  223. if (rc != H_SUCCESS)
  224. dev_err(ibmvtpm->dev,
  225. "ibmvtpm_crq_send_init failed rc=%d\n", rc);
  226. return rc;
  227. }
/**
 * tpm_ibmvtpm_remove - ibm vtpm remove entry point
 * @vdev: vio device struct
 *
 * Tears down in reverse order of probe: chip, irq, hypervisor CRQ,
 * DMA mappings, then the buffers themselves.
 *
 * Return value:
 *	0
 */
static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
{
	struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	int rc = 0;

	tpm_chip_unregister(chip);
	free_irq(vdev->irq, ibmvtpm);

	/* Free the CRQ, retrying with a delay while the hypervisor is busy. */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
			 CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);

	/* rtce_buf only exists once the device has reported its size. */
	if (ibmvtpm->rtce_buf) {
		dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
				 ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
		kfree(ibmvtpm->rtce_buf);
	}

	kfree(ibmvtpm);
	/* For tpm_ibmvtpm_get_desired_dma */
	dev_set_drvdata(&vdev->dev, NULL);

	return 0;
}
  260. /**
  261. * tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver
  262. * @vdev: vio device struct
  263. *
  264. * Return value:
  265. * Number of bytes the driver needs to DMA map
  266. */
  267. static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
  268. {
  269. struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
  270. struct ibmvtpm_dev *ibmvtpm;
  271. /*
  272. * ibmvtpm initializes at probe time, so the data we are
  273. * asking for may not be set yet. Estimate that 4K required
  274. * for TCE-mapped buffer in addition to CRQ.
  275. */
  276. if (chip)
  277. ibmvtpm = dev_get_drvdata(&chip->dev);
  278. else
  279. return CRQ_RES_BUF_SIZE + PAGE_SIZE;
  280. return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
  281. }
  282. /**
  283. * tpm_ibmvtpm_suspend - Suspend
  284. * @dev: device struct
  285. *
  286. * Return value:
  287. * 0
  288. */
  289. static int tpm_ibmvtpm_suspend(struct device *dev)
  290. {
  291. struct tpm_chip *chip = dev_get_drvdata(dev);
  292. struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
  293. struct ibmvtpm_crq crq;
  294. u64 *buf = (u64 *) &crq;
  295. int rc = 0;
  296. crq.valid = (u8)IBMVTPM_VALID_CMD;
  297. crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND;
  298. rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
  299. cpu_to_be64(buf[1]));
  300. if (rc != H_SUCCESS)
  301. dev_err(ibmvtpm->dev,
  302. "tpm_ibmvtpm_suspend failed rc=%d\n", rc);
  303. return rc;
  304. }
/**
 * ibmvtpm_reset_crq - Reset CRQ
 * @ibmvtpm: ibm vtpm struct
 *
 * Frees and re-registers the CRQ with the hypervisor, clearing the
 * queue page in between.  Used when H_REG_CRQ reports H_RESOURCE
 * (a previous registration is still active).
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
{
	int rc = 0;

	/* Free the old CRQ, retrying with a delay while the hypervisor is busy. */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Start from a clean, empty queue before re-registering. */
	memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE);
	ibmvtpm->crq_queue.index = 0;

	return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address,
				  ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
}
/**
 * tpm_ibmvtpm_resume - Resume from suspend
 * @dev: device struct
 *
 * Re-enables the CRQ and interrupts, then restarts the CRQ init
 * handshake with the hypervisor.
 *
 * Return value:
 *	0 on success, non-zero on failure
 */
static int tpm_ibmvtpm_resume(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	int rc = 0;

	/* Re-enable the CRQ, retrying with a delay while the hypervisor is busy. */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_ENABLE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc) {
		dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
		return rc;
	}

	rc = vio_enable_interrupts(ibmvtpm->vdev);
	if (rc) {
		dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
		return rc;
	}

	/* Kick off the CRQ handshake again; responses come via interrupt. */
	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		dev_err(dev, "Error send_init rc=%d\n", rc);

	return rc;
}
  359. static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
  360. {
  361. return (status == 0);
  362. }
/*
 * TPM operations for the virtual TPM.  There is no hardware status
 * register: status/cancel are stubs and the complete mask/val are 0.
 */
static const struct tpm_class_ops tpm_ibmvtpm = {
	.recv = tpm_ibmvtpm_recv,
	.send = tpm_ibmvtpm_send,
	.cancel = tpm_ibmvtpm_cancel,
	.status = tpm_ibmvtpm_status,
	.req_complete_mask = 0,
	.req_complete_val = 0,
	.req_canceled = tpm_ibmvtpm_req_canceled,
};
/* Power-management callbacks for partition suspend/resume. */
static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
	.suspend = tpm_ibmvtpm_suspend,
	.resume = tpm_ibmvtpm_resume,
};
/**
 * ibmvtpm_crq_get_next - Get next responded crq
 * @ibmvtpm: vtpm device struct
 *
 * Checks the current ring slot; if the hypervisor has marked it valid,
 * advances the (wrapping) index and returns the entry.
 *
 * Return value:
 *	vtpm crq pointer, or NULL if no valid entry is pending
 */
static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
{
	struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue;
	struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index];

	if (crq->valid & VTPM_MSG_RES) {
		if (++crq_q->index == crq_q->num_entry)
			crq_q->index = 0;
		/*
		 * Read barrier: don't let reads of the entry's payload be
		 * reordered before the valid-flag check above.
		 */
		smp_rmb();
	} else
		crq = NULL;
	return crq;
}
/**
 * ibmvtpm_crq_process - Process responded crq
 * @crq: crq to be processed
 * @ibmvtpm: vtpm device struct
 *
 * Runs from the CRQ interrupt handler.  Dispatches on the message class
 * (CRQ init handshake vs. command responses) and updates driver state.
 *
 * Return value:
 *	Nothing
 */
static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
				struct ibmvtpm_dev *ibmvtpm)
{
	int rc = 0;

	switch (crq->valid) {
	case VALID_INIT_CRQ:
		switch (crq->msg) {
		case INIT_CRQ_RES:
			dev_info(ibmvtpm->dev, "CRQ initialized\n");
			/* Step two of the handshake: acknowledge completion. */
			rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
			if (rc)
				dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc);
			return;
		case INIT_CRQ_COMP_RES:
			dev_info(ibmvtpm->dev,
				 "CRQ initialization completed\n");
			return;
		default:
			dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
			return;
		}
	case IBMVTPM_VALID_CMD:
		switch (crq->msg) {
		case VTPM_GET_RTCE_BUFFER_SIZE_RES:
			/* len is u16, so this is effectively a zero check. */
			if (be16_to_cpu(crq->len) <= 0) {
				dev_err(ibmvtpm->dev, "Invalid rtce size\n");
				return;
			}
			ibmvtpm->rtce_size = be16_to_cpu(crq->len);
			/* GFP_ATOMIC: we are in interrupt context. */
			ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
						    GFP_ATOMIC);
			if (!ibmvtpm->rtce_buf) {
				dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
				return;
			}
			ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
				ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
				DMA_BIDIRECTIONAL);
			if (dma_mapping_error(ibmvtpm->dev,
					      ibmvtpm->rtce_dma_handle)) {
				/* Leave rtce_buf NULL so send/recv report "not ready". */
				kfree(ibmvtpm->rtce_buf);
				ibmvtpm->rtce_buf = NULL;
				dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
			}
			return;
		case VTPM_GET_VERSION_RES:
			ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
			return;
		case VTPM_TPM_COMMAND_RES:
			/* len of the data in rtce buffer */
			ibmvtpm->res_len = be16_to_cpu(crq->len);
			/* Wake the caller blocked in tpm_ibmvtpm_recv()/send(). */
			ibmvtpm->tpm_processing_cmd = false;
			wake_up_interruptible(&ibmvtpm->wq);
			return;
		default:
			return;
		}
	}
	return;
}
/**
 * ibmvtpm_interrupt - Interrupt handler
 * @irq: irq number to handle
 * @vtpm_instance: vtpm that received interrupt
 *
 * Drains all pending CRQ entries, processing and releasing each one.
 *
 * Returns:
 *	IRQ_HANDLED
 **/
static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
{
	struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
	struct ibmvtpm_crq *crq;

	/* while loop is needed for initial setup (get version and
	 * get rtce_size). There should be only one tpm request at any
	 * given time.
	 */
	while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
		ibmvtpm_crq_process(crq, ibmvtpm);
		/* Hand the slot back to the hypervisor ... */
		crq->valid = 0;
		/* ... and make sure the clear is visible before moving on. */
		smp_wmb();
	}
	return IRQ_HANDLED;
}
/**
 * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
 * @vio_dev: vio device struct
 * @id: vio device id struct
 *
 * Allocates the chip and driver state, registers the CRQ with the
 * hypervisor, wires up the interrupt, then starts the CRQ handshake and
 * the version/rtce-size queries (whose responses arrive via interrupt).
 *
 * NOTE(review): the rtce buffer is allocated asynchronously in the
 * interrupt path, so the device may not be ready the instant
 * tpm_chip_register() returns — send/recv guard this with the
 * "device is not ready" check.  Also, a tpm_chip_register() failure
 * does not unwind the irq/CRQ setup here — confirm against the
 * upstream driver before relying on that path.
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
			     const struct vio_device_id *id)
{
	struct ibmvtpm_dev *ibmvtpm;
	struct device *dev = &vio_dev->dev;
	struct ibmvtpm_crq_queue *crq_q;
	struct tpm_chip *chip;
	int rc = -ENOMEM, rc1;

	/* Device-managed allocation: freed automatically on failure/remove. */
	chip = tpmm_chip_alloc(dev, &tpm_ibmvtpm);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
	if (!ibmvtpm) {
		dev_err(dev, "kzalloc for ibmvtpm failed\n");
		goto cleanup;
	}
	ibmvtpm->dev = dev;
	ibmvtpm->vdev = vio_dev;

	/* One zeroed page backs the CRQ ring shared with the hypervisor. */
	crq_q = &ibmvtpm->crq_queue;
	crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
	if (!crq_q->crq_addr) {
		dev_err(dev, "Unable to allocate memory for crq_addr\n");
		goto cleanup;
	}
	crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);

	ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
						 CRQ_RES_BUF_SIZE,
						 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
		dev_err(dev, "dma mapping failed\n");
		goto cleanup;
	}

	rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
				ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
	/* H_RESOURCE means a stale registration exists: free and re-register. */
	if (rc == H_RESOURCE)
		rc = ibmvtpm_reset_crq(ibmvtpm);
	if (rc) {
		dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
		goto reg_crq_cleanup;
	}

	rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
			 tpm_ibmvtpm_driver_name, ibmvtpm);
	if (rc) {
		dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
		goto init_irq_cleanup;
	}

	rc = vio_enable_interrupts(vio_dev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto init_irq_cleanup;
	}

	init_waitqueue_head(&ibmvtpm->wq);
	crq_q->index = 0;
	dev_set_drvdata(&chip->dev, ibmvtpm);
	spin_lock_init(&ibmvtpm->rtce_lock);

	/* CRQ handshake + queries; responses are processed in the irq handler. */
	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_version(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	return tpm_chip_register(chip);
init_irq_cleanup:
	/* Free the CRQ registration, retrying while the hypervisor is busy. */
	do {
		rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
	} while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
reg_crq_cleanup:
	dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
			 DMA_BIDIRECTIONAL);
cleanup:
	if (ibmvtpm) {
		if (crq_q->crq_addr)
			free_page((unsigned long)crq_q->crq_addr);
		kfree(ibmvtpm);
	}
	return rc;
}
/* vio driver registration: ties the device table to the probe/remove/pm ops. */
static struct vio_driver ibmvtpm_driver = {
	.id_table = tpm_ibmvtpm_device_table,
	.probe = tpm_ibmvtpm_probe,
	.remove = tpm_ibmvtpm_remove,
	.get_desired_dma = tpm_ibmvtpm_get_desired_dma,
	.name = tpm_ibmvtpm_driver_name,
	.pm = &tpm_ibmvtpm_pm_ops,
};
/**
 * ibmvtpm_module_init - Initialize ibm vtpm module
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int __init ibmvtpm_module_init(void)
{
	return vio_register_driver(&ibmvtpm_driver);
}
/**
 * ibmvtpm_module_exit - Teardown ibm vtpm module
 *
 * Return value:
 *	Nothing
 */
static void __exit ibmvtpm_module_exit(void)
{
	vio_unregister_driver(&ibmvtpm_driver);
}
module_init(ibmvtpm_module_init);
module_exit(ibmvtpm_module_exit);

/* Module metadata. */
MODULE_AUTHOR("adlai@us.ibm.com");
MODULE_DESCRIPTION("IBM vTPM Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");