hidma.c

/*
 * Qualcomm Technologies HIDMA DMA engine interface
 *
 * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/* Linux Foundation elects GPLv2 license only. */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
#include "hidma.h"
/*
 * Default idle time is 2 seconds. This parameter can
 * be overridden by changing the following
 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
 * during kernel boot.
 */
#define HIDMA_AUTOSUSPEND_TIMEOUT		2000
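
/*
 * Example (illustrative only; the "QCOM8061:00" instance name depends on how
 * the device is enumerated on a given platform): the autosuspend delay can be
 * raised to 10 seconds at runtime with
 *
 *   echo 10000 > /sys/bus/platform/devices/QCOM8061:00/power/autosuspend_delay_ms
 */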
#define HIDMA_ERR_INFO_SW			0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE	0x0
#define HIDMA_NR_DEFAULT_DESC			10

static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
        return container_of(dmadev, struct hidma_dev, ddev);
}

static inline
struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
{
        return container_of(_lldevp, struct hidma_dev, lldev);
}

static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
{
        return container_of(dmach, struct hidma_chan, chan);
}

static inline
struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
{
        return container_of(t, struct hidma_desc, desc);
}

static void hidma_free(struct hidma_dev *dmadev)
{
        INIT_LIST_HEAD(&dmadev->ddev.channels);
}

static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");
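
/*
 * Example (illustrative; the exact module name depends on how the driver is
 * built): the descriptor count can be overridden at module load time, e.g.
 *
 *   modprobe <hidma-module> nr_desc_prm=32
 *
 * or later through /sys/module/<hidma-module>/parameters/nr_desc_prm; the
 * value is only consulted when a channel device is probed. A "desc-count"
 * device property, when present, takes precedence (see hidma_probe() below).
 */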

/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
{
        struct dma_device *ddev = mchan->chan.device;
        struct hidma_dev *mdma = to_hidma_dev(ddev);
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t last_cookie;
        struct hidma_desc *mdesc;
        struct hidma_desc *next;
        unsigned long irqflags;
        struct list_head list;

        INIT_LIST_HEAD(&list);

        /* Get all completed descriptors */
        spin_lock_irqsave(&mchan->lock, irqflags);
        list_splice_tail_init(&mchan->completed, &list);
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        /* Execute callbacks and run dependencies */
        list_for_each_entry_safe(mdesc, next, &list, node) {
                enum dma_status llstat;
                struct dmaengine_desc_callback cb;
                struct dmaengine_result result;

                desc = &mdesc->desc;
                last_cookie = desc->cookie;

                spin_lock_irqsave(&mchan->lock, irqflags);
                dma_cookie_complete(desc);
                spin_unlock_irqrestore(&mchan->lock, irqflags);

                llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
                dmaengine_desc_get_callback(desc, &cb);

                dma_run_dependencies(desc);

                spin_lock_irqsave(&mchan->lock, irqflags);
                list_move(&mdesc->node, &mchan->free);

                if (llstat == DMA_COMPLETE) {
                        mchan->last_success = last_cookie;
                        result.result = DMA_TRANS_NOERROR;
                } else
                        result.result = DMA_TRANS_ABORTED;

                spin_unlock_irqrestore(&mchan->lock, irqflags);

                dmaengine_desc_callback_invoke(&cb, &result);
        }
}

/*
 * Called once for each submitted descriptor.
 * PM is locked once for each descriptor that is currently
 * in execution.
 */
static void hidma_callback(void *data)
{
        struct hidma_desc *mdesc = data;
        struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
        struct dma_device *ddev = mchan->chan.device;
        struct hidma_dev *dmadev = to_hidma_dev(ddev);
        unsigned long irqflags;
        bool queued = false;

        spin_lock_irqsave(&mchan->lock, irqflags);
        if (mdesc->node.next) {
                /* Delete from the active list, add to completed list */
                list_move_tail(&mdesc->node, &mchan->completed);
                queued = true;

                /* calculate the next running descriptor */
                mchan->running = list_first_entry(&mchan->active,
                                                  struct hidma_desc, node);
        }
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        hidma_process_completed(mchan);

        if (queued) {
                pm_runtime_mark_last_busy(dmadev->ddev.dev);
                pm_runtime_put_autosuspend(dmadev->ddev.dev);
        }
}

static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
{
        struct hidma_chan *mchan;
        struct dma_device *ddev;

        mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
        if (!mchan)
                return -ENOMEM;

        ddev = &dmadev->ddev;
        mchan->dma_sig = dma_sig;
        mchan->dmadev = dmadev;
        mchan->chan.device = ddev;
        dma_cookie_init(&mchan->chan);

        INIT_LIST_HEAD(&mchan->free);
        INIT_LIST_HEAD(&mchan->prepared);
        INIT_LIST_HEAD(&mchan->active);
        INIT_LIST_HEAD(&mchan->completed);

        spin_lock_init(&mchan->lock);
        list_add_tail(&mchan->chan.device_node, &ddev->channels);
        dmadev->ddev.chancnt++;
        return 0;
}

static void hidma_issue_task(unsigned long arg)
{
        struct hidma_dev *dmadev = (struct hidma_dev *)arg;

        pm_runtime_get_sync(dmadev->ddev.dev);
        hidma_ll_start(dmadev->lldev);
}

static void hidma_issue_pending(struct dma_chan *dmach)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        struct hidma_dev *dmadev = mchan->dmadev;
        unsigned long flags;
        int status;

        spin_lock_irqsave(&mchan->lock, flags);
        if (!mchan->running) {
                struct hidma_desc *desc = list_first_entry(&mchan->active,
                                                           struct hidma_desc,
                                                           node);
                mchan->running = desc;
        }
        spin_unlock_irqrestore(&mchan->lock, flags);

        /* PM will be released in hidma_callback function. */
        status = pm_runtime_get(dmadev->ddev.dev);
        if (status < 0)
                tasklet_schedule(&dmadev->task);
        else
                hidma_ll_start(dmadev->lldev);
}

static inline bool hidma_txn_is_success(dma_cookie_t cookie,
                dma_cookie_t last_success, dma_cookie_t last_used)
{
        if (last_success <= last_used) {
                if ((cookie <= last_success) || (cookie > last_used))
                        return true;
        } else {
                if ((cookie <= last_success) && (cookie > last_used))
                        return true;
        }
        return false;
}
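
/*
 * Worked example (illustrative only): cookies are assigned in increasing
 * order and eventually wrap back to the minimum value. With last_success ==
 * 100 and last_used == 105, cookies 101..105 are still in flight, so only a
 * cookie outside that window reports success. After a wrap, e.g. last_success
 * == 0x7ffffffe and last_used == 3, the in-flight window is 0x7fffffff plus
 * 1..3, which is exactly the range this helper rejects.
 */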

static enum dma_status hidma_tx_status(struct dma_chan *dmach,
                                       dma_cookie_t cookie,
                                       struct dma_tx_state *txstate)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        enum dma_status ret;

        ret = dma_cookie_status(dmach, cookie, txstate);
        if (ret == DMA_COMPLETE) {
                bool is_success;

                is_success = hidma_txn_is_success(cookie, mchan->last_success,
                                                  dmach->cookie);
                return is_success ? ret : DMA_ERROR;
        }

        if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
                unsigned long flags;
                dma_cookie_t runcookie;

                spin_lock_irqsave(&mchan->lock, flags);
                if (mchan->running)
                        runcookie = mchan->running->desc.cookie;
                else
                        runcookie = -EINVAL;

                if (runcookie == cookie)
                        ret = DMA_PAUSED;

                spin_unlock_irqrestore(&mchan->lock, flags);
        }

        return ret;
}

/*
 * Submit descriptor to hardware.
 * Lock the PM for each descriptor we are sending.
 */
static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
{
        struct hidma_chan *mchan = to_hidma_chan(txd->chan);
        struct hidma_dev *dmadev = mchan->dmadev;
        struct hidma_desc *mdesc;
        unsigned long irqflags;
        dma_cookie_t cookie;

        pm_runtime_get_sync(dmadev->ddev.dev);
        if (!hidma_ll_isenabled(dmadev->lldev)) {
                pm_runtime_mark_last_busy(dmadev->ddev.dev);
                pm_runtime_put_autosuspend(dmadev->ddev.dev);
                return -ENODEV;
        }

        mdesc = container_of(txd, struct hidma_desc, desc);
        spin_lock_irqsave(&mchan->lock, irqflags);

        /* Move descriptor to active */
        list_move_tail(&mdesc->node, &mchan->active);

        /* Update cookie */
        cookie = dma_cookie_assign(txd);

        hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch);
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        return cookie;
}
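
/*
 * Descriptor life cycle (as implemented in this file): hidma_prep_dma_memcpy()
 * takes a descriptor from the "free" list and parks it on "prepared";
 * hidma_tx_submit() moves it to "active" and takes a runtime PM reference;
 * hidma_callback() moves it to "completed" and drops that reference; finally
 * hidma_process_completed() invokes the client callback and returns the
 * descriptor to "free".
 */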

static int hidma_alloc_chan_resources(struct dma_chan *dmach)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        struct hidma_dev *dmadev = mchan->dmadev;
        struct hidma_desc *mdesc, *tmp;
        unsigned long irqflags;
        LIST_HEAD(descs);
        unsigned int i;
        int rc = 0;

        if (mchan->allocated)
                return 0;

        /* Alloc descriptors for this channel */
        for (i = 0; i < dmadev->nr_descriptors; i++) {
                mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
                if (!mdesc) {
                        rc = -ENOMEM;
                        break;
                }
                dma_async_tx_descriptor_init(&mdesc->desc, dmach);
                mdesc->desc.tx_submit = hidma_tx_submit;

                rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
                                      "DMA engine", hidma_callback, mdesc,
                                      &mdesc->tre_ch);
                if (rc) {
                        dev_err(dmach->device->dev,
                                "channel alloc failed at %u\n", i);
                        kfree(mdesc);
                        break;
                }
                list_add_tail(&mdesc->node, &descs);
        }

        if (rc) {
                /* return the allocated descriptors */
                list_for_each_entry_safe(mdesc, tmp, &descs, node) {
                        hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
                        kfree(mdesc);
                }
                return rc;
        }

        spin_lock_irqsave(&mchan->lock, irqflags);
        list_splice_tail_init(&descs, &mchan->free);
        mchan->allocated = true;
        spin_unlock_irqrestore(&mchan->lock, irqflags);
        return 1;
}

static struct dma_async_tx_descriptor *
hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
                      size_t len, unsigned long flags)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        struct hidma_desc *mdesc = NULL;
        struct hidma_dev *mdma = mchan->dmadev;
        unsigned long irqflags;

        /* Get free descriptor */
        spin_lock_irqsave(&mchan->lock, irqflags);
        if (!list_empty(&mchan->free)) {
                mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
                list_del(&mdesc->node);
        }
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        if (!mdesc)
                return NULL;

        hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
                                     src, dest, len, flags);

        /* Place descriptor in prepared list */
        spin_lock_irqsave(&mchan->lock, irqflags);
        list_add_tail(&mdesc->node, &mchan->prepared);
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        return &mdesc->desc;
}
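
/*
 * Minimal client-side sketch (not part of this driver) of how a dmaengine
 * consumer would drive the DMA_MEMCPY capability exported below. Error
 * handling is omitted, dst_dma/src_dma are assumed to be DMA addresses
 * obtained from the DMA mapping API, and memcpy_done() is a hypothetical
 * completion callback.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	tx->callback = memcpy_done;
 *	cookie = dmaengine_submit(tx);		// ends up in hidma_tx_submit()
 *	dma_async_issue_pending(chan);		// ends up in hidma_issue_pending()
 *	...
 *	dma_release_channel(chan);
 */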

static int hidma_terminate_channel(struct dma_chan *chan)
{
        struct hidma_chan *mchan = to_hidma_chan(chan);
        struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
        struct hidma_desc *tmp, *mdesc;
        unsigned long irqflags;
        LIST_HEAD(list);
        int rc;

        pm_runtime_get_sync(dmadev->ddev.dev);
        /* give completed requests a chance to finish */
        hidma_process_completed(mchan);

        spin_lock_irqsave(&mchan->lock, irqflags);
        mchan->last_success = 0;
        list_splice_init(&mchan->active, &list);
        list_splice_init(&mchan->prepared, &list);
        list_splice_init(&mchan->completed, &list);
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        /* this suspends the existing transfer */
        rc = hidma_ll_disable(dmadev->lldev);
        if (rc) {
                dev_err(dmadev->ddev.dev, "channel did not pause\n");
                goto out;
        }

        /* return all user requests */
        list_for_each_entry_safe(mdesc, tmp, &list, node) {
                struct dma_async_tx_descriptor *txd = &mdesc->desc;

                dma_descriptor_unmap(txd);
                dmaengine_desc_get_callback_invoke(txd, NULL);
                dma_run_dependencies(txd);

                /* move myself to free_list */
                list_move(&mdesc->node, &mchan->free);
        }

        rc = hidma_ll_enable(dmadev->lldev);
out:
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
        pm_runtime_put_autosuspend(dmadev->ddev.dev);
        return rc;
}

static int hidma_terminate_all(struct dma_chan *chan)
{
        struct hidma_chan *mchan = to_hidma_chan(chan);
        struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
        int rc;

        rc = hidma_terminate_channel(chan);
        if (rc)
                return rc;

        /* reinitialize the hardware */
        pm_runtime_get_sync(dmadev->ddev.dev);
        rc = hidma_ll_setup(dmadev->lldev);
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
        pm_runtime_put_autosuspend(dmadev->ddev.dev);
        return rc;
}

static void hidma_free_chan_resources(struct dma_chan *dmach)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        struct hidma_dev *mdma = mchan->dmadev;
        struct hidma_desc *mdesc, *tmp;
        unsigned long irqflags;
        LIST_HEAD(descs);

        /* terminate running transactions and free descriptors */
        hidma_terminate_channel(dmach);

        spin_lock_irqsave(&mchan->lock, irqflags);

        /* Move data */
        list_splice_tail_init(&mchan->free, &descs);

        /* Free descriptors */
        list_for_each_entry_safe(mdesc, tmp, &descs, node) {
                hidma_ll_free(mdma->lldev, mdesc->tre_ch);
                list_del(&mdesc->node);
                kfree(mdesc);
        }

        mchan->allocated = 0;
        spin_unlock_irqrestore(&mchan->lock, irqflags);
}

static int hidma_pause(struct dma_chan *chan)
{
        struct hidma_chan *mchan;
        struct hidma_dev *dmadev;

        mchan = to_hidma_chan(chan);
        dmadev = to_hidma_dev(mchan->chan.device);
        if (!mchan->paused) {
                pm_runtime_get_sync(dmadev->ddev.dev);
                if (hidma_ll_disable(dmadev->lldev))
                        dev_warn(dmadev->ddev.dev, "channel did not stop\n");
                mchan->paused = true;
                pm_runtime_mark_last_busy(dmadev->ddev.dev);
                pm_runtime_put_autosuspend(dmadev->ddev.dev);
        }
        return 0;
}

static int hidma_resume(struct dma_chan *chan)
{
        struct hidma_chan *mchan;
        struct hidma_dev *dmadev;
        int rc = 0;

        mchan = to_hidma_chan(chan);
        dmadev = to_hidma_dev(mchan->chan.device);
        if (mchan->paused) {
                pm_runtime_get_sync(dmadev->ddev.dev);
                rc = hidma_ll_enable(dmadev->lldev);
                if (!rc)
                        mchan->paused = false;
                else
                        dev_err(dmadev->ddev.dev,
                                "failed to resume the channel\n");
                pm_runtime_mark_last_busy(dmadev->ddev.dev);
                pm_runtime_put_autosuspend(dmadev->ddev.dev);
        }
        return rc;
}

static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
{
        struct hidma_lldev *lldev = arg;

        /*
         * All interrupts are request driven.
         * HW doesn't send an interrupt by itself.
         */
        return hidma_ll_inthandler(chirq, lldev);
}

static ssize_t hidma_show_values(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct hidma_dev *mdev = platform_get_drvdata(pdev);

        buf[0] = 0;

        if (strcmp(attr->attr.name, "chid") == 0)
                sprintf(buf, "%d\n", mdev->chidx);

        return strlen(buf);
}

static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name,
                                    int mode)
{
        struct device_attribute *attrs;
        char *name_copy;

        attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
                             GFP_KERNEL);
        if (!attrs)
                return -ENOMEM;

        name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
        if (!name_copy)
                return -ENOMEM;

        attrs->attr.name = name_copy;
        attrs->attr.mode = mode;
        attrs->show = hidma_show_values;
        sysfs_attr_init(&attrs->attr);

        return device_create_file(dev->ddev.dev, attrs);
}
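
/*
 * Example (illustrative; the instance name depends on ACPI/DT enumeration):
 * the hardware channel index exported through the "chid" attribute can be
 * read with
 *
 *   cat /sys/bus/platform/devices/QCOM8061:00/chid
 */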

static int hidma_probe(struct platform_device *pdev)
{
        struct hidma_dev *dmadev;
        struct resource *trca_resource;
        struct resource *evca_resource;
        int chirq;
        void __iomem *evca;
        void __iomem *trca;
        int rc;

        pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);

        trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        trca = devm_ioremap_resource(&pdev->dev, trca_resource);
        if (IS_ERR(trca)) {
                rc = -ENOMEM;
                goto bailout;
        }

        evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        evca = devm_ioremap_resource(&pdev->dev, evca_resource);
        if (IS_ERR(evca)) {
                rc = -ENOMEM;
                goto bailout;
        }

        /*
         * This driver only handles the channel IRQs.
         * Common IRQ is handled by the management driver.
         */
        chirq = platform_get_irq(pdev, 0);
        if (chirq < 0) {
                rc = -ENODEV;
                goto bailout;
        }

        dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
        if (!dmadev) {
                rc = -ENOMEM;
                goto bailout;
        }

        INIT_LIST_HEAD(&dmadev->ddev.channels);
        spin_lock_init(&dmadev->lock);
        dmadev->ddev.dev = &pdev->dev;
        pm_runtime_get_sync(dmadev->ddev.dev);

        dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
        if (WARN_ON(!pdev->dev.dma_mask)) {
                rc = -ENXIO;
                goto dmafree;
        }

        dmadev->dev_evca = evca;
        dmadev->evca_resource = evca_resource;
        dmadev->dev_trca = trca;
        dmadev->trca_resource = trca_resource;
        dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
        dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
        dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
        dmadev->ddev.device_tx_status = hidma_tx_status;
        dmadev->ddev.device_issue_pending = hidma_issue_pending;
        dmadev->ddev.device_pause = hidma_pause;
        dmadev->ddev.device_resume = hidma_resume;
        dmadev->ddev.device_terminate_all = hidma_terminate_all;
        dmadev->ddev.copy_align = 8;

        device_property_read_u32(&pdev->dev, "desc-count",
                                 &dmadev->nr_descriptors);

        if (!dmadev->nr_descriptors && nr_desc_prm)
                dmadev->nr_descriptors = nr_desc_prm;

        if (!dmadev->nr_descriptors)
                dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;

        dmadev->chidx = readl(dmadev->dev_trca + 0x28);

        /* Set DMA mask to 64 bits. */
        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (rc) {
                dev_warn(&pdev->dev, "unable to set coherent mask to 64");
                rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (rc)
                        goto dmafree;
        }

        dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
                                      dmadev->nr_descriptors, dmadev->dev_trca,
                                      dmadev->dev_evca, dmadev->chidx);
        if (!dmadev->lldev) {
                rc = -EPROBE_DEFER;
                goto dmafree;
        }

        rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 0,
                              "qcom-hidma", dmadev->lldev);
        if (rc)
                goto uninit;

        INIT_LIST_HEAD(&dmadev->ddev.channels);
        rc = hidma_chan_init(dmadev, 0);
        if (rc)
                goto uninit;

        rc = dma_async_device_register(&dmadev->ddev);
        if (rc)
                goto uninit;

        dmadev->irq = chirq;
        tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
        hidma_debug_init(dmadev);
        hidma_create_sysfs_entry(dmadev, "chid", S_IRUGO);
        dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
        platform_set_drvdata(pdev, dmadev);
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
        pm_runtime_put_autosuspend(dmadev->ddev.dev);
        return 0;

uninit:
        hidma_debug_uninit(dmadev);
        hidma_ll_uninit(dmadev->lldev);
dmafree:
        if (dmadev)
                hidma_free(dmadev);
bailout:
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return rc;
}

static int hidma_remove(struct platform_device *pdev)
{
        struct hidma_dev *dmadev = platform_get_drvdata(pdev);

        pm_runtime_get_sync(dmadev->ddev.dev);
        dma_async_device_unregister(&dmadev->ddev);
        devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
        tasklet_kill(&dmadev->task);
        hidma_debug_uninit(dmadev);
        hidma_ll_uninit(dmadev->lldev);
        hidma_free(dmadev);

        dev_info(&pdev->dev, "HI-DMA engine removed\n");
        pm_runtime_put_sync_suspend(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        return 0;
}

#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
        {"QCOM8061"},
        {},
};
#endif

static const struct of_device_id hidma_match[] = {
        {.compatible = "qcom,hidma-1.0",},
        {},
};
MODULE_DEVICE_TABLE(of, hidma_match);

static struct platform_driver hidma_driver = {
        .probe = hidma_probe,
        .remove = hidma_remove,
        .driver = {
                   .name = "hidma",
                   .of_match_table = hidma_match,
                   .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
                   },
};

module_platform_driver(hidma_driver);
MODULE_LICENSE("GPL v2");