fsl_asrc_dma.c

/*
 * Freescale ASRC ALSA SoC Platform (DMA) driver
 *
 * Copyright (C) 2014 Freescale Semiconductor, Inc.
 *
 * Author: Nicolin Chen <nicoleotsuka@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_data/dma-imx.h>
#include <sound/dmaengine_pcm.h>
#include <sound/pcm_params.h>

#include "fsl_asrc.h"

#define FSL_ASRC_DMABUF_SIZE (256 * 1024)

static struct snd_pcm_hardware snd_imx_hardware = {
        .info = SNDRV_PCM_INFO_INTERLEAVED |
                SNDRV_PCM_INFO_BLOCK_TRANSFER |
                SNDRV_PCM_INFO_MMAP |
                SNDRV_PCM_INFO_MMAP_VALID |
                SNDRV_PCM_INFO_PAUSE |
                SNDRV_PCM_INFO_RESUME,
        .buffer_bytes_max = FSL_ASRC_DMABUF_SIZE,
        .period_bytes_min = 128,
        .period_bytes_max = 65535, /* Limited by SDMA engine */
        .periods_min = 2,
        .periods_max = 255,
        .fifo_size = 0,
};

static bool filter(struct dma_chan *chan, void *param)
{
        if (!imx_dma_is_general_purpose(chan))
                return false;

        chan->private = param;

        return true;
}

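/*
 * Front-End DMA completion callback: advance the software position by one
 * period, wrap it at the buffer boundary and notify ALSA that a period
 * has elapsed.
 */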
static void fsl_asrc_dma_complete(void *arg)
{
        struct snd_pcm_substream *substream = arg;
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct fsl_asrc_pair *pair = runtime->private_data;

        pair->pos += snd_pcm_lib_period_bytes(substream);
        if (pair->pos >= snd_pcm_lib_buffer_bytes(substream))
                pair->pos = 0;

        snd_pcm_period_elapsed(substream);
}

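/*
 * Prepare and submit cyclic descriptors for both channels of the pair.
 * The Front-End descriptor covers the whole DMA buffer, split into
 * periods, with fsl_asrc_dma_complete() as its callback. The Back-End
 * descriptor is a DEV_TO_DEV transfer whose buffer address and length
 * arguments are dummy values; the real device addresses were programmed
 * through dmaengine_slave_config() in hw_params().
 */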
static int fsl_asrc_dma_prepare_and_submit(struct snd_pcm_substream *substream)
{
        u8 dir = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? OUT : IN;
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct fsl_asrc_pair *pair = runtime->private_data;
        struct device *dev = rtd->platform->dev;
        unsigned long flags = DMA_CTRL_ACK;

        /* Prepare and submit Front-End DMA channel */
        if (!substream->runtime->no_period_wakeup)
                flags |= DMA_PREP_INTERRUPT;

        pair->pos = 0;
        pair->desc[!dir] = dmaengine_prep_dma_cyclic(
                        pair->dma_chan[!dir], runtime->dma_addr,
                        snd_pcm_lib_buffer_bytes(substream),
                        snd_pcm_lib_period_bytes(substream),
                        dir == OUT ? DMA_TO_DEVICE : DMA_FROM_DEVICE, flags);
        if (!pair->desc[!dir]) {
                dev_err(dev, "failed to prepare slave DMA for Front-End\n");
                return -ENOMEM;
        }

        pair->desc[!dir]->callback = fsl_asrc_dma_complete;
        pair->desc[!dir]->callback_param = substream;

        dmaengine_submit(pair->desc[!dir]);

        /* Prepare and submit Back-End DMA channel */
        pair->desc[dir] = dmaengine_prep_dma_cyclic(
                        pair->dma_chan[dir], 0xffff, 64, 64, DMA_DEV_TO_DEV, 0);
        if (!pair->desc[dir]) {
                dev_err(dev, "failed to prepare slave DMA for Back-End\n");
                return -ENOMEM;
        }

        dmaengine_submit(pair->desc[dir]);

        return 0;
}

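/*
 * Submit and kick both DMA channels on start/resume/pause-release;
 * terminate them on stop/suspend/pause-push.
 */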
static int fsl_asrc_dma_trigger(struct snd_pcm_substream *substream, int cmd)
{
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct fsl_asrc_pair *pair = runtime->private_data;
        int ret;

        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
        case SNDRV_PCM_TRIGGER_RESUME:
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
                ret = fsl_asrc_dma_prepare_and_submit(substream);
                if (ret)
                        return ret;
                dma_async_issue_pending(pair->dma_chan[IN]);
                dma_async_issue_pending(pair->dma_chan[OUT]);
                break;
        case SNDRV_PCM_TRIGGER_STOP:
        case SNDRV_PCM_TRIGGER_SUSPEND:
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
                dmaengine_terminate_all(pair->dma_chan[OUT]);
                dmaengine_terminate_all(pair->dma_chan[IN]);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

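/*
 * hw_params() sets up both DMA channels of the pair:
 *
 * 1) The Front-End channel, requested from the ASRC driver, moves audio
 *    between the DMA buffer and the pair's input FIFO (playback) or
 *    output FIFO (capture).
 * 2) The Back-End channel, a general purpose i.MX DMA channel, runs a
 *    DEV_TO_DEV transfer between the opposite FIFO of the pair and the
 *    Back-End DAI, using the DMA request lines collected from both ends.
 */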
static int fsl_asrc_dma_hw_params(struct snd_pcm_substream *substream,
                                  struct snd_pcm_hw_params *params)
{
        enum dma_slave_buswidth buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
        struct snd_dmaengine_dai_dma_data *dma_params_fe = NULL;
        struct snd_dmaengine_dai_dma_data *dma_params_be = NULL;
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct fsl_asrc_pair *pair = runtime->private_data;
        struct fsl_asrc *asrc_priv = pair->asrc_priv;
        struct dma_slave_config config_fe, config_be;
        enum asrc_pair_index index = pair->index;
        struct device *dev = rtd->platform->dev;
        int stream = substream->stream;
        struct imx_dma_data *tmp_data;
        struct snd_soc_dpcm *dpcm;
        struct dma_chan *tmp_chan;
        struct device *dev_be;
        u8 dir = tx ? OUT : IN;
        dma_cap_mask_t mask;
        int ret;

        /* Fetch the Back-End dma_data from DPCM */
        list_for_each_entry(dpcm, &rtd->dpcm[stream].be_clients, list_be) {
                struct snd_soc_pcm_runtime *be = dpcm->be;
                struct snd_pcm_substream *substream_be;
                struct snd_soc_dai *dai = be->cpu_dai;

                if (dpcm->fe != rtd)
                        continue;

                substream_be = snd_soc_dpcm_get_substream(be, stream);
                dma_params_be = snd_soc_dai_get_dma_data(dai, substream_be);
                dev_be = dai->dev;
                break;
        }

        if (!dma_params_be) {
                dev_err(dev, "failed to get the substream of Back-End\n");
                return -EINVAL;
        }

        /* Override dma_data of the Front-End and config its dmaengine */
        dma_params_fe = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
        dma_params_fe->addr = asrc_priv->paddr + REG_ASRDx(!dir, index);
        dma_params_fe->maxburst = dma_params_be->maxburst;

        pair->dma_chan[!dir] = fsl_asrc_get_dma_channel(pair, !dir);
        if (!pair->dma_chan[!dir]) {
                dev_err(dev, "failed to request DMA channel\n");
                return -EINVAL;
        }

        memset(&config_fe, 0, sizeof(config_fe));
        ret = snd_dmaengine_pcm_prepare_slave_config(substream, params, &config_fe);
        if (ret) {
                dev_err(dev, "failed to prepare DMA config for Front-End\n");
                return ret;
        }

        ret = dmaengine_slave_config(pair->dma_chan[!dir], &config_fe);
        if (ret) {
                dev_err(dev, "failed to config DMA channel for Front-End\n");
                return ret;
        }

        /* Request and config DMA channel for Back-End */
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        dma_cap_set(DMA_CYCLIC, mask);

        /* Get DMA request of Back-End */
        tmp_chan = dma_request_slave_channel(dev_be, tx ? "tx" : "rx");
        tmp_data = tmp_chan->private;
        pair->dma_data.dma_request = tmp_data->dma_request;
        dma_release_channel(tmp_chan);

        /* Get DMA request of Front-End */
        tmp_chan = fsl_asrc_get_dma_channel(pair, dir);
        tmp_data = tmp_chan->private;
        pair->dma_data.dma_request2 = tmp_data->dma_request;
        pair->dma_data.peripheral_type = tmp_data->peripheral_type;
        pair->dma_data.priority = tmp_data->priority;
        dma_release_channel(tmp_chan);

        pair->dma_chan[dir] = dma_request_channel(mask, filter, &pair->dma_data);
        if (!pair->dma_chan[dir]) {
                dev_err(dev, "failed to request DMA channel for Back-End\n");
                return -EINVAL;
        }

        if (asrc_priv->asrc_width == 16)
                buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
        else
                buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;

        config_be.direction = DMA_DEV_TO_DEV;
        config_be.src_addr_width = buswidth;
        config_be.src_maxburst = dma_params_be->maxburst;
        config_be.dst_addr_width = buswidth;
        config_be.dst_maxburst = dma_params_be->maxburst;

        if (tx) {
                config_be.src_addr = asrc_priv->paddr + REG_ASRDO(index);
                config_be.dst_addr = dma_params_be->addr;
        } else {
                config_be.dst_addr = asrc_priv->paddr + REG_ASRDI(index);
                config_be.src_addr = dma_params_be->addr;
        }

        ret = dmaengine_slave_config(pair->dma_chan[dir], &config_be);
        if (ret) {
                dev_err(dev, "failed to config DMA channel for Back-End\n");
                return ret;
        }

        snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);

        return 0;
}

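/* Drop the runtime buffer and release both DMA channels of the pair. */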
static int fsl_asrc_dma_hw_free(struct snd_pcm_substream *substream)
{
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct fsl_asrc_pair *pair = runtime->private_data;

        snd_pcm_set_runtime_buffer(substream, NULL);

        if (pair->dma_chan[IN])
                dma_release_channel(pair->dma_chan[IN]);

        if (pair->dma_chan[OUT])
                dma_release_channel(pair->dma_chan[OUT]);

        pair->dma_chan[IN] = NULL;
        pair->dma_chan[OUT] = NULL;

        return 0;
}

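/* Allocate the pair runtime context and apply the PCM hardware constraints. */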
static int fsl_asrc_dma_startup(struct snd_pcm_substream *substream)
{
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct device *dev = rtd->platform->dev;
        struct fsl_asrc *asrc_priv = dev_get_drvdata(dev);
        struct fsl_asrc_pair *pair;

        pair = kzalloc(sizeof(struct fsl_asrc_pair), GFP_KERNEL);
        if (!pair) {
                dev_err(dev, "failed to allocate pair\n");
                return -ENOMEM;
        }

        pair->asrc_priv = asrc_priv;

        runtime->private_data = pair;

        snd_pcm_hw_constraint_integer(substream->runtime,
                                      SNDRV_PCM_HW_PARAM_PERIODS);
        snd_soc_set_runtime_hwparams(substream, &snd_imx_hardware);

        return 0;
}

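/* Detach the pair from the ASRC private data and free its runtime context. */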
static int fsl_asrc_dma_shutdown(struct snd_pcm_substream *substream)
{
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct fsl_asrc_pair *pair = runtime->private_data;
        struct fsl_asrc *asrc_priv;

        if (!pair)
                return 0;

        asrc_priv = pair->asrc_priv;

        if (asrc_priv->pair[pair->index] == pair)
                asrc_priv->pair[pair->index] = NULL;

        kfree(pair);

        return 0;
}

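/* Report the buffer position maintained by the Front-End DMA callback. */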
static snd_pcm_uframes_t fsl_asrc_dma_pcm_pointer(struct snd_pcm_substream *substream)
{
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct fsl_asrc_pair *pair = runtime->private_data;

        return bytes_to_frames(substream->runtime, pair->pos);
}

static const struct snd_pcm_ops fsl_asrc_dma_pcm_ops = {
        .ioctl = snd_pcm_lib_ioctl,
        .hw_params = fsl_asrc_dma_hw_params,
        .hw_free = fsl_asrc_dma_hw_free,
        .trigger = fsl_asrc_dma_trigger,
        .open = fsl_asrc_dma_startup,
        .close = fsl_asrc_dma_shutdown,
        .pointer = fsl_asrc_dma_pcm_pointer,
};

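/*
 * Preallocate one DMA buffer per substream; the buffer is attached to the
 * runtime in hw_params() via snd_pcm_set_runtime_buffer().
 */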
static int fsl_asrc_dma_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm_substream *substream;
        struct snd_pcm *pcm = rtd->pcm;
        int ret, i;

        ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
        if (ret) {
                dev_err(card->dev, "failed to set DMA mask\n");
                return ret;
        }

        for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_LAST; i++) {
                substream = pcm->streams[i].substream;
                if (!substream)
                        continue;

                ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
                                FSL_ASRC_DMABUF_SIZE, &substream->dma_buffer);
                if (ret) {
                        dev_err(card->dev, "failed to allocate DMA buffer\n");
                        goto err;
                }
        }

        return 0;

err:
        if (--i == 0 && pcm->streams[i].substream)
                snd_dma_free_pages(&pcm->streams[i].substream->dma_buffer);

        return ret;
}

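/* Free the preallocated DMA buffers of all substreams. */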
static void fsl_asrc_dma_pcm_free(struct snd_pcm *pcm)
{
        struct snd_pcm_substream *substream;
        int i;

        for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_LAST; i++) {
                substream = pcm->streams[i].substream;
                if (!substream)
                        continue;

                snd_dma_free_pages(&substream->dma_buffer);
                substream->dma_buffer.area = NULL;
                substream->dma_buffer.addr = 0;
        }
}

struct snd_soc_platform_driver fsl_asrc_platform = {
        .ops = &fsl_asrc_dma_pcm_ops,
        .pcm_new = fsl_asrc_dma_pcm_new,
        .pcm_free = fsl_asrc_dma_pcm_free,
};
EXPORT_SYMBOL_GPL(fsl_asrc_platform);