/*
 * skl-message.c - HDA DSP interface for FW registration, Pipe and Module
 * configurations
 *
 * Copyright (C) 2015 Intel Corp
 * Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
 *         Jeeja KP <jeeja.kp@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/slab.h>
#include <linux/pci.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
#include "skl.h"
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "skl-topology.h"
#include "skl-tplg-interface.h"
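
/*
 * DMA buffer helpers used by the DSP code loader: firmware images are
 * DMA'd to the DSP, so the buffers are allocated and freed through the
 * HDA bus io_ops.
 */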
static int skl_alloc_dma_buf(struct device *dev,
                struct snd_dma_buffer *dmab, size_t size)
{
        struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
        struct hdac_bus *bus = ebus_to_hbus(ebus);

        if (!bus)
                return -ENODEV;

        return bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV, size, dmab);
}

static int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab)
{
        struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
        struct hdac_bus *bus = ebus_to_hbus(ebus);

        if (!bus)
                return -ENODEV;

        bus->io_ops->dma_free_pages(bus, dmab);

        return 0;
}

#define NOTIFICATION_PARAM_ID 3
#define NOTIFICATION_MASK 0xf

/* Enable/disable notification for underruns/overruns from the firmware module */
static void skl_dsp_enable_notification(struct skl_sst *ctx, bool enable)
{
        struct notification_mask mask;
        struct skl_ipc_large_config_msg msg = {0};

        mask.notify = NOTIFICATION_MASK;
        mask.enable = enable;

        msg.large_param_id = NOTIFICATION_PARAM_ID;
        msg.param_data_size = sizeof(mask);

        skl_ipc_set_large_config(&ctx->ipc, &msg, (u32 *)&mask);
}
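
/*
 * Program the SPIB (Software Position In Buffer) for the host stream used
 * for code loading, so the controller knows how much data in the DMA
 * buffer is valid.
 */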
static int skl_dsp_setup_spib(struct device *dev, unsigned int size,
                                int stream_tag, int enable)
{
        struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
        struct hdac_bus *bus = ebus_to_hbus(ebus);
        struct hdac_stream *stream = snd_hdac_get_stream(bus,
                        SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
        struct hdac_ext_stream *estream;

        if (!stream)
                return -EINVAL;

        estream = stream_to_hdac_ext_stream(stream);

        /* enable/disable SPIB for this hdac stream */
        snd_hdac_ext_stream_spbcap_enable(ebus, enable, stream->index);

        /* set the spib value */
        snd_hdac_ext_stream_set_spib(ebus, estream, size);

        return 0;
}
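
/*
 * Prepare a host DMA stream for firmware/library download: assign a
 * playback stream, set up its buffer for the given format and size and
 * program SPIB. Returns the stream tag on success.
 */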
static int skl_dsp_prepare(struct device *dev, unsigned int format,
                        unsigned int size, struct snd_dma_buffer *dmab)
{
        struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
        struct hdac_bus *bus = ebus_to_hbus(ebus);
        struct hdac_ext_stream *estream;
        struct hdac_stream *stream;
        struct snd_pcm_substream substream;
        int ret;

        if (!bus)
                return -ENODEV;

        memset(&substream, 0, sizeof(substream));
        substream.stream = SNDRV_PCM_STREAM_PLAYBACK;

        estream = snd_hdac_ext_stream_assign(ebus, &substream,
                                        HDAC_EXT_STREAM_TYPE_HOST);
        if (!estream)
                return -ENODEV;

        stream = hdac_stream(estream);

        /* assign decoupled host DMA channel */
        ret = snd_hdac_dsp_prepare(stream, format, size, dmab);
        if (ret < 0)
                return ret;

        skl_dsp_setup_spib(dev, size, stream->stream_tag, true);

        return stream->stream_tag;
}
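
/* Start or stop the host DMA stream used for code loading */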
static int skl_dsp_trigger(struct device *dev, bool start, int stream_tag)
{
        struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
        struct hdac_stream *stream;
        struct hdac_bus *bus = ebus_to_hbus(ebus);

        if (!bus)
                return -ENODEV;

        stream = snd_hdac_get_stream(bus,
                SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
        if (!stream)
                return -EINVAL;

        snd_hdac_dsp_trigger(stream, start);

        return 0;
}
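
/* Undo skl_dsp_prepare(): disable SPIB, release the stream and its buffer */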
static int skl_dsp_cleanup(struct device *dev,
                struct snd_dma_buffer *dmab, int stream_tag)
{
        struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
        struct hdac_stream *stream;
        struct hdac_ext_stream *estream;
        struct hdac_bus *bus = ebus_to_hbus(ebus);

        if (!bus)
                return -ENODEV;

        stream = snd_hdac_get_stream(bus,
                SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
        if (!stream)
                return -EINVAL;

        estream = stream_to_hdac_ext_stream(stream);
        skl_dsp_setup_spib(dev, 0, stream_tag, false);
        snd_hdac_ext_stream_release(estream, HDAC_EXT_STREAM_TYPE_HOST);

        snd_hdac_dsp_cleanup(stream, dmab);

        return 0;
}
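
/*
 * Loader ops constructors: the base variant only needs DMA buffer
 * alloc/free, while the Broxton variant additionally downloads code
 * through a host DMA stream and so needs prepare/trigger/cleanup as well.
 */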
static struct skl_dsp_loader_ops skl_get_loader_ops(void)
{
        struct skl_dsp_loader_ops loader_ops;

        memset(&loader_ops, 0, sizeof(struct skl_dsp_loader_ops));

        loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
        loader_ops.free_dma_buf = skl_free_dma_buf;

        return loader_ops;
}

static struct skl_dsp_loader_ops bxt_get_loader_ops(void)
{
        struct skl_dsp_loader_ops loader_ops;

        memset(&loader_ops, 0, sizeof(loader_ops));

        loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
        loader_ops.free_dma_buf = skl_free_dma_buf;
        loader_ops.prepare = skl_dsp_prepare;
        loader_ops.trigger = skl_dsp_trigger;
        loader_ops.cleanup = skl_dsp_cleanup;

        return loader_ops;
}
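
/*
 * Per-platform DSP ops, keyed by the PCI device ID of the HDA controller:
 * the first two entries use the Skylake init/loader path, the last one the
 * Broxton path.
 */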
static const struct skl_dsp_ops dsp_ops[] = {
        {
                .id = 0x9d70,
                .loader_ops = skl_get_loader_ops,
                .init = skl_sst_dsp_init,
                .init_fw = skl_sst_init_fw,
                .cleanup = skl_sst_dsp_cleanup
        },
        {
                .id = 0x9d71,
                .loader_ops = skl_get_loader_ops,
                .init = skl_sst_dsp_init,
                .init_fw = skl_sst_init_fw,
                .cleanup = skl_sst_dsp_cleanup
        },
        {
                .id = 0x5a98,
                .loader_ops = bxt_get_loader_ops,
                .init = bxt_sst_dsp_init,
                .init_fw = bxt_sst_init_fw,
                .cleanup = bxt_sst_dsp_cleanup
        },
};

const struct skl_dsp_ops *skl_get_dsp_ops(int pci_id)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(dsp_ops); i++) {
                if (dsp_ops[i].id == pci_id)
                        return &dsp_ops[i];
        }

        return NULL;
}
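
/*
 * Enable the audio DSP (processing pipe capability), map its MMIO space
 * and register the DSP with the matching platform init routine.
 */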
int skl_init_dsp(struct skl *skl)
{
        void __iomem *mmio_base;
        struct hdac_ext_bus *ebus = &skl->ebus;
        struct hdac_bus *bus = ebus_to_hbus(ebus);
        struct skl_dsp_loader_ops loader_ops;
        int irq = bus->irq;
        const struct skl_dsp_ops *ops;
        int ret;

        /* enable ppcap interrupt */
        snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
        snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);

        /* read the BAR of the ADSP MMIO */
        mmio_base = pci_ioremap_bar(skl->pci, 4);
        if (mmio_base == NULL) {
                dev_err(bus->dev, "ioremap error\n");
                return -ENXIO;
        }

        ops = skl_get_dsp_ops(skl->pci->device);
        if (!ops)
                return -EIO;

        loader_ops = ops->loader_ops();
        ret = ops->init(bus->dev, mmio_base, irq,
                        skl->fw_name, loader_ops,
                        &skl->skl_sst);
        if (ret < 0)
                return ret;

        dev_dbg(bus->dev, "dsp registration status=%d\n", ret);

        return ret;
}
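
/*
 * Tear down the DSP: disable the processing pipe interrupt, run the
 * platform cleanup routine and unmap the ADSP MMIO space.
 */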
int skl_free_dsp(struct skl *skl)
{
        struct hdac_ext_bus *ebus = &skl->ebus;
        struct hdac_bus *bus = ebus_to_hbus(ebus);
        struct skl_sst *ctx = skl->skl_sst;
        const struct skl_dsp_ops *ops;

        /* disable ppcap interrupt */
        snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);

        ops = skl_get_dsp_ops(skl->pci->device);
        if (!ops)
                return -EIO;

        ops->cleanup(bus->dev, ctx);

        if (ctx->dsp->addr.lpe)
                iounmap(ctx->dsp->addr.lpe);

        return 0;
}
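
/*
 * Put the DSP to sleep on suspend and disable the processing pipe
 * capability and its interrupt.
 */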
int skl_suspend_dsp(struct skl *skl)
{
        struct skl_sst *ctx = skl->skl_sst;
        int ret;

        /* if ppcap is not supported return 0 */
        if (!skl->ebus.bus.ppcap)
                return 0;

        ret = skl_dsp_sleep(ctx->dsp);
        if (ret < 0)
                return ret;

        /* disable ppcap interrupt */
        snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);
        snd_hdac_ext_bus_ppcap_enable(&skl->ebus, false);

        return 0;
}
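
/*
 * Re-enable the processing pipe capability on resume and, if the firmware
 * has already been downloaded once, wake the DSP back up.
 */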
int skl_resume_dsp(struct skl *skl)
{
        struct skl_sst *ctx = skl->skl_sst;
        int ret;

        /* if ppcap is not supported return 0 */
        if (!skl->ebus.bus.ppcap)
                return 0;

        /* enable ppcap interrupt */
        snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
        snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);

        /* check if DSP 1st boot is done */
        if (skl->skl_sst->is_first_boot == true)
                return 0;

        /* disable dynamic clock gating during fw and lib download */
        ctx->enable_miscbdcge(ctx->dev, false);

        ret = skl_dsp_wake(ctx->dsp);
        ctx->enable_miscbdcge(ctx->dev, true);
        if (ret < 0)
                return ret;

        skl_dsp_enable_notification(skl->skl_sst, false);

        return ret;
}
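
/* Map a PCM sample width in bits to the corresponding SKL bit-depth value */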
enum skl_bitdepth skl_get_bit_depth(int params)
{
        switch (params) {
        case 8:
                return SKL_DEPTH_8BIT;

        case 16:
                return SKL_DEPTH_16BIT;

        case 24:
                return SKL_DEPTH_24BIT;

        case 32:
                return SKL_DEPTH_32BIT;

        default:
                return SKL_DEPTH_INVALID;
        }
}

/*
 * Each module in the DSP expects a base module configuration, which
 * consists of PCM format information calculated in the driver and resource
 * values read from the widget information passed through the topology
 * binary. This is sent when we create a module with the INIT_INSTANCE IPC
 * msg.
 */
static void skl_set_base_module_format(struct skl_sst *ctx,
                        struct skl_module_cfg *mconfig,
                        struct skl_base_cfg *base_cfg)
{
        struct skl_module_fmt *format = &mconfig->in_fmt[0];

        base_cfg->audio_fmt.number_of_channels = (u8)format->channels;

        base_cfg->audio_fmt.s_freq = format->s_freq;
        base_cfg->audio_fmt.bit_depth = format->bit_depth;
        base_cfg->audio_fmt.valid_bit_depth = format->valid_bit_depth;
        base_cfg->audio_fmt.ch_cfg = format->ch_cfg;

        dev_dbg(ctx->dev, "bit_depth=%x valid_bd=%x ch_config=%x\n",
                        format->bit_depth, format->valid_bit_depth,
                        format->ch_cfg);

        base_cfg->audio_fmt.channel_map = format->ch_map;

        base_cfg->audio_fmt.interleaving = format->interleaving_style;

        base_cfg->cps = mconfig->mcps;
        base_cfg->ibs = mconfig->ibs;
        base_cfg->obs = mconfig->obs;
        base_cfg->is_pages = mconfig->mem_pages;
}

/*
 * Copies copier capabilities into copier module and updates copier module
 * config size.
 */
static void skl_copy_copier_caps(struct skl_module_cfg *mconfig,
                                struct skl_cpr_cfg *cpr_mconfig)
{
        if (mconfig->formats_config.caps_size == 0)
                return;

        memcpy(cpr_mconfig->gtw_cfg.config_data,
                        mconfig->formats_config.caps,
                        mconfig->formats_config.caps_size);

        cpr_mconfig->gtw_cfg.config_length =
                        (mconfig->formats_config.caps_size) / 4;
}

#define SKL_NON_GATEWAY_CPR_NODE_ID 0xFFFFFFFF

/*
 * Calculate the gateway settings required for the copier module: the type
 * of gateway and the index of the gateway to use.
 */
static u32 skl_get_node_id(struct skl_sst *ctx,
                        struct skl_module_cfg *mconfig)
{
        union skl_connector_node_id node_id = {0};
        union skl_ssp_dma_node ssp_node = {0};
        struct skl_pipe_params *params = mconfig->pipe->p_params;

        switch (mconfig->dev_type) {
        case SKL_DEVICE_BT:
                node_id.node.dma_type =
                        (SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
                        SKL_DMA_I2S_LINK_OUTPUT_CLASS :
                        SKL_DMA_I2S_LINK_INPUT_CLASS;
                node_id.node.vindex = params->host_dma_id +
                                        (mconfig->vbus_id << 3);
                break;

        case SKL_DEVICE_I2S:
                node_id.node.dma_type =
                        (SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
                        SKL_DMA_I2S_LINK_OUTPUT_CLASS :
                        SKL_DMA_I2S_LINK_INPUT_CLASS;
                ssp_node.dma_node.time_slot_index = mconfig->time_slot;
                ssp_node.dma_node.i2s_instance = mconfig->vbus_id;
                node_id.node.vindex = ssp_node.val;
                break;

        case SKL_DEVICE_DMIC:
                node_id.node.dma_type = SKL_DMA_DMIC_LINK_INPUT_CLASS;
                node_id.node.vindex = mconfig->vbus_id +
                                        (mconfig->time_slot);
                break;

        case SKL_DEVICE_HDALINK:
                node_id.node.dma_type =
                        (SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
                        SKL_DMA_HDA_LINK_OUTPUT_CLASS :
                        SKL_DMA_HDA_LINK_INPUT_CLASS;
                node_id.node.vindex = params->link_dma_id;
                break;

        case SKL_DEVICE_HDAHOST:
                node_id.node.dma_type =
                        (SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
                        SKL_DMA_HDA_HOST_OUTPUT_CLASS :
                        SKL_DMA_HDA_HOST_INPUT_CLASS;
                node_id.node.vindex = params->host_dma_id;
                break;

        default:
                node_id.val = 0xFFFFFFFF;
                break;
        }

        return node_id.val;
}
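
/*
 * Fill the copier gateway configuration: node id, DMA buffer size (twice
 * the OBS for a source copier, twice the IBS for a sink) and any extra
 * capability blob from topology.
 */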
static void skl_setup_cpr_gateway_cfg(struct skl_sst *ctx,
                        struct skl_module_cfg *mconfig,
                        struct skl_cpr_cfg *cpr_mconfig)
{
        cpr_mconfig->gtw_cfg.node_id = skl_get_node_id(ctx, mconfig);

        if (cpr_mconfig->gtw_cfg.node_id == SKL_NON_GATEWAY_CPR_NODE_ID) {
                cpr_mconfig->cpr_feature_mask = 0;
                return;
        }

        if (SKL_CONN_SOURCE == mconfig->hw_conn_type)
                cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * mconfig->obs;
        else
                cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * mconfig->ibs;

        cpr_mconfig->cpr_feature_mask = 0;
        cpr_mconfig->gtw_cfg.config_length = 0;

        skl_copy_copier_caps(mconfig, cpr_mconfig);
}

#define DMA_CONTROL_ID 5
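
/*
 * Send the DMA control parameters from the topology capability blob to the
 * firmware using a large-config set IPC.
 */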
int skl_dsp_set_dma_control(struct skl_sst *ctx, struct skl_module_cfg *mconfig)
{
        struct skl_dma_control *dma_ctrl;
        struct skl_i2s_config_blob config_blob;
        struct skl_ipc_large_config_msg msg = {0};
        int err = 0;

        /*
         * if blob size is same as capability size, then no dma control
         * present so return
         */
        if (mconfig->formats_config.caps_size == sizeof(config_blob))
                return 0;

        msg.large_param_id = DMA_CONTROL_ID;
        msg.param_data_size = sizeof(struct skl_dma_control) +
                                mconfig->formats_config.caps_size;

        dma_ctrl = kzalloc(msg.param_data_size, GFP_KERNEL);
        if (dma_ctrl == NULL)
                return -ENOMEM;

        dma_ctrl->node_id = skl_get_node_id(ctx, mconfig);

        /* size in dwords */
        dma_ctrl->config_length = sizeof(config_blob) / 4;

        memcpy(dma_ctrl->config_data, mconfig->formats_config.caps,
                                mconfig->formats_config.caps_size);

        err = skl_ipc_set_large_config(&ctx->ipc, &msg, (u32 *)dma_ctrl);

        kfree(dma_ctrl);

        return err;
}
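
/* Fill the module output audio format from the first output pin format */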
static void skl_setup_out_format(struct skl_sst *ctx,
                        struct skl_module_cfg *mconfig,
                        struct skl_audio_data_format *out_fmt)
{
        struct skl_module_fmt *format = &mconfig->out_fmt[0];

        out_fmt->number_of_channels = (u8)format->channels;
        out_fmt->s_freq = format->s_freq;
        out_fmt->bit_depth = format->bit_depth;
        out_fmt->valid_bit_depth = format->valid_bit_depth;
        out_fmt->ch_cfg = format->ch_cfg;

        out_fmt->channel_map = format->ch_map;
        out_fmt->interleaving = format->interleaving_style;
        out_fmt->sample_type = format->sample_type;

        dev_dbg(ctx->dev, "copier out format chan=%d freq=%d bitdepth=%d\n",
                out_fmt->number_of_channels, format->s_freq, format->bit_depth);
}

/*
 * The DSP needs the SRC module for frequency conversion. SRC takes the
 * base module configuration and the target frequency as an extra parameter
 * passed as the src config.
 */
static void skl_set_src_format(struct skl_sst *ctx,
                        struct skl_module_cfg *mconfig,
                        struct skl_src_module_cfg *src_mconfig)
{
        struct skl_module_fmt *fmt = &mconfig->out_fmt[0];

        skl_set_base_module_format(ctx, mconfig,
                (struct skl_base_cfg *)src_mconfig);

        src_mconfig->src_cfg = fmt->s_freq;
}

/*
 * The DSP needs the updown module for channel conversion. The updown
 * module takes the base module configuration and the channel
 * configuration. It also takes coefficients; for now the firmware defaults
 * are applied here.
 */
static void skl_set_updown_mixer_format(struct skl_sst *ctx,
                        struct skl_module_cfg *mconfig,
                        struct skl_up_down_mixer_cfg *mixer_mconfig)
{
        struct skl_module_fmt *fmt = &mconfig->out_fmt[0];
        int i = 0;

        skl_set_base_module_format(ctx, mconfig,
                (struct skl_base_cfg *)mixer_mconfig);
        mixer_mconfig->out_ch_cfg = fmt->ch_cfg;

        /* Select F/W default coefficient */
        mixer_mconfig->coeff_sel = 0x0;

        /* User coeff, don't care since we are selecting F/W defaults */
        for (i = 0; i < UP_DOWN_MIXER_MAX_COEFF; i++)
                mixer_mconfig->coeff[i] = 0xDEADBEEF;
}

/*
 * 'copier' is a DSP-internal module which copies data from host DMA (HDA
 * host DMA) or a link (HDA link, SSP, PDM).
 * Here we calculate the copier module parameters, like PCM format, output
 * format and gateway settings.
 * copier_module_config is sent as the input buffer with the INIT_INSTANCE
 * IPC msg.
 */
static void skl_set_copier_format(struct skl_sst *ctx,
                        struct skl_module_cfg *mconfig,
                        struct skl_cpr_cfg *cpr_mconfig)
{
        struct skl_audio_data_format *out_fmt = &cpr_mconfig->out_fmt;
        struct skl_base_cfg *base_cfg = (struct skl_base_cfg *)cpr_mconfig;

        skl_set_base_module_format(ctx, mconfig, base_cfg);

        skl_setup_out_format(ctx, mconfig, out_fmt);
        skl_setup_cpr_gateway_cfg(ctx, mconfig, cpr_mconfig);
}

/*
 * Algo modules are DSP pre-processing modules. An algo module takes the
 * base module configuration and params.
 */
static void skl_set_algo_format(struct skl_sst *ctx,
                        struct skl_module_cfg *mconfig,
                        struct skl_algo_cfg *algo_mcfg)
{
        struct skl_base_cfg *base_cfg = (struct skl_base_cfg *)algo_mcfg;

        skl_set_base_module_format(ctx, mconfig, base_cfg);

        if (mconfig->formats_config.caps_size == 0)
                return;

        memcpy(algo_mcfg->params,
                        mconfig->formats_config.caps,
                        mconfig->formats_config.caps_size);
}

/*
 * The mic select module allows selecting one or more input channels, thus
 * acting as a demux.
 *
 * The mic select module takes the base module configuration and the
 * out-format configuration.
 */
static void skl_set_base_outfmt_format(struct skl_sst *ctx,
                        struct skl_module_cfg *mconfig,
                        struct skl_base_outfmt_cfg *base_outfmt_mcfg)
{
        struct skl_audio_data_format *out_fmt = &base_outfmt_mcfg->out_fmt;
        struct skl_base_cfg *base_cfg =
                                (struct skl_base_cfg *)base_outfmt_mcfg;

        skl_set_base_module_format(ctx, mconfig, base_cfg);
        skl_setup_out_format(ctx, mconfig, out_fmt);
}
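
/*
 * Return the size of the module configuration payload for the given module
 * type, including (for copier and algo modules) the capability blob passed
 * through topology.
 */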
static u16 skl_get_module_param_size(struct skl_sst *ctx,
                        struct skl_module_cfg *mconfig)
{
        u16 param_size;

        switch (mconfig->m_type) {
        case SKL_MODULE_TYPE_COPIER:
                param_size = sizeof(struct skl_cpr_cfg);
                param_size += mconfig->formats_config.caps_size;
                return param_size;

        case SKL_MODULE_TYPE_SRCINT:
                return sizeof(struct skl_src_module_cfg);

        case SKL_MODULE_TYPE_UPDWMIX:
                return sizeof(struct skl_up_down_mixer_cfg);

        case SKL_MODULE_TYPE_ALGO:
                param_size = sizeof(struct skl_base_cfg);
                param_size += mconfig->formats_config.caps_size;
                return param_size;

        case SKL_MODULE_TYPE_BASE_OUTFMT:
        case SKL_MODULE_TYPE_KPB:
                return sizeof(struct skl_base_outfmt_cfg);

        default:
                /*
                 * return only base cfg when no specific module type is
                 * specified
                 */
                return sizeof(struct skl_base_cfg);
        }

        return 0;
}

/*
 * DSP firmware supports various modules like copier, SRC, updown etc.
 * These modules require various parameters to be calculated and sent for
 * module initialization to the DSP. By default a generic module needs only
 * the base module format configuration.
 */
static int skl_set_module_format(struct skl_sst *ctx,
                        struct skl_module_cfg *module_config,
                        u16 *module_config_size,
                        void **param_data)
{
        u16 param_size;

        param_size = skl_get_module_param_size(ctx, module_config);

        *param_data = kzalloc(param_size, GFP_KERNEL);
        if (NULL == *param_data)
                return -ENOMEM;

        *module_config_size = param_size;

        switch (module_config->m_type) {
        case SKL_MODULE_TYPE_COPIER:
                skl_set_copier_format(ctx, module_config, *param_data);
                break;

        case SKL_MODULE_TYPE_SRCINT:
                skl_set_src_format(ctx, module_config, *param_data);
                break;

        case SKL_MODULE_TYPE_UPDWMIX:
                skl_set_updown_mixer_format(ctx, module_config, *param_data);
                break;

        case SKL_MODULE_TYPE_ALGO:
                skl_set_algo_format(ctx, module_config, *param_data);
                break;

        case SKL_MODULE_TYPE_BASE_OUTFMT:
        case SKL_MODULE_TYPE_KPB:
                skl_set_base_outfmt_format(ctx, module_config, *param_data);
                break;

        default:
                skl_set_base_module_format(ctx, module_config, *param_data);
                break;
        }

        dev_dbg(ctx->dev, "Module type=%d config size: %d bytes\n",
                        module_config->id.module_id, param_size);
        print_hex_dump_debug("Module params:", DUMP_PREFIX_OFFSET, 8, 4,
                        *param_data, param_size, false);
        return 0;
}
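
/*
 * Find the pin (queue) index whose id matches the given module instance
 * id; return -EINVAL if none matches.
 */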
static int skl_get_queue_index(struct skl_module_pin *mpin,
                        struct skl_module_inst_id id, int max)
{
        int i;

        for (i = 0; i < max; i++) {
                if (mpin[i].id.module_id == id.module_id &&
                        mpin[i].id.instance_id == id.instance_id)
                        return i;
        }

        return -EINVAL;
}

/*
 * Allocates a queue for the module.
 * If the pins are dynamic, the pin_index is allocated from 0 to max_pin.
 * If they are static, the pin_index is fixed based on module_id and
 * instance id.
 */
static int skl_alloc_queue(struct skl_module_pin *mpin,
                        struct skl_module_cfg *tgt_cfg, int max)
{
        int i;
        struct skl_module_inst_id id = tgt_cfg->id;

        /*
         * if the pin is dynamic, find the first free pin, otherwise find
         * the pin matching the module and instance id; topology will
         * ensure a unique pin is assigned to this, so there is no need to
         * allocate/free
         */
        for (i = 0; i < max; i++) {
                if (mpin[i].is_dynamic) {
                        if (!mpin[i].in_use &&
                                mpin[i].pin_state == SKL_PIN_UNBIND) {

                                mpin[i].in_use = true;
                                mpin[i].id.module_id = id.module_id;
                                mpin[i].id.instance_id = id.instance_id;
                                mpin[i].id.pvt_id = id.pvt_id;
                                mpin[i].tgt_mcfg = tgt_cfg;
                                return i;
                        }
                } else {
                        if (mpin[i].id.module_id == id.module_id &&
                                mpin[i].id.instance_id == id.instance_id &&
                                mpin[i].pin_state == SKL_PIN_UNBIND) {

                                mpin[i].tgt_mcfg = tgt_cfg;
                                return i;
                        }
                }
        }

        return -EINVAL;
}
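
/*
 * Release a pin (queue): clear its binding state and, if it is dynamic,
 * the module id it was bound to.
 */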
static void skl_free_queue(struct skl_module_pin *mpin, int q_index)
{
        if (mpin[q_index].is_dynamic) {
                mpin[q_index].in_use = false;
                mpin[q_index].id.module_id = 0;
                mpin[q_index].id.instance_id = 0;
                mpin[q_index].id.pvt_id = 0;
        }
        mpin[q_index].pin_state = SKL_PIN_UNBIND;
        mpin[q_index].tgt_mcfg = NULL;
}

/* Module state will be set to uninit, if all the out pin states are UNBIND */
static void skl_clear_module_state(struct skl_module_pin *mpin, int max,
                                                struct skl_module_cfg *mcfg)
{
        int i;
        bool found = false;

        for (i = 0; i < max; i++) {
                if (mpin[i].pin_state == SKL_PIN_UNBIND)
                        continue;
                found = true;
                break;
        }

        if (!found)
                mcfg->m_state = SKL_MODULE_UNINIT;
        return;
}

/*
 * A module needs to be instantiated in the DSP. A module is present in a
 * collection of modules referred to as a PIPE.
 * We first calculate the module format, based on the module type, and then
 * invoke the DSP by sending an IPC INIT_INSTANCE message using the IPC
 * helper.
 */
int skl_init_module(struct skl_sst *ctx,
                        struct skl_module_cfg *mconfig)
{
        u16 module_config_size = 0;
        void *param_data = NULL;
        int ret;
        struct skl_ipc_init_instance_msg msg;

        dev_dbg(ctx->dev, "%s: module_id = %d instance=%d\n", __func__,
                 mconfig->id.module_id, mconfig->id.pvt_id);

        if (mconfig->pipe->state != SKL_PIPE_CREATED) {
                dev_err(ctx->dev, "Pipe not created state= %d pipe_id= %d\n",
                                 mconfig->pipe->state, mconfig->pipe->ppl_id);
                return -EIO;
        }

        ret = skl_set_module_format(ctx, mconfig,
                        &module_config_size, &param_data);
        if (ret < 0) {
                dev_err(ctx->dev, "Failed to set module format ret=%d\n", ret);
                return ret;
        }

        msg.module_id = mconfig->id.module_id;
        msg.instance_id = mconfig->id.pvt_id;
        msg.ppl_instance_id = mconfig->pipe->ppl_id;
        msg.param_data_size = module_config_size;
        msg.core_id = mconfig->core_id;
        msg.domain = mconfig->domain;

        ret = skl_ipc_init_instance(&ctx->ipc, &msg, param_data);
        if (ret < 0) {
                dev_err(ctx->dev, "Failed to init instance ret=%d\n", ret);
                kfree(param_data);
                return ret;
        }
        mconfig->m_state = SKL_MODULE_INIT_DONE;
        kfree(param_data);
        return ret;
}

static void skl_dump_bind_info(struct skl_sst *ctx, struct skl_module_cfg
        *src_module, struct skl_module_cfg *dst_module)
{
        dev_dbg(ctx->dev, "%s: src module_id = %d src_instance=%d\n",
                __func__, src_module->id.module_id, src_module->id.pvt_id);
        dev_dbg(ctx->dev, "%s: dst_module=%d dst_instance=%d\n", __func__,
                 dst_module->id.module_id, dst_module->id.pvt_id);

        dev_dbg(ctx->dev, "src_module state = %d dst module state = %d\n",
                src_module->m_state, dst_module->m_state);
}

/*
 * On module freeup, we need to unbind the module from the modules it is
 * already bound to.
 * Find the allocated pins and unbind them using the bind_unbind IPC.
 */
int skl_unbind_modules(struct skl_sst *ctx,
                        struct skl_module_cfg *src_mcfg,
                        struct skl_module_cfg *dst_mcfg)
{
        int ret;
        struct skl_ipc_bind_unbind_msg msg;
        struct skl_module_inst_id src_id = src_mcfg->id;
        struct skl_module_inst_id dst_id = dst_mcfg->id;
        int in_max = dst_mcfg->max_in_queue;
        int out_max = src_mcfg->max_out_queue;
        int src_index, dst_index, src_pin_state, dst_pin_state;

        skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);

        /* get src queue index */
        src_index = skl_get_queue_index(src_mcfg->m_out_pin, dst_id, out_max);
        if (src_index < 0)
                return 0;

        msg.src_queue = src_index;

        /* get dst queue index */
        dst_index = skl_get_queue_index(dst_mcfg->m_in_pin, src_id, in_max);
        if (dst_index < 0)
                return 0;

        msg.dst_queue = dst_index;

        src_pin_state = src_mcfg->m_out_pin[src_index].pin_state;
        dst_pin_state = dst_mcfg->m_in_pin[dst_index].pin_state;

        if (src_pin_state != SKL_PIN_BIND_DONE ||
                dst_pin_state != SKL_PIN_BIND_DONE)
                return 0;

        msg.module_id = src_mcfg->id.module_id;
        msg.instance_id = src_mcfg->id.pvt_id;
        msg.dst_module_id = dst_mcfg->id.module_id;
        msg.dst_instance_id = dst_mcfg->id.pvt_id;
        msg.bind = false;

        ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);
        if (!ret) {
                /* free queue only if unbind is success */
                skl_free_queue(src_mcfg->m_out_pin, src_index);
                skl_free_queue(dst_mcfg->m_in_pin, dst_index);

                /*
                 * check the src module bind state only, as bind is
                 * always from src -> sink
                 */
                skl_clear_module_state(src_mcfg->m_out_pin, out_max, src_mcfg);
        }

        return ret;
}

/*
 * Once a module is instantiated it needs to be 'bound' to other modules in
 * the pipeline. For binding we need to find the module pins which are to
 * be bound together.
 * This function finds the pins and then sends the bind_unbind IPC message
 * to the DSP using the IPC helper.
 */
int skl_bind_modules(struct skl_sst *ctx,
                        struct skl_module_cfg *src_mcfg,
                        struct skl_module_cfg *dst_mcfg)
{
        int ret;
        struct skl_ipc_bind_unbind_msg msg;
        int in_max = dst_mcfg->max_in_queue;
        int out_max = src_mcfg->max_out_queue;
        int src_index, dst_index;

        skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);

        if (src_mcfg->m_state < SKL_MODULE_INIT_DONE ||
                dst_mcfg->m_state < SKL_MODULE_INIT_DONE)
                return 0;

        src_index = skl_alloc_queue(src_mcfg->m_out_pin, dst_mcfg, out_max);
        if (src_index < 0)
                return -EINVAL;

        msg.src_queue = src_index;
        dst_index = skl_alloc_queue(dst_mcfg->m_in_pin, src_mcfg, in_max);
        if (dst_index < 0) {
                skl_free_queue(src_mcfg->m_out_pin, src_index);
                return -EINVAL;
        }

        msg.dst_queue = dst_index;

        dev_dbg(ctx->dev, "src queue = %d dst queue =%d\n",
                         msg.src_queue, msg.dst_queue);

        msg.module_id = src_mcfg->id.module_id;
        msg.instance_id = src_mcfg->id.pvt_id;
        msg.dst_module_id = dst_mcfg->id.module_id;
        msg.dst_instance_id = dst_mcfg->id.pvt_id;
        msg.bind = true;

        ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);

        if (!ret) {
                src_mcfg->m_state = SKL_MODULE_BIND_DONE;
                src_mcfg->m_out_pin[src_index].pin_state = SKL_PIN_BIND_DONE;
                dst_mcfg->m_in_pin[dst_index].pin_state = SKL_PIN_BIND_DONE;
        } else {
                /* error case, if IPC fails, clear the queue index */
                skl_free_queue(src_mcfg->m_out_pin, src_index);
                skl_free_queue(dst_mcfg->m_in_pin, dst_index);
        }

        return ret;
}
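
/* Helper to send the set pipeline state IPC for a pipe */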
static int skl_set_pipe_state(struct skl_sst *ctx, struct skl_pipe *pipe,
        enum skl_ipc_pipeline_state state)
{
        dev_dbg(ctx->dev, "%s: pipe_state = %d\n", __func__, state);

        return skl_ipc_set_pipeline_state(&ctx->ipc, pipe->ppl_id, state);
}

/*
 * A pipeline is a collection of modules. Before a module is instantiated,
 * a pipeline needs to be created for it.
 * This function creates the pipeline by sending the create pipeline IPC
 * message to the FW.
 */
int skl_create_pipeline(struct skl_sst *ctx, struct skl_pipe *pipe)
{
        int ret;

        dev_dbg(ctx->dev, "%s: pipe_id = %d\n", __func__, pipe->ppl_id);

        ret = skl_ipc_create_pipeline(&ctx->ipc, pipe->memory_pages,
                                pipe->pipe_priority, pipe->ppl_id);
        if (ret < 0) {
                dev_err(ctx->dev, "Failed to create pipeline\n");
                return ret;
        }

        pipe->state = SKL_PIPE_CREATED;

        return 0;
}

/*
 * A pipeline needs to be deleted on cleanup. If a pipeline is running,
 * then pause the pipeline first and then delete it.
 * The pipe delete is done by sending the delete pipeline IPC. The DSP will
 * stop the DMA engines and release the resources.
 */
int skl_delete_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
        int ret;

        dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);

        /* If pipe is started, do stop the pipe in FW. */
        if (pipe->state > SKL_PIPE_STARTED) {
                ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
                if (ret < 0) {
                        dev_err(ctx->dev, "Failed to stop pipeline\n");
                        return ret;
                }

                pipe->state = SKL_PIPE_PAUSED;
        }

        /* If pipe was not created in FW, do not try to delete it */
        if (pipe->state < SKL_PIPE_CREATED)
                return 0;

        ret = skl_ipc_delete_pipeline(&ctx->ipc, pipe->ppl_id);
        if (ret < 0) {
                dev_err(ctx->dev, "Failed to delete pipeline\n");
                return ret;
        }

        pipe->state = SKL_PIPE_INVALID;

        return ret;
}

/*
 * A pipeline is also a scheduling entity in the DSP which can be run or
 * stopped.
 * For processing data the pipe needs to be run by sending the set pipe
 * state IPC to the DSP.
 */
int skl_run_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
        int ret;

        dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);

        /* If pipe was not created in FW, do not try to pause or delete */
        if (pipe->state < SKL_PIPE_CREATED)
                return 0;

        /* Pipe has to be paused before it is started */
        ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
        if (ret < 0) {
                dev_err(ctx->dev, "Failed to pause pipe\n");
                return ret;
        }

        pipe->state = SKL_PIPE_PAUSED;

        ret = skl_set_pipe_state(ctx, pipe, PPL_RUNNING);
        if (ret < 0) {
                dev_err(ctx->dev, "Failed to start pipe\n");
                return ret;
        }

        pipe->state = SKL_PIPE_STARTED;

        return 0;
}

/*
 * Stop the pipeline by sending the set pipe state IPC. The DSP doesn't
 * implement stop, so we always send a pause message.
 */
int skl_stop_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
        int ret;

        dev_dbg(ctx->dev, "In %s pipe=%d\n", __func__, pipe->ppl_id);

        /* If pipe was not created in FW, do not try to pause or delete */
        if (pipe->state < SKL_PIPE_PAUSED)
                return 0;

        ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
        if (ret < 0) {
                dev_dbg(ctx->dev, "Failed to stop pipe\n");
                return ret;
        }

        pipe->state = SKL_PIPE_PAUSED;

        return 0;
}

/*
 * Reset the pipeline by sending the set pipe state IPC; this will reset
 * the DMA from the DSP side.
 */
int skl_reset_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
        int ret;

        /* If pipe was not created in FW, do not try to pause or delete */
        if (pipe->state < SKL_PIPE_PAUSED)
                return 0;

        ret = skl_set_pipe_state(ctx, pipe, PPL_RESET);
        if (ret < 0) {
                dev_dbg(ctx->dev, "Failed to reset pipe ret=%d\n", ret);
                return ret;
        }

        pipe->state = SKL_PIPE_RESET;

        return 0;
}

/* Algo parameter set helper function */
int skl_set_module_params(struct skl_sst *ctx, u32 *params, int size,
                                u32 param_id, struct skl_module_cfg *mcfg)
{
        struct skl_ipc_large_config_msg msg;

        msg.module_id = mcfg->id.module_id;
        msg.instance_id = mcfg->id.pvt_id;
        msg.param_data_size = size;
        msg.large_param_id = param_id;

        return skl_ipc_set_large_config(&ctx->ipc, &msg, params);
}
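
/* Algo parameter get helper function */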
int skl_get_module_params(struct skl_sst *ctx, u32 *params, int size,
                                u32 param_id, struct skl_module_cfg *mcfg)
{
        struct skl_ipc_large_config_msg msg;

        msg.module_id = mcfg->id.module_id;
        msg.instance_id = mcfg->id.pvt_id;
        msg.param_data_size = size;
        msg.large_param_id = param_id;

        return skl_ipc_get_large_config(&ctx->ipc, &msg, params);
}