sst-firmware.c

/*
 * Intel SST Firmware Loader
 *
 * Copyright (C) 2013, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include <linux/acpi.h>

/* supported DMA engine drivers */
#include <linux/dma/dw.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include "sst-dsp.h"
#include "sst-dsp-priv.h"

#define SST_DMA_RESOURCES	2
#define SST_DSP_DMA_MAX_BURST	0x3
#define SST_HSW_BLOCK_ANY	0xffffffff

#define SST_HSW_MASK_DMA_ADDR_DSP 0xfff00000

struct sst_dma {
	struct sst_dsp *sst;

	struct dw_dma_chip *chip;

	struct dma_async_tx_descriptor *desc;
	struct dma_chan *ch;
};

static inline void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
{
	u32 tmp = 0;
	int i, m, n;
	const u8 *src_byte = src;

	m = bytes / 4;
	n = bytes % 4;

	/* __iowrite32_copy uses a 32-bit word count, so divide the size by 4 */
	__iowrite32_copy((void *)dest, src, m);

	if (n) {
		for (i = 0; i < n; i++)
			tmp |= (u32)*(src_byte + m * 4 + i) << (i * 8);
		__iowrite32_copy((void *)(dest + m * 4), &tmp, 1);
	}
}
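
/*
 * Worked example (illustrative): for bytes = 7, m = 1 and n = 3, so one
 * 32-bit word is copied directly by __iowrite32_copy() and the remaining
 * three bytes are packed little-endian into "tmp", which is then written
 * out as a final zero-padded 32-bit word.
 */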
static void sst_dma_transfer_complete(void *arg)
{
	struct sst_dsp *sst = (struct sst_dsp *)arg;

	dev_dbg(sst->dev, "DMA: callback\n");
}

static int sst_dsp_dma_copy(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	struct dma_async_tx_descriptor *desc;
	struct sst_dma *dma = sst->dma;

	if (dma->ch == NULL) {
		dev_err(sst->dev, "error: no DMA channel\n");
		return -ENODEV;
	}

	dev_dbg(sst->dev, "DMA: src: 0x%lx dest 0x%lx size %zu\n",
		(unsigned long)src_addr, (unsigned long)dest_addr, size);

	desc = dma->ch->device->device_prep_dma_memcpy(dma->ch, dest_addr,
		src_addr, size, DMA_CTRL_ACK);
	if (!desc) {
		dev_err(sst->dev, "error: dma prep memcpy failed\n");
		return -EINVAL;
	}

	desc->callback = sst_dma_transfer_complete;
	desc->callback_param = sst;

	desc->tx_submit(desc);
	dma_wait_for_async_tx(desc);

	return 0;
}

/* copy to DSP */
int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr | SST_HSW_MASK_DMA_ADDR_DSP,
			src_addr, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyto);

/* copy from DSP */
int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr,
			src_addr | SST_HSW_MASK_DMA_ADDR_DSP, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyfrom);
/* remove module from memory - callers hold locks */
static void block_list_remove(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int err;

	/* disable each block */
	list_for_each_entry(block, block_list, module_list) {

		if (block->ops && block->ops->disable) {
			err = block->ops->disable(block);
			if (err < 0)
				dev_err(dsp->dev,
					"error: can't disable block %d:%d\n",
					block->type, block->index);
		}
	}

	/* mark each block as free */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {
		list_del(&block->module_list);
		list_move(&block->list, &dsp->free_block_list);
		dev_dbg(dsp->dev, "block freed %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}
}

/* prepare the memory block to receive data from host - callers hold locks */
static int block_list_prepare(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block;
	int ret = 0;

	/* enable each block so that it's ready for data */
	list_for_each_entry(block, block_list, module_list) {

		if (block->ops && block->ops->enable && !block->users) {
			ret = block->ops->enable(block);
			if (ret < 0) {
				dev_err(dsp->dev,
					"error: can't enable block %d:%d\n",
					block->type, block->index);
				goto err;
			}
		}
	}
	return ret;

err:
	list_for_each_entry(block, block_list, module_list) {
		if (block->ops && block->ops->disable)
			block->ops->disable(block);
	}
	return ret;
}

static struct dw_dma_chip *dw_probe(struct device *dev, struct resource *mem,
	int irq)
{
	struct dw_dma_chip *chip;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return ERR_PTR(-ENOMEM);

	chip->irq = irq;
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return ERR_CAST(chip->regs);

	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
	if (err)
		return ERR_PTR(err);

	chip->dev = dev;

	err = dw_dma_probe(chip);
	if (err)
		return ERR_PTR(err);

	return chip;
}

static void dw_remove(struct dw_dma_chip *chip)
{
	dw_dma_remove(chip);
}

static bool dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct sst_dsp *dsp = (struct sst_dsp *)param;

	return chan->device->dev == dsp->dma_dev;
}

int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id)
{
	struct sst_dma *dma = dsp->dma;
	struct dma_slave_config slave;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma->ch = dma_request_channel(mask, dma_chan_filter, dsp);
	if (dma->ch == NULL) {
		dev_err(dsp->dev, "error: DMA request channel failed\n");
		return -EIO;
	}

	memset(&slave, 0, sizeof(slave));
	slave.direction = DMA_MEM_TO_DEV;
	slave.src_addr_width =
		slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	slave.src_maxburst = slave.dst_maxburst = SST_DSP_DMA_MAX_BURST;

	ret = dmaengine_slave_config(dma->ch, &slave);
	if (ret) {
		dev_err(dsp->dev, "error: unable to set DMA slave config %d\n",
			ret);
		dma_release_channel(dma->ch);
		dma->ch = NULL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_get_channel);

void sst_dsp_dma_put_channel(struct sst_dsp *dsp)
{
	struct sst_dma *dma = dsp->dma;

	if (!dma->ch)
		return;

	dma_release_channel(dma->ch);
	dma->ch = NULL;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_put_channel);
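
/*
 * Usage sketch (hypothetical caller, for illustration only): DMA transfers
 * are bracketed by channel acquisition and release, e.g.
 *
 *	ret = sst_dsp_dma_get_channel(dsp, 0);
 *	if (ret < 0)
 *		return ret;
 *	ret = sst_dsp_dma_copyto(dsp, dsp->addr.lpe_base + dsp_offset,
 *				 buf_paddr, size);
 *	sst_dsp_dma_put_channel(dsp);
 *
 * "dsp_offset", "buf_paddr" and "size" are assumed names here, not symbols
 * defined in this file.
 */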
int sst_dma_new(struct sst_dsp *sst)
{
	struct sst_pdata *sst_pdata = sst->pdata;
	struct sst_dma *dma;
	struct resource mem;
	int ret = 0;

	if (sst->pdata->resindex_dma_base == -1)
		/* DMA is not used, return and squelch error messages */
		return 0;

	/* configure the correct platform data for whatever DMA engine
	 * is attached to the ADSP IP. */
	switch (sst->pdata->dma_engine) {
	case SST_DMA_TYPE_DW:
		break;
	default:
		dev_err(sst->dev, "error: invalid DMA engine %d\n",
			sst->pdata->dma_engine);
		return -EINVAL;
	}

	dma = devm_kzalloc(sst->dev, sizeof(struct sst_dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->sst = sst;

	memset(&mem, 0, sizeof(mem));

	mem.start = sst->addr.lpe_base + sst_pdata->dma_base;
	mem.end = sst->addr.lpe_base + sst_pdata->dma_base + sst_pdata->dma_size - 1;
	mem.flags = IORESOURCE_MEM;

	/* now register DMA engine device */
	dma->chip = dw_probe(sst->dma_dev, &mem, sst_pdata->irq);
	if (IS_ERR(dma->chip)) {
		dev_err(sst->dev, "error: DMA device register failed\n");
		ret = PTR_ERR(dma->chip);
		goto err_dma_dev;
	}

	sst->dma = dma;
	sst->fw_use_dma = true;
	return 0;

err_dma_dev:
	devm_kfree(sst->dev, dma);
	return ret;
}
EXPORT_SYMBOL(sst_dma_new);

void sst_dma_free(struct sst_dma *dma)
{
	if (dma == NULL)
		return;

	if (dma->ch)
		dma_release_channel(dma->ch);

	if (dma->chip)
		dw_remove(dma->chip);
}
EXPORT_SYMBOL(sst_dma_free);

/* create new generic firmware object */
struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
	const struct firmware *fw, void *private)
{
	struct sst_fw *sst_fw;
	int err;

	if (!dsp->ops->parse_fw)
		return NULL;

	sst_fw = kzalloc(sizeof(*sst_fw), GFP_KERNEL);
	if (sst_fw == NULL)
		return NULL;

	sst_fw->dsp = dsp;
	sst_fw->private = private;
	sst_fw->size = fw->size;

	/* allocate DMA buffer to store FW data */
	sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
				&sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
	if (!sst_fw->dma_buf) {
		dev_err(dsp->dev, "error: DMA alloc failed\n");
		kfree(sst_fw);
		return NULL;
	}

	/* copy FW data to DMA-able memory */
	memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);

	if (dsp->fw_use_dma) {
		err = sst_dsp_dma_get_channel(dsp, 0);
		if (err < 0)
			goto chan_err;
	}

	/* call core specific FW parser to load FW data into DSP */
	err = dsp->ops->parse_fw(sst_fw);
	if (err < 0) {
		dev_err(dsp->dev, "error: parse fw failed %d\n", err);
		goto parse_err;
	}

	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);

	mutex_lock(&dsp->mutex);
	list_add(&sst_fw->list, &dsp->fw_list);
	mutex_unlock(&dsp->mutex);

	return sst_fw;

parse_err:
	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);
chan_err:
	dma_free_coherent(dsp->dma_dev, sst_fw->size,
				sst_fw->dma_buf,
				sst_fw->dmable_fw_paddr);
	sst_fw->dma_buf = NULL;
	kfree(sst_fw);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_fw_new);
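
/*
 * Usage sketch (hypothetical platform driver, for illustration only):
 *
 *	const struct firmware *fw;
 *	struct sst_fw *sst_fw;
 *
 *	if (request_firmware(&fw, "intel/fw.bin", dev) == 0) {
 *		sst_fw = sst_fw_new(dsp, fw, NULL);
 *		release_firmware(fw);
 *	}
 *
 * The firmware file name is an assumption; real drivers pass their own
 * blob and an optional driver private pointer.
 */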
int sst_fw_reload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	int ret;

	dev_dbg(dsp->dev, "reloading firmware\n");

	/* call core specific FW parser to load FW data into DSP */
	ret = dsp->ops->parse_fw(sst_fw);
	if (ret < 0)
		dev_err(dsp->dev, "error: parse fw failed %d\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(sst_fw_reload);

void sst_fw_unload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *module, *mtmp;
	struct sst_module_runtime *runtime, *rtmp;

	dev_dbg(dsp->dev, "unloading firmware\n");

	mutex_lock(&dsp->mutex);

	/* check module by module */
	list_for_each_entry_safe(module, mtmp, &dsp->module_list, list) {
		if (module->sst_fw == sst_fw) {

			/* remove runtime modules */
			list_for_each_entry_safe(runtime, rtmp, &module->runtime_list, list) {

				block_list_remove(dsp, &runtime->block_list);
				list_del(&runtime->list);
				kfree(runtime);
			}

			/* now remove the module */
			block_list_remove(dsp, &module->block_list);
			list_del(&module->list);
			kfree(module);
		}
	}

	/* remove all scratch blocks */
	block_list_remove(dsp, &dsp->scratch_block_list);

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_unload);

/* free single firmware object */
void sst_fw_free(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_fw->list);
	mutex_unlock(&dsp->mutex);

	if (sst_fw->dma_buf)
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
	kfree(sst_fw);
}
EXPORT_SYMBOL_GPL(sst_fw_free);

/* free all firmware objects */
void sst_fw_free_all(struct sst_dsp *dsp)
{
	struct sst_fw *sst_fw, *t;

	mutex_lock(&dsp->mutex);
	list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {

		list_del(&sst_fw->list);
		/* free against the device the buffer was allocated from */
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
		kfree(sst_fw);
	}
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_free_all);

/* create a new SST generic module from FW template */
struct sst_module *sst_module_new(struct sst_fw *sst_fw,
	struct sst_module_template *template, void *private)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *sst_module;

	sst_module = kzalloc(sizeof(*sst_module), GFP_KERNEL);
	if (sst_module == NULL)
		return NULL;

	sst_module->id = template->id;
	sst_module->dsp = dsp;
	sst_module->sst_fw = sst_fw;
	sst_module->scratch_size = template->scratch_size;
	sst_module->persistent_size = template->persistent_size;
	sst_module->entry = template->entry;
	sst_module->state = SST_MODULE_STATE_UNLOADED;

	INIT_LIST_HEAD(&sst_module->block_list);
	INIT_LIST_HEAD(&sst_module->runtime_list);

	mutex_lock(&dsp->mutex);
	list_add(&sst_module->list, &dsp->module_list);
	mutex_unlock(&dsp->mutex);

	return sst_module;
}
EXPORT_SYMBOL_GPL(sst_module_new);
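
/*
 * Usage sketch (illustration only): a core specific parse_fw() callback
 * would typically fill a template from a firmware header and create the
 * module, e.g.
 *
 *	struct sst_module_template tmpl = {
 *		.id = hdr_id,
 *		.entry = hdr_entry,
 *		.persistent_size = hdr_persistent,
 *		.scratch_size = hdr_scratch,
 *	};
 *	module = sst_module_new(sst_fw, &tmpl, NULL);
 *
 * The "hdr_*" values are assumed to come from the core's own firmware
 * format.
 */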
/* free firmware module and remove from available list */
void sst_module_free(struct sst_module *sst_module)
{
	struct sst_dsp *dsp = sst_module->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_module->list);
	mutex_unlock(&dsp->mutex);

	kfree(sst_module);
}
EXPORT_SYMBOL_GPL(sst_module_free);

struct sst_module_runtime *sst_module_runtime_new(struct sst_module *module,
	int id, void *private)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_module_runtime *runtime;

	runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
	if (runtime == NULL)
		return NULL;

	runtime->id = id;
	runtime->dsp = dsp;
	runtime->module = module;
	INIT_LIST_HEAD(&runtime->block_list);

	mutex_lock(&dsp->mutex);
	list_add(&runtime->list, &module->runtime_list);
	mutex_unlock(&dsp->mutex);

	return runtime;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_new);

void sst_module_runtime_free(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&runtime->list);
	mutex_unlock(&dsp->mutex);

	kfree(runtime);
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free);

static struct sst_mem_block *find_block(struct sst_dsp *dsp,
	struct sst_block_allocator *ba)
{
	struct sst_mem_block *block;

	list_for_each_entry(block, &dsp->free_block_list, list) {
		if (block->type == ba->type && block->offset == ba->offset)
			return block;
	}

	return NULL;
}

/* Block allocator must be on block boundary */
static int block_alloc_contiguous(struct sst_dsp *dsp,
	struct sst_block_allocator *ba, struct list_head *block_list)
{
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct sst_mem_block *block;
	u32 block_start = SST_HSW_BLOCK_ANY;
	int size = ba->size, offset = ba->offset;

	while (ba->size > 0) {

		block = find_block(dsp, ba);
		if (!block) {
			list_splice(&tmp, &dsp->free_block_list);

			ba->size = size;
			ba->offset = offset;
			return -ENOMEM;
		}

		list_move_tail(&block->list, &tmp);
		ba->offset += block->size;
		ba->size -= block->size;
	}
	ba->size = size;
	ba->offset = offset;

	list_for_each_entry(block, &tmp, list) {

		if (block->offset < block_start)
			block_start = block->offset;

		list_add(&block->module_list, block_list);

		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}

	list_splice(&tmp, &dsp->used_block_list);
	return 0;
}

/* allocate first free DSP blocks for data - callers hold locks */
static int block_alloc(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int ret = 0;

	if (ba->size == 0)
		return 0;

	/* find first free whole blocks that can hold module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		if (ba->size > block->size)
			continue;

		ba->offset = block->offset;
		block->bytes_used = ba->size % block->size;
		list_add(&block->module_list, block_list);
		list_move(&block->list, &dsp->used_block_list);
		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
		return 0;
	}

	/* then find free multiple blocks that can hold module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		/* do we span > 1 blocks */
		if (ba->size > block->size) {

			/* align ba to block boundary */
			ba->offset = block->offset;

			ret = block_alloc_contiguous(dsp, ba, block_list);
			if (ret == 0)
				return ret;
		}
	}

	/* not enough free block space */
	return -ENOMEM;
}

int sst_alloc_blocks(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	int ret;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba->size, ba->offset, ba->type);

	mutex_lock(&dsp->mutex);

	ret = block_alloc(dsp, ba, block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc blocks %d\n", ret);
		goto out;
	}

	/* prepare DSP blocks for module usage */
	ret = block_list_prepare(dsp, block_list);
	if (ret < 0)
		dev_err(dsp->dev, "error: prepare failed\n");

out:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_alloc_blocks);
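
/*
 * Usage sketch (illustration only): callers fill a struct
 * sst_block_allocator and own the returned block list, e.g.
 *
 *	struct sst_block_allocator ba = {
 *		.size = 0x1000,
 *		.type = SST_MEM_DRAM,
 *	};
 *	LIST_HEAD(blocks);
 *
 *	ret = sst_alloc_blocks(dsp, &ba, &blocks);
 *	...
 *	sst_free_blocks(dsp, &blocks);
 *
 * The size value here is arbitrary; ba.offset is updated by the allocator
 * to the offset actually chosen.
 */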
int sst_free_blocks(struct sst_dsp *dsp, struct list_head *block_list)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_free_blocks);

/* allocate memory blocks for static module addresses - callers hold locks */
static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	struct sst_block_allocator ba_tmp = *ba;
	u32 end = ba->offset + ba->size, block_end;
	int err;

	/* only IRAM/DRAM blocks are managed */
	if (ba->type != SST_MEM_IRAM && ba->type != SST_MEM_DRAM)
		return 0;

	/* are blocks already attached to this module */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		block_end = block->offset + block->size;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end)
			return 0;

		/* does block span more than 1 section */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* align ba to block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;
			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			/* module already owns blocks */
			return 0;
		}
	}

	/* find first free blocks that can hold section in free list */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		block_end = block->offset + block->size;

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);
			dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
				block->type, block->index, block->offset);
			return 0;
		}

		/* does block span more than 1 section */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);
			/* align ba to block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;

			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			return 0;
		}
	}

	return -ENOMEM;
}

/* Load fixed module data into DSP memory blocks */
int sst_module_alloc_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_fw *sst_fw = module->sst_fw;
	struct sst_block_allocator ba;
	int ret;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->size;
	ba.type = module->type;
	ba.offset = module->offset;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba.size, ba.offset, ba.type);

	mutex_lock(&dsp->mutex);

	/* alloc blocks that includes this section */
	ret = block_alloc_fixed(dsp, &ba, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for section at offset 0x%x size 0x%x\n",
			module->offset, module->size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: fw module prepare failed\n");
		goto err;
	}

	/* copy partial module data to blocks */
	if (dsp->fw_use_dma) {
		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + module->offset,
			sst_fw->dmable_fw_paddr + module->data_offset,
			module->size);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(dsp->addr.lpe + module->offset, module->data,
			module->size);

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_alloc_blocks);
/* Unload entire module from DSP memory */
int sst_module_free_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_free_blocks);

int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
	int offset)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	struct sst_block_allocator ba;
	int ret;

	if (module->persistent_size == 0)
		return 0;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->persistent_size;
	ba.type = SST_MEM_DRAM;

	mutex_lock(&dsp->mutex);

	/* do we need to allocate at a fixed address ? */
	if (offset != 0) {

		ba.offset = offset;

		dev_dbg(dsp->dev, "persistent fixed block request 0x%x bytes type %d offset 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that includes this section */
		ret = block_alloc_fixed(dsp, &ba, &runtime->block_list);

	} else {
		dev_dbg(dsp->dev, "persistent block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		/* alloc blocks that includes this section */
		ret = block_alloc(dsp, &ba, &runtime->block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for runtime module size 0x%x\n",
			module->persistent_size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}
	runtime->persistent_offset = ba.offset;

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &runtime->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: runtime block prepare failed\n");
		goto err;
	}

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_alloc_blocks);

int sst_module_runtime_free_blocks(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free_blocks);

int sst_module_runtime_save(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "saving runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	context->buffer = dma_alloc_coherent(dsp->dma_dev,
		module->persistent_size,
		&context->dma_buffer, GFP_DMA | GFP_KERNEL);
	if (!context->buffer) {
		dev_err(dsp->dev, "error: DMA context alloc failed\n");
		return -ENOMEM;
	}

	mutex_lock(&dsp->mutex);

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyfrom(dsp, context->dma_buffer,
			dsp->addr.lpe_base + runtime->persistent_offset,
			module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: context copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(context->buffer, dsp->addr.lpe +
			runtime->persistent_offset,
			module->persistent_size);

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_save);

int sst_module_runtime_restore(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "restoring runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	mutex_lock(&dsp->mutex);

	if (!context->buffer) {
		dev_info(dsp->dev, "no context buffer to restore!\n");
		goto err;
	}

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + runtime->persistent_offset,
			context->dma_buffer, module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(dsp->addr.lpe + runtime->persistent_offset,
			context->buffer, module->persistent_size);

	dma_free_coherent(dsp->dma_dev, module->persistent_size,
				context->buffer, context->dma_buffer);
	context->buffer = NULL;

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_restore);

/* register a DSP memory block for use with FW based modules */
struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
	u32 size, enum sst_mem_type type, const struct sst_block_ops *ops,
	u32 index, void *private)
{
	struct sst_mem_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		return NULL;

	block->offset = offset;
	block->size = size;
	block->index = index;
	block->type = type;
	block->dsp = dsp;
	block->private = private;
	block->ops = ops;

	mutex_lock(&dsp->mutex);
	list_add(&block->list, &dsp->free_block_list);
	mutex_unlock(&dsp->mutex);

	return block;
}
EXPORT_SYMBOL_GPL(sst_mem_block_register);
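
/*
 * Usage sketch (illustration only): platform code typically registers each
 * physical IRAM/DRAM block at init so the allocators above can hand them
 * out, e.g.
 *
 *	for (i = 0; i < nr_blocks; i++)
 *		sst_mem_block_register(dsp, i * block_size, block_size,
 *				       SST_MEM_DRAM, &my_block_ops, i, NULL);
 *
 * "nr_blocks", "block_size" and "my_block_ops" are assumed names supplied
 * by the platform specific code, not symbols from this file.
 */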
/* unregister all DSP memory blocks */
void sst_mem_block_unregister_all(struct sst_dsp *dsp)
{
	struct sst_mem_block *block, *tmp;

	mutex_lock(&dsp->mutex);

	/* unregister used blocks */
	list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	/* unregister free blocks */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);

/* allocate scratch buffer blocks */
int sst_block_alloc_scratch(struct sst_dsp *dsp)
{
	struct sst_module *module;
	struct sst_block_allocator ba;
	int ret;

	mutex_lock(&dsp->mutex);

	/* calculate required scratch size */
	dsp->scratch_size = 0;
	list_for_each_entry(module, &dsp->module_list, list) {
		dev_dbg(dsp->dev, "module %d scratch req 0x%x bytes\n",
			module->id, module->scratch_size);
		if (dsp->scratch_size < module->scratch_size)
			dsp->scratch_size = module->scratch_size;
	}

	dev_dbg(dsp->dev, "scratch buffer required is 0x%x bytes\n",
		dsp->scratch_size);

	if (dsp->scratch_size == 0) {
		dev_info(dsp->dev, "no modules need scratch buffer\n");
		mutex_unlock(&dsp->mutex);
		return 0;
	}

	/* allocate blocks for module scratch buffers */
	dev_dbg(dsp->dev, "allocating scratch blocks\n");

	ba.size = dsp->scratch_size;
	ba.type = SST_MEM_DRAM;

	/* do we need to allocate at a fixed offset */
	if (dsp->scratch_offset != 0) {

		ba.offset = dsp->scratch_offset;

		dev_dbg(dsp->dev, "block request 0x%x bytes type %d at 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that includes this section */
		ret = block_alloc_fixed(dsp, &ba, &dsp->scratch_block_list);

	} else {
		dev_dbg(dsp->dev, "block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		ba.offset = 0;
		ret = block_alloc(dsp, &ba, &dsp->scratch_block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	ret = block_list_prepare(dsp, &dsp->scratch_block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: scratch block prepare failed\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	/* assign the same offset of scratch to each module */
	dsp->scratch_offset = ba.offset;
	mutex_unlock(&dsp->mutex);
	return dsp->scratch_size;
}
EXPORT_SYMBOL_GPL(sst_block_alloc_scratch);
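
/*
 * Usage note (illustration only): scratch is sized from the worst case
 * module requirement, so a driver would normally call
 * sst_block_alloc_scratch(dsp) once, after all firmware modules have been
 * parsed and added to dsp->module_list.
 */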
/* free all scratch blocks */
void sst_block_free_scratch(struct sst_dsp *dsp)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &dsp->scratch_block_list);
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_block_free_scratch);

/* get a module from its unique ID */
struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
{
	struct sst_module *module;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(module, &dsp->module_list, list) {
		if (module->id == id) {
			mutex_unlock(&dsp->mutex);
			return module;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_get_from_id);

struct sst_module_runtime *sst_module_runtime_get_from_id(
	struct sst_module *module, u32 id)
{
	struct sst_module_runtime *runtime;
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(runtime, &module->runtime_list, list) {
		if (runtime->id == id) {
			mutex_unlock(&dsp->mutex);
			return runtime;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_get_from_id);

/* returns block address in DSP address space */
u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
	enum sst_mem_type type)
{
	switch (type) {
	case SST_MEM_IRAM:
		return offset - dsp->addr.iram_offset +
			dsp->addr.dsp_iram_offset;
	case SST_MEM_DRAM:
		return offset - dsp->addr.dram_offset +
			dsp->addr.dsp_dram_offset;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(sst_dsp_get_offset);

struct sst_dsp *sst_dsp_new(struct device *dev,
	struct sst_dsp_device *sst_dev, struct sst_pdata *pdata)
{
	struct sst_dsp *sst;
	int err;

	dev_dbg(dev, "initialising audio DSP id 0x%x\n", pdata->id);

	sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL);
	if (sst == NULL)
		return NULL;

	spin_lock_init(&sst->spinlock);
	mutex_init(&sst->mutex);
	sst->dev = dev;
	sst->dma_dev = pdata->dma_dev;
	sst->thread_context = sst_dev->thread_context;
	sst->sst_dev = sst_dev;
	sst->id = pdata->id;
	sst->irq = pdata->irq;
	sst->ops = sst_dev->ops;
	sst->pdata = pdata;
	INIT_LIST_HEAD(&sst->used_block_list);
	INIT_LIST_HEAD(&sst->free_block_list);
	INIT_LIST_HEAD(&sst->module_list);
	INIT_LIST_HEAD(&sst->fw_list);
	INIT_LIST_HEAD(&sst->scratch_block_list);

	/* Initialise SST Audio DSP */
	if (sst->ops->init) {
		err = sst->ops->init(sst, pdata);
		if (err < 0)
			return NULL;
	}

	/* Register the ISR */
	err = request_threaded_irq(sst->irq, sst->ops->irq_handler,
		sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
	if (err)
		goto irq_err;

	err = sst_dma_new(sst);
	if (err)
		dev_warn(dev, "sst_dma_new failed %d\n", err);

	return sst;

irq_err:
	if (sst->ops->free)
		sst->ops->free(sst);

	return NULL;
}
EXPORT_SYMBOL_GPL(sst_dsp_new);
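
/*
 * Lifecycle sketch (hypothetical caller, for illustration only):
 *
 *	struct sst_dsp *dsp;
 *
 *	dsp = sst_dsp_new(dev, &my_sst_dev, &my_pdata);
 *	if (!dsp)
 *		return -ENODEV;
 *	...
 *	sst_dsp_free(dsp);
 *
 * "my_sst_dev" and "my_pdata" stand in for the platform's
 * struct sst_dsp_device and struct sst_pdata instances.
 */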
void sst_dsp_free(struct sst_dsp *sst)
{
	free_irq(sst->irq, sst);
	if (sst->ops->free)
		sst->ops->free(sst);

	sst_dma_free(sst->dma);
}
EXPORT_SYMBOL_GPL(sst_dsp_free);

MODULE_DESCRIPTION("Intel SST Firmware Loader");
MODULE_LICENSE("GPL v2");