q6adm.c 53 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799
  1. /* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. */
  12. #include <linux/slab.h>
  13. #include <linux/wait.h>
  14. #include <linux/sched.h>
  15. #include <linux/jiffies.h>
  16. #include <linux/uaccess.h>
  17. #include <linux/atomic.h>
  18. #include <linux/wait.h>
  19. #include <sound/apr_audio-v2.h>
  20. #include <mach/qdsp6v2/apr.h>
  21. #include <sound/q6adm-v2.h>
  22. #include <sound/q6audio-v2.h>
  23. #include <sound/q6afe-v2.h>
  24. #include "audio_acdb.h"
  25. #if defined(CONFIG_SEC_MILLETWIFI_COMMON) || defined(CONFIG_SEC_MATISSEWIFI_COMMON)
  26. #ifdef pr_debug
  27. #undef pr_debug
  28. #define pr_debug pr_err
  29. #endif
  30. #endif
  31. #define TIMEOUT_MS 1000
  32. #define RESET_COPP_ID 99
  33. #define INVALID_COPP_ID 0xFF
  34. /* Used for inband payload copy, max size is 4k */
  35. /* 2 is to account for module & param ID in payload */
  36. #define ADM_GET_PARAMETER_LENGTH (4096 - APR_HDR_SIZE - 2 * sizeof(uint32_t))
  37. #define ULL_SUPPORTED_SAMPLE_RATE 48000
  38. #define ULL_MAX_SUPPORTED_CHANNEL 2
/*
 * Indices into this_adm.mem_map_cal_handles[]: one shared-memory map
 * handle is tracked per calibration type.
 */
enum {
	ADM_RX_AUDPROC_CAL,	/* RX device (audproc) calibration */
	ADM_TX_AUDPROC_CAL,	/* TX device (audproc) calibration */
	ADM_RX_AUDVOL_CAL,	/* RX volume calibration */
	ADM_TX_AUDVOL_CAL,	/* TX volume calibration */
	ADM_CUSTOM_TOP_CAL,	/* custom topology blob (see send_adm_custom_topology) */
	ADM_RTAC,		/* RTAC (real-time audio calibration) region */
	ADM_MAX_CAL_TYPES	/* array size; not a real cal type */
};
/*
 * Driver-global ADM state; a single instance (this_adm) serves the whole
 * DSP. All per-port arrays are indexed by the AFE port index returned by
 * afe_get_port_index()/q6audio_get_port_index() (0..AFE_MAX_PORTS-1).
 */
struct adm_ctl {
	/* APR handle to the ADM service; reset to NULL on RESET_EVENTS */
	void *apr;
	/* COPP id per port for the legacy (non-low-latency) path */
	atomic_t copp_id[AFE_MAX_PORTS];
	atomic_t copp_cnt[AFE_MAX_PORTS];
	/* COPP id per port for the low-latency path */
	atomic_t copp_low_latency_id[AFE_MAX_PORTS];
	atomic_t copp_low_latency_cnt[AFE_MAX_PORTS];
	/* nonzero => the port's open used low-latency mode (see adm_callback) */
	atomic_t copp_perf_mode[AFE_MAX_PORTS];
	/* command-done flag: cleared to 0 before sending an APR command,
	 * set to 1 by adm_callback(); senders wait on it via wait[] */
	atomic_t copp_stat[AFE_MAX_PORTS];
	wait_queue_head_t wait[AFE_MAX_PORTS];
	/* mapped device-calibration blocks, cleared on RESET_EVENTS */
	struct acdb_cal_block mem_addr_audproc[MAX_AUDPROC_TYPES];
	/* mapped volume-calibration blocks, cleared on RESET_EVENTS */
	struct acdb_cal_block mem_addr_audvol[MAX_AUDPROC_TYPES];
	/* mem-map handle per cal type, filled from
	 * ADM_CMDRSP_SHARED_MEM_MAP_REGIONS in adm_callback() */
	atomic_t mem_map_cal_handles[ADM_MAX_CAL_TYPES];
	/* which cal type the in-flight MEM_MAP command is for */
	atomic_t mem_map_cal_index;
	/* 1 => custom topology still needs to be sent once (re-armed on reset) */
	int set_custom_topology;
	/* NOTE(review): presumably the echo-canceller reference RX port id;
	 * not referenced in this chunk — confirm against the rest of the file */
	int ec_ref_rx;
};
static struct adm_ctl this_adm;
/* Cached PCM channel map supplied by the machine driver; set_channel_map
 * guards whether channel_mapping[] holds valid data. */
struct adm_multi_ch_map {
	bool set_channel_map;
	char channel_mapping[PCM_FORMAT_MAX_NUM_CHANNEL];
};
static struct adm_multi_ch_map multi_ch_map = { false,
	{0, 0, 0, 0, 0, 0, 0, 0}
};
/* Scratch area for GET_PP_PARAMS responses: [0] holds the element count
 * (or -1 on failure), [1..] hold the parameter words (see adm_callback /
 * adm_get_params).
 * NOTE(review): ADM_GET_PARAMETER_LENGTH is a byte count (~4k minus
 * headers) but is used here as an *element* count of ints, making this
 * array roughly 4x the intended 4k budget. The bounds checks all use
 * ARRAY_SIZE so this is waste, not overflow — confirm before shrinking,
 * since the callback compares payload byte sizes against ARRAY_SIZE. */
static int adm_get_parameters[ADM_GET_PARAMETER_LENGTH];
  73. int srs_trumedia_open(int port_id, int srs_tech_id, void *srs_params)
  74. {
  75. struct adm_cmd_set_pp_params_inband_v5 *adm_params = NULL;
  76. int ret = 0, sz = 0;
  77. int index;
  78. pr_debug("SRS - %s", __func__);
  79. switch (srs_tech_id) {
  80. case SRS_ID_GLOBAL: {
  81. struct srs_trumedia_params_GLOBAL *glb_params = NULL;
  82. sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
  83. sizeof(struct srs_trumedia_params_GLOBAL);
  84. adm_params = kzalloc(sz, GFP_KERNEL);
  85. if (!adm_params) {
  86. pr_err("%s, adm params memory alloc failed\n",
  87. __func__);
  88. return -ENOMEM;
  89. }
  90. adm_params->payload_size =
  91. sizeof(struct srs_trumedia_params_GLOBAL) +
  92. sizeof(struct adm_param_data_v5);
  93. adm_params->params.param_id = SRS_TRUMEDIA_PARAMS;
  94. adm_params->params.param_size =
  95. sizeof(struct srs_trumedia_params_GLOBAL);
  96. glb_params = (struct srs_trumedia_params_GLOBAL *)
  97. ((u8 *)adm_params +
  98. sizeof(struct adm_cmd_set_pp_params_inband_v5));
  99. memcpy(glb_params, srs_params,
  100. sizeof(struct srs_trumedia_params_GLOBAL));
  101. pr_debug("SRS - %s: Global params - 1 = %x, 2 = %x, 3 = %x, 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x\n",
  102. __func__, (int)glb_params->v1,
  103. (int)glb_params->v2, (int)glb_params->v3,
  104. (int)glb_params->v4, (int)glb_params->v5,
  105. (int)glb_params->v6, (int)glb_params->v7,
  106. (int)glb_params->v8);
  107. break;
  108. }
  109. case SRS_ID_WOWHD: {
  110. struct srs_trumedia_params_WOWHD *whd_params = NULL;
  111. sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
  112. sizeof(struct srs_trumedia_params_WOWHD);
  113. adm_params = kzalloc(sz, GFP_KERNEL);
  114. if (!adm_params) {
  115. pr_err("%s, adm params memory alloc failed\n",
  116. __func__);
  117. return -ENOMEM;
  118. }
  119. adm_params->payload_size =
  120. sizeof(struct srs_trumedia_params_WOWHD) +
  121. sizeof(struct adm_param_data_v5);
  122. adm_params->params.param_id = SRS_TRUMEDIA_PARAMS_WOWHD;
  123. adm_params->params.param_size =
  124. sizeof(struct srs_trumedia_params_WOWHD);
  125. whd_params = (struct srs_trumedia_params_WOWHD *)
  126. ((u8 *)adm_params +
  127. sizeof(struct adm_cmd_set_pp_params_inband_v5));
  128. memcpy(whd_params, srs_params,
  129. sizeof(struct srs_trumedia_params_WOWHD));
  130. pr_debug("SRS - %s: WOWHD params - 1 = %x, 2 = %x, 3 = %x, 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x, 9 = %x, 10 = %x, 11 = %x\n",
  131. __func__, (int)whd_params->v1,
  132. (int)whd_params->v2, (int)whd_params->v3,
  133. (int)whd_params->v4, (int)whd_params->v5,
  134. (int)whd_params->v6, (int)whd_params->v7,
  135. (int)whd_params->v8, (int)whd_params->v9,
  136. (int)whd_params->v10, (int)whd_params->v11);
  137. break;
  138. }
  139. case SRS_ID_CSHP: {
  140. struct srs_trumedia_params_CSHP *chp_params = NULL;
  141. sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
  142. sizeof(struct srs_trumedia_params_CSHP);
  143. adm_params = kzalloc(sz, GFP_KERNEL);
  144. if (!adm_params) {
  145. pr_err("%s, adm params memory alloc failed\n",
  146. __func__);
  147. return -ENOMEM;
  148. }
  149. adm_params->payload_size =
  150. sizeof(struct srs_trumedia_params_CSHP) +
  151. sizeof(struct adm_param_data_v5);
  152. adm_params->params.param_id = SRS_TRUMEDIA_PARAMS_CSHP;
  153. adm_params->params.param_size =
  154. sizeof(struct srs_trumedia_params_CSHP);
  155. chp_params = (struct srs_trumedia_params_CSHP *)
  156. ((u8 *)adm_params +
  157. sizeof(struct adm_cmd_set_pp_params_inband_v5));
  158. memcpy(chp_params, srs_params,
  159. sizeof(struct srs_trumedia_params_CSHP));
  160. pr_debug("SRS - %s: CSHP params - 1 = %x, 2 = %x, 3 = %x, 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x, 9 = %x\n",
  161. __func__, (int)chp_params->v1,
  162. (int)chp_params->v2, (int)chp_params->v3,
  163. (int)chp_params->v4, (int)chp_params->v5,
  164. (int)chp_params->v6, (int)chp_params->v7,
  165. (int)chp_params->v8, (int)chp_params->v9);
  166. break;
  167. }
  168. case SRS_ID_HPF: {
  169. struct srs_trumedia_params_HPF *hpf_params = NULL;
  170. sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
  171. sizeof(struct srs_trumedia_params_HPF);
  172. adm_params = kzalloc(sz, GFP_KERNEL);
  173. if (!adm_params) {
  174. pr_err("%s, adm params memory alloc failed\n",
  175. __func__);
  176. return -ENOMEM;
  177. }
  178. adm_params->payload_size =
  179. sizeof(struct srs_trumedia_params_HPF) +
  180. sizeof(struct adm_param_data_v5);
  181. adm_params->params.param_id = SRS_TRUMEDIA_PARAMS_HPF;
  182. adm_params->params.param_size =
  183. sizeof(struct srs_trumedia_params_HPF);
  184. hpf_params = (struct srs_trumedia_params_HPF *)
  185. ((u8 *)adm_params +
  186. sizeof(struct adm_cmd_set_pp_params_inband_v5));
  187. memcpy(hpf_params, srs_params,
  188. sizeof(struct srs_trumedia_params_HPF));
  189. pr_debug("SRS - %s: HPF params - 1 = %x\n", __func__,
  190. (int)hpf_params->v1);
  191. break;
  192. }
  193. case SRS_ID_PEQ: {
  194. struct srs_trumedia_params_PEQ *peq_params = NULL;
  195. sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
  196. sizeof(struct srs_trumedia_params_PEQ);
  197. adm_params = kzalloc(sz, GFP_KERNEL);
  198. if (!adm_params) {
  199. pr_err("%s, adm params memory alloc failed\n",
  200. __func__);
  201. return -ENOMEM;
  202. }
  203. adm_params->payload_size =
  204. sizeof(struct srs_trumedia_params_PEQ) +
  205. sizeof(struct adm_param_data_v5);
  206. adm_params->params.param_id = SRS_TRUMEDIA_PARAMS_PEQ;
  207. adm_params->params.param_size =
  208. sizeof(struct srs_trumedia_params_PEQ);
  209. peq_params = (struct srs_trumedia_params_PEQ *)
  210. ((u8 *)adm_params +
  211. sizeof(struct adm_cmd_set_pp_params_inband_v5));
  212. memcpy(peq_params, srs_params,
  213. sizeof(struct srs_trumedia_params_PEQ));
  214. pr_debug("SRS - %s: PEQ params - 1 = %x 2 = %x, 3 = %x, 4 = %x\n",
  215. __func__, (int)peq_params->v1,
  216. (int)peq_params->v2, (int)peq_params->v3,
  217. (int)peq_params->v4);
  218. break;
  219. }
  220. case SRS_ID_HL: {
  221. struct srs_trumedia_params_HL *hl_params = NULL;
  222. sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
  223. sizeof(struct srs_trumedia_params_HL);
  224. adm_params = kzalloc(sz, GFP_KERNEL);
  225. if (!adm_params) {
  226. pr_err("%s, adm params memory alloc failed\n",
  227. __func__);
  228. return -ENOMEM;
  229. }
  230. adm_params->payload_size =
  231. sizeof(struct srs_trumedia_params_HL) +
  232. sizeof(struct adm_param_data_v5);
  233. adm_params->params.param_id = SRS_TRUMEDIA_PARAMS_HL;
  234. adm_params->params.param_size =
  235. sizeof(struct srs_trumedia_params_HL);
  236. hl_params = (struct srs_trumedia_params_HL *)
  237. ((u8 *)adm_params +
  238. sizeof(struct adm_cmd_set_pp_params_inband_v5));
  239. memcpy(hl_params, srs_params,
  240. sizeof(struct srs_trumedia_params_HL));
  241. pr_debug("SRS - %s: HL params - 1 = %x, 2 = %x, 3 = %x, 4 = %x, 5 = %x, 6 = %x, 7 = %x\n",
  242. __func__, (int)hl_params->v1,
  243. (int)hl_params->v2, (int)hl_params->v3,
  244. (int)hl_params->v4, (int)hl_params->v5,
  245. (int)hl_params->v6, (int)hl_params->v7);
  246. break;
  247. }
  248. default:
  249. goto fail_cmd;
  250. }
  251. adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
  252. APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
  253. adm_params->hdr.pkt_size = sz;
  254. adm_params->hdr.src_svc = APR_SVC_ADM;
  255. adm_params->hdr.src_domain = APR_DOMAIN_APPS;
  256. adm_params->hdr.src_port = port_id;
  257. adm_params->hdr.dest_svc = APR_SVC_ADM;
  258. adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
  259. index = afe_get_port_index(port_id);
  260. if (index < 0 || index >= AFE_MAX_PORTS) {
  261. pr_err("%s: invalid port idx %d portid %#x\n",
  262. __func__, index, port_id);
  263. ret = -EINVAL;
  264. goto fail_cmd;
  265. }
  266. adm_params->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
  267. adm_params->hdr.token = port_id;
  268. adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
  269. adm_params->payload_addr_lsw = 0;
  270. adm_params->payload_addr_msw = 0;
  271. adm_params->mem_map_handle = 0;
  272. adm_params->params.module_id = SRS_TRUMEDIA_MODULE_ID;
  273. adm_params->params.reserved = 0;
  274. pr_debug("SRS - %s: Command was sent now check Q6 - port id = %d, size %d, module id %x, param id %x.\n",
  275. __func__, adm_params->hdr.dest_port,
  276. adm_params->payload_size, adm_params->params.module_id,
  277. adm_params->params.param_id);
  278. ret = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
  279. if (ret < 0) {
  280. pr_err("SRS - %s: ADM enable for port %d failed\n", __func__,
  281. port_id);
  282. ret = -EINVAL;
  283. goto fail_cmd;
  284. }
  285. /* Wait for the callback with copp id */
  286. ret = wait_event_timeout(this_adm.wait[index], 1,
  287. msecs_to_jiffies(TIMEOUT_MS));
  288. if (!ret) {
  289. pr_err("%s: SRS set params timed out port = %d\n",
  290. __func__, port_id);
  291. ret = -EINVAL;
  292. goto fail_cmd;
  293. }
  294. fail_cmd:
  295. kfree(adm_params);
  296. return ret;
  297. }
  298. int adm_dolby_dap_send_params(int port_id, char *params, uint32_t params_length)
  299. {
  300. struct adm_cmd_set_pp_params_v5 *adm_params = NULL;
  301. int sz, rc = 0, index = afe_get_port_index(port_id);
  302. pr_debug("%s\n", __func__);
  303. if (index < 0 || index >= AFE_MAX_PORTS) {
  304. pr_err("%s: invalid port idx %d portid %#x\n",
  305. __func__, index, port_id);
  306. return -EINVAL;
  307. }
  308. sz = sizeof(struct adm_cmd_set_pp_params_v5) + params_length;
  309. adm_params = kzalloc(sz, GFP_KERNEL);
  310. if (!adm_params) {
  311. pr_err("%s, adm params memory alloc failed", __func__);
  312. return -ENOMEM;
  313. }
  314. memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_set_pp_params_v5)),
  315. params, params_length);
  316. adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
  317. APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
  318. adm_params->hdr.pkt_size = sz;
  319. adm_params->hdr.src_svc = APR_SVC_ADM;
  320. adm_params->hdr.src_domain = APR_DOMAIN_APPS;
  321. adm_params->hdr.src_port = port_id;
  322. adm_params->hdr.dest_svc = APR_SVC_ADM;
  323. adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
  324. adm_params->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
  325. adm_params->hdr.token = port_id;
  326. adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
  327. adm_params->payload_addr_lsw = 0;
  328. adm_params->payload_addr_msw = 0;
  329. adm_params->mem_map_handle = 0;
  330. adm_params->payload_size = params_length;
  331. atomic_set(&this_adm.copp_stat[index], 0);
  332. rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
  333. if (rc < 0) {
  334. pr_err("%s: Set params failed port = %#x\n",
  335. __func__, port_id);
  336. rc = -EINVAL;
  337. goto dolby_dap_send_param_return;
  338. }
  339. /* Wait for the callback */
  340. rc = wait_event_timeout(this_adm.wait[index],
  341. atomic_read(&this_adm.copp_stat[index]),
  342. msecs_to_jiffies(TIMEOUT_MS));
  343. if (!rc) {
  344. pr_err("%s: Set params timed out port = %#x\n",
  345. __func__, port_id);
  346. rc = -EINVAL;
  347. goto dolby_dap_send_param_return;
  348. }
  349. rc = 0;
  350. dolby_dap_send_param_return:
  351. kfree(adm_params);
  352. return rc;
  353. }
/*
 * adm_get_params() - query a parameter blob from the COPP on @port_id.
 * @port_id:       AFE port id.
 * @module_id:     DSP module to query.
 * @param_id:      parameter within that module.
 * @params_length: size of @params in bytes; also sent as param_max_size.
 * @params:        in: request payload copied after the header;
 *                 out: filled with the response words cached by
 *                 adm_callback() in adm_get_parameters[].
 *
 * Returns 0 on success (even if the copy-out bounds check fails - that
 * case only logs), -ENOMEM on allocation failure, -EINVAL on bad port,
 * send failure or timeout.
 */
int adm_get_params(int port_id, uint32_t module_id, uint32_t param_id,
		uint32_t params_length, char *params)
{
	struct adm_cmd_get_pp_params_v5 *adm_params = NULL;
	int sz, rc = 0, i = 0, index = afe_get_port_index(port_id);
	/* Response is copied out as 32-bit words. */
	int *params_data = (int *)params;
	if (index < 0 || index >= AFE_MAX_PORTS) {
		pr_err("%s: invalid port idx %d portid %#x\n",
			__func__, index, port_id);
		return -EINVAL;
	}
	sz = sizeof(struct adm_cmd_get_pp_params_v5) + params_length;
	adm_params = kzalloc(sz, GFP_KERNEL);
	if (!adm_params) {
		pr_err("%s, adm params memory alloc failed", __func__);
		return -ENOMEM;
	}
	/* Request payload (if any) sits directly after the command header. */
	memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_get_pp_params_v5)),
		params, params_length);
	adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	adm_params->hdr.pkt_size = sz;
	adm_params->hdr.src_svc = APR_SVC_ADM;
	adm_params->hdr.src_domain = APR_DOMAIN_APPS;
	adm_params->hdr.src_port = port_id;
	adm_params->hdr.dest_svc = APR_SVC_ADM;
	adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
	adm_params->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	adm_params->hdr.token = port_id;	/* keys wait-queue lookup in the callback */
	adm_params->hdr.opcode = ADM_CMD_GET_PP_PARAMS_V5;
	adm_params->data_payload_addr_lsw = 0;	/* inband response */
	adm_params->data_payload_addr_msw = 0;
	adm_params->mem_map_handle = 0;
	adm_params->module_id = module_id;
	adm_params->param_id = param_id;
	adm_params->param_max_size = params_length;
	adm_params->reserved = 0;
	/* Arm the completion flag; adm_callback() sets it after caching the
	 * response into adm_get_parameters[]. */
	atomic_set(&this_adm.copp_stat[index], 0);
	rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
	if (rc < 0) {
		pr_err("%s: Failed to Get Params on port %d\n", __func__,
			port_id);
		rc = -EINVAL;
		goto adm_get_param_return;
	}
	/* Wait for the callback with copp id */
	rc = wait_event_timeout(this_adm.wait[index],
		atomic_read(&this_adm.copp_stat[index]),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!rc) {
		pr_err("%s: get params timed out port = %d\n", __func__,
			port_id);
		rc = -EINVAL;
		goto adm_get_param_return;
	}
	/* Copy-out bounds check. adm_get_parameters[0] is the word count,
	 * or -1 if the callback rejected the response; -1 makes the
	 * unsigned comparison against params_length/sizeof(uint32_t) fail,
	 * so the failure path below is taken - keep the signed/unsigned
	 * mix as-is. */
	if ((params_data) && (ARRAY_SIZE(adm_get_parameters) >=
			(1+adm_get_parameters[0])) &&
			(params_length/sizeof(uint32_t) >=
			adm_get_parameters[0])) {
		for (i = 0; i < adm_get_parameters[0]; i++)
			params_data[i] = adm_get_parameters[1+i];
	} else {
		pr_err("%s: Get param data not copied! get_param array size %zd, index %d, params array size %zd, index %d\n",
			__func__, ARRAY_SIZE(adm_get_parameters),
			(1+adm_get_parameters[0]),
			params_length/sizeof(int),
			adm_get_parameters[0]);
	}
	rc = 0;
adm_get_param_return:
	kfree(adm_params);
	return rc;
}
  427. static void adm_callback_debug_print(struct apr_client_data *data)
  428. {
  429. uint32_t *payload;
  430. payload = data->payload;
  431. if (data->payload_size >= 8)
  432. pr_debug("%s: code = 0x%x PL#0[%x], PL#1[%x], size = %d\n",
  433. __func__, data->opcode, payload[0], payload[1],
  434. data->payload_size);
  435. else if (data->payload_size >= 4)
  436. pr_debug("%s: code = 0x%x PL#0[%x], size = %d\n",
  437. __func__, data->opcode, payload[0],
  438. data->payload_size);
  439. else
  440. pr_debug("%s: code = 0x%x, size = %d\n",
  441. __func__, data->opcode, data->payload_size);
  442. }
  443. void adm_set_multi_ch_map(char *channel_map)
  444. {
  445. memcpy(multi_ch_map.channel_mapping, channel_map,
  446. PCM_FORMAT_MAX_NUM_CHANNEL);
  447. multi_ch_map.set_channel_map = true;
  448. }
  449. void adm_get_multi_ch_map(char *channel_map)
  450. {
  451. if (multi_ch_map.set_channel_map) {
  452. memcpy(channel_map, multi_ch_map.channel_mapping,
  453. PCM_FORMAT_MAX_NUM_CHANNEL);
  454. }
  455. }
/*
 * adm_callback() - APR receive handler for the ADM service.
 *
 * Handles three classes of traffic:
 *   - RESET_EVENTS: DSP restart; wipes every piece of cached state.
 *   - APR_BASIC_RSP_RESULT: ack/nack for a command we sent; sets the
 *     port's copp_stat flag and wakes the sender waiting in this file.
 *   - Command responses (DEVICE_OPEN, GET_PP_PARAMS, MEM_MAP): cache the
 *     returned data, then set copp_stat and wake the sender.
 *
 * The port index is recovered from data->token, which every sender set
 * to its port_id. Always returns 0 except for a NULL @data.
 */
static int32_t adm_callback(struct apr_client_data *data, void *priv)
{
	uint32_t *payload;
	int i, index;
	if (data == NULL) {
		pr_err("%s: data paramter is null\n", __func__);
		return -EINVAL;
	}
	payload = data->payload;
	if (data->opcode == RESET_EVENTS) {
		pr_debug("adm_callback: Reset event is received: %d %d apr[%p]\n",
			data->reset_event, data->reset_proc,
			this_adm.apr);
		if (this_adm.apr) {
			apr_reset(this_adm.apr);
			/* Invalidate all per-port COPP bookkeeping. */
			for (i = 0; i < AFE_MAX_PORTS; i++) {
				atomic_set(&this_adm.copp_id[i],
					RESET_COPP_ID);
				atomic_set(&this_adm.copp_low_latency_id[i],
					RESET_COPP_ID);
				atomic_set(&this_adm.copp_cnt[i], 0);
				atomic_set(&this_adm.copp_low_latency_cnt[i],
					0);
				atomic_set(&this_adm.copp_perf_mode[i], 0);
				atomic_set(&this_adm.copp_stat[i], 0);
			}
			this_adm.apr = NULL;
			reset_custom_topology_flags();
			/* Re-arm the one-shot custom-topology download. */
			this_adm.set_custom_topology = 1;
			/* Mem-map handles are dead after an ADSP restart. */
			for (i = 0; i < ADM_MAX_CAL_TYPES; i++)
				atomic_set(&this_adm.mem_map_cal_handles[i],
					0);
			rtac_clear_mapping(ADM_RTAC_CAL);
		}
		pr_debug("Resetting calibration blocks");
		for (i = 0; i < MAX_AUDPROC_TYPES; i++) {
			/* Device calibration */
			this_adm.mem_addr_audproc[i].cal_size = 0;
			this_adm.mem_addr_audproc[i].cal_kvaddr = 0;
			this_adm.mem_addr_audproc[i].cal_paddr = 0;
			/* Volume calibration */
			this_adm.mem_addr_audvol[i].cal_size = 0;
			this_adm.mem_addr_audvol[i].cal_kvaddr = 0;
			this_adm.mem_addr_audvol[i].cal_paddr = 0;
		}
		return 0;
	}
	adm_callback_debug_print(data);
	if (data->payload_size) {
		/* Senders stored their port_id in hdr.token. */
		index = q6audio_get_port_index(data->token);
		if (index < 0 || index >= AFE_MAX_PORTS) {
			pr_err("%s: invalid port idx %d token %d\n",
				__func__, index, data->token);
			return 0;
		}
		if (data->opcode == APR_BASIC_RSP_RESULT) {
			/* payload[0] = original opcode, payload[1] = status */
			pr_debug("APR_BASIC_RSP_RESULT id %x\n", payload[0]);
			if (payload[1] != 0) {
				pr_err("%s: cmd = 0x%x returned error = 0x%x\n",
					__func__, payload[0], payload[1]);
			}
			switch (payload[0]) {
			case ADM_CMD_SET_PP_PARAMS_V5:
				pr_debug("%s: ADM_CMD_SET_PP_PARAMS_V5\n",
					__func__);
				if (rtac_make_adm_callback(
					payload, data->payload_size)) {
					break;
				}
				/* fallthrough - non-RTAC SET_PP acks wake
				 * the sender like any other basic ack */
			case ADM_CMD_DEVICE_CLOSE_V5:
			case ADM_CMD_SHARED_MEM_UNMAP_REGIONS:
			case ADM_CMD_MATRIX_MAP_ROUTINGS_V5:
			case ADM_CMD_ADD_TOPOLOGIES:
				pr_debug("%s: Basic callback received, wake up.\n",
					__func__);
				atomic_set(&this_adm.copp_stat[index], 1);
				wake_up(&this_adm.wait[index]);
				break;
			case ADM_CMD_SHARED_MEM_MAP_REGIONS:
				pr_debug("%s: ADM_CMD_SHARED_MEM_MAP_REGIONS\n",
					__func__);
				/* Should only come here if there is an APR */
				/* error or malformed APR packet. Otherwise */
				/* response will be returned as */
				/* ADM_CMDRSP_SHARED_MEM_MAP_REGIONS */
				if (payload[1] != 0) {
					pr_err("%s: ADM map error, resuming\n",
						__func__);
					atomic_set(&this_adm.copp_stat[index],
						1);
					wake_up(&this_adm.wait[index]);
				}
				break;
			case ADM_CMD_GET_PP_PARAMS_V5:
				pr_debug("%s: ADM_CMD_GET_PP_PARAMS_V5\n",
					__func__);
				/* Should only come here if there is an APR */
				/* error or malformed APR packet. Otherwise */
				/* response will be returned as */
				/* ADM_CMDRSP_GET_PP_PARAMS_V5 */
				if (payload[1] != 0) {
					pr_err("%s: ADM get param error = %d, resuming\n",
						__func__, payload[1]);
					rtac_make_adm_callback(payload,
						data->payload_size);
				}
				break;
			default:
				pr_err("%s: Unknown Cmd: 0x%x\n", __func__,
					payload[0]);
#if defined(CONFIG_SEC_MILLETWIFI_COMMON) || defined(CONFIG_SEC_MATISSEWIFI_COMMON)
				panic("Q6 ADM Error...\n");
#endif
				break;
			}
			return 0;
		}
		switch (data->opcode) {
		case ADM_CMDRSP_DEVICE_OPEN_V5: {
			struct adm_cmd_rsp_device_open_v5 *open =
			(struct adm_cmd_rsp_device_open_v5 *)data->payload;
			if (open->copp_id == INVALID_COPP_ID) {
				pr_err("%s: invalid coppid rxed %d\n",
					__func__, open->copp_id);
				/* Still wake the opener so it can fail fast. */
				atomic_set(&this_adm.copp_stat[index], 1);
				wake_up(&this_adm.wait[index]);
				break;
			}
			/* Record the new COPP id on whichever path
			 * (low-latency vs legacy) this open used. */
			if (atomic_read(&this_adm.copp_perf_mode[index])) {
				atomic_set(&this_adm.copp_low_latency_id[index],
						open->copp_id);
			} else {
				atomic_set(&this_adm.copp_id[index],
					open->copp_id);
			}
			atomic_set(&this_adm.copp_stat[index], 1);
			pr_debug("%s: coppid rxed=%d\n", __func__,
				open->copp_id);
			wake_up(&this_adm.wait[index]);
			}
			break;
		case ADM_CMDRSP_GET_PP_PARAMS_V5:
			pr_debug("%s: ADM_CMDRSP_GET_PP_PARAMS_V5\n", __func__);
			if (payload[0] != 0)
				pr_err("%s: ADM_CMDRSP_GET_PP_PARAMS_V5 returned error = 0x%x\n",
					__func__, payload[0]);
			if (rtac_make_adm_callback(payload,
					data->payload_size))
				break;
			/* payload[3] is the param size, check if payload */
			/* is big enough and has a valid param size */
			if ((payload[0] == 0) && (data->payload_size >
				(4 * sizeof(*payload))) &&
				(data->payload_size - 4 >=
				payload[3]) &&
				(ARRAY_SIZE(adm_get_parameters)-1 >=
				payload[3])) {
				adm_get_parameters[0] = payload[3] /
							sizeof(uint32_t);
				/*
				 * payload[3] is param_size which is
				 * expressed in number of bytes
				 */
				pr_debug("%s: GET_PP PARAM:received parameter length: 0x%x\n",
					__func__, adm_get_parameters[0]);
				/* storing param size then params */
				for (i = 0; i < payload[3] /
						sizeof(uint32_t); i++)
					adm_get_parameters[1+i] =
							payload[4+i];
			} else {
				/* -1 sentinel: adm_get_params() treats this
				 * as "nothing to copy out". */
				adm_get_parameters[0] = -1;
				pr_err("%s: GET_PP_PARAMS failed, setting size to %d\n",
					__func__, adm_get_parameters[0]);
			}
			atomic_set(&this_adm.copp_stat[index], 1);
			wake_up(&this_adm.wait[index]);
			break;
		case ADM_CMDRSP_SHARED_MEM_MAP_REGIONS:
			pr_debug("%s: ADM_CMDRSP_SHARED_MEM_MAP_REGIONS\n",
				__func__);
			/* Stash the new handle in the slot the mapper
			 * selected via mem_map_cal_index before sending. */
			atomic_set(&this_adm.mem_map_cal_handles[
				atomic_read(&this_adm.mem_map_cal_index)],
				*payload);
			atomic_set(&this_adm.copp_stat[index], 1);
			wake_up(&this_adm.wait[index]);
			break;
		default:
			pr_err("%s: Unknown cmd:0x%x\n", __func__,
				data->opcode);
#if defined(CONFIG_SEC_MILLETWIFI_COMMON) || defined(CONFIG_SEC_MATISSEWIFI_COMMON)
			panic("Q6 ADM Error...\n");
#endif
			break;
		}
	}
	return 0;
}
/*
 * send_adm_custom_topology() - download the ACDB custom-topology blob to
 * the DSP (ADM_CMD_ADD_TOPOLOGIES) for @port_id's COPP.
 *
 * The shared-memory mapping of the blob is done at most once per DSP
 * boot (set_custom_topology is cleared here and re-armed only on
 * RESET_EVENTS in adm_callback). Best-effort: all failures just log and
 * return; there is no return value.
 */
void send_adm_custom_topology(int port_id)
{
	struct acdb_cal_block cal_block;
	struct cmd_set_topologies adm_top;
	int index;
	int result;
	/* Map request size passed to adm_memory_map_regions().
	 * NOTE(review): fixed 4096 regardless of cal_block.cal_size —
	 * presumably the blob is at most one page; confirm. */
	int size = 4096;
	get_adm_custom_topology(&cal_block);
	if (cal_block.cal_size == 0) {
		pr_debug("%s: no cal to send addr= 0x%x\n",
				__func__, cal_block.cal_paddr);
		goto done;
	}
	index = afe_get_port_index(port_id);
	if (index < 0 || index >= AFE_MAX_PORTS) {
		pr_err("%s: invalid port idx %d portid %#x\n",
			__func__, index, port_id);
		goto done;
	}
	if (this_adm.set_custom_topology) {
		/* specific index 4 for adm topology memory */
		atomic_set(&this_adm.mem_map_cal_index, ADM_CUSTOM_TOP_CAL);
		/* Only call this once */
		/* NOTE(review): cleared before the map attempt, so a map
		 * failure is never retried until the next DSP reset. */
		this_adm.set_custom_topology = 0;
		result = adm_memory_map_regions(port_id,
				&cal_block.cal_paddr, 0, &size, 1);
		if (result < 0) {
			pr_err("%s: mmap did not work! addr = 0x%x, size = %d\n",
				__func__, cal_block.cal_paddr,
				cal_block.cal_size);
			goto done;
		}
	}
	adm_top.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(20), APR_PKT_VER);
	adm_top.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
		sizeof(adm_top));
	adm_top.hdr.src_svc = APR_SVC_ADM;
	adm_top.hdr.src_domain = APR_DOMAIN_APPS;
	adm_top.hdr.src_port = port_id;
	adm_top.hdr.dest_svc = APR_SVC_ADM;
	adm_top.hdr.dest_domain = APR_DOMAIN_ADSP;
	adm_top.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	adm_top.hdr.token = port_id;	/* keys wait-queue lookup in adm_callback */
	adm_top.hdr.opcode = ADM_CMD_ADD_TOPOLOGIES;
	/* Out-of-band payload: physical address + map handle of the blob. */
	adm_top.payload_addr_lsw = cal_block.cal_paddr;
	adm_top.payload_addr_msw = 0;
	adm_top.mem_map_handle =
		atomic_read(&this_adm.mem_map_cal_handles[ADM_CUSTOM_TOP_CAL]);
	adm_top.payload_size = cal_block.cal_size;
	/* Arm completion flag before sending. */
	atomic_set(&this_adm.copp_stat[index], 0);
	pr_debug("%s: Sending ADM_CMD_ADD_TOPOLOGIES payload = 0x%x, size = %d\n",
		__func__, adm_top.payload_addr_lsw,
		adm_top.payload_size);
	result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_top);
	if (result < 0) {
		pr_err("%s: Set topologies failed port = 0x%x payload = 0x%x\n",
			__func__, port_id, cal_block.cal_paddr);
		goto done;
	}
	/* Wait for the callback */
	result = wait_event_timeout(this_adm.wait[index],
		atomic_read(&this_adm.copp_stat[index]),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!result) {
		pr_err("%s: Set topologies timed out port = 0x%x, payload = 0x%x\n",
			__func__, port_id, cal_block.cal_paddr);
		goto done;
	}
done:
	return;
}
/*
 * send_adm_cal_block() - push one mapped calibration block to a COPP.
 * @port_id:   AFE port whose COPP receives the calibration
 * @aud_cal:   physical address and size of the cal data; the buffer must
 *             already be mapped to the ADSP (see send_adm_cal())
 * @perf_mode: LEGACY_PCM_MODE targets copp_id[], any other mode targets
 *             the low-latency COPP id
 *
 * Sends ADM_CMD_SET_PP_PARAMS_V5 referencing the shared-memory cal
 * buffer and blocks until the DSP acks or TIMEOUT_MS expires.
 * Returns 0 on success, -EINVAL on failure.
 * NOTE(review): an out-of-range port index returns 0 (success); callers
 * only log the result, but confirm this is intended.
 */
static int send_adm_cal_block(int port_id, struct acdb_cal_block *aud_cal,
			int perf_mode)
{
	s32 result = 0;
	struct adm_cmd_set_pp_params_v5 adm_params;
	int index = afe_get_port_index(port_id);

	if (index < 0 || index >= AFE_MAX_PORTS) {
		pr_err("%s: invalid port idx %d portid %#x\n",
			__func__, index, port_id);
		return 0;
	}

	pr_debug("%s: Port id %#x, index %d\n", __func__, port_id, index);

	if (!aud_cal || aud_cal->cal_size == 0) {
		/* no calibration for this path; not fatal */
		pr_debug("%s: No ADM cal to send for port_id = %#x!\n",
			__func__, port_id);
		result = -EINVAL;
		goto done;
	}

	adm_params.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(20), APR_PKT_VER);
	adm_params.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
		sizeof(adm_params));
	adm_params.hdr.src_svc = APR_SVC_ADM;
	adm_params.hdr.src_domain = APR_DOMAIN_APPS;
	adm_params.hdr.src_port = port_id;
	adm_params.hdr.dest_svc = APR_SVC_ADM;
	adm_params.hdr.dest_domain = APR_DOMAIN_ADSP;
	/* destination COPP depends on the session's perf mode */
	if (perf_mode == LEGACY_PCM_MODE)
		adm_params.hdr.dest_port =
			atomic_read(&this_adm.copp_id[index]);
	else
		adm_params.hdr.dest_port =
			atomic_read(&this_adm.copp_low_latency_id[index]);
	adm_params.hdr.token = port_id;
	adm_params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
	/* cal payload is passed by physical address, not in-band */
	adm_params.payload_addr_lsw = aud_cal->cal_paddr;
	adm_params.payload_addr_msw = 0;
	adm_params.mem_map_handle = atomic_read(&this_adm.mem_map_cal_handles[
				atomic_read(&this_adm.mem_map_cal_index)]);
	adm_params.payload_size = aud_cal->cal_size;

	atomic_set(&this_adm.copp_stat[index], 0);

	pr_debug("%s: Sending SET_PARAMS payload = 0x%x, size = %d\n",
		__func__, adm_params.payload_addr_lsw,
		adm_params.payload_size);
	result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_params);
	if (result < 0) {
		pr_err("%s: Set params failed port = %#x payload = 0x%x\n",
			__func__, port_id, aud_cal->cal_paddr);
		result = -EINVAL;
		goto done;
	}
	/* Wait for the callback */
	result = wait_event_timeout(this_adm.wait[index],
		atomic_read(&this_adm.copp_stat[index]),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!result) {
		pr_err("%s: Set params timed out port = %#x, payload = 0x%x\n",
			__func__, port_id, aud_cal->cal_paddr);
		result = -EINVAL;
		goto done;
	}

	result = 0;
done:
	return result;
}
  790. static void send_adm_cal(int port_id, int path, int perf_mode)
  791. {
  792. int result = 0;
  793. s32 acdb_path;
  794. struct acdb_cal_block aud_cal;
  795. int size;
  796. pr_debug("%s\n", __func__);
  797. /* Maps audio_dev_ctrl path definition to ACDB definition */
  798. acdb_path = path - 1;
  799. if (acdb_path == TX_CAL)
  800. size = 4096 * 4;
  801. else
  802. size = 4096;
  803. pr_debug("%s: Sending audproc cal\n", __func__);
  804. get_audproc_cal(acdb_path, &aud_cal);
  805. /* map & cache buffers used */
  806. atomic_set(&this_adm.mem_map_cal_index, acdb_path);
  807. if (((this_adm.mem_addr_audproc[acdb_path].cal_paddr !=
  808. aud_cal.cal_paddr) && (aud_cal.cal_size > 0)) ||
  809. (aud_cal.cal_size >
  810. this_adm.mem_addr_audproc[acdb_path].cal_size)) {
  811. if (this_adm.mem_addr_audproc[acdb_path].cal_paddr != 0)
  812. adm_memory_unmap_regions(port_id);
  813. result = adm_memory_map_regions(port_id, &aud_cal.cal_paddr,
  814. 0, &size, 1);
  815. if (result < 0) {
  816. pr_err("ADM audproc mmap did not work! path = %d, addr = 0x%x, size = %d\n",
  817. acdb_path, aud_cal.cal_paddr,
  818. aud_cal.cal_size);
  819. } else {
  820. this_adm.mem_addr_audproc[acdb_path].cal_paddr =
  821. aud_cal.cal_paddr;
  822. this_adm.mem_addr_audproc[acdb_path].cal_size = size;
  823. }
  824. }
  825. if (!send_adm_cal_block(port_id, &aud_cal, perf_mode))
  826. pr_debug("%s: Audproc cal sent for port id: %#x, path %d\n",
  827. __func__, port_id, acdb_path);
  828. else
  829. pr_debug("%s: Audproc cal not sent for port id: %#x, path %d\n",
  830. __func__, port_id, acdb_path);
  831. pr_debug("%s: Sending audvol cal\n", __func__);
  832. get_audvol_cal(acdb_path, &aud_cal);
  833. /* map & cache buffers used */
  834. atomic_set(&this_adm.mem_map_cal_index,
  835. (acdb_path + MAX_AUDPROC_TYPES));
  836. if (((this_adm.mem_addr_audvol[acdb_path].cal_paddr !=
  837. aud_cal.cal_paddr) && (aud_cal.cal_size > 0)) ||
  838. (aud_cal.cal_size >
  839. this_adm.mem_addr_audvol[acdb_path].cal_size)) {
  840. if (this_adm.mem_addr_audvol[acdb_path].cal_paddr != 0)
  841. adm_memory_unmap_regions(port_id);
  842. result = adm_memory_map_regions(port_id, &aud_cal.cal_paddr,
  843. 0, &size, 1);
  844. if (result < 0) {
  845. pr_err("ADM audvol mmap did not work! path = %d, addr = 0x%x, size = %d\n",
  846. acdb_path, aud_cal.cal_paddr,
  847. aud_cal.cal_size);
  848. } else {
  849. this_adm.mem_addr_audvol[acdb_path].cal_paddr =
  850. aud_cal.cal_paddr;
  851. this_adm.mem_addr_audvol[acdb_path].cal_size = size;
  852. }
  853. }
  854. if (!send_adm_cal_block(port_id, &aud_cal, perf_mode))
  855. pr_debug("%s: Audvol cal sent for port id: %#x, path %d\n",
  856. __func__, port_id, acdb_path);
  857. else
  858. pr_debug("%s: Audvol cal not sent for port id: %#x, path %d\n",
  859. __func__, port_id, acdb_path);
  860. }
  861. int adm_map_rtac_block(struct rtac_cal_block_data *cal_block)
  862. {
  863. int result = 0;
  864. pr_debug("%s\n", __func__);
  865. if (cal_block == NULL) {
  866. pr_err("%s: cal_block is NULL!\n",
  867. __func__);
  868. result = -EINVAL;
  869. goto done;
  870. }
  871. if (cal_block->cal_data.paddr == 0) {
  872. pr_debug("%s: No address to map!\n",
  873. __func__);
  874. result = -EINVAL;
  875. goto done;
  876. }
  877. if (cal_block->map_data.map_size == 0) {
  878. pr_debug("%s: map size is 0!\n",
  879. __func__);
  880. result = -EINVAL;
  881. goto done;
  882. }
  883. /* valid port ID needed for callback use primary I2S */
  884. atomic_set(&this_adm.mem_map_cal_index, ADM_RTAC);
  885. result = adm_memory_map_regions(PRIMARY_I2S_RX,
  886. &cal_block->cal_data.paddr, 0,
  887. &cal_block->map_data.map_size, 1);
  888. if (result < 0) {
  889. pr_err("%s: RTAC mmap did not work! addr = 0x%x, size = %d\n",
  890. __func__, cal_block->cal_data.paddr,
  891. cal_block->map_data.map_size);
  892. goto done;
  893. }
  894. cal_block->map_data.map_handle = atomic_read(
  895. &this_adm.mem_map_cal_handles[ADM_RTAC]);
  896. done:
  897. return result;
  898. }
  899. int adm_unmap_rtac_block(uint32_t *mem_map_handle)
  900. {
  901. int result = 0;
  902. pr_debug("%s\n", __func__);
  903. if (mem_map_handle == NULL) {
  904. pr_debug("%s: Map handle is NULL, nothing to unmap\n",
  905. __func__);
  906. goto done;
  907. }
  908. if (*mem_map_handle == 0) {
  909. pr_debug("%s: Map handle is 0, nothing to unmap\n",
  910. __func__);
  911. goto done;
  912. }
  913. if (*mem_map_handle != atomic_read(
  914. &this_adm.mem_map_cal_handles[ADM_RTAC])) {
  915. pr_err("%s: Map handles do not match! Unmapping RTAC, RTAC map 0x%x, ADM map 0x%x\n",
  916. __func__, *mem_map_handle, atomic_read(
  917. &this_adm.mem_map_cal_handles[ADM_RTAC]));
  918. /* if mismatch use handle passed in to unmap */
  919. atomic_set(&this_adm.mem_map_cal_handles[ADM_RTAC],
  920. *mem_map_handle);
  921. }
  922. /* valid port ID needed for callback use primary I2S */
  923. atomic_set(&this_adm.mem_map_cal_index, ADM_RTAC);
  924. result = adm_memory_unmap_regions(PRIMARY_I2S_RX);
  925. if (result < 0) {
  926. pr_debug("%s: adm_memory_unmap_regions failed, error %d\n",
  927. __func__, result);
  928. } else {
  929. atomic_set(&this_adm.mem_map_cal_handles[ADM_RTAC], 0);
  930. *mem_map_handle = 0;
  931. }
  932. done:
  933. return result;
  934. }
/*
 * adm_unmap_cal_blocks() - unmap every cached ADM calibration region.
 *
 * Walks all cal-handle slots; for each mapped slot it clears the cached
 * address bookkeeping for that cal type and issues a shared-memory
 * unmap.  All slots are attempted even after a failure; the return
 * value is 0 on full success or the last failing unmap's error code.
 * NOTE(review): the slot classification assumes the cal-type enum is
 * ordered audproc, audvol, custom-top, then others -- confirm against
 * the enum definition.
 */
int adm_unmap_cal_blocks(void)
{
	int i;
	int result = 0;
	int result2 = 0;

	for (i = 0; i < ADM_MAX_CAL_TYPES; i++) {
		if (atomic_read(&this_adm.mem_map_cal_handles[i]) != 0) {

			if (i <= ADM_TX_AUDPROC_CAL) {
				/* audproc slots index the array directly */
				this_adm.mem_addr_audproc[i].cal_paddr = 0;
				this_adm.mem_addr_audproc[i].cal_size = 0;
			} else if (i <= ADM_TX_AUDVOL_CAL) {
				/* audvol slots are offset by ADM_RX_AUDVOL_CAL */
				this_adm.mem_addr_audvol
					[(i - ADM_RX_AUDVOL_CAL)].cal_paddr
					= 0;
				this_adm.mem_addr_audvol
					[(i - ADM_RX_AUDVOL_CAL)].cal_size
					= 0;
			} else if (i == ADM_CUSTOM_TOP_CAL) {
				/* force re-send of custom topology next time */
				this_adm.set_custom_topology = 1;
			} else {
				/* other slots (e.g. RTAC) are not handled here */
				continue;
			}

			/* valid port ID needed for callback use primary I2S */
			atomic_set(&this_adm.mem_map_cal_index, i);
			result2 = adm_memory_unmap_regions(PRIMARY_I2S_RX);
			if (result2 < 0) {
				pr_err("%s: adm_memory_unmap_regions failed, err %d\n",
					__func__, result2);
				/* remember the failure but keep going */
				result = result2;
			} else {
				atomic_set(&this_adm.mem_map_cal_handles[i],
					0);
			}
		}
	}
	return result;
}
  972. int adm_connect_afe_port(int mode, int session_id, int port_id)
  973. {
  974. struct adm_cmd_connect_afe_port_v5 cmd;
  975. int ret = 0;
  976. int index;
  977. pr_debug("%s: port %d session id:%d mode:%d\n", __func__,
  978. port_id, session_id, mode);
  979. port_id = afe_convert_virtual_to_portid(port_id);
  980. if (afe_validate_port(port_id) < 0) {
  981. pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
  982. return -ENODEV;
  983. }
  984. if (this_adm.apr == NULL) {
  985. this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
  986. 0xFFFFFFFF, &this_adm);
  987. if (this_adm.apr == NULL) {
  988. pr_err("%s: Unable to register ADM\n", __func__);
  989. ret = -ENODEV;
  990. return ret;
  991. }
  992. rtac_set_adm_handle(this_adm.apr);
  993. }
  994. index = afe_get_port_index(port_id);
  995. pr_debug("%s: Port ID %#x, index %d\n", __func__, port_id, index);
  996. cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
  997. APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
  998. cmd.hdr.pkt_size = sizeof(cmd);
  999. cmd.hdr.src_svc = APR_SVC_ADM;
  1000. cmd.hdr.src_domain = APR_DOMAIN_APPS;
  1001. cmd.hdr.src_port = port_id;
  1002. cmd.hdr.dest_svc = APR_SVC_ADM;
  1003. cmd.hdr.dest_domain = APR_DOMAIN_ADSP;
  1004. cmd.hdr.dest_port = port_id;
  1005. cmd.hdr.token = port_id;
  1006. cmd.hdr.opcode = ADM_CMD_CONNECT_AFE_PORT_V5;
  1007. cmd.mode = mode;
  1008. cmd.session_id = session_id;
  1009. cmd.afe_port_id = port_id;
  1010. atomic_set(&this_adm.copp_stat[index], 0);
  1011. ret = apr_send_pkt(this_adm.apr, (uint32_t *)&cmd);
  1012. if (ret < 0) {
  1013. pr_err("%s:ADM enable for port %#x failed\n",
  1014. __func__, port_id);
  1015. ret = -EINVAL;
  1016. goto fail_cmd;
  1017. }
  1018. /* Wait for the callback with copp id */
  1019. ret = wait_event_timeout(this_adm.wait[index],
  1020. atomic_read(&this_adm.copp_stat[index]),
  1021. msecs_to_jiffies(TIMEOUT_MS));
  1022. if (!ret) {
  1023. pr_err("%s ADM connect AFE failed for port %#x\n", __func__,
  1024. port_id);
  1025. ret = -EINVAL;
  1026. goto fail_cmd;
  1027. }
  1028. atomic_inc(&this_adm.copp_cnt[index]);
  1029. return 0;
  1030. fail_cmd:
  1031. return ret;
  1032. }
  1033. int adm_open(int port_id, int path, int rate, int channel_mode, int topology,
  1034. int perf_mode, uint16_t bits_per_sample)
  1035. {
  1036. struct adm_cmd_device_open_v5 open;
  1037. int ret = 0;
  1038. int index;
  1039. int tmp_port = q6audio_get_port_id(port_id);
  1040. pr_debug("%s: port %#x path:%d rate:%d mode:%d perf_mode:%d\n",
  1041. __func__, port_id, path, rate, channel_mode, perf_mode);
  1042. port_id = q6audio_convert_virtual_to_portid(port_id);
  1043. if (q6audio_validate_port(port_id) < 0) {
  1044. pr_err("%s port idi[%#x] is invalid\n", __func__, port_id);
  1045. return -ENODEV;
  1046. }
  1047. index = q6audio_get_port_index(port_id);
  1048. pr_debug("%s: Port ID %#x, index %d\n", __func__, port_id, index);
  1049. if (this_adm.apr == NULL) {
  1050. this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
  1051. 0xFFFFFFFF, &this_adm);
  1052. if (this_adm.apr == NULL) {
  1053. pr_err("%s: Unable to register ADM\n", __func__);
  1054. ret = -ENODEV;
  1055. return ret;
  1056. }
  1057. rtac_set_adm_handle(this_adm.apr);
  1058. }
  1059. if (perf_mode == LEGACY_PCM_MODE) {
  1060. atomic_set(&this_adm.copp_perf_mode[index], 0);
  1061. send_adm_custom_topology(port_id);
  1062. } else {
  1063. atomic_set(&this_adm.copp_perf_mode[index], 1);
  1064. }
  1065. /* Create a COPP if port id are not enabled */
  1066. if ((perf_mode == LEGACY_PCM_MODE &&
  1067. (atomic_read(&this_adm.copp_cnt[index]) == 0)) ||
  1068. (perf_mode != LEGACY_PCM_MODE &&
  1069. (atomic_read(&this_adm.copp_low_latency_cnt[index]) == 0))) {
  1070. pr_debug("%s:opening ADM: perf_mode: %d\n", __func__,
  1071. perf_mode);
  1072. open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
  1073. APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
  1074. open.hdr.pkt_size = sizeof(open);
  1075. open.hdr.src_svc = APR_SVC_ADM;
  1076. open.hdr.src_domain = APR_DOMAIN_APPS;
  1077. open.hdr.src_port = tmp_port;
  1078. open.hdr.dest_svc = APR_SVC_ADM;
  1079. open.hdr.dest_domain = APR_DOMAIN_ADSP;
  1080. open.hdr.dest_port = tmp_port;
  1081. open.hdr.token = port_id;
  1082. open.hdr.opcode = ADM_CMD_DEVICE_OPEN_V5;
  1083. if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE)
  1084. open.flags = ADM_ULTRA_LOW_LATENCY_DEVICE_SESSION;
  1085. else if (perf_mode == LOW_LATENCY_PCM_MODE)
  1086. open.flags = ADM_LOW_LATENCY_DEVICE_SESSION;
  1087. else
  1088. open.flags = ADM_LEGACY_DEVICE_SESSION;
  1089. open.mode_of_operation = path;
  1090. open.endpoint_id_1 = tmp_port;
  1091. if (this_adm.ec_ref_rx == -1) {
  1092. open.endpoint_id_2 = 0xFFFF;
  1093. } else if (this_adm.ec_ref_rx && (path != 1)) {
  1094. open.endpoint_id_2 = this_adm.ec_ref_rx;
  1095. this_adm.ec_ref_rx = -1;
  1096. }
  1097. open.topology_id = topology;
  1098. if ((open.topology_id == VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
  1099. (open.topology_id == VPM_TX_DM_FLUENCE_COPP_TOPOLOGY) ||
  1100. (open.topology_id == VPM_TX_DM_RFECNS_COPP_TOPOLOGY) ||
  1101. (open.topology_id == VPM_TX_SM_LVVE_COPP_TOPOLOGY) ||
  1102. /* LVVE for Barge-in */
  1103. (open.topology_id == 0x1000BFF0) ||
  1104. (open.topology_id == 0x1000BFF1))
  1105. rate = 16000;
  1106. if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE) {
  1107. open.topology_id = NULL_COPP_TOPOLOGY;
  1108. rate = ULL_SUPPORTED_SAMPLE_RATE;
  1109. if(channel_mode > ULL_MAX_SUPPORTED_CHANNEL)
  1110. channel_mode = ULL_MAX_SUPPORTED_CHANNEL;
  1111. } else if (perf_mode == LOW_LATENCY_PCM_MODE) {
  1112. if ((open.topology_id == DOLBY_ADM_COPP_TOPOLOGY_ID) ||
  1113. (open.topology_id == SRS_TRUMEDIA_TOPOLOGY_ID))
  1114. open.topology_id = DEFAULT_COPP_TOPOLOGY;
  1115. }
  1116. open.dev_num_channel = channel_mode & 0x00FF;
  1117. open.bit_width = bits_per_sample;
  1118. WARN_ON(perf_mode == ULTRA_LOW_LATENCY_PCM_MODE &&
  1119. (rate != 48000));
  1120. open.sample_rate = rate;
  1121. memset(open.dev_channel_mapping, 0, 8);
  1122. if (channel_mode == 1) {
  1123. open.dev_channel_mapping[0] = PCM_CHANNEL_FC;
  1124. } else if (channel_mode == 2) {
  1125. open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
  1126. open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
  1127. } else if (channel_mode == 3) {
  1128. open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
  1129. open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
  1130. open.dev_channel_mapping[2] = PCM_CHANNEL_FC;
  1131. } else if (channel_mode == 4) {
  1132. open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
  1133. open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
  1134. open.dev_channel_mapping[2] = PCM_CHANNEL_RB;
  1135. open.dev_channel_mapping[3] = PCM_CHANNEL_LB;
  1136. } else if (channel_mode == 5) {
  1137. open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
  1138. open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
  1139. open.dev_channel_mapping[2] = PCM_CHANNEL_FC;
  1140. open.dev_channel_mapping[3] = PCM_CHANNEL_LB;
  1141. open.dev_channel_mapping[4] = PCM_CHANNEL_RB;
  1142. } else if (channel_mode == 6) {
  1143. open.dev_channel_mapping[0] = PCM_CHANNEL_FC;
  1144. open.dev_channel_mapping[1] = PCM_CHANNEL_FL;
  1145. open.dev_channel_mapping[2] = PCM_CHANNEL_LB;
  1146. open.dev_channel_mapping[3] = PCM_CHANNEL_FR;
  1147. open.dev_channel_mapping[4] = PCM_CHANNEL_RB;
  1148. open.dev_channel_mapping[5] = PCM_CHANNEL_LFE;
  1149. } else if (channel_mode == 8) {
  1150. open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
  1151. open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
  1152. open.dev_channel_mapping[2] = PCM_CHANNEL_LFE;
  1153. open.dev_channel_mapping[3] = PCM_CHANNEL_FC;
  1154. open.dev_channel_mapping[4] = PCM_CHANNEL_LB;
  1155. open.dev_channel_mapping[5] = PCM_CHANNEL_RB;
  1156. open.dev_channel_mapping[6] = PCM_CHANNEL_FLC;
  1157. open.dev_channel_mapping[7] = PCM_CHANNEL_FRC;
  1158. } else {
  1159. pr_err("%s invalid num_chan %d\n", __func__,
  1160. channel_mode);
  1161. return -EINVAL;
  1162. }
  1163. if ((open.dev_num_channel > 2) &&
  1164. multi_ch_map.set_channel_map)
  1165. memcpy(open.dev_channel_mapping,
  1166. multi_ch_map.channel_mapping,
  1167. PCM_FORMAT_MAX_NUM_CHANNEL);
  1168. pr_debug("%s: port_id=%#x rate=%d topology_id=0x%X\n",
  1169. __func__, open.endpoint_id_1, open.sample_rate,
  1170. open.topology_id);
  1171. atomic_set(&this_adm.copp_stat[index], 0);
  1172. ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
  1173. if (ret < 0) {
  1174. pr_err("%s:ADM enable for port %#x for[%d] failed\n",
  1175. __func__, tmp_port, port_id);
  1176. ret = -EINVAL;
  1177. goto fail_cmd;
  1178. }
  1179. /* Wait for the callback with copp id */
  1180. ret = wait_event_timeout(this_adm.wait[index],
  1181. atomic_read(&this_adm.copp_stat[index]),
  1182. msecs_to_jiffies(TIMEOUT_MS));
  1183. if (!ret) {
  1184. pr_err("%s ADM open failed for port %#x for [%d]\n",
  1185. __func__, tmp_port, port_id);
  1186. ret = -EINVAL;
  1187. goto fail_cmd;
  1188. }
  1189. }
  1190. if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE ||
  1191. perf_mode == LOW_LATENCY_PCM_MODE) {
  1192. atomic_inc(&this_adm.copp_low_latency_cnt[index]);
  1193. pr_debug("%s: index: %d coppid: %d", __func__, index,
  1194. atomic_read(&this_adm.copp_low_latency_id[index]));
  1195. } else {
  1196. atomic_inc(&this_adm.copp_cnt[index]);
  1197. pr_debug("%s: index: %d coppid: %d", __func__, index,
  1198. atomic_read(&this_adm.copp_id[index]));
  1199. }
  1200. return 0;
  1201. fail_cmd:
  1202. return ret;
  1203. }
  1204. int adm_multi_ch_copp_open(int port_id, int path, int rate, int channel_mode,
  1205. int topology, int perf_mode, uint16_t bits_per_sample)
  1206. {
  1207. int ret = 0;
  1208. ret = adm_open(port_id, path, rate, channel_mode,
  1209. topology, perf_mode, bits_per_sample);
  1210. return ret;
  1211. }
/*
 * adm_matrix_map() - route an ASM session to one or more COPPs.
 * @session_id: ASM session to route
 * @path:       1 = RX matrix, 2/3 = TX matrix
 * @num_copps:  number of entries in @port_id
 * @port_id:    array of AFE port ids; entries are converted in place to
 *              real port ids
 * @copp_id:    port used to pick the destination COPP and wait queue
 * @perf_mode:  selects legacy vs low-latency COPP ids
 *
 * Builds a variable-length ADM_CMD_MATRIX_MAP_ROUTINGS_V5 packet
 * (routing header + one session map node + COPP id list), sends it and
 * waits for the ack, then (except in ULL mode) pushes calibration and
 * registers each COPP with RTAC.  Returns 0 on success or a negative
 * error (0 is also returned for an invalid copp_id index).
 */
int adm_matrix_map(int session_id, int path, int num_copps,
			unsigned int *port_id, int copp_id, int perf_mode)
{
	struct adm_cmd_matrix_map_routings_v5 *route;
	struct adm_session_map_node_v5 *node;
	uint16_t *copps_list;
	int cmd_size = 0;
	int ret = 0, i = 0;
	void *payload = NULL;
	void *matrix_map = NULL;

	/* Assumes port_ids have already been validated during adm_open */
	int index = q6audio_get_port_index(copp_id);
	if (index < 0 || index >= AFE_MAX_PORTS) {
		pr_err("%s: invalid port idx %d token %d\n",
			__func__, index, copp_id);
		return 0;
	}

	/*
	 * sizeof(uint32_t) per COPP over-allocates (the list entries are
	 * uint16_t), which also keeps the packet padded.
	 */
	cmd_size = (sizeof(struct adm_cmd_matrix_map_routings_v5) +
			sizeof(struct adm_session_map_node_v5) +
			(sizeof(uint32_t) * num_copps));
	matrix_map = kzalloc(cmd_size, GFP_KERNEL);
	if (matrix_map == NULL) {
		pr_err("%s: Mem alloc failed\n", __func__);
		ret = -EINVAL;
		return ret;
	}
	route = (struct adm_cmd_matrix_map_routings_v5 *)matrix_map;

	pr_debug("%s: session 0x%x path:%d num_copps:%d port_id[0]:%#x coppid[%d]\n",
		 __func__, session_id, path, num_copps, port_id[0], copp_id);

	route->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	route->hdr.pkt_size = cmd_size;
	route->hdr.src_svc = 0;
	route->hdr.src_domain = APR_DOMAIN_APPS;
	route->hdr.src_port = copp_id;
	route->hdr.dest_svc = APR_SVC_ADM;
	route->hdr.dest_domain = APR_DOMAIN_ADSP;
	/* destination COPP depends on the session's perf mode */
	if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE ||
		perf_mode == LOW_LATENCY_PCM_MODE) {
		route->hdr.dest_port =
			atomic_read(&this_adm.copp_low_latency_id[index]);
	} else {
		route->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	}
	route->hdr.token = copp_id;
	route->hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS_V5;
	route->num_sessions = 1;

	/* pick the audio matrix matching the stream direction */
	switch (path) {
	case 0x1:
		route->matrix_id = ADM_MATRIX_ID_AUDIO_RX;
		break;
	case 0x2:
	case 0x3:
		route->matrix_id = ADM_MATRIX_ID_AUDIO_TX;
		break;
	default:
		/* packet is still sent; matrix_id stays 0 from kzalloc */
		pr_err("%s: Wrong path set[%d]\n", __func__, path);
		break;
	}

	/* session map node immediately follows the fixed routing header */
	payload = ((u8 *)matrix_map +
			sizeof(struct adm_cmd_matrix_map_routings_v5));
	node = (struct adm_session_map_node_v5 *)payload;
	node->session_id = session_id;
	node->num_copps = num_copps;

	/* COPP id list immediately follows the session node */
	payload = (u8 *)node + sizeof(struct adm_session_map_node_v5);
	copps_list = (uint16_t *)payload;
	for (i = 0; i < num_copps; i++) {
		int tmp;
		port_id[i] = q6audio_convert_virtual_to_portid(port_id[i]);

		tmp = q6audio_get_port_index(port_id[i]);
		if (tmp >= 0 && tmp < AFE_MAX_PORTS) {
			if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE ||
				perf_mode == LOW_LATENCY_PCM_MODE)
				copps_list[i] =
				atomic_read(&this_adm.copp_low_latency_id[tmp]);
			else
				copps_list[i] =
					atomic_read(&this_adm.copp_id[tmp]);
		}
		else
			/* invalid index: entry stays 0 from kzalloc */
			continue;
		pr_debug("%s: port_id[%#x]: %d, index: %d act coppid[0x%x]\n",
			__func__, i, port_id[i], tmp, copps_list[i]);
	}
	atomic_set(&this_adm.copp_stat[index], 0);

	ret = apr_send_pkt(this_adm.apr, (uint32_t *)matrix_map);
	if (ret < 0) {
		pr_err("%s: ADM routing for port %#x failed\n",
					__func__, port_id[0]);
		ret = -EINVAL;
		goto fail_cmd;
	}
	ret = wait_event_timeout(this_adm.wait[index],
				atomic_read(&this_adm.copp_stat[index]),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: ADM cmd Route failed for port %#x\n",
					__func__, port_id[0]);
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* ULL COPPs take no post-processing cal or RTAC registration */
	if (perf_mode != ULTRA_LOW_LATENCY_PCM_MODE) {
		for (i = 0; i < num_copps; i++)
			send_adm_cal(port_id[i], path, perf_mode);

		for (i = 0; i < num_copps; i++) {
			int tmp, copp_id;	/* note: shadows the parameter */
			tmp = afe_get_port_index(port_id[i]);
			if (tmp >= 0 && tmp < AFE_MAX_PORTS) {
				if (perf_mode == LEGACY_PCM_MODE)
					copp_id = atomic_read(
						&this_adm.copp_id[tmp]);
				else
					copp_id = atomic_read(
						&this_adm.copp_low_latency_id[tmp]);
				rtac_add_adm_device(port_id[i],
						copp_id, path, session_id);
				pr_debug("%s, copp_id: %d\n",
					__func__, copp_id);
			} else
				pr_debug("%s: Invalid port index %d",
					__func__, tmp);
		}
	}
fail_cmd:
	kfree(matrix_map);
	return ret;
}
  1339. int adm_memory_map_regions(int port_id,
  1340. uint32_t *buf_add, uint32_t mempool_id,
  1341. uint32_t *bufsz, uint32_t bufcnt)
  1342. {
  1343. struct avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
  1344. struct avs_shared_map_region_payload *mregions = NULL;
  1345. void *mmap_region_cmd = NULL;
  1346. void *payload = NULL;
  1347. int ret = 0;
  1348. int i = 0;
  1349. int cmd_size = 0;
  1350. int index = 0;
  1351. pr_debug("%s\n", __func__);
  1352. if (this_adm.apr == NULL) {
  1353. this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
  1354. 0xFFFFFFFF, &this_adm);
  1355. if (this_adm.apr == NULL) {
  1356. pr_err("%s: Unable to register ADM\n", __func__);
  1357. ret = -ENODEV;
  1358. return ret;
  1359. }
  1360. rtac_set_adm_handle(this_adm.apr);
  1361. }
  1362. port_id = q6audio_convert_virtual_to_portid(port_id);
  1363. if (q6audio_validate_port(port_id) < 0) {
  1364. pr_err("%s port id[%#x] is invalid\n", __func__, port_id);
  1365. return -ENODEV;
  1366. }
  1367. index = q6audio_get_port_index(port_id);
  1368. cmd_size = sizeof(struct avs_cmd_shared_mem_map_regions)
  1369. + sizeof(struct avs_shared_map_region_payload)
  1370. * bufcnt;
  1371. mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
  1372. if (!mmap_region_cmd) {
  1373. pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
  1374. return -ENOMEM;
  1375. }
  1376. mmap_regions = (struct avs_cmd_shared_mem_map_regions *)mmap_region_cmd;
  1377. mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
  1378. APR_HDR_LEN(APR_HDR_SIZE),
  1379. APR_PKT_VER);
  1380. mmap_regions->hdr.pkt_size = cmd_size;
  1381. mmap_regions->hdr.src_port = 0;
  1382. mmap_regions->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
  1383. mmap_regions->hdr.token = port_id;
  1384. mmap_regions->hdr.opcode = ADM_CMD_SHARED_MEM_MAP_REGIONS;
  1385. mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL & 0x00ff;
  1386. mmap_regions->num_regions = bufcnt & 0x00ff;
  1387. mmap_regions->property_flag = 0x00;
  1388. pr_debug("%s: map_regions->num_regions = %d\n", __func__,
  1389. mmap_regions->num_regions);
  1390. payload = ((u8 *) mmap_region_cmd +
  1391. sizeof(struct avs_cmd_shared_mem_map_regions));
  1392. mregions = (struct avs_shared_map_region_payload *)payload;
  1393. for (i = 0; i < bufcnt; i++) {
  1394. mregions->shm_addr_lsw = buf_add[i];
  1395. mregions->shm_addr_msw = 0x00;
  1396. mregions->mem_size_bytes = bufsz[i];
  1397. ++mregions;
  1398. }
  1399. atomic_set(&this_adm.copp_stat[index], 0);
  1400. ret = apr_send_pkt(this_adm.apr, (uint32_t *) mmap_region_cmd);
  1401. if (ret < 0) {
  1402. pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
  1403. mmap_regions->hdr.opcode, ret);
  1404. ret = -EINVAL;
  1405. goto fail_cmd;
  1406. }
  1407. ret = wait_event_timeout(this_adm.wait[index],
  1408. atomic_read(&this_adm.copp_stat[index]), 5 * HZ);
  1409. if (!ret) {
  1410. pr_err("%s: timeout. waited for memory_map\n", __func__);
  1411. ret = -EINVAL;
  1412. goto fail_cmd;
  1413. }
  1414. fail_cmd:
  1415. kfree(mmap_region_cmd);
  1416. return ret;
  1417. }
/*
 * adm_memory_unmap_regions() - unmap the currently selected cal region.
 * @port_id: AFE port used for addressing and the wait queue.
 *
 * The region to unmap is chosen indirectly: the caller must set
 * mem_map_cal_index before invoking this, and the matching handle from
 * mem_map_cal_handles[] is sent with ADM_CMD_SHARED_MEM_UNMAP_REGIONS.
 * Blocks up to 5 s for the DSP ack.  Returns the (positive) remaining
 * wait time on success, or a negative error.
 */
int adm_memory_unmap_regions(int32_t port_id)
{
	struct avs_cmd_shared_mem_unmap_regions unmap_regions;
	int ret = 0;
	int index = 0;

	pr_debug("%s\n", __func__);
	if (this_adm.apr == NULL) {
		pr_err("%s APR handle NULL\n", __func__);
		return -EINVAL;
	}
	port_id = q6audio_convert_virtual_to_portid(port_id);

	if (q6audio_validate_port(port_id) < 0) {
		pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
		return -ENODEV;
	}

	index = q6audio_get_port_index(port_id);

	unmap_regions.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
						APR_HDR_LEN(APR_HDR_SIZE),
						APR_PKT_VER);
	unmap_regions.hdr.pkt_size = sizeof(unmap_regions);
	unmap_regions.hdr.src_port = 0;
	unmap_regions.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	unmap_regions.hdr.token = port_id;
	unmap_regions.hdr.opcode = ADM_CMD_SHARED_MEM_UNMAP_REGIONS;
	/* handle selected via mem_map_cal_index set by the caller */
	unmap_regions.mem_map_handle = atomic_read(&this_adm.
		mem_map_cal_handles[atomic_read(&this_adm.mem_map_cal_index)]);
	atomic_set(&this_adm.copp_stat[index], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *) &unmap_regions);
	if (ret < 0) {
		pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
			unmap_regions.hdr.opcode, ret);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_adm.wait[index],
				 atomic_read(&this_adm.copp_stat[index]),
				 5 * HZ);
	if (!ret) {
		pr_err("%s: timeout. waited for memory_unmap index %d\n",
			__func__, index);
		ret = -EINVAL;
		goto fail_cmd;
	} else {
		pr_debug("%s: Unmap handle 0x%x succeeded\n", __func__,
			unmap_regions.mem_map_handle);
	}
fail_cmd:
	return ret;
}
#ifdef CONFIG_RTAC
/*
 * adm_get_copp_id() - return the active COPP id for a port index.
 * @port_index: index into the per-port COPP arrays.
 *
 * Falls back to the low-latency COPP id when the legacy one holds
 * RESET_COPP_ID.  Returns -EINVAL for a negative port index.
 * NOTE(review): no upper-bound check on port_index here -- callers are
 * trusted to pass a valid index; confirm.
 */
int adm_get_copp_id(int port_index)
{
	int copp_id;

	pr_debug("%s\n", __func__);

	if (port_index < 0) {
		pr_err("%s: invalid port_id = %d\n", __func__, port_index);
		return -EINVAL;
	}

	copp_id = atomic_read(&this_adm.copp_id[port_index]);
	/* legacy COPP not open: report the low-latency COPP instead */
	if (copp_id == RESET_COPP_ID)
		copp_id = atomic_read(
			&this_adm.copp_low_latency_id[port_index]);
	return copp_id;
}

/*
 * adm_get_lowlatency_copp_id() - return the low-latency COPP id only.
 * @port_index: index into the per-port COPP arrays.
 *
 * Returns -EINVAL for a negative port index.
 */
int adm_get_lowlatency_copp_id(int port_index)
{
	pr_debug("%s\n", __func__);

	if (port_index < 0) {
		pr_err("%s: invalid port_id = %d\n", __func__, port_index);
		return -EINVAL;
	}

	return atomic_read(&this_adm.copp_low_latency_id[port_index]);
}
#else
/* Stubs used when RTAC support is compiled out. */
int adm_get_copp_id(int port_index)
{
	return -EINVAL;
}

int adm_get_lowlatency_copp_id(int port_index)
{
	return -EINVAL;
}
#endif /* #ifdef CONFIG_RTAC */
  1501. void adm_ec_ref_rx_id(int port_id)
  1502. {
  1503. this_adm.ec_ref_rx = port_id;
  1504. pr_debug("%s ec_ref_rx:%d", __func__, this_adm.ec_ref_rx);
  1505. }
/*
 * adm_close - release one reference on a port's COPP and, when the
 * last reference for that perf mode is dropped, send
 * ADM_CMD_DEVICE_CLOSE_V5 to the ADSP and wait for the ack.
 *
 * @port_id:   virtual port id; converted to a real port id and
 *             validated via q6audio helpers before use.
 * @perf_mode: LEGACY_PCM_MODE, LOW_LATENCY_PCM_MODE or
 *             ULTRA_LOW_LATENCY_PCM_MODE; selects which COPP
 *             id/refcount pair (legacy vs low-latency) is operated on.
 *
 * Returns 0 on success (including when only a refcount was dropped and
 * no close command was needed), -EINVAL on validation, send or timeout
 * failure.  NOTE(review): the zero-refcount error paths jump to
 * fail_cmd with ret still 0, so they return success — looks
 * intentional as a best-effort, but confirm.
 */
int adm_close(int port_id, int perf_mode)
{
	struct apr_hdr close;

	int ret = 0;
	int index = 0;
	int copp_id = RESET_COPP_ID;

	port_id = q6audio_convert_virtual_to_portid(port_id);

	index = q6audio_get_port_index(port_id);
	if (q6audio_validate_port(port_id) < 0)
		return -EINVAL;

	pr_debug("%s port_id=%#x index %d perf_mode: %d\n", __func__, port_id,
		index, perf_mode);

	/* Drop one reference on the COPP matching the perf mode. */
	if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE ||
				perf_mode == LOW_LATENCY_PCM_MODE) {
		if (!(atomic_read(&this_adm.copp_low_latency_cnt[index]))) {
			pr_err("%s: copp count for port[%#x]is 0\n", __func__,
				port_id);
			goto fail_cmd;
		}
		atomic_dec(&this_adm.copp_low_latency_cnt[index]);
	} else {
		if (!(atomic_read(&this_adm.copp_cnt[index]))) {
			pr_err("%s: copp count for port[%#x]is 0\n", __func__,
				port_id);
			goto fail_cmd;
		}
		atomic_dec(&this_adm.copp_cnt[index]);
	}

	/*
	 * Only when the last user of this perf mode's COPP is gone do we
	 * actually close the device on the DSP.
	 */
	if ((perf_mode == LEGACY_PCM_MODE &&
		!(atomic_read(&this_adm.copp_cnt[index]))) ||
		((perf_mode != LEGACY_PCM_MODE) &&
		!(atomic_read(&this_adm.copp_low_latency_cnt[index])))) {

		pr_debug("%s:Closing ADM: perf_mode: %d\n", __func__,
				perf_mode);
		/* Build the APR close command header by hand. */
		close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		close.pkt_size = sizeof(close);
		close.src_svc = APR_SVC_ADM;
		close.src_domain = APR_DOMAIN_APPS;
		close.src_port = port_id;
		close.dest_svc = APR_SVC_ADM;
		close.dest_domain = APR_DOMAIN_ADSP;
		/* Destination port is the COPP id for the chosen perf mode. */
		if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE ||
				perf_mode == LOW_LATENCY_PCM_MODE)
			close.dest_port =
			     atomic_read(&this_adm.copp_low_latency_id[index]);
		else
			close.dest_port = atomic_read(&this_adm.copp_id[index]);
		close.token = port_id;
		close.opcode = ADM_CMD_DEVICE_CLOSE_V5;

		/*
		 * Clear the completion flag BEFORE sending; the APR callback
		 * sets copp_stat[index] and wakes wait[index].
		 */
		atomic_set(&this_adm.copp_stat[index], 0);

		/*
		 * Capture the COPP id (for rtac removal below) and reset the
		 * stored id before the close is sent.
		 */
		if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE ||
				perf_mode == LOW_LATENCY_PCM_MODE) {
			copp_id = atomic_read(
				&this_adm.copp_low_latency_id[index]);
			pr_debug("%s:coppid %d portid=%#x index=%d coppcnt=%d\n",
				__func__,
				copp_id,
				port_id, index,
				atomic_read(
					&this_adm.copp_low_latency_cnt[index]));
			atomic_set(&this_adm.copp_low_latency_id[index],
				RESET_COPP_ID);
		} else {
			copp_id = atomic_read(&this_adm.copp_id[index]);
			pr_debug("%s:coppid %d portid=%#x index=%d coppcnt=%d\n",
				__func__,
				copp_id,
				port_id, index,
				atomic_read(&this_adm.copp_cnt[index]));
			atomic_set(&this_adm.copp_id[index],
				RESET_COPP_ID);
		}

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&close);
		if (ret < 0) {
			pr_err("%s ADM close failed\n", __func__);
			ret = -EINVAL;
			goto fail_cmd;
		}

		/* Wait for the DSP's ack (callback sets copp_stat). */
		ret = wait_event_timeout(this_adm.wait[index],
				atomic_read(&this_adm.copp_stat[index]),
				msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			pr_err("%s: ADM cmd Route failed for port %#x\n",
						__func__, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
	}

	/* Ultra-low-latency COPPs are never registered with rtac. */
	if (perf_mode != ULTRA_LOW_LATENCY_PCM_MODE) {
		pr_debug("%s: remove adm device from rtac\n", __func__);
		rtac_remove_adm_device(port_id, copp_id);
	}

fail_cmd:
	return ret;
}
  1602. static int __init adm_init(void)
  1603. {
  1604. int i = 0;
  1605. this_adm.apr = NULL;
  1606. this_adm.set_custom_topology = 1;
  1607. this_adm.ec_ref_rx = -1;
  1608. for (i = 0; i < AFE_MAX_PORTS; i++) {
  1609. atomic_set(&this_adm.copp_id[i], RESET_COPP_ID);
  1610. atomic_set(&this_adm.copp_low_latency_id[i], RESET_COPP_ID);
  1611. atomic_set(&this_adm.copp_cnt[i], 0);
  1612. atomic_set(&this_adm.copp_low_latency_cnt[i], 0);
  1613. atomic_set(&this_adm.copp_stat[i], 0);
  1614. atomic_set(&this_adm.copp_perf_mode[i], 0);
  1615. init_waitqueue_head(&this_adm.wait[i]);
  1616. }
  1617. return 0;
  1618. }
  1619. device_initcall(adm_init);