/* q6adm.c — QDSP6v2 Audio Device Matrix (ADM) driver */
  1. /* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. */
  12. #include <linux/slab.h>
  13. #include <linux/wait.h>
  14. #include <linux/sched.h>
  15. #include <linux/jiffies.h>
  16. #include <linux/uaccess.h>
  17. #include <linux/atomic.h>
  18. #include <linux/wait.h>
  19. #include <sound/apr_audio-v2.h>
  20. #include <mach/qdsp6v2/apr.h>
  21. #include <sound/q6adm-v2.h>
  22. #include <sound/q6audio-v2.h>
  23. #include <sound/q6afe-v2.h>
  24. #include "audio_acdb.h"
  25. #if defined(CONFIG_SEC_MILLETWIFI_COMMON) || defined(CONFIG_SEC_MATISSEWIFI_COMMON)
  26. #ifdef pr_debug
  27. #undef pr_debug
  28. #define pr_debug pr_err
  29. #endif
  30. #endif
  31. #define TIMEOUT_MS 1000
  32. #define RESET_COPP_ID 99
  33. #define INVALID_COPP_ID 0xFF
  34. /* Used for inband payload copy, max size is 4k */
  35. /* 2 is to account for module & param ID in payload */
  36. #define ADM_GET_PARAMETER_LENGTH (4096 - APR_HDR_SIZE - 2 * sizeof(uint32_t))
  37. #define ULL_SUPPORTED_SAMPLE_RATE 48000
  38. #define ULL_MAX_SUPPORTED_CHANNEL 2
/*
 * Calibration memory-map slot indices.
 * Each value indexes this_adm.mem_map_cal_handles[]; ADM_MAX_CAL_TYPES
 * is the slot count used to size that array.
 */
enum {
	ADM_RX_AUDPROC_CAL,
	ADM_TX_AUDPROC_CAL,
	ADM_RX_AUDVOL_CAL,
	ADM_TX_AUDVOL_CAL,
	ADM_CUSTOM_TOP_CAL,
	ADM_RTAC,
	ADM_MAX_CAL_TYPES
};
/*
 * Per-driver ADM state, one instance (this_adm) for the whole module.
 * All per-port arrays are indexed by the AFE port index returned by
 * afe_get_port_index()/q6audio_get_port_index().
 */
struct adm_ctl {
	void *apr;				/* APR service handle; NULL after RESET_EVENTS */
	atomic_t copp_id[AFE_MAX_PORTS];	/* legacy-path COPP id per port */
	atomic_t copp_cnt[AFE_MAX_PORTS];	/* open count, legacy path */
	atomic_t copp_low_latency_id[AFE_MAX_PORTS]; /* low-latency-path COPP id */
	atomic_t copp_low_latency_cnt[AFE_MAX_PORTS]; /* open count, low-latency path */
	atomic_t copp_perf_mode[AFE_MAX_PORTS];	/* nonzero: port opened in perf mode */
	atomic_t copp_stat[AFE_MAX_PORTS];	/* cmd-done flag, set by adm_callback */
	wait_queue_head_t wait[AFE_MAX_PORTS];	/* waiters for copp_stat transitions */
	struct acdb_cal_block mem_addr_audproc[MAX_AUDPROC_TYPES]; /* device cal */
	struct acdb_cal_block mem_addr_audvol[MAX_AUDPROC_TYPES];  /* volume cal */
	atomic_t mem_map_cal_handles[ADM_MAX_CAL_TYPES]; /* DSP mem-map handles */
	atomic_t mem_map_cal_index;		/* slot a pending map response targets */
	int set_custom_topology;		/* one-shot: send custom topology once */
	int ec_ref_rx;				/* echo-canceller reference RX port */
};
static struct adm_ctl this_adm;
/* Channel map to apply on next multichannel COPP open, if set. */
struct adm_multi_ch_map {
	bool set_channel_map;	/* true once adm_set_multi_ch_map() stored a map */
	char channel_mapping[PCM_FORMAT_MAX_NUM_CHANNEL];
};
static struct adm_multi_ch_map multi_ch_map = { false,
	{0, 0, 0, 0, 0, 0, 0, 0}
};
/*
 * GET_PP_PARAMS response cache: [0] holds the received parameter count
 * (in uint32 units, or -1 on failure), [1..] hold the values.
 * NOTE(review): ADM_GET_PARAMETER_LENGTH is computed in bytes but used
 * here as an int element count — over-allocates ~4x; harmless but worth
 * confirming against the original intent.
 */
static int adm_get_parameters[ADM_GET_PARAMETER_LENGTH];
  73. int srs_trumedia_open(int port_id, int srs_tech_id, void *srs_params)
  74. {
  75. struct adm_cmd_set_pp_params_inband_v5 *adm_params = NULL;
  76. int ret = 0, sz = 0;
  77. int index;
  78. pr_debug("SRS - %s", __func__);
  79. switch (srs_tech_id) {
  80. case SRS_ID_GLOBAL: {
  81. struct srs_trumedia_params_GLOBAL *glb_params = NULL;
  82. sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
  83. sizeof(struct srs_trumedia_params_GLOBAL);
  84. adm_params = kzalloc(sz, GFP_KERNEL);
  85. if (!adm_params) {
  86. pr_err("%s, adm params memory alloc failed\n",
  87. __func__);
  88. return -ENOMEM;
  89. }
  90. adm_params->payload_size =
  91. sizeof(struct srs_trumedia_params_GLOBAL) +
  92. sizeof(struct adm_param_data_v5);
  93. adm_params->params.param_id = SRS_TRUMEDIA_PARAMS;
  94. adm_params->params.param_size =
  95. sizeof(struct srs_trumedia_params_GLOBAL);
  96. glb_params = (struct srs_trumedia_params_GLOBAL *)
  97. ((u8 *)adm_params +
  98. sizeof(struct adm_cmd_set_pp_params_inband_v5));
  99. memcpy(glb_params, srs_params,
  100. sizeof(struct srs_trumedia_params_GLOBAL));
  101. pr_debug("SRS - %s: Global params - 1 = %x, 2 = %x, 3 = %x, 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x\n",
  102. __func__, (int)glb_params->v1,
  103. (int)glb_params->v2, (int)glb_params->v3,
  104. (int)glb_params->v4, (int)glb_params->v5,
  105. (int)glb_params->v6, (int)glb_params->v7,
  106. (int)glb_params->v8);
  107. break;
  108. }
  109. case SRS_ID_WOWHD: {
  110. struct srs_trumedia_params_WOWHD *whd_params = NULL;
  111. sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
  112. sizeof(struct srs_trumedia_params_WOWHD);
  113. adm_params = kzalloc(sz, GFP_KERNEL);
  114. if (!adm_params) {
  115. pr_err("%s, adm params memory alloc failed\n",
  116. __func__);
  117. return -ENOMEM;
  118. }
  119. adm_params->payload_size =
  120. sizeof(struct srs_trumedia_params_WOWHD) +
  121. sizeof(struct adm_param_data_v5);
  122. adm_params->params.param_id = SRS_TRUMEDIA_PARAMS_WOWHD;
  123. adm_params->params.param_size =
  124. sizeof(struct srs_trumedia_params_WOWHD);
  125. whd_params = (struct srs_trumedia_params_WOWHD *)
  126. ((u8 *)adm_params +
  127. sizeof(struct adm_cmd_set_pp_params_inband_v5));
  128. memcpy(whd_params, srs_params,
  129. sizeof(struct srs_trumedia_params_WOWHD));
  130. pr_debug("SRS - %s: WOWHD params - 1 = %x, 2 = %x, 3 = %x, 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x, 9 = %x, 10 = %x, 11 = %x\n",
  131. __func__, (int)whd_params->v1,
  132. (int)whd_params->v2, (int)whd_params->v3,
  133. (int)whd_params->v4, (int)whd_params->v5,
  134. (int)whd_params->v6, (int)whd_params->v7,
  135. (int)whd_params->v8, (int)whd_params->v9,
  136. (int)whd_params->v10, (int)whd_params->v11);
  137. break;
  138. }
  139. case SRS_ID_CSHP: {
  140. struct srs_trumedia_params_CSHP *chp_params = NULL;
  141. sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
  142. sizeof(struct srs_trumedia_params_CSHP);
  143. adm_params = kzalloc(sz, GFP_KERNEL);
  144. if (!adm_params) {
  145. pr_err("%s, adm params memory alloc failed\n",
  146. __func__);
  147. return -ENOMEM;
  148. }
  149. adm_params->payload_size =
  150. sizeof(struct srs_trumedia_params_CSHP) +
  151. sizeof(struct adm_param_data_v5);
  152. adm_params->params.param_id = SRS_TRUMEDIA_PARAMS_CSHP;
  153. adm_params->params.param_size =
  154. sizeof(struct srs_trumedia_params_CSHP);
  155. chp_params = (struct srs_trumedia_params_CSHP *)
  156. ((u8 *)adm_params +
  157. sizeof(struct adm_cmd_set_pp_params_inband_v5));
  158. memcpy(chp_params, srs_params,
  159. sizeof(struct srs_trumedia_params_CSHP));
  160. pr_debug("SRS - %s: CSHP params - 1 = %x, 2 = %x, 3 = %x, 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x, 9 = %x\n",
  161. __func__, (int)chp_params->v1,
  162. (int)chp_params->v2, (int)chp_params->v3,
  163. (int)chp_params->v4, (int)chp_params->v5,
  164. (int)chp_params->v6, (int)chp_params->v7,
  165. (int)chp_params->v8, (int)chp_params->v9);
  166. break;
  167. }
  168. case SRS_ID_HPF: {
  169. struct srs_trumedia_params_HPF *hpf_params = NULL;
  170. sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
  171. sizeof(struct srs_trumedia_params_HPF);
  172. adm_params = kzalloc(sz, GFP_KERNEL);
  173. if (!adm_params) {
  174. pr_err("%s, adm params memory alloc failed\n",
  175. __func__);
  176. return -ENOMEM;
  177. }
  178. adm_params->payload_size =
  179. sizeof(struct srs_trumedia_params_HPF) +
  180. sizeof(struct adm_param_data_v5);
  181. adm_params->params.param_id = SRS_TRUMEDIA_PARAMS_HPF;
  182. adm_params->params.param_size =
  183. sizeof(struct srs_trumedia_params_HPF);
  184. hpf_params = (struct srs_trumedia_params_HPF *)
  185. ((u8 *)adm_params +
  186. sizeof(struct adm_cmd_set_pp_params_inband_v5));
  187. memcpy(hpf_params, srs_params,
  188. sizeof(struct srs_trumedia_params_HPF));
  189. pr_debug("SRS - %s: HPF params - 1 = %x\n", __func__,
  190. (int)hpf_params->v1);
  191. break;
  192. }
  193. case SRS_ID_PEQ: {
  194. struct srs_trumedia_params_PEQ *peq_params = NULL;
  195. sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
  196. sizeof(struct srs_trumedia_params_PEQ);
  197. adm_params = kzalloc(sz, GFP_KERNEL);
  198. if (!adm_params) {
  199. pr_err("%s, adm params memory alloc failed\n",
  200. __func__);
  201. return -ENOMEM;
  202. }
  203. adm_params->payload_size =
  204. sizeof(struct srs_trumedia_params_PEQ) +
  205. sizeof(struct adm_param_data_v5);
  206. adm_params->params.param_id = SRS_TRUMEDIA_PARAMS_PEQ;
  207. adm_params->params.param_size =
  208. sizeof(struct srs_trumedia_params_PEQ);
  209. peq_params = (struct srs_trumedia_params_PEQ *)
  210. ((u8 *)adm_params +
  211. sizeof(struct adm_cmd_set_pp_params_inband_v5));
  212. memcpy(peq_params, srs_params,
  213. sizeof(struct srs_trumedia_params_PEQ));
  214. pr_debug("SRS - %s: PEQ params - 1 = %x 2 = %x, 3 = %x, 4 = %x\n",
  215. __func__, (int)peq_params->v1,
  216. (int)peq_params->v2, (int)peq_params->v3,
  217. (int)peq_params->v4);
  218. break;
  219. }
  220. case SRS_ID_HL: {
  221. struct srs_trumedia_params_HL *hl_params = NULL;
  222. sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
  223. sizeof(struct srs_trumedia_params_HL);
  224. adm_params = kzalloc(sz, GFP_KERNEL);
  225. if (!adm_params) {
  226. pr_err("%s, adm params memory alloc failed\n",
  227. __func__);
  228. return -ENOMEM;
  229. }
  230. adm_params->payload_size =
  231. sizeof(struct srs_trumedia_params_HL) +
  232. sizeof(struct adm_param_data_v5);
  233. adm_params->params.param_id = SRS_TRUMEDIA_PARAMS_HL;
  234. adm_params->params.param_size =
  235. sizeof(struct srs_trumedia_params_HL);
  236. hl_params = (struct srs_trumedia_params_HL *)
  237. ((u8 *)adm_params +
  238. sizeof(struct adm_cmd_set_pp_params_inband_v5));
  239. memcpy(hl_params, srs_params,
  240. sizeof(struct srs_trumedia_params_HL));
  241. pr_debug("SRS - %s: HL params - 1 = %x, 2 = %x, 3 = %x, 4 = %x, 5 = %x, 6 = %x, 7 = %x\n",
  242. __func__, (int)hl_params->v1,
  243. (int)hl_params->v2, (int)hl_params->v3,
  244. (int)hl_params->v4, (int)hl_params->v5,
  245. (int)hl_params->v6, (int)hl_params->v7);
  246. break;
  247. }
  248. default:
  249. goto fail_cmd;
  250. }
  251. adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
  252. APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
  253. adm_params->hdr.pkt_size = sz;
  254. adm_params->hdr.src_svc = APR_SVC_ADM;
  255. adm_params->hdr.src_domain = APR_DOMAIN_APPS;
  256. adm_params->hdr.src_port = port_id;
  257. adm_params->hdr.dest_svc = APR_SVC_ADM;
  258. adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
  259. index = afe_get_port_index(port_id);
  260. if (index < 0 || index >= AFE_MAX_PORTS) {
  261. pr_err("%s: invalid port idx %d portid %#x\n",
  262. __func__, index, port_id);
  263. ret = -EINVAL;
  264. goto fail_cmd;
  265. }
  266. adm_params->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
  267. adm_params->hdr.token = port_id;
  268. adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
  269. adm_params->payload_addr_lsw = 0;
  270. adm_params->payload_addr_msw = 0;
  271. adm_params->mem_map_handle = 0;
  272. adm_params->params.module_id = SRS_TRUMEDIA_MODULE_ID;
  273. adm_params->params.reserved = 0;
  274. pr_debug("SRS - %s: Command was sent now check Q6 - port id = %d, size %d, module id %x, param id %x.\n",
  275. __func__, adm_params->hdr.dest_port,
  276. adm_params->payload_size, adm_params->params.module_id,
  277. adm_params->params.param_id);
  278. ret = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
  279. if (ret < 0) {
  280. pr_err("SRS - %s: ADM enable for port %d failed\n", __func__,
  281. port_id);
  282. ret = -EINVAL;
  283. goto fail_cmd;
  284. }
  285. /* Wait for the callback with copp id */
  286. ret = wait_event_timeout(this_adm.wait[index], 1,
  287. msecs_to_jiffies(TIMEOUT_MS));
  288. if (!ret) {
  289. pr_err("%s: SRS set params timed out port = %d\n",
  290. __func__, port_id);
  291. ret = -EINVAL;
  292. goto fail_cmd;
  293. }
  294. fail_cmd:
  295. kfree(adm_params);
  296. return ret;
  297. }
  298. int adm_dolby_dap_send_params(int port_id, char *params, uint32_t params_length)
  299. {
  300. struct adm_cmd_set_pp_params_v5 *adm_params = NULL;
  301. int sz, rc = 0, index = afe_get_port_index(port_id);
  302. pr_debug("%s\n", __func__);
  303. if (index < 0 || index >= AFE_MAX_PORTS) {
  304. pr_err("%s: invalid port idx %d portid %#x\n",
  305. __func__, index, port_id);
  306. return -EINVAL;
  307. }
  308. sz = sizeof(struct adm_cmd_set_pp_params_v5) + params_length;
  309. adm_params = kzalloc(sz, GFP_KERNEL);
  310. if (!adm_params) {
  311. pr_err("%s, adm params memory alloc failed", __func__);
  312. return -ENOMEM;
  313. }
  314. memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_set_pp_params_v5)),
  315. params, params_length);
  316. adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
  317. APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
  318. adm_params->hdr.pkt_size = sz;
  319. adm_params->hdr.src_svc = APR_SVC_ADM;
  320. adm_params->hdr.src_domain = APR_DOMAIN_APPS;
  321. adm_params->hdr.src_port = port_id;
  322. adm_params->hdr.dest_svc = APR_SVC_ADM;
  323. adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
  324. adm_params->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
  325. adm_params->hdr.token = port_id;
  326. adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
  327. adm_params->payload_addr_lsw = 0;
  328. adm_params->payload_addr_msw = 0;
  329. adm_params->mem_map_handle = 0;
  330. adm_params->payload_size = params_length;
  331. atomic_set(&this_adm.copp_stat[index], 0);
  332. rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
  333. if (rc < 0) {
  334. pr_err("%s: Set params failed port = %#x\n",
  335. __func__, port_id);
  336. rc = -EINVAL;
  337. goto dolby_dap_send_param_return;
  338. }
  339. /* Wait for the callback */
  340. rc = wait_event_timeout(this_adm.wait[index],
  341. atomic_read(&this_adm.copp_stat[index]),
  342. msecs_to_jiffies(TIMEOUT_MS));
  343. if (!rc) {
  344. pr_err("%s: Set params timed out port = %#x\n",
  345. __func__, port_id);
  346. rc = -EINVAL;
  347. goto dolby_dap_send_param_return;
  348. }
  349. rc = 0;
  350. dolby_dap_send_param_return:
  351. kfree(adm_params);
  352. return rc;
  353. }
  354. int adm_get_params(int port_id, uint32_t module_id, uint32_t param_id,
  355. uint32_t params_length, char *params)
  356. {
  357. struct adm_cmd_get_pp_params_v5 *adm_params = NULL;
  358. int sz, rc = 0, i = 0, index = afe_get_port_index(port_id);
  359. int *params_data = (int *)params;
  360. if (index < 0 || index >= AFE_MAX_PORTS) {
  361. pr_err("%s: invalid port idx %d portid %#x\n",
  362. __func__, index, port_id);
  363. return -EINVAL;
  364. }
  365. sz = sizeof(struct adm_cmd_get_pp_params_v5) + params_length;
  366. adm_params = kzalloc(sz, GFP_KERNEL);
  367. if (!adm_params) {
  368. pr_err("%s, adm params memory alloc failed", __func__);
  369. return -ENOMEM;
  370. }
  371. memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_get_pp_params_v5)),
  372. params, params_length);
  373. adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
  374. APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
  375. adm_params->hdr.pkt_size = sz;
  376. adm_params->hdr.src_svc = APR_SVC_ADM;
  377. adm_params->hdr.src_domain = APR_DOMAIN_APPS;
  378. adm_params->hdr.src_port = port_id;
  379. adm_params->hdr.dest_svc = APR_SVC_ADM;
  380. adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
  381. adm_params->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
  382. adm_params->hdr.token = port_id;
  383. adm_params->hdr.opcode = ADM_CMD_GET_PP_PARAMS_V5;
  384. adm_params->data_payload_addr_lsw = 0;
  385. adm_params->data_payload_addr_msw = 0;
  386. adm_params->mem_map_handle = 0;
  387. adm_params->module_id = module_id;
  388. adm_params->param_id = param_id;
  389. adm_params->param_max_size = params_length;
  390. adm_params->reserved = 0;
  391. atomic_set(&this_adm.copp_stat[index], 0);
  392. rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
  393. if (rc < 0) {
  394. pr_err("%s: Failed to Get Params on port %d\n", __func__,
  395. port_id);
  396. rc = -EINVAL;
  397. goto adm_get_param_return;
  398. }
  399. /* Wait for the callback with copp id */
  400. rc = wait_event_timeout(this_adm.wait[index],
  401. atomic_read(&this_adm.copp_stat[index]),
  402. msecs_to_jiffies(TIMEOUT_MS));
  403. if (!rc) {
  404. pr_err("%s: get params timed out port = %d\n", __func__,
  405. port_id);
  406. rc = -EINVAL;
  407. goto adm_get_param_return;
  408. }
  409. if ((params_data) && (ARRAY_SIZE(adm_get_parameters) >=
  410. (1+adm_get_parameters[0])) &&
  411. (params_length/sizeof(uint32_t) >=
  412. adm_get_parameters[0])) {
  413. for (i = 0; i < adm_get_parameters[0]; i++)
  414. params_data[i] = adm_get_parameters[1+i];
  415. } else {
  416. pr_err("%s: Get param data not copied! get_param array size %zd, index %d, params array size %zd, index %d\n",
  417. __func__, ARRAY_SIZE(adm_get_parameters),
  418. (1+adm_get_parameters[0]),
  419. params_length/sizeof(int),
  420. adm_get_parameters[0]);
  421. }
  422. rc = 0;
  423. adm_get_param_return:
  424. kfree(adm_params);
  425. return rc;
  426. }
  427. static void adm_callback_debug_print(struct apr_client_data *data)
  428. {
  429. uint32_t *payload;
  430. payload = data->payload;
  431. if (data->payload_size >= 8)
  432. pr_debug("%s: code = 0x%x PL#0[%x], PL#1[%x], size = %d\n",
  433. __func__, data->opcode, payload[0], payload[1],
  434. data->payload_size);
  435. else if (data->payload_size >= 4)
  436. pr_debug("%s: code = 0x%x PL#0[%x], size = %d\n",
  437. __func__, data->opcode, payload[0],
  438. data->payload_size);
  439. else
  440. pr_debug("%s: code = 0x%x, size = %d\n",
  441. __func__, data->opcode, data->payload_size);
  442. }
  443. void adm_set_multi_ch_map(char *channel_map)
  444. {
  445. memcpy(multi_ch_map.channel_mapping, channel_map,
  446. PCM_FORMAT_MAX_NUM_CHANNEL);
  447. multi_ch_map.set_channel_map = true;
  448. }
  449. void adm_get_multi_ch_map(char *channel_map)
  450. {
  451. if (multi_ch_map.set_channel_map) {
  452. memcpy(channel_map, multi_ch_map.channel_mapping,
  453. PCM_FORMAT_MAX_NUM_CHANNEL);
  454. }
  455. }
/*
 * adm_callback() - APR receive handler for the ADM service.
 *
 * Handles three classes of events:
 *  - RESET_EVENTS: DSP restart; clears all per-port state, cal handles
 *    and cached cal addresses, then re-arms custom topology download.
 *  - APR_BASIC_RSP_RESULT: command acks; sets copp_stat[] and wakes the
 *    sender blocked in wait[].
 *  - Response opcodes (DEVICE_OPEN, GET_PP_PARAMS, MEM_MAP): records the
 *    returned ids/handles/values, then wakes the waiter.
 *
 * Returns 0 in all handled cases, -EINVAL only for a NULL @data.
 * NOTE(review): ordering matters throughout — state is always written
 * before wake_up() so the waiter observes it; do not reorder.
 */
static int32_t adm_callback(struct apr_client_data *data, void *priv)
{
	uint32_t *payload;
	int i, index;
	if (data == NULL) {
		pr_err("%s: data paramter is null\n", __func__);
		return -EINVAL;
	}
	payload = data->payload;
	if (data->opcode == RESET_EVENTS) {
		pr_debug("adm_callback: Reset event is received: %d %d apr[%p]\n",
			data->reset_event, data->reset_proc,
			this_adm.apr);
		if (this_adm.apr) {
			apr_reset(this_adm.apr);
			/* invalidate every port's COPP bookkeeping */
			for (i = 0; i < AFE_MAX_PORTS; i++) {
				atomic_set(&this_adm.copp_id[i],
					RESET_COPP_ID);
				atomic_set(&this_adm.copp_low_latency_id[i],
					RESET_COPP_ID);
				atomic_set(&this_adm.copp_cnt[i], 0);
				atomic_set(&this_adm.copp_low_latency_cnt[i],
					0);
				atomic_set(&this_adm.copp_perf_mode[i], 0);
				atomic_set(&this_adm.copp_stat[i], 0);
			}
			this_adm.apr = NULL;
			/* custom topologies must be re-sent after reset */
			reset_custom_topology_flags();
			this_adm.set_custom_topology = 1;
			for (i = 0; i < ADM_MAX_CAL_TYPES; i++)
				atomic_set(&this_adm.mem_map_cal_handles[i],
					0);
			rtac_clear_mapping(ADM_RTAC_CAL);
		}
		pr_debug("Resetting calibration blocks");
		for (i = 0; i < MAX_AUDPROC_TYPES; i++) {
			/* Device calibration */
			this_adm.mem_addr_audproc[i].cal_size = 0;
			this_adm.mem_addr_audproc[i].cal_kvaddr = 0;
			this_adm.mem_addr_audproc[i].cal_paddr = 0;
			/* Volume calibration */
			this_adm.mem_addr_audvol[i].cal_size = 0;
			this_adm.mem_addr_audvol[i].cal_kvaddr = 0;
			this_adm.mem_addr_audvol[i].cal_paddr = 0;
		}
		return 0;
	}
	adm_callback_debug_print(data);
	if (data->payload_size) {
		/* token was set to the port id by the sender */
		index = q6audio_get_port_index(data->token);
		if (index < 0 || index >= AFE_MAX_PORTS) {
			pr_err("%s: invalid port idx %d token %d\n",
				__func__, index, data->token);
			return 0;
		}
		if (data->opcode == APR_BASIC_RSP_RESULT) {
			/* payload[0] = echoed opcode, payload[1] = status */
			pr_debug("APR_BASIC_RSP_RESULT id %x\n", payload[0]);
			if (payload[1] != 0) {
				pr_err("%s: cmd = 0x%x returned error = 0x%x\n",
					__func__, payload[0], payload[1]);
			}
			switch (payload[0]) {
			case ADM_CMD_SET_PP_PARAMS_V5:
				pr_debug("%s: ADM_CMD_SET_PP_PARAMS_V5\n",
					__func__);
				if (rtac_make_adm_callback(
					payload, data->payload_size)) {
					break;
				}
				/* fallthrough — non-RTAC acks wake the waiter */
			case ADM_CMD_DEVICE_CLOSE_V5:
			case ADM_CMD_SHARED_MEM_UNMAP_REGIONS:
			case ADM_CMD_MATRIX_MAP_ROUTINGS_V5:
			case ADM_CMD_ADD_TOPOLOGIES:
				pr_debug("%s: Basic callback received, wake up.\n",
					__func__);
				atomic_set(&this_adm.copp_stat[index], 1);
				wake_up(&this_adm.wait[index]);
				break;
			case ADM_CMD_SHARED_MEM_MAP_REGIONS:
				pr_debug("%s: ADM_CMD_SHARED_MEM_MAP_REGIONS\n",
					__func__);
				/* Should only come here if there is an APR */
				/* error or malformed APR packet. Otherwise */
				/* response will be returned as */
				if (payload[1] != 0) {
					pr_err("%s: ADM map error, resuming\n",
						__func__);
					atomic_set(&this_adm.copp_stat[index],
						1);
					wake_up(&this_adm.wait[index]);
				}
				break;
			case ADM_CMD_GET_PP_PARAMS_V5:
				pr_debug("%s: ADM_CMD_GET_PP_PARAMS_V5\n",
					__func__);
				/* Should only come here if there is an APR */
				/* error or malformed APR packet. Otherwise */
				/* response will be returned as */
				/* ADM_CMDRSP_GET_PP_PARAMS_V5 */
				if (payload[1] != 0) {
					pr_err("%s: ADM get param error = %d, resuming\n",
						__func__, payload[1]);
					rtac_make_adm_callback(payload,
						data->payload_size);
				}
				break;
			default:
				pr_err("%s: Unknown Cmd: 0x%x\n", __func__,
					payload[0]);
#if defined(CONFIG_SEC_MILLETWIFI_COMMON) || defined(CONFIG_SEC_MATISSEWIFI_COMMON)
				panic("Q6 ADM Error...\n");
#endif
				break;
			}
			return 0;
		}
		switch (data->opcode) {
		case ADM_CMDRSP_DEVICE_OPEN_V5: {
			struct adm_cmd_rsp_device_open_v5 *open =
			(struct adm_cmd_rsp_device_open_v5 *)data->payload;
			if (open->copp_id == INVALID_COPP_ID) {
				pr_err("%s: invalid coppid rxed %d\n",
					__func__, open->copp_id);
				atomic_set(&this_adm.copp_stat[index], 1);
				wake_up(&this_adm.wait[index]);
				break;
			}
			/* record the new COPP id on the path that opened it */
			if (atomic_read(&this_adm.copp_perf_mode[index])) {
				atomic_set(&this_adm.copp_low_latency_id[index],
						open->copp_id);
			} else {
				atomic_set(&this_adm.copp_id[index],
					open->copp_id);
			}
			atomic_set(&this_adm.copp_stat[index], 1);
			pr_debug("%s: coppid rxed=%d\n", __func__,
				open->copp_id);
			wake_up(&this_adm.wait[index]);
			}
			break;
		case ADM_CMDRSP_GET_PP_PARAMS_V5:
			pr_debug("%s: ADM_CMDRSP_GET_PP_PARAMS_V5\n", __func__);
			if (payload[0] != 0)
				pr_err("%s: ADM_CMDRSP_GET_PP_PARAMS_V5 returned error = 0x%x\n",
					__func__, payload[0]);
			if (rtac_make_adm_callback(payload,
					data->payload_size))
				break;
			/* payload[3] is the param size, check if payload */
			/* is big enough and has a valid param size */
			if ((payload[0] == 0) && (data->payload_size >
				(4 * sizeof(*payload))) &&
				(data->payload_size -
				(4 * sizeof(*payload)) >=
				payload[3]) &&
				(ARRAY_SIZE(adm_get_parameters)-1 >=
				payload[3])) {
				adm_get_parameters[0] = payload[3] /
							sizeof(uint32_t);
				/*
				 * payload[3] is param_size which is
				 * expressed in number of bytes
				 */
				pr_debug("%s: GET_PP PARAM:received parameter length: 0x%x\n",
					__func__, adm_get_parameters[0]);
				/* storing param size then params */
				for (i = 0; i < payload[3] /
						sizeof(uint32_t); i++)
					adm_get_parameters[1+i] =
							payload[4+i];
			} else {
				/* -1 marks the cache invalid for adm_get_params() */
				adm_get_parameters[0] = -1;
				pr_err("%s: GET_PP_PARAMS failed, setting size to %d\n",
					__func__, adm_get_parameters[0]);
			}
			atomic_set(&this_adm.copp_stat[index], 1);
			wake_up(&this_adm.wait[index]);
			break;
		case ADM_CMDRSP_SHARED_MEM_MAP_REGIONS:
			pr_debug("%s: ADM_CMDRSP_SHARED_MEM_MAP_REGIONS\n",
				__func__);
			/* store the handle in the slot armed by the sender */
			atomic_set(&this_adm.mem_map_cal_handles[
				atomic_read(&this_adm.mem_map_cal_index)],
				*payload);
			atomic_set(&this_adm.copp_stat[index], 1);
			wake_up(&this_adm.wait[index]);
			break;
		default:
			pr_err("%s: Unknown cmd:0x%x\n", __func__,
				data->opcode);
#if defined(CONFIG_SEC_MILLETWIFI_COMMON) || defined(CONFIG_SEC_MATISSEWIFI_COMMON)
			panic("Q6 ADM Error...\n");
#endif
			break;
		}
	}
	return 0;
}
/*
 * send_adm_custom_topology() - download the ACDB custom topology blob
 * to the DSP via ADM_CMD_ADD_TOPOLOGIES for @port_id.
 *
 * On the first call after boot/reset (set_custom_topology != 0) the cal
 * block is also memory-mapped into the ADM_CUSTOM_TOP_CAL slot.
 * Best-effort: all failures just log and return (void).
 *
 * NOTE(review): set_custom_topology is cleared *before* the map attempt,
 * so a failed mapping is never retried on later calls — looks deliberate
 * ("Only call this once") but worth confirming.
 */
void send_adm_custom_topology(int port_id)
{
	struct acdb_cal_block cal_block;
	struct cmd_set_topologies adm_top;
	int index;
	int result;
	int size = 4096;	/* mapped region size passed to adm_memory_map_regions() */
	get_adm_custom_topology(&cal_block);
	if (cal_block.cal_size == 0) {
		pr_debug("%s: no cal to send addr= 0x%x\n",
			__func__, cal_block.cal_paddr);
		goto done;
	}
	index = afe_get_port_index(port_id);
	if (index < 0 || index >= AFE_MAX_PORTS) {
		pr_err("%s: invalid port idx %d portid %#x\n",
			__func__, index, port_id);
		goto done;
	}
	if (this_adm.set_custom_topology) {
		/* specific index 4 for adm topology memory */
		atomic_set(&this_adm.mem_map_cal_index, ADM_CUSTOM_TOP_CAL);
		/* Only call this once */
		this_adm.set_custom_topology = 0;
		result = adm_memory_map_regions(port_id,
				&cal_block.cal_paddr, 0, &size, 1);
		if (result < 0) {
			pr_err("%s: mmap did not work! addr = 0x%x, size = %d\n",
				__func__, cal_block.cal_paddr,
				cal_block.cal_size);
			goto done;
		}
	}
	adm_top.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(20), APR_PKT_VER);
	adm_top.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
		sizeof(adm_top));
	adm_top.hdr.src_svc = APR_SVC_ADM;
	adm_top.hdr.src_domain = APR_DOMAIN_APPS;
	adm_top.hdr.src_port = port_id;
	adm_top.hdr.dest_svc = APR_SVC_ADM;
	adm_top.hdr.dest_domain = APR_DOMAIN_ADSP;
	adm_top.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	adm_top.hdr.token = port_id;
	adm_top.hdr.opcode = ADM_CMD_ADD_TOPOLOGIES;
	/* out-of-band payload: physical address + map handle from the map step */
	adm_top.payload_addr_lsw = cal_block.cal_paddr;
	adm_top.payload_addr_msw = 0;
	adm_top.mem_map_handle =
		atomic_read(&this_adm.mem_map_cal_handles[ADM_CUSTOM_TOP_CAL]);
	adm_top.payload_size = cal_block.cal_size;
	/* arm the done-flag before sending so the ack can't be missed */
	atomic_set(&this_adm.copp_stat[index], 0);
	pr_debug("%s: Sending ADM_CMD_ADD_TOPOLOGIES payload = 0x%x, size = %d\n",
		__func__, adm_top.payload_addr_lsw,
		adm_top.payload_size);
	result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_top);
	if (result < 0) {
		pr_err("%s: Set topologies failed port = 0x%x payload = 0x%x\n",
			__func__, port_id, cal_block.cal_paddr);
		goto done;
	}
	/* Wait for the callback */
	result = wait_event_timeout(this_adm.wait[index],
		atomic_read(&this_adm.copp_stat[index]),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!result) {
		pr_err("%s: Set topologies timed out port = 0x%x, payload = 0x%x\n",
			__func__, port_id, cal_block.cal_paddr);
		goto done;
	}
done:
	return;
}
/*
 * send_adm_cal_block() - send one ACDB calibration block to an opened COPP.
 * @port_id:   AFE port whose COPP receives ADM_CMD_SET_PP_PARAMS_V5.
 * @aud_cal:   calibration block (physical address + size) to send.
 * @perf_mode: LEGACY_PCM_MODE selects copp_id[]; anything else selects
 *             copp_low_latency_id[] as the destination port.
 *
 * Returns 0 on success; -EINVAL for missing/empty cal data, send failure,
 * or response timeout.
 *
 * NOTE(review): an out-of-range port index returns 0, which callers in
 * send_adm_cal() interpret as "cal sent" - confirm this is intentional.
 */
static int send_adm_cal_block(int port_id, struct acdb_cal_block *aud_cal,
				int perf_mode)
{
	s32 result = 0;
	struct adm_cmd_set_pp_params_v5 adm_params;
	int index = afe_get_port_index(port_id);

	if (index < 0 || index >= AFE_MAX_PORTS) {
		pr_err("%s: invalid port idx %d portid %#x\n",
			__func__, index, port_id);
		return 0;
	}

	pr_debug("%s: Port id %#x, index %d\n", __func__, port_id, index);

	if (!aud_cal || aud_cal->cal_size == 0) {
		pr_debug("%s: No ADM cal to send for port_id = %#x!\n",
			__func__, port_id);
		result = -EINVAL;
		goto done;
	}

	/* Build the APR header addressed to the COPP for this perf mode. */
	adm_params.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(20), APR_PKT_VER);
	adm_params.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
		sizeof(adm_params));
	adm_params.hdr.src_svc = APR_SVC_ADM;
	adm_params.hdr.src_domain = APR_DOMAIN_APPS;
	adm_params.hdr.src_port = port_id;
	adm_params.hdr.dest_svc = APR_SVC_ADM;
	adm_params.hdr.dest_domain = APR_DOMAIN_ADSP;
	if (perf_mode == LEGACY_PCM_MODE)
		adm_params.hdr.dest_port =
			atomic_read(&this_adm.copp_id[index]);
	else
		adm_params.hdr.dest_port =
			atomic_read(&this_adm.copp_low_latency_id[index]);
	adm_params.hdr.token = port_id;
	adm_params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
	/* params are out-of-band: shared-memory address + mapping handle */
	adm_params.payload_addr_lsw = aud_cal->cal_paddr;
	adm_params.payload_addr_msw = 0;
	adm_params.mem_map_handle = atomic_read(&this_adm.mem_map_cal_handles[
				atomic_read(&this_adm.mem_map_cal_index)]);
	adm_params.payload_size = aud_cal->cal_size;

	atomic_set(&this_adm.copp_stat[index], 0);
	pr_debug("%s: Sending SET_PARAMS payload = 0x%x, size = %d\n",
		__func__, adm_params.payload_addr_lsw,
		adm_params.payload_size);
	result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_params);
	if (result < 0) {
		pr_err("%s: Set params failed port = %#x payload = 0x%x\n",
			__func__, port_id, aud_cal->cal_paddr);
		result = -EINVAL;
		goto done;
	}
	/* Wait for the callback */
	result = wait_event_timeout(this_adm.wait[index],
		atomic_read(&this_adm.copp_stat[index]),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!result) {
		pr_err("%s: Set params timed out port = %#x, payload = 0x%x\n",
			__func__, port_id, aud_cal->cal_paddr);
		result = -EINVAL;
		goto done;
	}

	result = 0;
done:
	return result;
}
/*
 * send_adm_cal() - map (if needed) and send audproc + audvol cal for a port.
 * @port_id:   destination AFE port.
 * @path:      audio_dev_ctrl path; converted to the ACDB path by
 *             subtracting 1.
 * @perf_mode: forwarded to send_adm_cal_block() for COPP selection.
 *
 * For each of the two calibration types the previously mapped buffer is
 * reused unless the ACDB physical address changed (with non-zero size) or
 * the cal grew beyond the cached mapping, in which case the old region is
 * unmapped and a fresh one mapped.  All failures are logged only.
 */
static void send_adm_cal(int port_id, int path, int perf_mode)
{
	int result = 0;
	s32 acdb_path;
	struct acdb_cal_block aud_cal;
	int size;

	pr_debug("%s\n", __func__);

	/* Maps audio_dev_ctrl path definition to ACDB definition */
	acdb_path = path - 1;
	/* TX audproc cal buffers are larger than the RX/default 4 KiB */
	if (acdb_path == TX_CAL)
		size = 4096 * 4;
	else
		size = 4096;

	pr_debug("%s: Sending audproc cal\n", __func__);
	get_audproc_cal(acdb_path, &aud_cal);

	/* map & cache buffers used */
	atomic_set(&this_adm.mem_map_cal_index, acdb_path);
	if (((this_adm.mem_addr_audproc[acdb_path].cal_paddr !=
		aud_cal.cal_paddr) && (aud_cal.cal_size > 0)) ||
		(aud_cal.cal_size >
		this_adm.mem_addr_audproc[acdb_path].cal_size)) {

		if (this_adm.mem_addr_audproc[acdb_path].cal_paddr != 0)
			adm_memory_unmap_regions(port_id);

		result = adm_memory_map_regions(port_id, &aud_cal.cal_paddr,
						0, &size, 1);
		if (result < 0) {
			pr_err("ADM audproc mmap did not work! path = %d, addr = 0x%x, size = %d\n",
				acdb_path, aud_cal.cal_paddr,
				aud_cal.cal_size);
		} else {
			this_adm.mem_addr_audproc[acdb_path].cal_paddr =
							aud_cal.cal_paddr;
			this_adm.mem_addr_audproc[acdb_path].cal_size = size;
		}
	}

	if (!send_adm_cal_block(port_id, &aud_cal, perf_mode))
		pr_debug("%s: Audproc cal sent for port id: %#x, path %d\n",
			__func__, port_id, acdb_path);
	else
		pr_debug("%s: Audproc cal not sent for port id: %#x, path %d\n",
			__func__, port_id, acdb_path);

	pr_debug("%s: Sending audvol cal\n", __func__);
	get_audvol_cal(acdb_path, &aud_cal);

	/* map & cache buffers used; audvol handles follow the audproc slots */
	atomic_set(&this_adm.mem_map_cal_index,
		(acdb_path + MAX_AUDPROC_TYPES));
	if (((this_adm.mem_addr_audvol[acdb_path].cal_paddr !=
		aud_cal.cal_paddr) && (aud_cal.cal_size > 0)) ||
		(aud_cal.cal_size >
		this_adm.mem_addr_audvol[acdb_path].cal_size)) {

		if (this_adm.mem_addr_audvol[acdb_path].cal_paddr != 0)
			adm_memory_unmap_regions(port_id);

		result = adm_memory_map_regions(port_id, &aud_cal.cal_paddr,
						0, &size, 1);
		if (result < 0) {
			pr_err("ADM audvol mmap did not work! path = %d, addr = 0x%x, size = %d\n",
				acdb_path, aud_cal.cal_paddr,
				aud_cal.cal_size);
		} else {
			this_adm.mem_addr_audvol[acdb_path].cal_paddr =
							aud_cal.cal_paddr;
			this_adm.mem_addr_audvol[acdb_path].cal_size = size;
		}
	}

	if (!send_adm_cal_block(port_id, &aud_cal, perf_mode))
		pr_debug("%s: Audvol cal sent for port id: %#x, path %d\n",
			__func__, port_id, acdb_path);
	else
		pr_debug("%s: Audvol cal not sent for port id: %#x, path %d\n",
			__func__, port_id, acdb_path);
}
  862. int adm_map_rtac_block(struct rtac_cal_block_data *cal_block)
  863. {
  864. int result = 0;
  865. pr_debug("%s\n", __func__);
  866. if (cal_block == NULL) {
  867. pr_err("%s: cal_block is NULL!\n",
  868. __func__);
  869. result = -EINVAL;
  870. goto done;
  871. }
  872. if (cal_block->cal_data.paddr == 0) {
  873. pr_debug("%s: No address to map!\n",
  874. __func__);
  875. result = -EINVAL;
  876. goto done;
  877. }
  878. if (cal_block->map_data.map_size == 0) {
  879. pr_debug("%s: map size is 0!\n",
  880. __func__);
  881. result = -EINVAL;
  882. goto done;
  883. }
  884. /* valid port ID needed for callback use primary I2S */
  885. atomic_set(&this_adm.mem_map_cal_index, ADM_RTAC);
  886. result = adm_memory_map_regions(PRIMARY_I2S_RX,
  887. &cal_block->cal_data.paddr, 0,
  888. &cal_block->map_data.map_size, 1);
  889. if (result < 0) {
  890. pr_err("%s: RTAC mmap did not work! addr = 0x%x, size = %d\n",
  891. __func__, cal_block->cal_data.paddr,
  892. cal_block->map_data.map_size);
  893. goto done;
  894. }
  895. cal_block->map_data.map_handle = atomic_read(
  896. &this_adm.mem_map_cal_handles[ADM_RTAC]);
  897. done:
  898. return result;
  899. }
/*
 * adm_unmap_rtac_block() - unmap the RTAC calibration shared-memory region.
 * @mem_map_handle: in/out; DSP handle to unmap, zeroed on success.  A NULL
 *                  pointer or zero handle is a benign no-op.
 *
 * If the caller's handle disagrees with the cached ADM_RTAC handle, the
 * caller's value wins (it is written into the slot before unmapping).
 * Returns 0 on success/no-op or the adm_memory_unmap_regions() error.
 */
int adm_unmap_rtac_block(uint32_t *mem_map_handle)
{
	int result = 0;

	pr_debug("%s\n", __func__);

	if (mem_map_handle == NULL) {
		pr_debug("%s: Map handle is NULL, nothing to unmap\n",
			__func__);
		goto done;
	}

	if (*mem_map_handle == 0) {
		pr_debug("%s: Map handle is 0, nothing to unmap\n",
			__func__);
		goto done;
	}

	if (*mem_map_handle != atomic_read(
			&this_adm.mem_map_cal_handles[ADM_RTAC])) {
		pr_err("%s: Map handles do not match! Unmapping RTAC, RTAC map 0x%x, ADM map 0x%x\n",
			__func__, *mem_map_handle, atomic_read(
			&this_adm.mem_map_cal_handles[ADM_RTAC]));

		/* if mismatch use handle passed in to unmap */
		atomic_set(&this_adm.mem_map_cal_handles[ADM_RTAC],
			   *mem_map_handle);
	}

	/* valid port ID needed for callback use primary I2S */
	atomic_set(&this_adm.mem_map_cal_index, ADM_RTAC);
	result = adm_memory_unmap_regions(PRIMARY_I2S_RX);
	if (result < 0) {
		pr_debug("%s: adm_memory_unmap_regions failed, error %d\n",
			__func__, result);
	} else {
		atomic_set(&this_adm.mem_map_cal_handles[ADM_RTAC], 0);
		*mem_map_handle = 0;
	}
done:
	return result;
}
/*
 * adm_unmap_cal_blocks() - unmap every cached ADM calibration region.
 *
 * Walks all ADM_MAX_CAL_TYPES handle slots; for each mapped slot it clears
 * the matching cached address/size bookkeeping and unmaps the region.
 * Returns 0, or the LAST unmap error seen (earlier errors are overwritten).
 */
int adm_unmap_cal_blocks(void)
{
	int	i;
	int	result = 0;
	int	result2 = 0;

	for (i = 0; i < ADM_MAX_CAL_TYPES; i++) {
		if (atomic_read(&this_adm.mem_map_cal_handles[i]) != 0) {

			if (i <= ADM_TX_AUDPROC_CAL) {
				/* audproc slots map 1:1 onto the handle index */
				this_adm.mem_addr_audproc[i].cal_paddr = 0;
				this_adm.mem_addr_audproc[i].cal_size = 0;
			} else if (i <= ADM_TX_AUDVOL_CAL) {
				/* audvol slots are offset by ADM_RX_AUDVOL_CAL */
				this_adm.mem_addr_audvol
					[(i - ADM_RX_AUDVOL_CAL)].cal_paddr
					= 0;
				this_adm.mem_addr_audvol
					[(i - ADM_RX_AUDVOL_CAL)].cal_size
					= 0;
			} else if (i == ADM_CUSTOM_TOP_CAL) {
				/* force topology re-download on next open */
				this_adm.set_custom_topology = 1;
			} else {
				continue;
			}

			/* valid port ID needed for callback use primary I2S */
			atomic_set(&this_adm.mem_map_cal_index, i);
			result2 = adm_memory_unmap_regions(PRIMARY_I2S_RX);
			if (result2 < 0) {
				pr_err("%s: adm_memory_unmap_regions failed, err %d\n",
					__func__, result2);
				result = result2;
			} else {
				atomic_set(&this_adm.mem_map_cal_handles[i],
					0);
			}
		}
	}
	return result;
}
  973. int adm_connect_afe_port(int mode, int session_id, int port_id)
  974. {
  975. struct adm_cmd_connect_afe_port_v5 cmd;
  976. int ret = 0;
  977. int index;
  978. pr_debug("%s: port %d session id:%d mode:%d\n", __func__,
  979. port_id, session_id, mode);
  980. port_id = afe_convert_virtual_to_portid(port_id);
  981. if (afe_validate_port(port_id) < 0) {
  982. pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
  983. return -ENODEV;
  984. }
  985. if (this_adm.apr == NULL) {
  986. this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
  987. 0xFFFFFFFF, &this_adm);
  988. if (this_adm.apr == NULL) {
  989. pr_err("%s: Unable to register ADM\n", __func__);
  990. ret = -ENODEV;
  991. return ret;
  992. }
  993. rtac_set_adm_handle(this_adm.apr);
  994. }
  995. index = afe_get_port_index(port_id);
  996. pr_debug("%s: Port ID %#x, index %d\n", __func__, port_id, index);
  997. cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
  998. APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
  999. cmd.hdr.pkt_size = sizeof(cmd);
  1000. cmd.hdr.src_svc = APR_SVC_ADM;
  1001. cmd.hdr.src_domain = APR_DOMAIN_APPS;
  1002. cmd.hdr.src_port = port_id;
  1003. cmd.hdr.dest_svc = APR_SVC_ADM;
  1004. cmd.hdr.dest_domain = APR_DOMAIN_ADSP;
  1005. cmd.hdr.dest_port = port_id;
  1006. cmd.hdr.token = port_id;
  1007. cmd.hdr.opcode = ADM_CMD_CONNECT_AFE_PORT_V5;
  1008. cmd.mode = mode;
  1009. cmd.session_id = session_id;
  1010. cmd.afe_port_id = port_id;
  1011. atomic_set(&this_adm.copp_stat[index], 0);
  1012. ret = apr_send_pkt(this_adm.apr, (uint32_t *)&cmd);
  1013. if (ret < 0) {
  1014. pr_err("%s:ADM enable for port %#x failed\n",
  1015. __func__, port_id);
  1016. ret = -EINVAL;
  1017. goto fail_cmd;
  1018. }
  1019. /* Wait for the callback with copp id */
  1020. ret = wait_event_timeout(this_adm.wait[index],
  1021. atomic_read(&this_adm.copp_stat[index]),
  1022. msecs_to_jiffies(TIMEOUT_MS));
  1023. if (!ret) {
  1024. pr_err("%s ADM connect AFE failed for port %#x\n", __func__,
  1025. port_id);
  1026. ret = -EINVAL;
  1027. goto fail_cmd;
  1028. }
  1029. atomic_inc(&this_adm.copp_cnt[index]);
  1030. return 0;
  1031. fail_cmd:
  1032. return ret;
  1033. }
  1034. int adm_open(int port_id, int path, int rate, int channel_mode, int topology,
  1035. int perf_mode, uint16_t bits_per_sample)
  1036. {
  1037. struct adm_cmd_device_open_v5 open;
  1038. int ret = 0;
  1039. int index;
  1040. int tmp_port = q6audio_get_port_id(port_id);
  1041. pr_debug("%s: port %#x path:%d rate:%d mode:%d perf_mode:%d\n",
  1042. __func__, port_id, path, rate, channel_mode, perf_mode);
  1043. port_id = q6audio_convert_virtual_to_portid(port_id);
  1044. if (q6audio_validate_port(port_id) < 0) {
  1045. pr_err("%s port idi[%#x] is invalid\n", __func__, port_id);
  1046. return -ENODEV;
  1047. }
  1048. index = q6audio_get_port_index(port_id);
  1049. pr_debug("%s: Port ID %#x, index %d\n", __func__, port_id, index);
  1050. if (this_adm.apr == NULL) {
  1051. this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
  1052. 0xFFFFFFFF, &this_adm);
  1053. if (this_adm.apr == NULL) {
  1054. pr_err("%s: Unable to register ADM\n", __func__);
  1055. ret = -ENODEV;
  1056. return ret;
  1057. }
  1058. rtac_set_adm_handle(this_adm.apr);
  1059. }
  1060. if (perf_mode == LEGACY_PCM_MODE) {
  1061. atomic_set(&this_adm.copp_perf_mode[index], 0);
  1062. send_adm_custom_topology(port_id);
  1063. } else {
  1064. atomic_set(&this_adm.copp_perf_mode[index], 1);
  1065. }
  1066. /* Create a COPP if port id are not enabled */
  1067. if ((perf_mode == LEGACY_PCM_MODE &&
  1068. (atomic_read(&this_adm.copp_cnt[index]) == 0)) ||
  1069. (perf_mode != LEGACY_PCM_MODE &&
  1070. (atomic_read(&this_adm.copp_low_latency_cnt[index]) == 0))) {
  1071. pr_debug("%s:opening ADM: perf_mode: %d\n", __func__,
  1072. perf_mode);
  1073. open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
  1074. APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
  1075. open.hdr.pkt_size = sizeof(open);
  1076. open.hdr.src_svc = APR_SVC_ADM;
  1077. open.hdr.src_domain = APR_DOMAIN_APPS;
  1078. open.hdr.src_port = tmp_port;
  1079. open.hdr.dest_svc = APR_SVC_ADM;
  1080. open.hdr.dest_domain = APR_DOMAIN_ADSP;
  1081. open.hdr.dest_port = tmp_port;
  1082. open.hdr.token = port_id;
  1083. open.hdr.opcode = ADM_CMD_DEVICE_OPEN_V5;
  1084. if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE)
  1085. open.flags = ADM_ULTRA_LOW_LATENCY_DEVICE_SESSION;
  1086. else if (perf_mode == LOW_LATENCY_PCM_MODE)
  1087. open.flags = ADM_LOW_LATENCY_DEVICE_SESSION;
  1088. else
  1089. open.flags = ADM_LEGACY_DEVICE_SESSION;
  1090. open.mode_of_operation = path;
  1091. open.endpoint_id_1 = tmp_port;
  1092. if (this_adm.ec_ref_rx == -1) {
  1093. open.endpoint_id_2 = 0xFFFF;
  1094. } else if (this_adm.ec_ref_rx && (path != 1)) {
  1095. open.endpoint_id_2 = this_adm.ec_ref_rx;
  1096. this_adm.ec_ref_rx = -1;
  1097. }
  1098. open.topology_id = topology;
  1099. if ((open.topology_id == VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
  1100. (open.topology_id == VPM_TX_DM_FLUENCE_COPP_TOPOLOGY) ||
  1101. (open.topology_id == VPM_TX_DM_RFECNS_COPP_TOPOLOGY) ||
  1102. (open.topology_id == VPM_TX_SM_LVVE_COPP_TOPOLOGY) ||
  1103. /* LVVE for Barge-in */
  1104. (open.topology_id == 0x1000BFF0) ||
  1105. (open.topology_id == 0x1000BFF1))
  1106. rate = 16000;
  1107. if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE) {
  1108. open.topology_id = NULL_COPP_TOPOLOGY;
  1109. rate = ULL_SUPPORTED_SAMPLE_RATE;
  1110. if(channel_mode > ULL_MAX_SUPPORTED_CHANNEL)
  1111. channel_mode = ULL_MAX_SUPPORTED_CHANNEL;
  1112. } else if (perf_mode == LOW_LATENCY_PCM_MODE) {
  1113. if ((open.topology_id == DOLBY_ADM_COPP_TOPOLOGY_ID) ||
  1114. (open.topology_id == SRS_TRUMEDIA_TOPOLOGY_ID))
  1115. open.topology_id = DEFAULT_COPP_TOPOLOGY;
  1116. }
  1117. open.dev_num_channel = channel_mode & 0x00FF;
  1118. open.bit_width = bits_per_sample;
  1119. WARN_ON(perf_mode == ULTRA_LOW_LATENCY_PCM_MODE &&
  1120. (rate != 48000));
  1121. open.sample_rate = rate;
  1122. memset(open.dev_channel_mapping, 0, 8);
  1123. if (channel_mode == 1) {
  1124. open.dev_channel_mapping[0] = PCM_CHANNEL_FC;
  1125. } else if (channel_mode == 2) {
  1126. open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
  1127. open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
  1128. } else if (channel_mode == 3) {
  1129. open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
  1130. open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
  1131. open.dev_channel_mapping[2] = PCM_CHANNEL_FC;
  1132. } else if (channel_mode == 4) {
  1133. open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
  1134. open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
  1135. open.dev_channel_mapping[2] = PCM_CHANNEL_RB;
  1136. open.dev_channel_mapping[3] = PCM_CHANNEL_LB;
  1137. } else if (channel_mode == 5) {
  1138. open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
  1139. open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
  1140. open.dev_channel_mapping[2] = PCM_CHANNEL_FC;
  1141. open.dev_channel_mapping[3] = PCM_CHANNEL_LB;
  1142. open.dev_channel_mapping[4] = PCM_CHANNEL_RB;
  1143. } else if (channel_mode == 6) {
  1144. open.dev_channel_mapping[0] = PCM_CHANNEL_FC;
  1145. open.dev_channel_mapping[1] = PCM_CHANNEL_FL;
  1146. open.dev_channel_mapping[2] = PCM_CHANNEL_LB;
  1147. open.dev_channel_mapping[3] = PCM_CHANNEL_FR;
  1148. open.dev_channel_mapping[4] = PCM_CHANNEL_RB;
  1149. open.dev_channel_mapping[5] = PCM_CHANNEL_LFE;
  1150. } else if (channel_mode == 8) {
  1151. open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
  1152. open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
  1153. open.dev_channel_mapping[2] = PCM_CHANNEL_LFE;
  1154. open.dev_channel_mapping[3] = PCM_CHANNEL_FC;
  1155. open.dev_channel_mapping[4] = PCM_CHANNEL_LB;
  1156. open.dev_channel_mapping[5] = PCM_CHANNEL_RB;
  1157. open.dev_channel_mapping[6] = PCM_CHANNEL_FLC;
  1158. open.dev_channel_mapping[7] = PCM_CHANNEL_FRC;
  1159. } else {
  1160. pr_err("%s invalid num_chan %d\n", __func__,
  1161. channel_mode);
  1162. return -EINVAL;
  1163. }
  1164. if ((open.dev_num_channel > 2) &&
  1165. multi_ch_map.set_channel_map)
  1166. memcpy(open.dev_channel_mapping,
  1167. multi_ch_map.channel_mapping,
  1168. PCM_FORMAT_MAX_NUM_CHANNEL);
  1169. pr_debug("%s: port_id=%#x rate=%d topology_id=0x%X\n",
  1170. __func__, open.endpoint_id_1, open.sample_rate,
  1171. open.topology_id);
  1172. atomic_set(&this_adm.copp_stat[index], 0);
  1173. ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
  1174. if (ret < 0) {
  1175. pr_err("%s:ADM enable for port %#x for[%d] failed\n",
  1176. __func__, tmp_port, port_id);
  1177. ret = -EINVAL;
  1178. goto fail_cmd;
  1179. }
  1180. /* Wait for the callback with copp id */
  1181. ret = wait_event_timeout(this_adm.wait[index],
  1182. atomic_read(&this_adm.copp_stat[index]),
  1183. msecs_to_jiffies(TIMEOUT_MS));
  1184. if (!ret) {
  1185. pr_err("%s ADM open failed for port %#x for [%d]\n",
  1186. __func__, tmp_port, port_id);
  1187. ret = -EINVAL;
  1188. goto fail_cmd;
  1189. }
  1190. }
  1191. if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE ||
  1192. perf_mode == LOW_LATENCY_PCM_MODE) {
  1193. atomic_inc(&this_adm.copp_low_latency_cnt[index]);
  1194. pr_debug("%s: index: %d coppid: %d", __func__, index,
  1195. atomic_read(&this_adm.copp_low_latency_id[index]));
  1196. } else {
  1197. atomic_inc(&this_adm.copp_cnt[index]);
  1198. pr_debug("%s: index: %d coppid: %d", __func__, index,
  1199. atomic_read(&this_adm.copp_id[index]));
  1200. }
  1201. return 0;
  1202. fail_cmd:
  1203. return ret;
  1204. }
  1205. int adm_multi_ch_copp_open(int port_id, int path, int rate, int channel_mode,
  1206. int topology, int perf_mode, uint16_t bits_per_sample)
  1207. {
  1208. int ret = 0;
  1209. ret = adm_open(port_id, path, rate, channel_mode,
  1210. topology, perf_mode, bits_per_sample);
  1211. return ret;
  1212. }
  1213. int adm_matrix_map(int session_id, int path, int num_copps,
  1214. unsigned int *port_id, int copp_id, int perf_mode)
  1215. {
  1216. struct adm_cmd_matrix_map_routings_v5 *route;
  1217. struct adm_session_map_node_v5 *node;
  1218. uint16_t *copps_list;
  1219. int cmd_size = 0;
  1220. int ret = 0, i = 0;
  1221. void *payload = NULL;
  1222. void *matrix_map = NULL;
  1223. /* Assumes port_ids have already been validated during adm_open */
  1224. int index = q6audio_get_port_index(copp_id);
  1225. if (index < 0 || index >= AFE_MAX_PORTS) {
  1226. pr_err("%s: invalid port idx %d token %d\n",
  1227. __func__, index, copp_id);
  1228. return 0;
  1229. }
  1230. cmd_size = (sizeof(struct adm_cmd_matrix_map_routings_v5) +
  1231. sizeof(struct adm_session_map_node_v5) +
  1232. (sizeof(uint32_t) * num_copps));
  1233. matrix_map = kzalloc(cmd_size, GFP_KERNEL);
  1234. if (matrix_map == NULL) {
  1235. pr_err("%s: Mem alloc failed\n", __func__);
  1236. ret = -EINVAL;
  1237. return ret;
  1238. }
  1239. route = (struct adm_cmd_matrix_map_routings_v5 *)matrix_map;
  1240. pr_debug("%s: session 0x%x path:%d num_copps:%d port_id[0]:%#x coppid[%d]\n",
  1241. __func__, session_id, path, num_copps, port_id[0], copp_id);
  1242. route->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
  1243. APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
  1244. route->hdr.pkt_size = cmd_size;
  1245. route->hdr.src_svc = 0;
  1246. route->hdr.src_domain = APR_DOMAIN_APPS;
  1247. route->hdr.src_port = copp_id;
  1248. route->hdr.dest_svc = APR_SVC_ADM;
  1249. route->hdr.dest_domain = APR_DOMAIN_ADSP;
  1250. if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE ||
  1251. perf_mode == LOW_LATENCY_PCM_MODE) {
  1252. route->hdr.dest_port =
  1253. atomic_read(&this_adm.copp_low_latency_id[index]);
  1254. } else {
  1255. route->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
  1256. }
  1257. route->hdr.token = copp_id;
  1258. route->hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS_V5;
  1259. route->num_sessions = 1;
  1260. switch (path) {
  1261. case 0x1:
  1262. route->matrix_id = ADM_MATRIX_ID_AUDIO_RX;
  1263. break;
  1264. case 0x2:
  1265. case 0x3:
  1266. route->matrix_id = ADM_MATRIX_ID_AUDIO_TX;
  1267. break;
  1268. default:
  1269. pr_err("%s: Wrong path set[%d]\n", __func__, path);
  1270. break;
  1271. }
  1272. payload = ((u8 *)matrix_map +
  1273. sizeof(struct adm_cmd_matrix_map_routings_v5));
  1274. node = (struct adm_session_map_node_v5 *)payload;
  1275. node->session_id = session_id;
  1276. node->num_copps = num_copps;
  1277. payload = (u8 *)node + sizeof(struct adm_session_map_node_v5);
  1278. copps_list = (uint16_t *)payload;
  1279. for (i = 0; i < num_copps; i++) {
  1280. int tmp;
  1281. port_id[i] = q6audio_convert_virtual_to_portid(port_id[i]);
  1282. tmp = q6audio_get_port_index(port_id[i]);
  1283. if (tmp >= 0 && tmp < AFE_MAX_PORTS) {
  1284. if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE ||
  1285. perf_mode == LOW_LATENCY_PCM_MODE)
  1286. copps_list[i] =
  1287. atomic_read(&this_adm.copp_low_latency_id[tmp]);
  1288. else
  1289. copps_list[i] =
  1290. atomic_read(&this_adm.copp_id[tmp]);
  1291. }
  1292. else
  1293. continue;
  1294. pr_debug("%s: port_id[%#x]: %d, index: %d act coppid[0x%x]\n",
  1295. __func__, i, port_id[i], tmp, copps_list[i]);
  1296. }
  1297. atomic_set(&this_adm.copp_stat[index], 0);
  1298. ret = apr_send_pkt(this_adm.apr, (uint32_t *)matrix_map);
  1299. if (ret < 0) {
  1300. pr_err("%s: ADM routing for port %#x failed\n",
  1301. __func__, port_id[0]);
  1302. ret = -EINVAL;
  1303. goto fail_cmd;
  1304. }
  1305. ret = wait_event_timeout(this_adm.wait[index],
  1306. atomic_read(&this_adm.copp_stat[index]),
  1307. msecs_to_jiffies(TIMEOUT_MS));
  1308. if (!ret) {
  1309. pr_err("%s: ADM cmd Route failed for port %#x\n",
  1310. __func__, port_id[0]);
  1311. ret = -EINVAL;
  1312. goto fail_cmd;
  1313. }
  1314. if (perf_mode != ULTRA_LOW_LATENCY_PCM_MODE) {
  1315. for (i = 0; i < num_copps; i++)
  1316. send_adm_cal(port_id[i], path, perf_mode);
  1317. for (i = 0; i < num_copps; i++) {
  1318. int tmp, copp_id;
  1319. tmp = afe_get_port_index(port_id[i]);
  1320. if (tmp >= 0 && tmp < AFE_MAX_PORTS) {
  1321. if (perf_mode == LEGACY_PCM_MODE)
  1322. copp_id = atomic_read(
  1323. &this_adm.copp_id[tmp]);
  1324. else
  1325. copp_id = atomic_read(
  1326. &this_adm.copp_low_latency_id[tmp]);
  1327. rtac_add_adm_device(port_id[i],
  1328. copp_id, path, session_id);
  1329. pr_debug("%s, copp_id: %d\n",
  1330. __func__, copp_id);
  1331. } else
  1332. pr_debug("%s: Invalid port index %d",
  1333. __func__, tmp);
  1334. }
  1335. }
  1336. fail_cmd:
  1337. kfree(matrix_map);
  1338. return ret;
  1339. }
/*
 * adm_memory_map_regions() - map shared-memory buffers into the ADSP.
 * @port_id:    AFE port used to address the command and pick the wait queue.
 * @buf_add:    array of @bufcnt physical buffer addresses (32-bit LSW only;
 *              MSW is always written as 0).
 * @mempool_id: unused; the SHMEM8_4K pool is always selected.
 * @bufsz:      array of @bufcnt buffer sizes in bytes.
 * @bufcnt:     number of regions to map.
 *
 * Sends ADM_CMD_SHARED_MEM_MAP_REGIONS and waits up to 5 s for the response
 * (the returned handle is stored by adm_callback into the slot selected by
 * mem_map_cal_index).  Returns 0 on success, -ENODEV/-ENOMEM/-EINVAL on
 * failure.
 */
int adm_memory_map_regions(int port_id,
		uint32_t *buf_add, uint32_t mempool_id,
		uint32_t *bufsz, uint32_t bufcnt)
{
	struct  avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
	struct  avs_shared_map_region_payload *mregions = NULL;
	void    *mmap_region_cmd = NULL;
	void    *payload = NULL;
	int     ret = 0;
	int     i = 0;
	int     cmd_size = 0;
	int     index = 0;

	pr_debug("%s\n", __func__);
	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}

	port_id = q6audio_convert_virtual_to_portid(port_id);

	if (q6audio_validate_port(port_id) < 0) {
		pr_err("%s port id[%#x] is invalid\n", __func__, port_id);
		return -ENODEV;
	}

	index = q6audio_get_port_index(port_id);

	/* fixed header plus one region descriptor per buffer */
	cmd_size = sizeof(struct avs_cmd_shared_mem_map_regions)
			+ sizeof(struct avs_shared_map_region_payload)
			* bufcnt;

	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
	if (!mmap_region_cmd) {
		pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
		return -ENOMEM;
	}
	mmap_regions = (struct avs_cmd_shared_mem_map_regions *)mmap_region_cmd;
	mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
						APR_HDR_LEN(APR_HDR_SIZE),
								APR_PKT_VER);
	mmap_regions->hdr.pkt_size = cmd_size;
	mmap_regions->hdr.src_port = 0;
	mmap_regions->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	mmap_regions->hdr.token = port_id;
	mmap_regions->hdr.opcode = ADM_CMD_SHARED_MEM_MAP_REGIONS;
	mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL & 0x00ff;
	mmap_regions->num_regions = bufcnt & 0x00ff;
	mmap_regions->property_flag = 0x00;

	pr_debug("%s: map_regions->num_regions = %d\n", __func__,
				mmap_regions->num_regions);
	payload = ((u8 *) mmap_region_cmd +
				sizeof(struct avs_cmd_shared_mem_map_regions));
	mregions = (struct avs_shared_map_region_payload *)payload;

	for (i = 0; i < bufcnt; i++) {
		mregions->shm_addr_lsw = buf_add[i];
		mregions->shm_addr_msw = 0x00;	/* 32-bit addresses only */
		mregions->mem_size_bytes = bufsz[i];
		++mregions;
	}

	atomic_set(&this_adm.copp_stat[index], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *) mmap_region_cmd);
	if (ret < 0) {
		pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
					mmap_regions->hdr.opcode, ret);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_adm.wait[index],
			atomic_read(&this_adm.copp_stat[index]), 5 * HZ);
	if (!ret) {
		pr_err("%s: timeout. waited for memory_map\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
fail_cmd:
	kfree(mmap_region_cmd);
	return ret;
}
/*
 * adm_memory_unmap_regions() - unmap the currently selected shared region.
 * @port_id: AFE port used to address the command and pick the wait queue.
 *
 * Unmaps the region whose handle sits in mem_map_cal_handles[] at the slot
 * selected by mem_map_cal_index (callers set that index before calling).
 * Waits up to 5 s for the ack.  Returns 0 on success, -EINVAL on missing
 * APR handle, send failure or timeout, -ENODEV for an invalid port.
 */
int adm_memory_unmap_regions(int32_t port_id)
{
	struct  avs_cmd_shared_mem_unmap_regions unmap_regions;
	int     ret = 0;
	int     index = 0;

	pr_debug("%s\n", __func__);

	if (this_adm.apr == NULL) {
		pr_err("%s APR handle NULL\n", __func__);
		return -EINVAL;
	}
	port_id = q6audio_convert_virtual_to_portid(port_id);

	if (q6audio_validate_port(port_id) < 0) {
		pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
		return -ENODEV;
	}

	index = q6audio_get_port_index(port_id);

	unmap_regions.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
						APR_HDR_LEN(APR_HDR_SIZE),
							APR_PKT_VER);
	unmap_regions.hdr.pkt_size = sizeof(unmap_regions);
	unmap_regions.hdr.src_port = 0;
	unmap_regions.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	unmap_regions.hdr.token = port_id;
	unmap_regions.hdr.opcode = ADM_CMD_SHARED_MEM_UNMAP_REGIONS;
	/* handle comes from the slot the caller selected beforehand */
	unmap_regions.mem_map_handle = atomic_read(&this_adm.
		mem_map_cal_handles[atomic_read(&this_adm.mem_map_cal_index)]);
	atomic_set(&this_adm.copp_stat[index], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *) &unmap_regions);
	if (ret < 0) {
		pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
					unmap_regions.hdr.opcode, ret);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_adm.wait[index],
				 atomic_read(&this_adm.copp_stat[index]),
				 5 * HZ);
	if (!ret) {
		pr_err("%s: timeout. waited for memory_unmap index %d\n",
		       __func__, index);
		ret = -EINVAL;
		goto fail_cmd;
	} else {
		pr_debug("%s: Unmap handle 0x%x succeeded\n", __func__,
			 unmap_regions.mem_map_handle);
	}
fail_cmd:
	return ret;
}
  1468. #ifdef CONFIG_RTAC
  1469. int adm_get_copp_id(int port_index)
  1470. {
  1471. int copp_id;
  1472. pr_debug("%s\n", __func__);
  1473. if (port_index < 0) {
  1474. pr_err("%s: invalid port_id = %d\n", __func__, port_index);
  1475. return -EINVAL;
  1476. }
  1477. copp_id = atomic_read(&this_adm.copp_id[port_index]);
  1478. if (copp_id == RESET_COPP_ID)
  1479. copp_id = atomic_read(
  1480. &this_adm.copp_low_latency_id[port_index]);
  1481. return copp_id;
  1482. }
  1483. int adm_get_lowlatency_copp_id(int port_index)
  1484. {
  1485. pr_debug("%s\n", __func__);
  1486. if (port_index < 0) {
  1487. pr_err("%s: invalid port_id = %d\n", __func__, port_index);
  1488. return -EINVAL;
  1489. }
  1490. return atomic_read(&this_adm.copp_low_latency_id[port_index]);
  1491. }
  1492. #else
/* Stub when CONFIG_RTAC is disabled: COPP id lookup is unsupported. */
int adm_get_copp_id(int port_index)
{
	return -EINVAL;
}
/* Stub when CONFIG_RTAC is disabled: COPP id lookup is unsupported. */
int adm_get_lowlatency_copp_id(int port_index)
{
	return -EINVAL;
}
  1501. #endif /* #ifdef CONFIG_RTAC */
  1502. void adm_ec_ref_rx_id(int port_id)
  1503. {
  1504. this_adm.ec_ref_rx = port_id;
  1505. pr_debug("%s ec_ref_rx:%d", __func__, this_adm.ec_ref_rx);
  1506. }
/*
 * adm_close() - close the COPP opened on @port_id for the given perf mode.
 * @port_id:   virtual port id; converted to a real AFE port id below.
 * @perf_mode: LEGACY_PCM_MODE, LOW_LATENCY_PCM_MODE or
 *             ULTRA_LOW_LATENCY_PCM_MODE; selects which per-port COPP
 *             reference count and id slot is used.
 *
 * Decrements the per-port COPP refcount for the mode; only when the
 * count reaches zero is an ADM_CMD_DEVICE_CLOSE_V5 actually sent to the
 * DSP and completion awaited.  Returns 0 on success, -EINVAL on an
 * invalid port, send failure, or response timeout.
 */
int adm_close(int port_id, int perf_mode)
{
	struct apr_hdr close;

	int ret = 0;
	int index = 0;
	int copp_id = RESET_COPP_ID;

	port_id = q6audio_convert_virtual_to_portid(port_id);
	index = q6audio_get_port_index(port_id);
	if (q6audio_validate_port(port_id) < 0)
		return -EINVAL;

	pr_debug("%s port_id=%#x index %d perf_mode: %d\n", __func__, port_id,
		index, perf_mode);

	/*
	 * Drop one reference for this mode.  A zero count here means close
	 * was called more times than open; bail out (note: ret is still 0
	 * on this path).
	 */
	if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE ||
				perf_mode == LOW_LATENCY_PCM_MODE) {
		if (!(atomic_read(&this_adm.copp_low_latency_cnt[index]))) {
			pr_err("%s: copp count for port[%#x]is 0\n", __func__,
				port_id);
			goto fail_cmd;
		}
		atomic_dec(&this_adm.copp_low_latency_cnt[index]);
	} else {
		if (!(atomic_read(&this_adm.copp_cnt[index]))) {
			pr_err("%s: copp count for port[%#x]is 0\n", __func__,
				port_id);
			goto fail_cmd;
		}
		atomic_dec(&this_adm.copp_cnt[index]);
	}

	/*
	 * Send the DSP close only when the refcount for this mode's COPP
	 * has just hit zero (both LOW and ULTRA_LOW latency share the
	 * copp_low_latency slot, hence the != LEGACY test).
	 */
	if ((perf_mode == LEGACY_PCM_MODE &&
		!(atomic_read(&this_adm.copp_cnt[index]))) ||
		((perf_mode != LEGACY_PCM_MODE) &&
		!(atomic_read(&this_adm.copp_low_latency_cnt[index])))) {

		pr_debug("%s:Closing ADM: perf_mode: %d\n", __func__,
				perf_mode);
		/* Build the APR header for ADM_CMD_DEVICE_CLOSE_V5. */
		close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		close.pkt_size = sizeof(close);
		close.src_svc = APR_SVC_ADM;
		close.src_domain = APR_DOMAIN_APPS;
		close.src_port = port_id;
		close.dest_svc = APR_SVC_ADM;
		close.dest_domain = APR_DOMAIN_ADSP;
		/* dest_port carries the COPP id being torn down. */
		if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE ||
				perf_mode == LOW_LATENCY_PCM_MODE)
			close.dest_port =
			     atomic_read(&this_adm.copp_low_latency_id[index]);
		else
			close.dest_port = atomic_read(&this_adm.copp_id[index]);
		close.token = port_id;
		close.opcode = ADM_CMD_DEVICE_CLOSE_V5;

		/* Clear the status flag the APR callback will set. */
		atomic_set(&this_adm.copp_stat[index], 0);

		/*
		 * Capture the id (for rtac removal below) and mark the slot
		 * free *before* sending, matching the original ordering.
		 */
		if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE ||
				perf_mode == LOW_LATENCY_PCM_MODE) {
			copp_id = atomic_read(
				&this_adm.copp_low_latency_id[index]);
			pr_debug("%s:coppid %d portid=%#x index=%d coppcnt=%d\n",
				__func__,
				copp_id,
				port_id, index,
				atomic_read(
					&this_adm.copp_low_latency_cnt[index]));
			atomic_set(&this_adm.copp_low_latency_id[index],
				RESET_COPP_ID);
		} else {
			copp_id = atomic_read(&this_adm.copp_id[index]);
			pr_debug("%s:coppid %d portid=%#x index=%d coppcnt=%d\n",
				__func__,
				copp_id,
				port_id, index,
				atomic_read(&this_adm.copp_cnt[index]));
			atomic_set(&this_adm.copp_id[index],
				RESET_COPP_ID);
		}

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&close);
		if (ret < 0) {
			pr_err("%s ADM close failed\n", __func__);
			ret = -EINVAL;
			goto fail_cmd;
		}

		/* Block until the callback flags completion, or time out. */
		ret = wait_event_timeout(this_adm.wait[index],
				atomic_read(&this_adm.copp_stat[index]),
				msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			pr_err("%s: ADM cmd Route failed for port %#x\n",
						__func__, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
	}

	/* ULL streams are never registered with rtac, so skip removal. */
	if (perf_mode != ULTRA_LOW_LATENCY_PCM_MODE) {
		pr_debug("%s: remove adm device from rtac\n", __func__);
		rtac_remove_adm_device(port_id, copp_id);
	}

fail_cmd:
	return ret;
}
  1603. static int __init adm_init(void)
  1604. {
  1605. int i = 0;
  1606. this_adm.apr = NULL;
  1607. this_adm.set_custom_topology = 1;
  1608. this_adm.ec_ref_rx = -1;
  1609. for (i = 0; i < AFE_MAX_PORTS; i++) {
  1610. atomic_set(&this_adm.copp_id[i], RESET_COPP_ID);
  1611. atomic_set(&this_adm.copp_low_latency_id[i], RESET_COPP_ID);
  1612. atomic_set(&this_adm.copp_cnt[i], 0);
  1613. atomic_set(&this_adm.copp_low_latency_cnt[i], 0);
  1614. atomic_set(&this_adm.copp_stat[i], 0);
  1615. atomic_set(&this_adm.copp_perf_mode[i], 0);
  1616. init_waitqueue_head(&this_adm.wait[i]);
  1617. }
  1618. return 0;
  1619. }
/* Run adm_init() at device-initcall time; no platform device needed. */
device_initcall(adm_init);