lx_core.c

/* -*- linux-c -*- *
 *
 * ALSA driver for the digigram lx6464es interface
 * low-level interface
 *
 * Copyright (c) 2009 Tim Blechmann <tim@klingt.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */

/* #define RMH_DEBUG 1 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include "lx6464es.h"
#include "lx_core.h"

/* low-level register access */

static const unsigned long dsp_port_offsets[] = {
        0,
        0x400, 0x401, 0x402, 0x403, 0x404, 0x405, 0x406, 0x407,
        0x408, 0x409, 0x40a, 0x40b, 0x40c,
        0x410, 0x411, 0x412, 0x413, 0x414, 0x415, 0x416,
        0x420,
        0x430, 0x431, 0x432, 0x433, 0x434,
        0x440
};

static void __iomem *lx_dsp_register(struct lx6464es *chip, int port)
{
        void __iomem *base_address = chip->port_dsp_bar;
        return base_address + dsp_port_offsets[port]*4;
}

unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port)
{
        void __iomem *address = lx_dsp_register(chip, port);
        return ioread32(address);
}

static void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data,
                               u32 len)
{
        u32 __iomem *address = lx_dsp_register(chip, port);
        int i;

        /* we cannot use memcpy_fromio */
        for (i = 0; i != len; ++i)
                data[i] = ioread32(address + i);
}

void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data)
{
        void __iomem *address = lx_dsp_register(chip, port);
        iowrite32(data, address);
}

static void lx_dsp_reg_writebuf(struct lx6464es *chip, int port,
                                const u32 *data, u32 len)
{
        u32 __iomem *address = lx_dsp_register(chip, port);
        int i;

        /* we cannot use memcpy_toio */
        for (i = 0; i != len; ++i)
                iowrite32(data[i], address + i);
}

static const unsigned long plx_port_offsets[] = {
        0x04,
        0x40, 0x44, 0x48, 0x4c,
        0x50, 0x54, 0x58, 0x5c,
        0x64, 0x68, 0x6C
};

static void __iomem *lx_plx_register(struct lx6464es *chip, int port)
{
        void __iomem *base_address = chip->port_plx_remapped;
        return base_address + plx_port_offsets[port];
}

unsigned long lx_plx_reg_read(struct lx6464es *chip, int port)
{
        void __iomem *address = lx_plx_register(chip, port);
        return ioread32(address);
}

void lx_plx_reg_write(struct lx6464es *chip, int port, u32 data)
{
        void __iomem *address = lx_plx_register(chip, port);
        iowrite32(data, address);
}

u32 lx_plx_mbox_read(struct lx6464es *chip, int mbox_nr)
{
        int index;

        switch (mbox_nr) {
        case 1:
                index = ePLX_MBOX1;    break;
        case 2:
                index = ePLX_MBOX2;    break;
        case 3:
                index = ePLX_MBOX3;    break;
        case 4:
                index = ePLX_MBOX4;    break;
        case 5:
                index = ePLX_MBOX5;    break;
        case 6:
                index = ePLX_MBOX6;    break;
        case 7:
                index = ePLX_MBOX7;    break;
        case 0:                 /* reserved for HF flags */
                snd_BUG();
        default:
                return 0xdeadbeef;
        }

        return lx_plx_reg_read(chip, index);
}

int lx_plx_mbox_write(struct lx6464es *chip, int mbox_nr, u32 value)
{
        int index = -1;

        switch (mbox_nr) {
        case 1:
                index = ePLX_MBOX1;    break;
        case 3:
                index = ePLX_MBOX3;    break;
        case 4:
                index = ePLX_MBOX4;    break;
        case 5:
                index = ePLX_MBOX5;    break;
        case 6:
                index = ePLX_MBOX6;    break;
        case 7:
                index = ePLX_MBOX7;    break;
        case 0:                 /* reserved for HF flags */
        case 2:                 /* reserved for Pipe States
                                 * the DSP keeps an image of it */
                snd_BUG();
                return -EBADRQC;
        }

        lx_plx_reg_write(chip, index, value);
        return 0;
}

/* rmh */

#ifdef CONFIG_SND_DEBUG
#define CMD_NAME(a) a
#else
#define CMD_NAME(a) NULL
#endif

#define Reg_CSM_MR              0x00000002
#define Reg_CSM_MC              0x00000001

struct dsp_cmd_info {
        u32    dcCodeOp;        /* Op Code of the command (usually 1st 24-bits
                                 * word). */
        u16    dcCmdLength;     /* Command length in words of 24 bits. */
        u16    dcStatusType;    /* Status type: 0 for fixed length, 1 for
                                 * random. */
        u16    dcStatusLength;  /* Status length (if fixed). */
        char  *dcOpName;
};

/*
  Initialization and control data for the Microblaze interface
  - OpCode:
    the opcode field of the command set at the proper offset
  - CmdLength
    the number of command words
  - StatusType
    offset in the status registers: 0 means that the return value may be
    different from 0, and must be read
  - StatusLength
    the number of status words (in addition to the return value)
*/

static struct dsp_cmd_info dsp_commands[] =
{
        { (CMD_00_INFO_DEBUG << OPCODE_OFFSET) , 1 /*custom*/
          , 1 , 0 /**/                  , CMD_NAME("INFO_DEBUG") },
        { (CMD_01_GET_SYS_CFG << OPCODE_OFFSET) , 1 /**/
          , 1 , 2 /**/                  , CMD_NAME("GET_SYS_CFG") },
        { (CMD_02_SET_GRANULARITY << OPCODE_OFFSET) , 1 /**/
          , 1 , 0 /**/                  , CMD_NAME("SET_GRANULARITY") },
        { (CMD_03_SET_TIMER_IRQ << OPCODE_OFFSET) , 1 /**/
          , 1 , 0 /**/                  , CMD_NAME("SET_TIMER_IRQ") },
        { (CMD_04_GET_EVENT << OPCODE_OFFSET) , 1 /**/
          , 1 , 0 /*up to 10*/          , CMD_NAME("GET_EVENT") },
        { (CMD_05_GET_PIPES << OPCODE_OFFSET) , 1 /**/
          , 1 , 2 /*up to 4*/           , CMD_NAME("GET_PIPES") },
        { (CMD_06_ALLOCATE_PIPE << OPCODE_OFFSET) , 1 /**/
          , 0 , 0 /**/                  , CMD_NAME("ALLOCATE_PIPE") },
        { (CMD_07_RELEASE_PIPE << OPCODE_OFFSET) , 1 /**/
          , 0 , 0 /**/                  , CMD_NAME("RELEASE_PIPE") },
        { (CMD_08_ASK_BUFFERS << OPCODE_OFFSET) , 1 /**/
          , 1 , MAX_STREAM_BUFFER       , CMD_NAME("ASK_BUFFERS") },
        { (CMD_09_STOP_PIPE << OPCODE_OFFSET) , 1 /**/
          , 0 , 0 /*up to 2*/           , CMD_NAME("STOP_PIPE") },
        { (CMD_0A_GET_PIPE_SPL_COUNT << OPCODE_OFFSET) , 1 /**/
          , 1 , 1 /*up to 2*/           , CMD_NAME("GET_PIPE_SPL_COUNT") },
        { (CMD_0B_TOGGLE_PIPE_STATE << OPCODE_OFFSET) , 1 /*up to 5*/
          , 1 , 0 /**/                  , CMD_NAME("TOGGLE_PIPE_STATE") },
        { (CMD_0C_DEF_STREAM << OPCODE_OFFSET) , 1 /*up to 4*/
          , 1 , 0 /**/                  , CMD_NAME("DEF_STREAM") },
        { (CMD_0D_SET_MUTE << OPCODE_OFFSET) , 3 /**/
          , 1 , 0 /**/                  , CMD_NAME("SET_MUTE") },
        { (CMD_0E_GET_STREAM_SPL_COUNT << OPCODE_OFFSET) , 1 /**/
          , 1 , 2 /**/                  , CMD_NAME("GET_STREAM_SPL_COUNT") },
        { (CMD_0F_UPDATE_BUFFER << OPCODE_OFFSET) , 3 /*up to 4*/
          , 0 , 1 /**/                  , CMD_NAME("UPDATE_BUFFER") },
        { (CMD_10_GET_BUFFER << OPCODE_OFFSET) , 1 /**/
          , 1 , 4 /**/                  , CMD_NAME("GET_BUFFER") },
        { (CMD_11_CANCEL_BUFFER << OPCODE_OFFSET) , 1 /**/
          , 1 , 1 /*up to 4*/           , CMD_NAME("CANCEL_BUFFER") },
        { (CMD_12_GET_PEAK << OPCODE_OFFSET) , 1 /**/
          , 1 , 1 /**/                  , CMD_NAME("GET_PEAK") },
        { (CMD_13_SET_STREAM_STATE << OPCODE_OFFSET) , 1 /**/
          , 1 , 0 /**/                  , CMD_NAME("SET_STREAM_STATE") },
};
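
/* lx_message_init() prepares one rmh transaction: the opcode and the
 * command/status lengths are taken from the dsp_commands[] table above. */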

static void lx_message_init(struct lx_rmh *rmh, enum cmd_mb_opcodes cmd)
{
        snd_BUG_ON(cmd >= CMD_14_INVALID);

        rmh->cmd[0] = dsp_commands[cmd].dcCodeOp;
        rmh->cmd_len = dsp_commands[cmd].dcCmdLength;
        rmh->stat_len = dsp_commands[cmd].dcStatusLength;
        rmh->dsp_stat = dsp_commands[cmd].dcStatusType;
        rmh->cmd_idx = cmd;
        memset(&rmh->cmd[1], 0, (REG_CRM_NUMBER - 1) * sizeof(u32));

#ifdef CONFIG_SND_DEBUG
        memset(rmh->stat, 0, REG_CRM_NUMBER * sizeof(u32));
#endif
#ifdef RMH_DEBUG
        rmh->cmd_idx = cmd;
#endif
}

#ifdef RMH_DEBUG
#define LXRMH "lx6464es rmh: "
static void lx_message_dump(struct lx_rmh *rmh)
{
        u8 idx = rmh->cmd_idx;
        int i;

        snd_printk(LXRMH "command %s\n", dsp_commands[idx].dcOpName);

        for (i = 0; i != rmh->cmd_len; ++i)
                snd_printk(LXRMH "\tcmd[%d] %08x\n", i, rmh->cmd[i]);

        for (i = 0; i != rmh->stat_len; ++i)
                snd_printk(LXRMH "\tstat[%d]: %08x\n", i, rmh->stat[i]);
        snd_printk("\n");
}
#else
static inline void lx_message_dump(struct lx_rmh *rmh)
{}
#endif

/* sleep 500 - 100 = 400 times 100us -> the timeout is >= 40 ms */
#define XILINX_TIMEOUT_MS       40
#define XILINX_POLL_NO_SLEEP    100
#define XILINX_POLL_ITERATIONS  150
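
/* lx_message_send_atomic() performs one command/response exchange with the
 * embedded MicroBlaze: the command words are written to the eReg_CRM
 * registers, Reg_CSM_MC kicks the DSP, and the answer is busy-polled
 * (udelay) via Reg_CSM_MR.  Callers hold chip->msg_lock, so no sleeping
 * is allowed here. */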
static int lx_message_send_atomic(struct lx6464es *chip, struct lx_rmh *rmh)
{
        u32 reg = ED_DSP_TIMED_OUT;
        int dwloop;

        if (lx_dsp_reg_read(chip, eReg_CSM) & (Reg_CSM_MC | Reg_CSM_MR)) {
                snd_printk(KERN_ERR LXP "PIOSendMessage eReg_CSM %x\n", reg);
                return -EBUSY;
        }

        /* write command */
        lx_dsp_reg_writebuf(chip, eReg_CRM1, rmh->cmd, rmh->cmd_len);

        /* MicroBlaze gogogo */
        lx_dsp_reg_write(chip, eReg_CSM, Reg_CSM_MC);

        /* wait for device to answer */
        for (dwloop = 0; dwloop != XILINX_TIMEOUT_MS * 1000; ++dwloop) {
                if (lx_dsp_reg_read(chip, eReg_CSM) & Reg_CSM_MR) {
                        if (rmh->dsp_stat == 0)
                                reg = lx_dsp_reg_read(chip, eReg_CRM1);
                        else
                                reg = 0;
                        goto polling_successful;
                } else
                        udelay(1);
        }
        snd_printk(KERN_WARNING LXP "TIMEOUT lx_message_send_atomic! "
                   "polling failed\n");

polling_successful:
        if ((reg & ERROR_VALUE) == 0) {
                /* read response */
                if (rmh->stat_len) {
                        snd_BUG_ON(rmh->stat_len >= (REG_CRM_NUMBER-1));
                        lx_dsp_reg_readbuf(chip, eReg_CRM2, rmh->stat,
                                           rmh->stat_len);
                }
        } else
                snd_printk(LXP "rmh error: %08x\n", reg);

        /* clear Reg_CSM_MR */
        lx_dsp_reg_write(chip, eReg_CSM, 0);

        switch (reg) {
        case ED_DSP_TIMED_OUT:
                snd_printk(KERN_WARNING LXP "lx_message_send: dsp timeout\n");
                return -ETIMEDOUT;

        case ED_DSP_CRASHED:
                snd_printk(KERN_WARNING LXP "lx_message_send: dsp crashed\n");
                return -EAGAIN;
        }

        lx_message_dump(rmh);

        return reg;
}

/* low-level dsp access */
int __devinit lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version)
{
        u16 ret;
        unsigned long flags;

        spin_lock_irqsave(&chip->msg_lock, flags);

        lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG);
        ret = lx_message_send_atomic(chip, &chip->rmh);

        *rdsp_version = chip->rmh.stat[1];
        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return ret;
}

int lx_dsp_get_clock_frequency(struct lx6464es *chip, u32 *rfreq)
{
        u16 ret = 0;
        unsigned long flags;
        u32 freq_raw = 0;
        u32 freq = 0;
        u32 frequency = 0;

        spin_lock_irqsave(&chip->msg_lock, flags);

        lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG);
        ret = lx_message_send_atomic(chip, &chip->rmh);

        if (ret == 0) {
                freq_raw = chip->rmh.stat[0] >> FREQ_FIELD_OFFSET;
                freq = freq_raw & XES_FREQ_COUNT8_MASK;

                if ((freq < XES_FREQ_COUNT8_48_MAX) ||
                    (freq > XES_FREQ_COUNT8_44_MIN))
                        frequency = 0; /* unknown */
                else if (freq >= XES_FREQ_COUNT8_44_MAX)
                        frequency = 44100;
                else
                        frequency = 48000;
        }

        spin_unlock_irqrestore(&chip->msg_lock, flags);

        *rfreq = frequency * chip->freq_ratio;

        return ret;
}

int lx_dsp_get_mac(struct lx6464es *chip)
{
        u32 macmsb, maclsb;

        macmsb = lx_dsp_reg_read(chip, eReg_ADMACESMSB) & 0x00FFFFFF;
        maclsb = lx_dsp_reg_read(chip, eReg_ADMACESLSB) & 0x00FFFFFF;

        /* todo: endianness handling */
        chip->mac_address[5] = ((u8 *)(&maclsb))[0];
        chip->mac_address[4] = ((u8 *)(&maclsb))[1];
        chip->mac_address[3] = ((u8 *)(&maclsb))[2];
        chip->mac_address[2] = ((u8 *)(&macmsb))[0];
        chip->mac_address[1] = ((u8 *)(&macmsb))[1];
        chip->mac_address[0] = ((u8 *)(&macmsb))[2];

        return 0;
}

int lx_dsp_set_granularity(struct lx6464es *chip, u32 gran)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&chip->msg_lock, flags);

        lx_message_init(&chip->rmh, CMD_02_SET_GRANULARITY);
        chip->rmh.cmd[0] |= gran;

        ret = lx_message_send_atomic(chip, &chip->rmh);
        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return ret;
}

int lx_dsp_read_async_events(struct lx6464es *chip, u32 *data)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&chip->msg_lock, flags);

        lx_message_init(&chip->rmh, CMD_04_GET_EVENT);
        chip->rmh.stat_len = 9; /* we don't necessarily need the full length */

        ret = lx_message_send_atomic(chip, &chip->rmh);

        if (!ret)
                memcpy(data, chip->rmh.stat, chip->rmh.stat_len * sizeof(u32));

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return ret;
}

#define CSES_TIMEOUT     100     /* microseconds */
#define CSES_CE          0x0001
#define CSES_BROADCAST   0x0002
#define CSES_UPDATE_LDSV 0x0004

int lx_dsp_es_check_pipeline(struct lx6464es *chip)
{
        int i;

        for (i = 0; i != CSES_TIMEOUT; ++i) {
                /*
                 * the CSES_UPDATE_LDSV bit goes to 1 as soon as the macprog
                 * is ready.  it goes back to 0 once the first read has been
                 * done.  for now we drop this test, because the bit only
                 * goes to 1 about 200 to 400 ms after the confES register
                 * has been written (kick of the xilinx ES).
                 *
                 * we only test the CE bit.
                 * */
                u32 cses = lx_dsp_reg_read(chip, eReg_CSES);

                if ((cses & CSES_CE) == 0)
                        return 0;

                udelay(1);
        }

        return -ETIMEDOUT;
}
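
/* PIPE_INFO_TO_CMD() encodes the pipe number and its direction (the
 * capture flag) into the ID field of the first command word. */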

#define PIPE_INFO_TO_CMD(capture, pipe)                                 \
        ((u32)((u32)(pipe) | ((capture) ? ID_IS_CAPTURE : 0L)) << ID_OFFSET)


/* low-level pipe handling */
int lx_pipe_allocate(struct lx6464es *chip, u32 pipe, int is_capture,
                     int channels)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_06_ALLOCATE_PIPE);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= channels;

        err = lx_message_send_atomic(chip, &chip->rmh);
        spin_unlock_irqrestore(&chip->msg_lock, flags);

        if (err != 0)
                snd_printk(KERN_ERR "lx6464es: could not allocate pipe\n");

        return err;
}

int lx_pipe_release(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_07_RELEASE_PIPE);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);
        spin_unlock_irqrestore(&chip->msg_lock, flags);

        return err;
}

int lx_buffer_ask(struct lx6464es *chip, u32 pipe, int is_capture,
                  u32 *r_needed, u32 *r_freed, u32 *size_array)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

#ifdef CONFIG_SND_DEBUG
        if (size_array)
                memset(size_array, 0, sizeof(u32)*MAX_STREAM_BUFFER);
#endif

        *r_needed = 0;
        *r_freed = 0;

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_08_ASK_BUFFERS);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        if (!err) {
                int i;
                for (i = 0; i < MAX_STREAM_BUFFER; ++i) {
                        u32 stat = chip->rmh.stat[i];
                        if (stat & (BF_EOB << BUFF_FLAGS_OFFSET)) {
                                /* finished */
                                *r_freed += 1;
                                if (size_array)
                                        size_array[i] = stat & MASK_DATA_SIZE;
                        } else if ((stat & (BF_VALID << BUFF_FLAGS_OFFSET))
                                   == 0)
                                /* free */
                                *r_needed += 1;
                }

#if 0
                snd_printdd(LXP "CMD_08_ASK_BUFFERS: needed %d, freed %d\n",
                            *r_needed, *r_freed);
                for (i = 0; i < MAX_STREAM_BUFFER; ++i) {
                        for (i = 0; i != chip->rmh.stat_len; ++i)
                                snd_printdd("  stat[%d]: %x, %x\n", i,
                                            chip->rmh.stat[i],
                                            chip->rmh.stat[i] & MASK_DATA_SIZE);
                }
#endif
        }

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}

int lx_pipe_stop(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_09_STOP_PIPE);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);
        spin_unlock_irqrestore(&chip->msg_lock, flags);

        return err;
}

static int lx_pipe_toggle_state(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_0B_TOGGLE_PIPE_STATE);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);
        spin_unlock_irqrestore(&chip->msg_lock, flags);

        return err;
}


int lx_pipe_start(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err;

        err = lx_pipe_wait_for_idle(chip, pipe, is_capture);
        if (err < 0)
                return err;

        err = lx_pipe_toggle_state(chip, pipe, is_capture);

        return err;
}

int lx_pipe_pause(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err = 0;

        err = lx_pipe_wait_for_start(chip, pipe, is_capture);
        if (err < 0)
                return err;

        err = lx_pipe_toggle_state(chip, pipe, is_capture);

        return err;
}

int lx_pipe_sample_count(struct lx6464es *chip, u32 pipe, int is_capture,
                         u64 *rsample_count)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.stat_len = 2; /* need all words here! */

        err = lx_message_send_atomic(chip, &chip->rmh); /* don't sleep! */

        if (err != 0)
                snd_printk(KERN_ERR
                           "lx6464es: could not query pipe's sample count\n");
        else {
                *rsample_count = ((u64)(chip->rmh.stat[0] & MASK_SPL_COUNT_HI)
                                  << 24)        /* hi part */
                        + chip->rmh.stat[1];    /* lo part */
        }

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}

int lx_pipe_state(struct lx6464es *chip, u32 pipe, int is_capture, u16 *rstate)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        if (err != 0)
                snd_printk(KERN_ERR "lx6464es: could not query pipe's state\n");
        else
                *rstate = (chip->rmh.stat[0] >> PSTATE_OFFSET) & 0x0F;

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}

static int lx_pipe_wait_for_state(struct lx6464es *chip, u32 pipe,
                                  int is_capture, u16 state)
{
        int i;

        /* max 2*PCMOnlyGranularity = 2*1024 at 44100 = < 50 ms:
         * timeout 50 ms */
        for (i = 0; i != 50; ++i) {
                u16 current_state;
                int err = lx_pipe_state(chip, pipe, is_capture, &current_state);

                if (err < 0)
                        return err;

                if (current_state == state)
                        return 0;

                mdelay(1);
        }

        return -ETIMEDOUT;
}

int lx_pipe_wait_for_start(struct lx6464es *chip, u32 pipe, int is_capture)
{
        return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_RUN);
}

int lx_pipe_wait_for_idle(struct lx6464es *chip, u32 pipe, int is_capture)
{
        return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_IDLE);
}

/* low-level stream handling */
int lx_stream_set_state(struct lx6464es *chip, u32 pipe,
                        int is_capture, enum stream_state_t state)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_13_SET_STREAM_STATE);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= state;

        err = lx_message_send_atomic(chip, &chip->rmh);
        spin_unlock_irqrestore(&chip->msg_lock, flags);

        return err;
}

int lx_stream_set_format(struct lx6464es *chip, struct snd_pcm_runtime *runtime,
                         u32 pipe, int is_capture)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        u32 channels = runtime->channels;

        if (runtime->channels != channels)
                snd_printk(KERN_ERR LXP "channel count mismatch: %d vs %d",
                           runtime->channels, channels);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_0C_DEF_STREAM);

        chip->rmh.cmd[0] |= pipe_cmd;

        if (runtime->sample_bits == 16)
                /* 16 bit format */
                chip->rmh.cmd[0] |= (STREAM_FMT_16b << STREAM_FMT_OFFSET);

        if (snd_pcm_format_little_endian(runtime->format))
                /* little endian/intel format */
                chip->rmh.cmd[0] |= (STREAM_FMT_intel << STREAM_FMT_OFFSET);

        chip->rmh.cmd[0] |= channels-1;

        err = lx_message_send_atomic(chip, &chip->rmh);
        spin_unlock_irqrestore(&chip->msg_lock, flags);

        return err;
}

int lx_stream_state(struct lx6464es *chip, u32 pipe, int is_capture,
                    int *rstate)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        *rstate = (chip->rmh.stat[0] & SF_START) ? START_STATE : PAUSE_STATE;

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}

int lx_stream_sample_position(struct lx6464es *chip, u32 pipe, int is_capture,
                              u64 *r_bytepos)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        *r_bytepos = ((u64) (chip->rmh.stat[0] & MASK_SPL_COUNT_HI)
                      << 32)           /* hi part */
                + chip->rmh.stat[1];   /* lo part */

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}

/* low-level buffer handling */
int lx_buffer_give(struct lx6464es *chip, u32 pipe, int is_capture,
                   u32 buffer_size, u32 buf_address_lo, u32 buf_address_hi,
                   u32 *r_buffer_index)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_0F_UPDATE_BUFFER);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= BF_NOTIFY_EOB; /* request interrupt notification */

        /* todo: pause request, circular buffer */

        chip->rmh.cmd[1] = buffer_size & MASK_DATA_SIZE;
        chip->rmh.cmd[2] = buf_address_lo;

        if (buf_address_hi) {
                chip->rmh.cmd_len = 4;
                chip->rmh.cmd[3] = buf_address_hi;
                chip->rmh.cmd[0] |= BF_64BITS_ADR;
        }

        err = lx_message_send_atomic(chip, &chip->rmh);

        if (err == 0) {
                *r_buffer_index = chip->rmh.stat[0];
                goto done;
        }

        if (err == EB_RBUFFERS_TABLE_OVERFLOW)
                snd_printk(LXP "lx_buffer_give EB_RBUFFERS_TABLE_OVERFLOW\n");

        if (err == EB_INVALID_STREAM)
                snd_printk(LXP "lx_buffer_give EB_INVALID_STREAM\n");

        if (err == EB_CMD_REFUSED)
                snd_printk(LXP "lx_buffer_give EB_CMD_REFUSED\n");

done:
        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}

int lx_buffer_free(struct lx6464es *chip, u32 pipe, int is_capture,
                   u32 *r_buffer_size)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= MASK_BUFFER_ID; /* ask for the current buffer: the
                                             * microblaze will seek for it */

        err = lx_message_send_atomic(chip, &chip->rmh);

        if (err == 0)
                *r_buffer_size = chip->rmh.stat[0] & MASK_DATA_SIZE;

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}

int lx_buffer_cancel(struct lx6464es *chip, u32 pipe, int is_capture,
                     u32 buffer_index)
{
        int err;
        unsigned long flags;

        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= buffer_index;

        err = lx_message_send_atomic(chip, &chip->rmh);

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}

/* low-level gain/peak handling
 *
 * \todo: can we unmute capture/playback channels independently?
 *
 * */
int lx_level_unmute(struct lx6464es *chip, int is_capture, int unmute)
{
        int err;
        unsigned long flags;

        /* bit set to 1: channel muted */
        u64 mute_mask = unmute ? 0 : 0xFFFFFFFFFFFFFFFFLLU;

        spin_lock_irqsave(&chip->msg_lock, flags);
        lx_message_init(&chip->rmh, CMD_0D_SET_MUTE);

        chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, 0);

        chip->rmh.cmd[1] = (u32)(mute_mask >> (u64)32);        /* hi part */
        chip->rmh.cmd[2] = (u32)(mute_mask & (u64)0xFFFFFFFF); /* lo part */

        snd_printk("mute %x %x %x\n", chip->rmh.cmd[0], chip->rmh.cmd[1],
                   chip->rmh.cmd[2]);

        err = lx_message_send_atomic(chip, &chip->rmh);

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}

static u32 peak_map[] = {
        0x00000109, /* -90.308dB */
        0x0000083B, /* -72.247dB */
        0x000020C4, /* -60.205dB */
        0x00008273, /* -48.030dB */
        0x00020756, /* -36.005dB */
        0x00040C37, /* -30.001dB */
        0x00081385, /* -24.002dB */
        0x00101D3F, /* -18.000dB */
        0x0016C310, /* -15.000dB */
        0x002026F2, /* -12.001dB */
        0x002D6A86, /* -9.000dB */
        0x004026E6, /* -6.004dB */
        0x005A9DF6, /* -3.000dB */
        0x0065AC8B, /* -2.000dB */
        0x00721481, /* -1.000dB */
        0x007FFFFF, /* FS */
};
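
/* CMD_12_GET_PEAK returns four peak levels per status word, one 4-bit
 * index per channel; peak_map[] above translates an index into a linear
 * 24-bit amplitude. */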

int lx_level_peaks(struct lx6464es *chip, int is_capture, int channels,
                   u32 *r_levels)
{
        int err = 0;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&chip->msg_lock, flags);

        for (i = 0; i < channels; i += 4) {
                u32 s0, s1, s2, s3;

                lx_message_init(&chip->rmh, CMD_12_GET_PEAK);
                chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, i);

                err = lx_message_send_atomic(chip, &chip->rmh);

                if (err == 0) {
                        s0 = peak_map[chip->rmh.stat[0] & 0x0F];
                        s1 = peak_map[(chip->rmh.stat[0] >>  4) & 0xf];
                        s2 = peak_map[(chip->rmh.stat[0] >>  8) & 0xf];
                        s3 = peak_map[(chip->rmh.stat[0] >> 12) & 0xf];
                } else
                        s0 = s1 = s2 = s3 = 0;

                r_levels[0] = s0;
                r_levels[1] = s1;
                r_levels[2] = s2;
                r_levels[3] = s3;

                r_levels += 4;
        }

        spin_unlock_irqrestore(&chip->msg_lock, flags);
        return err;
}

/* interrupt handling */
#define PCX_IRQ_NONE 0
#define IRQCS_ACTIVE_PCIDB      0x00002000L     /* Bit n° 13 */
#define IRQCS_ENABLE_PCIIRQ     0x00000100L     /* Bit n° 08 */
#define IRQCS_ENABLE_PCIDB      0x00000200L     /* Bit n° 09 */

static u32 lx_interrupt_test_ack(struct lx6464es *chip)
{
        u32 irqcs = lx_plx_reg_read(chip, ePLX_IRQCS);

        /* Test if PCI Doorbell interrupt is active */
        if (irqcs & IRQCS_ACTIVE_PCIDB) {
                u32 temp;
                irqcs = PCX_IRQ_NONE;

                while ((temp = lx_plx_reg_read(chip, ePLX_L2PCIDB))) {
                        /* clear the interrupt (reset the doorbell bits) */
                        irqcs |= temp;
                        lx_plx_reg_write(chip, ePLX_L2PCIDB, temp);
                }

                return irqcs;
        }
        return PCX_IRQ_NONE;
}
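
/* lx_interrupt_ack() reads and clears the PLX doorbell via
 * lx_interrupt_test_ack() and sorts the interrupt sources: it returns 0
 * when this device did not raise the interrupt, and otherwise flags
 * pending async events and EtherSound command completion for the caller. */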

static int lx_interrupt_ack(struct lx6464es *chip, u32 *r_irqsrc,
                            int *r_async_pending, int *r_async_escmd)
{
        u32 irq_async;
        u32 irqsrc = lx_interrupt_test_ack(chip);

        if (irqsrc == PCX_IRQ_NONE)
                return 0;

        *r_irqsrc = irqsrc;

        irq_async = irqsrc & MASK_SYS_ASYNC_EVENTS; /* + EtherSound response
                                                     * (set by xilinx) + EOB */

        if (irq_async & MASK_SYS_STATUS_ESA) {
                irq_async &= ~MASK_SYS_STATUS_ESA;
                *r_async_escmd = 1;
        }

        if (irq_async) {
                /* snd_printd("interrupt: async event pending\n"); */
                *r_async_pending = 1;
        }

        return 1;
}

static int lx_interrupt_handle_async_events(struct lx6464es *chip, u32 irqsrc,
                                            int *r_freq_changed,
                                            u64 *r_notified_in_pipe_mask,
                                            u64 *r_notified_out_pipe_mask)
{
        int err;
        u32 stat[9];            /* answer from CMD_04_GET_EVENT */

        /* this could be optimized to avoid reading the empty events;
         * the response words come in the following order:
         * Stat[0]      general status word
         * Stat[1]      end of OUT buffer, high word
         * Stat[2]      end of OUT buffer, low word
         * Stat[3]      end of IN buffer, high word
         * Stat[4]      end of IN buffer, low word
         * Stat[5]      underrun, high word
         * Stat[6]      underrun, low word
         * Stat[7]      overrun, high word
         * Stat[8]      overrun, low word
         * */

        u64 orun_mask;
        u64 urun_mask;
#if 0
        int has_underrun   = (irqsrc & MASK_SYS_STATUS_URUN) ? 1 : 0;
        int has_overrun    = (irqsrc & MASK_SYS_STATUS_ORUN) ? 1 : 0;
#endif
        int eb_pending_out = (irqsrc & MASK_SYS_STATUS_EOBO) ? 1 : 0;
        int eb_pending_in  = (irqsrc & MASK_SYS_STATUS_EOBI) ? 1 : 0;

        *r_freq_changed = (irqsrc & MASK_SYS_STATUS_FREQ) ? 1 : 0;

        err = lx_dsp_read_async_events(chip, stat);
        if (err < 0)
                return err;

        if (eb_pending_in) {
                *r_notified_in_pipe_mask = ((u64)stat[3] << 32)
                        + stat[4];
                snd_printdd(LXP "interrupt: EOBI pending %llx\n",
                            *r_notified_in_pipe_mask);
        }
        if (eb_pending_out) {
                *r_notified_out_pipe_mask = ((u64)stat[1] << 32)
                        + stat[2];
                snd_printdd(LXP "interrupt: EOBO pending %llx\n",
                            *r_notified_out_pipe_mask);
        }

        orun_mask = ((u64)stat[7] << 32) + stat[8];
        urun_mask = ((u64)stat[5] << 32) + stat[6];

        /* todo: handle xrun notification */

        return err;
}
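
/* Called from the playback/capture tasklets: query the DSP buffer state
 * with lx_buffer_ask() and hand it the next period of the substream's
 * preallocated DMA buffer with lx_buffer_give(), advancing frame_pos
 * cyclically over the periods. */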

static int lx_interrupt_request_new_buffer(struct lx6464es *chip,
                                           struct lx_stream *lx_stream)
{
        struct snd_pcm_substream *substream = lx_stream->stream;
        const unsigned int is_capture = lx_stream->is_capture;
        int err;
        unsigned long flags;

        const u32 channels = substream->runtime->channels;
        const u32 bytes_per_frame = channels * 3;
        const u32 period_size = substream->runtime->period_size;
        const u32 period_bytes = period_size * bytes_per_frame;
        const u32 pos = lx_stream->frame_pos;
        const u32 next_pos = ((pos+1) == substream->runtime->periods) ?
                0 : pos + 1;

        dma_addr_t buf = substream->dma_buffer.addr + pos * period_bytes;
        u32 buf_hi = 0;
        u32 buf_lo = 0;
        u32 buffer_index = 0;

        u32 needed, freed;
        u32 size_array[MAX_STREAM_BUFFER];

        snd_printdd("->lx_interrupt_request_new_buffer\n");

        spin_lock_irqsave(&chip->lock, flags);

        err = lx_buffer_ask(chip, 0, is_capture, &needed, &freed, size_array);
        snd_printdd(LXP "interrupt: needed %d, freed %d\n", needed, freed);

        unpack_pointer(buf, &buf_lo, &buf_hi);
        err = lx_buffer_give(chip, 0, is_capture, period_bytes, buf_lo, buf_hi,
                             &buffer_index);
        snd_printdd(LXP "interrupt: gave buffer index %x on %p (%d bytes)\n",
                    buffer_index, (void *)buf, period_bytes);

        lx_stream->frame_pos = next_pos;
        spin_unlock_irqrestore(&chip->lock, flags);

        return err;
}

void lx_tasklet_playback(unsigned long data)
{
        struct lx6464es *chip = (struct lx6464es *)data;
        struct lx_stream *lx_stream = &chip->playback_stream;
        int err;

        snd_printdd("->lx_tasklet_playback\n");

        err = lx_interrupt_request_new_buffer(chip, lx_stream);
        if (err < 0)
                snd_printk(KERN_ERR LXP
                           "cannot request new buffer for playback\n");

        snd_pcm_period_elapsed(lx_stream->stream);
}

void lx_tasklet_capture(unsigned long data)
{
        struct lx6464es *chip = (struct lx6464es *)data;
        struct lx_stream *lx_stream = &chip->capture_stream;
        int err;

        snd_printdd("->lx_tasklet_capture\n");
        err = lx_interrupt_request_new_buffer(chip, lx_stream);
        if (err < 0)
                snd_printk(KERN_ERR LXP
                           "cannot request new buffer for capture\n");

        snd_pcm_period_elapsed(lx_stream->stream);
}

static int lx_interrupt_handle_audio_transfer(struct lx6464es *chip,
                                              u64 notified_in_pipe_mask,
                                              u64 notified_out_pipe_mask)
{
        int err = 0;

        if (notified_in_pipe_mask) {
                snd_printdd(LXP "requesting audio transfer for capture\n");
                tasklet_hi_schedule(&chip->tasklet_capture);
        }

        if (notified_out_pipe_mask) {
                snd_printdd(LXP "requesting audio transfer for playback\n");
                tasklet_hi_schedule(&chip->tasklet_playback);
        }

        return err;
}

irqreturn_t lx_interrupt(int irq, void *dev_id)
{
        struct lx6464es *chip = dev_id;
        int async_pending, async_escmd;
        u32 irqsrc;

        spin_lock(&chip->lock);

        snd_printdd("**************************************************\n");

        if (!lx_interrupt_ack(chip, &irqsrc, &async_pending, &async_escmd)) {
                spin_unlock(&chip->lock);
                snd_printdd("IRQ_NONE\n");
                return IRQ_NONE; /* this device did not cause the interrupt */
        }

        if (irqsrc & MASK_SYS_STATUS_CMD_DONE)
                goto exit;

#if 0
        if (irqsrc & MASK_SYS_STATUS_EOBI)
                snd_printdd(LXP "interrupt: EOBI\n");

        if (irqsrc & MASK_SYS_STATUS_EOBO)
                snd_printdd(LXP "interrupt: EOBO\n");

        if (irqsrc & MASK_SYS_STATUS_URUN)
                snd_printdd(LXP "interrupt: URUN\n");

        if (irqsrc & MASK_SYS_STATUS_ORUN)
                snd_printdd(LXP "interrupt: ORUN\n");
#endif

        if (async_pending) {
                u64 notified_in_pipe_mask = 0;
                u64 notified_out_pipe_mask = 0;
                int freq_changed;
                int err;

                /* handle async events */
                err = lx_interrupt_handle_async_events(chip, irqsrc,
                                                       &freq_changed,
                                                       &notified_in_pipe_mask,
                                                       &notified_out_pipe_mask);
                if (err)
                        snd_printk(KERN_ERR LXP
                                   "error handling async events\n");

                err = lx_interrupt_handle_audio_transfer(chip,
                                                         notified_in_pipe_mask,
                                                         notified_out_pipe_mask);
                if (err)
                        snd_printk(KERN_ERR LXP
                                   "error during audio transfer\n");
        }

        if (async_escmd) {
#if 0
                /* backdoor for ethersound commands
                 *
                 * for now, we do not need this
                 *
                 * */
                snd_printdd("lx6464es: interrupt requests escmd handling\n");
#endif
        }

exit:
        spin_unlock(&chip->lock);
        return IRQ_HANDLED;     /* this device caused the interrupt */
}

static void lx_irq_set(struct lx6464es *chip, int enable)
{
        u32 reg = lx_plx_reg_read(chip, ePLX_IRQCS);

        /* enable/disable interrupts
         *
         * Set the Doorbell and PCI interrupt enable bits
         *
         * */
        if (enable)
                reg |=  (IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB);
        else
                reg &= ~(IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB);

        lx_plx_reg_write(chip, ePLX_IRQCS, reg);
}

void lx_irq_enable(struct lx6464es *chip)
{
        snd_printdd("->lx_irq_enable\n");
        lx_irq_set(chip, 1);
}

void lx_irq_disable(struct lx6464es *chip)
{
        snd_printdd("->lx_irq_disable\n");
        lx_irq_set(chip, 0);
}