sync_serial.c 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557
  1. /*
  2. * Simple synchronous serial port driver for ETRAX FS and Artpec-3.
  3. *
  4. * Copyright (c) 2005 Axis Communications AB
  5. *
  6. * Author: Mikael Starvik
  7. *
  8. */
  9. #include <linux/module.h>
  10. #include <linux/kernel.h>
  11. #include <linux/types.h>
  12. #include <linux/errno.h>
  13. #include <linux/major.h>
  14. #include <linux/sched.h>
  15. #include <linux/mutex.h>
  16. #include <linux/interrupt.h>
  17. #include <linux/poll.h>
  18. #include <linux/init.h>
  19. #include <linux/timer.h>
  20. #include <linux/spinlock.h>
  21. #include <asm/io.h>
  22. #include <dma.h>
  23. #include <pinmux.h>
  24. #include <hwregs/reg_rdwr.h>
  25. #include <hwregs/sser_defs.h>
  26. #include <hwregs/dma_defs.h>
  27. #include <hwregs/dma.h>
  28. #include <hwregs/intr_vect_defs.h>
  29. #include <hwregs/intr_vect.h>
  30. #include <hwregs/reg_map.h>
  31. #include <asm/sync_serial.h>
  32. /* The receiver is a bit tricky because of the continuous stream of data.*/
  33. /* */
  34. /* Three DMA descriptors are linked together. Each DMA descriptor is */
  35. /* responsible for port->bufchunk of a common buffer. */
  36. /* */
  37. /* +---------------------------------------------+ */
  38. /* | +----------+ +----------+ +----------+ | */
  39. /* +-> | Descr[0] |-->| Descr[1] |-->| Descr[2] |-+ */
  40. /* +----------+ +----------+ +----------+ */
  41. /* | | | */
  42. /* v v v */
  43. /* +-------------------------------------+ */
  44. /* | BUFFER | */
  45. /* +-------------------------------------+ */
  46. /* |<- data_avail ->| */
  47. /* readp writep */
  48. /* */
  49. /* If the application keeps up the pace readp will be right after writep.*/
  50. /* If the application can't keep the pace we have to throw away data. */
  51. /* The idea is that readp should be ready with the data pointed out by */
  52. /* Descr[i] when the DMA has filled in Descr[i+1]. */
  53. /* Otherwise we will discard */
  54. /* the rest of the data pointed out by Descr1 and set readp to the start */
  55. /* of Descr2 */
  56. #define SYNC_SERIAL_MAJOR 125
  57. /* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */
  58. /* words can be handled */
  59. #define IN_BUFFER_SIZE 12288
  60. #define IN_DESCR_SIZE 256
  61. #define NBR_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE)
  62. #define OUT_BUFFER_SIZE 1024*8
  63. #define NBR_OUT_DESCR 8
  64. #define DEFAULT_FRAME_RATE 0
  65. #define DEFAULT_WORD_RATE 7
  66. /* NOTE: Enabling some debug will likely cause overrun or underrun,
  67. * especially if manual mode is use.
  68. */
  69. #define DEBUG(x)
  70. #define DEBUGREAD(x)
  71. #define DEBUGWRITE(x)
  72. #define DEBUGPOLL(x)
  73. #define DEBUGRXINT(x)
  74. #define DEBUGTXINT(x)
  75. #define DEBUGTRDMA(x)
  76. #define DEBUGOUTBUF(x)
/* Per-port driver state; one instance per synchronous serial port. */
typedef struct sync_port
{
	/* Register-block instances for the sser controller and its DMAs. */
	reg_scope_instances regi_sser;
	reg_scope_instances regi_dmain;
	reg_scope_instances regi_dmaout;
	char started; /* 1 if port has been started */
	char port_nbr; /* Port 0 or 1 */
	char busy; /* 1 if port is busy */
	char enabled; /* 1 if port is enabled */
	char use_dma; /* 1 if port uses dma */
	char tr_running; /* nonzero while a transmit is in progress */
	char init_irqs; /* 1 until IRQs/DMA channels have been requested */
	int output; /* nonzero if the port is configured to transmit */
	int input; /* nonzero if the port is configured to receive */
	/* Next byte to be read by application */
	volatile unsigned char *volatile readp;
	/* Next byte to be written by etrax */
	volatile unsigned char *volatile writep;
	unsigned int in_buffer_size; /* capacity of the receive ring (flip) */
	unsigned int inbufchunk; /* bytes covered by each in-descriptor */
	/* 32-byte alignment presumably required by the DMA engine —
	 * TODO confirm against the dma hardware docs. */
	unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32)));
	unsigned char in_buffer[IN_BUFFER_SIZE]__attribute__ ((aligned(32)));
	unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32)));
	struct dma_descr_data* next_rx_desc;
	struct dma_descr_data* prev_rx_desc;
	/* Pointer to the first available descriptor in the ring,
	 * unless active_tr_descr == catch_tr_descr and a dma
	 * transfer is active */
	struct dma_descr_data *active_tr_descr;
	/* Pointer to the first allocated descriptor in the ring */
	struct dma_descr_data *catch_tr_descr;
	/* Pointer to the descriptor with the current end-of-list */
	struct dma_descr_data *prev_tr_descr;
	int full;
	/* Pointer to the first byte being read by DMA
	 * or current position in out_buffer if not using DMA. */
	unsigned char *out_rd_ptr;
	/* Number of bytes currently locked for being read by DMA */
	int out_buf_count;
	/* DMA descriptor rings and contexts for receive and transmit. */
	dma_descr_data in_descr[NBR_IN_DESCR] __attribute__ ((__aligned__(16)));
	dma_descr_context in_context __attribute__ ((__aligned__(32)));
	dma_descr_data out_descr[NBR_OUT_DESCR]
		__attribute__ ((__aligned__(16)));
	dma_descr_context out_context __attribute__ ((__aligned__(32)));
	wait_queue_head_t out_wait_q; /* writers wait here */
	wait_queue_head_t in_wait_q; /* readers wait here */
	spinlock_t lock;
} sync_port;
  125. static DEFINE_MUTEX(sync_serial_mutex);
  126. static int etrax_sync_serial_init(void);
  127. static void initialize_port(int portnbr);
  128. static inline int sync_data_avail(struct sync_port *port);
  129. static int sync_serial_open(struct inode *, struct file*);
  130. static int sync_serial_release(struct inode*, struct file*);
  131. static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);
  132. static int sync_serial_ioctl(struct file *,
  133. unsigned int cmd, unsigned long arg);
  134. static ssize_t sync_serial_write(struct file * file, const char * buf,
  135. size_t count, loff_t *ppos);
  136. static ssize_t sync_serial_read(struct file *file, char *buf,
  137. size_t count, loff_t *ppos);
  138. #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
  139. defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
  140. (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
  141. defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
  142. #define SYNC_SER_DMA
  143. #endif
  144. static void send_word(sync_port* port);
  145. static void start_dma_out(struct sync_port *port, const char *data, int count);
  146. static void start_dma_in(sync_port* port);
  147. #ifdef SYNC_SER_DMA
  148. static irqreturn_t tr_interrupt(int irq, void *dev_id);
  149. static irqreturn_t rx_interrupt(int irq, void *dev_id);
  150. #endif
  151. #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
  152. !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
  153. (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
  154. !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
  155. #define SYNC_SER_MANUAL
  156. #endif
  157. #ifdef SYNC_SER_MANUAL
  158. static irqreturn_t manual_interrupt(int irq, void *dev_id);
  159. #endif
  160. #ifdef CONFIG_ETRAXFS /* ETRAX FS */
  161. #define OUT_DMA_NBR 4
  162. #define IN_DMA_NBR 5
  163. #define PINMUX_SSER pinmux_sser0
  164. #define SYNCSER_INST regi_sser0
  165. #define SYNCSER_INTR_VECT SSER0_INTR_VECT
  166. #define OUT_DMA_INST regi_dma4
  167. #define IN_DMA_INST regi_dma5
  168. #define DMA_OUT_INTR_VECT DMA4_INTR_VECT
  169. #define DMA_IN_INTR_VECT DMA5_INTR_VECT
  170. #define REQ_DMA_SYNCSER dma_sser0
  171. #else /* Artpec-3 */
  172. #define OUT_DMA_NBR 6
  173. #define IN_DMA_NBR 7
  174. #define PINMUX_SSER pinmux_sser
  175. #define SYNCSER_INST regi_sser
  176. #define SYNCSER_INTR_VECT SSER_INTR_VECT
  177. #define OUT_DMA_INST regi_dma6
  178. #define IN_DMA_INST regi_dma7
  179. #define DMA_OUT_INTR_VECT DMA6_INTR_VECT
  180. #define DMA_IN_INTR_VECT DMA7_INTR_VECT
  181. #define REQ_DMA_SYNCSER dma_sser
  182. #endif
/* The ports. Port 0 always exists; port 1 only on ETRAX FS.
 * use_dma is fixed at build time by the per-port Kconfig option. */
static struct sync_port ports[]=
{
	{
		/* Port 0: controller/DMA instances selected per chip above. */
		.regi_sser = SYNCSER_INST,
		.regi_dmaout = OUT_DMA_INST,
		.regi_dmain = IN_DMA_INST,
#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)
		.use_dma = 1,
#else
		.use_dma = 0,
#endif
	}
#ifdef CONFIG_ETRAXFS
	,
	{
		/* Port 1 (ETRAX FS only) uses sser1 with DMA channels 6/7. */
		.regi_sser = regi_sser1,
		.regi_dmaout = regi_dma6,
		.regi_dmain = regi_dma7,
#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
		.use_dma = 1,
#else
		.use_dma = 0,
#endif
	}
#endif
};
#define NBR_PORTS ARRAY_SIZE(ports)
/* Character-device entry points; registered in etrax_sync_serial_init(). */
static const struct file_operations sync_serial_fops = {
	.owner = THIS_MODULE,
	.write = sync_serial_write,
	.read = sync_serial_read,
	.poll = sync_serial_poll,
	.unlocked_ioctl = sync_serial_ioctl,
	.open = sync_serial_open,
	.release = sync_serial_release,
	.llseek = noop_llseek,
};
  221. static int __init etrax_sync_serial_init(void)
  222. {
  223. ports[0].enabled = 0;
  224. #ifdef CONFIG_ETRAXFS
  225. ports[1].enabled = 0;
  226. #endif
  227. if (register_chrdev(SYNC_SERIAL_MAJOR, "sync serial",
  228. &sync_serial_fops) < 0) {
  229. printk(KERN_WARNING
  230. "Unable to get major for synchronous serial port\n");
  231. return -EBUSY;
  232. }
  233. /* Initialize Ports */
  234. #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
  235. if (crisv32_pinmux_alloc_fixed(PINMUX_SSER)) {
  236. printk(KERN_WARNING
  237. "Unable to alloc pins for synchronous serial port 0\n");
  238. return -EIO;
  239. }
  240. ports[0].enabled = 1;
  241. initialize_port(0);
  242. #endif
  243. #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
  244. if (crisv32_pinmux_alloc_fixed(pinmux_sser1)) {
  245. printk(KERN_WARNING
  246. "Unable to alloc pins for synchronous serial port 0\n");
  247. return -EIO;
  248. }
  249. ports[1].enabled = 1;
  250. initialize_port(1);
  251. #endif
  252. #ifdef CONFIG_ETRAXFS
  253. printk(KERN_INFO "ETRAX FS synchronous serial port driver\n");
  254. #else
  255. printk(KERN_INFO "Artpec-3 synchronous serial port driver\n");
  256. #endif
  257. return 0;
  258. }
/*
 * Program a port's default configuration: software state, wait queues,
 * sser clock/frame/transmit/receive registers, and (when DMA is built
 * in) the transmit descriptor ring.  Called once per enabled port at
 * init time; IRQ/DMA channel allocation is deferred to first open()
 * via init_irqs.
 */
static void __init initialize_port(int portnbr)
{
	int __attribute__((unused)) i;
	struct sync_port *port = &ports[portnbr];
	reg_sser_rw_cfg cfg = {0};
	reg_sser_rw_frm_cfg frm_cfg = {0};
	reg_sser_rw_tr_cfg tr_cfg = {0};
	reg_sser_rw_rec_cfg rec_cfg = {0};
	DEBUG(printk(KERN_DEBUG "Init sync serial port %d\n", portnbr));
	port->port_nbr = portnbr;
	port->init_irqs = 1;
	/* Default mode: transmit only; open() flips these via SSP_MODE. */
	port->out_rd_ptr = port->out_buffer;
	port->out_buf_count = 0;
	port->output = 1;
	port->input = 0;
	/* Empty receive ring: readp == writep. */
	port->readp = port->flip;
	port->writep = port->flip;
	port->in_buffer_size = IN_BUFFER_SIZE;
	port->inbufchunk = IN_DESCR_SIZE;
	port->next_rx_desc = &port->in_descr[0];
	port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR-1];
	port->prev_rx_desc->eol = 1;
	init_waitqueue_head(&port->out_wait_q);
	init_waitqueue_head(&port->in_wait_q);
	spin_lock_init(&port->lock);
	/* General config: internally generated output clock,
	 * 29.493 MHz base divided by 256. */
	cfg.out_clk_src = regk_sser_intern_clk;
	cfg.out_clk_pol = regk_sser_pos;
	cfg.clk_od_mode = regk_sser_no;
	cfg.clk_dir = regk_sser_out;
	cfg.gate_clk = regk_sser_no;
	cfg.base_freq = regk_sser_f29_493;
	cfg.clk_div = 256;
	REG_WR(sser, port->regi_sser, rw_cfg, cfg);
	/* Frame config: edge-type sync, frame pin driven by us. */
	frm_cfg.wordrate = DEFAULT_WORD_RATE;
	frm_cfg.type = regk_sser_edge;
	frm_cfg.frame_pin_dir = regk_sser_out;
	frm_cfg.frame_pin_use = regk_sser_frm;
	frm_cfg.status_pin_dir = regk_sser_in;
	frm_cfg.status_pin_use = regk_sser_hold;
	frm_cfg.out_on = regk_sser_tr;
	frm_cfg.tr_delay = 1;
	REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
	/* Transmit config: 8-bit words (sample_size + 1), MSB first. */
	tr_cfg.urun_stop = regk_sser_no;
	tr_cfg.sample_size = 7;
	tr_cfg.sh_dir = regk_sser_msbfirst;
	tr_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
#if 0
	tr_cfg.rate_ctrl = regk_sser_bulk;
	tr_cfg.data_pin_use = regk_sser_dout;
#else
	tr_cfg.rate_ctrl = regk_sser_iso;
	tr_cfg.data_pin_use = regk_sser_dout;
#endif
	tr_cfg.bulk_wspace = 1;
	REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
	/* Receive config mirrors transmit: 8-bit, MSB first. */
	rec_cfg.sample_size = 7;
	rec_cfg.sh_dir = regk_sser_msbfirst;
	rec_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
	rec_cfg.fifo_thr = regk_sser_inf;
	REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
#ifdef SYNC_SER_DMA
	/* Setup the descriptor ring for dma out/transmit. */
	for (i = 0; i < NBR_OUT_DESCR; i++) {
		port->out_descr[i].wait = 0;
		port->out_descr[i].intr = 1;
		port->out_descr[i].eol = 0;
		port->out_descr[i].out_eop = 0;
		/* Descriptors are chained via physical addresses. */
		port->out_descr[i].next =
			(dma_descr_data *)virt_to_phys(&port->out_descr[i+1]);
	}
	/* Create a ring from the list. */
	port->out_descr[NBR_OUT_DESCR-1].next =
		(dma_descr_data *)virt_to_phys(&port->out_descr[0]);
	/* Setup context for traversing the ring. */
	port->active_tr_descr = &port->out_descr[0];
	port->prev_tr_descr = &port->out_descr[NBR_OUT_DESCR-1];
	port->catch_tr_descr = &port->out_descr[0];
#endif
}
  338. static inline int sync_data_avail(struct sync_port *port)
  339. {
  340. int avail;
  341. unsigned char *start;
  342. unsigned char *end;
  343. start = (unsigned char*)port->readp; /* cast away volatile */
  344. end = (unsigned char*)port->writep; /* cast away volatile */
  345. /* 0123456789 0123456789
  346. * ----- - -----
  347. * ^rp ^wp ^wp ^rp
  348. */
  349. if (end >= start)
  350. avail = end - start;
  351. else
  352. avail = port->in_buffer_size - (start - end);
  353. return avail;
  354. }
  355. static inline int sync_data_avail_to_end(struct sync_port *port)
  356. {
  357. int avail;
  358. unsigned char *start;
  359. unsigned char *end;
  360. start = (unsigned char*)port->readp; /* cast away volatile */
  361. end = (unsigned char*)port->writep; /* cast away volatile */
  362. /* 0123456789 0123456789
  363. * ----- -----
  364. * ^rp ^wp ^wp ^rp
  365. */
  366. if (end >= start)
  367. avail = end - start;
  368. else
  369. avail = port->flip + port->in_buffer_size - start;
  370. return avail;
  371. }
/*
 * open() entry point.  On the first open of a port this also requests
 * its IRQs and DMA channels (DMA mode) or its manual-mode IRQ, then
 * enables and starts the input DMA.  A port may be open at most twice
 * (intended as one reader plus one writer).
 *
 * Returns 0, -ENODEV for a bad/disabled minor, or -EBUSY if already
 * open twice or resource allocation fails.
 */
static int sync_serial_open(struct inode *inode, struct file *file)
{
	int dev = iminor(inode);
	int ret = -EBUSY;
	sync_port *port;
	reg_dma_rw_cfg cfg = {.en = regk_dma_yes};
	reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes};
	/* Serializes the one-shot IRQ/DMA setup and the busy count. */
	mutex_lock(&sync_serial_mutex);
	DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev));
	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
	{
		DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev));
		ret = -ENODEV;
		goto out;
	}
	port = &ports[dev];
	/* Allow open this device twice (assuming one reader and one writer) */
	if (port->busy == 2)
	{
		DEBUG(printk(KERN_DEBUG "Device is busy.. \n"));
		goto out;
	}
	if (port->init_irqs) {
		if (port->use_dma) {
			if (port == &ports[0]) {
#ifdef SYNC_SER_DMA
				/* Acquire TX IRQ, RX IRQ, TX DMA, RX DMA in
				 * order; each failure path releases exactly
				 * what was acquired before it. */
				if (request_irq(DMA_OUT_INTR_VECT,
						tr_interrupt,
						0,
						"synchronous serial 0 dma tr",
						&ports[0])) {
					printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
					goto out;
				} else if (request_irq(DMA_IN_INTR_VECT,
						rx_interrupt,
						0,
						/* NOTE(review): says "1" but this
						 * is port 0's RX IRQ. */
						"synchronous serial 1 dma rx",
						&ports[0])) {
					/* NOTE(review): &port[0] == port ==
					 * &ports[0] here; works, but
					 * &ports[0] would be clearer. */
					free_irq(DMA_OUT_INTR_VECT, &port[0]);
					printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
					goto out;
				} else if (crisv32_request_dma(OUT_DMA_NBR,
						"synchronous serial 0 dma tr",
						DMA_VERBOSE_ON_ERROR,
						0,
						REQ_DMA_SYNCSER)) {
					free_irq(DMA_OUT_INTR_VECT, &port[0]);
					free_irq(DMA_IN_INTR_VECT, &port[0]);
					printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel");
					goto out;
				} else if (crisv32_request_dma(IN_DMA_NBR,
						"synchronous serial 0 dma rec",
						DMA_VERBOSE_ON_ERROR,
						0,
						REQ_DMA_SYNCSER)) {
					crisv32_free_dma(OUT_DMA_NBR);
					free_irq(DMA_OUT_INTR_VECT, &port[0]);
					free_irq(DMA_IN_INTR_VECT, &port[0]);
					/* NOTE(review): message says port 1
					 * but this is port 0. */
					printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel");
					goto out;
				}
#endif
			}
#ifdef CONFIG_ETRAXFS
			else if (port == &ports[1]) {
#ifdef SYNC_SER_DMA
				/* Same acquire/rollback ladder for port 1
				 * (ETRAX FS only), on DMA channels 6/7. */
				if (request_irq(DMA6_INTR_VECT,
						tr_interrupt,
						0,
						"synchronous serial 1 dma tr",
						&ports[1])) {
					printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
					goto out;
				} else if (request_irq(DMA7_INTR_VECT,
						rx_interrupt,
						0,
						"synchronous serial 1 dma rx",
						&ports[1])) {
					free_irq(DMA6_INTR_VECT, &ports[1]);
					/* NOTE(review): "port 3" in these
					 * messages looks like a stale
					 * copy-paste; this is port 1. */
					printk(KERN_CRIT "Can't allocate sync serial port 3 IRQ");
					goto out;
				} else if (crisv32_request_dma(
						SYNC_SER1_TX_DMA_NBR,
						"synchronous serial 1 dma tr",
						DMA_VERBOSE_ON_ERROR,
						0,
						dma_sser1)) {
					free_irq(DMA6_INTR_VECT, &ports[1]);
					free_irq(DMA7_INTR_VECT, &ports[1]);
					printk(KERN_CRIT "Can't allocate sync serial port 3 TX DMA channel");
					goto out;
				} else if (crisv32_request_dma(
						SYNC_SER1_RX_DMA_NBR,
						"synchronous serial 3 dma rec",
						DMA_VERBOSE_ON_ERROR,
						0,
						dma_sser1)) {
					crisv32_free_dma(SYNC_SER1_TX_DMA_NBR);
					free_irq(DMA6_INTR_VECT, &ports[1]);
					free_irq(DMA7_INTR_VECT, &ports[1]);
					printk(KERN_CRIT "Can't allocate sync serial port 3 RX DMA channel");
					goto out;
				}
#endif
			}
#endif
			/* Enable DMAs */
			REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
			REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
			/* Enable DMA IRQs */
			REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
			REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
			/* Set up wordsize = 1 for DMAs. */
			DMA_WR_CMD (port->regi_dmain, regk_dma_set_w_size1);
			DMA_WR_CMD (port->regi_dmaout, regk_dma_set_w_size1);
			/* Receive side is armed immediately; transmit starts
			 * on first write. */
			start_dma_in(port);
			port->init_irqs = 0;
		} else { /* !port->use_dma */
#ifdef SYNC_SER_MANUAL
			if (port == &ports[0]) {
				if (request_irq(SYNCSER_INTR_VECT,
						manual_interrupt,
						0,
						"synchronous serial manual irq",
						&ports[0])) {
					printk("Can't allocate sync serial manual irq");
					goto out;
				}
			}
#ifdef CONFIG_ETRAXFS
			else if (port == &ports[1]) {
				if (request_irq(SSER1_INTR_VECT,
						manual_interrupt,
						0,
						"synchronous serial manual irq",
						&ports[1])) {
					printk(KERN_CRIT "Can't allocate sync serial manual irq");
					goto out;
				}
			}
#endif
			port->init_irqs = 0;
#else
			panic("sync_serial: Manual mode not supported.\n");
#endif /* SYNC_SER_MANUAL */
		}
	} /* port->init_irqs */
	port->busy++;
	ret = 0;
out:
	mutex_unlock(&sync_serial_mutex);
	return ret;
}
  525. static int sync_serial_release(struct inode *inode, struct file *file)
  526. {
  527. int dev = iminor(inode);
  528. sync_port *port;
  529. if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
  530. {
  531. DEBUG(printk("Invalid minor %d\n", dev));
  532. return -ENODEV;
  533. }
  534. port = &ports[dev];
  535. if (port->busy)
  536. port->busy--;
  537. if (!port->busy)
  538. /* XXX */ ;
  539. return 0;
  540. }
/*
 * poll() entry point.  Side effect: the first poll on a port enables
 * the sser controller (and the receiver, if configured for input) —
 * the same lazy start presumably done by read/write paths; TODO
 * confirm against those (not visible in this chunk).
 *
 * Reports writable when the transmitter is idle or descriptor/buffer
 * space remains, and readable once at least one inbufchunk of data
 * has arrived.
 */
static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
{
	int dev = iminor(file->f_path.dentry->d_inode);
	unsigned int mask = 0;
	sync_port *port;
	DEBUGPOLL( static unsigned int prev_mask = 0; );
	port = &ports[dev];
	/* Lazily start the controller on first use. */
	if (!port->started) {
		reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
		reg_sser_rw_rec_cfg rec_cfg =
			REG_RD(sser, port->regi_sser, rw_rec_cfg);
		cfg.en = regk_sser_yes;
		rec_cfg.rec_en = port->input;
		REG_WR(sser, port->regi_sser, rw_cfg, cfg);
		REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
		port->started = 1;
	}
	poll_wait(file, &port->out_wait_q, wait);
	poll_wait(file, &port->in_wait_q, wait);
	/* No active transfer, descriptors are available */
	if (port->output && !port->tr_running)
		mask |= POLLOUT | POLLWRNORM;
	/* Descriptor and buffer space available. */
	if (port->output &&
	    port->active_tr_descr != port->catch_tr_descr &&
	    port->out_buf_count < OUT_BUFFER_SIZE)
		mask |= POLLOUT | POLLWRNORM;
	/* At least an inbufchunk of data */
	if (port->input && sync_data_avail(port) >= port->inbufchunk)
		mask |= POLLIN | POLLRDNORM;
	DEBUGPOLL(if (mask != prev_mask)
		printk("sync_serial_poll: mask 0x%08X %s %s\n", mask,
			mask&POLLOUT?"POLLOUT":"", mask&POLLIN?"POLLIN":"");
		prev_mask = mask;
	);
	return mask;
}
  578. static int sync_serial_ioctl(struct file *file,
  579. unsigned int cmd, unsigned long arg)
  580. {
  581. int return_val = 0;
  582. int dma_w_size = regk_dma_set_w_size1;
  583. int dev = iminor(file->f_path.dentry->d_inode);
  584. sync_port *port;
  585. reg_sser_rw_tr_cfg tr_cfg;
  586. reg_sser_rw_rec_cfg rec_cfg;
  587. reg_sser_rw_frm_cfg frm_cfg;
  588. reg_sser_rw_cfg gen_cfg;
  589. reg_sser_rw_intr_mask intr_mask;
  590. if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
  591. {
  592. DEBUG(printk("Invalid minor %d\n", dev));
  593. return -1;
  594. }
  595. port = &ports[dev];
  596. spin_lock_irq(&port->lock);
  597. tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
  598. rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
  599. frm_cfg = REG_RD(sser, port->regi_sser, rw_frm_cfg);
  600. gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg);
  601. intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
  602. switch(cmd)
  603. {
  604. case SSP_SPEED:
  605. if (GET_SPEED(arg) == CODEC)
  606. {
  607. unsigned int freq;
  608. gen_cfg.base_freq = regk_sser_f32;
  609. /* Clock divider will internally be
  610. * gen_cfg.clk_div + 1.
  611. */
  612. freq = GET_FREQ(arg);
  613. switch (freq) {
  614. case FREQ_32kHz:
  615. case FREQ_64kHz:
  616. case FREQ_128kHz:
  617. case FREQ_256kHz:
  618. gen_cfg.clk_div = 125 *
  619. (1 << (freq - FREQ_256kHz)) - 1;
  620. break;
  621. case FREQ_512kHz:
  622. gen_cfg.clk_div = 62;
  623. break;
  624. case FREQ_1MHz:
  625. case FREQ_2MHz:
  626. case FREQ_4MHz:
  627. gen_cfg.clk_div = 8 * (1 << freq) - 1;
  628. break;
  629. }
  630. } else {
  631. gen_cfg.base_freq = regk_sser_f29_493;
  632. switch (GET_SPEED(arg)) {
  633. case SSP150:
  634. gen_cfg.clk_div = 29493000 / (150 * 8) - 1;
  635. break;
  636. case SSP300:
  637. gen_cfg.clk_div = 29493000 / (300 * 8) - 1;
  638. break;
  639. case SSP600:
  640. gen_cfg.clk_div = 29493000 / (600 * 8) - 1;
  641. break;
  642. case SSP1200:
  643. gen_cfg.clk_div = 29493000 / (1200 * 8) - 1;
  644. break;
  645. case SSP2400:
  646. gen_cfg.clk_div = 29493000 / (2400 * 8) - 1;
  647. break;
  648. case SSP4800:
  649. gen_cfg.clk_div = 29493000 / (4800 * 8) - 1;
  650. break;
  651. case SSP9600:
  652. gen_cfg.clk_div = 29493000 / (9600 * 8) - 1;
  653. break;
  654. case SSP19200:
  655. gen_cfg.clk_div = 29493000 / (19200 * 8) - 1;
  656. break;
  657. case SSP28800:
  658. gen_cfg.clk_div = 29493000 / (28800 * 8) - 1;
  659. break;
  660. case SSP57600:
  661. gen_cfg.clk_div = 29493000 / (57600 * 8) - 1;
  662. break;
  663. case SSP115200:
  664. gen_cfg.clk_div = 29493000 / (115200 * 8) - 1;
  665. break;
  666. case SSP230400:
  667. gen_cfg.clk_div = 29493000 / (230400 * 8) - 1;
  668. break;
  669. case SSP460800:
  670. gen_cfg.clk_div = 29493000 / (460800 * 8) - 1;
  671. break;
  672. case SSP921600:
  673. gen_cfg.clk_div = 29493000 / (921600 * 8) - 1;
  674. break;
  675. case SSP3125000:
  676. gen_cfg.base_freq = regk_sser_f100;
  677. gen_cfg.clk_div = 100000000 / (3125000 * 8) - 1;
  678. break;
  679. }
  680. }
  681. frm_cfg.wordrate = GET_WORD_RATE(arg);
  682. break;
  683. case SSP_MODE:
  684. switch(arg)
  685. {
  686. case MASTER_OUTPUT:
  687. port->output = 1;
  688. port->input = 0;
  689. frm_cfg.out_on = regk_sser_tr;
  690. frm_cfg.frame_pin_dir = regk_sser_out;
  691. gen_cfg.clk_dir = regk_sser_out;
  692. break;
  693. case SLAVE_OUTPUT:
  694. port->output = 1;
  695. port->input = 0;
  696. frm_cfg.frame_pin_dir = regk_sser_in;
  697. gen_cfg.clk_dir = regk_sser_in;
  698. break;
  699. case MASTER_INPUT:
  700. port->output = 0;
  701. port->input = 1;
  702. frm_cfg.frame_pin_dir = regk_sser_out;
  703. frm_cfg.out_on = regk_sser_intern_tb;
  704. gen_cfg.clk_dir = regk_sser_out;
  705. break;
  706. case SLAVE_INPUT:
  707. port->output = 0;
  708. port->input = 1;
  709. frm_cfg.frame_pin_dir = regk_sser_in;
  710. gen_cfg.clk_dir = regk_sser_in;
  711. break;
  712. case MASTER_BIDIR:
  713. port->output = 1;
  714. port->input = 1;
  715. frm_cfg.frame_pin_dir = regk_sser_out;
  716. frm_cfg.out_on = regk_sser_intern_tb;
  717. gen_cfg.clk_dir = regk_sser_out;
  718. break;
  719. case SLAVE_BIDIR:
  720. port->output = 1;
  721. port->input = 1;
  722. frm_cfg.frame_pin_dir = regk_sser_in;
  723. gen_cfg.clk_dir = regk_sser_in;
  724. break;
  725. default:
  726. spin_unlock_irq(&port->lock);
  727. return -EINVAL;
  728. }
  729. if (!port->use_dma || (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT))
  730. intr_mask.rdav = regk_sser_yes;
  731. break;
  732. case SSP_FRAME_SYNC:
  733. if (arg & NORMAL_SYNC) {
  734. frm_cfg.rec_delay = 1;
  735. frm_cfg.tr_delay = 1;
  736. }
  737. else if (arg & EARLY_SYNC)
  738. frm_cfg.rec_delay = frm_cfg.tr_delay = 0;
  739. else if (arg & SECOND_WORD_SYNC) {
  740. frm_cfg.rec_delay = 7;
  741. frm_cfg.tr_delay = 1;
  742. }
  743. tr_cfg.bulk_wspace = frm_cfg.tr_delay;
  744. frm_cfg.early_wend = regk_sser_yes;
  745. if (arg & BIT_SYNC)
  746. frm_cfg.type = regk_sser_edge;
  747. else if (arg & WORD_SYNC)
  748. frm_cfg.type = regk_sser_level;
  749. else if (arg & EXTENDED_SYNC)
  750. frm_cfg.early_wend = regk_sser_no;
  751. if (arg & SYNC_ON)
  752. frm_cfg.frame_pin_use = regk_sser_frm;
  753. else if (arg & SYNC_OFF)
  754. frm_cfg.frame_pin_use = regk_sser_gio0;
  755. dma_w_size = regk_dma_set_w_size2;
  756. if (arg & WORD_SIZE_8) {
  757. rec_cfg.sample_size = tr_cfg.sample_size = 7;
  758. dma_w_size = regk_dma_set_w_size1;
  759. } else if (arg & WORD_SIZE_12)
  760. rec_cfg.sample_size = tr_cfg.sample_size = 11;
  761. else if (arg & WORD_SIZE_16)
  762. rec_cfg.sample_size = tr_cfg.sample_size = 15;
  763. else if (arg & WORD_SIZE_24)
  764. rec_cfg.sample_size = tr_cfg.sample_size = 23;
  765. else if (arg & WORD_SIZE_32)
  766. rec_cfg.sample_size = tr_cfg.sample_size = 31;
  767. if (arg & BIT_ORDER_MSB)
  768. rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
  769. else if (arg & BIT_ORDER_LSB)
  770. rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_lsbfirst;
  771. if (arg & FLOW_CONTROL_ENABLE) {
  772. frm_cfg.status_pin_use = regk_sser_frm;
  773. rec_cfg.fifo_thr = regk_sser_thr16;
  774. } else if (arg & FLOW_CONTROL_DISABLE) {
  775. frm_cfg.status_pin_use = regk_sser_gio0;
  776. rec_cfg.fifo_thr = regk_sser_inf;
  777. }
  778. if (arg & CLOCK_NOT_GATED)
  779. gen_cfg.gate_clk = regk_sser_no;
  780. else if (arg & CLOCK_GATED)
  781. gen_cfg.gate_clk = regk_sser_yes;
  782. break;
  783. case SSP_IPOLARITY:
  784. /* NOTE!! negedge is considered NORMAL */
  785. if (arg & CLOCK_NORMAL)
  786. rec_cfg.clk_pol = regk_sser_neg;
  787. else if (arg & CLOCK_INVERT)
  788. rec_cfg.clk_pol = regk_sser_pos;
  789. if (arg & FRAME_NORMAL)
  790. frm_cfg.level = regk_sser_pos_hi;
  791. else if (arg & FRAME_INVERT)
  792. frm_cfg.level = regk_sser_neg_lo;
  793. if (arg & STATUS_NORMAL)
  794. gen_cfg.hold_pol = regk_sser_pos;
  795. else if (arg & STATUS_INVERT)
  796. gen_cfg.hold_pol = regk_sser_neg;
  797. break;
  798. case SSP_OPOLARITY:
  799. if (arg & CLOCK_NORMAL)
  800. gen_cfg.out_clk_pol = regk_sser_pos;
  801. else if (arg & CLOCK_INVERT)
  802. gen_cfg.out_clk_pol = regk_sser_neg;
  803. if (arg & FRAME_NORMAL)
  804. frm_cfg.level = regk_sser_pos_hi;
  805. else if (arg & FRAME_INVERT)
  806. frm_cfg.level = regk_sser_neg_lo;
  807. if (arg & STATUS_NORMAL)
  808. gen_cfg.hold_pol = regk_sser_pos;
  809. else if (arg & STATUS_INVERT)
  810. gen_cfg.hold_pol = regk_sser_neg;
  811. break;
  812. case SSP_SPI:
  813. rec_cfg.fifo_thr = regk_sser_inf;
  814. rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
  815. rec_cfg.sample_size = tr_cfg.sample_size = 7;
  816. frm_cfg.frame_pin_use = regk_sser_frm;
  817. frm_cfg.type = regk_sser_level;
  818. frm_cfg.tr_delay = 1;
  819. frm_cfg.level = regk_sser_neg_lo;
  820. if (arg & SPI_SLAVE)
  821. {
  822. rec_cfg.clk_pol = regk_sser_neg;
  823. gen_cfg.clk_dir = regk_sser_in;
  824. port->input = 1;
  825. port->output = 0;
  826. }
  827. else
  828. {
  829. gen_cfg.out_clk_pol = regk_sser_pos;
  830. port->input = 0;
  831. port->output = 1;
  832. gen_cfg.clk_dir = regk_sser_out;
  833. }
  834. break;
  835. case SSP_INBUFCHUNK:
  836. break;
  837. default:
  838. return_val = -1;
  839. }
  840. if (port->started) {
  841. rec_cfg.rec_en = port->input;
  842. gen_cfg.en = (port->output | port->input);
  843. }
  844. REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
  845. REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
  846. REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
  847. REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
  848. REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
  849. if (cmd == SSP_FRAME_SYNC && (arg & (WORD_SIZE_8 | WORD_SIZE_12 |
  850. WORD_SIZE_16 | WORD_SIZE_24 | WORD_SIZE_32))) {
  851. int en = gen_cfg.en;
  852. gen_cfg.en = 0;
  853. REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
  854. /* ##### Should DMA be stoped before we change dma size? */
  855. DMA_WR_CMD(port->regi_dmain, dma_w_size);
  856. DMA_WR_CMD(port->regi_dmaout, dma_w_size);
  857. gen_cfg.en = en;
  858. REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
  859. }
  860. spin_unlock_irq(&port->lock);
  861. return return_val;
  862. }
  863. static long sync_serial_ioctl(struct file *file,
  864. unsigned int cmd, unsigned long arg)
  865. {
  866. long ret;
  867. mutex_lock(&sync_serial_mutex);
  868. ret = sync_serial_ioctl_unlocked(file, cmd, arg);
  869. mutex_unlock(&sync_serial_mutex);
  870. return ret;
  871. }
/* NOTE: sync_serial_write does not support concurrency */
/*
 * Copy user data into the port's circular output buffer and start (or
 * continue) transmission, via DMA or via the trdy interrupt in manual
 * mode.  Returns the number of bytes accepted, which may be less than
 * 'count': the copy is truncated at the physical end of the ring rather
 * than wrapped (see comment below), so short writes are normal.
 *
 * Errors: -ENODEV bad minor, -EAGAIN buffer/descriptor chain full,
 * -EFAULT bad user pointer, -EINTR signal while blocking.
 */
static ssize_t sync_serial_write(struct file *file, const char *buf,
	size_t count, loff_t *ppos)
{
	int dev = iminor(file->f_path.dentry->d_inode);
	DECLARE_WAITQUEUE(wait, current);
	struct sync_port *port;
	int trunc_count;		/* bytes actually accepted this call */
	unsigned long flags;
	int bytes_free;
	int out_buf_count;
	unsigned char *rd_ptr;       /* First allocated byte in the buffer */
	unsigned char *wr_ptr;       /* First free byte in the buffer */
	unsigned char *buf_stop_ptr; /* Last byte + 1 */

	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
		DEBUG(printk("Invalid minor %d\n", dev));
		return -ENODEV;
	}
	port = &ports[dev];

	/* |<-            OUT_BUFFER_SIZE                       ->|
	 *           |<-  out_buf_count ->|
	 *           |<- trunc_count ->|          ...->|
	 *  ______________________________________________________
	 * |  free   |   data            |         free           |
	 * |_________|___________________|________________________|
	 *           ^ rd_ptr            ^ wr_ptr
	 */
	DEBUGWRITE(printk(KERN_DEBUG "W d%d c %lu a: %p c: %p\n",
			  port->port_nbr, count, port->active_tr_descr,
			  port->catch_tr_descr));

	/* Read variables that may be updated by interrupts */
	spin_lock_irqsave(&port->lock, flags);
	rd_ptr = port->out_rd_ptr;
	out_buf_count = port->out_buf_count;
	spin_unlock_irqrestore(&port->lock, flags);

	/* Check if resources are available: in DMA mode the descriptor
	 * ring is full when active catches up with catch; in manual mode
	 * when the byte count hits the buffer size. */
	if (port->tr_running &&
	    ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) ||
	     out_buf_count >= OUT_BUFFER_SIZE)) {
		DEBUGWRITE(printk(KERN_DEBUG "sser%d full\n", dev));
		return -EAGAIN;
	}

	buf_stop_ptr = port->out_buffer + OUT_BUFFER_SIZE;

	/* Determine pointer to the first free byte, before copying. */
	wr_ptr = rd_ptr + out_buf_count;
	if (wr_ptr >= buf_stop_ptr)
		wr_ptr -= OUT_BUFFER_SIZE;

	/* If we wrap the ring buffer, let the user space program handle it by
	 * truncating the data. This could be more elegant, small buffer
	 * fragments may occur.
	 */
	bytes_free = OUT_BUFFER_SIZE - out_buf_count;
	if (wr_ptr + bytes_free > buf_stop_ptr)
		bytes_free = buf_stop_ptr - wr_ptr;
	trunc_count = (count < bytes_free) ? count : bytes_free;

	if (copy_from_user(wr_ptr, buf, trunc_count))
		return -EFAULT;

	DEBUGOUTBUF(printk(KERN_DEBUG "%-4d + %-4d = %-4d %p %p %p\n",
			   out_buf_count, trunc_count,
			   port->out_buf_count, port->out_buffer,
			   wr_ptr, buf_stop_ptr));

	/* Make sure transmitter/receiver is running */
	if (!port->started) {
		reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
		reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
		cfg.en = regk_sser_yes;
		rec_cfg.rec_en = port->input;
		REG_WR(sser, port->regi_sser, rw_cfg, cfg);
		REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
		port->started = 1;
	}

	/* Setup wait if blocking.  The queue entry is added before the
	 * data is handed to the transmitter, so a tr interrupt completing
	 * between here and schedule() still wakes us. */
	if (!(file->f_flags & O_NONBLOCK)) {
		add_wait_queue(&port->out_wait_q, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
	}

	spin_lock_irqsave(&port->lock, flags);
	port->out_buf_count += trunc_count;
	if (port->use_dma) {
		start_dma_out(port, wr_ptr, trunc_count);
	} else if (!port->tr_running) {
		reg_sser_rw_intr_mask intr_mask;
		intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
		/* Start sender by writing data */
		send_word(port);
		/* and enable transmitter ready IRQ */
		intr_mask.trdy = 1;
		REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
	}
	spin_unlock_irqrestore(&port->lock, flags);

	/* Exit if non blocking */
	if (file->f_flags & O_NONBLOCK) {
		/* NOTE(review): trunc_count is int but printed with %lu --
		 * debug-only format mismatch; confirm before enabling. */
		DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu %08x\n",
				  port->port_nbr, trunc_count,
				  REG_RD_INT(dma, port->regi_dmaout, r_intr)));
		return trunc_count;
	}

	/* NOTE(review): single sleep, no recheck of buffer state after
	 * wakeup; and a pending signal here returns -EINTR even though
	 * trunc_count bytes were already queued for transmission --
	 * confirm this is the intended contract. */
	schedule();
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&port->out_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu\n",
			  port->port_nbr, trunc_count));
	return trunc_count;
}
/*
 * Read received data out of the port's flip (ring) buffer.
 *
 * "Lazy read": at most the contiguous run up to the physical end of the
 * ring is returned, so callers may get fewer bytes than requested even
 * when more data is buffered.  Starts the port hardware on first use.
 *
 * Errors: -ENODEV bad minor, -EAGAIN no data and O_NONBLOCK,
 * -EINTR signal while sleeping, -EFAULT bad user pointer.
 */
static ssize_t sync_serial_read(struct file * file, char * buf,
				size_t count, loff_t *ppos)
{
	int dev = iminor(file->f_path.dentry->d_inode);
	int avail;		/* contiguous bytes readable from 'start' */
	sync_port *port;
	unsigned char* start;	/* snapshot of readp */
	unsigned char* end;	/* snapshot of writep */
	unsigned long flags;

	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
	{
		DEBUG(printk("Invalid minor %d\n", dev));
		return -ENODEV;
	}
	port = &ports[dev];

	DEBUGREAD(printk("R%d c %d ri %lu wi %lu /%lu\n", dev, count, port->readp - port->flip, port->writep - port->flip, port->in_buffer_size));

	/* Lazily enable the port (both directions) on the first read. */
	if (!port->started)
	{
		reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
		reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
		reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
		cfg.en = regk_sser_yes;
		tr_cfg.tr_en = regk_sser_yes;
		rec_cfg.rec_en = regk_sser_yes;
		REG_WR(sser, port->regi_sser, rw_cfg, cfg);
		REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
		REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
		port->started = 1;
	}

	/* Calculate number of available bytes */
	/* Save pointers to avoid that they are modified by interrupt */
	spin_lock_irqsave(&port->lock, flags);
	start = (unsigned char*)port->readp; /* cast away volatile */
	end = (unsigned char*)port->writep; /* cast away volatile */
	spin_unlock_irqrestore(&port->lock, flags);

	/* readp == writep with !full means the ring is empty. */
	while ((start == end) && !port->full) /* No data */
	{
		DEBUGREAD(printk(KERN_DEBUG "&"));
		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		/* NOTE(review): interruptible_sleep_on() is the deprecated
		 * racy sleep primitive -- a wakeup between the emptiness
		 * check above and the sleep is lost.  The modern
		 * wait_event_interruptible() pattern closes that window;
		 * confirm before changing hardware-coupled code. */
		interruptible_sleep_on(&port->in_wait_q);
		if (signal_pending(current))
			return -EINTR;

		spin_lock_irqsave(&port->lock, flags);
		start = (unsigned char*)port->readp; /* cast away volatile */
		end = (unsigned char*)port->writep; /* cast away volatile */
		spin_unlock_irqrestore(&port->lock, flags);
	}

	/* Lazy read, never return wrapped data. */
	if (port->full)
		avail = port->in_buffer_size;
	else if (end > start)
		avail = end - start;
	else
		avail = port->flip + port->in_buffer_size - start;

	count = count > avail ? avail : count;

	if (copy_to_user(buf, start, count))
		return -EFAULT;

	/* Disable interrupts while updating readp */
	spin_lock_irqsave(&port->lock, flags);
	port->readp += count;
	if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
		port->readp = port->flip;
	port->full = 0;
	spin_unlock_irqrestore(&port->lock, flags);

	DEBUGREAD(printk("r %d\n", count));
	return count;
}
/*
 * Manual-mode transmit: pop one sample from the output ring and write
 * it to the sser transmit-data register, updating out_rd_ptr and
 * out_buf_count.  Called with port->lock held (from sync_serial_write
 * and from the trdy branch of manual_interrupt).
 *
 * NOTE(review): the ioctl path above stores sample_size as word size
 * minus one (7, 11, 15, 23, 31), while these case labels test 8, 12,
 * 16, 24, 32 -- as written none of the ioctl-set values would match and
 * the switch would fall through doing nothing.  The port initialization
 * (outside this chunk) may program sample_size differently; verify
 * which encoding the register actually holds before touching this.
 */
static void send_word(sync_port* port)
{
	reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
	reg_sser_rw_tr_data tr_data =  {0};

	switch(tr_cfg.sample_size)
	{
	case 8:
		port->out_buf_count--;
		tr_data.data = *port->out_rd_ptr++;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
		break;
	case 12:
	{
		/* Two buffer bytes make one 12-bit sample (big-endian-ish:
		 * first byte is the high 8 bits).
		 * NOTE(review): the wrap check runs only after both
		 * increments, so a sample straddling the buffer end would
		 * read one byte past it -- presumably writers keep samples
		 * aligned; confirm. */
		int data = (*port->out_rd_ptr++) << 8;
		data |= *port->out_rd_ptr++;
		port->out_buf_count -= 2;
		tr_data.data = data;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
	}
	break;
	case 16:
		port->out_buf_count -= 2;
		tr_data.data = *(unsigned short *)port->out_rd_ptr;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		port->out_rd_ptr += 2;
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
		break;
	case 24:
		/* 24-bit word is fed as a 16-bit write followed by an
		 * 8-bit write to the same data register. */
		port->out_buf_count -= 3;
		tr_data.data = *(unsigned short *)port->out_rd_ptr;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		port->out_rd_ptr += 2;
		tr_data.data = *port->out_rd_ptr++;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
		break;
	case 32:
		/* 32-bit word as two consecutive 16-bit writes. */
		port->out_buf_count -= 4;
		tr_data.data = *(unsigned short *)port->out_rd_ptr;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		port->out_rd_ptr += 2;
		tr_data.data = *(unsigned short *)port->out_rd_ptr;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		port->out_rd_ptr += 2;
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
		break;
	}
}
/*
 * Queue 'count' bytes at 'data' (a pointer into out_buffer) on the
 * transmit-DMA descriptor ring and start or continue the channel.
 * Called with port->lock held from sync_serial_write().
 *
 * The ring is walked by moving the EOL flag: the newly filled
 * descriptor becomes the end-of-list, and the previous EOL is cleared
 * so the DMA can run through it.
 */
static void start_dma_out(struct sync_port *port,
			  const char *data, int count)
{
	/* Descriptors hold physical addresses; translate the buffer slice. */
	port->active_tr_descr->buf = (char *) virt_to_phys((char *) data);
	port->active_tr_descr->after = port->active_tr_descr->buf + count;
	port->active_tr_descr->intr = 1;

	/* Move the EOL marker forward to this descriptor. */
	port->active_tr_descr->eol = 1;
	port->prev_tr_descr->eol = 0;

	DEBUGTRDMA(printk(KERN_DEBUG "Inserting eolr:%p eol@:%p\n",
		port->prev_tr_descr, port->active_tr_descr));
	port->prev_tr_descr = port->active_tr_descr;
	port->active_tr_descr = phys_to_virt((int) port->active_tr_descr->next);

	if (!port->tr_running) {
		/* First transfer: set up and start a fresh DMA context
		 * pointing at the descriptor just queued, then enable the
		 * sser transmitter. */
		reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser,
			rw_tr_cfg);

		port->out_context.next = 0;
		port->out_context.saved_data =
			(dma_descr_data *)virt_to_phys(port->prev_tr_descr);
		port->out_context.saved_data_buf = port->prev_tr_descr->buf;

		DMA_START_CONTEXT(port->regi_dmaout,
			virt_to_phys((char *)&port->out_context));

		tr_cfg.tr_en = regk_sser_yes;
		REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
		DEBUGTRDMA(printk(KERN_DEBUG "dma s\n"););
	} else {
		/* Channel already active: tell it more data is chained. */
		DMA_CONTINUE_DATA(port->regi_dmaout);
		DEBUGTRDMA(printk(KERN_DEBUG "dma c\n"););
	}

	port->tr_running = 1;
}
  1131. static void start_dma_in(sync_port *port)
  1132. {
  1133. int i;
  1134. char *buf;
  1135. port->writep = port->flip;
  1136. if (port->writep > port->flip + port->in_buffer_size) {
  1137. panic("Offset too large in sync serial driver\n");
  1138. return;
  1139. }
  1140. buf = (char*)virt_to_phys(port->in_buffer);
  1141. for (i = 0; i < NBR_IN_DESCR; i++) {
  1142. port->in_descr[i].buf = buf;
  1143. port->in_descr[i].after = buf + port->inbufchunk;
  1144. port->in_descr[i].intr = 1;
  1145. port->in_descr[i].next = (dma_descr_data*)virt_to_phys(&port->in_descr[i+1]);
  1146. port->in_descr[i].buf = buf;
  1147. buf += port->inbufchunk;
  1148. }
  1149. /* Link the last descriptor to the first */
  1150. port->in_descr[i-1].next = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
  1151. port->in_descr[i-1].eol = regk_sser_yes;
  1152. port->next_rx_desc = &port->in_descr[0];
  1153. port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1];
  1154. port->in_context.saved_data = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
  1155. port->in_context.saved_data_buf = port->in_descr[0].buf;
  1156. DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context));
  1157. }
  1158. #ifdef SYNC_SER_DMA
/*
 * Transmit-DMA interrupt, shared by all ports.  For each enabled DMA
 * port with a pending masked interrupt: on a plain descriptor interrupt,
 * step catch_tr_descr/out_rd_ptr one descriptor forward and reclaim the
 * sent bytes; when the channel stopped at end-of-list, drain all
 * remaining descriptors, reset the bookkeeping and disable the sser
 * transmitter.  Always wakes writers sleeping in sync_serial_write().
 */
static irqreturn_t tr_interrupt(int irq, void *dev_id)
{
	reg_dma_r_masked_intr masked;
	reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
	reg_dma_rw_stat stat;
	int i;
	int found = 0;
	int stop_sser = 0;

	for (i = 0; i < NBR_PORTS; i++) {
		sync_port *port = &ports[i];
		if (!port->enabled || !port->use_dma)
			continue;

		/* IRQ active for the port? */
		masked = REG_RD(dma, port->regi_dmaout, r_masked_intr);
		if (!masked.data)
			continue;

		found = 1;

		/* Check if we should stop the DMA transfer */
		stat = REG_RD(dma, port->regi_dmaout, rw_stat);

		if (stat.list_state == regk_dma_data_at_eol)
			stop_sser = 1;

		/* Clear IRQ */
		REG_WR(dma, port->regi_dmaout, rw_ack_intr, ack_intr);

		if (!stop_sser) {
			/* The DMA has completed a descriptor, EOL was not
			 * encountered, so step relevant descriptor and
			 * datapointers forward. */
			int sent;
			sent = port->catch_tr_descr->after -
				port->catch_tr_descr->buf;
			DEBUGTXINT(printk(KERN_DEBUG "%-4d - %-4d = %-4d\t"
					  "in descr %p (ac: %p)\n",
					  port->out_buf_count, sent,
					  port->out_buf_count - sent,
					  port->catch_tr_descr,
					  port->active_tr_descr););
			port->out_buf_count -= sent;
			port->catch_tr_descr =
				phys_to_virt((int) port->catch_tr_descr->next);
			port->out_rd_ptr =
				phys_to_virt((int) port->catch_tr_descr->buf);
		} else {
			/* NOTE: inner 'i' intentionally shadows the port
			 * loop index; it counts traversed descriptors. */
			int i, sent;
			/* EOL handler.
			 * Note that if an EOL was encountered during the irq
			 * locked section of sync_ser_write the DMA will be
			 * restarted and the eol flag will be cleared.
			 * The remaining descriptors will be traversed by
			 * the descriptor interrupts as usual.
			 */
			i = 0;
			while (!port->catch_tr_descr->eol) {
				sent = port->catch_tr_descr->after -
					port->catch_tr_descr->buf;
				DEBUGOUTBUF(printk(KERN_DEBUG
					"traversing descr %p -%d (%d)\n",
					port->catch_tr_descr,
					sent,
					port->out_buf_count));
				port->out_buf_count -= sent;
				port->catch_tr_descr = phys_to_virt(
					(int)port->catch_tr_descr->next);
				i++;
				if (i >= NBR_OUT_DESCR) {
					/* TODO: Reset and recover */
					panic("sync_serial: missing eol");
				}
			}
			sent = port->catch_tr_descr->after -
				port->catch_tr_descr->buf;
			DEBUGOUTBUF(printk(KERN_DEBUG
				"eol at descr %p -%d (%d)\n",
				port->catch_tr_descr,
				sent,
				port->out_buf_count));
			port->out_buf_count -= sent;

			/* Update read pointer to first free byte, we
			 * may already be writing data there. */
			port->out_rd_ptr =
				phys_to_virt((int) port->catch_tr_descr->after);
			if (port->out_rd_ptr > port->out_buffer +
					OUT_BUFFER_SIZE)
				port->out_rd_ptr = port->out_buffer;

			/* NOTE(review): declaration after statements --
			 * relies on gnu89/C99 mixed declarations. */
			reg_sser_rw_tr_cfg tr_cfg =
				REG_RD(sser, port->regi_sser, rw_tr_cfg);
			DEBUGTXINT(printk(KERN_DEBUG
				"tr_int DMA stop %d, set catch @ %p\n",
				port->out_buf_count,
				port->active_tr_descr));
			if (port->out_buf_count != 0)
				printk(KERN_CRIT "sync_ser: buffer not "
					"empty after eol.\n");
			port->catch_tr_descr = port->active_tr_descr;
			port->tr_running = 0;
			tr_cfg.tr_en = regk_sser_no;
			REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
		}
		/* wake up the waiting process */
		wake_up_interruptible(&port->out_wait_q);
	}
	return IRQ_RETVAL(found);
} /* tr_interrupt */
/*
 * Receive-DMA interrupt, shared by all ports.  For each completed
 * descriptor (until the DMA's current data pointer catches up with
 * next_rx_desc) copy one inbufchunk from the DMA buffer into the flip
 * ring -- splitting the copy when it would run past the ring's end --
 * then advance the EOL marker one descriptor and let the DMA continue.
 * Wakes readers sleeping in sync_serial_read().
 */
static irqreturn_t rx_interrupt(int irq, void *dev_id)
{
	reg_dma_r_masked_intr masked;
	reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};

	int i;
	int found = 0;

	for (i = 0; i < NBR_PORTS; i++)
	{
		sync_port *port = &ports[i];

		if (!port->enabled || !port->use_dma )
			continue;

		masked = REG_RD(dma, port->regi_dmain, r_masked_intr);

		if (masked.data) /* Descriptor interrupt */
		{
			found = 1;
			/* Drain every descriptor the DMA has finished with. */
			while (REG_RD(dma, port->regi_dmain, rw_data) !=
			       virt_to_phys(port->next_rx_desc)) {
				DEBUGRXINT(printk(KERN_DEBUG "!"));
				if (port->writep + port->inbufchunk > port->flip + port->in_buffer_size) {
					/* Chunk straddles the ring end: copy the
					 * tail piece, then wrap for the rest. */
					int first_size = port->flip + port->in_buffer_size - port->writep;
					memcpy((char*)port->writep, phys_to_virt((unsigned)port->next_rx_desc->buf), first_size);
					memcpy(port->flip, phys_to_virt((unsigned)port->next_rx_desc->buf+first_size), port->inbufchunk - first_size);
					port->writep = port->flip + port->inbufchunk - first_size;
				} else {
					memcpy((char*)port->writep,
					       phys_to_virt((unsigned)port->next_rx_desc->buf),
					       port->inbufchunk);
					port->writep += port->inbufchunk;
					if (port->writep >= port->flip + port->in_buffer_size)
						port->writep = port->flip;
				}
				/* writep catching readp means the ring is now
				 * completely full, not empty. */
				if (port->writep == port->readp)
				{
					port->full = 1;
				}

				/* Move the EOL marker one descriptor forward so
				 * the just-consumed slot can be refilled. */
				port->next_rx_desc->eol = 1;
				port->prev_rx_desc->eol = 0;
				/* Cache bug workaround */
				flush_dma_descr(port->prev_rx_desc, 0);
				port->prev_rx_desc = port->next_rx_desc;
				port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next);
				/* Cache bug workaround */
				flush_dma_descr(port->prev_rx_desc, 1);
				/* wake up the waiting process */
				wake_up_interruptible(&port->in_wait_q);
				DMA_CONTINUE(port->regi_dmain);
				REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);

			}
		}
	}
	return IRQ_RETVAL(found);
} /* rx_interrupt */
  1313. #endif /* SYNC_SER_DMA */
  1314. #ifdef SYNC_SER_MANUAL
/*
 * Manual-mode (non-DMA) sser interrupt, shared by all ports.
 * rdav: pull one received sample out of r_rec_data into the flip ring,
 * wrapping and discarding the oldest byte on overrun; wake readers once
 * a full inbufchunk is available.  trdy: feed the next word with
 * send_word(), or mask the trdy interrupt and wake writers when the
 * output buffer is drained.
 */
static irqreturn_t manual_interrupt(int irq, void *dev_id)
{
	int i;
	int found = 0;
	reg_sser_r_masked_intr masked;

	for (i = 0; i < NBR_PORTS; i++)
	{
		sync_port *port = &ports[i];

		/* DMA ports are handled by tr_interrupt/rx_interrupt. */
		if (!port->enabled || port->use_dma)
		{
			continue;
		}

		masked = REG_RD(sser, port->regi_sser, r_masked_intr);
		if (masked.rdav)	/* Data received? */
		{
			reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
			reg_sser_r_rec_data data = REG_RD(sser, port->regi_sser, r_rec_data);
			found = 1;
			/* Read data */
			switch(rec_cfg.sample_size)
			{
			case 8:
				*port->writep++ = data.data & 0xff;
				break;
			case 12:
				/* 12-bit sample stored as two bytes:
				 * high 8 bits then low 4 bits. */
				*port->writep = (data.data & 0x0ff0) >> 4;
				*(port->writep + 1) = data.data & 0x0f;
				port->writep+=2;
				break;
			case 16:
				*(unsigned short*)port->writep = data.data;
				port->writep+=2;
				break;
			case 24:
				/* NOTE(review): stores 4 bytes through the
				 * int cast but advances writep only 3 -- the
				 * 4th byte lands one past the sample and, at
				 * the very end of the ring, one past the
				 * buffer slot; confirm padding/alignment
				 * assumptions before relying on this. */
				*(unsigned int*)port->writep = data.data;
				port->writep+=3;
				break;
			case 32:
				*(unsigned int*)port->writep = data.data;
				port->writep+=4;
				break;
			}

			if (port->writep >= port->flip + port->in_buffer_size) /* Wrap? */
				port->writep = port->flip;
			if (port->writep == port->readp) {
				/* receive buffer overrun, discard oldest data
				 */
				port->readp++;
				if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
					port->readp = port->flip;
			}
			/* Only wake readers once a chunk's worth is buffered,
			 * to avoid a wakeup per sample. */
			if (sync_data_avail(port) >= port->inbufchunk)
				wake_up_interruptible(&port->in_wait_q); /* Wake up application */
		}

		if (masked.trdy)	/* Transmitter ready? */
		{
			found = 1;
			if (port->out_buf_count > 0)	/* More data to send */
				send_word(port);
			else	/* transmission finished */
			{
				reg_sser_rw_intr_mask intr_mask;
				intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
				intr_mask.trdy = 0;
				REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
				wake_up_interruptible(&port->out_wait_q); /* Wake up application */
			}
		}
	}
	return IRQ_RETVAL(found);
}
  1386. #endif
/* Register the driver's init routine (defined earlier in this file)
 * with the kernel's module/initcall machinery. */
module_init(etrax_sync_serial_init);