sync_serial.c 48 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716
  1. /*
  2. * Simple synchronous serial port driver for ETRAX FS and ARTPEC-3.
  3. *
  4. * Copyright (c) 2005, 2008 Axis Communications AB
  5. * Author: Mikael Starvik
  6. *
  7. */
  8. #include <linux/module.h>
  9. #include <linux/kernel.h>
  10. #include <linux/types.h>
  11. #include <linux/errno.h>
  12. #include <linux/major.h>
  13. #include <linux/sched.h>
  14. #include <linux/mutex.h>
  15. #include <linux/interrupt.h>
  16. #include <linux/poll.h>
  17. #include <linux/fs.h>
  18. #include <linux/cdev.h>
  19. #include <linux/device.h>
  20. #include <linux/wait.h>
  21. #include <asm/io.h>
  22. #include <mach/dma.h>
  23. #include <pinmux.h>
  24. #include <hwregs/reg_rdwr.h>
  25. #include <hwregs/sser_defs.h>
  26. #include <hwregs/timer_defs.h>
  27. #include <hwregs/dma_defs.h>
  28. #include <hwregs/dma.h>
  29. #include <hwregs/intr_vect_defs.h>
  30. #include <hwregs/intr_vect.h>
  31. #include <hwregs/reg_map.h>
  32. #include <asm/sync_serial.h>
  33. /* The receiver is a bit tricky because of the continuous stream of data.*/
  34. /* */
  35. /* Three DMA descriptors are linked together. Each DMA descriptor is */
  36. /* responsible for port->bufchunk of a common buffer. */
  37. /* */
  38. /* +---------------------------------------------+ */
  39. /* | +----------+ +----------+ +----------+ | */
  40. /* +-> | Descr[0] |-->| Descr[1] |-->| Descr[2] |-+ */
  41. /* +----------+ +----------+ +----------+ */
  42. /* | | | */
  43. /* v v v */
  44. /* +-------------------------------------+ */
  45. /* | BUFFER | */
  46. /* +-------------------------------------+ */
  47. /* |<- data_avail ->| */
  48. /* readp writep */
  49. /* */
  50. /* If the application keeps up the pace readp will be right after writep.*/
  51. /* If the application can't keep the pace we have to throw away data. */
  52. /* The idea is that readp should be ready with the data pointed out by */
  53. /* Descr[i] when the DMA has filled in Descr[i+1]. */
  54. /* Otherwise we will discard */
  55. /* the rest of the data pointed out by Descr1 and set readp to the start */
  56. /* of Descr2 */
  57. /* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */
  58. /* words can be handled */
  59. #define IN_DESCR_SIZE SSP_INPUT_CHUNK_SIZE
  60. #define NBR_IN_DESCR (8*6)
  61. #define IN_BUFFER_SIZE (IN_DESCR_SIZE * NBR_IN_DESCR)
  62. #define NBR_OUT_DESCR 8
  63. #define OUT_BUFFER_SIZE (1024 * NBR_OUT_DESCR)
  64. #define DEFAULT_FRAME_RATE 0
  65. #define DEFAULT_WORD_RATE 7
  66. /* To be removed when we move to pure udev. */
  67. #define SYNC_SERIAL_MAJOR 125
  68. /* NOTE: Enabling some debug will likely cause overrun or underrun,
  69. * especially if manual mode is used.
  70. */
  71. #define DEBUG(x)
  72. #define DEBUGREAD(x)
  73. #define DEBUGWRITE(x)
  74. #define DEBUGPOLL(x)
  75. #define DEBUGRXINT(x)
  76. #define DEBUGTXINT(x)
  77. #define DEBUGTRDMA(x)
  78. #define DEBUGOUTBUF(x)
/* Records which kind of IRQ/DMA resources sync_serial_open() has
 * registered for a port, so the setup is done only on the first open
 * and torn down appropriately later. */
enum syncser_irq_setup {
	no_irq_setup = 0,	/* nothing claimed yet */
	dma_irq_setup = 1,	/* DMA channels + DMA IRQs claimed */
	manual_irq_setup = 2,	/* single manual-mode IRQ claimed */
};
/* Per-port driver state: register banks, interrupt/DMA identifiers,
 * the receive flip buffer with its DMA descriptors, the transmit
 * descriptor ring, and wait queues for blocking readers/writers. */
struct sync_port {
	unsigned long regi_sser;	/* sser register block */
	unsigned long regi_dmain;	/* input DMA register block */
	unsigned long regi_dmaout;	/* output DMA register block */
	/* Interrupt vectors. */
	unsigned long dma_in_intr_vect;	/* Used for DMA in. */
	unsigned long dma_out_intr_vect;	/* Used for DMA out. */
	unsigned long syncser_intr_vect;	/* Used when no DMA. */
	/* DMA number for in and out. */
	unsigned int dma_in_nbr;
	unsigned int dma_out_nbr;
	/* DMA owner. */
	enum dma_owner req_dma;
	char started;	/* 1 if port has been started */
	char port_nbr;	/* Port 0 or 1 */
	char busy;	/* 1 if port is busy (open() count, max 2) */
	char enabled;	/* 1 if port is enabled */
	char use_dma;	/* 1 if port uses dma */
	char tr_running;	/* 1 while a transmit is in progress */
	enum syncser_irq_setup init_irqs;	/* which IRQ setup open() did */
	int output;	/* non-zero when the port transmits */
	int input;	/* non-zero when the port receives */
	/* Next byte to be read by application */
	unsigned char *readp;
	/* Next byte to be written by etrax */
	unsigned char *writep;
	unsigned int in_buffer_size;
	unsigned int in_buffer_len;	/* bytes available for reading */
	unsigned int inbufchunk;	/* bytes per input DMA descriptor */
	/* Data buffers for in and output. */
	unsigned char out_buffer[OUT_BUFFER_SIZE] __aligned(32);
	unsigned char in_buffer[IN_BUFFER_SIZE] __aligned(32);
	unsigned char flip[IN_BUFFER_SIZE] __aligned(32);
	/* Per-descriptor receive timestamps, indexed by
	 * write_ts_idx (producer) and read_ts_idx (consumer). */
	struct timespec timestamp[NBR_IN_DESCR];
	struct dma_descr_data *next_rx_desc;
	struct dma_descr_data *prev_rx_desc;
	struct timeval last_timestamp;
	int read_ts_idx;	/* next timestamp slot handed to a reader */
	int write_ts_idx;	/* next timestamp slot filled on receive */
	/* Pointer to the first available descriptor in the ring,
	 * unless active_tr_descr == catch_tr_descr and a dma
	 * transfer is active */
	struct dma_descr_data *active_tr_descr;
	/* Pointer to the first allocated descriptor in the ring */
	struct dma_descr_data *catch_tr_descr;
	/* Pointer to the descriptor with the current end-of-list */
	struct dma_descr_data *prev_tr_descr;
	int full;	/* flip-buffer-full flag; cleared by reads
			 * (set by the RX path, not visible here) */
	/* Pointer to the first byte being read by DMA
	 * or current position in out_buffer if not using DMA. */
	unsigned char *out_rd_ptr;
	/* Number of bytes currently locked for being read by DMA */
	int out_buf_count;
	dma_descr_context in_context __aligned(32);
	dma_descr_context out_context __aligned(32);
	dma_descr_data in_descr[NBR_IN_DESCR] __aligned(16);
	dma_descr_data out_descr[NBR_OUT_DESCR] __aligned(16);
	wait_queue_head_t out_wait_q;	/* writers block here */
	wait_queue_head_t in_wait_q;	/* readers block here */
	spinlock_t lock;	/* protects buffer pointers and counters */
};
  145. static DEFINE_MUTEX(sync_serial_mutex);
  146. static int etrax_sync_serial_init(void);
  147. static void initialize_port(int portnbr);
  148. static inline int sync_data_avail(struct sync_port *port);
  149. static int sync_serial_open(struct inode *, struct file *);
  150. static int sync_serial_release(struct inode *, struct file *);
  151. static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);
  152. static long sync_serial_ioctl(struct file *file,
  153. unsigned int cmd, unsigned long arg);
  154. static int sync_serial_ioctl_unlocked(struct file *file,
  155. unsigned int cmd, unsigned long arg);
  156. static ssize_t sync_serial_write(struct file *file, const char __user *buf,
  157. size_t count, loff_t *ppos);
  158. static ssize_t sync_serial_read(struct file *file, char __user *buf,
  159. size_t count, loff_t *ppos);
  160. #if ((defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
  161. defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
  162. (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
  163. defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)))
  164. #define SYNC_SER_DMA
  165. #else
  166. #define SYNC_SER_MANUAL
  167. #endif
  168. #ifdef SYNC_SER_DMA
  169. static void start_dma_out(struct sync_port *port, const char *data, int count);
  170. static void start_dma_in(struct sync_port *port);
  171. static irqreturn_t tr_interrupt(int irq, void *dev_id);
  172. static irqreturn_t rx_interrupt(int irq, void *dev_id);
  173. #endif
  174. #ifdef SYNC_SER_MANUAL
  175. static void send_word(struct sync_port *port);
  176. static irqreturn_t manual_interrupt(int irq, void *dev_id);
  177. #endif
  178. #define artpec_pinmux_alloc_fixed crisv32_pinmux_alloc_fixed
  179. #define artpec_request_dma crisv32_request_dma
  180. #define artpec_free_dma crisv32_free_dma
  181. #ifdef CONFIG_ETRAXFS
  182. /* ETRAX FS */
  183. #define DMA_OUT_NBR0 SYNC_SER0_TX_DMA_NBR
  184. #define DMA_IN_NBR0 SYNC_SER0_RX_DMA_NBR
  185. #define DMA_OUT_NBR1 SYNC_SER1_TX_DMA_NBR
  186. #define DMA_IN_NBR1 SYNC_SER1_RX_DMA_NBR
  187. #define PINMUX_SSER0 pinmux_sser0
  188. #define PINMUX_SSER1 pinmux_sser1
  189. #define SYNCSER_INST0 regi_sser0
  190. #define SYNCSER_INST1 regi_sser1
  191. #define SYNCSER_INTR_VECT0 SSER0_INTR_VECT
  192. #define SYNCSER_INTR_VECT1 SSER1_INTR_VECT
  193. #define OUT_DMA_INST0 regi_dma4
  194. #define IN_DMA_INST0 regi_dma5
  195. #define DMA_OUT_INTR_VECT0 DMA4_INTR_VECT
  196. #define DMA_OUT_INTR_VECT1 DMA7_INTR_VECT
  197. #define DMA_IN_INTR_VECT0 DMA5_INTR_VECT
  198. #define DMA_IN_INTR_VECT1 DMA6_INTR_VECT
  199. #define REQ_DMA_SYNCSER0 dma_sser0
  200. #define REQ_DMA_SYNCSER1 dma_sser1
  201. #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
  202. #define PORT1_DMA 1
  203. #else
  204. #define PORT1_DMA 0
  205. #endif
  206. #elif defined(CONFIG_CRIS_MACH_ARTPEC3)
  207. /* ARTPEC-3 */
  208. #define DMA_OUT_NBR0 SYNC_SER_TX_DMA_NBR
  209. #define DMA_IN_NBR0 SYNC_SER_RX_DMA_NBR
  210. #define PINMUX_SSER0 pinmux_sser
  211. #define SYNCSER_INST0 regi_sser
  212. #define SYNCSER_INTR_VECT0 SSER_INTR_VECT
  213. #define OUT_DMA_INST0 regi_dma6
  214. #define IN_DMA_INST0 regi_dma7
  215. #define DMA_OUT_INTR_VECT0 DMA6_INTR_VECT
  216. #define DMA_IN_INTR_VECT0 DMA7_INTR_VECT
  217. #define REQ_DMA_SYNCSER0 dma_sser
  218. #define REQ_DMA_SYNCSER1 dma_sser
  219. #endif
  220. #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)
  221. #define PORT0_DMA 1
  222. #else
  223. #define PORT0_DMA 0
  224. #endif
  225. /* The ports */
/* The ports */
static struct sync_port ports[] = {
	{
		/* Port 0: present on both ETRAX FS and ARTPEC-3. */
		.regi_sser = SYNCSER_INST0,
		.regi_dmaout = OUT_DMA_INST0,
		.regi_dmain = IN_DMA_INST0,
		.use_dma = PORT0_DMA,
		.dma_in_intr_vect = DMA_IN_INTR_VECT0,
		.dma_out_intr_vect = DMA_OUT_INTR_VECT0,
		.dma_in_nbr = DMA_IN_NBR0,
		.dma_out_nbr = DMA_OUT_NBR0,
		.req_dma = REQ_DMA_SYNCSER0,
		.syncser_intr_vect = SYNCSER_INTR_VECT0,
	},
#ifdef CONFIG_ETRAXFS
	{
		/* Port 1: ETRAX FS only; its DMA register instances
		 * are hard-wired to dma6/dma7. */
		.regi_sser = SYNCSER_INST1,
		.regi_dmaout = regi_dma6,
		.regi_dmain = regi_dma7,
		.use_dma = PORT1_DMA,
		.dma_in_intr_vect = DMA_IN_INTR_VECT1,
		.dma_out_intr_vect = DMA_OUT_INTR_VECT1,
		.dma_in_nbr = DMA_IN_NBR1,
		.dma_out_nbr = DMA_OUT_NBR1,
		.req_dma = REQ_DMA_SYNCSER1,
		.syncser_intr_vect = SYNCSER_INTR_VECT1,
	},
#endif
};
  254. #define NBR_PORTS ARRAY_SIZE(ports)
/* File operations for the syncser character device nodes. */
static const struct file_operations syncser_fops = {
	.owner = THIS_MODULE,
	.write = sync_serial_write,
	.read = sync_serial_read,
	.poll = sync_serial_poll,
	.unlocked_ioctl = sync_serial_ioctl,
	.open = sync_serial_open,
	.release = sync_serial_release,
	/* The device is a byte stream; seeking is a silent no-op. */
	.llseek = noop_llseek,
};
  265. static dev_t syncser_first;
  266. static int minor_count = NBR_PORTS;
  267. #define SYNCSER_NAME "syncser"
  268. static struct cdev *syncser_cdev;
  269. static struct class *syncser_class;
  270. static void sync_serial_start_port(struct sync_port *port)
  271. {
  272. reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
  273. reg_sser_rw_tr_cfg tr_cfg =
  274. REG_RD(sser, port->regi_sser, rw_tr_cfg);
  275. reg_sser_rw_rec_cfg rec_cfg =
  276. REG_RD(sser, port->regi_sser, rw_rec_cfg);
  277. cfg.en = regk_sser_yes;
  278. tr_cfg.tr_en = regk_sser_yes;
  279. rec_cfg.rec_en = regk_sser_yes;
  280. REG_WR(sser, port->regi_sser, rw_cfg, cfg);
  281. REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
  282. REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
  283. port->started = 1;
  284. }
  285. static void __init initialize_port(int portnbr)
  286. {
  287. struct sync_port *port = &ports[portnbr];
  288. reg_sser_rw_cfg cfg = { 0 };
  289. reg_sser_rw_frm_cfg frm_cfg = { 0 };
  290. reg_sser_rw_tr_cfg tr_cfg = { 0 };
  291. reg_sser_rw_rec_cfg rec_cfg = { 0 };
  292. DEBUG(pr_info("Init sync serial port %d\n", portnbr));
  293. port->port_nbr = portnbr;
  294. port->init_irqs = no_irq_setup;
  295. port->out_rd_ptr = port->out_buffer;
  296. port->out_buf_count = 0;
  297. port->output = 1;
  298. port->input = 0;
  299. port->readp = port->flip;
  300. port->writep = port->flip;
  301. port->in_buffer_size = IN_BUFFER_SIZE;
  302. port->in_buffer_len = 0;
  303. port->inbufchunk = IN_DESCR_SIZE;
  304. port->read_ts_idx = 0;
  305. port->write_ts_idx = 0;
  306. init_waitqueue_head(&port->out_wait_q);
  307. init_waitqueue_head(&port->in_wait_q);
  308. spin_lock_init(&port->lock);
  309. cfg.out_clk_src = regk_sser_intern_clk;
  310. cfg.out_clk_pol = regk_sser_pos;
  311. cfg.clk_od_mode = regk_sser_no;
  312. cfg.clk_dir = regk_sser_out;
  313. cfg.gate_clk = regk_sser_no;
  314. cfg.base_freq = regk_sser_f29_493;
  315. cfg.clk_div = 256;
  316. REG_WR(sser, port->regi_sser, rw_cfg, cfg);
  317. frm_cfg.wordrate = DEFAULT_WORD_RATE;
  318. frm_cfg.type = regk_sser_edge;
  319. frm_cfg.frame_pin_dir = regk_sser_out;
  320. frm_cfg.frame_pin_use = regk_sser_frm;
  321. frm_cfg.status_pin_dir = regk_sser_in;
  322. frm_cfg.status_pin_use = regk_sser_hold;
  323. frm_cfg.out_on = regk_sser_tr;
  324. frm_cfg.tr_delay = 1;
  325. REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
  326. tr_cfg.urun_stop = regk_sser_no;
  327. tr_cfg.sample_size = 7;
  328. tr_cfg.sh_dir = regk_sser_msbfirst;
  329. tr_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
  330. #if 0
  331. tr_cfg.rate_ctrl = regk_sser_bulk;
  332. tr_cfg.data_pin_use = regk_sser_dout;
  333. #else
  334. tr_cfg.rate_ctrl = regk_sser_iso;
  335. tr_cfg.data_pin_use = regk_sser_dout;
  336. #endif
  337. tr_cfg.bulk_wspace = 1;
  338. REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
  339. rec_cfg.sample_size = 7;
  340. rec_cfg.sh_dir = regk_sser_msbfirst;
  341. rec_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
  342. rec_cfg.fifo_thr = regk_sser_inf;
  343. REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
  344. #ifdef SYNC_SER_DMA
  345. {
  346. int i;
  347. /* Setup the descriptor ring for dma out/transmit. */
  348. for (i = 0; i < NBR_OUT_DESCR; i++) {
  349. dma_descr_data *descr = &port->out_descr[i];
  350. descr->wait = 0;
  351. descr->intr = 1;
  352. descr->eol = 0;
  353. descr->out_eop = 0;
  354. descr->next =
  355. (dma_descr_data *)virt_to_phys(&descr[i+1]);
  356. }
  357. }
  358. /* Create a ring from the list. */
  359. port->out_descr[NBR_OUT_DESCR-1].next =
  360. (dma_descr_data *)virt_to_phys(&port->out_descr[0]);
  361. /* Setup context for traversing the ring. */
  362. port->active_tr_descr = &port->out_descr[0];
  363. port->prev_tr_descr = &port->out_descr[NBR_OUT_DESCR-1];
  364. port->catch_tr_descr = &port->out_descr[0];
  365. #endif
  366. }
/* Number of received bytes waiting in the port's flip buffer. */
static inline int sync_data_avail(struct sync_port *port)
{
	return port->in_buffer_len;
}
/* open() handler.  A port may be opened twice (intended as one reader
 * plus one writer).  The first open claims and arms the port's IRQs
 * and, when configured, its DMA channels; subsequent opens only bump
 * the use count.  Returns 0 on success, -ENODEV for a bad/disabled
 * minor, -EBUSY when the port already has two users or a resource
 * cannot be claimed. */
static int sync_serial_open(struct inode *inode, struct file *file)
{
	int ret = 0;
	int dev = iminor(inode);
	struct sync_port *port;
#ifdef SYNC_SER_DMA
	reg_dma_rw_cfg cfg = { .en = regk_dma_yes };
	reg_dma_rw_intr_mask intr_mask = { .data = regk_dma_yes };
#endif

	DEBUG(pr_debug("Open sync serial port %d\n", dev));

	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
		DEBUG(pr_info("Invalid minor %d\n", dev));
		return -ENODEV;
	}
	port = &ports[dev];
	/* Allow open this device twice (assuming one reader and one writer) */
	if (port->busy == 2) {
		DEBUG(pr_info("syncser%d is busy\n", dev));
		return -EBUSY;
	}

	mutex_lock(&sync_serial_mutex);

	/* Clear any stale date left in the flip buffer */
	port->readp = port->writep = port->flip;
	port->in_buffer_len = 0;
	port->read_ts_idx = 0;
	port->write_ts_idx = 0;

	if (port->init_irqs != no_irq_setup) {
		/* Init only on first call. */
		port->busy++;
		mutex_unlock(&sync_serial_mutex);
		return 0;
	}
	if (port->use_dma) {
#ifdef SYNC_SER_DMA
		const char *tmp;
		DEBUG(pr_info("Using DMA for syncser%d\n", dev));

		/* Claim TX IRQ, TX DMA, RX IRQ, RX DMA in order; each
		 * failure path releases everything acquired so far. */
		tmp = dev == 0 ? "syncser0 tx" : "syncser1 tx";
		if (request_irq(port->dma_out_intr_vect, tr_interrupt, 0,
				tmp, port)) {
			pr_err("Can't alloc syncser%d TX IRQ", dev);
			ret = -EBUSY;
			goto unlock_and_exit;
		}
		if (artpec_request_dma(port->dma_out_nbr, tmp,
				DMA_VERBOSE_ON_ERROR, 0, port->req_dma)) {
			free_irq(port->dma_out_intr_vect, port);
			pr_err("Can't alloc syncser%d TX DMA", dev);
			ret = -EBUSY;
			goto unlock_and_exit;
		}
		tmp = dev == 0 ? "syncser0 rx" : "syncser1 rx";
		if (request_irq(port->dma_in_intr_vect, rx_interrupt, 0,
				tmp, port)) {
			artpec_free_dma(port->dma_out_nbr);
			free_irq(port->dma_out_intr_vect, port);
			pr_err("Can't alloc syncser%d RX IRQ", dev);
			ret = -EBUSY;
			goto unlock_and_exit;
		}
		if (artpec_request_dma(port->dma_in_nbr, tmp,
				DMA_VERBOSE_ON_ERROR, 0, port->req_dma)) {
			artpec_free_dma(port->dma_out_nbr);
			free_irq(port->dma_out_intr_vect, port);
			free_irq(port->dma_in_intr_vect, port);
			pr_err("Can't alloc syncser%d RX DMA", dev);
			ret = -EBUSY;
			goto unlock_and_exit;
		}
		/* Enable DMAs */
		REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
		REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
		/* Enable DMA IRQs */
		REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
		REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
		/* Set up wordsize = 1 for DMAs. */
		DMA_WR_CMD(port->regi_dmain, regk_dma_set_w_size1);
		DMA_WR_CMD(port->regi_dmaout, regk_dma_set_w_size1);

		/* Receive side is armed immediately; transmit DMA is
		 * started on demand by the write path. */
		start_dma_in(port);
		port->init_irqs = dma_irq_setup;
#endif
	} else { /* !port->use_dma */
#ifdef SYNC_SER_MANUAL
		const char *tmp = dev == 0 ? "syncser0 manual irq" :
					     "syncser1 manual irq";
		if (request_irq(port->syncser_intr_vect, manual_interrupt,
				0, tmp, port)) {
			pr_err("Can't alloc syncser%d manual irq",
			       dev);
			ret = -EBUSY;
			goto unlock_and_exit;
		}
		port->init_irqs = manual_irq_setup;
#else
		panic("sync_serial: Manual mode not supported\n");
#endif /* SYNC_SER_MANUAL */
	}
	port->busy++;
	ret = 0;

unlock_and_exit:
	mutex_unlock(&sync_serial_mutex);
	return ret;
}
  473. static int sync_serial_release(struct inode *inode, struct file *file)
  474. {
  475. int dev = iminor(inode);
  476. struct sync_port *port;
  477. if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
  478. DEBUG(pr_info("Invalid minor %d\n", dev));
  479. return -ENODEV;
  480. }
  481. port = &ports[dev];
  482. if (port->busy)
  483. port->busy--;
  484. if (!port->busy)
  485. /* XXX */;
  486. return 0;
  487. }
/* poll()/select() handler.  Starts the port hardware on first use,
 * then reports POLLOUT when the transmit ring has descriptor and
 * buffer space (or no transfer is running at all) and POLLIN once at
 * least one inbufchunk of received data is available. */
static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
{
	int dev = iminor(file_inode(file));
	unsigned int mask = 0;
	struct sync_port *port;
	DEBUGPOLL(
	static unsigned int prev_mask;
	);
	port = &ports[dev];

	if (!port->started)
		sync_serial_start_port(port);

	poll_wait(file, &port->out_wait_q, wait);
	poll_wait(file, &port->in_wait_q, wait);

	/* No active transfer, descriptors are available */
	if (port->output && !port->tr_running)
		mask |= POLLOUT | POLLWRNORM;

	/* Descriptor and buffer space available. */
	if (port->output &&
	    port->active_tr_descr != port->catch_tr_descr &&
	    port->out_buf_count < OUT_BUFFER_SIZE)
		mask |= POLLOUT | POLLWRNORM;

	/* At least an inbufchunk of data */
	if (port->input && sync_data_avail(port) >= port->inbufchunk)
		mask |= POLLIN | POLLRDNORM;

	/* Only log when the reported mask changes, to limit noise. */
	DEBUGPOLL(
	if (mask != prev_mask)
		pr_info("sync_serial_poll: mask 0x%08X %s %s\n",
			mask,
			mask & POLLOUT ? "POLLOUT" : "",
			mask & POLLIN ? "POLLIN" : "");
	prev_mask = mask;
	);
	return mask;
}
/* Common backend for read(2) and the SSP_INPUT ioctl.
 * Copies at most <count> bytes out of the receive flip buffer into
 * user space; never returns data that wraps past the buffer end
 * ("lazy read" - the caller simply reads again for the remainder).
 * If <ts> is non-NULL, the timestamp of the first returned descriptor
 * chunk is copied out; for timestamps to stay aligned with the data
 * the caller must then read in multiples of IN_DESCR_SIZE.
 * Returns bytes read, -EAGAIN (non-blocking, no data), -EINTR on
 * signal, -ENODEV for a bad minor, -EFAULT on copy failure. */
static ssize_t __sync_serial_read(struct file *file,
				  char __user *buf,
				  size_t count,
				  loff_t *ppos,
				  struct timespec *ts)
{
	unsigned long flags;
	int dev = MINOR(file_inode(file)->i_rdev);
	int avail;
	struct sync_port *port;
	unsigned char *start;
	unsigned char *end;

	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
		DEBUG(pr_info("Invalid minor %d\n", dev));
		return -ENODEV;
	}
	port = &ports[dev];

	if (!port->started)
		sync_serial_start_port(port);

	/* Calculate number of available bytes */
	/* Save pointers to avoid that they are modified by interrupt */
	spin_lock_irqsave(&port->lock, flags);
	start = port->readp;
	end = port->writep;
	spin_unlock_irqrestore(&port->lock, flags);

	while ((start == end) && !port->in_buffer_len) {
		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		/* NOTE(review): start/end are stale snapshots inside the
		 * wait condition, so it effectively reduces to port->full;
		 * the loop re-reads the live pointers below before
		 * trusting the wakeup - confirm the RX path also wakes
		 * in_wait_q on ordinary (non-full) arrivals. */
		wait_event_interruptible(port->in_wait_q,
					 !(start == end && !port->full));
		if (signal_pending(current))
			return -EINTR;

		spin_lock_irqsave(&port->lock, flags);
		start = port->readp;
		end = port->writep;
		spin_unlock_irqrestore(&port->lock, flags);
	}

	DEBUGREAD(pr_info("R%d c %d ri %u wi %u /%u\n",
			  dev, count,
			  start - port->flip, end - port->flip,
			  port->in_buffer_size));

	/* Lazy read, never return wrapped data. */
	if (end > start)
		avail = end - start;
	else
		avail = port->flip + port->in_buffer_size - start;

	count = count > avail ? avail : count;
	if (copy_to_user(buf, start, count))
		return -EFAULT;

	/* If timestamp requested, find timestamp of first returned byte
	 * and copy it.
	 * N.B: Applications that request timstamps MUST read data in
	 * chunks that are multiples of IN_DESCR_SIZE.
	 * Otherwise the timestamps will not be aligned to the data read.
	 */
	if (ts != NULL) {
		int idx = port->read_ts_idx;

		memcpy(ts, &port->timestamp[idx], sizeof(struct timespec));
		port->read_ts_idx += count / IN_DESCR_SIZE;
		if (port->read_ts_idx >= NBR_IN_DESCR)
			port->read_ts_idx = 0;
	}

	/* Consume the bytes: advance the read pointer under the lock. */
	spin_lock_irqsave(&port->lock, flags);
	port->readp += count;
	/* Check for wrap */
	if (port->readp >= port->flip + port->in_buffer_size)
		port->readp = port->flip;
	port->in_buffer_len -= count;
	port->full = 0;
	spin_unlock_irqrestore(&port->lock, flags);

	DEBUGREAD(pr_info("r %d\n", count));
	return count;
}
  595. static ssize_t sync_serial_input(struct file *file, unsigned long arg)
  596. {
  597. struct ssp_request req;
  598. int count;
  599. int ret;
  600. /* Copy the request structure from user-mode. */
  601. ret = copy_from_user(&req, (struct ssp_request __user *)arg,
  602. sizeof(struct ssp_request));
  603. if (ret) {
  604. DEBUG(pr_info("sync_serial_input copy from user failed\n"));
  605. return -EFAULT;
  606. }
  607. /* To get the timestamps aligned, make sure that 'len'
  608. * is a multiple of IN_DESCR_SIZE.
  609. */
  610. if ((req.len % IN_DESCR_SIZE) != 0) {
  611. DEBUG(pr_info("sync_serial: req.len %x, IN_DESCR_SIZE %x\n",
  612. req.len, IN_DESCR_SIZE));
  613. return -EFAULT;
  614. }
  615. /* Do the actual read. */
  616. /* Note that req.buf is actually a pointer to user space. */
  617. count = __sync_serial_read(file, req.buf, req.len,
  618. NULL, &req.ts);
  619. if (count < 0) {
  620. DEBUG(pr_info("sync_serial_input read failed\n"));
  621. return count;
  622. }
  623. /* Copy the request back to user-mode. */
  624. ret = copy_to_user((struct ssp_request __user *)arg, &req,
  625. sizeof(struct ssp_request));
  626. if (ret) {
  627. DEBUG(pr_info("syncser input copy2user failed\n"));
  628. return -EFAULT;
  629. }
  630. /* Return the number of bytes read. */
  631. return count;
  632. }
  633. static int sync_serial_ioctl_unlocked(struct file *file,
  634. unsigned int cmd, unsigned long arg)
  635. {
  636. int return_val = 0;
  637. int dma_w_size = regk_dma_set_w_size1;
  638. int dev = iminor(file_inode(file));
  639. struct sync_port *port;
  640. reg_sser_rw_tr_cfg tr_cfg;
  641. reg_sser_rw_rec_cfg rec_cfg;
  642. reg_sser_rw_frm_cfg frm_cfg;
  643. reg_sser_rw_cfg gen_cfg;
  644. reg_sser_rw_intr_mask intr_mask;
  645. if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
  646. DEBUG(pr_info("Invalid minor %d\n", dev));
  647. return -1;
  648. }
  649. if (cmd == SSP_INPUT)
  650. return sync_serial_input(file, arg);
  651. port = &ports[dev];
  652. spin_lock_irq(&port->lock);
  653. tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
  654. rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
  655. frm_cfg = REG_RD(sser, port->regi_sser, rw_frm_cfg);
  656. gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg);
  657. intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
  658. switch (cmd) {
  659. case SSP_SPEED:
  660. if (GET_SPEED(arg) == CODEC) {
  661. unsigned int freq;
  662. gen_cfg.base_freq = regk_sser_f32;
  663. /* Clock divider will internally be
  664. * gen_cfg.clk_div + 1.
  665. */
  666. freq = GET_FREQ(arg);
  667. switch (freq) {
  668. case FREQ_32kHz:
  669. case FREQ_64kHz:
  670. case FREQ_128kHz:
  671. case FREQ_256kHz:
  672. gen_cfg.clk_div = 125 *
  673. (1 << (freq - FREQ_256kHz)) - 1;
  674. break;
  675. case FREQ_512kHz:
  676. gen_cfg.clk_div = 62;
  677. break;
  678. case FREQ_1MHz:
  679. case FREQ_2MHz:
  680. case FREQ_4MHz:
  681. gen_cfg.clk_div = 8 * (1 << freq) - 1;
  682. break;
  683. }
  684. } else if (GET_SPEED(arg) == CODEC_f32768) {
  685. gen_cfg.base_freq = regk_sser_f32_768;
  686. switch (GET_FREQ(arg)) {
  687. case FREQ_4096kHz:
  688. gen_cfg.clk_div = 7;
  689. break;
  690. default:
  691. spin_unlock_irq(&port->lock);
  692. return -EINVAL;
  693. }
  694. } else {
  695. gen_cfg.base_freq = regk_sser_f29_493;
  696. switch (GET_SPEED(arg)) {
  697. case SSP150:
  698. gen_cfg.clk_div = 29493000 / (150 * 8) - 1;
  699. break;
  700. case SSP300:
  701. gen_cfg.clk_div = 29493000 / (300 * 8) - 1;
  702. break;
  703. case SSP600:
  704. gen_cfg.clk_div = 29493000 / (600 * 8) - 1;
  705. break;
  706. case SSP1200:
  707. gen_cfg.clk_div = 29493000 / (1200 * 8) - 1;
  708. break;
  709. case SSP2400:
  710. gen_cfg.clk_div = 29493000 / (2400 * 8) - 1;
  711. break;
  712. case SSP4800:
  713. gen_cfg.clk_div = 29493000 / (4800 * 8) - 1;
  714. break;
  715. case SSP9600:
  716. gen_cfg.clk_div = 29493000 / (9600 * 8) - 1;
  717. break;
  718. case SSP19200:
  719. gen_cfg.clk_div = 29493000 / (19200 * 8) - 1;
  720. break;
  721. case SSP28800:
  722. gen_cfg.clk_div = 29493000 / (28800 * 8) - 1;
  723. break;
  724. case SSP57600:
  725. gen_cfg.clk_div = 29493000 / (57600 * 8) - 1;
  726. break;
  727. case SSP115200:
  728. gen_cfg.clk_div = 29493000 / (115200 * 8) - 1;
  729. break;
  730. case SSP230400:
  731. gen_cfg.clk_div = 29493000 / (230400 * 8) - 1;
  732. break;
  733. case SSP460800:
  734. gen_cfg.clk_div = 29493000 / (460800 * 8) - 1;
  735. break;
  736. case SSP921600:
  737. gen_cfg.clk_div = 29493000 / (921600 * 8) - 1;
  738. break;
  739. case SSP3125000:
  740. gen_cfg.base_freq = regk_sser_f100;
  741. gen_cfg.clk_div = 100000000 / (3125000 * 8) - 1;
  742. break;
  743. }
  744. }
  745. frm_cfg.wordrate = GET_WORD_RATE(arg);
  746. break;
  747. case SSP_MODE:
  748. switch (arg) {
  749. case MASTER_OUTPUT:
  750. port->output = 1;
  751. port->input = 0;
  752. frm_cfg.out_on = regk_sser_tr;
  753. frm_cfg.frame_pin_dir = regk_sser_out;
  754. gen_cfg.clk_dir = regk_sser_out;
  755. break;
  756. case SLAVE_OUTPUT:
  757. port->output = 1;
  758. port->input = 0;
  759. frm_cfg.frame_pin_dir = regk_sser_in;
  760. gen_cfg.clk_dir = regk_sser_in;
  761. break;
  762. case MASTER_INPUT:
  763. port->output = 0;
  764. port->input = 1;
  765. frm_cfg.frame_pin_dir = regk_sser_out;
  766. frm_cfg.out_on = regk_sser_intern_tb;
  767. gen_cfg.clk_dir = regk_sser_out;
  768. break;
  769. case SLAVE_INPUT:
  770. port->output = 0;
  771. port->input = 1;
  772. frm_cfg.frame_pin_dir = regk_sser_in;
  773. gen_cfg.clk_dir = regk_sser_in;
  774. break;
  775. case MASTER_BIDIR:
  776. port->output = 1;
  777. port->input = 1;
  778. frm_cfg.frame_pin_dir = regk_sser_out;
  779. frm_cfg.out_on = regk_sser_intern_tb;
  780. gen_cfg.clk_dir = regk_sser_out;
  781. break;
  782. case SLAVE_BIDIR:
  783. port->output = 1;
  784. port->input = 1;
  785. frm_cfg.frame_pin_dir = regk_sser_in;
  786. gen_cfg.clk_dir = regk_sser_in;
  787. break;
  788. default:
  789. spin_unlock_irq(&port->lock);
  790. return -EINVAL;
  791. }
  792. if (!port->use_dma || arg == MASTER_OUTPUT ||
  793. arg == SLAVE_OUTPUT)
  794. intr_mask.rdav = regk_sser_yes;
  795. break;
  796. case SSP_FRAME_SYNC:
  797. if (arg & NORMAL_SYNC) {
  798. frm_cfg.rec_delay = 1;
  799. frm_cfg.tr_delay = 1;
  800. } else if (arg & EARLY_SYNC)
  801. frm_cfg.rec_delay = frm_cfg.tr_delay = 0;
  802. else if (arg & LATE_SYNC) {
  803. frm_cfg.tr_delay = 2;
  804. frm_cfg.rec_delay = 2;
  805. } else if (arg & SECOND_WORD_SYNC) {
  806. frm_cfg.rec_delay = 7;
  807. frm_cfg.tr_delay = 1;
  808. }
  809. tr_cfg.bulk_wspace = frm_cfg.tr_delay;
  810. frm_cfg.early_wend = regk_sser_yes;
  811. if (arg & BIT_SYNC)
  812. frm_cfg.type = regk_sser_edge;
  813. else if (arg & WORD_SYNC)
  814. frm_cfg.type = regk_sser_level;
  815. else if (arg & EXTENDED_SYNC)
  816. frm_cfg.early_wend = regk_sser_no;
  817. if (arg & SYNC_ON)
  818. frm_cfg.frame_pin_use = regk_sser_frm;
  819. else if (arg & SYNC_OFF)
  820. frm_cfg.frame_pin_use = regk_sser_gio0;
  821. dma_w_size = regk_dma_set_w_size2;
  822. if (arg & WORD_SIZE_8) {
  823. rec_cfg.sample_size = tr_cfg.sample_size = 7;
  824. dma_w_size = regk_dma_set_w_size1;
  825. } else if (arg & WORD_SIZE_12)
  826. rec_cfg.sample_size = tr_cfg.sample_size = 11;
  827. else if (arg & WORD_SIZE_16)
  828. rec_cfg.sample_size = tr_cfg.sample_size = 15;
  829. else if (arg & WORD_SIZE_24)
  830. rec_cfg.sample_size = tr_cfg.sample_size = 23;
  831. else if (arg & WORD_SIZE_32)
  832. rec_cfg.sample_size = tr_cfg.sample_size = 31;
  833. if (arg & BIT_ORDER_MSB)
  834. rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
  835. else if (arg & BIT_ORDER_LSB)
  836. rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_lsbfirst;
  837. if (arg & FLOW_CONTROL_ENABLE) {
  838. frm_cfg.status_pin_use = regk_sser_frm;
  839. rec_cfg.fifo_thr = regk_sser_thr16;
  840. } else if (arg & FLOW_CONTROL_DISABLE) {
  841. frm_cfg.status_pin_use = regk_sser_gio0;
  842. rec_cfg.fifo_thr = regk_sser_inf;
  843. }
  844. if (arg & CLOCK_NOT_GATED)
  845. gen_cfg.gate_clk = regk_sser_no;
  846. else if (arg & CLOCK_GATED)
  847. gen_cfg.gate_clk = regk_sser_yes;
  848. break;
  849. case SSP_IPOLARITY:
  850. /* NOTE!! negedge is considered NORMAL */
  851. if (arg & CLOCK_NORMAL)
  852. rec_cfg.clk_pol = regk_sser_neg;
  853. else if (arg & CLOCK_INVERT)
  854. rec_cfg.clk_pol = regk_sser_pos;
  855. if (arg & FRAME_NORMAL)
  856. frm_cfg.level = regk_sser_pos_hi;
  857. else if (arg & FRAME_INVERT)
  858. frm_cfg.level = regk_sser_neg_lo;
  859. if (arg & STATUS_NORMAL)
  860. gen_cfg.hold_pol = regk_sser_pos;
  861. else if (arg & STATUS_INVERT)
  862. gen_cfg.hold_pol = regk_sser_neg;
  863. break;
  864. case SSP_OPOLARITY:
  865. if (arg & CLOCK_NORMAL)
  866. gen_cfg.out_clk_pol = regk_sser_pos;
  867. else if (arg & CLOCK_INVERT)
  868. gen_cfg.out_clk_pol = regk_sser_neg;
  869. if (arg & FRAME_NORMAL)
  870. frm_cfg.level = regk_sser_pos_hi;
  871. else if (arg & FRAME_INVERT)
  872. frm_cfg.level = regk_sser_neg_lo;
  873. if (arg & STATUS_NORMAL)
  874. gen_cfg.hold_pol = regk_sser_pos;
  875. else if (arg & STATUS_INVERT)
  876. gen_cfg.hold_pol = regk_sser_neg;
  877. break;
  878. case SSP_SPI:
  879. rec_cfg.fifo_thr = regk_sser_inf;
  880. rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
  881. rec_cfg.sample_size = tr_cfg.sample_size = 7;
  882. frm_cfg.frame_pin_use = regk_sser_frm;
  883. frm_cfg.type = regk_sser_level;
  884. frm_cfg.tr_delay = 1;
  885. frm_cfg.level = regk_sser_neg_lo;
  886. if (arg & SPI_SLAVE) {
  887. rec_cfg.clk_pol = regk_sser_neg;
  888. gen_cfg.clk_dir = regk_sser_in;
  889. port->input = 1;
  890. port->output = 0;
  891. } else {
  892. gen_cfg.out_clk_pol = regk_sser_pos;
  893. port->input = 0;
  894. port->output = 1;
  895. gen_cfg.clk_dir = regk_sser_out;
  896. }
  897. break;
  898. case SSP_INBUFCHUNK:
  899. break;
  900. default:
  901. return_val = -1;
  902. }
  903. if (port->started) {
  904. rec_cfg.rec_en = port->input;
  905. gen_cfg.en = (port->output | port->input);
  906. }
  907. REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
  908. REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
  909. REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
  910. REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
  911. REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
  912. if (cmd == SSP_FRAME_SYNC && (arg & (WORD_SIZE_8 | WORD_SIZE_12 |
  913. WORD_SIZE_16 | WORD_SIZE_24 | WORD_SIZE_32))) {
  914. int en = gen_cfg.en;
  915. gen_cfg.en = 0;
  916. REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
  917. /* ##### Should DMA be stoped before we change dma size? */
  918. DMA_WR_CMD(port->regi_dmain, dma_w_size);
  919. DMA_WR_CMD(port->regi_dmaout, dma_w_size);
  920. gen_cfg.en = en;
  921. REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
  922. }
  923. spin_unlock_irq(&port->lock);
  924. return return_val;
  925. }
  926. static long sync_serial_ioctl(struct file *file,
  927. unsigned int cmd, unsigned long arg)
  928. {
  929. long ret;
  930. mutex_lock(&sync_serial_mutex);
  931. ret = sync_serial_ioctl_unlocked(file, cmd, arg);
  932. mutex_unlock(&sync_serial_mutex);
  933. return ret;
  934. }
/* NOTE: sync_serial_write does not support concurrency */
/*
 * Queue data for transmission on a synchronous serial port.
 *
 * Copies at most one contiguous stretch of the out ring buffer from user
 * space (the copy may be truncated at the buffer top; user space is
 * expected to retry with the remainder).  Starts the transmitter if it is
 * not already running.  Returns the number of bytes queued, or a negative
 * errno (-ENODEV bad minor, -EFAULT bad user pointer, -EINTR signal while
 * blocking).
 */
static ssize_t sync_serial_write(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	int dev = iminor(file_inode(file));
	DECLARE_WAITQUEUE(wait, current);
	struct sync_port *port;
	int trunc_count;		/* bytes actually queued this call */
	unsigned long flags;
	int bytes_free;			/* contiguous free bytes at wr_ptr */
	int out_buf_count;		/* snapshot of queued byte count */
	unsigned char *rd_ptr;		/* First allocated byte in the buffer */
	unsigned char *wr_ptr;		/* First free byte in the buffer */
	unsigned char *buf_stop_ptr;	/* Last byte + 1 */

	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
		DEBUG(pr_info("Invalid minor %d\n", dev));
		return -ENODEV;
	}
	port = &ports[dev];

	/* |<-           OUT_BUFFER_SIZE                        ->|
	 *           |<- out_buf_count ->|
	 *                               |<- trunc_count ->| ...->|
	 *  ______________________________________________________
	 * |  free   |       data        |          free          |
	 * |_________|___________________|________________________|
	 *           ^ rd_ptr            ^ wr_ptr
	 */
	DEBUGWRITE(pr_info("W d%d c %u a: %p c: %p\n",
			   port->port_nbr, count, port->active_tr_descr,
			   port->catch_tr_descr));

	/* Read variables that may be updated by interrupts */
	spin_lock_irqsave(&port->lock, flags);
	rd_ptr = port->out_rd_ptr;
	out_buf_count = port->out_buf_count;
	spin_unlock_irqrestore(&port->lock, flags);

	/* Check if resources are available */
	if (port->tr_running &&
	    ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) ||
	     out_buf_count >= OUT_BUFFER_SIZE)) {
		DEBUGWRITE(pr_info("sser%d full\n", dev));
		/* NOTE(review): -EAGAIN is returned even for blocking
		 * callers; a full buffer never sleeps here — confirm this
		 * is the intended contract. */
		return -EAGAIN;
	}

	buf_stop_ptr = port->out_buffer + OUT_BUFFER_SIZE;

	/* Determine pointer to the first free byte, before copying. */
	wr_ptr = rd_ptr + out_buf_count;
	if (wr_ptr >= buf_stop_ptr)
		wr_ptr -= OUT_BUFFER_SIZE;

	/* If we wrap the ring buffer, let the user space program handle it by
	 * truncating the data. This could be more elegant, small buffer
	 * fragments may occur.
	 */
	bytes_free = OUT_BUFFER_SIZE - out_buf_count;
	if (wr_ptr + bytes_free > buf_stop_ptr)
		bytes_free = buf_stop_ptr - wr_ptr;
	trunc_count = (count < bytes_free) ? count : bytes_free;

	/* Copy without the lock held; the interrupt side only consumes
	 * bytes (it decrements out_buf_count), it does not write into
	 * this free region. */
	if (copy_from_user(wr_ptr, buf, trunc_count))
		return -EFAULT;

	DEBUGOUTBUF(pr_info("%-4d + %-4d = %-4d %p %p %p\n",
			    out_buf_count, trunc_count,
			    port->out_buf_count, port->out_buffer,
			    wr_ptr, buf_stop_ptr));

	/* Make sure transmitter/receiver is running */
	if (!port->started) {
		reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
		reg_sser_rw_rec_cfg rec_cfg =
			REG_RD(sser, port->regi_sser, rw_rec_cfg);
		cfg.en = regk_sser_yes;
		rec_cfg.rec_en = port->input;
		REG_WR(sser, port->regi_sser, rw_cfg, cfg);
		REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
		port->started = 1;
	}

	/* Setup wait if blocking.  The queue entry must be added before the
	 * data is handed to the hardware so a completion interrupt cannot
	 * slip in between and miss the wakeup. */
	if (!(file->f_flags & O_NONBLOCK)) {
		add_wait_queue(&port->out_wait_q, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
	}

	spin_lock_irqsave(&port->lock, flags);
	port->out_buf_count += trunc_count;
	if (port->use_dma) {
#ifdef SYNC_SER_DMA
		start_dma_out(port, wr_ptr, trunc_count);
#endif
	} else if (!port->tr_running) {
#ifdef SYNC_SER_MANUAL
		reg_sser_rw_intr_mask intr_mask;
		intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
		/* Start sender by writing data */
		send_word(port);
		/* and enable transmitter ready IRQ */
		intr_mask.trdy = 1;
		REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
#endif
	}
	spin_unlock_irqrestore(&port->lock, flags);

	/* Exit if non blocking */
	if (file->f_flags & O_NONBLOCK) {
		DEBUGWRITE(pr_info("w d%d c %u %08x\n",
				   port->port_nbr, trunc_count,
				   REG_RD_INT(dma, port->regi_dmaout, r_intr)));
		return trunc_count;
	}

	/* Blocking path: sleep until the transmit interrupt wakes us (or a
	 * signal arrives), then report what was queued. */
	schedule();
	remove_wait_queue(&port->out_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	DEBUGWRITE(pr_info("w d%d c %u\n", port->port_nbr, trunc_count));
	return trunc_count;
}
  1044. static ssize_t sync_serial_read(struct file *file, char __user *buf,
  1045. size_t count, loff_t *ppos)
  1046. {
  1047. return __sync_serial_read(file, buf, count, ppos, NULL);
  1048. }
  1049. #ifdef SYNC_SER_MANUAL
  1050. static void send_word(struct sync_port *port)
  1051. {
  1052. reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
  1053. reg_sser_rw_tr_data tr_data = {0};
  1054. switch (tr_cfg.sample_size) {
  1055. case 8:
  1056. port->out_buf_count--;
  1057. tr_data.data = *port->out_rd_ptr++;
  1058. REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
  1059. if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
  1060. port->out_rd_ptr = port->out_buffer;
  1061. break;
  1062. case 12:
  1063. {
  1064. int data = (*port->out_rd_ptr++) << 8;
  1065. data |= *port->out_rd_ptr++;
  1066. port->out_buf_count -= 2;
  1067. tr_data.data = data;
  1068. REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
  1069. if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
  1070. port->out_rd_ptr = port->out_buffer;
  1071. break;
  1072. }
  1073. case 16:
  1074. port->out_buf_count -= 2;
  1075. tr_data.data = *(unsigned short *)port->out_rd_ptr;
  1076. REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
  1077. port->out_rd_ptr += 2;
  1078. if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
  1079. port->out_rd_ptr = port->out_buffer;
  1080. break;
  1081. case 24:
  1082. port->out_buf_count -= 3;
  1083. tr_data.data = *(unsigned short *)port->out_rd_ptr;
  1084. REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
  1085. port->out_rd_ptr += 2;
  1086. tr_data.data = *port->out_rd_ptr++;
  1087. REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
  1088. if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
  1089. port->out_rd_ptr = port->out_buffer;
  1090. break;
  1091. case 32:
  1092. port->out_buf_count -= 4;
  1093. tr_data.data = *(unsigned short *)port->out_rd_ptr;
  1094. REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
  1095. port->out_rd_ptr += 2;
  1096. tr_data.data = *(unsigned short *)port->out_rd_ptr;
  1097. REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
  1098. port->out_rd_ptr += 2;
  1099. if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
  1100. port->out_rd_ptr = port->out_buffer;
  1101. break;
  1102. }
  1103. }
  1104. #endif
  1105. #ifdef SYNC_SER_DMA
/* Queue 'count' bytes at 'data' (a region inside the out ring buffer)
 * on the active transmit descriptor and (re)start the output DMA.
 * Called with port->lock held by sync_serial_write.
 */
static void start_dma_out(struct sync_port *port, const char *data, int count)
{
	port->active_tr_descr->buf = (char *)virt_to_phys((char *)data);
	port->active_tr_descr->after = port->active_tr_descr->buf + count;
	port->active_tr_descr->intr = 1;

	/* The freshly filled descriptor becomes the new end-of-list; the
	 * old eol is cleared so the DMA can advance into it. */
	port->active_tr_descr->eol = 1;
	port->prev_tr_descr->eol = 0;

	DEBUGTRDMA(pr_info("Inserting eolr:%p eol@:%p\n",
		port->prev_tr_descr, port->active_tr_descr));
	port->prev_tr_descr = port->active_tr_descr;
	port->active_tr_descr = phys_to_virt((int)port->active_tr_descr->next);

	if (!port->tr_running) {
		/* First transfer: build a DMA context pointing at the
		 * descriptor just filled in, start the channel, then
		 * enable the transmitter. */
		reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser,
			rw_tr_cfg);

		port->out_context.next = NULL;
		port->out_context.saved_data =
			(dma_descr_data *)virt_to_phys(port->prev_tr_descr);
		port->out_context.saved_data_buf = port->prev_tr_descr->buf;

		DMA_START_CONTEXT(port->regi_dmaout,
			virt_to_phys((char *)&port->out_context));

		tr_cfg.tr_en = regk_sser_yes;
		REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
		DEBUGTRDMA(pr_info("dma s\n"););
	} else {
		/* Channel already running: signal that more data was
		 * linked onto the descriptor list. */
		DMA_CONTINUE_DATA(port->regi_dmaout);
		DEBUGTRDMA(pr_info("dma c\n"););
	}

	port->tr_running = 1;
}
  1135. static void start_dma_in(struct sync_port *port)
  1136. {
  1137. int i;
  1138. char *buf;
  1139. unsigned long flags;
  1140. spin_lock_irqsave(&port->lock, flags);
  1141. port->writep = port->flip;
  1142. spin_unlock_irqrestore(&port->lock, flags);
  1143. buf = (char *)virt_to_phys(port->in_buffer);
  1144. for (i = 0; i < NBR_IN_DESCR; i++) {
  1145. port->in_descr[i].buf = buf;
  1146. port->in_descr[i].after = buf + port->inbufchunk;
  1147. port->in_descr[i].intr = 1;
  1148. port->in_descr[i].next =
  1149. (dma_descr_data *)virt_to_phys(&port->in_descr[i+1]);
  1150. port->in_descr[i].buf = buf;
  1151. buf += port->inbufchunk;
  1152. }
  1153. /* Link the last descriptor to the first */
  1154. port->in_descr[i-1].next =
  1155. (dma_descr_data *)virt_to_phys(&port->in_descr[0]);
  1156. port->in_descr[i-1].eol = regk_sser_yes;
  1157. port->next_rx_desc = &port->in_descr[0];
  1158. port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1];
  1159. port->in_context.saved_data =
  1160. (dma_descr_data *)virt_to_phys(&port->in_descr[0]);
  1161. port->in_context.saved_data_buf = port->in_descr[0].buf;
  1162. DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context));
  1163. }
  1164. static irqreturn_t tr_interrupt(int irq, void *dev_id)
  1165. {
  1166. reg_dma_r_masked_intr masked;
  1167. reg_dma_rw_ack_intr ack_intr = { .data = regk_dma_yes };
  1168. reg_dma_rw_stat stat;
  1169. int i;
  1170. int found = 0;
  1171. int stop_sser = 0;
  1172. for (i = 0; i < NBR_PORTS; i++) {
  1173. struct sync_port *port = &ports[i];
  1174. if (!port->enabled || !port->use_dma)
  1175. continue;
  1176. /* IRQ active for the port? */
  1177. masked = REG_RD(dma, port->regi_dmaout, r_masked_intr);
  1178. if (!masked.data)
  1179. continue;
  1180. found = 1;
  1181. /* Check if we should stop the DMA transfer */
  1182. stat = REG_RD(dma, port->regi_dmaout, rw_stat);
  1183. if (stat.list_state == regk_dma_data_at_eol)
  1184. stop_sser = 1;
  1185. /* Clear IRQ */
  1186. REG_WR(dma, port->regi_dmaout, rw_ack_intr, ack_intr);
  1187. if (!stop_sser) {
  1188. /* The DMA has completed a descriptor, EOL was not
  1189. * encountered, so step relevant descriptor and
  1190. * datapointers forward. */
  1191. int sent;
  1192. sent = port->catch_tr_descr->after -
  1193. port->catch_tr_descr->buf;
  1194. DEBUGTXINT(pr_info("%-4d - %-4d = %-4d\t"
  1195. "in descr %p (ac: %p)\n",
  1196. port->out_buf_count, sent,
  1197. port->out_buf_count - sent,
  1198. port->catch_tr_descr,
  1199. port->active_tr_descr););
  1200. port->out_buf_count -= sent;
  1201. port->catch_tr_descr =
  1202. phys_to_virt((int) port->catch_tr_descr->next);
  1203. port->out_rd_ptr =
  1204. phys_to_virt((int) port->catch_tr_descr->buf);
  1205. } else {
  1206. reg_sser_rw_tr_cfg tr_cfg;
  1207. int j, sent;
  1208. /* EOL handler.
  1209. * Note that if an EOL was encountered during the irq
  1210. * locked section of sync_ser_write the DMA will be
  1211. * restarted and the eol flag will be cleared.
  1212. * The remaining descriptors will be traversed by
  1213. * the descriptor interrupts as usual.
  1214. */
  1215. j = 0;
  1216. while (!port->catch_tr_descr->eol) {
  1217. sent = port->catch_tr_descr->after -
  1218. port->catch_tr_descr->buf;
  1219. DEBUGOUTBUF(pr_info(
  1220. "traversing descr %p -%d (%d)\n",
  1221. port->catch_tr_descr,
  1222. sent,
  1223. port->out_buf_count));
  1224. port->out_buf_count -= sent;
  1225. port->catch_tr_descr = phys_to_virt(
  1226. (int)port->catch_tr_descr->next);
  1227. j++;
  1228. if (j >= NBR_OUT_DESCR) {
  1229. /* TODO: Reset and recover */
  1230. panic("sync_serial: missing eol");
  1231. }
  1232. }
  1233. sent = port->catch_tr_descr->after -
  1234. port->catch_tr_descr->buf;
  1235. DEBUGOUTBUF(pr_info("eol at descr %p -%d (%d)\n",
  1236. port->catch_tr_descr,
  1237. sent,
  1238. port->out_buf_count));
  1239. port->out_buf_count -= sent;
  1240. /* Update read pointer to first free byte, we
  1241. * may already be writing data there. */
  1242. port->out_rd_ptr =
  1243. phys_to_virt((int) port->catch_tr_descr->after);
  1244. if (port->out_rd_ptr > port->out_buffer +
  1245. OUT_BUFFER_SIZE)
  1246. port->out_rd_ptr = port->out_buffer;
  1247. tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
  1248. DEBUGTXINT(pr_info(
  1249. "tr_int DMA stop %d, set catch @ %p\n",
  1250. port->out_buf_count,
  1251. port->active_tr_descr));
  1252. if (port->out_buf_count != 0)
  1253. pr_err("sync_ser: buf not empty after eol\n");
  1254. port->catch_tr_descr = port->active_tr_descr;
  1255. port->tr_running = 0;
  1256. tr_cfg.tr_en = regk_sser_no;
  1257. REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
  1258. }
  1259. /* wake up the waiting process */
  1260. wake_up_interruptible(&port->out_wait_q);
  1261. }
  1262. return IRQ_RETVAL(found);
  1263. } /* tr_interrupt */
/* Copy one completed rx descriptor's worth of data into the flip buffer
 * (dropping it on overrun), timestamp it, recycle the descriptor at the
 * tail of the DMA ring and let the input DMA continue.
 */
static inline void handle_rx_packet(struct sync_port *port)
{
	int idx;
	reg_dma_rw_ack_intr ack_intr = { .data = regk_dma_yes };
	unsigned long flags;

	DEBUGRXINT(pr_info("!"));
	spin_lock_irqsave(&port->lock, flags);

	/* If we overrun the user experience is crap regardless if we
	 * drop new or old data. Its much easier to get it right when
	 * dropping new data so lets do that.
	 */
	if ((port->writep + port->inbufchunk <=
	     port->flip + port->in_buffer_size) &&
	    (port->in_buffer_len + port->inbufchunk < IN_BUFFER_SIZE)) {
		memcpy(port->writep,
		       phys_to_virt((unsigned)port->next_rx_desc->buf),
		       port->inbufchunk);
		port->writep += port->inbufchunk;
		if (port->writep >= port->flip + port->in_buffer_size)
			port->writep = port->flip;

		/* Timestamp the new data chunk. */
		if (port->write_ts_idx == NBR_IN_DESCR)
			port->write_ts_idx = 0;
		idx = port->write_ts_idx++;
		ktime_get_ts(&port->timestamp[idx]);
		port->in_buffer_len += port->inbufchunk;
	}
	spin_unlock_irqrestore(&port->lock, flags);

	/* Recycle the descriptor: the just-consumed one becomes the new
	 * end-of-list, and the old eol is cleared so the DMA can advance. */
	port->next_rx_desc->eol = 1;
	port->prev_rx_desc->eol = 0;
	/* Cache bug workaround */
	flush_dma_descr(port->prev_rx_desc, 0);
	port->prev_rx_desc = port->next_rx_desc;
	port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next);
	/* Cache bug workaround */
	flush_dma_descr(port->prev_rx_desc, 1);

	/* wake up the waiting process */
	wake_up_interruptible(&port->in_wait_q);
	DMA_CONTINUE(port->regi_dmain);
	REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);
}
  1305. static irqreturn_t rx_interrupt(int irq, void *dev_id)
  1306. {
  1307. reg_dma_r_masked_intr masked;
  1308. int i;
  1309. int found = 0;
  1310. DEBUG(pr_info("rx_interrupt\n"));
  1311. for (i = 0; i < NBR_PORTS; i++) {
  1312. struct sync_port *port = &ports[i];
  1313. if (!port->enabled || !port->use_dma)
  1314. continue;
  1315. masked = REG_RD(dma, port->regi_dmain, r_masked_intr);
  1316. if (!masked.data)
  1317. continue;
  1318. /* Descriptor interrupt */
  1319. found = 1;
  1320. while (REG_RD(dma, port->regi_dmain, rw_data) !=
  1321. virt_to_phys(port->next_rx_desc))
  1322. handle_rx_packet(port);
  1323. }
  1324. return IRQ_RETVAL(found);
  1325. } /* rx_interrupt */
  1326. #endif /* SYNC_SER_DMA */
  1327. #ifdef SYNC_SER_MANUAL
/* Interrupt handler for ports running in manual (non-DMA) mode.
 * Services both the "data available" (rdav) and "transmitter ready"
 * (trdy) conditions for every enabled manual port.
 */
static irqreturn_t manual_interrupt(int irq, void *dev_id)
{
	unsigned long flags;
	int i;
	int found = 0;
	reg_sser_r_masked_intr masked;

	for (i = 0; i < NBR_PORTS; i++) {
		struct sync_port *port = &ports[i];

		if (!port->enabled || port->use_dma)
			continue;

		masked = REG_RD(sser, port->regi_sser, r_masked_intr);

		/* Data received? */
		if (masked.rdav) {
			reg_sser_rw_rec_cfg rec_cfg =
				REG_RD(sser, port->regi_sser, rw_rec_cfg);
			reg_sser_r_rec_data data = REG_RD(sser,
				port->regi_sser, r_rec_data);
			found = 1;
			/* Read data.
			 * NOTE(review): the SSP_FRAME_SYNC ioctl programs
			 * sample_size as bits-1 (7/11/15/23/31), so these
			 * case labels (8/12/...) may never match — confirm
			 * against the hardware encoding. */
			spin_lock_irqsave(&port->lock, flags);
			switch (rec_cfg.sample_size) {
			case 8:
				*port->writep++ = data.data & 0xff;
				break;
			case 12:
				/* High 8 bits first, low nibble second. */
				*port->writep = (data.data & 0x0ff0) >> 4;
				*(port->writep + 1) = data.data & 0x0f;
				port->writep += 2;
				break;
			case 16:
				*(unsigned short *)port->writep = data.data;
				port->writep += 2;
				break;
			case 24:
				/* NOTE(review): stores 4 bytes but advances
				 * only 3; the 4th byte can scribble one byte
				 * past the intended slot at the end of the
				 * buffer — confirm intent. */
				*(unsigned int *)port->writep = data.data;
				port->writep += 3;
				break;
			case 32:
				*(unsigned int *)port->writep = data.data;
				port->writep += 4;
				break;
			}
			/* Wrap? */
			if (port->writep >= port->flip + port->in_buffer_size)
				port->writep = port->flip;
			if (port->writep == port->readp) {
				/* Receive buf overrun, discard oldest data */
				port->readp++;
				/* Wrap? */
				if (port->readp >= port->flip +
						port->in_buffer_size)
					port->readp = port->flip;
			}
			spin_unlock_irqrestore(&port->lock, flags);
			if (sync_data_avail(port) >= port->inbufchunk)
				/* Wake up application */
				wake_up_interruptible(&port->in_wait_q);
		}

		/* Transmitter ready? */
		if (masked.trdy) {
			found = 1;
			/* More data to send */
			if (port->out_buf_count > 0)
				send_word(port);
			else {
				/* Transmission finished */
				reg_sser_rw_intr_mask intr_mask;

				intr_mask = REG_RD(sser, port->regi_sser,
					rw_intr_mask);
				/* Mask trdy so we stop interrupting until
				 * sync_serial_write queues new data and
				 * re-enables it. */
				intr_mask.trdy = 0;
				REG_WR(sser, port->regi_sser,
					rw_intr_mask, intr_mask);
				/* Wake up application */
				wake_up_interruptible(&port->out_wait_q);
			}
		}
	}
	return IRQ_RETVAL(found);
}
  1407. #endif
  1408. static int __init etrax_sync_serial_init(void)
  1409. {
  1410. #if 1
  1411. /* This code will be removed when we move to udev for all devices. */
  1412. syncser_first = MKDEV(SYNC_SERIAL_MAJOR, 0);
  1413. if (register_chrdev_region(syncser_first, minor_count, SYNCSER_NAME)) {
  1414. pr_err("Failed to register major %d\n", SYNC_SERIAL_MAJOR);
  1415. return -1;
  1416. }
  1417. #else
  1418. /* Allocate dynamic major number. */
  1419. if (alloc_chrdev_region(&syncser_first, 0, minor_count, SYNCSER_NAME)) {
  1420. pr_err("Failed to allocate character device region\n");
  1421. return -1;
  1422. }
  1423. #endif
  1424. syncser_cdev = cdev_alloc();
  1425. if (!syncser_cdev) {
  1426. pr_err("Failed to allocate cdev for syncser\n");
  1427. unregister_chrdev_region(syncser_first, minor_count);
  1428. return -1;
  1429. }
  1430. cdev_init(syncser_cdev, &syncser_fops);
  1431. /* Create a sysfs class for syncser */
  1432. syncser_class = class_create(THIS_MODULE, "syncser_class");
  1433. if (IS_ERR(syncser_class)) {
  1434. pr_err("Failed to create a sysfs class for syncser\n");
  1435. unregister_chrdev_region(syncser_first, minor_count);
  1436. cdev_del(syncser_cdev);
  1437. return -1;
  1438. }
  1439. /* Initialize Ports */
  1440. #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
  1441. if (artpec_pinmux_alloc_fixed(PINMUX_SSER0)) {
  1442. pr_warn("Unable to alloc pins for synchronous serial port 0\n");
  1443. unregister_chrdev_region(syncser_first, minor_count);
  1444. return -EIO;
  1445. }
  1446. initialize_port(0);
  1447. ports[0].enabled = 1;
  1448. /* Register with sysfs so udev can pick it up. */
  1449. device_create(syncser_class, NULL, syncser_first, NULL,
  1450. "%s%d", SYNCSER_NAME, 0);
  1451. #endif
  1452. #if defined(CONFIG_ETRAXFS) && defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
  1453. if (artpec_pinmux_alloc_fixed(PINMUX_SSER1)) {
  1454. pr_warn("Unable to alloc pins for synchronous serial port 1\n");
  1455. unregister_chrdev_region(syncser_first, minor_count);
  1456. class_destroy(syncser_class);
  1457. return -EIO;
  1458. }
  1459. initialize_port(1);
  1460. ports[1].enabled = 1;
  1461. /* Register with sysfs so udev can pick it up. */
  1462. device_create(syncser_class, NULL, syncser_first, NULL,
  1463. "%s%d", SYNCSER_NAME, 0);
  1464. #endif
  1465. /* Add it to system */
  1466. if (cdev_add(syncser_cdev, syncser_first, minor_count) < 0) {
  1467. pr_err("Failed to add syncser as char device\n");
  1468. device_destroy(syncser_class, syncser_first);
  1469. class_destroy(syncser_class);
  1470. cdev_del(syncser_cdev);
  1471. unregister_chrdev_region(syncser_first, minor_count);
  1472. return -1;
  1473. }
  1474. pr_info("ARTPEC synchronous serial port (%s: %d, %d)\n",
  1475. SYNCSER_NAME, MAJOR(syncser_first), MINOR(syncser_first));
  1476. return 0;
  1477. }
  1478. static void __exit etrax_sync_serial_exit(void)
  1479. {
  1480. int i;
  1481. device_destroy(syncser_class, syncser_first);
  1482. class_destroy(syncser_class);
  1483. if (syncser_cdev) {
  1484. cdev_del(syncser_cdev);
  1485. unregister_chrdev_region(syncser_first, minor_count);
  1486. }
  1487. for (i = 0; i < NBR_PORTS; i++) {
  1488. struct sync_port *port = &ports[i];
  1489. if (port->init_irqs == dma_irq_setup) {
  1490. /* Free dma irqs and dma channels. */
  1491. #ifdef SYNC_SER_DMA
  1492. artpec_free_dma(port->dma_in_nbr);
  1493. artpec_free_dma(port->dma_out_nbr);
  1494. free_irq(port->dma_out_intr_vect, port);
  1495. free_irq(port->dma_in_intr_vect, port);
  1496. #endif
  1497. } else if (port->init_irqs == manual_irq_setup) {
  1498. /* Free manual irq. */
  1499. free_irq(port->syncser_intr_vect, port);
  1500. }
  1501. }
  1502. pr_info("ARTPEC synchronous serial port unregistered\n");
  1503. }
  1504. module_init(etrax_sync_serial_init);
  1505. module_exit(etrax_sync_serial_exit);
  1506. MODULE_LICENSE("GPL");