/*
 * serial_tegra.c
 *
 * High-speed serial driver for NVIDIA Tegra SoCs
 *
 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
 *
 * Author: Laxman Dewangan <ldewangan@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/termios.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

#define TEGRA_UART_TYPE			"TEGRA_UART"
#define TX_EMPTY_STATUS			(UART_LSR_TEMT | UART_LSR_THRE)
#define BYTES_TO_ALIGN(x)		((unsigned long)(x) & 0x3)

#define TEGRA_UART_RX_DMA_BUFFER_SIZE	4096
#define TEGRA_UART_LSR_TXFIFO_FULL	0x100
#define TEGRA_UART_IER_EORD		0x20
#define TEGRA_UART_MCR_RTS_EN		0x40
#define TEGRA_UART_MCR_CTS_EN		0x20
#define TEGRA_UART_LSR_ANY		(UART_LSR_OE | UART_LSR_BI | \
					 UART_LSR_PE | UART_LSR_FE)
#define TEGRA_UART_IRDA_CSR		0x08
#define TEGRA_UART_SIR_ENABLED		0x80

#define TEGRA_UART_TX_PIO		1
#define TEGRA_UART_TX_DMA		2
#define TEGRA_UART_MIN_DMA		16
#define TEGRA_UART_FIFO_SIZE		32

/*
 * The Tx FIFO trigger level setting in the Tegra UART is the reverse
 * of a conventional UART.
 */
#define TEGRA_UART_TX_TRIG_16B		0x00
#define TEGRA_UART_TX_TRIG_8B		0x10
#define TEGRA_UART_TX_TRIG_4B		0x20
#define TEGRA_UART_TX_TRIG_1B		0x30

#define TEGRA_UART_MAXIMUM		5

/* Default UART setting when started: 115200 baud, no parity, 1 stop bit, 8 data bits */
#define TEGRA_UART_DEFAULT_BAUD		115200
#define TEGRA_UART_DEFAULT_LSR		UART_LCR_WLEN8

/* Tx transfer mode */
#define TEGRA_TX_PIO			1
#define TEGRA_TX_DMA			2

/**
 * tegra_uart_chip_data: SOC specific data.
 *
 * @tx_fifo_full_status: Status flag available for checking tx fifo full.
 * @allow_txfifo_reset_fifo_mode: allow Tx FIFO reset with FIFO mode or not.
 *			Tegra30 does not allow this.
 * @support_clk_src_div: Clock source supports the clock divider.
 */
struct tegra_uart_chip_data {
	bool	tx_fifo_full_status;
	bool	allow_txfifo_reset_fifo_mode;
	bool	support_clk_src_div;
};

struct tegra_uart_port {
	struct uart_port			uport;
	const struct tegra_uart_chip_data	*cdata;

	struct clk				*uart_clk;
	struct reset_control			*rst;
	unsigned int				current_baud;

	/* Register shadow */
	unsigned long				fcr_shadow;
	unsigned long				mcr_shadow;
	unsigned long				lcr_shadow;
	unsigned long				ier_shadow;
	bool					rts_active;

	int					tx_in_progress;
	unsigned int				tx_bytes;

	bool					enable_modem_interrupt;

	bool					rx_timeout;
	int					rx_in_progress;
	int					symb_bit;

	struct dma_chan				*rx_dma_chan;
	struct dma_chan				*tx_dma_chan;
	dma_addr_t				rx_dma_buf_phys;
	dma_addr_t				tx_dma_buf_phys;
	unsigned char				*rx_dma_buf_virt;
	unsigned char				*tx_dma_buf_virt;
	struct dma_async_tx_descriptor		*tx_dma_desc;
	struct dma_async_tx_descriptor		*rx_dma_desc;
	dma_cookie_t				tx_cookie;
	dma_cookie_t				rx_cookie;
	unsigned int				tx_bytes_requested;
	unsigned int				rx_bytes_requested;
};

static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup);
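
/*
 * Register accessors: the port is registered with UPIO_MEM32 and
 * regshift = 2 (see tegra_uart_probe()), so the standard 8250-style
 * register offsets used throughout this file (UART_LSR, UART_IER, ...)
 * are shifted onto the 32-bit-aligned Tegra register layout and
 * accessed with readl()/writel().
 */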
static inline unsigned long tegra_uart_read(struct tegra_uart_port *tup,
		unsigned long reg)
{
	return readl(tup->uport.membase + (reg << tup->uport.regshift));
}

static inline void tegra_uart_write(struct tegra_uart_port *tup, unsigned val,
		unsigned long reg)
{
	writel(val, tup->uport.membase + (reg << tup->uport.regshift));
}

static inline struct tegra_uart_port *to_tegra_uport(struct uart_port *u)
{
	return container_of(u, struct tegra_uart_port, uport);
}

static unsigned int tegra_uart_get_mctrl(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	/*
	 * RI - Ring detector is active.
	 * CD/DCD/CAR - Carrier detect is always active. For some reason
	 * Linux has different names for carrier detect.
	 * DSR - Data Set Ready is reported as active because the hardware
	 * doesn't support it; it is unclear whether Linux supports this yet.
	 * CTS - Clear To Send. Always reported as active, as the hardware
	 * handles CTS automatically.
	 */
	if (tup->enable_modem_interrupt)
		return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS;
	return TIOCM_CTS;
}

static void set_rts(struct tegra_uart_port *tup, bool active)
{
	unsigned long mcr;

	mcr = tup->mcr_shadow;
	if (active)
		mcr |= TEGRA_UART_MCR_RTS_EN;
	else
		mcr &= ~TEGRA_UART_MCR_RTS_EN;
	if (mcr != tup->mcr_shadow) {
		tegra_uart_write(tup, mcr, UART_MCR);
		tup->mcr_shadow = mcr;
	}
}

static void set_dtr(struct tegra_uart_port *tup, bool active)
{
	unsigned long mcr;

	mcr = tup->mcr_shadow;
	if (active)
		mcr |= UART_MCR_DTR;
	else
		mcr &= ~UART_MCR_DTR;
	if (mcr != tup->mcr_shadow) {
		tegra_uart_write(tup, mcr, UART_MCR);
		tup->mcr_shadow = mcr;
	}
}

static void tegra_uart_set_mctrl(struct uart_port *u, unsigned int mctrl)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	int dtr_enable;

	tup->rts_active = !!(mctrl & TIOCM_RTS);
	set_rts(tup, tup->rts_active);

	dtr_enable = !!(mctrl & TIOCM_DTR);
	set_dtr(tup, dtr_enable);
}

static void tegra_uart_break_ctl(struct uart_port *u, int break_ctl)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned long lcr;

	lcr = tup->lcr_shadow;
	if (break_ctl)
		lcr |= UART_LCR_SBC;
	else
		lcr &= ~UART_LCR_SBC;
	tegra_uart_write(tup, lcr, UART_LCR);
	tup->lcr_shadow = lcr;
}

/**
 * tegra_uart_wait_cycle_time: Wait for N UART clock periods
 *
 * @tup:	Tegra serial port data structure.
 * @cycles:	Number of clock periods to wait.
 *
 * Tegra UARTs are clocked at 16X the baud/bit rate and hence the UART
 * clock speed is 16X the current baud rate.
 */
static void tegra_uart_wait_cycle_time(struct tegra_uart_port *tup,
				       unsigned int cycles)
{
	if (tup->current_baud)
		udelay(DIV_ROUND_UP(cycles * 1000000, tup->current_baud * 16));
}

/* Wait for a symbol-time. */
static void tegra_uart_wait_sym_time(struct tegra_uart_port *tup,
				     unsigned int syms)
{
	if (tup->current_baud)
		udelay(DIV_ROUND_UP(syms * tup->symb_bit * 1000000,
			tup->current_baud));
}

static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
{
	unsigned long fcr = tup->fcr_shadow;

	if (tup->cdata->allow_txfifo_reset_fifo_mode) {
		fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
		tegra_uart_write(tup, fcr, UART_FCR);
	} else {
		fcr &= ~UART_FCR_ENABLE_FIFO;
		tegra_uart_write(tup, fcr, UART_FCR);
		udelay(60);
		fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
		tegra_uart_write(tup, fcr, UART_FCR);
		fcr |= UART_FCR_ENABLE_FIFO;
		tegra_uart_write(tup, fcr, UART_FCR);
	}

	/* Dummy read to ensure the write is posted */
	tegra_uart_read(tup, UART_SCR);

	/*
	 * For all tegra devices (up to t210), there is a hardware issue that
	 * requires software to wait for 32 UART clock periods for the flush
	 * to propagate, otherwise data could be lost.
	 */
	tegra_uart_wait_cycle_time(tup, 32);
}
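
/*
 * Program the baud rate. On chips with a per-port clock divider
 * (support_clk_src_div) the UART clock itself is set to 16x the baud
 * rate and the 16550-style divisor is left at 1; otherwise the divisor
 * is derived from the fixed input clock. The divisor is written through
 * the DLAB latch (DLL at the UART_TX offset, DLM at the UART_IER
 * offset), and the function then waits two character intervals so the
 * new rate settles before further traffic.
 */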
static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
{
	unsigned long rate;
	unsigned int divisor;
	unsigned long lcr;
	int ret;

	if (tup->current_baud == baud)
		return 0;

	if (tup->cdata->support_clk_src_div) {
		rate = baud * 16;
		ret = clk_set_rate(tup->uart_clk, rate);
		if (ret < 0) {
			dev_err(tup->uport.dev,
				"clk_set_rate() failed for rate %lu\n", rate);
			return ret;
		}
		divisor = 1;
	} else {
		rate = clk_get_rate(tup->uart_clk);
		divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
	}

	lcr = tup->lcr_shadow;
	lcr |= UART_LCR_DLAB;
	tegra_uart_write(tup, lcr, UART_LCR);

	tegra_uart_write(tup, divisor & 0xFF, UART_TX);
	tegra_uart_write(tup, ((divisor >> 8) & 0xFF), UART_IER);

	lcr &= ~UART_LCR_DLAB;
	tegra_uart_write(tup, lcr, UART_LCR);

	/* Dummy read to ensure the write is posted */
	tegra_uart_read(tup, UART_SCR);

	tup->current_baud = baud;

	/* wait two character intervals at new rate */
	tegra_uart_wait_sym_time(tup, 2);
	return 0;
}
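
/*
 * Map an error condition reported in LSR to the corresponding TTY flag
 * and error counter. On a break condition with no data pending and the
 * FIFO error bit set, the Rx FIFO is flushed to recover.
 */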
static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
			unsigned long lsr)
{
	char flag = TTY_NORMAL;

	if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
		if (lsr & UART_LSR_OE) {
			/* Overrun error */
			flag = TTY_OVERRUN;
			tup->uport.icount.overrun++;
			dev_err(tup->uport.dev, "Got overrun errors\n");
		} else if (lsr & UART_LSR_PE) {
			/* Parity error */
			flag = TTY_PARITY;
			tup->uport.icount.parity++;
			dev_err(tup->uport.dev, "Got Parity errors\n");
		} else if (lsr & UART_LSR_FE) {
			flag = TTY_FRAME;
			tup->uport.icount.frame++;
			dev_err(tup->uport.dev, "Got frame errors\n");
		} else if (lsr & UART_LSR_BI) {
			dev_err(tup->uport.dev, "Got Break\n");
			tup->uport.icount.brk++;
			/* If FIFO read error without any data, reset Rx FIFO */
			if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE))
				tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_RCVR);
		}
	}
	return flag;
}

static int tegra_uart_request_port(struct uart_port *u)
{
	return 0;
}

static void tegra_uart_release_port(struct uart_port *u)
{
	/* Nothing to do here */
}

static void tegra_uart_fill_tx_fifo(struct tegra_uart_port *tup, int max_bytes)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;
	int i;

	for (i = 0; i < max_bytes; i++) {
		BUG_ON(uart_circ_empty(xmit));
		if (tup->cdata->tx_fifo_full_status) {
			unsigned long lsr = tegra_uart_read(tup, UART_LSR);
			if ((lsr & TEGRA_UART_LSR_TXFIFO_FULL))
				break;
		}
		tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		tup->uport.icount.tx++;
	}
}
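
/*
 * Start a PIO transmit of at most TEGRA_UART_MIN_DMA bytes. Only the
 * THRE interrupt is armed here; the FIFO itself is filled later from
 * tegra_uart_handle_tx_pio() in interrupt context.
 */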
static void tegra_uart_start_pio_tx(struct tegra_uart_port *tup,
		unsigned int bytes)
{
	if (bytes > TEGRA_UART_MIN_DMA)
		bytes = TEGRA_UART_MIN_DMA;

	tup->tx_in_progress = TEGRA_UART_TX_PIO;
	tup->tx_bytes = bytes;
	tup->ier_shadow |= UART_IER_THRI;
	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
}

static void tegra_uart_tx_dma_complete(void *args)
{
	struct tegra_uart_port *tup = args;
	struct circ_buf *xmit = &tup->uport.state->xmit;
	struct dma_tx_state state;
	unsigned long flags;
	unsigned int count;

	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
	count = tup->tx_bytes_requested - state.residue;
	async_tx_ack(tup->tx_dma_desc);
	spin_lock_irqsave(&tup->uport.lock, flags);
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	tup->tx_in_progress = 0;
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&tup->uport);
	tegra_uart_start_next_tx(tup);
	spin_unlock_irqrestore(&tup->uport.lock, flags);
}
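
/*
 * Queue a TX DMA transfer directly out of the circular xmit buffer.
 * The length is rounded down to a multiple of 16 bytes to match the
 * DMA burst size configured in tegra_uart_dma_channel_allocate(); the
 * completion callback advances xmit->tail by the amount actually sent.
 */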
static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
		unsigned long count)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;
	dma_addr_t tx_phys_addr;

	dma_sync_single_for_device(tup->uport.dev, tup->tx_dma_buf_phys,
				UART_XMIT_SIZE, DMA_TO_DEVICE);
	tup->tx_bytes = count & ~(0xF);
	tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail;
	tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan,
				tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT);
	if (!tup->tx_dma_desc) {
		dev_err(tup->uport.dev, "Not able to get desc for Tx\n");
		return -EIO;
	}

	tup->tx_dma_desc->callback = tegra_uart_tx_dma_complete;
	tup->tx_dma_desc->callback_param = tup;
	tup->tx_in_progress = TEGRA_UART_TX_DMA;
	tup->tx_bytes_requested = tup->tx_bytes;
	tup->tx_cookie = dmaengine_submit(tup->tx_dma_desc);
	dma_async_issue_pending(tup->tx_dma_chan);
	return 0;
}
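
/*
 * Pick the transfer mode for the next chunk of TX data: short runs
 * (less than TEGRA_UART_MIN_DMA bytes) are sent by PIO, an unaligned
 * buffer tail is first drained by PIO so that the DMA source address
 * becomes 4-byte aligned, and everything else goes out via DMA.
 */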
static void tegra_uart_start_next_tx(struct tegra_uart_port *tup)
{
	unsigned long tail;
	unsigned long count;
	struct circ_buf *xmit = &tup->uport.state->xmit;

	tail = (unsigned long)&xmit->buf[xmit->tail];
	count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
	if (!count)
		return;

	if (count < TEGRA_UART_MIN_DMA)
		tegra_uart_start_pio_tx(tup, count);
	else if (BYTES_TO_ALIGN(tail) > 0)
		tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail));
	else
		tegra_uart_start_tx_dma(tup, count);
}

/* Called by serial core driver with u->lock taken. */
static void tegra_uart_start_tx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct circ_buf *xmit = &u->state->xmit;

	if (!uart_circ_empty(xmit) && !tup->tx_in_progress)
		tegra_uart_start_next_tx(tup);
}

static unsigned int tegra_uart_tx_empty(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&u->lock, flags);
	if (!tup->tx_in_progress) {
		unsigned long lsr = tegra_uart_read(tup, UART_LSR);
		if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
			ret = TIOCSER_TEMT;
	}
	spin_unlock_irqrestore(&u->lock, flags);
	return ret;
}

static void tegra_uart_stop_tx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct circ_buf *xmit = &tup->uport.state->xmit;
	struct dma_tx_state state;
	unsigned int count;

	if (tup->tx_in_progress != TEGRA_UART_TX_DMA)
		return;

	dmaengine_terminate_all(tup->tx_dma_chan);
	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
	count = tup->tx_bytes_requested - state.residue;
	async_tx_ack(tup->tx_dma_desc);
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	tup->tx_in_progress = 0;
}

static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;

	tegra_uart_fill_tx_fifo(tup, tup->tx_bytes);
	tup->tx_in_progress = 0;
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&tup->uport);
	tegra_uart_start_next_tx(tup);
}

static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
		struct tty_port *tty)
{
	do {
		char flag = TTY_NORMAL;
		unsigned long lsr = 0;
		unsigned char ch;

		lsr = tegra_uart_read(tup, UART_LSR);
		if (!(lsr & UART_LSR_DR))
			break;

		flag = tegra_uart_decode_rx_error(tup, lsr);
		ch = (unsigned char) tegra_uart_read(tup, UART_RX);
		tup->uport.icount.rx++;

		if (!uart_handle_sysrq_char(&tup->uport, ch) && tty)
			tty_insert_flip_char(tty, ch, flag);
	} while (1);
}
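
/*
 * Hand the received bytes from the RX DMA bounce buffer to the tty
 * layer: sync the buffer for CPU access, insert the data into the flip
 * buffer, then give the buffer back to the device for the next fill.
 */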
static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
				      struct tty_port *tty,
				      unsigned int count)
{
	int copied;

	/* If count is zero, then there is no data to be copied */
	if (!count)
		return;

	tup->uport.icount.rx += count;
	if (!tty) {
		dev_err(tup->uport.dev, "No tty port\n");
		return;
	}
	dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
				TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
	copied = tty_insert_flip_string(tty,
			((unsigned char *)(tup->rx_dma_buf_virt)), count);
	if (copied != count) {
		WARN_ON(1);
		dev_err(tup->uport.dev, "RxData copy to tty layer failed\n");
	}
	dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
				TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE);
}

static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
				      unsigned int residue)
{
	struct tty_port *port = &tup->uport.state->port;
	struct tty_struct *tty = tty_port_tty_get(port);
	unsigned int count;

	async_tx_ack(tup->rx_dma_desc);
	count = tup->rx_bytes_requested - residue;

	/* If we are here, DMA is stopped */
	tegra_uart_copy_rx_to_tty(tup, port, count);

	tegra_uart_handle_rx_pio(tup, port);
	if (tty) {
		tty_flip_buffer_push(port);
		tty_kref_put(tty);
	}
}
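
/*
 * Completion callback for the RX DMA descriptor, i.e. the whole 4 KiB
 * bounce buffer has been filled. RTS is dropped (when flow control is
 * active) while the buffer is pushed to the tty layer and a fresh DMA
 * transfer is queued, then raised again so the sender may resume.
 */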
static void tegra_uart_rx_dma_complete(void *args)
{
	struct tegra_uart_port *tup = args;
	struct uart_port *u = &tup->uport;
	unsigned long flags;
	struct dma_tx_state state;
	enum dma_status status;

	spin_lock_irqsave(&u->lock, flags);

	status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
	if (status == DMA_IN_PROGRESS) {
		dev_dbg(tup->uport.dev, "RX DMA is in progress\n");
		goto done;
	}

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	tegra_uart_rx_buffer_push(tup, 0);
	tegra_uart_start_rx_dma(tup);

	/* Activate flow control to start transfer */
	if (tup->rts_active)
		set_rts(tup, true);

done:
	spin_unlock_irqrestore(&u->lock, flags);
}

static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
{
	struct dma_tx_state state;

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	dmaengine_terminate_all(tup->rx_dma_chan);
	dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
	tegra_uart_rx_buffer_push(tup, state.residue);
	tegra_uart_start_rx_dma(tup);

	if (tup->rts_active)
		set_rts(tup, true);
}
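
/*
 * Queue a single RX DMA transfer covering the whole bounce buffer with
 * an interrupt on completion. Partially filled buffers are harvested
 * from the ISR path, which terminates the transfer and uses the
 * reported residue to work out how many bytes actually arrived.
 */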
static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
{
	unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;

	tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
				tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT);
	if (!tup->rx_dma_desc) {
		dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
		return -EIO;
	}

	tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
	tup->rx_dma_desc->callback_param = tup;
	dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
				count, DMA_TO_DEVICE);
	tup->rx_bytes_requested = count;
	tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
	dma_async_issue_pending(tup->rx_dma_chan);
	return 0;
}

static void tegra_uart_handle_modem_signal_change(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned long msr;

	msr = tegra_uart_read(tup, UART_MSR);
	if (!(msr & UART_MSR_ANY_DELTA))
		return;

	if (msr & UART_MSR_TERI)
		tup->uport.icount.rng++;
	if (msr & UART_MSR_DDSR)
		tup->uport.icount.dsr++;

	/* We may only get DDCD when HW init and reset */
	if (msr & UART_MSR_DDCD)
		uart_handle_dcd_change(&tup->uport, msr & UART_MSR_DCD);

	/* Will start/stop_tx accordingly */
	if (msr & UART_MSR_DCTS)
		uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS);
}
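
/*
 * Interrupt handler: loop on IIR until no interrupt is pending and
 * dispatch on the interrupt ID. Receive-related sources (end of data,
 * Rx timeout, receive) only mask the RX interrupts and note that RX
 * work is pending; the DMA buffer is drained and the RX interrupts
 * re-enabled once, on the way out, when IIR reports nothing pending.
 */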
static irqreturn_t tegra_uart_isr(int irq, void *data)
{
	struct tegra_uart_port *tup = data;
	struct uart_port *u = &tup->uport;
	unsigned long iir;
	unsigned long ier;
	bool is_rx_int = false;
	unsigned long flags;

	spin_lock_irqsave(&u->lock, flags);
	while (1) {
		iir = tegra_uart_read(tup, UART_IIR);
		if (iir & UART_IIR_NO_INT) {
			if (is_rx_int) {
				tegra_uart_handle_rx_dma(tup);
				if (tup->rx_in_progress) {
					ier = tup->ier_shadow;
					ier |= (UART_IER_RLSI | UART_IER_RTOIE |
						TEGRA_UART_IER_EORD);
					tup->ier_shadow = ier;
					tegra_uart_write(tup, ier, UART_IER);
				}
			}
			spin_unlock_irqrestore(&u->lock, flags);
			return IRQ_HANDLED;
		}

		switch ((iir >> 1) & 0x7) {
		case 0: /* Modem signal change interrupt */
			tegra_uart_handle_modem_signal_change(u);
			break;

		case 1: /* Transmit interrupt only triggered when using PIO */
			tup->ier_shadow &= ~UART_IER_THRI;
			tegra_uart_write(tup, tup->ier_shadow, UART_IER);
			tegra_uart_handle_tx_pio(tup);
			break;

		case 4: /* End of data */
		case 6: /* Rx timeout */
		case 2: /* Receive */
			if (!is_rx_int) {
				is_rx_int = true;
				/* Disable Rx interrupts */
				ier = tup->ier_shadow;
				ier |= UART_IER_RDI;
				tegra_uart_write(tup, ier, UART_IER);
				ier &= ~(UART_IER_RDI | UART_IER_RLSI |
					UART_IER_RTOIE | TEGRA_UART_IER_EORD);
				tup->ier_shadow = ier;
				tegra_uart_write(tup, ier, UART_IER);
			}
			break;

		case 3: /* Receive error */
			tegra_uart_decode_rx_error(tup,
					tegra_uart_read(tup, UART_LSR));
			break;

		case 5: /* break nothing to handle */
		case 7: /* break nothing to handle */
			break;
		}
	}
}

static void tegra_uart_stop_rx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct dma_tx_state state;
	unsigned long ier;

	if (tup->rts_active)
		set_rts(tup, false);

	if (!tup->rx_in_progress)
		return;

	tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */

	ier = tup->ier_shadow;
	ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE |
					TEGRA_UART_IER_EORD);
	tup->ier_shadow = ier;
	tegra_uart_write(tup, ier, UART_IER);
	tup->rx_in_progress = 0;

	dmaengine_terminate_all(tup->rx_dma_chan);
	dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
	tegra_uart_rx_buffer_push(tup, state.residue);
}
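
/*
 * Shut the controller down: with interrupts masked, wait up to one
 * FIFO's worth of character times for the TX FIFO to drain (warning if
 * CTS flow control appears to be holding the transmitter off), then
 * reset both FIFOs and gate the UART clock. char_time below is roughly
 * one 10-bit character period in microseconds at the current baud rate.
 */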
static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
{
	unsigned long flags;
	unsigned long char_time = DIV_ROUND_UP(10000000, tup->current_baud);
	unsigned long fifo_empty_time = tup->uport.fifosize * char_time;
	unsigned long wait_time;
	unsigned long lsr;
	unsigned long msr;
	unsigned long mcr;

	/* Disable interrupts */
	tegra_uart_write(tup, 0, UART_IER);

	lsr = tegra_uart_read(tup, UART_LSR);
	if ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
		msr = tegra_uart_read(tup, UART_MSR);
		mcr = tegra_uart_read(tup, UART_MCR);
		if ((mcr & TEGRA_UART_MCR_CTS_EN) && (msr & UART_MSR_CTS))
			dev_err(tup->uport.dev,
				"Tx Fifo not empty, CTS disabled, waiting\n");

		/* Wait for Tx fifo to be empty */
		while ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
			wait_time = min(fifo_empty_time, 100lu);
			udelay(wait_time);
			fifo_empty_time -= wait_time;
			if (!fifo_empty_time) {
				msr = tegra_uart_read(tup, UART_MSR);
				mcr = tegra_uart_read(tup, UART_MCR);
				if ((mcr & TEGRA_UART_MCR_CTS_EN) &&
					(msr & UART_MSR_CTS))
					dev_err(tup->uport.dev,
						"Slave not ready\n");
				break;
			}
			lsr = tegra_uart_read(tup, UART_LSR);
		}
	}

	spin_lock_irqsave(&tup->uport.lock, flags);
	/* Reset the Rx and Tx FIFOs */
	tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
	tup->current_baud = 0;
	spin_unlock_irqrestore(&tup->uport.lock, flags);

	clk_disable_unprepare(tup->uart_clk);
}

static int tegra_uart_hw_init(struct tegra_uart_port *tup)
{
	int ret;

	tup->fcr_shadow = 0;
	tup->mcr_shadow = 0;
	tup->lcr_shadow = 0;
	tup->ier_shadow = 0;
	tup->current_baud = 0;

	clk_prepare_enable(tup->uart_clk);

	/* Reset the UART controller to clear all previous status. */
	reset_control_assert(tup->rst);
	udelay(10);
	reset_control_deassert(tup->rst);

	tup->rx_in_progress = 0;
	tup->tx_in_progress = 0;

	/*
	 * Set the trigger level
	 *
	 * For PIO mode:
	 *
	 * For receive, this will interrupt the CPU after that many bytes are
	 * received; for the remaining bytes the receive timeout interrupt is
	 * received. Rx high watermark is set to 4.
	 *
	 * For transmit, if the transmit interrupt is enabled, this will
	 * interrupt the CPU when the number of entries in the FIFO reaches the
	 * low watermark. Tx low watermark is set to 16 bytes.
	 *
	 * For DMA mode:
	 *
	 * Set the Tx trigger to 16. This should match the DMA burst size that
	 * is programmed in the DMA registers.
	 */
	tup->fcr_shadow = UART_FCR_ENABLE_FIFO;
	tup->fcr_shadow |= UART_FCR_R_TRIG_01;
	tup->fcr_shadow |= TEGRA_UART_TX_TRIG_16B;
	tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);

	/* Dummy read to ensure the write is posted */
	tegra_uart_read(tup, UART_SCR);

	/*
	 * For all tegra devices (up to t210), there is a hardware issue that
	 * requires software to wait for 3 UART clock periods after enabling
	 * the TX fifo, otherwise data could be lost.
	 */
	tegra_uart_wait_cycle_time(tup, 3);

	/*
	 * Initialize the UART with default configuration
	 * (115200, N, 8, 1) so that the receive DMA buffer may be
	 * enqueued
	 */
	tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
	tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
	tup->fcr_shadow |= UART_FCR_DMA_SELECT;
	tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);

	ret = tegra_uart_start_rx_dma(tup);
	if (ret < 0) {
		dev_err(tup->uport.dev, "Not able to start Rx DMA\n");
		return ret;
	}
	tup->rx_in_progress = 1;

	/*
	 * Enable IE_RXS for the receive status interrupts like line errors.
	 * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
	 *
	 * If using DMA mode, enable EORD instead of receive interrupt which
	 * will interrupt after the UART is done with the receive instead of
	 * the interrupt when the FIFO "threshold" is reached.
	 *
	 * EORD is a different interrupt than RX_TIMEOUT - RX_TIMEOUT occurs
	 * when the DATA is sitting in the FIFO and couldn't be transferred
	 * to the DMA as the DMA size alignment (4 bytes) is not met. EORD
	 * will be triggered when there is a pause of the incoming data
	 * stream for 4 characters long.
	 *
	 * For pauses in the data which are not aligned to 4 bytes, we get
	 * both the EORD as well as RX_TIMEOUT - SW sees RX_TIMEOUT first
	 * then the EORD.
	 */
	tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | TEGRA_UART_IER_EORD;
	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
	return 0;
}

static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
		bool dma_to_memory)
{
	if (dma_to_memory) {
		dmaengine_terminate_all(tup->rx_dma_chan);
		dma_release_channel(tup->rx_dma_chan);
		dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE,
				tup->rx_dma_buf_virt, tup->rx_dma_buf_phys);
		tup->rx_dma_chan = NULL;
		tup->rx_dma_buf_phys = 0;
		tup->rx_dma_buf_virt = NULL;
	} else {
		dmaengine_terminate_all(tup->tx_dma_chan);
		dma_release_channel(tup->tx_dma_chan);
		dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys,
			UART_XMIT_SIZE, DMA_TO_DEVICE);
		tup->tx_dma_chan = NULL;
		tup->tx_dma_buf_phys = 0;
		tup->tx_dma_buf_virt = NULL;
	}
}
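
/*
 * Acquire the "rx" or "tx" DMA slave channel described in the device
 * tree. The RX side gets a dedicated coherent bounce buffer and a
 * 1-byte-wide source configuration with a burst of 4; the TX side maps
 * the circular xmit buffer for streaming DMA and uses a burst of 16,
 * matching the TX FIFO trigger level programmed in tegra_uart_hw_init().
 */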
static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
			bool dma_to_memory)
{
	struct dma_chan *dma_chan;
	unsigned char *dma_buf;
	dma_addr_t dma_phys;
	int ret;
	struct dma_slave_config dma_sconfig;

	dma_chan = dma_request_slave_channel_reason(tup->uport.dev,
						dma_to_memory ? "rx" : "tx");
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		dev_err(tup->uport.dev,
			"DMA channel alloc failed: %d\n", ret);
		return ret;
	}

	if (dma_to_memory) {
		dma_buf = dma_alloc_coherent(tup->uport.dev,
				TEGRA_UART_RX_DMA_BUFFER_SIZE,
				&dma_phys, GFP_KERNEL);
		if (!dma_buf) {
			dev_err(tup->uport.dev,
				"Not able to allocate the dma buffer\n");
			dma_release_channel(dma_chan);
			return -ENOMEM;
		}
		dma_sconfig.src_addr = tup->uport.mapbase;
		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		dma_sconfig.src_maxburst = 4;
		tup->rx_dma_chan = dma_chan;
		tup->rx_dma_buf_virt = dma_buf;
		tup->rx_dma_buf_phys = dma_phys;
	} else {
		dma_phys = dma_map_single(tup->uport.dev,
			tup->uport.state->xmit.buf, UART_XMIT_SIZE,
			DMA_TO_DEVICE);
		if (dma_mapping_error(tup->uport.dev, dma_phys)) {
			dev_err(tup->uport.dev, "dma_map_single tx failed\n");
			dma_release_channel(dma_chan);
			return -ENOMEM;
		}
		dma_buf = tup->uport.state->xmit.buf;
		dma_sconfig.dst_addr = tup->uport.mapbase;
		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		dma_sconfig.dst_maxburst = 16;
		tup->tx_dma_chan = dma_chan;
		tup->tx_dma_buf_virt = dma_buf;
		tup->tx_dma_buf_phys = dma_phys;
	}

	ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
	if (ret < 0) {
		dev_err(tup->uport.dev,
			"Dma slave config failed, err = %d\n", ret);
		tegra_uart_dma_channel_free(tup, dma_to_memory);
		return ret;
	}

	return 0;
}

static int tegra_uart_startup(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	int ret;

	ret = tegra_uart_dma_channel_allocate(tup, false);
	if (ret < 0) {
		dev_err(u->dev, "Tx Dma allocation failed, err = %d\n", ret);
		return ret;
	}

	ret = tegra_uart_dma_channel_allocate(tup, true);
	if (ret < 0) {
		dev_err(u->dev, "Rx Dma allocation failed, err = %d\n", ret);
		goto fail_rx_dma;
	}

	ret = tegra_uart_hw_init(tup);
	if (ret < 0) {
		dev_err(u->dev, "Uart HW init failed, err = %d\n", ret);
		goto fail_hw_init;
	}

	ret = request_irq(u->irq, tegra_uart_isr, 0,
				dev_name(u->dev), tup);
	if (ret < 0) {
		dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
		goto fail_hw_init;
	}
	return 0;

fail_hw_init:
	tegra_uart_dma_channel_free(tup, true);
fail_rx_dma:
	tegra_uart_dma_channel_free(tup, false);
	return ret;
}

/*
 * Flush any TX data submitted for DMA and PIO. Called when the
 * TX circular buffer is reset.
 */
static void tegra_uart_flush_buffer(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	tup->tx_bytes = 0;
	if (tup->tx_dma_chan)
		dmaengine_terminate_all(tup->tx_dma_chan);
}

static void tegra_uart_shutdown(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	tegra_uart_hw_deinit(tup);

	tup->rx_in_progress = 0;
	tup->tx_in_progress = 0;

	tegra_uart_dma_channel_free(tup, true);
	tegra_uart_dma_channel_free(tup, false);
	free_irq(u->irq, tup);
}

static void tegra_uart_enable_ms(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	if (tup->enable_modem_interrupt) {
		tup->ier_shadow |= UART_IER_MSI;
		tegra_uart_write(tup, tup->ier_shadow, UART_IER);
	}
}
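
/*
 * Apply a new line configuration: interrupts are masked, LCR is rebuilt
 * for parity, word length and stop bits (while symb_bit tracks the
 * number of bit times per character used for the symbol-time delays),
 * the baud rate is programmed, and finally hardware CTS/RTS flow
 * control is set up before interrupts are re-enabled.
 */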
static void tegra_uart_set_termios(struct uart_port *u,
		struct ktermios *termios, struct ktermios *oldtermios)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned int baud;
	unsigned long flags;
	unsigned int lcr;
	int symb_bit = 1;
	struct clk *parent_clk = clk_get_parent(tup->uart_clk);
	unsigned long parent_clk_rate = clk_get_rate(parent_clk);
	int max_divider = (tup->cdata->support_clk_src_div) ? 0x7FFF : 0xFFFF;

	max_divider *= 16;
	spin_lock_irqsave(&u->lock, flags);

	/* Changing configuration, it is safe to stop any rx now */
	if (tup->rts_active)
		set_rts(tup, false);

	/* Clear all interrupts as the configuration is going to be changed */
	tegra_uart_write(tup, tup->ier_shadow | UART_IER_RDI, UART_IER);
	tegra_uart_read(tup, UART_IER);
	tegra_uart_write(tup, 0, UART_IER);
	tegra_uart_read(tup, UART_IER);

	/* Parity */
	lcr = tup->lcr_shadow;
	lcr &= ~UART_LCR_PARITY;

	/* CMSPAR isn't supported by this driver */
	termios->c_cflag &= ~CMSPAR;

	if ((termios->c_cflag & PARENB) == PARENB) {
		symb_bit++;
		if (termios->c_cflag & PARODD) {
			lcr |= UART_LCR_PARITY;
			lcr &= ~UART_LCR_EPAR;
			lcr &= ~UART_LCR_SPAR;
		} else {
			lcr |= UART_LCR_PARITY;
			lcr |= UART_LCR_EPAR;
			lcr &= ~UART_LCR_SPAR;
		}
	}

	lcr &= ~UART_LCR_WLEN8;
	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr |= UART_LCR_WLEN5;
		symb_bit += 5;
		break;
	case CS6:
		lcr |= UART_LCR_WLEN6;
		symb_bit += 6;
		break;
	case CS7:
		lcr |= UART_LCR_WLEN7;
		symb_bit += 7;
		break;
	default:
		lcr |= UART_LCR_WLEN8;
		symb_bit += 8;
		break;
	}

	/* Stop bits */
	if (termios->c_cflag & CSTOPB) {
		lcr |= UART_LCR_STOP;
		symb_bit += 2;
	} else {
		lcr &= ~UART_LCR_STOP;
		symb_bit++;
	}

	tegra_uart_write(tup, lcr, UART_LCR);
	tup->lcr_shadow = lcr;
	tup->symb_bit = symb_bit;

	/* Baud rate. */
	baud = uart_get_baud_rate(u, termios, oldtermios,
			parent_clk_rate/max_divider,
			parent_clk_rate/16);
	spin_unlock_irqrestore(&u->lock, flags);
	tegra_set_baudrate(tup, baud);
	if (tty_termios_baud_rate(termios))
		tty_termios_encode_baud_rate(termios, baud, baud);
	spin_lock_irqsave(&u->lock, flags);

	/* Flow control */
	if (termios->c_cflag & CRTSCTS) {
		tup->mcr_shadow |= TEGRA_UART_MCR_CTS_EN;
		tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
		tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
		/* if top layer has asked to set rts active then do so here */
		if (tup->rts_active)
			set_rts(tup, true);
	} else {
		tup->mcr_shadow &= ~TEGRA_UART_MCR_CTS_EN;
		tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
		tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
	}

	/* update the port timeout based on new settings */
	uart_update_timeout(u, termios->c_cflag, baud);

	/* Make sure all writes have completed */
	tegra_uart_read(tup, UART_IER);

	/* Re-enable interrupts */
	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
	tegra_uart_read(tup, UART_IER);
	spin_unlock_irqrestore(&u->lock, flags);
}

static const char *tegra_uart_type(struct uart_port *u)
{
	return TEGRA_UART_TYPE;
}

static struct uart_ops tegra_uart_ops = {
	.tx_empty	= tegra_uart_tx_empty,
	.set_mctrl	= tegra_uart_set_mctrl,
	.get_mctrl	= tegra_uart_get_mctrl,
	.stop_tx	= tegra_uart_stop_tx,
	.start_tx	= tegra_uart_start_tx,
	.stop_rx	= tegra_uart_stop_rx,
	.flush_buffer	= tegra_uart_flush_buffer,
	.enable_ms	= tegra_uart_enable_ms,
	.break_ctl	= tegra_uart_break_ctl,
	.startup	= tegra_uart_startup,
	.shutdown	= tegra_uart_shutdown,
	.set_termios	= tegra_uart_set_termios,
	.type		= tegra_uart_type,
	.request_port	= tegra_uart_request_port,
	.release_port	= tegra_uart_release_port,
};

static struct uart_driver tegra_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "tegra_hsuart",
	.dev_name	= "ttyTHS",
	.cons		= NULL,
	.nr		= TEGRA_UART_MAXIMUM,
};

static int tegra_uart_parse_dt(struct platform_device *pdev,
	struct tegra_uart_port *tup)
{
	struct device_node *np = pdev->dev.of_node;
	int port;

	port = of_alias_get_id(np, "serial");
	if (port < 0) {
		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", port);
		return port;
	}
	tup->uport.line = port;

	tup->enable_modem_interrupt = of_property_read_bool(np,
			"nvidia,enable-modem-interrupt");
	return 0;
}

static struct tegra_uart_chip_data tegra20_uart_chip_data = {
	.tx_fifo_full_status		= false,
	.allow_txfifo_reset_fifo_mode	= true,
	.support_clk_src_div		= false,
};

static struct tegra_uart_chip_data tegra30_uart_chip_data = {
	.tx_fifo_full_status		= true,
	.allow_txfifo_reset_fifo_mode	= false,
	.support_clk_src_div		= true,
};

static const struct of_device_id tegra_uart_of_match[] = {
	{
		.compatible	= "nvidia,tegra30-hsuart",
		.data		= &tegra30_uart_chip_data,
	}, {
		.compatible	= "nvidia,tegra20-hsuart",
		.data		= &tegra20_uart_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_uart_of_match);
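
/*
 * Probe: pick up the SoC-specific data via the OF match table, map the
 * register window (32-bit accesses with registers spaced 4 bytes apart,
 * hence regshift = 2), obtain the clock, reset control and IRQ, and
 * register the port with the serial core as one of the ttyTHS devices.
 */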
static int tegra_uart_probe(struct platform_device *pdev)
{
	struct tegra_uart_port *tup;
	struct uart_port *u;
	struct resource *resource;
	int ret;
	const struct tegra_uart_chip_data *cdata;
	const struct of_device_id *match;

	match = of_match_device(tegra_uart_of_match, &pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Error: No device match found\n");
		return -ENODEV;
	}
	cdata = match->data;

	tup = devm_kzalloc(&pdev->dev, sizeof(*tup), GFP_KERNEL);
	if (!tup) {
		dev_err(&pdev->dev, "Failed to allocate memory for tup\n");
		return -ENOMEM;
	}

	ret = tegra_uart_parse_dt(pdev, tup);
	if (ret < 0)
		return ret;

	u = &tup->uport;
	u->dev = &pdev->dev;
	u->ops = &tegra_uart_ops;
	u->type = PORT_TEGRA;
	u->fifosize = 32;
	tup->cdata = cdata;

	platform_set_drvdata(pdev, tup);
	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!resource) {
		dev_err(&pdev->dev, "No IO memory resource\n");
		return -ENODEV;
	}

	u->mapbase = resource->start;
	u->membase = devm_ioremap_resource(&pdev->dev, resource);
	if (IS_ERR(u->membase))
		return PTR_ERR(u->membase);

	tup->uart_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tup->uart_clk)) {
		dev_err(&pdev->dev, "Couldn't get the clock\n");
		return PTR_ERR(tup->uart_clk);
	}

	tup->rst = devm_reset_control_get(&pdev->dev, "serial");
	if (IS_ERR(tup->rst)) {
		dev_err(&pdev->dev, "Couldn't get the reset\n");
		return PTR_ERR(tup->rst);
	}

	u->iotype = UPIO_MEM32;
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(&pdev->dev, "Couldn't get IRQ\n");
		return ret;
	}
	u->irq = ret;
	u->regshift = 2;
	ret = uart_add_one_port(&tegra_uart_driver, u);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to add uart port, err %d\n", ret);
		return ret;
	}
	return ret;
}

static int tegra_uart_remove(struct platform_device *pdev)
{
	struct tegra_uart_port *tup = platform_get_drvdata(pdev);
	struct uart_port *u = &tup->uport;

	uart_remove_one_port(&tegra_uart_driver, u);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int tegra_uart_suspend(struct device *dev)
{
	struct tegra_uart_port *tup = dev_get_drvdata(dev);
	struct uart_port *u = &tup->uport;

	return uart_suspend_port(&tegra_uart_driver, u);
}

static int tegra_uart_resume(struct device *dev)
{
	struct tegra_uart_port *tup = dev_get_drvdata(dev);
	struct uart_port *u = &tup->uport;

	return uart_resume_port(&tegra_uart_driver, u);
}
#endif

static const struct dev_pm_ops tegra_uart_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tegra_uart_suspend, tegra_uart_resume)
};

static struct platform_driver tegra_uart_platform_driver = {
	.probe		= tegra_uart_probe,
	.remove		= tegra_uart_remove,
	.driver		= {
		.name	= "serial-tegra",
		.of_match_table = tegra_uart_of_match,
		.pm	= &tegra_uart_pm_ops,
	},
};

static int __init tegra_uart_init(void)
{
	int ret;

	ret = uart_register_driver(&tegra_uart_driver);
	if (ret < 0) {
		pr_err("Could not register %s driver\n",
			tegra_uart_driver.driver_name);
		return ret;
	}

	ret = platform_driver_register(&tegra_uart_platform_driver);
	if (ret < 0) {
		pr_err("Uart platform driver register failed, e = %d\n", ret);
		uart_unregister_driver(&tegra_uart_driver);
		return ret;
	}
	return 0;
}

static void __exit tegra_uart_exit(void)
{
	pr_info("Unloading tegra uart driver\n");
	platform_driver_unregister(&tegra_uart_platform_driver);
	uart_unregister_driver(&tegra_uart_driver);
}

module_init(tegra_uart_init);
module_exit(tegra_uart_exit);

MODULE_ALIAS("platform:serial-tegra");
MODULE_DESCRIPTION("High speed UART driver for tegra chipset");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");