msm_serial_hs.c 95 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
7327832793280328132823283328432853286328732883289329032913292329332943295329632973298329933003301330233033304330533063307330833093310331133123313331433153316331733183319332033213322332333243325332633273328332933303331333233333334333533363337333833393340334133423343334433453346334733483349335033513352335333543355335633573358335933603361336233633364336533663367336833693370337133723373337433753376337733783379338033813382338333843385338633873388338933903391339233933394339533963397339833993400340134023403340434053406340734083409341034113412341334143415341634173418341934203421342234233424342534263427342834293430343134323433343434353436343734383439344034413442344334443445344634473448344934503451345234533454345534563457345834593460346134623463346434653466346734683469347034713472347334743475347634773478347934803481348234833484348534863487
  1. /* drivers/serial/msm_serial_hs.c
  2. *
  3. * MSM 7k High speed uart driver
  4. *
  5. * Copyright (c) 2008 Google Inc.
  6. * Copyright (c) 2007-2014, The Linux Foundation. All rights reserved.
  7. * Modified: Nick Pelly <npelly@google.com>
  8. *
  9. * All source code in this file is licensed under the following license
  10. * except where indicated.
  11. *
  12. * This program is free software; you can redistribute it and/or
  13. * modify it under the terms of the GNU General Public License
  14. * version 2 as published by the Free Software Foundation.
  15. *
  16. * This program is distributed in the hope that it will be useful,
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  19. * See the GNU General Public License for more details.
  20. *
  21. * Has optional support for uart power management independent of linux
  22. * suspend/resume:
  23. *
  24. * RX wakeup.
  25. * UART wakeup can be triggered by RX activity (using a wakeup GPIO on the
  26. * UART RX pin). This should only be used if there is not a wakeup
  27. * GPIO on the UART CTS, and the first RX byte is known (for example, with the
  28. * Bluetooth Texas Instruments HCILL protocol), since the first RX byte will
  29. * always be lost. RTS will be asserted even while the UART is off in this mode
  30. * of operation. See msm_serial_hs_platform_data.rx_wakeup_irq.
  31. */
  32. #include <linux/module.h>
  33. #include <linux/serial.h>
  34. #include <linux/serial_core.h>
  35. #include <linux/slab.h>
  36. #include <linux/init.h>
  37. #include <linux/interrupt.h>
  38. #include <linux/irq.h>
  39. #include <linux/io.h>
  40. #include <linux/ioport.h>
  41. #include <linux/atomic.h>
  42. #include <linux/kernel.h>
  43. #include <linux/timer.h>
  44. #include <linux/clk.h>
  45. #include <linux/platform_device.h>
  46. #include <linux/pm_runtime.h>
  47. #include <linux/dma-mapping.h>
  48. #include <linux/dmapool.h>
  49. #include <linux/tty_flip.h>
  50. #include <linux/wait.h>
  51. #include <linux/sysfs.h>
  52. #include <linux/stat.h>
  53. #include <linux/device.h>
  54. #include <linux/wakelock.h>
  55. #include <linux/debugfs.h>
  56. #include <linux/of.h>
  57. #include <linux/of_device.h>
  58. #include <linux/of_gpio.h>
  59. #include <linux/gpio.h>
  60. #include <asm/atomic.h>
  61. #include <asm/irq.h>
  62. #include <mach/sps.h>
  63. #include <mach/msm_serial_hs.h>
  64. #include <mach/msm_bus.h>
  65. #include <mach/msm_ipc_logging.h>
  66. #include "msm_serial_hs_hwreg.h"
  67. #define UART_SPS_CONS_PERIPHERAL 0
  68. #define UART_SPS_PROD_PERIPHERAL 1
  69. static void *ipc_msm_hs_log_ctxt;
  70. #define IPC_MSM_HS_LOG_PAGES 30
  71. #define _DW_ENABLED
  72. #ifdef _DW_ENABLED
  73. #define MAX_DUALWAVE_MESSAGE_SIZE 128
  74. #include <linux/syscalls.h>
  75. #include <asm/uaccess.h>
  76. #define DUALWAVE_INACTIVE 0
  77. #define DUALWAVE_PLAYBACK 1
  78. #define DUALWAVE_CAPTURE 2
  79. extern int send_uevent_wh_ble_info(char *prEnvInfoLists[3]);
  80. extern int checkDualWaveStatus(void);
/*
 * GET_CUR_TIME_ON(tCurTimespec) - read the current wall-clock time
 * (CLOCK_REALTIME) into @tCurTimespec.
 *
 * Invokes the sys_clock_gettime() syscall from kernel context, so the
 * address limit is temporarily widened with set_fs(KERNEL_DS) and
 * restored afterwards.  NOTE(review): the syscall's return value
 * (llErrTime) is never checked; on failure @tCurTimespec receives
 * whatever tMyTime happens to hold.
 */
#define GET_CUR_TIME_ON(tCurTimespec) \
do { \
	long int llErrTime = 0; \
	struct timespec tMyTime; \
	mm_segment_t tOldfs; \
	tOldfs = get_fs(); \
	set_fs(KERNEL_DS); \
	\
	llErrTime = sys_clock_gettime(CLOCK_REALTIME, &tMyTime); \
	set_fs(tOldfs); \
	\
	tCurTimespec = tMyTime; \
}while(0)
  94. char *g_szSysTime;
  95. char *g_szRefTime;
  96. inline void UpdateTime(char *pchBuffer, int iLen)
  97. {
  98. struct timespec tSysTimespec;
  99. char *pEnv[3];
  100. int iRead=0;
  101. int iEventLength=0;
  102. int iNumHciCmdPackets=0;
  103. unsigned short *psCmdOpCode =NULL;
  104. int iStatus = 0;
  105. unsigned int *puiBtClock;
  106. GET_CUR_TIME_ON(tSysTimespec);
  107. g_szSysTime = kzalloc(MAX_DUALWAVE_MESSAGE_SIZE, GFP_KERNEL);
  108. g_szRefTime = kzalloc(MAX_DUALWAVE_MESSAGE_SIZE, GFP_KERNEL);
  109. pEnv[0] = g_szSysTime;
  110. pEnv[1] = g_szRefTime;
  111. pEnv[2] = NULL;
  112. switch (pchBuffer[iRead++])
  113. {
  114. case 0x04:
  115. {
  116. if(pchBuffer[iRead++] == 0x0E)
  117. {
  118. iEventLength = pchBuffer[iRead++];
  119. iNumHciCmdPackets = pchBuffer[iRead++];
  120. psCmdOpCode = (short*) (pchBuffer+iRead); iRead += 2;
  121. iStatus = pchBuffer[iRead++];
  122. puiBtClock = (unsigned int*)(pchBuffer+iRead); iRead +=4;
  123. if ( *psCmdOpCode == (unsigned short)0xFCEE && iStatus == 0x00)
  124. {
  125. sprintf(g_szSysTime,"SYS_TIME=%ld.%09ld",tSysTimespec.tv_sec,tSysTimespec.tv_nsec);
  126. sprintf(g_szRefTime,"BT_CLK=%d",*puiBtClock);
  127. send_uevent_wh_ble_info(pEnv);
  128. }
  129. }
  130. }
  131. break;
  132. default:
  133. break;
  134. }
  135. kfree(g_szSysTime);
  136. kfree(g_szRefTime);
  137. }
  138. #endif
/* IPC-log verbosity levels for hs_serial_debug_mask.
 * If the debug_mask gets set to FATAL_LEV,
 * a fatal error has happened and further IPC logging
 * is disabled so that this problem can be detected
 * (see MSM_HS_ERR, which drops the mask to FATAL_LEV).
 */
enum {
	FATAL_LEV = 0U,	/* logging suppressed after a fatal error */
	ERR_LEV = 1U,	/* errors */
	WARN_LEV = 2U,	/* warnings */
	INFO_LEV = 3U,	/* informational */
	DBG_LEV = 4U,	/* full debug (most verbose) */
};
/* Default IPC log level DBG (DBG_LEV) */
  151. static int hs_serial_debug_mask = DBG_LEV;
  152. module_param_named(debug_mask, hs_serial_debug_mask,
  153. int, S_IRUGO | S_IWUSR | S_IWGRP);
/* Log at DBG level to the IPC log context only (no console output). */
#define MSM_HS_DBG(x...) do { \
	if (hs_serial_debug_mask >= DBG_LEV) { \
		if (ipc_msm_hs_log_ctxt) \
			ipc_log_string(ipc_msm_hs_log_ctxt, x); \
	} \
} while (0)
/* Log at INFO level to the IPC log context only (no console output). */
#define MSM_HS_INFO(x...) do { \
	if (hs_serial_debug_mask >= INFO_LEV) {\
		if (ipc_msm_hs_log_ctxt) \
			ipc_log_string(ipc_msm_hs_log_ctxt, x); \
	} \
} while (0)
/* warnings and errors show up on console always */
#define MSM_HS_WARN(x...) do { \
	pr_warn(x); \
	if (ipc_msm_hs_log_ctxt && hs_serial_debug_mask >= WARN_LEV) \
		ipc_log_string(ipc_msm_hs_log_ctxt, x); \
} while (0)
/* ERROR condition in the driver sets the hs_serial_debug_mask
 * to FATAL_LEV, so that this message can be seen
 * in IPC logging. Further errors continue to log on the console
 */
#define MSM_HS_ERR(x...) do { \
	pr_err(x); \
	if (ipc_msm_hs_log_ctxt && hs_serial_debug_mask >= ERR_LEV) { \
		ipc_log_string(ipc_msm_hs_log_ctxt, x); \
		hs_serial_debug_mask = FATAL_LEV; \
	} \
} while (0)
/*
 * There are 3 different kind of UART Core available on MSM.
 * High Speed UART (i.e. Legacy HSUART), GSBI based HSUART
 * and BSLP based HSUART.
 */
enum uart_core_type {
	LEGACY_HSUART,	/* stand-alone high speed UART core */
	GSBI_HSUART,	/* HSUART behind a GSBI wrapper */
	BLSP_HSUART,	/* HSUART behind a BLSP wrapper */
};
/* Why a TX/RX flush was requested; values at or after
 * FLUSH_DATA_INVALID mean the flushed data must be discarded. */
enum flush_reason {
	FLUSH_NONE,
	FLUSH_DATA_READY,
	FLUSH_DATA_INVALID, /* values after this indicate invalid data */
	FLUSH_IGNORE,
	FLUSH_STOP,
	FLUSH_SHUTDOWN,
};
/* Lifecycle of the port's core/iface clocks (see msm_hs_clock_vote /
 * msm_hs_clock_unvote, which move between CLK_ON and CLK_OFF). */
enum msm_hs_clk_states_e {
	MSM_HS_CLK_PORT_OFF, /* port not in use */
	MSM_HS_CLK_OFF, /* clock disabled */
	MSM_HS_CLK_REQUEST_OFF, /* disable after TX and RX flushed */
	MSM_HS_CLK_ON, /* clock enabled */
};
/* Track the forced RXSTALE flush during clock off sequence.
 * These states are only valid during MSM_HS_CLK_REQUEST_OFF */
enum msm_hs_clk_req_off_state_e {
	CLK_REQ_OFF_START,
	CLK_REQ_OFF_RXSTALE_ISSUED,
	CLK_REQ_OFF_FLUSH_ISSUED,
	CLK_REQ_OFF_RXSTALE_FLUSHED,
};
/* SPS data structures to support HSUART with BAM
 * @sps_pipe - This struct defines BAM pipe descriptor
 * @sps_connect - This struct defines a connection's end point
 * @sps_register - This struct defines a event registration parameters
 */
struct msm_hs_sps_ep_conn_data {
	struct sps_pipe *pipe_handle;	/* handle returned by the SPS driver */
	struct sps_connect config;	/* endpoint connection parameters */
	struct sps_register_event event;	/* event-callback registration */
};
/* Per-port TX state: DMA bookkeeping plus the BAM consumer endpoint. */
struct msm_hs_tx {
	unsigned int tx_ready_int_en; /* ok to dma more tx */
	unsigned int dma_in_flight; /* tx dma in progress */
	enum flush_reason flush;	/* current TX flush state */
	wait_queue_head_t wait;		/* waiters on TX flush completion */
	int tx_count;			/* bytes in the in-flight transfer */
	dma_addr_t dma_base;		/* DMA address of the TX buffer */
	struct tasklet_struct tlet;	/* TX completion bottom half */
	struct msm_hs_sps_ep_conn_data cons; /* BAM consumer (memory->device) pipe */
};
/* Per-port RX state: DMA buffer, deferred tty-insert machinery and the
 * BAM producer endpoint. */
struct msm_hs_rx {
	enum flush_reason flush;	/* current RX flush state */
	wait_queue_head_t wait;		/* waiters on RX flush completion */
	dma_addr_t rbuffer;		/* DMA address of the RX buffer */
	unsigned char *buffer;		/* CPU address of the RX buffer */
	unsigned int buffer_pending;	/* enum buffer_states bits -- TODO confirm */
	struct dma_pool *pool;		/* pool backing the RX buffer */
	struct wake_lock wake_lock;	/* held while RX data is pending */
	struct delayed_work flip_insert_work; /* retries tty flip-buffer insert */
	struct tasklet_struct tlet;	/* RX completion bottom half */
	struct msm_hs_sps_ep_conn_data prod; /* BAM producer (device->memory) pipe */
	bool rx_cmd_queued;		/* RX command queued to BAM */
	bool rx_cmd_exec;		/* RX command executing */
};
/* Bit flags describing why RX data is pending insertion into the tty
 * (used with msm_hs_rx.buffer_pending). */
enum buffer_states {
	NONE_PENDING = 0x0,
	FIFO_OVERRUN = 0x1,
	PARITY_ERROR = 0x2,
	CHARS_NORMAL = 0x4,
};
/* optional low power wakeup, typically on a GPIO RX irq */
struct msm_hs_wakeup {
	int irq; /* < 0 indicates low power wakeup disabled */
	unsigned char ignore; /* bool */
	/* bool: inject char into rx tty on wakeup */
	unsigned char inject_rx;
	char rx_to_inject;	/* the character to inject (first RX byte is lost) */
};
/* Per-port driver state, embedding the serial-core uart_port. */
struct msm_hs_port {
	struct uart_port uport;		/* serial-core port (must allow UARTDM_TO_MSM) */
	unsigned long imr_reg; /* shadow value of UARTDM_IMR */
	struct clk *clk;		/* core clock */
	struct clk *pclk;		/* iface clock; may be NULL */
	struct msm_hs_tx tx;		/* TX-side state */
	struct msm_hs_rx rx;		/* RX-side state */
	struct hrtimer clk_off_timer; /* to poll TXEMT before clock off */
	ktime_t clk_off_delay;		/* period of clk_off_timer */
	enum msm_hs_clk_states_e clk_state;	/* guarded by uport.lock */
	enum msm_hs_clk_req_off_state_e clk_req_off_state;
	atomic_t clk_count;		/* clock vote count (see msm_hs_clock_vote) */
	struct msm_hs_wakeup wakeup;	/* optional GPIO RX wakeup config */
	struct wake_lock dma_wake_lock; /* held while any DMA active */
	struct dentry *loopback_dir;	/* debugfs entry */
	struct work_struct clock_off_w; /* work for actual clock off */
	struct workqueue_struct *hsuart_wq; /* hsuart workqueue */
	struct mutex clk_mutex; /* mutex to guard against clock off/clock on */
	struct work_struct disconnect_rx_endpoint; /* disconnect rx_endpoint */
	enum uart_core_type uart_type;	/* LEGACY/GSBI/BLSP core flavour */
	u32 bam_handle;			/* SPS handle for the BAM device */
	resource_size_t bam_mem;	/* BAM register space size */
	int bam_irq;			/* BAM interrupt */
	unsigned char __iomem *bam_base; /* mapped BAM registers */
	unsigned int bam_tx_ep_pipe_index;	/* BAM pipe index for TX */
	unsigned int bam_rx_ep_pipe_index;	/* BAM pipe index for RX */
	/* struct sps_event_notify is an argument passed when triggering a
	 * callback event object registered for an SPS connection end point.
	 */
	struct sps_event_notify notify;
	/* bus client handler */
	u32 bus_perf_client;
	/* BLSP UART required BUS Scaling data */
	struct msm_bus_scale_pdata *bus_scale_table;
	int rx_count_callback;		/* byte count reported by RX callback */
	bool rx_bam_inprogress;		/* RX BAM transfer in progress */
	wait_queue_head_t bam_disconnect_wait;	/* waiters on BAM disconnect */
};
/* Device-tree match table: binds this driver to "qcom,msm-hsuart-v14"
 * (BLSP-based HSUART) nodes. */
static struct of_device_id msm_hs_match_table[] = {
	{ .compatible = "qcom,msm-hsuart-v14"},
	{}
};
  305. #define MSM_UARTDM_BURST_SIZE 16 /* DM burst size (in bytes) */
  306. #define UARTDM_TX_BUF_SIZE UART_XMIT_SIZE
  307. #define UARTDM_RX_BUF_SIZE 512
  308. #define RETRY_TIMEOUT 5
  309. #define UARTDM_NR 256
  310. #define BAM_PIPE_MIN 0
  311. #define BAM_PIPE_MAX 11
  312. #define BUS_SCALING 1
  313. #define BUS_RESET 0
  314. #define RX_FLUSH_COMPLETE_TIMEOUT 300 /* In jiffies */
  315. #define BLSP_UART_CLK_FMAX 63160000
  316. static struct dentry *debug_base;
  317. static struct msm_hs_port q_uart_port[UARTDM_NR];
  318. static struct platform_driver msm_serial_hs_platform_driver;
  319. static struct uart_driver msm_hs_driver;
  320. static struct uart_ops msm_hs_ops;
  321. static void msm_hs_start_rx_locked(struct uart_port *uport);
  322. static void msm_serial_hs_rx_tlet(unsigned long tlet_ptr);
  323. static void flip_insert_work(struct work_struct *work);
  324. static void msm_hs_bus_voting(struct msm_hs_port *msm_uport, unsigned int vote);
  325. static struct msm_hs_port *msm_hs_get_hs_port(int port_index);
  326. #define UARTDM_TO_MSM(uart_port) \
  327. container_of((uart_port), struct msm_hs_port, uport)
  328. struct uart_port * msm_hs_get_port_by_id(int num)
  329. {
  330. struct uart_port *uport;
  331. struct msm_hs_port *msm_uport;
  332. if (num < 0 || num >= UARTDM_NR)
  333. return NULL;
  334. msm_uport = &q_uart_port[num];
  335. uport = &(msm_uport->uport);
  336. return uport;
  337. }
  338. static int msm_hs_ioctl(struct uart_port *uport, unsigned int cmd,
  339. unsigned long arg)
  340. {
  341. int ret = 0, state = 1;
  342. enum msm_hs_clk_states_e clk_state;
  343. unsigned long flags;
  344. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  345. switch (cmd) {
  346. case MSM_ENABLE_UART_CLOCK: {
  347. MSM_HS_DBG("%s():ENABLE UART CLOCK: cmd=%d\n", __func__, cmd);
  348. msm_hs_request_clock_on(&msm_uport->uport);
  349. break;
  350. }
  351. case MSM_DISABLE_UART_CLOCK: {
  352. MSM_HS_DBG("%s():DISABLE UART CLOCK: cmd=%d\n", __func__, cmd);
  353. msm_hs_request_clock_off(&msm_uport->uport);
  354. break;
  355. }
  356. case MSM_GET_UART_CLOCK_STATUS: {
  357. /* Return value 0 - UART CLOCK is OFF
  358. * Return value 1 - UART CLOCK is ON */
  359. MSM_HS_DBG("%s():GET UART CLOCK STATUS: cmd=%d\n", __func__, cmd);
  360. spin_lock_irqsave(&msm_uport->uport.lock, flags);
  361. clk_state = msm_uport->clk_state;
  362. spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
  363. if (clk_state <= MSM_HS_CLK_OFF)
  364. state = 0;
  365. ret = state;
  366. break;
  367. }
  368. default: {
  369. MSM_HS_DBG("%s():Unknown cmd specified: cmd=%d\n", __func__, cmd);
  370. ret = -ENOIOCTLCMD;
  371. break;
  372. }
  373. }
  374. return ret;
  375. }
  376. static int msm_hs_clock_vote(struct msm_hs_port *msm_uport)
  377. {
  378. int rc = 0;
  379. mutex_lock(&msm_uport->clk_mutex);
  380. if (1 == atomic_inc_return(&msm_uport->clk_count)) {
  381. msm_hs_bus_voting(msm_uport, BUS_SCALING);
  382. /* Turn on core clk and iface clk */
  383. if (msm_uport->pclk) {
  384. rc = clk_prepare_enable(msm_uport->pclk);
  385. if (rc) {
  386. dev_err(msm_uport->uport.dev,
  387. "%s: Could not turn on pclk [%d]\n",
  388. __func__, rc);
  389. mutex_unlock(&msm_uport->clk_mutex);
  390. return rc;
  391. }
  392. }
  393. rc = clk_prepare_enable(msm_uport->clk);
  394. if (rc) {
  395. dev_err(msm_uport->uport.dev,
  396. "%s: Could not turn on core clk [%d]\n",
  397. __func__, rc);
  398. clk_disable_unprepare(msm_uport->pclk);
  399. mutex_unlock(&msm_uport->clk_mutex);
  400. return rc;
  401. }
  402. msm_uport->clk_state = MSM_HS_CLK_ON;
  403. MSM_HS_DBG("%s: Clock ON successful\n", __func__);
  404. }
  405. mutex_unlock(&msm_uport->clk_mutex);
  406. return rc;
  407. }
  408. static void msm_hs_clock_unvote(struct msm_hs_port *msm_uport)
  409. {
  410. int rc = atomic_read(&msm_uport->clk_count);
  411. if (rc <= 0) {
  412. WARN(rc, "msm_uport->clk_count < 0!");
  413. dev_err(msm_uport->uport.dev,
  414. "%s: Clocks count invalid [%d]\n", __func__, rc);
  415. return;
  416. }
  417. mutex_lock(&msm_uport->clk_mutex);
  418. rc = atomic_dec_return(&msm_uport->clk_count);
  419. if (0 == rc) {
  420. /* Turn off the core clk and iface clk*/
  421. clk_disable_unprepare(msm_uport->clk);
  422. if (msm_uport->pclk)
  423. clk_disable_unprepare(msm_uport->pclk);
  424. /* Unvote the PNOC clock */
  425. msm_hs_bus_voting(msm_uport, BUS_RESET);
  426. msm_uport->clk_state = MSM_HS_CLK_OFF;
  427. MSM_HS_DBG("%s: Clock OFF successful\n", __func__);
  428. }
  429. mutex_unlock(&msm_uport->clk_mutex);
  430. }
  431. /* Check if the uport line number matches with user id stored in pdata.
  432. * User id information is stored during initialization. This function
  433. * ensues that the same device is selected */
  434. static struct msm_hs_port *get_matching_hs_port(struct platform_device *pdev)
  435. {
  436. struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
  437. struct msm_hs_port *msm_uport = msm_hs_get_hs_port(pdev->id);
  438. if ((!msm_uport) || (msm_uport->uport.line != pdev->id
  439. && msm_uport->uport.line != pdata->userid)) {
  440. MSM_HS_ERR("uport line number mismatch!");
  441. WARN_ON(1);
  442. return NULL;
  443. }
  444. return msm_uport;
  445. }
  446. static ssize_t show_clock(struct device *dev, struct device_attribute *attr,
  447. char *buf)
  448. {
  449. int state = 1;
  450. ssize_t ret = 0;
  451. enum msm_hs_clk_states_e clk_state;
  452. unsigned long flags;
  453. struct platform_device *pdev = container_of(dev, struct
  454. platform_device, dev);
  455. struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
  456. /* This check should not fail */
  457. if (msm_uport) {
  458. spin_lock_irqsave(&msm_uport->uport.lock, flags);
  459. clk_state = msm_uport->clk_state;
  460. spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
  461. if (clk_state <= MSM_HS_CLK_OFF)
  462. state = 0;
  463. ret = snprintf(buf, PAGE_SIZE, "%d\n", state);
  464. }
  465. return ret;
  466. }
  467. static ssize_t set_clock(struct device *dev, struct device_attribute *attr,
  468. const char *buf, size_t count)
  469. {
  470. int state;
  471. ssize_t ret = 0;
  472. struct platform_device *pdev = container_of(dev, struct
  473. platform_device, dev);
  474. struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
  475. /* This check should not fail */
  476. if (msm_uport) {
  477. state = buf[0] - '0';
  478. switch (state) {
  479. case 0:
  480. msm_hs_request_clock_off(&msm_uport->uport);
  481. ret = count;
  482. break;
  483. case 1:
  484. msm_hs_request_clock_on(&msm_uport->uport);
  485. ret = count;
  486. break;
  487. default:
  488. ret = -EINVAL;
  489. }
  490. }
  491. return ret;
  492. }
  493. static DEVICE_ATTR(clock, S_IWUSR | S_IRUGO, show_clock, set_clock);
  494. static inline unsigned int use_low_power_wakeup(struct msm_hs_port *msm_uport)
  495. {
  496. return (msm_uport->wakeup.irq > 0);
  497. }
  498. static void msm_hs_bus_voting(struct msm_hs_port *msm_uport, unsigned int vote)
  499. {
  500. int ret;
  501. if (msm_uport->bus_perf_client) {
  502. MSM_HS_DBG("Bus voting:%d\n", vote);
  503. ret = msm_bus_scale_client_update_request(
  504. msm_uport->bus_perf_client, vote);
  505. if (ret)
  506. MSM_HS_ERR("%s(): Failed for Bus voting: %d\n",
  507. __func__, vote);
  508. }
  509. }
/* Read a UARTDM register at byte offset @index (relaxed, no barrier). */
static inline unsigned int msm_hs_read(struct uart_port *uport,
				       unsigned int index)
{
	return readl_relaxed(uport->membase + index);
}
/* Write @value to the UARTDM register at byte offset @index
 * (relaxed, no barrier -- callers order accesses themselves). */
static inline void msm_hs_write(struct uart_port *uport, unsigned int index,
				unsigned int value)
{
	writel_relaxed(value, uport->membase + index);
}
  520. static int sps_rx_disconnect(struct sps_pipe *sps_pipe_handler)
  521. {
  522. struct sps_connect config;
  523. int ret;
  524. ret = sps_get_config(sps_pipe_handler, &config);
  525. if (ret) {
  526. pr_err("%s: sps_get_config() failed ret %d\n", __func__, ret);
  527. return ret;
  528. }
  529. config.options |= SPS_O_POLL;
  530. ret = sps_set_config(sps_pipe_handler, &config);
  531. if (ret) {
  532. pr_err("%s: sps_set_config() failed ret %d\n", __func__, ret);
  533. return ret;
  534. }
  535. return sps_disconnect(sps_pipe_handler);
  536. }
  537. static void hex_dump_ipc(char *prefix, char *string, int size)
  538. {
  539. unsigned char linebuf[512];
  540. unsigned char firstbuf[40], lastbuf[40];
  541. if ((hs_serial_debug_mask != DBG_LEV) && (size > 20)) {
  542. hex_dump_to_buffer(string, 10, 16, 1,
  543. firstbuf, sizeof(firstbuf), 1);
  544. hex_dump_to_buffer(string + (size - 10), 10, 16, 1,
  545. lastbuf, sizeof(lastbuf), 1);
  546. MSM_HS_INFO("%s : %s...%s", prefix, firstbuf, lastbuf);
  547. } else {
  548. hex_dump_to_buffer(string, size, 16, 1,
  549. linebuf, sizeof(linebuf), 1);
  550. MSM_HS_INFO("%s : %s", prefix, linebuf);
  551. }
  552. }
  553. /*
  554. * This API read and provides UART Core registers information.
  555. */
static void dump_uart_hs_registers(struct msm_hs_port *msm_uport)
{
	struct uart_port *uport = &(msm_uport->uport);

	/* Register reads require the core clock; bail out if it is off. */
	if (msm_uport->clk_state != MSM_HS_CLK_ON) {
		MSM_HS_WARN("%s: Failed.Clocks are OFF\n", __func__);
		return;
	}
	/* Mode, watermark, DMA-enable and interrupt-mask registers. */
	MSM_HS_DBG(
	"MR1:%x MR2:%x TFWR:%x RFWR:%x DMEN:%x IMR:%x MISR:%x NCF_TX:%x\n",
	msm_hs_read(uport, UART_DM_MR1),
	msm_hs_read(uport, UART_DM_MR2),
	msm_hs_read(uport, UART_DM_TFWR),
	msm_hs_read(uport, UART_DM_RFWR),
	msm_hs_read(uport, UART_DM_DMEN),
	msm_hs_read(uport, UART_DM_IMR),
	msm_hs_read(uport, UART_DM_MISR),
	msm_hs_read(uport, UART_DM_NCF_TX));
	/* Status and FIFO-state registers. */
	MSM_HS_INFO("SR:%x ISR:%x DMRX:%x RX_SNAP:%x TXFS:%x RXFS:%x\n",
	msm_hs_read(uport, UART_DM_SR),
	msm_hs_read(uport, UART_DM_ISR),
	msm_hs_read(uport, UART_DM_DMRX),
	msm_hs_read(uport, UART_DM_RX_TOTAL_SNAP),
	msm_hs_read(uport, UART_DM_TXFS),
	msm_hs_read(uport, UART_DM_RXFS));
	/* Driver-side clock/flush bookkeeping, for correlation. */
	MSM_HS_DBG("clk_req_state:0x%x rx.flush:%u\n",
		   msm_uport->clk_req_off_state,
		   msm_uport->rx.flush);
	MSM_HS_DBG("clk_state:%d", msm_uport->clk_state);
}
  585. static int msm_serial_loopback_enable_set(void *data, u64 val)
  586. {
  587. struct msm_hs_port *msm_uport = data;
  588. struct uart_port *uport = &(msm_uport->uport);
  589. unsigned long flags;
  590. int ret = 0;
  591. msm_hs_clock_vote(msm_uport);
  592. if (val) {
  593. spin_lock_irqsave(&uport->lock, flags);
  594. ret = msm_hs_read(uport, UART_DM_MR2);
  595. ret |= (UARTDM_MR2_LOOP_MODE_BMSK |
  596. UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
  597. msm_hs_write(uport, UART_DM_MR2, ret);
  598. spin_unlock_irqrestore(&uport->lock, flags);
  599. } else {
  600. spin_lock_irqsave(&uport->lock, flags);
  601. ret = msm_hs_read(uport, UART_DM_MR2);
  602. ret &= ~(UARTDM_MR2_LOOP_MODE_BMSK |
  603. UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
  604. msm_hs_write(uport, UART_DM_MR2, ret);
  605. spin_unlock_irqrestore(&uport->lock, flags);
  606. }
  607. /* Calling CLOCK API. Hence mb() requires here. */
  608. mb();
  609. msm_hs_clock_unvote(msm_uport);
  610. return 0;
  611. }
  612. static int msm_serial_loopback_enable_get(void *data, u64 *val)
  613. {
  614. struct msm_hs_port *msm_uport = data;
  615. struct uart_port *uport = &(msm_uport->uport);
  616. unsigned long flags;
  617. int ret = 0;
  618. msm_hs_clock_vote(msm_uport);
  619. spin_lock_irqsave(&uport->lock, flags);
  620. ret = msm_hs_read(&msm_uport->uport, UART_DM_MR2);
  621. spin_unlock_irqrestore(&uport->lock, flags);
  622. msm_hs_clock_unvote(msm_uport);
  623. *val = (ret & UARTDM_MR2_LOOP_MODE_BMSK) ? 1 : 0;
  624. return 0;
  625. }
  626. DEFINE_SIMPLE_ATTRIBUTE(loopback_enable_fops, msm_serial_loopback_enable_get,
  627. msm_serial_loopback_enable_set, "%llu\n");
  628. /*
  629. * msm_serial_hs debugfs node: <debugfs_root>/msm_serial_hs/loopback.<id>
  630. * writing 1 turns on internal loopback mode in HW. Useful for automation
  631. * test scripts.
  632. * writing 0 disables the internal loopback mode. Default is disabled.
  633. */
  634. static void __devinit msm_serial_debugfs_init(struct msm_hs_port *msm_uport,
  635. int id)
  636. {
  637. char node_name[15];
  638. snprintf(node_name, sizeof(node_name), "loopback.%d", id);
  639. msm_uport->loopback_dir = debugfs_create_file(node_name,
  640. S_IRUGO | S_IWUSR,
  641. debug_base,
  642. msm_uport,
  643. &loopback_enable_fops);
  644. if (IS_ERR_OR_NULL(msm_uport->loopback_dir))
  645. MSM_HS_ERR("%s(): Cannot create loopback.%d debug entry",
  646. __func__, id);
  647. }
  648. static int __devexit msm_hs_remove(struct platform_device *pdev)
  649. {
  650. struct msm_hs_port *msm_uport;
  651. struct device *dev;
  652. if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
  653. MSM_HS_ERR(KERN_ERR "Invalid plaform device ID = %d\n", pdev->id);
  654. return -EINVAL;
  655. }
  656. msm_uport = get_matching_hs_port(pdev);
  657. if (!msm_uport)
  658. return -EINVAL;
  659. dev = msm_uport->uport.dev;
  660. sysfs_remove_file(&pdev->dev.kobj, &dev_attr_clock.attr);
  661. debugfs_remove(msm_uport->loopback_dir);
  662. dma_pool_free(msm_uport->rx.pool, msm_uport->rx.buffer,
  663. msm_uport->rx.rbuffer);
  664. dma_pool_destroy(msm_uport->rx.pool);
  665. wake_lock_destroy(&msm_uport->rx.wake_lock);
  666. wake_lock_destroy(&msm_uport->dma_wake_lock);
  667. destroy_workqueue(msm_uport->hsuart_wq);
  668. mutex_destroy(&msm_uport->clk_mutex);
  669. uart_remove_one_port(&msm_hs_driver, &msm_uport->uport);
  670. clk_put(msm_uport->clk);
  671. if (msm_uport->pclk)
  672. clk_put(msm_uport->pclk);
  673. iounmap(msm_uport->uport.membase);
  674. return 0;
  675. }
  676. static int msm_hs_init_clk(struct uart_port *uport)
  677. {
  678. int ret;
  679. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  680. /* Set up the MREG/NREG/DREG/MNDREG */
  681. ret = clk_set_rate(msm_uport->clk, uport->uartclk);
  682. if (ret) {
  683. MSM_HS_WARN("Error setting clock rate on UART\n");
  684. return ret;
  685. }
  686. ret = msm_hs_clock_vote(msm_uport);
  687. if (ret) {
  688. MSM_HS_ERR("Error could not turn on UART clk\n");
  689. return ret;
  690. }
  691. return 0;
  692. }
  693. /* Connect a UART peripheral's SPS endpoint(consumer endpoint)
  694. *
  695. * Also registers a SPS callback function for the consumer
  696. * process with the SPS driver
  697. *
  698. * @uport - Pointer to uart uport structure
  699. *
  700. * @return - 0 if successful else negative value.
  701. *
  702. */
  703. static int msm_hs_spsconnect_tx(struct uart_port *uport)
  704. {
  705. int ret;
  706. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  707. struct msm_hs_tx *tx = &msm_uport->tx;
  708. struct sps_pipe *sps_pipe_handle = tx->cons.pipe_handle;
  709. struct sps_connect *sps_config = &tx->cons.config;
  710. struct sps_register_event *sps_event = &tx->cons.event;
  711. /* Establish connection between peripheral and memory endpoint */
  712. ret = sps_connect(sps_pipe_handle, sps_config);
  713. if (ret) {
  714. MSM_HS_ERR("msm_serial_hs: sps_connect() failed for tx!!\n"
  715. "pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
  716. return ret;
  717. }
  718. /* Register callback event for EOT (End of transfer) event. */
  719. ret = sps_register_event(sps_pipe_handle, sps_event);
  720. if (ret) {
  721. MSM_HS_ERR("msm_serial_hs: sps_connect() failed for tx!!\n"
  722. "pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
  723. goto reg_event_err;
  724. }
  725. return 0;
  726. reg_event_err:
  727. sps_disconnect(sps_pipe_handle);
  728. return ret;
  729. }
  730. /* Connect a UART peripheral's SPS endpoint(producer endpoint)
  731. *
  732. * Also registers a SPS callback function for the producer
  733. * process with the SPS driver
  734. *
  735. * @uport - Pointer to uart uport structure
  736. *
  737. * @return - 0 if successful else negative value.
  738. *
  739. */
  740. static int msm_hs_spsconnect_rx(struct uart_port *uport)
  741. {
  742. int ret;
  743. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  744. struct msm_hs_rx *rx = &msm_uport->rx;
  745. struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
  746. struct sps_connect *sps_config = &rx->prod.config;
  747. struct sps_register_event *sps_event = &rx->prod.event;
  748. /* Establish connection between peripheral and memory endpoint */
  749. ret = sps_connect(sps_pipe_handle, sps_config);
  750. if (ret) {
  751. MSM_HS_ERR("msm_serial_hs: sps_connect() failed for rx!!\n"
  752. "pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
  753. return ret;
  754. }
  755. /* Register callback event for DESC_DONE event. */
  756. ret = sps_register_event(sps_pipe_handle, sps_event);
  757. if (ret) {
  758. MSM_HS_ERR("msm_serial_hs: sps_connect() failed for rx!!\n"
  759. "pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
  760. goto reg_event_err;
  761. }
  762. return 0;
  763. reg_event_err:
  764. sps_disconnect(sps_pipe_handle);
  765. return ret;
  766. }
  767. /*
  768. * programs the UARTDM_CSR register with correct bit rates
  769. *
  770. * Interrupts should be disabled before we are called, as
  771. * we modify Set Baud rate
  772. * Set receive stale interrupt level, dependant on Bit Rate
  773. * Goal is to have around 8 ms before indicate stale.
  774. * roundup (((Bit Rate * .008) / 10) + 1
  775. */
static void msm_hs_set_bps_locked(struct uart_port *uport,
				  unsigned int bps)
{
	unsigned long rxstale;
	unsigned long data;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	/*
	 * Program the CSR divider pair for the requested rate and pick a
	 * stale-interrupt timeout in character times (see the sizing
	 * comment above this function).
	 */
	switch (bps) {
	case 300:
		msm_hs_write(uport, UART_DM_CSR, 0x00);
		rxstale = 1;
		break;
	case 600:
		msm_hs_write(uport, UART_DM_CSR, 0x11);
		rxstale = 1;
		break;
	case 1200:
		msm_hs_write(uport, UART_DM_CSR, 0x22);
		rxstale = 1;
		break;
	case 2400:
		msm_hs_write(uport, UART_DM_CSR, 0x33);
		rxstale = 1;
		break;
	case 4800:
		msm_hs_write(uport, UART_DM_CSR, 0x44);
		rxstale = 1;
		break;
	case 9600:
		msm_hs_write(uport, UART_DM_CSR, 0x55);
		rxstale = 2;
		break;
	case 14400:
		msm_hs_write(uport, UART_DM_CSR, 0x66);
		rxstale = 3;
		break;
	case 19200:
		msm_hs_write(uport, UART_DM_CSR, 0x77);
		rxstale = 4;
		break;
	case 28800:
		msm_hs_write(uport, UART_DM_CSR, 0x88);
		rxstale = 6;
		break;
	case 38400:
		msm_hs_write(uport, UART_DM_CSR, 0x99);
		rxstale = 8;
		break;
	case 57600:
		msm_hs_write(uport, UART_DM_CSR, 0xaa);
		rxstale = 16;
		break;
	case 76800:
		msm_hs_write(uport, UART_DM_CSR, 0xbb);
		rxstale = 16;
		break;
	case 115200:
		msm_hs_write(uport, UART_DM_CSR, 0xcc);
		rxstale = 31;
		break;
	case 230400:
		msm_hs_write(uport, UART_DM_CSR, 0xee);
		rxstale = 31;
		break;
	case 460800:
		msm_hs_write(uport, UART_DM_CSR, 0xff);
		rxstale = 31;
		break;
	/* Rates above 460800 all use divider 0xff; the actual baud is
	 * then set purely through the core clock rate below. */
	case 4000000:
	case 3686400:
	case 3200000:
	case 3500000:
	case 3000000:
	case 2500000:
	case 1500000:
	case 1152000:
	case 1000000:
	case 921600:
		msm_hs_write(uport, UART_DM_CSR, 0xff);
		rxstale = 31;
		break;
	default:
		msm_hs_write(uport, UART_DM_CSR, 0xff);
		/* default to 9600 */
		bps = 9600;
		rxstale = 2;
		break;
	}
	/*
	 * uart baud rate depends on CSR and MND Values
	 * we are updating CSR before and then calling
	 * clk_set_rate which updates MND Values. Hence
	 * dsb requires here.
	 */
	mb();
	if (bps > 460800) {
		/* High-speed rates: clock the core at 16x the baud rate. */
		uport->uartclk = bps * 16;
		/* BLSP based UART supports maximum clock frequency
		 * of 63.16 Mhz. With this (63.16 Mhz) clock frequency
		 * UART can support baud rate of 3.94 Mbps which is
		 * equivalent to 4 Mbps.
		 * UART hardware is robust enough to handle this
		 * deviation to achieve baud rate ~4 Mbps.
		 */
		if (bps == 4000000)
			uport->uartclk = BLSP_UART_CLK_FMAX;
	} else {
		uport->uartclk = 7372800;
	}
	if (clk_set_rate(msm_uport->clk, uport->uartclk)) {
		MSM_HS_WARN("Error setting clock rate on UART\n");
		WARN_ON(1);
	}
	/* Split the stale timeout across the LSB field and the MSB field
	 * (the latter holds bits 2+ of the value). */
	data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
	data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);
	msm_hs_write(uport, UART_DM_IPR, data);
	/*
	 * It is suggested to do reset of transmitter and receiver after
	 * changing any protocol configuration. Here Baud rate and stale
	 * timeout are getting updated. Hence reset transmitter and receiver.
	 */
	msm_hs_write(uport, UART_DM_CR, RESET_TX);
	msm_hs_write(uport, UART_DM_CR, RESET_RX);
}
/*
 * Program CSR and the stale timeout for the "standard" (legacy TCXO/4)
 * clocking mode, used when clk_get_rate() reports 0 in set_termios.
 * Note the CSR values differ from msm_hs_set_bps_locked() because the
 * reference clock differs. Unsupported rates fall back to 9600.
 */
static void msm_hs_set_std_bps_locked(struct uart_port *uport,
				      unsigned int bps)
{
	unsigned long rxstale;
	unsigned long data;

	switch (bps) {
	case 9600:
		msm_hs_write(uport, UART_DM_CSR, 0x99);
		rxstale = 2;
		break;
	case 14400:
		msm_hs_write(uport, UART_DM_CSR, 0xaa);
		rxstale = 3;
		break;
	case 19200:
		msm_hs_write(uport, UART_DM_CSR, 0xbb);
		rxstale = 4;
		break;
	case 28800:
		msm_hs_write(uport, UART_DM_CSR, 0xcc);
		rxstale = 6;
		break;
	case 38400:
		msm_hs_write(uport, UART_DM_CSR, 0xdd);
		rxstale = 8;
		break;
	case 57600:
		msm_hs_write(uport, UART_DM_CSR, 0xee);
		rxstale = 16;
		break;
	case 115200:
		msm_hs_write(uport, UART_DM_CSR, 0xff);
		rxstale = 31;
		break;
	default:
		msm_hs_write(uport, UART_DM_CSR, 0x99);
		/* default to 9600 */
		bps = 9600;
		rxstale = 2;
		break;
	}
	/* Stale timeout: LSB field plus the MSB field (bits 2+). */
	data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
	data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);
	msm_hs_write(uport, UART_DM_IPR, data);
}
  944. /*
  945. * termios : new ktermios
  946. * oldtermios: old ktermios previous setting
  947. *
  948. * Configure the serial port
  949. */
/*
 * Apply new termios settings: baud rate, parity, character size, stop
 * bits and HW flow control. Runs with clocks voted on and clk_mutex
 * held for the duration; RX is quiesced (RFR raised, BAM iface
 * disabled) before reprogramming and restarted afterwards.
 */
static void msm_hs_set_termios(struct uart_port *uport,
			       struct ktermios *termios,
			       struct ktermios *oldtermios)
{
	unsigned int bps;
	unsigned long data;
	int ret;
	unsigned int c_cflag = termios->c_cflag;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_rx *rx = &msm_uport->rx;
	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;

	/**
	 * set_termios can be invoked from the framework when
	 * the clocks are off and the client has not had a chance
	 * to turn them on. Make sure that they are on
	 */
	msm_hs_clock_vote(msm_uport);
	mutex_lock(&msm_uport->clk_mutex);
	/* Mask all UART interrupts while reconfiguring. */
	msm_hs_write(uport, UART_DM_IMR, 0);

	MSM_HS_DBG("Entering %s\n", __func__);
	dump_uart_hs_registers(msm_uport);

	/* Clear the Rx Ready Ctl bit - This ensures that
	 * flow control lines stop the other side from sending
	 * data while we change the parameters
	 */
	data = msm_hs_read(uport, UART_DM_MR1);
	data &= ~UARTDM_MR1_RX_RDY_CTL_BMSK;
	msm_hs_write(uport, UART_DM_MR1, data);
	/* set RFR_N to high */
	msm_hs_write(uport, UART_DM_CR, RFR_HIGH);

	/*
	 * Disable Rx channel of UARTDM
	 * DMA Rx Stall happens if enqueue and flush of Rx command happens
	 * concurrently. Hence before changing the baud rate/protocol
	 * configuration and sending flush command to ADM, disable the Rx
	 * channel of UARTDM.
	 * Note: should not reset the receiver here immediately as it is not
	 * suggested to do disable/reset or reset/disable at the same time.
	 */
	data = msm_hs_read(uport, UART_DM_DMEN);
	/* Disable UARTDM RX BAM Interface */
	data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
	msm_hs_write(uport, UART_DM_DMEN, data);

	/* 300 is the minimum baud support by the driver  */
	bps = uart_get_baud_rate(uport, termios, oldtermios, 200, 4000000);

	/* Temporary remapping  200 BAUD to 3.2 mbps */
	if (bps == 200)
		bps = 3200000;

	uport->uartclk = clk_get_rate(msm_uport->clk);
	/* Zero clock rate means legacy clocking; use the std table. */
	if (!uport->uartclk)
		msm_hs_set_std_bps_locked(uport, bps);
	else
		msm_hs_set_bps_locked(uport, bps);

	data = msm_hs_read(uport, UART_DM_MR2);
	data &= ~UARTDM_MR2_PARITY_MODE_BMSK;
	/* set parity */
	if (PARENB == (c_cflag & PARENB)) {
		if (PARODD == (c_cflag & PARODD)) {
			data |= ODD_PARITY;
		} else if (CMSPAR == (c_cflag & CMSPAR)) {
			data |= SPACE_PARITY;
		} else {
			data |= EVEN_PARITY;
		}
	}

	/* Set bits per char */
	data &= ~UARTDM_MR2_BITS_PER_CHAR_BMSK;

	switch (c_cflag & CSIZE) {
	case CS5:
		data |= FIVE_BPC;
		break;
	case CS6:
		data |= SIX_BPC;
		break;
	case CS7:
		data |= SEVEN_BPC;
		break;
	default:
		data |= EIGHT_BPC;
		break;
	}
	/* stop bits */
	if (c_cflag & CSTOPB) {
		data |= STOP_BIT_TWO;
	} else {
		/* otherwise 1 stop bit */
		data |= STOP_BIT_ONE;
	}
	data |= UARTDM_MR2_ERROR_MODE_BMSK;
	/* write parity/bits per char/stop bit configuration */
	msm_hs_write(uport, UART_DM_MR2, data);

	uport->ignore_status_mask = termios->c_iflag & INPCK;
	uport->ignore_status_mask |= termios->c_iflag & IGNPAR;
	uport->ignore_status_mask |= termios->c_iflag & IGNBRK;

	uport->read_status_mask = (termios->c_cflag & CREAD);

	/* Set Transmit software time out */
	uart_update_timeout(uport, c_cflag, bps);

	msm_hs_write(uport, UART_DM_CR, RESET_RX);
	msm_hs_write(uport, UART_DM_CR, RESET_TX);

	/* Issue TX BAM Start IFC command */
	msm_hs_write(uport, UART_DM_CR, START_TX_BAM_IFC);

	/* If RX was active, tear down and re-establish the RX BAM pipe so
	 * the new configuration takes effect, then drain via the tasklet. */
	if (msm_uport->rx.flush == FLUSH_NONE) {
		wake_lock(&msm_uport->rx.wake_lock);
		msm_uport->rx.flush = FLUSH_DATA_INVALID;
		mb();
		if (msm_uport->rx_bam_inprogress)
			ret = wait_event_timeout(msm_uport->rx.wait,
				msm_uport->rx_bam_inprogress == false,
				RX_FLUSH_COMPLETE_TIMEOUT);
		ret = sps_rx_disconnect(sps_pipe_handle);
		if (ret)
			MSM_HS_ERR("%s(): sps_disconnect failed\n",
				   __func__);
		msm_hs_spsconnect_rx(uport);
		msm_uport->rx.flush = FLUSH_IGNORE;
		msm_serial_hs_rx_tlet((unsigned long) &rx->tlet);
	}

	/* Configure HW flow control
	 * UART Core would see status of CTS line when it is sending data
	 * to remote uart to confirm that it can receive or not.
	 * UART Core would trigger RFR if it is not having any space with
	 * RX FIFO.
	 */
	/* Pulling RFR line high */
	msm_hs_write(uport, UART_DM_CR, RFR_LOW);
	data = msm_hs_read(uport, UART_DM_MR1);
	data &= ~(UARTDM_MR1_CTS_CTL_BMSK | UARTDM_MR1_RX_RDY_CTL_BMSK);
	if (c_cflag & CRTSCTS) {
		data |= UARTDM_MR1_CTS_CTL_BMSK;
		data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
	}
	msm_hs_write(uport, UART_DM_MR1, data);

	/* Re-enable interrupts with the driver's cached mask. */
	msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
	mb();
	mutex_unlock(&msm_uport->clk_mutex);
	MSM_HS_DBG("Exit %s\n", __func__);
	dump_uart_hs_registers(msm_uport);
	msm_hs_clock_unvote(msm_uport);
}
  1089. /*
  1090. * Standard API, Transmitter
  1091. * Any character in the transmit shift register is sent
  1092. */
/* Report TIOCSER_TEMT when the transmit shift register is empty,
 * 0 otherwise. Votes clocks on only for the duration of the SR read. */
unsigned int msm_hs_tx_empty(struct uart_port *uport)
{
	unsigned int data;
	unsigned int ret = 0;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	if (msm_uport->clk_state == MSM_HS_CLK_PORT_OFF) {
		/* NOTE(review): -EPERM is returned through an unsigned
		 * return type, so callers see a large positive value;
		 * the uart_ops interface prevents changing this here. */
		MSM_HS_ERR("%s:UART port is closed\n", __func__);
		return -EPERM;
	}

	msm_hs_clock_vote(msm_uport);
	data = msm_hs_read(uport, UART_DM_SR);
	msm_hs_clock_unvote(msm_uport);
	MSM_HS_DBG("%s(): SR Reg Read 0x%x", __func__, data);

	if (data & UARTDM_SR_TXEMT_BMSK)
		ret = TIOCSER_TEMT;

	return ret;
}
  1110. EXPORT_SYMBOL(msm_hs_tx_empty);
  1111. /*
  1112. * Standard API, Stop transmitter.
  1113. * Any character in the transmit shift register is sent as
  1114. * well as the current data mover transfer .
  1115. */
static void msm_hs_stop_tx_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	/* Only gate further TX submissions; per the header comment above,
	 * any in-flight transfer is allowed to complete. */
	msm_uport->tx.tx_ready_int_en = 0;
}
  1121. /* Disconnect BAM RX Endpoint Pipe Index from workqueue context*/
  1122. static void hsuart_disconnect_rx_endpoint_work(struct work_struct *w)
  1123. {
  1124. struct msm_hs_port *msm_uport = container_of(w, struct msm_hs_port,
  1125. disconnect_rx_endpoint);
  1126. struct uart_port *uport = &msm_uport->uport;
  1127. struct msm_hs_rx *rx = &msm_uport->rx;
  1128. struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
  1129. struct platform_device *pdev = to_platform_device(uport->dev);
  1130. const struct msm_serial_hs_platform_data *pdata =
  1131. pdev->dev.platform_data;
  1132. int ret = 0;
  1133. msm_hs_clock_vote(msm_uport);
  1134. ret = sps_rx_disconnect(sps_pipe_handle);
  1135. msm_hs_clock_unvote(msm_uport);
  1136. if (ret)
  1137. MSM_HS_ERR("%s(): sps_disconnect failed\n", __func__);
  1138. if (pdata->no_suspend_delay)
  1139. wake_unlock(&msm_uport->rx.wake_lock);
  1140. else
  1141. wake_lock_timeout(&msm_uport->rx.wake_lock,
  1142. HZ / 2);
  1143. msm_uport->rx.flush = FLUSH_SHUTDOWN;
  1144. MSM_HS_DBG("%s: Calling Completion\n", __func__);
  1145. wake_up(&msm_uport->bam_disconnect_wait);
  1146. MSM_HS_DBG("%s: Done Completion\n", __func__);
  1147. wake_up(&msm_uport->rx.wait);
  1148. }
  1149. /*
  1150. * Standard API, Stop receiver as soon as possible.
  1151. *
  1152. * Function immediately terminates the operation of the
  1153. * channel receiver and any incoming characters are lost. None
  1154. * of the receiver status bits are affected by this command and
  1155. * characters that are already in the receive FIFO there.
  1156. */
static void msm_hs_stop_rx_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	unsigned int data;

	MSM_HS_DBG("In %s():\n", __func__);
	/* Register access only if clocks are not already fully off. */
	if (msm_uport->clk_state != MSM_HS_CLK_OFF) {
		/* disable dlink */
		data = msm_hs_read(uport, UART_DM_DMEN);
		data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
		msm_hs_write(uport, UART_DM_DMEN, data);

		/* calling DMOV or CLOCK API. Hence mb() */
		mb();
	}
	/* Disable the receiver */
	if (msm_uport->rx.flush == FLUSH_NONE) {
		wake_lock(&msm_uport->rx.wake_lock);
		msm_uport->rx.flush = FLUSH_STOP;
		/* workqueue for BAM rx endpoint disconnect:
		 * sps teardown must not run in this atomic context. */
		queue_work(msm_uport->hsuart_wq,
			   &msm_uport->disconnect_rx_endpoint);
	}
}
  1179. /* Transmit the next chunk of data */
/* Transmit the next chunk of data: take up to UARTDM_TX_BUF_SIZE
 * contiguous bytes from the circular xmit buffer, sync them for DMA and
 * queue one SPS transfer. Called with the port lock held. */
static void msm_hs_submit_tx_locked(struct uart_port *uport)
{
	int left;
	int tx_count;
	int aligned_tx_count;
	dma_addr_t src_addr;
	dma_addr_t aligned_src_addr;
	u32 flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_INT;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_tx *tx = &msm_uport->tx;
	struct circ_buf *tx_buf = &msm_uport->uport.state->xmit;
	struct sps_pipe *sps_pipe_handle;
	struct platform_device *pdev = to_platform_device(uport->dev);
	char * buff = tx_buf->buf+tx_buf->tail;

	/* Nothing to send (or tty stopped): stop TX and, if a clock-off
	 * request is pending, kick the clock-off worker. */
	if (uart_circ_empty(tx_buf) || uport->state->port.tty->stopped) {
		msm_hs_stop_tx_locked(uport);
		if (msm_uport->clk_state == MSM_HS_CLK_REQUEST_OFF) {
			MSM_HS_DBG("%s(): Clock off requested calling WQ",
				   __func__);
			queue_work(msm_uport->hsuart_wq,
				   &msm_uport->clock_off_w);
			wake_up(&msm_uport->tx.wait);
		}
		return;
	}

	tx->dma_in_flight = 1;

	/* Clamp to buffer size and to the contiguous span before wrap. */
	tx_count = uart_circ_chars_pending(tx_buf);

	if (UARTDM_TX_BUF_SIZE < tx_count)
		tx_count = UARTDM_TX_BUF_SIZE;

	left = UART_XMIT_SIZE - tx_buf->tail;

	if (tx_count > left)
		tx_count = left;
	MSM_HS_DBG("%s(): [UART_TX]<%d>\n", __func__, tx_count);
	hex_dump_ipc("HSUART write: ", &tx_buf->buf[tx_buf->tail], tx_count);
	src_addr = tx->dma_base + tx_buf->tail;

	/* Vendor hook: log BT HCI_Reset (0x01 0x03 0x0c 0x00) seen on
	 * port 0 — presumably ttyHS0 is the BT HCI UART; TODO confirm. */
	if (pdev->id == 0 && tx_count == 4 && buff[0] == 0x1 && buff[1] == 0x3 && buff[2] == 0xc && buff[3] == 0x0) {
		printk(KERN_ERR "(msm_serial_hs) hci_reset was received at ttyHS0 port\n");
	}

	/* Mask the src_addr to align on a cache
	 * and add those bytes to tx_count */
	aligned_src_addr = src_addr & ~(dma_get_cache_alignment() - 1);
	aligned_tx_count = tx_count + src_addr - aligned_src_addr;

	dma_sync_single_for_device(uport->dev, aligned_src_addr,
				   aligned_tx_count, DMA_TO_DEVICE);

	tx->tx_count = tx_count;

	msm_uport->tx.flush = FLUSH_NONE;
	sps_pipe_handle = tx->cons.pipe_handle;
	/* Queue transfer request to SPS */
	sps_transfer_one(sps_pipe_handle, src_addr, tx_count,
			 msm_uport, flags);
	MSM_HS_DBG("%s:Enqueue Tx Cmd\n", __func__);
	dump_uart_hs_registers(msm_uport);
}
  1233. /* Start to receive the next chunk of data */
/* Start to receive the next chunk of data: re-enable the RX BAM
 * interface, arm stale detection and queue one SPS RX transfer.
 * Called with the port lock held and clocks expected on. */
static void msm_hs_start_rx_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_rx *rx = &msm_uport->rx;
	struct sps_pipe *sps_pipe_handle;
	u32 flags = SPS_IOVEC_FLAG_INT;
	unsigned int buffer_pending = msm_uport->rx.buffer_pending;
	unsigned int data;

	if (msm_uport->clk_state != MSM_HS_CLK_ON) {
		MSM_HS_WARN("%s: Failed.Clocks are OFF\n", __func__);
		return;
	}
	/* A command already executed: let the tasklet finish first. */
	if (rx->rx_cmd_exec) {
		MSM_HS_DBG("%s: Rx Cmd got executed, wait for rx_tlet\n",
			   __func__);
		rx->flush = FLUSH_IGNORE;
		return;
	}
	msm_uport->rx.buffer_pending = 0;
	if (buffer_pending && hs_serial_debug_mask)
		MSM_HS_ERR("Error: rx started in buffer state = %x",
			   buffer_pending);

	msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
	/* Program the expected transfer size before enabling events. */
	msm_hs_write(uport, UART_DM_DMRX, UARTDM_RX_BUF_SIZE);
	msm_hs_write(uport, UART_DM_CR, STALE_EVENT_ENABLE);
	/*
	 * Enable UARTDM Rx Interface as previously it has been
	 * disable in set_termios before configuring baud rate.
	 */
	data = msm_hs_read(uport, UART_DM_DMEN);
	/* Enable UARTDM Rx BAM Interface */
	data |= UARTDM_RX_BAM_ENABLE_BMSK;

	msm_hs_write(uport, UART_DM_DMEN, data);
	msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
	/* Calling next DMOV API. Hence mb() here. */
	mb();

	/*
	 * RX-transfer will be automatically re-activated
	 * after last data of previous transfer was read.
	 */
	data = (RX_STALE_AUTO_RE_EN | RX_TRANS_AUTO_RE_ACTIVATE |
		RX_DMRX_CYCLIC_EN);
	msm_hs_write(uport, UART_DM_RX_TRANS_CTRL, data);
	/* Issue RX BAM Start IFC command */
	msm_hs_write(uport, UART_DM_CR, START_RX_BAM_IFC);
	mb();

	msm_uport->rx.flush = FLUSH_NONE;
	/* rx_bam_inprogress brackets the enqueue so set_termios can wait
	 * for it (see wait_event_timeout there). */
	msm_uport->rx_bam_inprogress = true;
	sps_pipe_handle = rx->prod.pipe_handle;
	/* Queue transfer request to SPS */
	sps_transfer_one(sps_pipe_handle, rx->rbuffer,
			 UARTDM_RX_BUF_SIZE, msm_uport, flags);
	msm_uport->rx_bam_inprogress = false;
	msm_uport->rx.rx_cmd_queued = true;
	wake_up(&msm_uport->rx.wait);
	MSM_HS_DBG("%s:Enqueue Rx Cmd\n", __func__);
	dump_uart_hs_registers(msm_uport);
}
  1292. static void flip_insert_work(struct work_struct *work)
  1293. {
  1294. unsigned long flags;
  1295. int retval;
  1296. struct msm_hs_port *msm_uport =
  1297. container_of(work, struct msm_hs_port,
  1298. rx.flip_insert_work.work);
  1299. struct tty_struct *tty = msm_uport->uport.state->port.tty;
  1300. spin_lock_irqsave(&msm_uport->uport.lock, flags);
  1301. if (msm_uport->rx.buffer_pending == NONE_PENDING) {
  1302. if (hs_serial_debug_mask)
  1303. MSM_HS_ERR("Error: No buffer pending in %s",
  1304. __func__);
  1305. return;
  1306. }
  1307. if (msm_uport->rx.buffer_pending & FIFO_OVERRUN) {
  1308. retval = tty_insert_flip_char(tty, 0, TTY_OVERRUN);
  1309. if (retval)
  1310. msm_uport->rx.buffer_pending &= ~FIFO_OVERRUN;
  1311. }
  1312. if (msm_uport->rx.buffer_pending & PARITY_ERROR) {
  1313. retval = tty_insert_flip_char(tty, 0, TTY_PARITY);
  1314. if (retval)
  1315. msm_uport->rx.buffer_pending &= ~PARITY_ERROR;
  1316. }
  1317. if (msm_uport->rx.buffer_pending & CHARS_NORMAL) {
  1318. int rx_count, rx_offset;
  1319. rx_count = (msm_uport->rx.buffer_pending & 0xFFFF0000) >> 16;
  1320. rx_offset = (msm_uport->rx.buffer_pending & 0xFFD0) >> 5;
  1321. retval = tty_insert_flip_string(tty, msm_uport->rx.buffer +
  1322. rx_offset, rx_count);
  1323. msm_uport->rx.buffer_pending &= (FIFO_OVERRUN |
  1324. PARITY_ERROR);
  1325. if (retval != rx_count)
  1326. msm_uport->rx.buffer_pending |= CHARS_NORMAL |
  1327. retval << 8 | (rx_count - retval) << 16;
  1328. }
  1329. if (msm_uport->rx.buffer_pending)
  1330. schedule_delayed_work(&msm_uport->rx.flip_insert_work,
  1331. msecs_to_jiffies(RETRY_TIMEOUT));
  1332. else
  1333. if ((msm_uport->clk_state == MSM_HS_CLK_ON) &&
  1334. (msm_uport->rx.flush <= FLUSH_IGNORE)) {
  1335. MSM_HS_WARN("Pending buffers cleared,restarting\n");
  1336. msm_hs_start_rx_locked(&msm_uport->uport);
  1337. }
  1338. spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
  1339. tty_flip_buffer_push(tty);
  1340. }
/*
 * RX tasklet: runs after the SPS/BAM driver reports a completed RX
 * descriptor (scheduled from msm_hs_sps_rx_callback).
 *
 * Pushes the received bytes — plus any overrun/parity/break error
 * flags read from the status register — into the tty flip buffer,
 * re-queues the next BAM RX descriptor when the tty accepted all the
 * data, and schedules the flip-insert retry worker when the tty
 * buffer is exhausted.  Runs with the port spinlock held for the bulk
 * of the work; the final flip-buffer push happens unlocked.
 */
static void msm_serial_hs_rx_tlet(unsigned long tlet_ptr)
{
	int retval;
	int rx_count = 0;
	unsigned long status;
	unsigned long flags;
	unsigned int error_f = 0;
	struct uart_port *uport;
	struct msm_hs_port *msm_uport;
	unsigned int flush;
	struct tty_struct *tty;
	struct sps_event_notify *notify;
	struct msm_hs_rx *rx;
	struct sps_pipe *sps_pipe_handle;
	u32 sps_flags = SPS_IOVEC_FLAG_INT;
	struct platform_device *pdev;
	const struct msm_serial_hs_platform_data *pdata;

	msm_uport = container_of((struct tasklet_struct *)tlet_ptr,
				 struct msm_hs_port, rx.tlet);
	uport = &msm_uport->uport;
	tty = uport->state->port.tty;
	notify = &msm_uport->notify;
	rx = &msm_uport->rx;
	pdev = to_platform_device(uport->dev);
	/* NOTE(review): pdata is dereferenced below without a NULL check,
	 * unlike msm_hs_config_uart_gpios() — confirm probe guarantees it.
	 */
	pdata = pdev->dev.platform_data;

	/* The completed command is consumed; a new one may be queued now */
	msm_uport->rx.rx_cmd_queued = false;
	msm_uport->rx.rx_cmd_exec = false;

	/* Snapshot line status before taking the lock */
	status = msm_hs_read(uport, UART_DM_SR);

	spin_lock_irqsave(&uport->lock, flags);

	MSM_HS_DBG("In %s\n", __func__);
	dump_uart_hs_registers(msm_uport);

	/* overflow is not connect to data in a FIFO */
	if (unlikely((status & UARTDM_SR_OVERRUN_BMSK) &&
		     (uport->read_status_mask & CREAD))) {
		/* report the overrun to the tty; remember it on failure */
		retval = tty_insert_flip_char(tty, 0, TTY_OVERRUN);
		if (!retval)
			msm_uport->rx.buffer_pending |= TTY_OVERRUN;
		uport->icount.buf_overrun++;
		error_f = 1;
	}

	if (!(uport->ignore_status_mask & INPCK))
		status = status & ~(UARTDM_SR_PAR_FRAME_BMSK);

	if (unlikely(status & UARTDM_SR_PAR_FRAME_BMSK)) {
		/* Can not tell difference between parity & frame error */
		if (hs_serial_debug_mask)
			MSM_HS_WARN("msm_serial_hs: parity error\n");
		uport->icount.parity++;
		error_f = 1;
		if (!(uport->ignore_status_mask & IGNPAR)) {
			retval = tty_insert_flip_char(tty, 0, TTY_PARITY);
			if (!retval)
				msm_uport->rx.buffer_pending |= TTY_PARITY;
		}
	}

	if (unlikely(status & UARTDM_SR_RX_BREAK_BMSK)) {
		if (hs_serial_debug_mask)
			MSM_HS_WARN("msm_serial_hs: Rx break\n");
		uport->icount.brk++;
		error_f = 1;
		if (!(uport->ignore_status_mask & IGNBRK)) {
			retval = tty_insert_flip_char(tty, 0, TTY_BREAK);
			if (!retval)
				msm_uport->rx.buffer_pending |= TTY_BREAK;
		}
	}

	if (error_f) {
		/* clear latched error bits in hardware (needs live clocks) */
		if (msm_uport->clk_state == MSM_HS_CLK_ON)
			msm_hs_write(uport, UART_DM_CR, RESET_ERROR_STATUS);
		else
			MSM_HS_WARN("%s: Failed.Clocks are OFF\n", __func__);
	}
	flush = msm_uport->rx.flush;
	/* A previous flush completed: restart RX unless data is stalled */
	if (flush == FLUSH_IGNORE)
		if (!msm_uport->rx.buffer_pending) {
			MSM_HS_DBG("%s: calling start_rx_locked\n", __func__);
			msm_hs_start_rx_locked(uport);
		}
	/* FLUSH_DATA_INVALID and beyond: drop the payload entirely */
	if (flush >= FLUSH_DATA_INVALID)
		goto out;

	/* byte count recorded by the SPS RX completion callback */
	rx_count = msm_uport->rx_count_callback;

	MSM_HS_DBG("%s():[UART_RX]<%d>\n", __func__, rx_count);
	hex_dump_ipc("HSUART Read: ", msm_uport->rx.buffer, rx_count);
	if (0 != (uport->read_status_mask & CREAD)) {
		retval = tty_insert_flip_string(tty, msm_uport->rx.buffer,
						rx_count);
		if (retval != rx_count) {
			MSM_HS_DBG("%s(): retval %d rx_count %d", __func__,
				   retval, rx_count);
			/* Pack "chars already inserted" (bits 5..) and
			 * "chars left over" (bits 16..) for the retry
			 * worker to finish the insertion later.
			 */
			msm_uport->rx.buffer_pending |= CHARS_NORMAL |
				retval << 5 | (rx_count - retval) << 16;
		}
#ifdef _DW_ENABLED
		else
		{
			if (checkDualWaveStatus() != DUALWAVE_INACTIVE) {
				UpdateTime(msm_uport->rx.buffer, rx_count);
			}
		}
#endif
	}

	/* Everything was delivered: arm the next BAM RX descriptor */
	if (!msm_uport->rx.buffer_pending && !msm_uport->rx.rx_cmd_queued) {
		msm_uport->rx.flush = FLUSH_NONE;
		msm_uport->rx_bam_inprogress = true;
		sps_pipe_handle = rx->prod.pipe_handle;
		MSM_HS_DBG("Queing bam descriptor\n");
		/* Queue transfer request to SPS */
		sps_transfer_one(sps_pipe_handle, rx->rbuffer,
				 UARTDM_RX_BUF_SIZE, msm_uport, sps_flags);
		msm_uport->rx_bam_inprogress = false;
		msm_uport->rx.rx_cmd_queued = true;
		wake_up(&msm_uport->rx.wait);
	}
out:
	if (msm_uport->rx.buffer_pending) {
		MSM_HS_WARN("tty buffer exhausted.Stalling\n");
		schedule_delayed_work(&msm_uport->rx.flip_insert_work
				      , msecs_to_jiffies(RETRY_TIMEOUT));
	}
	/* release wakelock in 500ms, not immediately, because higher layers
	 * don't always take wakelocks when they should
	 */
	if (pdata->no_suspend_delay)
		wake_unlock(&msm_uport->rx.wake_lock);
	else
		wake_lock_timeout(&msm_uport->rx.wake_lock, HZ / 2);
	/* tty_flip_buffer_push() might call msm_hs_start(), so unlock */
	spin_unlock_irqrestore(&uport->lock, flags);
	if (flush < FLUSH_DATA_INVALID)
		tty_flip_buffer_push(tty);
}
  1471. /* Enable the transmitter Interrupt */
  1472. static void msm_hs_start_tx_locked(struct uart_port *uport )
  1473. {
  1474. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  1475. if (msm_uport->clk_state != MSM_HS_CLK_ON) {
  1476. MSM_HS_WARN("%s: Failed.Clocks are OFF\n", __func__);
  1477. return;
  1478. }
  1479. if ((msm_uport->tx.tx_ready_int_en == 0) &&
  1480. (msm_uport->tx.dma_in_flight == 0))
  1481. msm_hs_submit_tx_locked(uport);
  1482. }
  1483. /**
  1484. * Callback notification from SPS driver
  1485. *
  1486. * This callback function gets triggered called from
  1487. * SPS driver when requested SPS data transfer is
  1488. * completed.
  1489. *
  1490. */
  1491. static void msm_hs_sps_tx_callback(struct sps_event_notify *notify)
  1492. {
  1493. struct msm_hs_port *msm_uport =
  1494. (struct msm_hs_port *)
  1495. ((struct sps_event_notify *)notify)->user;
  1496. msm_uport->notify = *notify;
  1497. MSM_HS_DBG("%s: ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x, line=%d\n",
  1498. __func__, notify->event_id,
  1499. notify->data.transfer.iovec.addr,
  1500. notify->data.transfer.iovec.size,
  1501. notify->data.transfer.iovec.flags,
  1502. msm_uport->uport.line);
  1503. tasklet_schedule(&msm_uport->tx.tlet);
  1504. }
/*
 * TX tasklet: runs after the SPS/BAM driver reports that the queued
 * TX DMA descriptor completed.
 *
 * Under the port lock it retires the just-sent bytes from the circular
 * xmit buffer, clears the DMA-in-flight flag, accounts the TX bytes,
 * and attempts to submit the next chunk.  If a TX shutdown was
 * requested (FLUSH_STOP) it completes the flush handshake and wakes
 * the waiter instead.
 */
static void msm_serial_hs_tx_tlet(unsigned long tlet_ptr)
{
	unsigned long flags;
	struct msm_hs_port *msm_uport = container_of((struct tasklet_struct *)
				tlet_ptr, struct msm_hs_port, tx.tlet);
	struct uart_port *uport = &msm_uport->uport;
	struct circ_buf *tx_buf = &uport->state->xmit;
	struct msm_hs_tx *tx = &msm_uport->tx;

	/*
	 * Do the work buffer related work in BAM
	 * mode that is equivalent to legacy mode
	 */
	spin_lock_irqsave(&(msm_uport->uport.lock), flags);
	/* Retire the bytes DMA just sent; skip the tail update if the
	 * serial core already emptied the buffer (uart_flush_buffer).
	 * NOTE(review): the wrap mask is "& ~UART_XMIT_SIZE" rather than
	 * the conventional "& (UART_XMIT_SIZE - 1)"; it only behaves like
	 * a modulo while tail + tx_count stays below 2 * UART_XMIT_SIZE —
	 * confirm before reusing this pattern.
	 */
	if (!uart_circ_empty(tx_buf))
		tx_buf->tail = (tx_buf->tail +
				tx->tx_count) & ~UART_XMIT_SIZE;
	else
		MSM_HS_DBG("%s:circ buffer is empty\n", __func__);

	/* DMA is done; a new transfer may now be queued */
	tx->dma_in_flight = 0;
	uport->icount.tx += tx->tx_count;

	/*
	 * Calling to send next chunk of data
	 * If the circ buffer is empty, we stop
	 * If the clock off was requested, the clock
	 * off sequence is kicked off
	 */
	msm_hs_submit_tx_locked(uport);

	/* tell the line discipline there is room to write again */
	if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
		uart_write_wakeup(uport);

	if (msm_uport->tx.flush == FLUSH_STOP) {
		/* TX shutdown requested: advance state and wake the waiter */
		msm_uport->tx.flush = FLUSH_SHUTDOWN;
		wake_up(&msm_uport->tx.wait);
		spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
		return;
	}
	spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);

	MSM_HS_DBG("In %s()\n", __func__);
	dump_uart_hs_registers(msm_uport);
}
  1544. /**
  1545. * Callback notification from SPS driver
  1546. *
  1547. * This callback function gets triggered called from
  1548. * SPS driver when requested SPS data transfer is
  1549. * completed.
  1550. *
  1551. */
  1552. static void msm_hs_sps_rx_callback(struct sps_event_notify *notify)
  1553. {
  1554. struct msm_hs_port *msm_uport =
  1555. (struct msm_hs_port *)
  1556. ((struct sps_event_notify *)notify)->user;
  1557. struct uart_port *uport;
  1558. unsigned long flags;
  1559. uport = &(msm_uport->uport);
  1560. msm_uport->notify = *notify;
  1561. MSM_HS_DBG("%s: sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
  1562. __func__, notify->event_id,
  1563. notify->data.transfer.iovec.addr,
  1564. notify->data.transfer.iovec.size,
  1565. notify->data.transfer.iovec.flags);
  1566. if (msm_uport->rx.flush == FLUSH_NONE) {
  1567. spin_lock_irqsave(&uport->lock, flags);
  1568. msm_uport->rx_count_callback = notify->data.transfer.iovec.size;
  1569. msm_uport->rx.rx_cmd_exec = true;
  1570. spin_unlock_irqrestore(&uport->lock, flags);
  1571. tasklet_schedule(&msm_uport->rx.tlet);
  1572. }
  1573. }
  1574. /*
  1575. * Standard API, Current states of modem control inputs
  1576. *
  1577. * Since CTS can be handled entirely by HARDWARE we always
  1578. * indicate clear to send and count on the TX FIFO to block when
  1579. * it fills up.
  1580. *
  1581. * - TIOCM_DCD
  1582. * - TIOCM_CTS
  1583. * - TIOCM_DSR
  1584. * - TIOCM_RI
  1585. * (Unsupported) DCD and DSR will return them high. RI will return low.
  1586. */
  1587. static unsigned int msm_hs_get_mctrl_locked(struct uart_port *uport)
  1588. {
  1589. return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
  1590. }
  1591. /*
  1592. * Standard API, Set or clear RFR_signal
  1593. *
  1594. * Set RFR high, (Indicate we are not ready for data), we disable auto
  1595. * ready for receiving and then set RFR_N high. To set RFR to low we just turn
  1596. * back auto ready for receiving and it should lower RFR signal
  1597. * when hardware is ready
  1598. */
  1599. void msm_hs_set_mctrl_locked(struct uart_port *uport,
  1600. unsigned int mctrl)
  1601. {
  1602. unsigned int set_rts;
  1603. unsigned int data;
  1604. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  1605. if (msm_uport->clk_state != MSM_HS_CLK_ON) {
  1606. MSM_HS_WARN("%s:Failed.Clocks are OFF\n", __func__);
  1607. printk(KERN_INFO "(msm_serial_hs) msm_hs_set_mctrl_locked.Clocks are OFF\n");
  1608. return;
  1609. }
  1610. /* RTS is active low */
  1611. set_rts = TIOCM_RTS & mctrl ? 0 : 1;
  1612. data = msm_hs_read(uport, UART_DM_MR1);
  1613. if (set_rts) {
  1614. /*disable auto ready-for-receiving */
  1615. data &= ~UARTDM_MR1_RX_RDY_CTL_BMSK;
  1616. msm_hs_write(uport, UART_DM_MR1, data);
  1617. /* set RFR_N to high */
  1618. msm_hs_write(uport, UART_DM_CR, RFR_HIGH);
  1619. } else {
  1620. /* Enable auto ready-for-receiving */
  1621. data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
  1622. msm_hs_write(uport, UART_DM_MR1, data);
  1623. }
  1624. mb();
  1625. }
  1626. void msm_hs_set_mctrl(struct uart_port *uport,
  1627. unsigned int mctrl)
  1628. {
  1629. unsigned long flags;
  1630. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  1631. if (msm_uport->clk_state == MSM_HS_CLK_PORT_OFF) {
  1632. MSM_HS_ERR("%s:UART port is closed\n", __func__);
  1633. return ;
  1634. }
  1635. msm_hs_clock_vote(msm_uport);
  1636. spin_lock_irqsave(&uport->lock, flags);
  1637. msm_hs_set_mctrl_locked(uport, mctrl);
  1638. spin_unlock_irqrestore(&uport->lock, flags);
  1639. msm_hs_clock_unvote(msm_uport);
  1640. }
  1641. EXPORT_SYMBOL(msm_hs_set_mctrl);
  1642. /* Standard API, Enable modem status (CTS) interrupt */
  1643. static void msm_hs_enable_ms_locked(struct uart_port *uport)
  1644. {
  1645. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  1646. if (msm_uport->clk_state != MSM_HS_CLK_ON) {
  1647. MSM_HS_WARN("%s:Failed.Clocks are OFF\n", __func__);
  1648. return;
  1649. }
  1650. /* Enable DELTA_CTS Interrupt */
  1651. msm_uport->imr_reg |= UARTDM_ISR_DELTA_CTS_BMSK;
  1652. msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
  1653. mb();
  1654. }
  1655. /*
  1656. * Standard API, Break Signal
  1657. *
  1658. * Control the transmission of a break signal. ctl eq 0 => break
  1659. * signal terminate ctl ne 0 => start break signal
  1660. */
  1661. static void msm_hs_break_ctl(struct uart_port *uport, int ctl)
  1662. {
  1663. unsigned long flags;
  1664. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  1665. if (msm_uport->clk_state != MSM_HS_CLK_ON) {
  1666. MSM_HS_WARN("%s: Failed.Clocks are OFF\n", __func__);
  1667. return;
  1668. }
  1669. spin_lock_irqsave(&uport->lock, flags);
  1670. msm_hs_write(uport, UART_DM_CR, ctl ? START_BREAK : STOP_BREAK);
  1671. mb();
  1672. spin_unlock_irqrestore(&uport->lock, flags);
  1673. }
  1674. static void msm_hs_config_port(struct uart_port *uport, int cfg_flags)
  1675. {
  1676. if (cfg_flags & UART_CONFIG_TYPE)
  1677. uport->type = PORT_MSM;
  1678. }
  1679. /* Handle CTS changes (Called from interrupt handler) */
  1680. static void msm_hs_handle_delta_cts_locked(struct uart_port *uport)
  1681. {
  1682. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  1683. if (msm_uport->clk_state != MSM_HS_CLK_ON) {
  1684. MSM_HS_WARN("%s: Failed.Clocks are OFF\n", __func__);
  1685. return;
  1686. }
  1687. /* clear interrupt */
  1688. msm_hs_write(uport, UART_DM_CR, RESET_CTS);
  1689. /* Calling CLOCK API. Hence mb() requires here. */
  1690. mb();
  1691. uport->icount.cts++;
  1692. /* clear the IOCTL TIOCMIWAIT if called */
  1693. wake_up_interruptible(&uport->state->port.delta_msr_wait);
  1694. }
/* check if the TX path is flushed, and if so clock off
 * returns 0 did not clock off, need to retry (still sending final byte)
 * -1 did not clock off, do not retry
 * 1 if we clocked off
 */
static int msm_hs_check_clock_off(struct uart_port *uport)
{
	unsigned long sr_status;
	unsigned long flags;
#ifdef CONFIG_MSM_BT_POWER
	unsigned int data;
#endif
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct circ_buf *tx_buf = &uport->state->xmit;
	struct platform_device *pdev = to_platform_device(uport->dev);

	mutex_lock(&msm_uport->clk_mutex);
	spin_lock_irqsave(&uport->lock, flags);

	/* someone else already turned the clocks off */
	if (msm_uport->clk_state == MSM_HS_CLK_OFF) {
		spin_unlock_irqrestore(&uport->lock, flags);
		mutex_unlock(&msm_uport->clk_mutex);
		MSM_HS_INFO("%s: Clocks Off Successfully\n", __func__);
		return 1;
	}

	MSM_HS_DBG("In %s:\n", __func__);
	/* Cancel if tx tty buffer is not empty, dma is in flight,
	 * or tx fifo is not empty
	 */
	if (msm_uport->clk_state != MSM_HS_CLK_REQUEST_OFF ||
	    !uart_circ_empty(tx_buf) || msm_uport->tx.dma_in_flight ||
	    msm_uport->imr_reg & UARTDM_ISR_TXLEV_BMSK) {
#ifdef CONFIG_MSM_BT_POWER
		/* abort the pending clock-off request: go back to CLK_ON
		 * and restore automatic RX flow control
		 */
		if (msm_uport->clk_state == MSM_HS_CLK_REQUEST_OFF) {
			msm_uport->clk_state = MSM_HS_CLK_ON;
			/* Pulling RFR line high */
			msm_hs_write(uport, UART_DM_CR, RFR_LOW);
			/* Enable auto RFR */
			data = msm_hs_read(uport, UART_DM_MR1);
			data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
			msm_hs_write(uport, UART_DM_MR1, data);
			mb();
		}
#endif
		spin_unlock_irqrestore(&uport->lock, flags);
		mutex_unlock(&msm_uport->clk_mutex);
		MSM_HS_DBG("%s(): clkstate %d", __func__, msm_uport->clk_state);
		return -1;	/* caller must not retry */
	}

	/* Make sure the uart is finished with the last byte,
	 * use BFamily Register
	 */
	sr_status = msm_hs_read(uport, UART_DM_SR);
	if (!(sr_status & UARTDM_SR_TXEMT_BMSK)) {
		spin_unlock_irqrestore(&uport->lock, flags);
		mutex_unlock(&msm_uport->clk_mutex);
		MSM_HS_DBG("%s(): SR TXEMT fail %lx", __func__, sr_status);
		return 0; /* retry */
	}

	/* the RX side must reach FLUSH_SHUTDOWN before clocks can go away;
	 * kick off an RX stop if none is in progress yet
	 */
	if (msm_uport->rx.flush != FLUSH_SHUTDOWN) {
		if (msm_uport->rx.flush == FLUSH_NONE)
			msm_hs_stop_rx_locked(uport);

		MSM_HS_DBG("%s: rx.flush %d clk_state %d\n", __func__,
			   msm_uport->rx.flush, msm_uport->clk_state);
		spin_unlock_irqrestore(&uport->lock, flags);
		mutex_unlock(&msm_uport->clk_mutex);
		return 0; /* come back later to really clock off */
	}

	spin_unlock_irqrestore(&uport->lock, flags);

#ifdef CONFIG_MSM_BT_POWER
	/* Pulling RFR line high */
	msm_hs_write(uport, UART_DM_CR, RFR_LOW);
	/* Enable auto RFR */
	data = msm_hs_read(uport, UART_DM_MR1);
	data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
	msm_hs_write(uport, UART_DM_MR1, data);
	mb();
#endif

	/* we really want to clock off */
	/* NOTE(review): clk_mutex is dropped around msm_hs_clock_unvote(),
	 * presumably because the unvote path may sleep or reacquire it —
	 * confirm against msm_hs_clock_unvote before changing this order.
	 */
	mutex_unlock(&msm_uport->clk_mutex);
	msm_hs_clock_unvote(msm_uport);
	mutex_lock(&msm_uport->clk_mutex);

	spin_lock_irqsave(&uport->lock, flags);
	if (use_low_power_wakeup(msm_uport)) {
		/* the first wakeup irq after enable is stale: flag it */
		msm_uport->wakeup.ignore = 1;
		enable_irq(msm_uport->wakeup.irq);
	}
	if (pdev->id == 0)
		printk(KERN_INFO "(msm_serial_hs) msm_hs_check_clock_off - dma wake unlock\n");
	wake_unlock(&msm_uport->dma_wake_lock);
	spin_unlock_irqrestore(&uport->lock, flags);
	mutex_unlock(&msm_uport->clk_mutex);

	return 1;
}
  1787. static void hsuart_clock_off_work(struct work_struct *w)
  1788. {
  1789. struct msm_hs_port *msm_uport = container_of(w, struct msm_hs_port,
  1790. clock_off_w);
  1791. struct uart_port *uport = &msm_uport->uport;
  1792. struct platform_device *pdev = to_platform_device(uport->dev);
  1793. int check_clk_off = msm_hs_check_clock_off(uport);
  1794. if (!check_clk_off) {
  1795. hrtimer_start(&msm_uport->clk_off_timer,
  1796. msm_uport->clk_off_delay,
  1797. HRTIMER_MODE_REL);
  1798. } else if (check_clk_off == -1) {
  1799. if (pdev->id == 0)
  1800. printk(KERN_INFO "(msm_serial_hs) hsuart_clock_off_work WORKQUEUE - FIFO is not empty or in flight...\n");
  1801. } else {
  1802. if (pdev->id == 0)
  1803. printk(KERN_INFO "(msm_serial_hs) hsuart_clock_off_work WORKQUEUE - Maybe, clock is off-ed.\n");
  1804. }
  1805. }
  1806. static enum hrtimer_restart msm_hs_clk_off_retry(struct hrtimer *timer)
  1807. {
  1808. struct msm_hs_port *msm_uport = container_of(timer, struct msm_hs_port,
  1809. clk_off_timer);
  1810. queue_work(msm_uport->hsuart_wq, &msm_uport->clock_off_w);
  1811. return HRTIMER_NORESTART;
  1812. }
/*
 * Main UART interrupt handler.
 *
 * Reads the masked interrupt status (MISR) once and services, in
 * order: RX watermark (RXLEV), stale RX, TX-ready, TX-FIFO-empty
 * (TXLEV, used by the clock-off path) and delta-CTS.  All work runs
 * under the port spinlock.
 */
static irqreturn_t msm_hs_isr(int irq, void *dev)
{
	unsigned long flags;
	unsigned long isr_status;
	struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
	struct uart_port *uport = &msm_uport->uport;
	struct circ_buf *tx_buf = &uport->state->xmit;
	struct msm_hs_tx *tx = &msm_uport->tx;
	struct msm_hs_rx *rx = &msm_uport->rx;

	spin_lock_irqsave(&uport->lock, flags);

	isr_status = msm_hs_read(uport, UART_DM_MISR);
	MSM_HS_DBG("%s:UART_DM_MISR %lx", __func__, isr_status);
	dump_uart_hs_registers(msm_uport);

	/* Uart RX starting */
	if (isr_status & UARTDM_ISR_RXLEV_BMSK) {
		wake_lock(&rx->wake_lock); /* hold wakelock while rx dma */
		MSM_HS_DBG("%s:UARTDM_ISR_RXLEV_BMSK\n", __func__);
		/* mask RXLEV until the RX path is re-armed */
		msm_uport->imr_reg &= ~UARTDM_ISR_RXLEV_BMSK;
		msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
		/* Complete device write for IMR. Hence mb() requires. */
		mb();
	}
	/* Stale rx interrupt */
	if (isr_status & UARTDM_ISR_RXSTALE_BMSK) {
		msm_hs_write(uport, UART_DM_CR, STALE_EVENT_DISABLE);
		msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
		/*
		 * Complete device write before calling DMOV API. Hence
		 * mb() requires here.
		 */
		mb();
		MSM_HS_DBG("%s:Stal Interrupt\n", __func__);
	}
	/* tx ready interrupt */
	if (isr_status & UARTDM_ISR_TX_READY_BMSK) {
		MSM_HS_DBG("%s: ISR_TX_READY Interrupt\n", __func__);
		/* Clear TX Ready */
		msm_hs_write(uport, UART_DM_CR, CLEAR_TX_READY);

		/* a clock-off is pending: arm TXLEV so the off sequence
		 * proceeds once the TX FIFO fully drains
		 */
		if (msm_uport->clk_state == MSM_HS_CLK_REQUEST_OFF) {
			msm_uport->imr_reg |= UARTDM_ISR_TXLEV_BMSK;
			msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
		}
		/*
		 * Complete both writes before starting new TX.
		 * Hence mb() requires here.
		 */
		mb();
		/* Complete DMA TX transactions and submit new transactions */

		/* Do not update tx_buf.tail if uart_flush_buffer already
		 * called in serial core
		 */
		if (!uart_circ_empty(tx_buf))
			tx_buf->tail = (tx_buf->tail +
					tx->tx_count) & ~UART_XMIT_SIZE;

		tx->dma_in_flight = 0;

		uport->icount.tx += tx->tx_count;
		if (tx->tx_ready_int_en)
			msm_hs_submit_tx_locked(uport);

		/* room opened up in the circ buffer: poke the writers */
		if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
			uart_write_wakeup(uport);
	}
	if (isr_status & UARTDM_ISR_TXLEV_BMSK) {
		/* TX FIFO is empty */
		msm_uport->imr_reg &= ~UARTDM_ISR_TXLEV_BMSK;
		msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
		MSM_HS_DBG("%s: TXLEV Interrupt\n", __func__);
		/*
		 * Complete device write before starting clock_off request.
		 * Hence mb() requires here.
		 */
		mb();
		queue_work(msm_uport->hsuart_wq, &msm_uport->clock_off_w);
	}

	/* Change in CTS interrupt */
	if (isr_status & UARTDM_ISR_DELTA_CTS_BMSK)
		msm_hs_handle_delta_cts_locked(uport);

	spin_unlock_irqrestore(&uport->lock, flags);

	return IRQ_HANDLED;
}
  1892. /* The following two functions provide interfaces to get the underlying
  1893. * port structure (struct uart_port or struct msm_hs_port) given
  1894. * the port index. msm_hs_get_uart port is called by clients.
  1895. * The function msm_hs_get_hs_port is for internal use
  1896. */
  1897. struct uart_port *msm_hs_get_uart_port(int port_index)
  1898. {
  1899. struct uart_state *state = msm_hs_driver.state + port_index;
  1900. /* The uart_driver structure stores the states in an array.
  1901. * Thus the corresponding offset from the drv->state returns
  1902. * the state for the uart_port that is requested
  1903. */
  1904. if (port_index == state->uart_port->line)
  1905. return state->uart_port;
  1906. return NULL;
  1907. }
  1908. EXPORT_SYMBOL(msm_hs_get_uart_port);
  1909. static struct msm_hs_port *msm_hs_get_hs_port(int port_index)
  1910. {
  1911. struct uart_port *uport = msm_hs_get_uart_port(port_index);
  1912. if (uport)
  1913. return UARTDM_TO_MSM(uport);
  1914. return NULL;
  1915. }
/* request to turn off uart clock once pending TX is flushed */
void msm_hs_request_clock_off(struct uart_port *uport) {
	unsigned long flags;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	int data;

	if (msm_uport->clk_state == MSM_HS_CLK_PORT_OFF) {
		MSM_HS_ERR("%s:UART port is closed\n", __func__);
		return ;
	}

	spin_lock_irqsave(&uport->lock, flags);
	if (msm_uport->clk_state == MSM_HS_CLK_ON) {
		/* only record the request here; the actual gating is done
		 * asynchronously by hsuart_clock_off_work once TX drains
		 */
		msm_uport->clk_state = MSM_HS_CLK_REQUEST_OFF;
		data = msm_hs_read(uport, UART_DM_MR1);
		/*disable auto ready-for-receiving */
		data &= ~UARTDM_MR1_RX_RDY_CTL_BMSK;
		msm_hs_write(uport, UART_DM_MR1, data);
		mb();
		/* set RFR_N to high */
		msm_hs_write(uport, UART_DM_CR, RFR_HIGH);

		data = msm_hs_read(uport, UART_DM_SR);
		MSM_HS_DBG("%s(): TXEMT, queuing clock off work\n",
			   __func__);
		queue_work(msm_uport->hsuart_wq, &msm_uport->clock_off_w);
		mb();
	}
	spin_unlock_irqrestore(&uport->lock, flags);
}
EXPORT_SYMBOL(msm_hs_request_clock_off);
  1944. void msm_hs_request_clock_on(struct uart_port *uport)
  1945. {
  1946. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  1947. unsigned long flags;
  1948. unsigned int data;
  1949. int ret = 0;
  1950. struct platform_device *pdev = to_platform_device(uport->dev);
  1951. if (msm_uport->clk_state == MSM_HS_CLK_PORT_OFF) {
  1952. MSM_HS_ERR("%s:UART port is closed\n", __func__);
  1953. return ;
  1954. }
  1955. mutex_lock(&msm_uport->clk_mutex);
  1956. spin_lock_irqsave(&uport->lock, flags);
  1957. #ifdef CONFIG_MSM_BT_POWER
  1958. if (msm_uport->clk_state == MSM_HS_CLK_REQUEST_OFF) {
  1959. /* Pulling RFR line high */
  1960. msm_hs_write(uport, UART_DM_CR, RFR_LOW);
  1961. /* Enable auto RFR */
  1962. data = msm_hs_read(uport, UART_DM_MR1);
  1963. data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
  1964. msm_hs_write(uport, UART_DM_MR1, data);
  1965. mb();
  1966. }
  1967. #endif
  1968. switch (msm_uport->clk_state) {
  1969. case MSM_HS_CLK_OFF:
  1970. if (pdev->id == 0)
  1971. printk(KERN_INFO "(msm_serial_hs) msm_hs_check_clock_on - dma wake lock\n");
  1972. wake_lock(&msm_uport->dma_wake_lock);
  1973. if (use_low_power_wakeup(msm_uport))
  1974. disable_irq_nosync(msm_uport->wakeup.irq);
  1975. spin_unlock_irqrestore(&uport->lock, flags);
  1976. mutex_unlock(&msm_uport->clk_mutex);
  1977. ret = msm_hs_clock_vote(msm_uport);
  1978. mutex_lock(&msm_uport->clk_mutex);
  1979. if (ret) {
  1980. MSM_HS_INFO("Clock ON Failure"
  1981. "For UART CLK Stalling HSUART\n");
  1982. break;
  1983. }
  1984. spin_lock_irqsave(&uport->lock, flags);
  1985. /* else fall-through */
  1986. case MSM_HS_CLK_REQUEST_OFF:
  1987. hrtimer_cancel(&msm_uport->clk_off_timer);
  1988. if (msm_uport->rx.flush == FLUSH_STOP) {
  1989. spin_unlock_irqrestore(&uport->lock, flags);
  1990. MSM_HS_DBG("%s:Calling wait forxcompletion\n",
  1991. __func__);
  1992. mutex_unlock(&msm_uport->clk_mutex);
  1993. ret = wait_event_timeout(msm_uport->bam_disconnect_wait,
  1994. msm_uport->rx.flush == FLUSH_SHUTDOWN, 300);
  1995. mutex_lock(&msm_uport->clk_mutex);
  1996. if (!ret)
  1997. MSM_HS_ERR("BAM Disconnect not happened\n");
  1998. spin_lock_irqsave(&uport->lock, flags);
  1999. MSM_HS_DBG("%s:DONE wait for completion\n", __func__);
  2000. }
  2001. MSM_HS_DBG("%s:clock state %d\n\n", __func__,
  2002. msm_uport->clk_state);
  2003. if (msm_uport->clk_state == MSM_HS_CLK_REQUEST_OFF)
  2004. msm_uport->clk_state = MSM_HS_CLK_ON;
  2005. if (msm_uport->rx.flush == FLUSH_STOP ||
  2006. msm_uport->rx.flush == FLUSH_SHUTDOWN) {
  2007. msm_hs_write(uport, UART_DM_CR, RESET_RX);
  2008. data = msm_hs_read(uport, UART_DM_DMEN);
  2009. data |= UARTDM_RX_BAM_ENABLE_BMSK;
  2010. msm_hs_write(uport, UART_DM_DMEN, data);
  2011. /* Complete above device write. Hence mb() here. */
  2012. mb();
  2013. }
  2014. MSM_HS_DBG("%s: rx.flush %d\n", __func__, msm_uport->rx.flush);
  2015. if (msm_uport->rx.flush == FLUSH_SHUTDOWN) {
  2016. spin_unlock_irqrestore(&uport->lock, flags);
  2017. msm_hs_spsconnect_rx(uport);
  2018. spin_lock_irqsave(&uport->lock, flags);
  2019. msm_hs_start_rx_locked(uport);
  2020. }
  2021. if (msm_uport->rx.flush == FLUSH_STOP)
  2022. msm_uport->rx.flush = FLUSH_IGNORE;
  2023. break;
  2024. case MSM_HS_CLK_ON:
  2025. break;
  2026. case MSM_HS_CLK_PORT_OFF:
  2027. MSM_HS_ERR("%s:Clock ON failed;UART Port is Closed\n",
  2028. __func__);
  2029. break;
  2030. }
  2031. MSM_HS_INFO("%s:Clock ON Successful\n", __func__);
  2032. dump_uart_hs_registers(msm_uport);
  2033. spin_unlock_irqrestore(&uport->lock, flags);
  2034. mutex_unlock(&msm_uport->clk_mutex);
  2035. }
  2036. EXPORT_SYMBOL(msm_hs_request_clock_on);
  2037. int msm_hs_get_clock_state(struct uart_port *uport)
  2038. {
  2039. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  2040. return (int)msm_uport->clk_state;
  2041. }
  2042. EXPORT_SYMBOL(msm_hs_get_clock_state);
/*
 * Wakeup-GPIO interrupt handler; meaningful only while the UART
 * clocks are off.  The first interrupt after enable_irq() is a stale
 * pending one and is skipped via wakeup.ignore.  A genuine wakeup
 * turns the clocks back on and, when configured, injects a single
 * replacement character into the tty (the byte that triggered the
 * wakeup was lost while the receiver was unclocked).
 */
static irqreturn_t msm_hs_wakeup_isr(int irq, void *dev)
{
	unsigned int wakeup = 0;
	unsigned long flags;
	struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
	struct uart_port *uport = &msm_uport->uport;
	struct tty_struct *tty = NULL;

	spin_lock_irqsave(&uport->lock, flags);
	if (msm_uport->clk_state == MSM_HS_CLK_OFF) {
		/* ignore the first irq - it is a pending irq that occured
		 * before enable_irq()
		 */
		if (msm_uport->wakeup.ignore)
			msm_uport->wakeup.ignore = 0;
		else
			wakeup = 1;
	}

	if (wakeup) {
		/* the uart was clocked off during an rx, wake up and
		 * optionally inject char into tty rx
		 */
		/* drop the lock: msm_hs_request_clock_on() takes mutexes
		 * and may sleep
		 */
		spin_unlock_irqrestore(&uport->lock, flags);
		msm_hs_request_clock_on(uport);
		spin_lock_irqsave(&uport->lock, flags);
		if (msm_uport->wakeup.inject_rx) {
			tty = uport->state->port.tty;
			tty_insert_flip_char(tty,
					     msm_uport->wakeup.rx_to_inject,
					     TTY_NORMAL);
			MSM_HS_DBG("%s(): Inject 0x%x", __func__,
				   msm_uport->wakeup.rx_to_inject);
		}
	}

	spin_unlock_irqrestore(&uport->lock, flags);
	/* push to the line discipline outside the port lock */
	if (wakeup && msm_uport->wakeup.inject_rx)
		tty_flip_buffer_push(tty);

	return IRQ_HANDLED;
}
  2081. static const char *msm_hs_type(struct uart_port *port)
  2082. {
  2083. return ("MSM HS UART");
  2084. }
  2085. /**
  2086. * msm_hs_unconfig_uart_gpios: Unconfigures UART GPIOs
  2087. * @uport: uart port
  2088. */
  2089. static void msm_hs_unconfig_uart_gpios(struct uart_port *uport)
  2090. {
  2091. struct platform_device *pdev = to_platform_device(uport->dev);
  2092. const struct msm_serial_hs_platform_data *pdata =
  2093. pdev->dev.platform_data;
  2094. if (pdata) {
  2095. if (gpio_is_valid(pdata->uart_tx_gpio))
  2096. gpio_free(pdata->uart_tx_gpio);
  2097. if (gpio_is_valid(pdata->uart_rx_gpio))
  2098. gpio_free(pdata->uart_rx_gpio);
  2099. if (gpio_is_valid(pdata->uart_cts_gpio))
  2100. gpio_free(pdata->uart_cts_gpio);
  2101. if (gpio_is_valid(pdata->uart_rfr_gpio))
  2102. gpio_free(pdata->uart_rfr_gpio);
  2103. } else {
  2104. MSM_HS_ERR("Error:Pdata is NULL.\n");
  2105. }
  2106. }
  2107. /**
  2108. * msm_hs_config_uart_gpios - Configures UART GPIOs
  2109. * @uport: uart port
  2110. */
  2111. static int msm_hs_config_uart_gpios(struct uart_port *uport)
  2112. {
  2113. struct platform_device *pdev = to_platform_device(uport->dev);
  2114. const struct msm_serial_hs_platform_data *pdata =
  2115. pdev->dev.platform_data;
  2116. int ret = 0;
  2117. if (pdata) {
  2118. if (gpio_is_valid(pdata->uart_tx_gpio)) {
  2119. ret = gpio_request(pdata->uart_tx_gpio,
  2120. "UART_TX_GPIO");
  2121. if (unlikely(ret)) {
  2122. MSM_HS_ERR("gpio request failed for:%d\n",
  2123. pdata->uart_tx_gpio);
  2124. goto exit_uart_config;
  2125. }
  2126. }
  2127. if (gpio_is_valid(pdata->uart_rx_gpio)) {
  2128. ret = gpio_request(pdata->uart_rx_gpio,
  2129. "UART_RX_GPIO");
  2130. if (unlikely(ret)) {
  2131. MSM_HS_ERR("gpio request failed for:%d\n",
  2132. pdata->uart_rx_gpio);
  2133. goto uart_tx_unconfig;
  2134. }
  2135. }
  2136. if (gpio_is_valid(pdata->uart_cts_gpio)) {
  2137. ret = gpio_request(pdata->uart_cts_gpio,
  2138. "UART_CTS_GPIO");
  2139. if (unlikely(ret)) {
  2140. MSM_HS_ERR("gpio request failed for:%d\n",
  2141. pdata->uart_cts_gpio);
  2142. goto uart_rx_unconfig;
  2143. }
  2144. }
  2145. if (gpio_is_valid(pdata->uart_rfr_gpio)) {
  2146. ret = gpio_request(pdata->uart_rfr_gpio,
  2147. "UART_RFR_GPIO");
  2148. if (unlikely(ret)) {
  2149. MSM_HS_ERR("gpio request failed for:%d\n",
  2150. pdata->uart_rfr_gpio);
  2151. goto uart_cts_unconfig;
  2152. }
  2153. }
  2154. } else {
  2155. MSM_HS_ERR("Pdata is NULL.\n");
  2156. ret = -EINVAL;
  2157. }
  2158. return ret;
  2159. uart_cts_unconfig:
  2160. if (gpio_is_valid(pdata->uart_cts_gpio))
  2161. gpio_free(pdata->uart_cts_gpio);
  2162. uart_rx_unconfig:
  2163. if (gpio_is_valid(pdata->uart_rx_gpio))
  2164. gpio_free(pdata->uart_rx_gpio);
  2165. uart_tx_unconfig:
  2166. if (gpio_is_valid(pdata->uart_tx_gpio))
  2167. gpio_free(pdata->uart_tx_gpio);
  2168. exit_uart_config:
  2169. return ret;
  2170. }
  2171. /* Called when port is opened */
  2172. static int msm_hs_startup(struct uart_port *uport)
  2173. {
  2174. int ret;
  2175. int rfr_level;
  2176. unsigned long flags;
  2177. unsigned int data;
  2178. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  2179. struct circ_buf *tx_buf = &uport->state->xmit;
  2180. struct msm_hs_tx *tx = &msm_uport->tx;
  2181. struct msm_hs_rx *rx = &msm_uport->rx;
  2182. struct sps_pipe *sps_pipe_handle_tx = tx->cons.pipe_handle;
  2183. struct sps_pipe *sps_pipe_handle_rx = rx->prod.pipe_handle;
  2184. struct platform_device *pdev = to_platform_device(uport->dev);
  2185. rfr_level = uport->fifosize;
  2186. if (rfr_level > 16)
  2187. rfr_level -= 16;
  2188. tx->dma_base = dma_map_single(uport->dev, tx_buf->buf, UART_XMIT_SIZE,
  2189. DMA_TO_DEVICE);
  2190. if (pdev->id == 0)
  2191. printk(KERN_INFO "(msm_serial_hs) msm_hs_startup - dma wake lock\n");
  2192. wake_lock(&msm_uport->dma_wake_lock);
  2193. /* turn on uart clk */
  2194. ret = msm_hs_init_clk(uport);
  2195. if (unlikely(ret)) {
  2196. MSM_HS_ERR("Turning ON uartclk error\n");
  2197. wake_unlock(&msm_uport->dma_wake_lock);
  2198. return ret;
  2199. }
  2200. ret = msm_hs_config_uart_gpios(uport);
  2201. if (ret) {
  2202. MSM_HS_ERR("Uart GPIO request failed\n");
  2203. goto deinit_uart_clk;
  2204. }
  2205. /* SPS Connect for BAM endpoints */
  2206. /* SPS connect for TX */
  2207. ret = msm_hs_spsconnect_tx(uport);
  2208. if (ret) {
  2209. MSM_HS_ERR("msm_serial_hs: SPS connect failed for TX");
  2210. goto unconfig_uart_gpios;
  2211. }
  2212. /* SPS connect for RX */
  2213. ret = msm_hs_spsconnect_rx(uport);
  2214. if (ret) {
  2215. MSM_HS_ERR("msm_serial_hs: SPS connect failed for RX");
  2216. goto sps_disconnect_tx;
  2217. }
  2218. data = (UARTDM_BCR_TX_BREAK_DISABLE | UARTDM_BCR_STALE_IRQ_EMPTY |
  2219. UARTDM_BCR_RX_DMRX_LOW_EN | UARTDM_BCR_RX_STAL_IRQ_DMRX_EQL |
  2220. UARTDM_BCR_RX_DMRX_1BYTE_RES_EN);
  2221. msm_hs_write(uport, UART_DM_BCR, data);
  2222. /* Set auto RFR Level */
  2223. data = msm_hs_read(uport, UART_DM_MR1);
  2224. data &= ~UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK;
  2225. data &= ~UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK;
  2226. data |= (UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK & (rfr_level << 2));
  2227. data |= (UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK & rfr_level);
  2228. msm_hs_write(uport, UART_DM_MR1, data);
  2229. /* Make sure RXSTALE count is non-zero */
  2230. data = msm_hs_read(uport, UART_DM_IPR);
  2231. if (!data) {
  2232. data |= 0x1f & UARTDM_IPR_STALE_LSB_BMSK;
  2233. msm_hs_write(uport, UART_DM_IPR, data);
  2234. }
  2235. /* Enable BAM mode */
  2236. data = UARTDM_TX_BAM_ENABLE_BMSK | UARTDM_RX_BAM_ENABLE_BMSK;
  2237. msm_hs_write(uport, UART_DM_DMEN, data);
  2238. /* Reset TX */
  2239. msm_hs_write(uport, UART_DM_CR, RESET_TX);
  2240. msm_hs_write(uport, UART_DM_CR, RESET_RX);
  2241. msm_hs_write(uport, UART_DM_CR, RESET_ERROR_STATUS);
  2242. msm_hs_write(uport, UART_DM_CR, RESET_BREAK_INT);
  2243. msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
  2244. msm_hs_write(uport, UART_DM_CR, RESET_CTS);
  2245. msm_hs_write(uport, UART_DM_CR, RFR_LOW);
  2246. /* Turn on Uart Receiver */
  2247. msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_EN_BMSK);
  2248. /* Turn on Uart Transmitter */
  2249. msm_hs_write(uport, UART_DM_CR, UARTDM_CR_TX_EN_BMSK);
  2250. /* Initialize the tx */
  2251. tx->tx_ready_int_en = 0;
  2252. tx->dma_in_flight = 0;
  2253. rx->rx_cmd_exec = false;
  2254. /* Enable reading the current CTS, no harm even if CTS is ignored */
  2255. msm_uport->imr_reg |= UARTDM_ISR_CURRENT_CTS_BMSK;
  2256. /* TXLEV on empty TX fifo */
  2257. msm_hs_write(uport, UART_DM_TFWR, 4);
  2258. /*
  2259. * Complete all device write related configuration before
  2260. * queuing RX request. Hence mb() requires here.
  2261. */
  2262. mb();
  2263. if (use_low_power_wakeup(msm_uport)) {
  2264. ret = irq_set_irq_wake(msm_uport->wakeup.irq, 1);
  2265. if (unlikely(ret)) {
  2266. MSM_HS_ERR("%s():Err setting wakeup irq\n", __func__);
  2267. goto sps_disconnect_rx;
  2268. }
  2269. }
  2270. ret = request_irq(uport->irq, msm_hs_isr, IRQF_TRIGGER_HIGH,
  2271. "msm_hs_uart", msm_uport);
  2272. if (unlikely(ret)) {
  2273. MSM_HS_ERR("%s():Error getting uart irq\n", __func__);
  2274. goto free_wake_irq;
  2275. }
  2276. if (use_low_power_wakeup(msm_uport)) {
  2277. ret = request_threaded_irq(msm_uport->wakeup.irq, NULL,
  2278. msm_hs_wakeup_isr,
  2279. IRQF_TRIGGER_FALLING,
  2280. "msm_hs_wakeup", msm_uport);
  2281. if (unlikely(ret)) {
  2282. MSM_HS_ERR("%s():Err getting uart wakeup_irq\n", __func__);
  2283. goto free_uart_irq;
  2284. }
  2285. disable_irq(msm_uport->wakeup.irq);
  2286. }
  2287. spin_lock_irqsave(&uport->lock, flags);
  2288. msm_hs_start_rx_locked(uport);
  2289. spin_unlock_irqrestore(&uport->lock, flags);
  2290. pm_runtime_enable(uport->dev);
  2291. return 0;
  2292. free_uart_irq:
  2293. free_irq(uport->irq, msm_uport);
  2294. free_wake_irq:
  2295. if (use_low_power_wakeup(msm_uport))
  2296. irq_set_irq_wake(msm_uport->wakeup.irq, 0);
  2297. sps_disconnect_rx:
  2298. sps_disconnect(sps_pipe_handle_rx);
  2299. sps_disconnect_tx:
  2300. sps_disconnect(sps_pipe_handle_tx);
  2301. unconfig_uart_gpios:
  2302. msm_hs_unconfig_uart_gpios(uport);
  2303. deinit_uart_clk:
  2304. msm_hs_clock_unvote(msm_uport);
  2305. if (pdev->id == 0)
  2306. printk(KERN_INFO "(msm_serial_hs) msm_hs_startup deinit clk - dma wake unlock\n");
  2307. wake_unlock(&msm_uport->dma_wake_lock);
  2308. return ret;
  2309. }
/*
 * Initialize tx and rx data structures.
 *
 * One-time per-port setup done at probe: wait queues, wake locks,
 * completion tasklets, the RX DMA pool/buffer and the RX watermark.
 * Returns 0 on success or -ENOMEM, unwinding partial setup on failure.
 */
static int uartdm_init_port(struct uart_port *uport)
{
	int ret = 0;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_tx *tx = &msm_uport->tx;
	struct msm_hs_rx *rx = &msm_uport->rx;

	init_waitqueue_head(&rx->wait);
	init_waitqueue_head(&tx->wait);
	init_waitqueue_head(&msm_uport->bam_disconnect_wait);

	wake_lock_init(&rx->wake_lock, WAKE_LOCK_SUSPEND, "msm_serial_hs_rx");
	wake_lock_init(&msm_uport->dma_wake_lock, WAKE_LOCK_SUSPEND,
		       "msm_serial_hs_dma");

	/* Tasklets run tx/rx completion handling outside hard-irq context;
	 * each is handed its own address as the callback argument. */
	tasklet_init(&rx->tlet, msm_serial_hs_rx_tlet,
		     (unsigned long) &rx->tlet);
	tasklet_init(&tx->tlet, msm_serial_hs_tx_tlet,
		     (unsigned long) &tx->tlet);

	/* Coherent pool for RX transfers, 16-byte aligned buffers. */
	rx->pool = dma_pool_create("rx_buffer_pool", uport->dev,
				   UARTDM_RX_BUF_SIZE, 16, 0);
	if (!rx->pool) {
		MSM_HS_ERR("%s(): cannot allocate rx_buffer_pool", __func__);
		ret = -ENOMEM;
		goto exit_tasklet_init;
	}

	rx->buffer = dma_pool_alloc(rx->pool, GFP_KERNEL, &rx->rbuffer);
	if (!rx->buffer) {
		MSM_HS_ERR("%s(): cannot allocate rx->buffer", __func__);
		ret = -ENOMEM;
		goto free_pool;
	}

	/* Set up Uart Receive: RX FIFO watermark at 32. */
	msm_hs_write(uport, UART_DM_RFWR, 32);

	INIT_DELAYED_WORK(&rx->flip_insert_work, flip_insert_work);

	return ret;

free_pool:
	dma_pool_destroy(msm_uport->rx.pool);

exit_tasklet_init:
	wake_lock_destroy(&msm_uport->rx.wake_lock);
	wake_lock_destroy(&msm_uport->dma_wake_lock);
	tasklet_kill(&msm_uport->tx.tlet);
	tasklet_kill(&msm_uport->rx.tlet);
	return ret;
}
  2353. struct msm_serial_hs_platform_data
  2354. *msm_hs_dt_to_pdata(struct platform_device *pdev)
  2355. {
  2356. struct device_node *node = pdev->dev.of_node;
  2357. struct msm_serial_hs_platform_data *pdata;
  2358. int rx_to_inject, ret;
  2359. pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
  2360. if (!pdata) {
  2361. MSM_HS_ERR("unable to allocate memory for platform data\n");
  2362. return ERR_PTR(-ENOMEM);
  2363. }
  2364. pdev->id = of_alias_get_id(pdev->dev.of_node, "uart");
  2365. /* UART TX GPIO */
  2366. pdata->uart_tx_gpio = of_get_named_gpio(node,
  2367. "qcom,tx-gpio", 0);
  2368. if (pdata->uart_tx_gpio < 0)
  2369. MSM_HS_DBG("uart_tx_gpio is not available\n");
  2370. /* UART RX GPIO */
  2371. pdata->uart_rx_gpio = of_get_named_gpio(node,
  2372. "qcom,rx-gpio", 0);
  2373. if (pdata->uart_rx_gpio < 0)
  2374. MSM_HS_DBG("uart_rx_gpio is not available\n");
  2375. /* UART CTS GPIO */
  2376. pdata->uart_cts_gpio = of_get_named_gpio(node,
  2377. "qcom,cts-gpio", 0);
  2378. if (pdata->uart_cts_gpio < 0)
  2379. MSM_HS_DBG("uart_cts_gpio is not available\n");
  2380. /* UART RFR GPIO */
  2381. pdata->uart_rfr_gpio = of_get_named_gpio(node,
  2382. "qcom,rfr-gpio", 0);
  2383. if (pdata->uart_rfr_gpio < 0)
  2384. MSM_HS_DBG("uart_rfr_gpio is not available\n");
  2385. pdata->no_suspend_delay = of_property_read_bool(node,
  2386. "qcom,no-suspend-delay");
  2387. pdata->inject_rx_on_wakeup = of_property_read_bool(node,
  2388. "qcom,inject-rx-on-wakeup");
  2389. if (pdata->inject_rx_on_wakeup) {
  2390. ret = of_property_read_u32(node, "qcom,rx-char-to-inject",
  2391. &rx_to_inject);
  2392. if (ret < 0) {
  2393. MSM_HS_ERR("Error: Rx_char_to_inject not specified.\n");
  2394. return ERR_PTR(ret);
  2395. }
  2396. pdata->rx_to_inject = (char)rx_to_inject;
  2397. }
  2398. ret = of_property_read_u32(node, "qcom,bam-tx-ep-pipe-index",
  2399. &pdata->bam_tx_ep_pipe_index);
  2400. if (ret < 0) {
  2401. MSM_HS_ERR("Error: Getting UART BAM TX EP Pipe Index.\n");
  2402. return ERR_PTR(ret);
  2403. }
  2404. if (!(pdata->bam_tx_ep_pipe_index >= BAM_PIPE_MIN &&
  2405. pdata->bam_tx_ep_pipe_index <= BAM_PIPE_MAX)) {
  2406. MSM_HS_ERR("Error: Invalid UART BAM TX EP Pipe Index.\n");
  2407. return ERR_PTR(-EINVAL);
  2408. }
  2409. ret = of_property_read_u32(node, "qcom,bam-rx-ep-pipe-index",
  2410. &pdata->bam_rx_ep_pipe_index);
  2411. if (ret < 0) {
  2412. MSM_HS_ERR("Error: Getting UART BAM RX EP Pipe Index.\n");
  2413. return ERR_PTR(ret);
  2414. }
  2415. if (!(pdata->bam_rx_ep_pipe_index >= BAM_PIPE_MIN &&
  2416. pdata->bam_rx_ep_pipe_index <= BAM_PIPE_MAX)) {
  2417. MSM_HS_ERR("Error: Invalid UART BAM RX EP Pipe Index.\n");
  2418. return ERR_PTR(-EINVAL);
  2419. }
  2420. MSM_HS_DBG("tx_ep_pipe_index:%d rx_ep_pipe_index:%d\n"
  2421. "tx_gpio:%d rx_gpio:%d rfr_gpio:%d cts_gpio:%d",
  2422. pdata->bam_tx_ep_pipe_index, pdata->bam_rx_ep_pipe_index,
  2423. pdata->uart_tx_gpio, pdata->uart_rx_gpio, pdata->uart_cts_gpio,
  2424. pdata->uart_rfr_gpio);
  2425. return pdata;
  2426. }
  2427. /**
  2428. * Deallocate UART peripheral's SPS endpoint
  2429. * @msm_uport - Pointer to msm_hs_port structure
  2430. * @ep - Pointer to sps endpoint data structure
  2431. */
  2432. static void msm_hs_exit_ep_conn(struct msm_hs_port *msm_uport,
  2433. struct msm_hs_sps_ep_conn_data *ep)
  2434. {
  2435. struct sps_pipe *sps_pipe_handle = ep->pipe_handle;
  2436. struct sps_connect *sps_config = &ep->config;
  2437. dma_free_coherent(msm_uport->uport.dev,
  2438. sps_config->desc.size,
  2439. &sps_config->desc.phys_base,
  2440. GFP_KERNEL);
  2441. sps_free_endpoint(sps_pipe_handle);
  2442. }
  2443. /**
  2444. * Allocate UART peripheral's SPS endpoint
  2445. *
  2446. * This function allocates endpoint context
  2447. * by calling appropriate SPS driver APIs.
  2448. *
  2449. * @msm_uport - Pointer to msm_hs_port structure
  2450. * @ep - Pointer to sps endpoint data structure
  2451. * @is_produce - 1 means Producer endpoint
  2452. * - 0 means Consumer endpoint
  2453. *
  2454. * @return - 0 if successful else negative value
  2455. */
  2456. static int msm_hs_sps_init_ep_conn(struct msm_hs_port *msm_uport,
  2457. struct msm_hs_sps_ep_conn_data *ep,
  2458. bool is_producer)
  2459. {
  2460. int rc = 0;
  2461. struct sps_pipe *sps_pipe_handle;
  2462. struct sps_connect *sps_config = &ep->config;
  2463. struct sps_register_event *sps_event = &ep->event;
  2464. /* Allocate endpoint context */
  2465. sps_pipe_handle = sps_alloc_endpoint();
  2466. if (!sps_pipe_handle) {
  2467. MSM_HS_ERR("%s(): sps_alloc_endpoint() failed!!\n"
  2468. "is_producer=%d", __func__, is_producer);
  2469. rc = -ENOMEM;
  2470. goto out;
  2471. }
  2472. /* Get default connection configuration for an endpoint */
  2473. rc = sps_get_config(sps_pipe_handle, sps_config);
  2474. if (rc) {
  2475. MSM_HS_ERR("%s(): failed! pipe_handle=0x%p rc=%d",
  2476. __func__, sps_pipe_handle, rc);
  2477. goto get_config_err;
  2478. }
  2479. /* Modify the default connection configuration */
  2480. if (is_producer) {
  2481. /* For UART producer transfer, source is UART peripheral
  2482. where as destination is system memory */
  2483. sps_config->source = msm_uport->bam_handle;
  2484. sps_config->destination = SPS_DEV_HANDLE_MEM;
  2485. sps_config->mode = SPS_MODE_SRC;
  2486. sps_config->src_pipe_index = msm_uport->bam_rx_ep_pipe_index;
  2487. sps_config->dest_pipe_index = 0;
  2488. } else {
  2489. /* For UART consumer transfer, source is system memory
  2490. where as destination is UART peripheral */
  2491. sps_config->source = SPS_DEV_HANDLE_MEM;
  2492. sps_config->destination = msm_uport->bam_handle;
  2493. sps_config->mode = SPS_MODE_DEST;
  2494. sps_config->src_pipe_index = 0;
  2495. sps_config->dest_pipe_index = msm_uport->bam_tx_ep_pipe_index;
  2496. }
  2497. sps_config->options = SPS_O_EOT | SPS_O_DESC_DONE | SPS_O_AUTO_ENABLE;
  2498. sps_config->event_thresh = 0x10;
  2499. /* Allocate maximum descriptor fifo size */
  2500. sps_config->desc.size = 65532;
  2501. sps_config->desc.base = dma_alloc_coherent(msm_uport->uport.dev,
  2502. sps_config->desc.size,
  2503. &sps_config->desc.phys_base,
  2504. GFP_KERNEL);
  2505. if (!sps_config->desc.base) {
  2506. rc = -ENOMEM;
  2507. MSM_HS_ERR("msm_serial_hs: dma_alloc_coherent() failed!!\n");
  2508. goto get_config_err;
  2509. }
  2510. memset(sps_config->desc.base, 0x00, sps_config->desc.size);
  2511. sps_event->mode = SPS_TRIGGER_CALLBACK;
  2512. if (is_producer) {
  2513. sps_event->callback = msm_hs_sps_rx_callback;
  2514. } else {
  2515. sps_event->callback = msm_hs_sps_tx_callback;
  2516. }
  2517. sps_event->options = SPS_O_DESC_DONE | SPS_O_EOT;
  2518. sps_event->user = (void *)msm_uport;
  2519. /* Now save the sps pipe handle */
  2520. ep->pipe_handle = sps_pipe_handle;
  2521. MSM_HS_DBG("msm_serial_hs: success !! %s: pipe_handle=0x%p\n"
  2522. "desc_fifo.phys_base=0x%pa\n",
  2523. is_producer ? "READ" : "WRITE",
  2524. sps_pipe_handle, &sps_config->desc.phys_base);
  2525. return 0;
  2526. get_config_err:
  2527. sps_free_endpoint(sps_pipe_handle);
  2528. out:
  2529. return rc;
  2530. }
  2531. /**
  2532. * Initialize SPS HW connected with UART core
  2533. *
  2534. * This function register BAM HW resources with
  2535. * SPS driver and then initialize 2 SPS endpoints
  2536. *
  2537. * msm_uport - Pointer to msm_hs_port structure
  2538. *
  2539. * @return - 0 if successful else negative value
  2540. */
  2541. static int msm_hs_sps_init(struct msm_hs_port *msm_uport)
  2542. {
  2543. int rc = 0;
  2544. struct sps_bam_props bam = {0};
  2545. u32 bam_handle;
  2546. rc = sps_phy2h(msm_uport->bam_mem, &bam_handle);
  2547. if (rc || !bam_handle) {
  2548. bam.phys_addr = msm_uport->bam_mem;
  2549. bam.virt_addr = msm_uport->bam_base;
  2550. /*
  2551. * This event thresold value is only significant for BAM-to-BAM
  2552. * transfer. It's ignored for BAM-to-System mode transfer.
  2553. */
  2554. bam.event_threshold = 0x10; /* Pipe event threshold */
  2555. bam.summing_threshold = 1; /* BAM event threshold */
  2556. /* SPS driver wll handle the UART BAM IRQ */
  2557. bam.irq = (u32)msm_uport->bam_irq;
  2558. bam.manage = SPS_BAM_MGR_DEVICE_REMOTE;
  2559. MSM_HS_DBG("msm_serial_hs: bam physical base=0x%pa\n",
  2560. &bam.phys_addr);
  2561. MSM_HS_DBG("msm_serial_hs: bam virtual base=0x%p\n",
  2562. bam.virt_addr);
  2563. /* Register UART Peripheral BAM device to SPS driver */
  2564. rc = sps_register_bam_device(&bam, &bam_handle);
  2565. if (rc) {
  2566. MSM_HS_ERR("msm_serial_hs: BAM device register failed\n");
  2567. return rc;
  2568. }
  2569. MSM_HS_INFO("msm_serial_hs: BAM device registered. bam_handle=0x%x",
  2570. msm_uport->bam_handle);
  2571. }
  2572. msm_uport->bam_handle = bam_handle;
  2573. rc = msm_hs_sps_init_ep_conn(msm_uport, &msm_uport->rx.prod,
  2574. UART_SPS_PROD_PERIPHERAL);
  2575. if (rc) {
  2576. MSM_HS_ERR("%s: Failed to Init Producer BAM-pipe", __func__);
  2577. goto deregister_bam;
  2578. }
  2579. rc = msm_hs_sps_init_ep_conn(msm_uport, &msm_uport->tx.cons,
  2580. UART_SPS_CONS_PERIPHERAL);
  2581. if (rc) {
  2582. MSM_HS_ERR("%s: Failed to Init Consumer BAM-pipe", __func__);
  2583. goto deinit_ep_conn_prod;
  2584. }
  2585. return 0;
  2586. deinit_ep_conn_prod:
  2587. msm_hs_exit_ep_conn(msm_uport, &msm_uport->rx.prod);
  2588. deregister_bam:
  2589. sps_deregister_bam_device(msm_uport->bam_handle);
  2590. return rc;
  2591. }
/* One slot per possible UARTDM instance; true = device id in use. */
static bool deviceid[UARTDM_NR] = {0};
/*
 * The mutex synchronizes grabbing next free device number
 * both in case of an alias being used or not. When alias is
 * used, the msm_hs_dt_to_pdata gets it and the boolean array
 * is accordingly updated with device_id_set_used. If no alias
 * is used, then device_id_grab_next_free sets that array.
 */
static DEFINE_MUTEX(mutex_next_device_id);
  2601. static int device_id_grab_next_free(void)
  2602. {
  2603. int i;
  2604. int ret = -ENODEV;
  2605. mutex_lock(&mutex_next_device_id);
  2606. for (i = 0; i < UARTDM_NR; i++)
  2607. if (!deviceid[i]) {
  2608. ret = i;
  2609. deviceid[i] = true;
  2610. break;
  2611. }
  2612. mutex_unlock(&mutex_next_device_id);
  2613. return ret;
  2614. }
  2615. static int device_id_set_used(int index)
  2616. {
  2617. int ret = 0;
  2618. mutex_lock(&mutex_next_device_id);
  2619. if (deviceid[index])
  2620. ret = -ENODEV;
  2621. else
  2622. deviceid[index] = true;
  2623. mutex_unlock(&mutex_next_device_id);
  2624. return ret;
  2625. }
  2626. static int __devinit msm_hs_probe(struct platform_device *pdev)
  2627. {
  2628. int ret = 0;
  2629. struct uart_port *uport;
  2630. struct msm_hs_port *msm_uport;
  2631. struct resource *core_resource;
  2632. struct resource *bam_resource;
  2633. int core_irqres, bam_irqres, wakeup_irqres;
  2634. struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
  2635. unsigned long data;
  2636. if (pdev->dev.of_node) {
  2637. dev_dbg(&pdev->dev, "device tree enabled\n");
  2638. pdata = msm_hs_dt_to_pdata(pdev);
  2639. if (IS_ERR(pdata))
  2640. return PTR_ERR(pdata);
  2641. if (pdev->id < 0) {
  2642. pdev->id = device_id_grab_next_free();
  2643. if (pdev->id < 0) {
  2644. dev_err(&pdev->dev,
  2645. "Error grabbing next free device id");
  2646. return pdev->id;
  2647. }
  2648. } else {
  2649. ret = device_id_set_used(pdev->id);
  2650. if (ret < 0) {
  2651. dev_err(&pdev->dev, "%d alias taken",
  2652. pdev->id);
  2653. return ret;
  2654. }
  2655. }
  2656. pdev->dev.platform_data = pdata;
  2657. }
  2658. if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
  2659. MSM_HS_ERR("Invalid plaform device ID = %d\n", pdev->id);
  2660. return -EINVAL;
  2661. }
  2662. msm_uport = devm_kzalloc(&pdev->dev, sizeof(struct msm_hs_port),
  2663. GFP_KERNEL);
  2664. if (!msm_uport) {
  2665. MSM_HS_ERR("Memory allocation failed\n");
  2666. return -ENOMEM;
  2667. }
  2668. msm_uport->uport.type = PORT_UNKNOWN;
  2669. uport = &msm_uport->uport;
  2670. uport->dev = &pdev->dev;
  2671. if (pdev->dev.of_node)
  2672. msm_uport->uart_type = BLSP_HSUART;
  2673. /* Get required resources for BAM HSUART */
  2674. core_resource = platform_get_resource_byname(pdev,
  2675. IORESOURCE_MEM, "core_mem");
  2676. if (!core_resource) {
  2677. MSM_HS_ERR("Invalid core HSUART Resources.\n");
  2678. return -ENXIO;
  2679. }
  2680. bam_resource = platform_get_resource_byname(pdev,
  2681. IORESOURCE_MEM, "bam_mem");
  2682. if (!bam_resource) {
  2683. MSM_HS_ERR("Invalid BAM HSUART Resources.\n");
  2684. return -ENXIO;
  2685. }
  2686. core_irqres = platform_get_irq_byname(pdev, "core_irq");
  2687. if (core_irqres < 0) {
  2688. MSM_HS_ERR("Invalid core irqres Resources.\n");
  2689. return -ENXIO;
  2690. }
  2691. bam_irqres = platform_get_irq_byname(pdev, "bam_irq");
  2692. if (bam_irqres < 0) {
  2693. MSM_HS_ERR("Invalid bam irqres Resources.\n");
  2694. return -ENXIO;
  2695. }
  2696. wakeup_irqres = platform_get_irq_byname(pdev, "wakeup_irq");
  2697. if (wakeup_irqres < 0) {
  2698. wakeup_irqres = -1;
  2699. MSM_HS_DBG("Wakeup irq not specified.\n");
  2700. }
  2701. uport->mapbase = core_resource->start;
  2702. uport->membase = ioremap(uport->mapbase,
  2703. resource_size(core_resource));
  2704. if (unlikely(!uport->membase)) {
  2705. MSM_HS_ERR("UART Resource ioremap Failed.\n");
  2706. return -ENOMEM;
  2707. }
  2708. msm_uport->bam_mem = bam_resource->start;
  2709. msm_uport->bam_base = ioremap(msm_uport->bam_mem,
  2710. resource_size(bam_resource));
  2711. if (unlikely(!msm_uport->bam_base)) {
  2712. MSM_HS_ERR("UART BAM Resource ioremap Failed.\n");
  2713. iounmap(uport->membase);
  2714. return -ENOMEM;
  2715. }
  2716. uport->irq = core_irqres;
  2717. msm_uport->bam_irq = bam_irqres;
  2718. pdata->wakeup_irq = wakeup_irqres;
  2719. msm_uport->bus_scale_table = msm_bus_cl_get_pdata(pdev);
  2720. if (!msm_uport->bus_scale_table) {
  2721. MSM_HS_ERR("BLSP UART: Bus scaling is disabled.\n");
  2722. } else {
  2723. msm_uport->bus_perf_client =
  2724. msm_bus_scale_register_client
  2725. (msm_uport->bus_scale_table);
  2726. if (IS_ERR(&msm_uport->bus_perf_client)) {
  2727. MSM_HS_ERR("%s():Bus client register failed\n",
  2728. __func__);
  2729. ret = -EINVAL;
  2730. goto unmap_memory;
  2731. }
  2732. }
  2733. if (pdata == NULL)
  2734. msm_uport->wakeup.irq = -1;
  2735. else {
  2736. msm_uport->wakeup.irq = pdata->wakeup_irq;
  2737. msm_uport->wakeup.ignore = 1;
  2738. msm_uport->wakeup.inject_rx = pdata->inject_rx_on_wakeup;
  2739. msm_uport->wakeup.rx_to_inject = pdata->rx_to_inject;
  2740. msm_uport->bam_tx_ep_pipe_index =
  2741. pdata->bam_tx_ep_pipe_index;
  2742. msm_uport->bam_rx_ep_pipe_index =
  2743. pdata->bam_rx_ep_pipe_index;
  2744. }
  2745. uport->iotype = UPIO_MEM;
  2746. uport->fifosize = 64;
  2747. uport->ops = &msm_hs_ops;
  2748. uport->flags = UPF_BOOT_AUTOCONF;
  2749. uport->uartclk = 7372800;
  2750. msm_uport->imr_reg = 0x0;
  2751. msm_uport->clk = clk_get(&pdev->dev, "core_clk");
  2752. if (IS_ERR(msm_uport->clk)) {
  2753. ret = PTR_ERR(msm_uport->clk);
  2754. goto deregister_bus_client;
  2755. }
  2756. msm_uport->pclk = clk_get(&pdev->dev, "iface_clk");
  2757. /*
  2758. * Some configurations do not require explicit pclk control so
  2759. * do not flag error on pclk get failure.
  2760. */
  2761. if (IS_ERR(msm_uport->pclk))
  2762. msm_uport->pclk = NULL;
  2763. ret = clk_set_rate(msm_uport->clk, uport->uartclk);
  2764. if (ret) {
  2765. MSM_HS_WARN("Error setting clock rate on UART\n");
  2766. goto put_clk;
  2767. }
  2768. msm_uport->hsuart_wq = alloc_workqueue("k_hsuart",
  2769. WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
  2770. if (!msm_uport->hsuart_wq) {
  2771. MSM_HS_ERR("%s(): Unable to create workqueue hsuart_wq\n",
  2772. __func__);
  2773. ret = -ENOMEM;
  2774. goto put_clk;
  2775. }
  2776. INIT_WORK(&msm_uport->clock_off_w, hsuart_clock_off_work);
  2777. /* Init work for sps_disconnect in stop_rx_locked */
  2778. INIT_WORK(&msm_uport->disconnect_rx_endpoint,
  2779. hsuart_disconnect_rx_endpoint_work);
  2780. mutex_init(&msm_uport->clk_mutex);
  2781. atomic_set(&msm_uport->clk_count, 0);
  2782. /* Initialize SPS HW connected with UART core */
  2783. ret = msm_hs_sps_init(msm_uport);
  2784. if (unlikely(ret)) {
  2785. MSM_HS_ERR("SPS Initialization failed ! err=%d", ret);
  2786. goto destroy_mutex;
  2787. }
  2788. msm_hs_clock_vote(msm_uport);
  2789. ret = uartdm_init_port(uport);
  2790. if (unlikely(ret)) {
  2791. goto err_clock;
  2792. }
  2793. /* configure the CR Protection to Enable */
  2794. msm_hs_write(uport, UART_DM_CR, CR_PROTECTION_EN);
  2795. /*
  2796. * Enable Command register protection before going ahead as this hw
  2797. * configuration makes sure that issued cmd to CR register gets complete
  2798. * before next issued cmd start. Hence mb() requires here.
  2799. */
  2800. mb();
  2801. /*
  2802. * Set RX_BREAK_ZERO_CHAR_OFF and RX_ERROR_CHAR_OFF
  2803. * so any rx_break and character having parity of framing
  2804. * error don't enter inside UART RX FIFO.
  2805. */
  2806. data = msm_hs_read(uport, UART_DM_MR2);
  2807. data |= (UARTDM_MR2_RX_BREAK_ZERO_CHAR_OFF |
  2808. UARTDM_MR2_RX_ERROR_CHAR_OFF);
  2809. msm_hs_write(uport, UART_DM_MR2, data);
  2810. mb();
  2811. hrtimer_init(&msm_uport->clk_off_timer, CLOCK_MONOTONIC,
  2812. HRTIMER_MODE_REL);
  2813. msm_uport->clk_off_timer.function = msm_hs_clk_off_retry;
  2814. msm_uport->clk_off_delay = ktime_set(0, 1000000); /* 1ms */
  2815. ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_clock.attr);
  2816. if (unlikely(ret)) {
  2817. MSM_HS_ERR("Probe Failed as sysfs failed\n");
  2818. goto err_clock;
  2819. }
  2820. msm_serial_debugfs_init(msm_uport, pdev->id);
  2821. uport->line = pdev->id;
  2822. if (pdata != NULL && pdata->userid && pdata->userid <= UARTDM_NR)
  2823. uport->line = pdata->userid;
  2824. ret = uart_add_one_port(&msm_hs_driver, uport);
  2825. if (!ret) {
  2826. msm_hs_clock_unvote(msm_uport);
  2827. msm_uport->clk_state = MSM_HS_CLK_PORT_OFF;
  2828. return ret;
  2829. }
  2830. err_clock:
  2831. msm_hs_clock_unvote(msm_uport);
  2832. destroy_mutex:
  2833. mutex_destroy(&msm_uport->clk_mutex);
  2834. destroy_workqueue(msm_uport->hsuart_wq);
  2835. put_clk:
  2836. if (msm_uport->pclk)
  2837. clk_put(msm_uport->pclk);
  2838. if (msm_uport->clk)
  2839. clk_put(msm_uport->clk);
  2840. deregister_bus_client:
  2841. msm_bus_scale_unregister_client(msm_uport->bus_perf_client);
  2842. unmap_memory:
  2843. iounmap(uport->membase);
  2844. iounmap(msm_uport->bam_base);
  2845. return ret;
  2846. }
  2847. static int __init msm_serial_hs_init(void)
  2848. {
  2849. int ret;
  2850. ipc_msm_hs_log_ctxt = ipc_log_context_create(IPC_MSM_HS_LOG_PAGES,
  2851. "msm_serial_hs", 0);
  2852. if (!ipc_msm_hs_log_ctxt)
  2853. MSM_HS_WARN("%s: error creating logging context", __func__);
  2854. ret = uart_register_driver(&msm_hs_driver);
  2855. if (unlikely(ret)) {
  2856. MSM_HS_WARN("%s failed to load\n", __func__);
  2857. return ret;
  2858. }
  2859. debug_base = debugfs_create_dir("msm_serial_hs", NULL);
  2860. if (IS_ERR_OR_NULL(debug_base))
  2861. MSM_HS_INFO("msm_serial_hs: Cannot create debugfs dir\n");
  2862. ret = platform_driver_register(&msm_serial_hs_platform_driver);
  2863. if (ret) {
  2864. MSM_HS_ERR("%s failed to load\n", __FUNCTION__);
  2865. debugfs_remove_recursive(debug_base);
  2866. uart_unregister_driver(&msm_hs_driver);
  2867. return ret;
  2868. }
  2869. MSM_HS_INFO("msm_serial_hs module loaded\n");
  2870. return ret;
  2871. }
  2872. /*
  2873. * Called by the upper layer when port is closed.
  2874. * - Disables the port
  2875. * - Unhook the ISR
  2876. */
  2877. static void msm_hs_shutdown(struct uart_port *uport)
  2878. {
  2879. int ret;
  2880. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  2881. struct circ_buf *tx_buf = &uport->state->xmit;
  2882. struct msm_hs_tx *tx = &msm_uport->tx;
  2883. struct sps_pipe *sps_pipe_handle = tx->cons.pipe_handle;
  2884. struct platform_device *pdev = to_platform_device(uport->dev);
  2885. if (pdev->id == 0)
  2886. printk(KERN_INFO "(msm_serial_hs) msm_hs_shutdown\n");
  2887. /*
  2888. * cancel the hrtimer first so that
  2889. * clk_state can not change in flight
  2890. */
  2891. hrtimer_cancel(&msm_uport->clk_off_timer);
  2892. flush_work(&msm_uport->clock_off_w);
  2893. if (use_low_power_wakeup(msm_uport))
  2894. irq_set_irq_wake(msm_uport->wakeup.irq, 0);
  2895. /* wake irq or uart irq is active depending on clk_state */
  2896. if (msm_uport->clk_state == MSM_HS_CLK_OFF) {
  2897. if (use_low_power_wakeup(msm_uport))
  2898. disable_irq(msm_uport->wakeup.irq);
  2899. } else {
  2900. disable_irq(uport->irq);
  2901. wake_unlock(&msm_uport->dma_wake_lock);
  2902. }
  2903. /* make sure tx tasklet finishes */
  2904. tasklet_kill(&msm_uport->tx.tlet);
  2905. ret = wait_event_timeout(msm_uport->tx.wait,
  2906. uart_circ_empty(tx_buf), 500);
  2907. if (!ret)
  2908. MSM_HS_WARN("Shutdown called when tx buff not empty");
  2909. /* make sure rx tasklet finishes */
  2910. tasklet_kill(&msm_uport->rx.tlet);
  2911. wait_event(msm_uport->rx.wait, msm_uport->rx.flush == FLUSH_SHUTDOWN);
  2912. cancel_delayed_work_sync(&msm_uport->rx.flip_insert_work);
  2913. flush_workqueue(msm_uport->hsuart_wq);
  2914. msm_hs_clock_vote(msm_uport);
  2915. mutex_lock(&msm_uport->clk_mutex);
  2916. /* BAM Disconnect for TX */
  2917. ret = sps_disconnect(sps_pipe_handle);
  2918. if (ret)
  2919. MSM_HS_ERR("%s(): sps_disconnect failed\n",
  2920. __func__);
  2921. WARN_ON(msm_uport->rx.flush < FLUSH_STOP);
  2922. /* Disable the transmitter */
  2923. msm_hs_write(uport, UART_DM_CR, UARTDM_CR_TX_DISABLE_BMSK);
  2924. /* Disable the receiver */
  2925. msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_DISABLE_BMSK);
  2926. msm_uport->imr_reg = 0;
  2927. msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
  2928. /*
  2929. * Complete all device write before actually disabling uartclk.
  2930. * Hence mb() requires here.
  2931. */
  2932. mb();
  2933. mutex_unlock(&msm_uport->clk_mutex);
  2934. msm_uport->rx.buffer_pending = NONE_PENDING;
  2935. MSM_HS_DBG("%s(): tx, rx events complete", __func__);
  2936. msm_hs_clock_unvote(msm_uport);
  2937. if (msm_uport->clk_state != MSM_HS_CLK_OFF) {
  2938. /* to balance clk_state */
  2939. msm_hs_clock_unvote(msm_uport);
  2940. }
  2941. pm_runtime_disable(uport->dev);
  2942. msm_uport->clk_state = MSM_HS_CLK_PORT_OFF;
  2943. dma_unmap_single(uport->dev, msm_uport->tx.dma_base,
  2944. UART_XMIT_SIZE, DMA_TO_DEVICE);
  2945. /* Free the interrupt */
  2946. free_irq(uport->irq, msm_uport);
  2947. if (use_low_power_wakeup(msm_uport))
  2948. free_irq(msm_uport->wakeup.irq, msm_uport);
  2949. msm_hs_unconfig_uart_gpios(uport);
  2950. }
/*
 * Module exit: tear down in reverse order of msm_serial_hs_init() —
 * debugfs tree, platform driver, then the uart driver.
 */
static void __exit msm_serial_hs_exit(void)
{
	MSM_HS_INFO("msm_serial_hs module removed\n");
	debugfs_remove_recursive(debug_base);
	platform_driver_unregister(&msm_serial_hs_platform_driver);
	uart_unregister_driver(&msm_hs_driver);
}
/* Runtime-PM idle callback. */
static int msm_hs_runtime_idle(struct device *dev)
{
	/*
	 * returning success from idle results in runtime suspend to be
	 * called
	 */
	return 0;
}
  2966. static int msm_hs_runtime_resume(struct device *dev)
  2967. {
  2968. struct platform_device *pdev = container_of(dev, struct
  2969. platform_device, dev);
  2970. struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
  2971. /* This check should not fail
  2972. * During probe, we set uport->line to either pdev->id or userid */
  2973. if (msm_uport)
  2974. msm_hs_request_clock_on(&msm_uport->uport);
  2975. return 0;
  2976. }
  2977. static int msm_hs_runtime_suspend(struct device *dev)
  2978. {
  2979. struct platform_device *pdev = container_of(dev, struct
  2980. platform_device, dev);
  2981. struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
  2982. /* This check should not fail
  2983. * During probe, we set uport->line to either pdev->id or userid */
  2984. if (msm_uport)
  2985. msm_hs_request_clock_off(&msm_uport->uport);
  2986. return 0;
  2987. }
/* Runtime-PM hooks wired into the platform driver below. */
static const struct dev_pm_ops msm_hs_dev_pm_ops = {
	.runtime_suspend = msm_hs_runtime_suspend,
	.runtime_resume = msm_hs_runtime_resume,
	.runtime_idle = msm_hs_runtime_idle,
};
/* Platform driver binding: matches DT nodes via msm_hs_match_table. */
static struct platform_driver msm_serial_hs_platform_driver = {
	.probe = msm_hs_probe,
	.remove = __devexit_p(msm_hs_remove),
	.driver = {
		.name = "msm_serial_hs",
		.pm = &msm_hs_dev_pm_ops,
		.of_match_table = msm_hs_match_table,
	},
};
  3002. static struct uart_driver msm_hs_driver = {
  3003. .owner = THIS_MODULE,
  3004. .driver_name = "msm_serial_hs",
  3005. .dev_name = "ttyHS",
  3006. .nr = UARTDM_NR,
  3007. .cons = 0,
  3008. };
/* serial-core operations table; *_locked entries are called with the
 * port lock held by the serial core. */
static struct uart_ops msm_hs_ops = {
	.tx_empty = msm_hs_tx_empty,
	.set_mctrl = msm_hs_set_mctrl_locked,
	.get_mctrl = msm_hs_get_mctrl_locked,
	.stop_tx = msm_hs_stop_tx_locked,
	.start_tx = msm_hs_start_tx_locked,
	.stop_rx = msm_hs_stop_rx_locked,
	.enable_ms = msm_hs_enable_ms_locked,
	.break_ctl = msm_hs_break_ctl,
	.startup = msm_hs_startup,
	.shutdown = msm_hs_shutdown,
	.set_termios = msm_hs_set_termios,
	.type = msm_hs_type,
	.config_port = msm_hs_config_port,
	.flush_buffer = NULL,
	.ioctl = msm_hs_ioctl,
};
/* Module entry/exit points and metadata. */
module_init(msm_serial_hs_init);
module_exit(msm_serial_hs_exit);
MODULE_DESCRIPTION("High Speed UART Driver for the MSM chipset");
MODULE_VERSION("1.2");
MODULE_LICENSE("GPL v2");