/* drivers/tty/n_smux.c
 *
 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/tty_driver.h>
#include <linux/smux.h>
#include <linux/list.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <mach/subsystem_notif.h>
#include <mach/subsystem_restart.h>
#include <mach/msm_serial_hs.h>
#include <mach/msm_ipc_logging.h>
#include "smux_private.h"
#include "smux_loopback.h"

#define SMUX_NOTIFY_FIFO_SIZE	128
#define SMUX_TX_QUEUE_SIZE	256
#define SMUX_PKT_LOG_SIZE	128

/* Maximum size we can accept in a single RX buffer */
#define TTY_RECEIVE_ROOM	65536
#define TTY_BUFFER_FULL_WAIT_MS	50

/* maximum sleep time between wakeup attempts */
#define SMUX_WAKEUP_DELAY_MAX	(1 << 20)

/* minimum delay for scheduling delayed work */
#define SMUX_WAKEUP_DELAY_MIN	(1 << 15)

/* inactivity timeout for no rx/tx activity */
#define SMUX_INACTIVITY_TIMEOUT_MS 1000000

/* RX get_rx_buffer retry timeout values */
#define SMUX_RX_RETRY_MIN_MS (1 << 0)	/* 1 ms */
#define SMUX_RX_RETRY_MAX_MS (1 << 10)	/* 1024 ms */
enum {
	MSM_SMUX_DEBUG = 1U << 0,
	MSM_SMUX_INFO = 1U << 1,
	MSM_SMUX_POWER_INFO = 1U << 2,
	MSM_SMUX_PKT = 1U << 3,
};

static int smux_debug_mask = MSM_SMUX_DEBUG | MSM_SMUX_POWER_INFO;
module_param_named(debug_mask, smux_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

static int disable_ipc_logging;

/* Simulated wakeup used for testing */
int smux_byte_loopback;
module_param_named(byte_loopback, smux_byte_loopback,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
int smux_simulate_wakeup_delay = 1;
module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define IPC_LOG_STR(x...) do { \
	if (!disable_ipc_logging && log_ctx) \
		ipc_log_string(log_ctx, x); \
} while (0)

#define SMUX_DBG(x...) do { \
	if (smux_debug_mask & MSM_SMUX_DEBUG) \
		IPC_LOG_STR(x); \
} while (0)

#define SMUX_ERR(x...) do { \
	pr_err(x); \
	IPC_LOG_STR(x); \
} while (0)

#define SMUX_PWR(x...) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
		IPC_LOG_STR(x); \
} while (0)

#define SMUX_PWR_PKT_RX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
		smux_log_pkt(pkt, 1); \
} while (0)

#define SMUX_PWR_PKT_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
		if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
			pkt->hdr.flags == SMUX_WAKEUP_ACK) \
			IPC_LOG_STR("smux: TX Wakeup ACK\n"); \
		else if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
			pkt->hdr.flags == SMUX_WAKEUP_REQ) \
			IPC_LOG_STR("smux: TX Wakeup REQ\n"); \
		else \
			smux_log_pkt(pkt, 0); \
	} \
} while (0)

#define SMUX_PWR_BYTE_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
		smux_log_pkt(pkt, 0); \
	} \
} while (0)

#define SMUX_LOG_PKT_RX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 1); \
} while (0)

#define SMUX_LOG_PKT_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 0); \
} while (0)
/**
 * Return true if channel is fully opened (both
 * local and remote sides are in the OPENED state).
 */
#define IS_FULLY_OPENED(ch) \
	(ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
	 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)

static struct platform_device smux_devs[] = {
	{.name = "SMUX_CTL", .id = -1},
	{.name = "SMUX_RMNET", .id = -1},
	{.name = "SMUX_DUN_DATA_HSUART", .id = 0},
	{.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
	{.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
	{.name = "SMUX_DIAG", .id = -1},
};

enum {
	SMUX_CMD_STATUS_RTC = 1 << 0,
	SMUX_CMD_STATUS_RTR = 1 << 1,
	SMUX_CMD_STATUS_RI = 1 << 2,
	SMUX_CMD_STATUS_DCD = 1 << 3,
	SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
};

/* Channel mode */
enum {
	SMUX_LCH_MODE_NORMAL,
	SMUX_LCH_MODE_LOCAL_LOOPBACK,
	SMUX_LCH_MODE_REMOTE_LOOPBACK,
};

enum {
	SMUX_RX_IDLE,
	SMUX_RX_MAGIC,
	SMUX_RX_HDR,
	SMUX_RX_PAYLOAD,
	SMUX_RX_FAILURE,
};

/**
 * Power states.
 *
 * The _FLUSH states are internal transitional states and are not part of the
 * official state machine.
 */
enum {
	SMUX_PWR_OFF,
	SMUX_PWR_TURNING_ON,
	SMUX_PWR_ON,
	SMUX_PWR_TURNING_OFF_FLUSH, /* power-off req/ack in TX queue */
	SMUX_PWR_TURNING_OFF,
	SMUX_PWR_OFF_FLUSH,
};
union notifier_metadata {
	struct smux_meta_disconnected disconnected;
	struct smux_meta_read read;
	struct smux_meta_write write;
	struct smux_meta_tiocm tiocm;
};

struct smux_notify_handle {
	void (*notify)(void *priv, int event_type, const void *metadata);
	void *priv;
	int event_type;
	union notifier_metadata *metadata;
};

/**
 * Get RX Buffer Retry structure.
 *
 * This is used for clients that are unable to provide an RX buffer
 * immediately. This temporary structure will be used to temporarily hold the
 * data and perform a retry.
 */
struct smux_rx_pkt_retry {
	struct smux_pkt_t *pkt;
	struct list_head rx_retry_list;
	unsigned timeout_in_ms;
};

/**
 * Receive worker data structure.
 *
 * One instance is created for every call to smux_rx_state_machine.
 */
struct smux_rx_worker_data {
	const unsigned char *data;
	int len;
	int flag;
	struct work_struct work;
	struct completion work_complete;
};

/**
 * Line discipline and module structure.
 *
 * Only one instance since multiple instances of line discipline are not
 * allowed.
 */
struct smux_ldisc_t {
	struct mutex mutex_lha0;

	int is_initialized;
	int platform_devs_registered;
	int in_reset;
	int remote_is_alive;
	int ld_open_count;
	struct tty_struct *tty;

	/* RX State Machine (single-threaded access by smux_rx_wq) */
	unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
	unsigned int recv_len;
	unsigned int pkt_remain;
	unsigned rx_state;

	/* RX Activity - accessed by multiple threads */
	spinlock_t rx_lock_lha1;
	unsigned rx_activity_flag;

	/* TX / Power */
	spinlock_t tx_lock_lha2;
	struct list_head lch_tx_ready_list;
	unsigned power_state;
	unsigned pwr_wakeup_delay_us;
	unsigned tx_activity_flag;
	unsigned powerdown_enabled;
	unsigned power_ctl_remote_req_received;
	struct list_head power_queue;
	unsigned remote_initiated_wakeup_count;
	unsigned local_initiated_wakeup_count;
};
/* data structures */
struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
static struct smux_ldisc_t smux;

static const char *tty_error_type[] = {
	[TTY_NORMAL] = "normal",
	[TTY_OVERRUN] = "overrun",
	[TTY_BREAK] = "break",
	[TTY_PARITY] = "parity",
	[TTY_FRAME] = "framing",
};

static const char * const smux_cmds[] = {
	[SMUX_CMD_DATA] = "DATA",
	[SMUX_CMD_OPEN_LCH] = "OPEN",
	[SMUX_CMD_CLOSE_LCH] = "CLOSE",
	[SMUX_CMD_STATUS] = "STATUS",
	[SMUX_CMD_PWR_CTL] = "PWR",
	[SMUX_CMD_DELAY] = "DELAY",
	[SMUX_CMD_BYTE] = "Raw Byte",
};

static const char * const smux_events[] = {
	[SMUX_CONNECTED] = "CONNECTED",
	[SMUX_DISCONNECTED] = "DISCONNECTED",
	[SMUX_READ_DONE] = "READ_DONE",
	[SMUX_READ_FAIL] = "READ_FAIL",
	[SMUX_WRITE_DONE] = "WRITE_DONE",
	[SMUX_WRITE_FAIL] = "WRITE_FAIL",
	[SMUX_TIOCM_UPDATE] = "TIOCM_UPDATE",
	[SMUX_LOW_WM_HIT] = "LOW_WM_HIT",
	[SMUX_HIGH_WM_HIT] = "HIGH_WM_HIT",
	[SMUX_RX_RETRY_HIGH_WM_HIT] = "RX_RETRY_HIGH_WM_HIT",
	[SMUX_RX_RETRY_LOW_WM_HIT] = "RX_RETRY_LOW_WM_HIT",
	[SMUX_LOCAL_CLOSED] = "LOCAL_CLOSED",
	[SMUX_REMOTE_CLOSED] = "REMOTE_CLOSED",
};

static const char * const smux_local_state[] = {
	[SMUX_LCH_LOCAL_CLOSED] = "CLOSED",
	[SMUX_LCH_LOCAL_OPENING] = "OPENING",
	[SMUX_LCH_LOCAL_OPENED] = "OPENED",
	[SMUX_LCH_LOCAL_CLOSING] = "CLOSING",
};

static const char * const smux_remote_state[] = {
	[SMUX_LCH_REMOTE_CLOSED] = "CLOSED",
	[SMUX_LCH_REMOTE_OPENED] = "OPENED",
};

static const char * const smux_mode[] = {
	[SMUX_LCH_MODE_NORMAL] = "N",
	[SMUX_LCH_MODE_LOCAL_LOOPBACK] = "L",
	[SMUX_LCH_MODE_REMOTE_LOOPBACK] = "R",
};

static const char * const smux_undef[] = {
	[SMUX_UNDEF_LONG] = "UNDEF",
	[SMUX_UNDEF_SHORT] = "U",
};

static void *log_ctx;
static void smux_notify_local_fn(struct work_struct *work);
static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);

static struct workqueue_struct *smux_notify_wq;
static size_t handle_size;
static struct kfifo smux_notify_fifo;
static int queued_fifo_notifications;
static DEFINE_SPINLOCK(notify_lock_lhc1);

static struct workqueue_struct *smux_tx_wq;
static struct workqueue_struct *smux_rx_wq;
static void smux_tx_worker(struct work_struct *work);
static DECLARE_WORK(smux_tx_work, smux_tx_worker);

static void smux_wakeup_worker(struct work_struct *work);
static void smux_rx_retry_worker(struct work_struct *work);
static void smux_rx_worker(struct work_struct *work);
static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);

static void smux_inactivity_worker(struct work_struct *work);
static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
		smux_inactivity_worker);

static void list_channel(struct smux_lch_t *ch);
static int smux_send_status_cmd(struct smux_lch_t *ch);
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
static void smux_flush_tty(void);
static void smux_purge_ch_tx_queue(struct smux_lch_t *ch, int is_ssr);
static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata);
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);
static void smux_uart_power_on_atomic(void);
static int smux_rx_flow_control_updated(struct smux_lch_t *ch);
static void smux_flush_workqueues(void);
static void smux_pdev_release(struct device *dev);
/**
 * local_lch_state() - Return human readable form of local logical state.
 * @state: Local logical channel state enum.
 *
 */
const char *local_lch_state(unsigned state)
{
	if (state < ARRAY_SIZE(smux_local_state))
		return smux_local_state[state];
	else
		return smux_undef[SMUX_UNDEF_LONG];
}

/**
 * remote_lch_state() - Return human readable form of remote logical state.
 * @state: Remote logical channel state enum.
 *
 */
const char *remote_lch_state(unsigned state)
{
	if (state < ARRAY_SIZE(smux_remote_state))
		return smux_remote_state[state];
	else
		return smux_undef[SMUX_UNDEF_LONG];
}

/**
 * lch_mode() - Return human readable form of mode.
 * @mode: Mode of the logical channel.
 *
 */
const char *lch_mode(unsigned mode)
{
	if (mode < ARRAY_SIZE(smux_mode))
		return smux_mode[mode];
	else
		return smux_undef[SMUX_UNDEF_SHORT];
}
/**
 * Convert TTY Error Flags to string for logging purposes.
 *
 * @flag    TTY_* flag
 * @returns String description or NULL if unknown
 */
static const char *tty_flag_to_str(unsigned flag)
{
	if (flag < ARRAY_SIZE(tty_error_type))
		return tty_error_type[flag];
	return NULL;
}

/**
 * Convert SMUX Command to string for logging purposes.
 *
 * @cmd     SMUX command
 * @returns String description or NULL if unknown
 */
static const char *cmd_to_str(unsigned cmd)
{
	if (cmd < ARRAY_SIZE(smux_cmds))
		return smux_cmds[cmd];
	return NULL;
}

/**
 * Convert SMUX event to string for logging purposes.
 *
 * @event   SMUX event
 * @returns String description or NULL if unknown
 */
static const char *event_to_str(unsigned cmd)
{
	if (cmd < ARRAY_SIZE(smux_events))
		return smux_events[cmd];
	return NULL;
}

/**
 * Set the reset state due to an unrecoverable failure.
 */
static void smux_enter_reset(void)
{
	SMUX_ERR("%s: unrecoverable failure, waiting for ssr\n", __func__);
	smux.in_reset = 1;
	smux.remote_is_alive = 0;
}
/**
 * Initialize the lch_structs.
 */
static int lch_init(void)
{
	unsigned int id;
	struct smux_lch_t *ch;
	int i = 0;

	handle_size = sizeof(struct smux_notify_handle *);

	smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
	smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
	smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");

	/* create_singlethread_workqueue() returns NULL on failure */
	if (!smux_notify_wq || !smux_tx_wq || !smux_rx_wq) {
		SMUX_DBG("smux: %s: create_singlethread_workqueue ENOMEM\n",
							__func__);
		return -ENOMEM;
	}

	i |= kfifo_alloc(&smux_notify_fifo,
			SMUX_NOTIFY_FIFO_SIZE * handle_size,
			GFP_KERNEL);
	i |= smux_loopback_init();
	if (i) {
		SMUX_ERR("%s: out of memory error\n", __func__);
		return -ENOMEM;
	}

	for (id = 0 ; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
		ch = &smux_lch[id];
		spin_lock_init(&ch->state_lock_lhb1);
		ch->lcid = id;
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->local_mode = SMUX_LCH_MODE_NORMAL;
		ch->local_tiocm = 0x0;
		ch->options = SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->remote_tiocm = 0x0;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;
		ch->priv = 0;
		ch->notify = 0;
		ch->get_rx_buffer = 0;

		INIT_LIST_HEAD(&ch->rx_retry_queue);
		ch->rx_retry_queue_cnt = 0;
		INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);

		spin_lock_init(&ch->tx_lock_lhb2);
		INIT_LIST_HEAD(&ch->tx_queue);
		INIT_LIST_HEAD(&ch->tx_ready_list);
		ch->tx_pending_data_cnt = 0;
		ch->notify_lwm = 0;
	}

	return 0;
}
/**
 * Empty and cleanup all SMUX logical channels for subsystem restart or line
 * discipline disconnect.
 */
static void smux_lch_purge(void)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int i;

	/* Empty TX ready list */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	while (!list_empty(&smux.lch_tx_ready_list)) {
		SMUX_DBG("smux: %s: emptying ready list %p\n",
				__func__, smux.lch_tx_ready_list.next);
		ch = list_first_entry(&smux.lch_tx_ready_list,
						struct smux_lch_t,
						tx_ready_list);
		list_del(&ch->tx_ready_list);
		INIT_LIST_HEAD(&ch->tx_ready_list);
	}

	/* Purge Power Queue */
	while (!list_empty(&smux.power_queue)) {
		struct smux_pkt_t *pkt;

		pkt = list_first_entry(&smux.power_queue,
						struct smux_pkt_t,
						list);
		list_del(&pkt->list);
		SMUX_DBG("smux: %s: emptying power queue pkt=%p\n",
				__func__, pkt);
		smux_free_pkt(pkt);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	/* Close all ports */
	for (i = 0 ; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
		union notifier_metadata meta;
		int send_disconnect = 0;

		ch = &smux_lch[i];
		SMUX_DBG("smux: %s: cleaning up lcid %d\n", __func__, i);

		spin_lock_irqsave(&ch->state_lock_lhb1, flags);

		/* Purge TX queue */
		spin_lock(&ch->tx_lock_lhb2);
		smux_purge_ch_tx_queue(ch, 1);
		spin_unlock(&ch->tx_lock_lhb2);

		meta.disconnected.is_ssr = smux.in_reset;

		/* Notify user of disconnect and reset channel state */
		if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
			ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
			schedule_notify(ch->lcid, SMUX_LOCAL_CLOSED, &meta);
			send_disconnect = 1;
		}
		if (ch->remote_state != SMUX_LCH_REMOTE_CLOSED) {
			schedule_notify(ch->lcid, SMUX_REMOTE_CLOSED, &meta);
			send_disconnect = 1;
		}
		if (send_disconnect)
			schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);

		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;

		/* Purge RX retry queue */
		if (ch->rx_retry_queue_cnt)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);

		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}
}

int smux_assert_lch_id(uint32_t lcid)
{
	if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
		return -ENXIO;
	else
		return 0;
}
/**
 * Log packet information for debug purposes.
 *
 * @pkt     Packet to log
 * @is_recv 1 = RX packet; 0 = TX Packet
 *
 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
 *
 * PKT Info:
 *   [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
 *
 * Direction:    R = Receive, S = Send
 * Local State:  C = Closed; c = closing; o = opening; O = Opened
 * Local Mode:   L = Local loopback; R = Remote loopback; N = Normal
 * Remote State: C = Closed; O = Opened
 * Remote Mode:  R = Remote loopback; N = Normal
 */
static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
{
	char logbuf[SMUX_PKT_LOG_SIZE];
	char cmd_extra[16];
	int i = 0;
	int count;
	int len;
	char local_state;
	char local_mode;
	char remote_state;
	char remote_mode;
	struct smux_lch_t *ch = NULL;
	unsigned char *data;

	if (!smux_assert_lch_id(pkt->hdr.lcid))
		ch = &smux_lch[pkt->hdr.lcid];

	if (ch) {
		switch (ch->local_state) {
		case SMUX_LCH_LOCAL_CLOSED:
			local_state = 'C';
			break;
		case SMUX_LCH_LOCAL_OPENING:
			local_state = 'o';
			break;
		case SMUX_LCH_LOCAL_OPENED:
			local_state = 'O';
			break;
		case SMUX_LCH_LOCAL_CLOSING:
			local_state = 'c';
			break;
		default:
			local_state = 'U';
			break;
		}

		switch (ch->local_mode) {
		case SMUX_LCH_MODE_LOCAL_LOOPBACK:
			local_mode = 'L';
			break;
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			local_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			local_mode = 'N';
			break;
		default:
			local_mode = 'U';
			break;
		}

		switch (ch->remote_state) {
		case SMUX_LCH_REMOTE_CLOSED:
			remote_state = 'C';
			break;
		case SMUX_LCH_REMOTE_OPENED:
			remote_state = 'O';
			break;
		default:
			remote_state = 'U';
			break;
		}

		switch (ch->remote_mode) {
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			remote_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			remote_mode = 'N';
			break;
		default:
			remote_mode = 'U';
			break;
		}
	} else {
		/* broadcast channel */
		local_state = '-';
		local_mode = '-';
		remote_state = '-';
		remote_mode = '-';
	}

	/* determine command type (ACK, etc) */
	cmd_extra[0] = '\0';
	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	case SMUX_CMD_CLOSE_LCH:
		if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	case SMUX_CMD_PWR_CTL:
		if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	};

	i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
		"smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
		is_recv ? 'R' : 'S', pkt->hdr.lcid,
		local_state, local_mode,
		remote_state, remote_mode,
		cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
		pkt->hdr.payload_len, pkt->hdr.pad_len);

	len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
	data = (unsigned char *)pkt->payload;
	for (count = 0; count < len; count++)
		i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
				"%02x ", (unsigned)data[count]);

	IPC_LOG_STR(logbuf);
}
static void smux_notify_local_fn(struct work_struct *work)
{
	struct smux_notify_handle *notify_handle = NULL;
	union notifier_metadata *metadata = NULL;
	unsigned long flags;
	int i;

	for (;;) {
		/* retrieve notification */
		spin_lock_irqsave(&notify_lock_lhc1, flags);
		if (kfifo_len(&smux_notify_fifo) >= handle_size) {
			i = kfifo_out(&smux_notify_fifo,
				&notify_handle,
				handle_size);
			if (i != handle_size) {
				SMUX_ERR(
					"%s: unable to retrieve handle %d expected %d\n",
					__func__, i, handle_size);
				spin_unlock_irqrestore(&notify_lock_lhc1, flags);
				break;
			}
		} else {
			spin_unlock_irqrestore(&notify_lock_lhc1, flags);
			break;
		}
		--queued_fifo_notifications;
		spin_unlock_irqrestore(&notify_lock_lhc1, flags);

		/* notify client */
		metadata = notify_handle->metadata;
		notify_handle->notify(notify_handle->priv,
			notify_handle->event_type,
			metadata);

		kfree(metadata);
		kfree(notify_handle);
	}
}
/**
 * Initialize existing packet.
 */
void smux_init_pkt(struct smux_pkt_t *pkt)
{
	memset(pkt, 0x0, sizeof(*pkt));
	pkt->hdr.magic = SMUX_MAGIC;
	INIT_LIST_HEAD(&pkt->list);
}

/**
 * Allocate and initialize packet.
 *
 * If a payload is needed, either set it directly and ensure that it's freed or
 * use smux_alloc_pkt_payload() to allocate a payload and it will be freed
 * automatically when smux_free_pkt() is called.
 */
struct smux_pkt_t *smux_alloc_pkt(void)
{
	struct smux_pkt_t *pkt;

	/* Consider a free list implementation instead of kmalloc */
	pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
	if (!pkt) {
		SMUX_ERR("%s: out of memory\n", __func__);
		return NULL;
	}
	smux_init_pkt(pkt);
	pkt->allocated = 1;

	return pkt;
}

/**
 * Free packet.
 *
 * @pkt Packet to free (may be NULL)
 *
 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
 * well. Otherwise, the caller is responsible for freeing the payload.
 */
void smux_free_pkt(struct smux_pkt_t *pkt)
{
	if (pkt) {
		if (pkt->free_payload)
			kfree(pkt->payload);
		if (pkt->allocated)
			kfree(pkt);
	}
}

/**
 * Allocate packet payload.
 *
 * @pkt Packet to add payload to
 *
 * @returns 0 on success, <0 upon error
 *
 * A flag is set to signal smux_free_pkt() to free the payload.
 */
int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
{
	if (!pkt)
		return -EINVAL;

	pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
	pkt->free_payload = 1;
	if (!pkt->payload) {
		SMUX_ERR("%s: unable to malloc %d bytes for payload\n",
				__func__, pkt->hdr.payload_len);
		return -ENOMEM;
	}

	return 0;
}
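
/*
 * Illustrative sketch (not part of the driver): the typical packet
 * lifecycle using the helpers above.  The identifiers lcid and buf stand
 * in for a caller-supplied channel id and data buffer, and the payload
 * length of 32 is an arbitrary example value.
 *
 *	struct smux_pkt_t *pkt;
 *
 *	pkt = smux_alloc_pkt();
 *	if (!pkt)
 *		return -ENOMEM;
 *	pkt->hdr.cmd = SMUX_CMD_DATA;
 *	pkt->hdr.lcid = lcid;
 *	pkt->hdr.payload_len = 32;
 *	if (smux_alloc_pkt_payload(pkt)) {
 *		smux_free_pkt(pkt);
 *		return -ENOMEM;
 *	}
 *	memcpy(pkt->payload, buf, pkt->hdr.payload_len);
 *	...
 *	smux_free_pkt(pkt);	// frees the payload too (free_payload is set)
 */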
static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata)
{
	struct smux_notify_handle *notify_handle = 0;
	union notifier_metadata *meta_copy = 0;
	struct smux_lch_t *ch;
	int i;
	unsigned long flags;
	int ret = 0;

	IPC_LOG_STR("smux: %s ch:%d\n", event_to_str(event), lcid);
	ch = &smux_lch[lcid];
	if (!ch->notify) {
		SMUX_DBG("%s: [%d]lcid notify fn is NULL\n", __func__, lcid);
		return ret;
	}

	notify_handle = kzalloc(sizeof(struct smux_notify_handle),
						GFP_ATOMIC);
	if (!notify_handle) {
		SMUX_ERR("%s: out of memory\n", __func__);
		ret = -ENOMEM;
		goto free_out;
	}

	notify_handle->notify = ch->notify;
	notify_handle->priv = ch->priv;
	notify_handle->event_type = event;
	if (metadata) {
		meta_copy = kzalloc(sizeof(union notifier_metadata),
							GFP_ATOMIC);
		if (!meta_copy) {
			SMUX_ERR("%s: out of memory\n", __func__);
			ret = -ENOMEM;
			goto free_out;
		}
		*meta_copy = *metadata;
		notify_handle->metadata = meta_copy;
	} else {
		notify_handle->metadata = NULL;
	}

	spin_lock_irqsave(&notify_lock_lhc1, flags);
	i = kfifo_avail(&smux_notify_fifo);
	if (i < handle_size) {
		SMUX_ERR("%s: fifo full error %d expected %d\n",
					__func__, i, handle_size);
		ret = -ENOMEM;
		goto unlock_out;
	}

	i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
	if (i < 0 || i != handle_size) {
		SMUX_ERR("%s: fifo not available error %d (expected %d)\n",
				__func__, i, handle_size);
		ret = -ENOSPC;
		goto unlock_out;
	}
	++queued_fifo_notifications;

unlock_out:
	spin_unlock_irqrestore(&notify_lock_lhc1, flags);

free_out:
	queue_work(smux_notify_wq, &smux_notify_local);
	if (ret < 0 && notify_handle) {
		kfree(notify_handle->metadata);
		kfree(notify_handle);
	}
	return ret;
}
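
/*
 * Illustrative sketch (not part of the driver): the shape of a client
 * notification callback queued by schedule_notify().  client_notify is a
 * hypothetical name; events are delivered from the smux_notify_wq worker,
 * so the callback runs in process context.
 *
 *	static void client_notify(void *priv, int event_type,
 *				  const void *metadata)
 *	{
 *		switch (event_type) {
 *		case SMUX_CONNECTED:
 *			// channel fully open; safe to start queuing writes
 *			break;
 *		case SMUX_DISCONNECTED:
 *			// both sides closed; metadata carries is_ssr
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */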
/**
 * Returns the serialized size of a packet.
 *
 * @pkt Packet to serialize
 *
 * @returns Serialized length of packet
 */
static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
{
	unsigned int size;

	size = sizeof(struct smux_hdr_t);
	size += pkt->hdr.payload_len;
	size += pkt->hdr.pad_len;

	return size;
}

/**
 * Serialize packet @pkt into output buffer @data.
 *
 * @pkt     Packet to serialize
 * @out     Destination buffer pointer
 * @out_len Size of serialized packet
 *
 * @returns 0 for success
 */
int smux_serialize(struct smux_pkt_t *pkt, char *out,
					unsigned int *out_len)
{
	char *data_start = out;

	if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
		SMUX_ERR("%s: packet size %d too big\n",
				__func__, smux_serialize_size(pkt));
		return -E2BIG;
	}

	memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
	out += sizeof(struct smux_hdr_t);
	if (pkt->payload) {
		memcpy(out, pkt->payload, pkt->hdr.payload_len);
		out += pkt->hdr.payload_len;
	}
	if (pkt->hdr.pad_len) {
		memset(out, 0x0, pkt->hdr.pad_len);
		out += pkt->hdr.pad_len;
	}

	*out_len = out - data_start;
	return 0;
}
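
/*
 * Illustrative sketch (not part of the driver): serializing a packet into
 * a flat buffer before handing it to the transport.  pkt stands in for a
 * previously built packet; smux_serialize() enforces SMUX_MAX_PKT_SIZE as
 * the upper bound, and the result is header + payload + zero padding.
 *
 *	char buf[SMUX_MAX_PKT_SIZE];
 *	unsigned int len;
 *
 *	if (smux_serialize(pkt, buf, &len) == 0)
 *		write_to_tty(buf, len);
 */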
/**
 * Serialize header and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized header data
 * @out_len[out] Pointer to the serialized header length
 */
static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = (char *)&pkt->hdr;
	*out_len = sizeof(struct smux_hdr_t);
}

/**
 * Serialize payload and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized payload data
 * @out_len[out] Pointer to the serialized payload length
 */
static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = pkt->payload;
	*out_len = pkt->hdr.payload_len;
}

/**
 * Serialize padding and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized padding (always NULL)
 * @out_len[out] Pointer to the serialized padding length
 *
 * Since the padding field value is undefined, only the size of the padding
 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
 */
static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = NULL;
	*out_len = pkt->hdr.pad_len;
}
/**
 * Write data to TTY framework and handle breaking the writes up if needed.
 *
 * @data Data to write
 * @len  Length of data
 *
 * @returns 0 for success, < 0 for failure
 */
static int write_to_tty(char *data, unsigned len)
{
	int data_written;

	if (!data)
		return 0;

	while (len > 0 && !smux.in_reset) {
		data_written = smux.tty->ops->write(smux.tty, data, len);
		if (data_written >= 0) {
			len -= data_written;
			data += data_written;
		} else {
			SMUX_ERR("%s: TTY write returned error %d\n",
					__func__, data_written);
			return data_written;
		}

		if (len)
			tty_wait_until_sent(smux.tty,
				msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
	}
	return 0;
}

/**
 * Write packet to TTY.
 *
 * @pkt packet to write
 *
 * @returns 0 on success
 */
static int smux_tx_tty(struct smux_pkt_t *pkt)
{
	char *data;
	unsigned int len;
	int ret;

	if (!smux.tty) {
		SMUX_ERR("%s: TTY not initialized", __func__);
		return -ENOTTY;
	}

	if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
		SMUX_DBG("smux: %s: tty send single byte\n", __func__);
		ret = write_to_tty(&pkt->hdr.flags, 1);
		return ret;
	}

	smux_serialize_hdr(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		SMUX_ERR("%s: failed %d to write header %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_payload(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		SMUX_ERR("%s: failed %d to write payload %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_padding(pkt, &data, &len);
	while (len > 0) {
		char zero = 0x0;

		ret = write_to_tty(&zero, 1);
		if (ret) {
			SMUX_ERR("%s: failed %d to write padding %d\n",
					__func__, ret, len);
			return ret;
		}
		--len;
	}
	return 0;
}
/**
 * Send a single character.
 *
 * @ch Character to send
 */
static void smux_send_byte(char ch)
{
	struct smux_pkt_t *pkt;

	pkt = smux_alloc_pkt();
	if (!pkt) {
		SMUX_ERR("%s: alloc failure for byte %x\n", __func__, ch);
		return;
	}
	pkt->hdr.cmd = SMUX_CMD_BYTE;
	pkt->hdr.flags = ch;
	pkt->hdr.lcid = SMUX_BROADCAST_LCID;

	list_add_tail(&pkt->list, &smux.power_queue);
	queue_work(smux_tx_wq, &smux_tx_work);
}

/**
 * Receive a single-character packet (used for internal testing).
 *
 * @ch   Character to receive
 * @lcid Logical channel ID for packet
 *
 * @returns 0 for success
 */
static int smux_receive_byte(char ch, int lcid)
{
	struct smux_pkt_t pkt;

	smux_init_pkt(&pkt);
	pkt.hdr.lcid = lcid;
	pkt.hdr.cmd = SMUX_CMD_BYTE;
	pkt.hdr.flags = ch;

	return smux_dispatch_rx_pkt(&pkt);
}
/**
 * Queue packet for transmit.
 *
 * @pkt_ptr Packet to queue
 * @ch      Channel to queue packet on
 * @queue   Queue channel on ready list
 */
static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
		int queue)
{
	unsigned long flags;

	SMUX_DBG("smux: %s: queuing pkt %p\n", __func__, pkt_ptr);

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	list_add_tail(&pkt_ptr->list, &ch->tx_queue);
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	if (queue)
		list_channel(ch);
}

/**
 * Handle receive OPEN ACK command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	int enable_powerdown = 0;
	int tx_ready = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock(&ch->state_lock_lhb1);
	if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
				ch->local_state,
				SMUX_LCH_LOCAL_OPENED);

		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		ch->local_state = SMUX_LCH_LOCAL_OPENED;
		if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
			if (!(list_empty(&ch->tx_queue)))
				tx_ready = 1;
		}
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("smux: Remote loopback OPEN ACK received\n");
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d state 0x%x open ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock(&ch->state_lock_lhb1);

	if (enable_powerdown) {
		spin_lock(&smux.tx_lock_lha2);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("smux: %s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock(&smux.tx_lock_lha2);
	}

	if (tx_ready)
		list_channel(ch);

	return ret;
}
static int smux_handle_close_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata meta_disconnected;
	unsigned long flags;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_LOCAL_CLOSING,
				SMUX_LCH_LOCAL_CLOSED);
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		schedule_notify(lcid, SMUX_LOCAL_CLOSED, &meta_disconnected);
		if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("smux: Remote loopback CLOSE ACK received\n");
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d state 0x%x close ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}
/**
 * Handle receive OPEN command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;
	int tx_ready = 0;
	int enable_powerdown = 0;

	if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
		return smux_handle_rx_open_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
		SMUX_DBG("smux: lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_CLOSED,
				SMUX_LCH_REMOTE_OPENED);

		ch->remote_state = SMUX_LCH_REMOTE_OPENED;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		/* Send Open ACK */
		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}

		ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK;
		if (enable_powerdown)
			ack_pkt->hdr.flags |= SMUX_CMD_OPEN_POWER_COLLAPSE;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
			ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
			ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
		}
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send an Open command to the remote side to
			 * simulate our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
				if (enable_powerdown)
					ack_pkt->hdr.flags |=
						SMUX_CMD_OPEN_POWER_COLLAPSE;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				SMUX_ERR(
					"%s: Remote loopback allocation failure\n",
					__func__);
			}
		} else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		}
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d remote state 0x%x open invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (enable_powerdown) {
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("smux: %s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	if (tx_ready)
		list_channel(ch);

	return ret;
}
  1213. /**
  1214. * Handle receive CLOSE command.
  1215. *
  1216. * @pkt Received packet
  1217. *
  1218. * @returns 0 for success
  1219. */
  1220. static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
  1221. {
  1222. uint8_t lcid;
  1223. int ret;
  1224. struct smux_lch_t *ch;
  1225. struct smux_pkt_t *ack_pkt;
  1226. union notifier_metadata meta_disconnected;
  1227. unsigned long flags;
  1228. int tx_ready = 0;
  1229. if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
  1230. return smux_handle_close_ack(pkt);
  1231. lcid = pkt->hdr.lcid;
  1232. ch = &smux_lch[lcid];
  1233. meta_disconnected.disconnected.is_ssr = 0;
  1234. spin_lock_irqsave(&ch->state_lock_lhb1, flags);
  1235. if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
  1236. SMUX_DBG("smux: lcid %d remote state 0x%x -> 0x%x\n", lcid,
  1237. SMUX_LCH_REMOTE_OPENED,
  1238. SMUX_LCH_REMOTE_CLOSED);
  1239. ack_pkt = smux_alloc_pkt();
  1240. if (!ack_pkt) {
  1241. /* exit out to allow retrying this later */
  1242. ret = -ENOMEM;
  1243. goto out;
  1244. }
  1245. ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
  1246. ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
  1247. ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
  1248. ack_pkt->hdr.lcid = lcid;
  1249. ack_pkt->hdr.payload_len = 0;
  1250. ack_pkt->hdr.pad_len = 0;
  1251. smux_tx_queue(ack_pkt, ch, 0);
  1252. tx_ready = 1;
  1253. if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
  1254. /*
  1255. * Send a Close command to the remote side to simulate
  1256. * our local client doing it.
  1257. */
  1258. ack_pkt = smux_alloc_pkt();
  1259. if (ack_pkt) {
  1260. ack_pkt->hdr.lcid = lcid;
  1261. ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
  1262. ack_pkt->hdr.flags = 0;
  1263. ack_pkt->hdr.payload_len = 0;
  1264. ack_pkt->hdr.pad_len = 0;
  1265. smux_tx_queue(ack_pkt, ch, 0);
  1266. tx_ready = 1;
  1267. } else {
  1268. SMUX_ERR(
  1269. "%s: Remote loopack allocation failure\n",
  1270. __func__);
  1271. }
  1272. }
  1273. schedule_notify(lcid, SMUX_REMOTE_CLOSED, &meta_disconnected);
  1274. if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
  1275. schedule_notify(lcid, SMUX_DISCONNECTED,
  1276. &meta_disconnected);
  1277. ret = 0;
  1278. } else {
  1279. SMUX_ERR("%s: lcid %d remote state 0x%x close invalid\n",
  1280. __func__, lcid, ch->remote_state);
  1281. ret = -EINVAL;
  1282. }
  1283. out:
  1284. spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
  1285. if (tx_ready)
  1286. list_channel(ch);
  1287. return ret;
  1288. }
1289. /**
  1290. * Handle receive DATA command.
  1291. *
  1292. * @pkt Received packet
  1293. *
  1294. * @returns 0 for success
  1295. */
  1296. static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
  1297. {
  1298. uint8_t lcid;
  1299. int ret = 0;
  1300. int do_retry = 0;
  1301. int tx_ready = 0;
  1302. int tmp;
  1303. int rx_len;
  1304. struct smux_lch_t *ch;
  1305. union notifier_metadata metadata;
  1306. int remote_loopback;
  1307. struct smux_pkt_t *ack_pkt;
  1308. unsigned long flags;
  1309. if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
  1310. ret = -ENXIO;
  1311. goto out;
  1312. }
  1313. rx_len = pkt->hdr.payload_len;
  1314. if (rx_len == 0) {
  1315. ret = -EINVAL;
  1316. goto out;
  1317. }
  1318. lcid = pkt->hdr.lcid;
  1319. ch = &smux_lch[lcid];
  1320. spin_lock_irqsave(&ch->state_lock_lhb1, flags);
  1321. remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;
  1322. if (ch->local_state != SMUX_LCH_LOCAL_OPENED
  1323. && !remote_loopback) {
  1324. SMUX_ERR("smux: ch %d error data on local state 0x%x",
  1325. lcid, ch->local_state);
  1326. ret = -EIO;
  1327. spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
  1328. goto out;
  1329. }
  1330. if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
1331. SMUX_ERR("smux: ch %d error data on remote state 0x%x\n",
  1332. lcid, ch->remote_state);
  1333. ret = -EIO;
  1334. spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
  1335. goto out;
  1336. }
  1337. if (!list_empty(&ch->rx_retry_queue)) {
  1338. do_retry = 1;
  1339. if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
  1340. !ch->rx_flow_control_auto &&
  1341. ((ch->rx_retry_queue_cnt + 1) >= SMUX_RX_WM_HIGH)) {
  1342. /* need to flow control RX */
  1343. ch->rx_flow_control_auto = 1;
  1344. tx_ready |= smux_rx_flow_control_updated(ch);
  1345. schedule_notify(ch->lcid, SMUX_RX_RETRY_HIGH_WM_HIT,
  1346. NULL);
  1347. }
  1348. if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
  1349. /* retry queue full */
  1350. SMUX_ERR(
  1351. "%s: ch %d RX retry queue full; rx flow=%d\n",
  1352. __func__, lcid, ch->rx_flow_control_auto);
  1353. schedule_notify(lcid, SMUX_READ_FAIL, NULL);
  1354. ret = -ENOMEM;
  1355. spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
  1356. goto out;
  1357. }
  1358. }
  1359. spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
  1360. if (remote_loopback) {
  1361. /* Echo the data back to the remote client. */
  1362. ack_pkt = smux_alloc_pkt();
  1363. if (ack_pkt) {
  1364. ack_pkt->hdr.lcid = lcid;
  1365. ack_pkt->hdr.cmd = SMUX_CMD_DATA;
  1366. ack_pkt->hdr.flags = 0;
  1367. ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
  1368. if (ack_pkt->hdr.payload_len) {
  1369. smux_alloc_pkt_payload(ack_pkt);
  1370. memcpy(ack_pkt->payload, pkt->payload,
  1371. ack_pkt->hdr.payload_len);
  1372. }
  1373. ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
  1374. smux_tx_queue(ack_pkt, ch, 0);
  1375. tx_ready = 1;
  1376. } else {
  1377. SMUX_ERR("%s: Remote loopack allocation failure\n",
  1378. __func__);
  1379. }
  1380. } else if (!do_retry) {
  1381. /* request buffer from client */
  1382. metadata.read.pkt_priv = 0;
  1383. metadata.read.buffer = 0;
  1384. tmp = ch->get_rx_buffer(ch->priv,
  1385. (void **)&metadata.read.pkt_priv,
  1386. (void **)&metadata.read.buffer,
  1387. rx_len);
  1388. if (tmp == 0 && metadata.read.buffer) {
  1389. /* place data into RX buffer */
  1390. memcpy(metadata.read.buffer, pkt->payload,
  1391. rx_len);
  1392. metadata.read.len = rx_len;
  1393. schedule_notify(lcid, SMUX_READ_DONE,
  1394. &metadata);
  1395. } else if (tmp == -EAGAIN ||
  1396. (tmp == 0 && !metadata.read.buffer)) {
  1397. /* buffer allocation failed - add to retry queue */
  1398. do_retry = 1;
  1399. } else if (tmp < 0) {
  1400. SMUX_ERR("%s: ch %d Client RX buffer alloc failed %d\n",
  1401. __func__, lcid, tmp);
  1402. schedule_notify(lcid, SMUX_READ_FAIL, NULL);
  1403. ret = -ENOMEM;
  1404. }
  1405. }
  1406. if (do_retry) {
  1407. struct smux_rx_pkt_retry *retry;
  1408. retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
  1409. if (!retry) {
  1410. SMUX_ERR("%s: retry alloc failure\n", __func__);
  1411. ret = -ENOMEM;
  1412. schedule_notify(lcid, SMUX_READ_FAIL, NULL);
  1413. goto out;
  1414. }
  1415. INIT_LIST_HEAD(&retry->rx_retry_list);
  1416. retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;
  1417. /* copy packet */
  1418. retry->pkt = smux_alloc_pkt();
  1419. if (!retry->pkt) {
  1420. kfree(retry);
  1421. SMUX_ERR("%s: pkt alloc failure\n", __func__);
  1422. ret = -ENOMEM;
  1423. schedule_notify(lcid, SMUX_READ_FAIL, NULL);
  1424. goto out;
  1425. }
  1426. retry->pkt->hdr.lcid = lcid;
  1427. retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
  1428. retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
  1429. if (retry->pkt->hdr.payload_len) {
  1430. smux_alloc_pkt_payload(retry->pkt);
  1431. memcpy(retry->pkt->payload, pkt->payload,
  1432. retry->pkt->hdr.payload_len);
  1433. }
  1434. /* add to retry queue */
  1435. spin_lock_irqsave(&ch->state_lock_lhb1, flags);
  1436. list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
  1437. ++ch->rx_retry_queue_cnt;
  1438. if (ch->rx_retry_queue_cnt == 1)
  1439. queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
  1440. msecs_to_jiffies(retry->timeout_in_ms));
  1441. spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
  1442. }
  1443. if (tx_ready)
  1444. list_channel(ch);
  1445. out:
  1446. return ret;
  1447. }
  1448. /**
  1449. * Handle receive byte command for testing purposes.
  1450. *
  1451. * @pkt Received packet
  1452. *
  1453. * @returns 0 for success
  1454. */
  1455. static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
  1456. {
  1457. uint8_t lcid;
  1458. int ret;
  1459. struct smux_lch_t *ch;
  1460. union notifier_metadata metadata;
  1461. unsigned long flags;
  1462. if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
  1463. SMUX_ERR("%s: invalid packet or channel id\n", __func__);
  1464. return -ENXIO;
  1465. }
  1466. lcid = pkt->hdr.lcid;
  1467. ch = &smux_lch[lcid];
  1468. spin_lock_irqsave(&ch->state_lock_lhb1, flags);
  1469. if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
1470. SMUX_ERR("smux: ch %d error data on local state 0x%x\n",
  1471. lcid, ch->local_state);
  1472. ret = -EIO;
  1473. goto out;
  1474. }
  1475. if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
1476. SMUX_ERR("smux: ch %d error data on remote state 0x%x\n",
  1477. lcid, ch->remote_state);
  1478. ret = -EIO;
  1479. goto out;
  1480. }
  1481. metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
  1482. metadata.read.buffer = 0;
  1483. schedule_notify(lcid, SMUX_READ_DONE, &metadata);
  1484. ret = 0;
  1485. out:
  1486. spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
  1487. return ret;
  1488. }
  1489. /**
  1490. * Handle receive status command.
  1491. *
  1492. * @pkt Received packet
  1493. *
  1494. * @returns 0 for success
  1495. */
  1496. static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
  1497. {
  1498. uint8_t lcid;
1499. int ret = 0;
  1500. struct smux_lch_t *ch;
  1501. union notifier_metadata meta;
  1502. unsigned long flags;
  1503. int tx_ready = 0;
  1504. lcid = pkt->hdr.lcid;
  1505. ch = &smux_lch[lcid];
  1506. spin_lock_irqsave(&ch->state_lock_lhb1, flags);
  1507. meta.tiocm.tiocm_old = ch->remote_tiocm;
  1508. meta.tiocm.tiocm_new = pkt->hdr.flags;
  1509. /* update logical channel flow control */
  1510. if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
  1511. (meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
  1512. /* logical channel flow control changed */
  1513. if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
  1514. /* disabled TX */
  1515. SMUX_DBG("smux: TX Flow control enabled\n");
  1516. ch->tx_flow_control = 1;
  1517. } else {
  1518. /* re-enable channel */
  1519. SMUX_DBG("smux: TX Flow control disabled\n");
  1520. ch->tx_flow_control = 0;
  1521. tx_ready = 1;
  1522. }
  1523. }
  1524. meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
  1525. ch->remote_tiocm = pkt->hdr.flags;
  1526. meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);
  1527. /* client notification for status change */
  1528. if (IS_FULLY_OPENED(ch)) {
  1529. if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
  1530. schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
  1531. ret = 0;
  1532. }
  1533. spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
  1534. if (tx_ready)
  1535. list_channel(ch);
  1536. return ret;
  1537. }
  1538. /**
  1539. * Handle receive power command.
  1540. *
  1541. * @pkt Received packet
  1542. *
  1543. * @returns 0 for success
  1544. */
  1545. static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
  1546. {
  1547. struct smux_pkt_t *ack_pkt;
  1548. int power_down = 0;
  1549. unsigned long flags;
  1550. SMUX_PWR_PKT_RX(pkt);
  1551. spin_lock_irqsave(&smux.tx_lock_lha2, flags);
  1552. if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
  1553. /* local sleep request ack */
  1554. if (smux.power_state == SMUX_PWR_TURNING_OFF)
  1555. /* Power-down complete, turn off UART */
  1556. power_down = 1;
  1557. else
  1558. SMUX_ERR("%s: sleep request ack invalid in state %d\n",
  1559. __func__, smux.power_state);
  1560. } else {
  1561. /*
  1562. * Remote sleep request
  1563. *
  1564. * Even if we have data pending, we need to transition to the
  1565. * POWER_OFF state and then perform a wakeup since the remote
  1566. * side has requested a power-down.
  1567. *
  1568. * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
  1569. * the TX thread will set the state to SMUX_PWR_TURNING_OFF
  1570. * when it sends the packet.
  1571. *
  1572. * If we are already powering down, then no ACK is sent.
  1573. */
  1574. if (smux.power_state == SMUX_PWR_ON) {
  1575. ack_pkt = smux_alloc_pkt();
  1576. if (ack_pkt) {
  1577. SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
  1578. smux.power_state,
  1579. SMUX_PWR_TURNING_OFF_FLUSH);
  1580. smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;
  1581. /* send power-down ack */
  1582. ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
  1583. ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
  1584. ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
  1585. list_add_tail(&ack_pkt->list,
  1586. &smux.power_queue);
  1587. queue_work(smux_tx_wq, &smux_tx_work);
  1588. }
  1589. } else if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH) {
  1590. /* Local power-down request still in TX queue */
  1591. SMUX_PWR("smux: %s: Power-down shortcut - no ack\n",
  1592. __func__);
  1593. smux.power_ctl_remote_req_received = 1;
  1594. } else if (smux.power_state == SMUX_PWR_TURNING_OFF) {
  1595. /*
  1596. * Local power-down request already sent to remote
  1597. * side, so this request gets treated as an ACK.
  1598. */
  1599. SMUX_PWR("smux: %s: Power-down shortcut - no ack\n",
  1600. __func__);
  1601. power_down = 1;
  1602. } else {
  1603. SMUX_ERR("%s: sleep request invalid in state %d\n",
  1604. __func__, smux.power_state);
  1605. }
  1606. }
  1607. if (power_down) {
  1608. SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
  1609. smux.power_state, SMUX_PWR_OFF_FLUSH);
  1610. smux.power_state = SMUX_PWR_OFF_FLUSH;
  1611. queue_work(smux_tx_wq, &smux_inactivity_work);
  1612. }
  1613. spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
  1614. return 0;
  1615. }
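/*
 * Illustrative power-down traces (a summary of the handshake implemented
 * above together with smux_tx_worker() and smux_inactivity_worker(); the
 * state names are the SMUX_PWR_* values used in this file):
 *
 * Local-initiated power-down:
 *	inactivity worker queues PWR_CTL (no ACK)   ON -> TURNING_OFF_FLUSH
 *	TX worker transmits the request             -> TURNING_OFF
 *	remote replies with PWR_CTL | PWR_CTL_ACK   -> OFF_FLUSH
 *	inactivity worker flushes TTY, powers off   -> OFF
 *
 * Remote-initiated power-down (handled directly above):
 *	PWR_CTL received while ON, ACK is queued    ON -> TURNING_OFF_FLUSH
 *	TX worker transmits the ACK                 -> OFF_FLUSH
 *	inactivity worker flushes TTY, powers off   -> OFF
 */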
  1616. /**
  1617. * Handle dispatching a completed packet for receive processing.
  1618. *
  1619. * @pkt Packet to process
  1620. *
  1621. * @returns 0 for success
  1622. */
  1623. static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
  1624. {
  1625. int ret = -ENXIO;
  1626. switch (pkt->hdr.cmd) {
  1627. case SMUX_CMD_OPEN_LCH:
  1628. SMUX_LOG_PKT_RX(pkt);
  1629. if (smux_assert_lch_id(pkt->hdr.lcid)) {
  1630. SMUX_ERR("%s: invalid channel id %d\n",
  1631. __func__, pkt->hdr.lcid);
  1632. break;
  1633. }
  1634. ret = smux_handle_rx_open_cmd(pkt);
  1635. break;
  1636. case SMUX_CMD_DATA:
  1637. SMUX_LOG_PKT_RX(pkt);
  1638. if (smux_assert_lch_id(pkt->hdr.lcid)) {
  1639. SMUX_ERR("%s: invalid channel id %d\n",
  1640. __func__, pkt->hdr.lcid);
  1641. break;
  1642. }
  1643. ret = smux_handle_rx_data_cmd(pkt);
  1644. break;
  1645. case SMUX_CMD_CLOSE_LCH:
  1646. SMUX_LOG_PKT_RX(pkt);
  1647. if (smux_assert_lch_id(pkt->hdr.lcid)) {
  1648. SMUX_ERR("%s: invalid channel id %d\n",
  1649. __func__, pkt->hdr.lcid);
  1650. break;
  1651. }
  1652. ret = smux_handle_rx_close_cmd(pkt);
  1653. break;
  1654. case SMUX_CMD_STATUS:
  1655. SMUX_LOG_PKT_RX(pkt);
  1656. if (smux_assert_lch_id(pkt->hdr.lcid)) {
  1657. SMUX_ERR("%s: invalid channel id %d\n",
  1658. __func__, pkt->hdr.lcid);
  1659. break;
  1660. }
  1661. ret = smux_handle_rx_status_cmd(pkt);
  1662. break;
  1663. case SMUX_CMD_PWR_CTL:
  1664. ret = smux_handle_rx_power_cmd(pkt);
  1665. break;
  1666. case SMUX_CMD_BYTE:
  1667. SMUX_LOG_PKT_RX(pkt);
  1668. ret = smux_handle_rx_byte_cmd(pkt);
  1669. break;
  1670. default:
  1671. SMUX_LOG_PKT_RX(pkt);
  1672. SMUX_ERR("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
  1673. ret = -EINVAL;
  1674. }
  1675. return ret;
  1676. }
  1677. /**
  1678. * Deserializes a packet and dispatches it to the packet receive logic.
  1679. *
  1680. * @data Raw data for one packet
  1681. * @len Length of the data
  1682. *
  1683. * @returns 0 for success
  1684. */
  1685. static int smux_deserialize(unsigned char *data, int len)
  1686. {
  1687. struct smux_pkt_t recv;
  1688. smux_init_pkt(&recv);
  1689. /*
  1690. * It may be possible to optimize this to not use the
  1691. * temporary buffer.
  1692. */
  1693. memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));
  1694. if (recv.hdr.magic != SMUX_MAGIC) {
  1695. SMUX_ERR("%s: invalid header magic\n", __func__);
  1696. return -EINVAL;
  1697. }
  1698. if (recv.hdr.payload_len)
  1699. recv.payload = data + sizeof(struct smux_hdr_t);
  1700. return smux_dispatch_rx_pkt(&recv);
  1701. }
  1702. /**
  1703. * Handle wakeup request byte.
  1704. */
  1705. static void smux_handle_wakeup_req(void)
  1706. {
  1707. unsigned long flags;
  1708. spin_lock_irqsave(&smux.tx_lock_lha2, flags);
  1709. if (smux.power_state == SMUX_PWR_OFF
  1710. || smux.power_state == SMUX_PWR_TURNING_ON) {
  1711. /* wakeup system */
  1712. SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
  1713. smux.power_state, SMUX_PWR_ON);
  1714. smux.remote_initiated_wakeup_count++;
  1715. smux.power_state = SMUX_PWR_ON;
  1716. queue_work(smux_tx_wq, &smux_wakeup_work);
  1717. queue_work(smux_tx_wq, &smux_tx_work);
  1718. queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
  1719. msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
  1720. smux_send_byte(SMUX_WAKEUP_ACK);
  1721. } else if (smux.power_state == SMUX_PWR_ON) {
  1722. smux_send_byte(SMUX_WAKEUP_ACK);
  1723. } else {
  1724. /* stale wakeup request from previous wakeup */
  1725. SMUX_PWR("smux: %s: stale Wakeup REQ in state %d\n",
  1726. __func__, smux.power_state);
  1727. }
  1728. spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
  1729. }
  1730. /**
  1731. * Handle wakeup request ack.
  1732. */
  1733. static void smux_handle_wakeup_ack(void)
  1734. {
  1735. unsigned long flags;
  1736. spin_lock_irqsave(&smux.tx_lock_lha2, flags);
  1737. if (smux.power_state == SMUX_PWR_TURNING_ON) {
  1738. /* received response to wakeup request */
  1739. SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
  1740. smux.power_state, SMUX_PWR_ON);
  1741. smux.power_state = SMUX_PWR_ON;
  1742. queue_work(smux_tx_wq, &smux_tx_work);
  1743. queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
  1744. msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
  1745. } else if (smux.power_state != SMUX_PWR_ON) {
  1746. /* invalid message */
  1747. SMUX_PWR("smux: %s: stale Wakeup REQ ACK in state %d\n",
  1748. __func__, smux.power_state);
  1749. }
  1750. spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
  1751. }
  1752. /**
  1753. * RX State machine - IDLE state processing.
  1754. *
  1755. * @data New RX data to process
  1756. * @len Length of the data
  1757. * @used Return value of length processed
  1758. * @flag Error flag - TTY_NORMAL 0 for no failure
  1759. */
  1760. static void smux_rx_handle_idle(const unsigned char *data,
  1761. int len, int *used, int flag)
  1762. {
  1763. int i;
  1764. if (flag) {
  1765. if (smux_byte_loopback)
  1766. smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
  1767. smux_byte_loopback);
  1768. SMUX_ERR("%s: TTY error 0x%x - ignoring\n", __func__, flag);
  1769. ++*used;
  1770. return;
  1771. }
  1772. for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
  1773. switch (data[i]) {
  1774. case SMUX_MAGIC_WORD1:
  1775. smux.rx_state = SMUX_RX_MAGIC;
  1776. break;
  1777. case SMUX_WAKEUP_REQ:
1778. SMUX_PWR("smux: %s: RX Wakeup REQ\n", __func__);
  1779. if (unlikely(!smux.remote_is_alive)) {
  1780. mutex_lock(&smux.mutex_lha0);
  1781. smux.remote_is_alive = 1;
  1782. mutex_unlock(&smux.mutex_lha0);
  1783. }
  1784. smux_handle_wakeup_req();
  1785. break;
  1786. case SMUX_WAKEUP_ACK:
1787. SMUX_PWR("smux: %s: RX Wakeup ACK\n", __func__);
  1788. if (unlikely(!smux.remote_is_alive)) {
  1789. mutex_lock(&smux.mutex_lha0);
  1790. smux.remote_is_alive = 1;
  1791. mutex_unlock(&smux.mutex_lha0);
  1792. }
  1793. smux_handle_wakeup_ack();
  1794. break;
  1795. default:
  1796. /* unexpected character */
  1797. if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
  1798. smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
  1799. smux_byte_loopback);
  1800. SMUX_ERR("%s: parse error 0x%02x - ignoring\n",
  1801. __func__, (unsigned)data[i]);
  1802. break;
  1803. }
  1804. }
  1805. *used = i;
  1806. }
  1807. /**
  1808. * RX State machine - Header Magic state processing.
  1809. *
  1810. * @data New RX data to process
  1811. * @len Length of the data
  1812. * @used Return value of length processed
  1813. * @flag Error flag - TTY_NORMAL 0 for no failure
  1814. */
  1815. static void smux_rx_handle_magic(const unsigned char *data,
  1816. int len, int *used, int flag)
  1817. {
  1818. int i;
  1819. if (flag) {
  1820. SMUX_ERR("%s: TTY RX error %d\n", __func__, flag);
  1821. smux_enter_reset();
  1822. smux.rx_state = SMUX_RX_FAILURE;
  1823. ++*used;
  1824. return;
  1825. }
  1826. for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
  1827. /* wait for completion of the magic */
  1828. if (data[i] == SMUX_MAGIC_WORD2) {
  1829. smux.recv_len = 0;
  1830. smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
  1831. smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
  1832. smux.rx_state = SMUX_RX_HDR;
  1833. } else {
  1834. /* unexpected / trash character */
  1835. SMUX_ERR(
  1836. "%s: rx parse error for char %c; *used=%d, len=%d\n",
  1837. __func__, data[i], *used, len);
  1838. smux.rx_state = SMUX_RX_IDLE;
  1839. }
  1840. }
  1841. *used = i;
  1842. }
  1843. /**
  1844. * RX State machine - Packet Header state processing.
  1845. *
  1846. * @data New RX data to process
  1847. * @len Length of the data
  1848. * @used Return value of length processed
  1849. * @flag Error flag - TTY_NORMAL 0 for no failure
  1850. */
  1851. static void smux_rx_handle_hdr(const unsigned char *data,
  1852. int len, int *used, int flag)
  1853. {
  1854. int i;
  1855. struct smux_hdr_t *hdr;
  1856. if (flag) {
  1857. SMUX_ERR("%s: TTY RX error %d\n", __func__, flag);
  1858. smux_enter_reset();
  1859. smux.rx_state = SMUX_RX_FAILURE;
  1860. ++*used;
  1861. return;
  1862. }
  1863. for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
  1864. smux.recv_buf[smux.recv_len++] = data[i];
  1865. if (smux.recv_len == sizeof(struct smux_hdr_t)) {
  1866. /* complete header received */
  1867. hdr = (struct smux_hdr_t *)smux.recv_buf;
  1868. smux.pkt_remain = hdr->payload_len + hdr->pad_len;
  1869. smux.rx_state = SMUX_RX_PAYLOAD;
  1870. }
  1871. }
  1872. *used = i;
  1873. }
  1874. /**
  1875. * RX State machine - Packet Payload state processing.
  1876. *
  1877. * @data New RX data to process
  1878. * @len Length of the data
  1879. * @used Return value of length processed
  1880. * @flag Error flag - TTY_NORMAL 0 for no failure
  1881. */
  1882. static void smux_rx_handle_pkt_payload(const unsigned char *data,
  1883. int len, int *used, int flag)
  1884. {
  1885. int remaining;
  1886. if (flag) {
  1887. SMUX_ERR("%s: TTY RX error %d\n", __func__, flag);
  1888. smux_enter_reset();
  1889. smux.rx_state = SMUX_RX_FAILURE;
  1890. ++*used;
  1891. return;
  1892. }
  1893. /* copy data into rx buffer */
  1894. if (smux.pkt_remain < (len - *used))
  1895. remaining = smux.pkt_remain;
  1896. else
  1897. remaining = len - *used;
  1898. memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
  1899. smux.recv_len += remaining;
  1900. smux.pkt_remain -= remaining;
  1901. *used += remaining;
  1902. if (smux.pkt_remain == 0) {
  1903. /* complete packet received */
  1904. smux_deserialize(smux.recv_buf, smux.recv_len);
  1905. smux.rx_state = SMUX_RX_IDLE;
  1906. }
  1907. }
  1908. /**
  1909. * Feed data to the receive state machine.
  1910. *
  1911. * @data Pointer to data block
  1912. * @len Length of data
  1913. * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
  1914. */
  1915. void smux_rx_state_machine(const unsigned char *data,
  1916. int len, int flag)
  1917. {
  1918. struct smux_rx_worker_data work;
  1919. work.data = data;
  1920. work.len = len;
  1921. work.flag = flag;
  1922. INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
  1923. work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);
  1924. queue_work(smux_rx_wq, &work.work);
  1925. wait_for_completion(&work.work_complete);
  1926. }
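/*
 * Illustrative walk-through of the RX state machine fed above (a sketch
 * only; the exact wire layout is defined by struct smux_hdr_t):
 *
 *	SMUX_RX_IDLE     scan for SMUX_MAGIC_WORD1, SMUX_WAKEUP_REQ or
 *	                 SMUX_WAKEUP_ACK; any other byte is logged and dropped
 *	SMUX_RX_MAGIC    expect SMUX_MAGIC_WORD2, otherwise fall back to IDLE
 *	SMUX_RX_HDR      accumulate sizeof(struct smux_hdr_t) bytes, then set
 *	                 pkt_remain = payload_len + pad_len
 *	SMUX_RX_PAYLOAD  copy pkt_remain bytes, then smux_deserialize()
 *	                 dispatches the packet and the state returns to IDLE
 */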
  1927. /**
  1928. * Returns true if the remote side has acknowledged a wakeup
  1929. * request previously, so we know that the link is alive and active.
  1930. *
  1931. * @returns true for is alive, false for not alive
  1932. */
  1933. bool smux_remote_is_active(void)
  1934. {
  1935. bool is_active = false;
  1936. mutex_lock(&smux.mutex_lha0);
  1937. if (smux.remote_is_alive)
  1938. is_active = true;
  1939. mutex_unlock(&smux.mutex_lha0);
  1940. return is_active;
  1941. }
  1942. /**
  1943. * Sends a delay command to the remote side.
  1944. *
  1945. * @ms: Time in milliseconds for the remote side to delay
  1946. *
  1947. * This command defines the delay that the remote side will use
  1948. * to slow the response time for DATA commands.
  1949. */
  1950. void smux_set_loopback_data_reply_delay(uint32_t ms)
  1951. {
  1952. struct smux_lch_t *ch = &smux_lch[SMUX_TEST_LCID];
  1953. struct smux_pkt_t *pkt;
  1954. pkt = smux_alloc_pkt();
  1955. if (!pkt) {
  1956. pr_err("%s: unable to allocate packet\n", __func__);
  1957. return;
  1958. }
  1959. pkt->hdr.lcid = ch->lcid;
  1960. pkt->hdr.cmd = SMUX_CMD_DELAY;
  1961. pkt->hdr.flags = 0;
  1962. pkt->hdr.payload_len = sizeof(uint32_t);
  1963. pkt->hdr.pad_len = 0;
  1964. if (smux_alloc_pkt_payload(pkt)) {
  1965. pr_err("%s: unable to allocate payload\n", __func__);
  1966. smux_free_pkt(pkt);
  1967. return;
  1968. }
  1969. memcpy(pkt->payload, &ms, sizeof(uint32_t));
  1970. smux_tx_queue(pkt, ch, 1);
  1971. }
  1972. /**
  1973. * Retrieve wakeup counts.
  1974. *
  1975. * @local_cnt: Pointer to local wakeup count
  1976. * @remote_cnt: Pointer to remote wakeup count
  1977. */
  1978. void smux_get_wakeup_counts(int *local_cnt, int *remote_cnt)
  1979. {
  1980. unsigned long flags;
  1981. spin_lock_irqsave(&smux.tx_lock_lha2, flags);
  1982. if (local_cnt)
  1983. *local_cnt = smux.local_initiated_wakeup_count;
  1984. if (remote_cnt)
  1985. *remote_cnt = smux.remote_initiated_wakeup_count;
  1986. spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
  1987. }
  1988. /**
  1989. * Add channel to transmit-ready list and trigger transmit worker.
  1990. *
  1991. * @ch Channel to add
  1992. */
  1993. static void list_channel(struct smux_lch_t *ch)
  1994. {
  1995. unsigned long flags;
  1996. SMUX_DBG("smux: %s: listing channel %d\n",
  1997. __func__, ch->lcid);
  1998. spin_lock_irqsave(&smux.tx_lock_lha2, flags);
  1999. spin_lock(&ch->tx_lock_lhb2);
  2000. smux.tx_activity_flag = 1;
  2001. if (list_empty(&ch->tx_ready_list))
  2002. list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
  2003. spin_unlock(&ch->tx_lock_lhb2);
  2004. spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
  2005. queue_work(smux_tx_wq, &smux_tx_work);
  2006. }
  2007. /**
  2008. * Transmit packet on correct transport and then perform client
  2009. * notification.
  2010. *
  2011. * @ch Channel to transmit on
  2012. * @pkt Packet to transmit
  2013. */
  2014. static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
  2015. {
  2016. union notifier_metadata meta_write;
  2017. int ret;
  2018. if (ch && pkt) {
  2019. SMUX_LOG_PKT_TX(pkt);
  2020. if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
  2021. ret = smux_tx_loopback(pkt);
  2022. else
  2023. ret = smux_tx_tty(pkt);
  2024. if (pkt->hdr.cmd == SMUX_CMD_DATA) {
  2025. /* notify write-done */
  2026. meta_write.write.pkt_priv = pkt->priv;
  2027. meta_write.write.buffer = pkt->payload;
  2028. meta_write.write.len = pkt->hdr.payload_len;
  2029. if (ret >= 0) {
2030. SMUX_DBG("smux: %s: PKT write done\n", __func__);
  2031. schedule_notify(ch->lcid, SMUX_WRITE_DONE,
  2032. &meta_write);
  2033. } else {
  2034. SMUX_ERR("%s: failed to write pkt %d\n",
  2035. __func__, ret);
  2036. schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
  2037. &meta_write);
  2038. }
  2039. }
  2040. }
  2041. }
  2042. /**
  2043. * Flush pending TTY TX data.
  2044. */
  2045. static void smux_flush_tty(void)
  2046. {
  2047. mutex_lock(&smux.mutex_lha0);
  2048. if (!smux.tty) {
  2049. SMUX_ERR("%s: ldisc not loaded\n", __func__);
  2050. mutex_unlock(&smux.mutex_lha0);
  2051. return;
  2052. }
  2053. tty_wait_until_sent(smux.tty,
  2054. msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
  2055. if (tty_chars_in_buffer(smux.tty) > 0)
  2056. SMUX_ERR("%s: unable to flush UART queue\n", __func__);
  2057. mutex_unlock(&smux.mutex_lha0);
  2058. }
  2059. /**
  2060. * Purge TX queue for logical channel.
  2061. *
  2062. * @ch Logical channel pointer
  2063. * @is_ssr 1 = this is a subsystem restart purge
  2064. *
  2065. * Must be called with the following spinlocks locked:
  2066. * state_lock_lhb1
  2067. * tx_lock_lhb2
  2068. */
  2069. static void smux_purge_ch_tx_queue(struct smux_lch_t *ch, int is_ssr)
  2070. {
  2071. struct smux_pkt_t *pkt;
  2072. int send_disconnect = 0;
  2073. struct smux_pkt_t *pkt_tmp;
  2074. int is_state_pkt;
  2075. list_for_each_entry_safe(pkt, pkt_tmp, &ch->tx_queue, list) {
  2076. is_state_pkt = 0;
  2077. if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
  2078. if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK) {
  2079. /* Open ACK must still be sent */
  2080. is_state_pkt = 1;
  2081. } else {
  2082. /* Open never sent -- force to closed state */
  2083. ch->local_state = SMUX_LCH_LOCAL_CLOSED;
  2084. send_disconnect = 1;
  2085. }
  2086. } else if (pkt->hdr.cmd == SMUX_CMD_CLOSE_LCH) {
  2087. if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
  2088. is_state_pkt = 1;
  2089. if (!send_disconnect)
  2090. is_state_pkt = 1;
  2091. } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
  2092. /* Notify client of failed write */
  2093. union notifier_metadata meta_write;
  2094. meta_write.write.pkt_priv = pkt->priv;
  2095. meta_write.write.buffer = pkt->payload;
  2096. meta_write.write.len = pkt->hdr.payload_len;
  2097. schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
  2098. }
  2099. if (!is_state_pkt || is_ssr) {
  2100. list_del(&pkt->list);
  2101. smux_free_pkt(pkt);
  2102. }
  2103. }
  2104. if (send_disconnect) {
  2105. union notifier_metadata meta_disconnected;
  2106. meta_disconnected.disconnected.is_ssr = smux.in_reset;
  2107. schedule_notify(ch->lcid, SMUX_LOCAL_CLOSED,
  2108. &meta_disconnected);
  2109. if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
  2110. schedule_notify(ch->lcid, SMUX_DISCONNECTED,
  2111. &meta_disconnected);
  2112. }
  2113. }
  2114. /**
  2115. * Power-up the UART.
  2116. *
  2117. * Must be called with smux.mutex_lha0 already locked.
  2118. */
  2119. static void smux_uart_power_on_atomic(void)
  2120. {
  2121. struct uart_state *state;
  2122. if (!smux.tty || !smux.tty->driver_data) {
  2123. SMUX_ERR("%s: unable to find UART port for tty %p\n",
  2124. __func__, smux.tty);
  2125. return;
  2126. }
  2127. state = smux.tty->driver_data;
  2128. msm_hs_request_clock_on(state->uart_port);
  2129. }
  2130. /**
  2131. * Power-up the UART.
  2132. */
  2133. static void smux_uart_power_on(void)
  2134. {
  2135. mutex_lock(&smux.mutex_lha0);
  2136. smux_uart_power_on_atomic();
  2137. mutex_unlock(&smux.mutex_lha0);
  2138. }
  2139. /**
  2140. * Power down the UART.
  2141. *
  2142. * Must be called with mutex_lha0 locked.
  2143. */
  2144. static void smux_uart_power_off_atomic(void)
  2145. {
  2146. struct uart_state *state;
  2147. if (!smux.tty || !smux.tty->driver_data) {
  2148. SMUX_ERR("%s: unable to find UART port for tty %p\n",
  2149. __func__, smux.tty);
2150. /* leave mutex_lha0 held; the caller releases it */
  2151. return;
  2152. }
  2153. state = smux.tty->driver_data;
  2154. msm_hs_request_clock_off(state->uart_port);
  2155. }
  2156. /**
  2157. * Power down the UART.
  2158. */
  2159. static void smux_uart_power_off(void)
  2160. {
  2161. mutex_lock(&smux.mutex_lha0);
  2162. smux_uart_power_off_atomic();
  2163. mutex_unlock(&smux.mutex_lha0);
  2164. }
  2165. /**
  2166. * TX Wakeup Worker
  2167. *
  2168. * @work Not used
  2169. *
  2170. * Do an exponential back-off wakeup sequence with a maximum period
  2171. * of approximately 1 second (1 << 20 microseconds).
  2172. */
  2173. static void smux_wakeup_worker(struct work_struct *work)
  2174. {
  2175. unsigned long flags;
  2176. unsigned wakeup_delay;
  2177. if (smux.in_reset)
  2178. return;
  2179. spin_lock_irqsave(&smux.tx_lock_lha2, flags);
  2180. if (smux.power_state == SMUX_PWR_ON) {
  2181. /* wakeup complete */
  2182. smux.pwr_wakeup_delay_us = 1;
  2183. spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
  2184. SMUX_DBG("smux: %s: wakeup complete\n", __func__);
  2185. /*
  2186. * Cancel any pending retry. This avoids a race condition with
  2187. * a new power-up request because:
  2188. * 1) this worker doesn't modify the state
  2189. * 2) this worker is processed on the same single-threaded
  2190. * workqueue as new TX wakeup requests
  2191. */
  2192. cancel_delayed_work(&smux_wakeup_delayed_work);
  2193. queue_work(smux_tx_wq, &smux_tx_work);
  2194. } else if (smux.power_state == SMUX_PWR_TURNING_ON) {
  2195. /* retry wakeup */
  2196. wakeup_delay = smux.pwr_wakeup_delay_us;
  2197. smux.pwr_wakeup_delay_us <<= 1;
  2198. if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
  2199. smux.pwr_wakeup_delay_us =
  2200. SMUX_WAKEUP_DELAY_MAX;
  2201. spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
  2202. SMUX_PWR("smux: %s: triggering wakeup\n", __func__);
  2203. smux_send_byte(SMUX_WAKEUP_REQ);
  2204. if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
  2205. SMUX_DBG("smux: %s: sleeping for %u us\n", __func__,
  2206. wakeup_delay);
  2207. usleep_range(wakeup_delay, 2*wakeup_delay);
  2208. queue_work(smux_tx_wq, &smux_wakeup_work);
  2209. } else {
  2210. /* schedule delayed work */
  2211. SMUX_DBG(
  2212. "smux: %s: scheduling delayed wakeup in %u ms\n",
  2213. __func__, wakeup_delay / 1000);
  2214. queue_delayed_work(smux_tx_wq,
  2215. &smux_wakeup_delayed_work,
  2216. msecs_to_jiffies(wakeup_delay / 1000));
  2217. }
  2218. } else {
  2219. /* wakeup aborted */
  2220. smux.pwr_wakeup_delay_us = 1;
  2221. spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
  2222. SMUX_PWR("smux: %s: wakeup aborted\n", __func__);
  2223. cancel_delayed_work(&smux_wakeup_delayed_work);
  2224. }
  2225. }
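/*
 * Worked example of the back-off above (assuming SMUX_WAKEUP_DELAY_MAX is
 * the ~1 second (1 << 20 us) cap referenced in the function comment and
 * SMUX_WAKEUP_DELAY_MIN is the threshold defined earlier in this file):
 *
 *	retry:    1    2    3    4   ...  n
 *	delay:  1us  2us  4us  8us   ...  min(2^(n-1) us, SMUX_WAKEUP_DELAY_MAX)
 *
 * Delays below SMUX_WAKEUP_DELAY_MIN are slept inline with usleep_range()
 * and the worker is requeued immediately; longer delays are rounded down
 * to whole milliseconds and rescheduled as delayed work.
 */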
  2226. /**
  2227. * Inactivity timeout worker. Periodically scheduled when link is active.
  2228. * When it detects inactivity, it will power-down the UART link.
  2229. *
  2230. * @work Work structure (not used)
  2231. */
  2232. static void smux_inactivity_worker(struct work_struct *work)
  2233. {
  2234. struct smux_pkt_t *pkt;
  2235. unsigned long flags;
  2236. if (smux.in_reset)
  2237. return;
  2238. spin_lock_irqsave(&smux.rx_lock_lha1, flags);
  2239. spin_lock(&smux.tx_lock_lha2);
  2240. if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
  2241. /* no activity */
  2242. if (smux.powerdown_enabled) {
  2243. if (smux.power_state == SMUX_PWR_ON) {
  2244. /* start power-down sequence */
  2245. pkt = smux_alloc_pkt();
  2246. if (pkt) {
  2247. SMUX_PWR(
  2248. "smux: %s: Power %d->%d\n", __func__,
  2249. smux.power_state,
  2250. SMUX_PWR_TURNING_OFF_FLUSH);
  2251. smux.power_state =
  2252. SMUX_PWR_TURNING_OFF_FLUSH;
  2253. /* send power-down request */
  2254. pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
  2255. pkt->hdr.flags = 0;
  2256. pkt->hdr.lcid = SMUX_BROADCAST_LCID;
  2257. list_add_tail(&pkt->list,
  2258. &smux.power_queue);
  2259. queue_work(smux_tx_wq, &smux_tx_work);
  2260. } else {
  2261. SMUX_ERR("%s: packet alloc failed\n",
  2262. __func__);
  2263. }
  2264. }
  2265. }
  2266. }
  2267. smux.tx_activity_flag = 0;
  2268. smux.rx_activity_flag = 0;
  2269. if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
  2270. /* ready to power-down the UART */
  2271. SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
  2272. smux.power_state, SMUX_PWR_OFF);
  2273. smux.power_state = SMUX_PWR_OFF;
  2274. /* if data is pending, schedule a new wakeup */
  2275. if (!list_empty(&smux.lch_tx_ready_list) ||
  2276. !list_empty(&smux.power_queue))
  2277. queue_work(smux_tx_wq, &smux_tx_work);
  2278. spin_unlock(&smux.tx_lock_lha2);
  2279. spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
  2280. /* flush UART output queue and power down */
  2281. smux_flush_tty();
  2282. smux_uart_power_off();
  2283. } else {
  2284. spin_unlock(&smux.tx_lock_lha2);
  2285. spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
  2286. }
  2287. /* reschedule inactivity worker */
  2288. if (smux.power_state != SMUX_PWR_OFF)
  2289. queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
  2290. msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
  2291. }
  2292. /**
  2293. * Remove RX retry packet from channel and free it.
  2294. *
  2295. * @ch Channel for retry packet
  2296. * @retry Retry packet to remove
  2297. *
  2298. * @returns 1 if flow control updated; 0 otherwise
  2299. *
  2300. * Must be called with state_lock_lhb1 locked.
  2301. */
  2302. int smux_remove_rx_retry(struct smux_lch_t *ch,
  2303. struct smux_rx_pkt_retry *retry)
  2304. {
  2305. int tx_ready = 0;
  2306. list_del(&retry->rx_retry_list);
  2307. --ch->rx_retry_queue_cnt;
  2308. smux_free_pkt(retry->pkt);
  2309. kfree(retry);
  2310. if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
  2311. (ch->rx_retry_queue_cnt <= SMUX_RX_WM_LOW) &&
  2312. ch->rx_flow_control_auto) {
  2313. ch->rx_flow_control_auto = 0;
  2314. smux_rx_flow_control_updated(ch);
  2315. schedule_notify(ch->lcid, SMUX_RX_RETRY_LOW_WM_HIT, NULL);
  2316. tx_ready = 1;
  2317. }
  2318. return tx_ready;
  2319. }
  2320. /**
  2321. * RX worker handles all receive operations.
  2322. *
2323. * @work Work structure contained in struct smux_rx_worker_data
  2324. */
  2325. static void smux_rx_worker(struct work_struct *work)
  2326. {
  2327. unsigned long flags;
  2328. int used;
  2329. int initial_rx_state;
  2330. struct smux_rx_worker_data *w;
  2331. const unsigned char *data;
  2332. int len;
  2333. int flag;
  2334. w = container_of(work, struct smux_rx_worker_data, work);
  2335. data = w->data;
  2336. len = w->len;
  2337. flag = w->flag;
  2338. spin_lock_irqsave(&smux.rx_lock_lha1, flags);
  2339. smux.rx_activity_flag = 1;
  2340. spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
  2341. SMUX_DBG("smux: %s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
  2342. used = 0;
  2343. do {
  2344. if (smux.in_reset) {
  2345. SMUX_DBG("smux: %s: abort RX due to reset\n", __func__);
  2346. smux.rx_state = SMUX_RX_IDLE;
  2347. break;
  2348. }
  2349. SMUX_DBG("smux: %s: state %d; %d of %d\n",
  2350. __func__, smux.rx_state, used, len);
  2351. initial_rx_state = smux.rx_state;
  2352. switch (smux.rx_state) {
  2353. case SMUX_RX_IDLE:
  2354. smux_rx_handle_idle(data, len, &used, flag);
  2355. break;
  2356. case SMUX_RX_MAGIC:
  2357. smux_rx_handle_magic(data, len, &used, flag);
  2358. break;
  2359. case SMUX_RX_HDR:
  2360. smux_rx_handle_hdr(data, len, &used, flag);
  2361. break;
  2362. case SMUX_RX_PAYLOAD:
  2363. smux_rx_handle_pkt_payload(data, len, &used, flag);
  2364. break;
  2365. default:
  2366. SMUX_DBG("smux: %s: invalid state %d\n",
  2367. __func__, smux.rx_state);
  2368. smux.rx_state = SMUX_RX_IDLE;
  2369. break;
  2370. }
  2371. } while (used < len || smux.rx_state != initial_rx_state);
  2372. complete(&w->work_complete);
  2373. }
  2374. /**
  2375. * RX Retry worker handles retrying get_rx_buffer calls that previously failed
  2376. * because the client was not ready (-EAGAIN).
  2377. *
  2378. * @work Work structure contained in smux_lch_t structure
  2379. */
  2380. static void smux_rx_retry_worker(struct work_struct *work)
  2381. {
  2382. struct smux_lch_t *ch;
  2383. struct smux_rx_pkt_retry *retry;
  2384. union notifier_metadata metadata;
  2385. int tmp;
  2386. unsigned long flags;
  2387. int immediate_retry = 0;
  2388. int tx_ready = 0;
  2389. ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
  2390. /* get next retry packet */
  2391. spin_lock_irqsave(&ch->state_lock_lhb1, flags);
  2392. if ((ch->local_state != SMUX_LCH_LOCAL_OPENED) || smux.in_reset) {
  2393. /* port has been closed - remove all retries */
  2394. while (!list_empty(&ch->rx_retry_queue)) {
  2395. retry = list_first_entry(&ch->rx_retry_queue,
  2396. struct smux_rx_pkt_retry,
  2397. rx_retry_list);
  2398. (void)smux_remove_rx_retry(ch, retry);
  2399. }
  2400. }
  2401. if (list_empty(&ch->rx_retry_queue)) {
  2402. SMUX_DBG("smux: %s: retry list empty for channel %d\n",
  2403. __func__, ch->lcid);
  2404. spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
  2405. return;
  2406. }
  2407. retry = list_first_entry(&ch->rx_retry_queue,
  2408. struct smux_rx_pkt_retry,
  2409. rx_retry_list);
  2410. spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
  2411. SMUX_DBG("smux: %s: ch %d retrying rx pkt %p\n",
  2412. __func__, ch->lcid, retry);
  2413. metadata.read.pkt_priv = 0;
  2414. metadata.read.buffer = 0;
  2415. tmp = ch->get_rx_buffer(ch->priv,
  2416. (void **)&metadata.read.pkt_priv,
  2417. (void **)&metadata.read.buffer,
  2418. retry->pkt->hdr.payload_len);
  2419. if (tmp == 0 && metadata.read.buffer) {
  2420. /* have valid RX buffer */
  2421. memcpy(metadata.read.buffer, retry->pkt->payload,
  2422. retry->pkt->hdr.payload_len);
  2423. metadata.read.len = retry->pkt->hdr.payload_len;
  2424. spin_lock_irqsave(&ch->state_lock_lhb1, flags);
  2425. tx_ready = smux_remove_rx_retry(ch, retry);
  2426. spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
  2427. schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
  2428. if (tx_ready)
  2429. list_channel(ch);
  2430. immediate_retry = 1;
  2431. } else if (tmp == -EAGAIN ||
  2432. (tmp == 0 && !metadata.read.buffer)) {
  2433. /* retry again */
  2434. retry->timeout_in_ms <<= 1;
  2435. if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
  2436. /* timed out */
  2437. SMUX_ERR("%s: ch %d RX retry client timeout\n",
  2438. __func__, ch->lcid);
  2439. spin_lock_irqsave(&ch->state_lock_lhb1, flags);
  2440. tx_ready = smux_remove_rx_retry(ch, retry);
  2441. spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
  2442. schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
  2443. if (tx_ready)
  2444. list_channel(ch);
  2445. }
  2446. } else {
  2447. /* client error - drop packet */
  2448. SMUX_ERR("%s: ch %d RX retry client failed (%d)\n",
  2449. __func__, ch->lcid, tmp);
  2450. spin_lock_irqsave(&ch->state_lock_lhb1, flags);
  2451. tx_ready = smux_remove_rx_retry(ch, retry);
  2452. spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
  2453. schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
  2454. if (tx_ready)
  2455. list_channel(ch);
  2456. }
  2457. /* schedule next retry */
  2458. spin_lock_irqsave(&ch->state_lock_lhb1, flags);
  2459. if (!list_empty(&ch->rx_retry_queue)) {
  2460. retry = list_first_entry(&ch->rx_retry_queue,
  2461. struct smux_rx_pkt_retry,
  2462. rx_retry_list);
  2463. if (immediate_retry)
  2464. queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
  2465. else
  2466. queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
  2467. msecs_to_jiffies(retry->timeout_in_ms));
  2468. }
  2469. spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
  2470. }
  2471. /**
  2472. * Transmit worker handles serializing and transmitting packets onto the
  2473. * underlying transport.
  2474. *
  2475. * @work Work structure (not used)
  2476. */
  2477. static void smux_tx_worker(struct work_struct *work)
  2478. {
  2479. struct smux_pkt_t *pkt;
  2480. struct smux_lch_t *ch;
  2481. unsigned low_wm_notif;
  2482. unsigned lcid;
  2483. unsigned long flags;
  2484. /*
  2485. * Transmit packets in round-robin fashion based upon ready
  2486. * channels.
  2487. *
  2488. * To eliminate the need to hold a lock for the entire
  2489. * iteration through the channel ready list, the head of the
  2490. * ready-channel list is always the next channel to be
  2491. * processed. To send a packet, the first valid packet in
  2492. * the head channel is removed and the head channel is then
  2493. * rescheduled at the end of the queue by removing it and
  2494. * inserting after the tail. The locks can then be released
  2495. * while the packet is processed.
  2496. */
  2497. while (!smux.in_reset) {
  2498. pkt = NULL;
  2499. low_wm_notif = 0;
  2500. spin_lock_irqsave(&smux.tx_lock_lha2, flags);
  2501. /* handle wakeup if needed */
  2502. if (smux.power_state == SMUX_PWR_OFF) {
  2503. if (!list_empty(&smux.lch_tx_ready_list) ||
  2504. !list_empty(&smux.power_queue)) {
  2505. /* data to transmit, do wakeup */
  2506. SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
  2507. smux.power_state,
  2508. SMUX_PWR_TURNING_ON);
  2509. smux.local_initiated_wakeup_count++;
  2510. smux.power_state = SMUX_PWR_TURNING_ON;
  2511. spin_unlock_irqrestore(&smux.tx_lock_lha2,
  2512. flags);
  2513. queue_work(smux_tx_wq, &smux_wakeup_work);
  2514. } else {
  2515. /* no activity -- stay asleep */
  2516. spin_unlock_irqrestore(&smux.tx_lock_lha2,
  2517. flags);
  2518. }
  2519. break;
  2520. }
  2521. /* process any pending power packets */
  2522. if (!list_empty(&smux.power_queue)) {
  2523. pkt = list_first_entry(&smux.power_queue,
  2524. struct smux_pkt_t, list);
  2525. list_del(&pkt->list);
  2526. spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
  2527. /* Adjust power state if this is a flush command */
  2528. spin_lock_irqsave(&smux.tx_lock_lha2, flags);
  2529. if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
  2530. pkt->hdr.cmd == SMUX_CMD_PWR_CTL) {
  2531. if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK ||
  2532. smux.power_ctl_remote_req_received) {
  2533. /*
  2534. * Sending remote power-down request ACK
  2535. * or sending local power-down request
  2536. * and we already received a remote
  2537. * power-down request.
  2538. */
  2539. SMUX_PWR(
  2540. "smux: %s: Power %d->%d\n", __func__,
  2541. smux.power_state,
  2542. SMUX_PWR_OFF_FLUSH);
  2543. smux.power_state = SMUX_PWR_OFF_FLUSH;
  2544. smux.power_ctl_remote_req_received = 0;
  2545. queue_work(smux_tx_wq,
  2546. &smux_inactivity_work);
  2547. } else {
  2548. /* sending local power-down request */
  2549. SMUX_PWR(
  2550. "smux: %s: Power %d->%d\n", __func__,
  2551. smux.power_state,
  2552. SMUX_PWR_TURNING_OFF);
  2553. smux.power_state = SMUX_PWR_TURNING_OFF;
  2554. }
  2555. }
  2556. spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
  2557. /* send the packet */
  2558. smux_uart_power_on();
  2559. smux.tx_activity_flag = 1;
  2560. SMUX_PWR_PKT_TX(pkt);
  2561. if (!smux_byte_loopback) {
  2562. smux_tx_tty(pkt);
  2563. smux_flush_tty();
  2564. } else {
  2565. smux_tx_loopback(pkt);
  2566. }
  2567. smux_free_pkt(pkt);
  2568. continue;
  2569. }
  2570. /* get the next ready channel */
  2571. if (list_empty(&smux.lch_tx_ready_list)) {
  2572. /* no ready channels */
  2573. SMUX_DBG("smux: %s: no more ready channels, exiting\n",
  2574. __func__);
  2575. spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
  2576. break;
  2577. }
  2578. smux.tx_activity_flag = 1;
  2579. if (smux.power_state != SMUX_PWR_ON) {
  2580. /* channel not ready to transmit */
  2581. SMUX_DBG("smux: %s: waiting for link up (state %d)\n",
  2582. __func__,
  2583. smux.power_state);
  2584. spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
  2585. break;
  2586. }
  2587. /* get the next packet to send and rotate channel list */
  2588. ch = list_first_entry(&smux.lch_tx_ready_list,
  2589. struct smux_lch_t,
  2590. tx_ready_list);
  2591. spin_lock(&ch->state_lock_lhb1);
  2592. spin_lock(&ch->tx_lock_lhb2);
  2593. if (!list_empty(&ch->tx_queue)) {
  2594. /*
  2595. * If remote TX flow control is enabled or
  2596. * the channel is not fully opened, then only
  2597. * send command packets.
  2598. */
  2599. if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
  2600. struct smux_pkt_t *curr;
  2601. list_for_each_entry(curr, &ch->tx_queue, list) {
  2602. if (curr->hdr.cmd != SMUX_CMD_DATA) {
  2603. pkt = curr;
  2604. break;
  2605. }
  2606. }
  2607. } else {
  2608. /* get next cmd/data packet to send */
  2609. pkt = list_first_entry(&ch->tx_queue,
  2610. struct smux_pkt_t, list);
  2611. }
  2612. }
  2613. if (pkt) {
  2614. list_del(&pkt->list);
  2615. /* update packet stats */
  2616. if (pkt->hdr.cmd == SMUX_CMD_DATA) {
  2617. --ch->tx_pending_data_cnt;
  2618. if (ch->notify_lwm &&
  2619. ch->tx_pending_data_cnt
  2620. <= SMUX_TX_WM_LOW) {
  2621. ch->notify_lwm = 0;
  2622. low_wm_notif = 1;
  2623. }
  2624. }
  2625. /* advance to the next ready channel */
  2626. list_rotate_left(&smux.lch_tx_ready_list);
  2627. } else {
  2628. /* no data in channel to send, remove from ready list */
  2629. list_del(&ch->tx_ready_list);
  2630. INIT_LIST_HEAD(&ch->tx_ready_list);
  2631. }
  2632. lcid = ch->lcid;
  2633. spin_unlock(&ch->tx_lock_lhb2);
  2634. spin_unlock(&ch->state_lock_lhb1);
  2635. spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
  2636. if (low_wm_notif)
  2637. schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
  2638. /* send the packet */
  2639. smux_tx_pkt(ch, pkt);
  2640. smux_free_pkt(pkt);
  2641. }
  2642. }
  2643. /**
  2644. * Update the RX flow control (sent in the TIOCM Status command).
  2645. *
  2646. * @ch Channel for update
  2647. *
  2648. * @returns 1 for updated, 0 for not updated
  2649. *
  2650. * Must be called with ch->state_lock_lhb1 locked.
  2651. */
  2652. static int smux_rx_flow_control_updated(struct smux_lch_t *ch)
  2653. {
  2654. int updated = 0;
  2655. int prev_state;
  2656. prev_state = ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL;
  2657. if (ch->rx_flow_control_client || ch->rx_flow_control_auto)
  2658. ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
  2659. else
  2660. ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
  2661. if (prev_state != (ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL)) {
  2662. smux_send_status_cmd(ch);
  2663. updated = 1;
  2664. }
  2665. return updated;
  2666. }
  2667. /**
  2668. * Flush all SMUX workqueues.
  2669. *
  2670. * This sets the reset bit to abort any processing loops and then
  2671. * flushes the workqueues to ensure that no new pending work is
  2672. * running. Do not call with any locks used by workers held as
  2673. * this will result in a deadlock.
  2674. */
  2675. static void smux_flush_workqueues(void)
  2676. {
  2677. smux.in_reset = 1;
  2678. SMUX_DBG("smux: %s: flushing tx wq\n", __func__);
  2679. flush_workqueue(smux_tx_wq);
  2680. SMUX_DBG("smux: %s: flushing rx wq\n", __func__);
  2681. flush_workqueue(smux_rx_wq);
  2682. SMUX_DBG("smux: %s: flushing notify wq\n", __func__);
  2683. flush_workqueue(smux_notify_wq);
  2684. }
  2685. /**********************************************************************/
  2686. /* Kernel API */
  2687. /**********************************************************************/
  2688. /**
  2689. * Set or clear channel option using the SMUX_CH_OPTION_* channel
  2690. * flags.
  2691. *
  2692. * @lcid Logical channel ID
  2693. * @set Options to set
  2694. * @clear Options to clear
  2695. *
  2696. * @returns 0 for success, < 0 for failure
  2697. */
  2698. int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
  2699. {
  2700. unsigned long flags;
  2701. struct smux_lch_t *ch;
  2702. int tx_ready = 0;
  2703. int ret = 0;
  2704. if (smux_assert_lch_id(lcid))
  2705. return -ENXIO;
  2706. ch = &smux_lch[lcid];
  2707. spin_lock_irqsave(&ch->state_lock_lhb1, flags);
  2708. /* Local loopback mode */
  2709. if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
  2710. ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
  2711. if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
  2712. ch->local_mode = SMUX_LCH_MODE_NORMAL;
  2713. /* Remote loopback mode */
  2714. if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
  2715. ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
  2716. if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
  2717. ch->local_mode = SMUX_LCH_MODE_NORMAL;
  2718. /* RX Flow control */
  2719. if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
  2720. ch->rx_flow_control_client = 1;
  2721. tx_ready |= smux_rx_flow_control_updated(ch);
  2722. }
  2723. if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
  2724. ch->rx_flow_control_client = 0;
  2725. tx_ready |= smux_rx_flow_control_updated(ch);
  2726. }
  2727. /* Auto RX Flow Control */
  2728. if (set & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
  2729. SMUX_DBG("smux: %s: auto rx flow control option enabled\n",
  2730. __func__);
  2731. ch->options |= SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
  2732. }
  2733. if (clear & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
  2734. SMUX_DBG("smux: %s: auto rx flow control option disabled\n",
  2735. __func__);
  2736. ch->options &= ~SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
  2737. ch->rx_flow_control_auto = 0;
  2738. tx_ready |= smux_rx_flow_control_updated(ch);
  2739. }
  2740. spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
  2741. if (tx_ready)
  2742. list_channel(ch);
  2743. return ret;
  2744. }
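/*
 * Example (an illustrative sketch only; TEST_LCID is a hypothetical channel
 * ID chosen by the client):
 *
 *	// Put the channel into remote-loopback mode and enable automatic
 *	// RX flow control before opening it.
 *	ret = msm_smux_set_ch_option(TEST_LCID,
 *			SMUX_CH_OPTION_REMOTE_LOOPBACK |
 *			SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP,
 *			0);
 */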
  2745. /**
  2746. * Starts the opening sequence for a logical channel.
  2747. *
  2748. * @lcid Logical channel ID
  2749. * @priv Free for client usage
  2750. * @notify Event notification function
  2751. * @get_rx_buffer Function used to provide a receive buffer to SMUX
  2752. *
  2753. * @returns 0 for success, <0 otherwise
  2754. *
  2755. * The local channel state must be closed (either not previously
  2756. * opened or msm_smux_close() has been called and the SMUX_LOCAL_CLOSED
  2757. * notification has been received).
  2758. *
  2759. * If open is called before the SMUX_LOCAL_CLOSED has been received,
  2760. * then the function will return -EAGAIN and the client will need to
  2761. * retry the open later.
  2762. *
  2763. * Once the remote side is opened, the client will receive a SMUX_CONNECTED
  2764. * event.
  2765. */
  2766. int msm_smux_open(uint8_t lcid, void *priv,
  2767. void (*notify)(void *priv, int event_type, const void *metadata),
  2768. int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
  2769. int size))
  2770. {
  2771. int ret;
  2772. struct smux_lch_t *ch;
  2773. struct smux_pkt_t *pkt;
  2774. int tx_ready = 0;
  2775. unsigned long flags;
  2776. if (smux_assert_lch_id(lcid))
  2777. return -ENXIO;
  2778. ch = &smux_lch[lcid];
  2779. spin_lock_irqsave(&ch->state_lock_lhb1, flags);
  2780. if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
  2781. ret = -EAGAIN;
  2782. goto out;
  2783. }
  2784. if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
  2785. SMUX_ERR("%s: open lcid %d local state %x invalid\n",
  2786. __func__, lcid, ch->local_state);
  2787. ret = -EINVAL;
  2788. goto out;
  2789. }
  2790. SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
  2791. ch->local_state,
  2792. SMUX_LCH_LOCAL_OPENING);
  2793. ch->rx_flow_control_auto = 0;
  2794. ch->local_state = SMUX_LCH_LOCAL_OPENING;
  2795. ch->priv = priv;
  2796. ch->notify = notify;
  2797. ch->get_rx_buffer = get_rx_buffer;
  2798. ret = 0;
  2799. /* Send Open Command */
  2800. pkt = smux_alloc_pkt();
  2801. if (!pkt) {
  2802. ret = -ENOMEM;
  2803. goto out;
  2804. }
  2805. pkt->hdr.magic = SMUX_MAGIC;
  2806. pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
  2807. pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
  2808. if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
  2809. pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
  2810. pkt->hdr.lcid = lcid;
  2811. pkt->hdr.payload_len = 0;
  2812. pkt->hdr.pad_len = 0;
  2813. smux_tx_queue(pkt, ch, 0);
  2814. tx_ready = 1;
  2815. out:
  2816. spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
  2817. smux_rx_flow_control_updated(ch);
  2818. if (tx_ready)
  2819. list_channel(ch);
  2820. return ret;
  2821. }
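/*
 * Example client (an illustrative sketch only; the callback signatures are
 * the ones taken by msm_smux_open() above, and everything named my_* or
 * MY_LCID is a hypothetical placeholder):
 *
 *	static void my_notify(void *priv, int event_type, const void *metadata)
 *	{
 *		switch (event_type) {
 *		case SMUX_CONNECTED:
 *			// both sides are open - queued writes will now be sent
 *			break;
 *		case SMUX_READ_DONE:
 *			// metadata points to a union notifier_metadata;
 *			// read.buffer holds read.len received bytes
 *			break;
 *		}
 *	}
 *
 *	static int my_get_rx_buffer(void *priv, void **pkt_priv,
 *				    void **buffer, int size)
 *	{
 *		*pkt_priv = NULL;
 *		*buffer = kmalloc(size, GFP_KERNEL);
 *		return *buffer ? 0 : -EAGAIN;	// -EAGAIN queues an RX retry
 *	}
 *
 *	ret = msm_smux_open(MY_LCID, my_ctx, my_notify, my_get_rx_buffer);
 *	if (ret == -EAGAIN)
 *		// previous close still pending - retry after SMUX_LOCAL_CLOSED
 */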
  2822. /**
  2823. * Starts the closing sequence for a logical channel.
  2824. *
  2825. * @lcid Logical channel ID
  2826. *
  2827. * @returns 0 for success, <0 otherwise
  2828. *
2829. * Once the close event has been acknowledged by the remote side, the client
  2830. * will receive an SMUX_LOCAL_CLOSED notification. If the remote side is also
  2831. * closed, then an SMUX_DISCONNECTED notification will also be sent.
  2832. */
  2833. int msm_smux_close(uint8_t lcid)
  2834. {
  2835. int ret = 0;
  2836. struct smux_lch_t *ch;
  2837. struct smux_pkt_t *pkt;
  2838. int tx_ready = 0;
  2839. unsigned long flags;
  2840. if (smux_assert_lch_id(lcid))
  2841. return -ENXIO;
  2842. ch = &smux_lch[lcid];
  2843. spin_lock_irqsave(&ch->state_lock_lhb1, flags);
  2844. ch->local_tiocm = 0x0;
  2845. ch->remote_tiocm = 0x0;
  2846. ch->tx_pending_data_cnt = 0;
  2847. ch->notify_lwm = 0;
  2848. ch->tx_flow_control = 0;
  2849. /* Purge TX queue */
  2850. spin_lock(&ch->tx_lock_lhb2);
  2851. smux_purge_ch_tx_queue(ch, 0);
  2852. spin_unlock(&ch->tx_lock_lhb2);
  2853. /* Send Close Command */
  2854. if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
  2855. ch->local_state == SMUX_LCH_LOCAL_OPENING) {
  2856. SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
  2857. ch->local_state,
  2858. SMUX_LCH_LOCAL_CLOSING);
  2859. ch->local_state = SMUX_LCH_LOCAL_CLOSING;
  2860. pkt = smux_alloc_pkt();
  2861. if (pkt) {
  2862. pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
  2863. pkt->hdr.flags = 0;
  2864. pkt->hdr.lcid = lcid;
  2865. pkt->hdr.payload_len = 0;
  2866. pkt->hdr.pad_len = 0;
  2867. smux_tx_queue(pkt, ch, 0);
  2868. tx_ready = 1;
  2869. } else {
  2870. SMUX_ERR("%s: pkt allocation failed\n", __func__);
  2871. ret = -ENOMEM;
  2872. }
  2873. /* Purge RX retry queue */
  2874. if (ch->rx_retry_queue_cnt)
  2875. queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
  2876. }
  2877. spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
  2878. if (tx_ready)
  2879. list_channel(ch);
  2880. return ret;
  2881. }
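/*
 * Example teardown (illustrative sketch; my_notify and MY_LCID are the
 * hypothetical names from the msm_smux_open() example above):
 *
 *	msm_smux_close(MY_LCID);
 *	// wait for SMUX_LOCAL_CLOSED in my_notify (and SMUX_DISCONNECTED
 *	// once the remote side is closed too) before calling
 *	// msm_smux_open() on this channel again
 */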

/**
 * Write data to a logical channel.
 *
 * @lcid Logical channel ID
 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
 *	SMUX_WRITE_FAIL notification.
 * @data Data to write
 * @len Length of @data
 *
 * @returns 0 for success, <0 otherwise
 *
 * Data may be written immediately after msm_smux_open() is called,
 * but the data will wait in the transmit queue until the channel has
 * been fully opened.
 *
 * Once the data has been written, the client will receive either a completion
 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
 */
int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
{
	struct smux_lch_t *ch;
	struct smux_pkt_t *pkt = NULL;
	int tx_ready = 0;
	unsigned long flags;
	int ret;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
	    ch->local_state != SMUX_LCH_LOCAL_OPENING) {
		SMUX_ERR("%s: hdr.invalid local state %d channel %d\n",
				__func__, ch->local_state, lcid);
		ret = -EINVAL;
		goto out;
	}

	if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
		SMUX_ERR("%s: payload %d too large\n",
				__func__, len);
		ret = -E2BIG;
		goto out;
	}

	pkt = smux_alloc_pkt();
	if (!pkt) {
		ret = -ENOMEM;
		goto out;
	}

	pkt->hdr.cmd = SMUX_CMD_DATA;
	pkt->hdr.lcid = lcid;
	pkt->hdr.flags = 0;
	pkt->hdr.payload_len = len;
	pkt->payload = (void *)data;
	pkt->priv = pkt_priv;
	pkt->hdr.pad_len = 0;

	spin_lock(&ch->tx_lock_lhb2);

	/* verify high watermark */
	SMUX_DBG("smux: %s: pending %d\n", __func__, ch->tx_pending_data_cnt);

	if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH) {
		SMUX_ERR("%s: ch %d high watermark %d exceeded %d\n",
				__func__, lcid, SMUX_TX_WM_HIGH,
				ch->tx_pending_data_cnt);
		ret = -EAGAIN;
		goto out_inner;
	}

	/* queue packet for transmit */
	if (++ch->tx_pending_data_cnt == SMUX_TX_WM_HIGH) {
		ch->notify_lwm = 1;
		SMUX_ERR("%s: high watermark hit\n", __func__);
		schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
	}
	list_add_tail(&pkt->list, &ch->tx_queue);

	/* add to ready list */
	if (IS_FULLY_OPENED(ch))
		tx_ready = 1;

	ret = 0;

out_inner:
	spin_unlock(&ch->tx_lock_lhb2);

out:
	if (ret && pkt)
		smux_free_pkt(pkt);
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch);

	return ret;
}
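
/*
 * Example (illustrative sketch): writing data.  The packet only stores a
 * pointer to the caller's buffer (pkt->payload above), so the buffer must
 * stay valid until SMUX_WRITE_DONE or SMUX_WRITE_FAIL is delivered.  Passing
 * the buffer as pkt_priv lets the callback free it, since pkt_priv is
 * returned with the notification; the exact metadata layout comes from
 * smux.h and is not shown here.  Names are hypothetical.
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	memcpy(buf, src, len);
 *	ret = msm_smux_write(MY_LCID, buf, buf, len);
 *	if (ret < 0) {
 *		// -EAGAIN means the TX queue is at the high watermark;
 *		// the write was not queued, so the buffer is still ours.
 *		kfree(buf);
 *	}
 *
 *	static void my_notify(void *priv, int event_type, const void *metadata)
 *	{
 *		switch (event_type) {
 *		case SMUX_WRITE_DONE:
 *		case SMUX_WRITE_FAIL:
 *			// recover pkt_priv from the metadata (per smux.h)
 *			// and free the buffer here
 *			break;
 *		}
 *	}
 */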

/**
 * Returns true if the TX queue is currently full (high water mark).
 *
 * @lcid Logical channel ID
 * @returns 0 if channel is not full
 *          1 if it is full
 *          < 0 for error
 */
int msm_smux_is_ch_full(uint8_t lcid)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int is_full = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH)
		is_full = 1;
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	return is_full;
}

/**
 * Returns true if the TX queue has space for more packets (it is at or
 * below the low water mark).
 *
 * @lcid Logical channel ID
 * @returns 0 if channel is above low watermark
 *          1 if it's at or below the low watermark
 *          < 0 for error
 */
int msm_smux_is_ch_low(uint8_t lcid)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int is_low = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	if (ch->tx_pending_data_cnt <= SMUX_TX_WM_LOW)
		is_low = 1;
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	return is_low;
}
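
/*
 * Example (illustrative sketch): cooperative flow control using the
 * watermark queries above together with the SMUX_HIGH_WM_HIT notification
 * generated in msm_smux_write().  A producer might throttle itself like
 * this (my_stop_producing()/my_resume_producing() are hypothetical):
 *
 *	if (msm_smux_is_ch_full(MY_LCID) > 0) {
 *		// queue is at SMUX_TX_WM_HIGH; stop submitting writes
 *		my_stop_producing();
 *	} else if (msm_smux_is_ch_low(MY_LCID) > 0) {
 *		// at or below SMUX_TX_WM_LOW; safe to queue more data
 *		my_resume_producing();
 *	}
 */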

/**
 * Send TIOCM status update.
 *
 * @ch Channel for update
 *
 * @returns 0 for success, <0 for failure
 *
 * Channel lock must be held before calling.
 */
static int smux_send_status_cmd(struct smux_lch_t *ch)
{
	struct smux_pkt_t *pkt;

	if (!ch)
		return -EINVAL;

	pkt = smux_alloc_pkt();
	if (!pkt)
		return -ENOMEM;

	pkt->hdr.lcid = ch->lcid;
	pkt->hdr.cmd = SMUX_CMD_STATUS;
	pkt->hdr.flags = ch->local_tiocm;
	pkt->hdr.payload_len = 0;
	pkt->hdr.pad_len = 0;
	smux_tx_queue(pkt, ch, 0);

	return 0;
}

/**
 * Internal helper function for getting the TIOCM status with
 * state_lock_lhb1 already locked.
 *
 * @ch Channel pointer
 *
 * @returns TIOCM status
 */
long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
{
	long status = 0x0;

	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;
	status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
	status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;

	return status;
}

/**
 * Get the TIOCM status bits.
 *
 * @lcid Logical channel ID
 *
 * @returns >= 0 TIOCM status bits
 *          < 0  Error condition
 */
long msm_smux_tiocm_get(uint8_t lcid)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	long status = 0x0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	status = msm_smux_tiocm_get_atomic(ch);
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	return status;
}

/**
 * Set/clear the TIOCM status bits.
 *
 * @lcid Logical channel ID
 * @set Bits to set
 * @clear Bits to clear
 *
 * @returns 0 for success; < 0 for failure
 *
 * If a bit is specified in both the @set and @clear masks, then the clear bit
 * definition will dominate and the bit will be cleared.
 */
int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	uint8_t old_status;
	uint8_t status_set = 0x0;
	uint8_t status_clear = 0x0;
	int tx_ready = 0;
	int ret = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
	status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
	status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
	status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;

	status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
	status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
	status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
	status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;

	old_status = ch->local_tiocm;
	ch->local_tiocm |= status_set;
	ch->local_tiocm &= ~status_clear;

	if (ch->local_tiocm != old_status) {
		ret = smux_send_status_cmd(ch);
		tx_ready = 1;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch);

	return ret;
}
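
/*
 * Example (illustrative sketch): driving the modem-control signals.  Setting
 * DTR/RTS maps onto SMUX_CMD_STATUS_RTC/RTR as shown above and, if the local
 * status changed, queues a STATUS command to the remote side; the remote's
 * signals are read back with msm_smux_tiocm_get().  MY_LCID is hypothetical.
 *
 *	ret = msm_smux_tiocm_set(MY_LCID, TIOCM_DTR | TIOCM_RTS, 0);
 *
 *	status = msm_smux_tiocm_get(MY_LCID);
 *	if (status >= 0 && (status & TIOCM_CTS))
 *		pr_info("remote asserted RTR (seen locally as CTS)\n");
 */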

/**********************************************************************/
/* Subsystem Restart                                                  */
/**********************************************************************/
static struct notifier_block ssr_notifier = {
	.notifier_call = ssr_notifier_cb,
};

/**
 * Handle Subsystem Restart (SSR) notifications.
 *
 * @this Pointer to ssr_notifier
 * @code SSR Code
 * @data Data pointer (not used)
 */
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data)
{
	unsigned long flags;
	int i;
	int tmp;
	int power_off_uart = 0;

	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		SMUX_DBG("smux: %s: ssr - before shutdown\n", __func__);
		mutex_lock(&smux.mutex_lha0);
		smux.in_reset = 1;
		smux.remote_is_alive = 0;
		mutex_unlock(&smux.mutex_lha0);
		return NOTIFY_DONE;
	} else if (code == SUBSYS_AFTER_POWERUP) {
		/* re-register platform devices */
		SMUX_DBG("smux: %s: ssr - after power-up\n", __func__);
		mutex_lock(&smux.mutex_lha0);

		if (smux.ld_open_count > 0
				&& !smux.platform_devs_registered) {
			for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
				SMUX_DBG("smux: %s: register pdev '%s'\n",
						__func__, smux_devs[i].name);
				smux_devs[i].dev.release = smux_pdev_release;
				tmp = platform_device_register(&smux_devs[i]);
				if (tmp)
					SMUX_ERR(
						"%s: error %d registering device %s\n",
						__func__, tmp,
						smux_devs[i].name);
			}
			smux.platform_devs_registered = 1;
		}
		mutex_unlock(&smux.mutex_lha0);
		return NOTIFY_DONE;
	} else if (code != SUBSYS_AFTER_SHUTDOWN) {
		return NOTIFY_DONE;
	}
	SMUX_DBG("smux: %s: ssr - after shutdown\n", __func__);

	/* Cleanup channels */
	smux_flush_workqueues();
	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count > 0) {
		smux_lch_purge();
		if (smux.tty)
			tty_driver_flush_buffer(smux.tty);

		/* Unregister platform devices */
		if (smux.platform_devs_registered) {
			for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
				SMUX_DBG("smux: %s: unregister pdev '%s'\n",
						__func__, smux_devs[i].name);
				platform_device_unregister(&smux_devs[i]);
			}
			smux.platform_devs_registered = 0;
		}

		/* Power-down UART */
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (smux.power_state != SMUX_PWR_OFF) {
			SMUX_PWR("smux: %s: SSR - turning off UART\n",
					__func__);
			smux.power_state = SMUX_PWR_OFF;
			power_off_uart = 1;
		}
		smux.powerdown_enabled = 0;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		if (power_off_uart)
			smux_uart_power_off_atomic();
	}
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;
	smux.rx_state = SMUX_RX_IDLE;
	smux.in_reset = 0;
	smux.remote_is_alive = 0;
	mutex_unlock(&smux.mutex_lha0);

	return NOTIFY_DONE;
}

/**********************************************************************/
/* Line Discipline Interface                                          */
/**********************************************************************/
static void smux_pdev_release(struct device *dev)
{
	struct platform_device *pdev;

	pdev = container_of(dev, struct platform_device, dev);
	SMUX_DBG("smux: %s: releasing pdev %p '%s'\n",
			__func__, pdev, pdev->name);
	memset(&pdev->dev, 0x0, sizeof(pdev->dev));
}

static int smuxld_open(struct tty_struct *tty)
{
	int i;
	int tmp;
	unsigned long flags;

	if (!smux.is_initialized)
		return -ENODEV;

	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count) {
		SMUX_ERR("%s: %p multiple instances not supported\n",
			__func__, tty);
		mutex_unlock(&smux.mutex_lha0);
		return -EEXIST;
	}

	if (tty->ops->write == NULL) {
		SMUX_ERR("%s: tty->ops->write already NULL\n", __func__);
		mutex_unlock(&smux.mutex_lha0);
		return -EINVAL;
	}

	/* connect to TTY */
	++smux.ld_open_count;
	smux.in_reset = 0;
	smux.tty = tty;
	tty->disc_data = &smux;
	tty->receive_room = TTY_RECEIVE_ROOM;
	tty_driver_flush_buffer(tty);

	/* power-down the UART if we are idle */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF) {
		SMUX_PWR("smux: %s: powering off uart\n", __func__);
		smux.power_state = SMUX_PWR_OFF_FLUSH;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		queue_work(smux_tx_wq, &smux_inactivity_work);
	} else {
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	/* register platform devices */
	for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
		SMUX_DBG("smux: %s: register pdev '%s'\n",
				__func__, smux_devs[i].name);
		smux_devs[i].dev.release = smux_pdev_release;
		tmp = platform_device_register(&smux_devs[i]);
		if (tmp)
			SMUX_ERR("%s: error %d registering device %s\n",
					__func__, tmp, smux_devs[i].name);
	}
	smux.platform_devs_registered = 1;
	mutex_unlock(&smux.mutex_lha0);
	return 0;
}
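
/*
 * Example (illustrative sketch): clients typically learn that the SMUX
 * transport is available by registering a platform driver that matches one
 * of the devices in smux_devs[].  probe() runs when smuxld_open() (or the
 * SSR after-powerup path above) registers the devices, and remove() runs
 * when they are unregistered on ldisc close or subsystem shutdown.  The
 * driver name "SMUX_EXAMPLE" and the my_probe()/my_remove() callbacks are
 * hypothetical; real names come from the smux_devs[] table earlier in this
 * file.
 *
 *	static struct platform_driver my_smux_driver = {
 *		.probe = my_probe,	// transport is up; open channels here
 *		.remove = my_remove,	// transport going away
 *		.driver = {
 *			.name = "SMUX_EXAMPLE",
 *			.owner = THIS_MODULE,
 *		},
 *	};
 *
 *	platform_driver_register(&my_smux_driver);
 */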

static void smuxld_close(struct tty_struct *tty)
{
	unsigned long flags;
	int power_up_uart = 0;
	int i;

	SMUX_DBG("smux: %s: ldisc unload\n", __func__);
	smux_flush_workqueues();

	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count <= 0) {
		SMUX_ERR("%s: invalid ld count %d\n", __func__,
			smux.ld_open_count);
		mutex_unlock(&smux.mutex_lha0);
		return;
	}
	--smux.ld_open_count;

	/* Cleanup channels */
	smux_lch_purge();

	/* Unregister platform devices */
	if (smux.platform_devs_registered) {
		for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
			SMUX_DBG("smux: %s: unregister pdev '%s'\n",
					__func__, smux_devs[i].name);
			platform_device_unregister(&smux_devs[i]);
		}
		smux.platform_devs_registered = 0;
	}

	/* Schedule UART power-up if it's down */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF)
		power_up_uart = 1;
	smux.power_state = SMUX_PWR_OFF;
	smux.powerdown_enabled = 0;
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	if (power_up_uart)
		smux_uart_power_on_atomic();

	smux.rx_state = SMUX_RX_IDLE;

	/* Disconnect from TTY */
	smux.tty = NULL;
	smux.remote_is_alive = 0;
	mutex_unlock(&smux.mutex_lha0);
	SMUX_DBG("smux: %s: ldisc complete\n", __func__);
}

/**
 * Receive data from TTY Line Discipline.
 *
 * @tty TTY structure
 * @cp Character data
 * @fp Flag data
 * @count Size of character and flag data
 */
void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
			   char *fp, int count)
{
	int i;
	int last_idx = 0;
	const char *tty_name = NULL;
	char *f;

	/* verify error flags */
	for (i = 0, f = fp; i < count; ++i, ++f) {
		if (*f != TTY_NORMAL) {
			if (tty)
				tty_name = tty->name;
			SMUX_ERR("%s: TTY %s Error %d (%s)\n", __func__,
					tty_name, *f, tty_flag_to_str(*f));

			/* feed all previous valid data to the parser */
			smux_rx_state_machine(cp + last_idx, i - last_idx,
					TTY_NORMAL);

			/* feed bad data to parser */
			smux_rx_state_machine(cp + i, 1, *f);
			last_idx = i + 1;
		}
	}

	/* feed data to RX state machine */
	smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
}
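
/*
 * Worked example of the error-flag split above (illustrative): with
 * count == 5 and fp == { TTY_NORMAL, TTY_NORMAL, TTY_FRAME, TTY_NORMAL,
 * TTY_NORMAL }, the loop first feeds cp[0..1] to the RX state machine as
 * TTY_NORMAL, then feeds the single bad byte cp[2] with TTY_FRAME, and the
 * final call outside the loop delivers the remaining cp[3..4] as TTY_NORMAL.
 */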

static void smuxld_flush_buffer(struct tty_struct *tty)
{
	SMUX_ERR("%s: not supported\n", __func__);
}

static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
		unsigned char __user *buf, size_t nr)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
		const unsigned char *buf, size_t nr)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}

static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}

static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
		struct poll_table_struct *tbl)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}

static void smuxld_write_wakeup(struct tty_struct *tty)
{
	SMUX_ERR("%s: not supported\n", __func__);
}

static struct tty_ldisc_ops smux_ldisc_ops = {
	.owner = THIS_MODULE,
	.magic = TTY_LDISC_MAGIC,
	.name = "n_smux",
	.open = smuxld_open,
	.close = smuxld_close,
	.flush_buffer = smuxld_flush_buffer,
	.chars_in_buffer = smuxld_chars_in_buffer,
	.read = smuxld_read,
	.write = smuxld_write,
	.ioctl = smuxld_ioctl,
	.poll = smuxld_poll,
	.receive_buf = smuxld_receive_buf,
	.write_wakeup = smuxld_write_wakeup,
};

static int __init smux_init(void)
{
	int ret;

	mutex_init(&smux.mutex_lha0);

	spin_lock_init(&smux.rx_lock_lha1);
	smux.rx_state = SMUX_RX_IDLE;
	smux.power_state = SMUX_PWR_OFF;
	smux.pwr_wakeup_delay_us = 1;
	smux.powerdown_enabled = 0;
	smux.power_ctl_remote_req_received = 0;
	INIT_LIST_HEAD(&smux.power_queue);
	smux.rx_activity_flag = 0;
	smux.tx_activity_flag = 0;
	smux.recv_len = 0;
	smux.tty = NULL;
	smux.ld_open_count = 0;
	smux.in_reset = 0;
	smux.remote_is_alive = 0;
	smux.is_initialized = 1;
	smux.platform_devs_registered = 0;
	smux_byte_loopback = 0;

	spin_lock_init(&smux.tx_lock_lha2);
	INIT_LIST_HEAD(&smux.lch_tx_ready_list);

	ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
	if (ret != 0) {
		SMUX_ERR("%s: error %d registering line discipline\n",
				__func__, ret);
		return ret;
	}

	subsys_notif_register_notifier("external_modem", &ssr_notifier);

	ret = lch_init();
	if (ret != 0) {
		SMUX_ERR("%s: lch_init failed\n", __func__);
		return ret;
	}

	log_ctx = ipc_log_context_create(20, "smux", 0);
	if (!log_ctx) {
		SMUX_ERR("%s: unable to create log context\n", __func__);
		disable_ipc_logging = 1;
	}

	return 0;
}

static void __exit smux_exit(void)
{
	int ret;

	ret = tty_unregister_ldisc(N_SMUX);
	if (ret != 0) {
		SMUX_ERR("%s error %d unregistering line discipline\n",
				__func__, ret);
		return;
	}
}

module_init(smux_init);
module_exit(smux_exit);

MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_LDISC(N_SMUX);
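
/*
 * Example (illustrative sketch): userspace attaches this line discipline to
 * the UART tty that connects to the external modem, which in turn triggers
 * smuxld_open() above.  The device node name is hypothetical; N_SMUX is the
 * ldisc number exported by the kernel headers.
 *
 *	int fd = open("/dev/ttyHS0", O_RDWR | O_NOCTTY);
 *	int ldisc = N_SMUX;
 *
 *	if (fd >= 0 && ioctl(fd, TIOCSETD, &ldisc) < 0)
 *		perror("TIOCSETD");
 *	// keep fd open for as long as the SMUX transport should stay up
 */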