spi_qsd.c

/* Copyright (c) 2008-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
/*
 * SPI driver for Qualcomm MSM platforms
 *
 */
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/gpio.h>
#include <linux/remote_spinlock.h>
#include <linux/pm_qos.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
#include <mach/sps.h>
#include <mach/dma.h>
#include <mach/msm_bus.h>
#include <mach/msm_bus_board.h>
#include <linux/qcom-spi.h>
#include "spi_qsd.h"

static int msm_spi_pm_resume_runtime(struct device *device);
static int msm_spi_pm_suspend_runtime(struct device *device);
static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd);

static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
					struct platform_device *pdev)
{
	struct resource *resource;
	unsigned long gsbi_mem_phys_addr;
	size_t gsbi_mem_size;
	void __iomem *gsbi_base;

	resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!resource)
		return 0;

	gsbi_mem_phys_addr = resource->start;
	gsbi_mem_size = resource_size(resource);
	if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr,
					gsbi_mem_size, SPI_DRV_NAME))
		return -ENXIO;

	gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr,
					gsbi_mem_size);
	if (!gsbi_base)
		return -ENXIO;

	/* Set GSBI to SPI mode */
	writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG);

	return 0;
}
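
/*
 * Reset the QUP core and clear the operational, config and I/O mode
 * registers so every transfer starts from a known state.
 */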
static inline void msm_spi_register_init(struct msm_spi *dd)
{
	writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
	writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
	writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
	writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
	if (dd->qup_ver)
		writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK);
}

static inline int msm_spi_request_gpios(struct msm_spi *dd)
{
	int i;
	int result = 0;

	for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
		if (dd->spi_gpios[i] >= 0) {
			result = gpio_request(dd->spi_gpios[i], spi_rsrcs[i]);
			if (result) {
				dev_err(dd->dev, "%s: gpio_request for pin %d "
					"failed with error %d\n", __func__,
					dd->spi_gpios[i], result);
				goto error;
			}
		}
	}
	return 0;

error:
	for (; --i >= 0;) {
		if (dd->spi_gpios[i] >= 0)
			gpio_free(dd->spi_gpios[i]);
	}
	return result;
}

static inline void msm_spi_free_gpios(struct msm_spi *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
		if (dd->spi_gpios[i] >= 0)
			gpio_free(dd->spi_gpios[i]);
	}

	for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
		if (dd->cs_gpios[i].valid) {
			gpio_free(dd->cs_gpios[i].gpio_num);
			dd->cs_gpios[i].valid = 0;
		}
	}
}

static inline int msm_spi_request_cs_gpio(struct msm_spi *dd)
{
	int cs_num;
	int rc;

	cs_num = dd->cur_msg->spi->chip_select;
	if ((!(dd->cur_msg->spi->mode & SPI_LOOP)) &&
		(!(dd->cs_gpios[cs_num].valid)) &&
		(dd->cs_gpios[cs_num].gpio_num >= 0)) {
		rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
				spi_cs_rsrcs[cs_num]);
		if (rc) {
			dev_err(dd->dev,
				"gpio_request for pin %d failed,error %d\n",
				dd->cs_gpios[cs_num].gpio_num, rc);
			return rc;
		}
		dd->cs_gpios[cs_num].valid = 1;
	}
	return 0;
}

static inline void msm_spi_free_cs_gpio(struct msm_spi *dd)
{
	int cs_num;

	cs_num = dd->cur_msg->spi->chip_select;
	if (dd->cs_gpios[cs_num].valid) {
		gpio_free(dd->cs_gpios[cs_num].gpio_num);
		dd->cs_gpios[cs_num].valid = 0;
	}
}

/**
 * msm_spi_clk_max_rate: finds the nearest lower rate for a clk
 * @clk the clock for which to find nearest lower rate
 * @rate clock frequency in Hz
 * @return nearest lower rate or negative error value
 *
 * Public clock API extends clk_round_rate which is a ceiling function. This
 * function is a floor function implemented as a binary search using the
 * ceiling function.
 */
static long msm_spi_clk_max_rate(struct clk *clk, unsigned long rate)
{
	long lowest_available, nearest_low, step_size, cur;
	long step_direction = -1;
	long guess = rate;
	int max_steps = 10;

	cur = clk_round_rate(clk, rate);
	if (cur == rate)
		return rate;

	/* if we got here then: cur > rate */
	lowest_available = clk_round_rate(clk, 0);
	if (lowest_available > rate)
		return -EINVAL;

	step_size = (rate - lowest_available) >> 1;
	nearest_low = lowest_available;

	while (max_steps-- && step_size) {
		guess += step_size * step_direction;
		cur = clk_round_rate(clk, guess);

		if ((cur < rate) && (cur > nearest_low))
			nearest_low = cur;
		/*
		 * if we stepped too far, then start stepping in the other
		 * direction with half the step size
		 */
		if (((cur > rate) && (step_direction > 0))
		    || ((cur < rate) && (step_direction < 0))) {
			step_direction = -step_direction;
			step_size >>= 1;
		}
	}
	return nearest_low;
}
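
/*
 * Program the core clock to the nearest achievable rate at or below the
 * requested speed (found via msm_spi_clk_max_rate) and cache it in
 * dd->clock_speed on success.
 */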
static void msm_spi_clock_set(struct msm_spi *dd, int speed)
{
	long rate;
	int rc;

	rate = msm_spi_clk_max_rate(dd->clk, speed);
	if (rate < 0) {
		dev_err(dd->dev,
			"%s: no match found for requested clock frequency:%d",
			__func__, speed);
		return;
	}

	rc = clk_set_rate(dd->clk, rate);
	if (!rc)
		dd->clock_speed = rate;
}

static void msm_spi_clk_path_vote(struct msm_spi *dd)
{
	if (dd->clk_path_vote.client_hdl)
		msm_bus_scale_client_update_request(
				dd->clk_path_vote.client_hdl,
				MSM_SPI_CLK_PATH_RESUME_VEC);
}

static void msm_spi_clk_path_unvote(struct msm_spi *dd)
{
	if (dd->clk_path_vote.client_hdl)
		msm_bus_scale_client_update_request(
				dd->clk_path_vote.client_hdl,
				MSM_SPI_CLK_PATH_SUSPEND_VEC);
}

static void msm_spi_clk_path_teardown(struct msm_spi *dd)
{
	if (dd->pdata->active_only)
		msm_spi_clk_path_unvote(dd);

	if (dd->clk_path_vote.client_hdl) {
		msm_bus_scale_unregister_client(dd->clk_path_vote.client_hdl);
		dd->clk_path_vote.client_hdl = 0;
	}
}

/**
 * msm_spi_clk_path_init_structs: internal impl detail of msm_spi_clk_path_init
 *
 * allocates and initializes the bus scaling vectors.
 */
static int msm_spi_clk_path_init_structs(struct msm_spi *dd)
{
	struct msm_bus_vectors *paths = NULL;
	struct msm_bus_paths *usecases = NULL;

	dev_dbg(dd->dev, "initialises path clock voting structs");

	paths = devm_kzalloc(dd->dev, sizeof(*paths) * 2, GFP_KERNEL);
	if (!paths) {
		dev_err(dd->dev,
			"msm_bus_paths.paths memory allocation failed");
		return -ENOMEM;
	}

	usecases = devm_kzalloc(dd->dev, sizeof(*usecases) * 2, GFP_KERNEL);
	if (!usecases) {
		dev_err(dd->dev,
			"msm_bus_scale_pdata.usecases memory allocation failed");
		goto path_init_err;
	}

	dd->clk_path_vote.pdata = devm_kzalloc(dd->dev,
					sizeof(*dd->clk_path_vote.pdata),
					GFP_KERNEL);
	if (!dd->clk_path_vote.pdata) {
		dev_err(dd->dev,
			"msm_bus_scale_pdata memory allocation failed");
		goto path_init_err;
	}

	paths[MSM_SPI_CLK_PATH_SUSPEND_VEC] = (struct msm_bus_vectors) {
		.src = dd->pdata->master_id,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = 0,
	};

	paths[MSM_SPI_CLK_PATH_RESUME_VEC] = (struct msm_bus_vectors) {
		.src = dd->pdata->master_id,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = MSM_SPI_CLK_PATH_AVRG_BW(dd),
		.ib = MSM_SPI_CLK_PATH_BRST_BW(dd),
	};

	usecases[MSM_SPI_CLK_PATH_SUSPEND_VEC] = (struct msm_bus_paths) {
		.num_paths = 1,
		.vectors = &paths[MSM_SPI_CLK_PATH_SUSPEND_VEC],
	};

	usecases[MSM_SPI_CLK_PATH_RESUME_VEC] = (struct msm_bus_paths) {
		.num_paths = 1,
		.vectors = &paths[MSM_SPI_CLK_PATH_RESUME_VEC],
	};

	*dd->clk_path_vote.pdata = (struct msm_bus_scale_pdata) {
		.active_only = dd->pdata->active_only,
		.name = dev_name(dd->dev),
		.num_usecases = 2,
		.usecase = usecases,
	};

	return 0;

path_init_err:
	devm_kfree(dd->dev, paths);
	devm_kfree(dd->dev, usecases);
	devm_kfree(dd->dev, dd->clk_path_vote.pdata);
	dd->clk_path_vote.pdata = NULL;
	return -ENOMEM;
}

/**
 * msm_spi_clk_path_postponed_register: reg with bus-scaling after it is probed
 *
 * @return zero on success
 *
 * Workaround: SPI driver may be probed before the bus scaling driver. Calling
 * msm_bus_scale_register_client() will fail if the bus scaling driver is not
 * ready yet. Thus, this function should be called not from probe but from a
 * later context. Also, this function may be called more than once before
 * registration succeeds. In that case only one error message will be logged.
 * At boot time all clocks are on, so earlier SPI transactions should succeed.
 */
static int msm_spi_clk_path_postponed_register(struct msm_spi *dd)
{
	dd->clk_path_vote.client_hdl = msm_bus_scale_register_client(
						dd->clk_path_vote.pdata);

	if (dd->clk_path_vote.client_hdl) {
		if (dd->clk_path_vote.reg_err) {
			/* log a success message if an error msg was logged */
			dd->clk_path_vote.reg_err = false;
			dev_info(dd->dev,
				"msm_bus_scale_register_client(mstr-id:%d "
				"actv-only:%d):0x%x",
				dd->pdata->master_id, dd->pdata->active_only,
				dd->clk_path_vote.client_hdl);
		}

		if (dd->pdata->active_only)
			msm_spi_clk_path_vote(dd);
	} else {
		/* guard to log only one error on multiple failure */
		if (!dd->clk_path_vote.reg_err) {
			dd->clk_path_vote.reg_err = true;
			dev_info(dd->dev,
				"msm_bus_scale_register_client(mstr-id:%d "
				"actv-only:%d):0",
				dd->pdata->master_id, dd->pdata->active_only);
		}
	}

	return dd->clk_path_vote.client_hdl ? 0 : -EAGAIN;
}

static void msm_spi_clk_path_init(struct msm_spi *dd)
{
	/*
	 * bail out if path voting is disabled (master_id == 0) or if it is
	 * already registered (client_hdl != 0)
	 */
	if (!dd->pdata->master_id || dd->clk_path_vote.client_hdl)
		return;

	/* if fail once then try no more */
	if (!dd->clk_path_vote.pdata && msm_spi_clk_path_init_structs(dd)) {
		dd->pdata->master_id = 0;
		return;
	};

	/* on failure try again later */
	if (msm_spi_clk_path_postponed_register(dd))
		return;

	if (dd->pdata->active_only)
		msm_spi_clk_path_vote(dd);
}
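
/*
 * Decode the block-size and FIFO-depth fields read from SPI_IO_MODES:
 * 'block' selects 1/4/8 words per block and 'mult' scales the FIFO to
 * 2/4/8/16 blocks. Results are returned in words (fifo_size) and
 * bytes (block_size).
 */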
static int msm_spi_calculate_size(int *fifo_size,
				int *block_size,
				int block,
				int mult)
{
	int words;

	switch (block) {
	case 0:
		words = 1; /* 4 bytes */
		break;
	case 1:
		words = 4; /* 16 bytes */
		break;
	case 2:
		words = 8; /* 32 bytes */
		break;
	default:
		return -EINVAL;
	}

	switch (mult) {
	case 0:
		*fifo_size = words * 2;
		break;
	case 1:
		*fifo_size = words * 4;
		break;
	case 2:
		*fifo_size = words * 8;
		break;
	case 3:
		*fifo_size = words * 16;
		break;
	default:
		return -EINVAL;
	}

	*block_size = words * sizeof(u32); /* in bytes */
	return 0;
}
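
/* Advance cur_transfer to the next entry in the message's transfer list. */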
static void get_next_transfer(struct msm_spi *dd)
{
	struct spi_transfer *t = dd->cur_transfer;

	if (t->transfer_list.next != &dd->cur_msg->transfers) {
		dd->cur_transfer = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
		dd->write_buf = dd->cur_transfer->tx_buf;
		dd->read_buf = dd->cur_transfer->rx_buf;
	}
}

static void __init msm_spi_calculate_fifo_size(struct msm_spi *dd)
{
	u32 spi_iom;
	int block;
	int mult;

	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);

	block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
	mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
	if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
					block, mult)) {
		goto fifo_size_err;
	}

	block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
	mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
	if (msm_spi_calculate_size(&dd->output_fifo_size,
					&dd->output_block_size, block, mult)) {
		goto fifo_size_err;
	}

	if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
		/* DM mode is not available for this block size */
		if (dd->input_block_size == 4 || dd->output_block_size == 4)
			dd->use_dma = 0;

		if (dd->use_dma) {
			dd->input_burst_size = max(dd->input_block_size,
						DM_BURST_SIZE);
			dd->output_burst_size = max(dd->output_block_size,
						DM_BURST_SIZE);
		}
	}

	return;

fifo_size_err:
	dd->use_dma = 0;
	pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom);
	return;
}
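
/*
 * Pop one word from the input FIFO and scatter its bytes (MSB first) into
 * the read buffer, then switch to the next transfer of a multi-transfer
 * message once read_len bytes have been consumed.
 */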
static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
{
	u32 data_in;
	int i;
	int shift;

	data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
	if (dd->read_buf) {
		for (i = 0; (i < dd->bytes_per_word) &&
			dd->rx_bytes_remaining; i++) {
			/* The data format depends on bytes_per_word:
			   4 bytes: 0x12345678
			   3 bytes: 0x00123456
			   2 bytes: 0x00001234
			   1 byte : 0x00000012
			*/
			shift = 8 * (dd->bytes_per_word - i - 1);
			*dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
			dd->rx_bytes_remaining--;
		}
	} else {
		if (dd->rx_bytes_remaining >= dd->bytes_per_word)
			dd->rx_bytes_remaining -= dd->bytes_per_word;
		else
			dd->rx_bytes_remaining = 0;
	}

	dd->read_xfr_cnt++;
	if (dd->multi_xfr) {
		if (!dd->rx_bytes_remaining)
			dd->read_xfr_cnt = 0;
		else if ((dd->read_xfr_cnt * dd->bytes_per_word) ==
						dd->read_len) {
			struct spi_transfer *t = dd->cur_rx_transfer;

			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				t = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
				dd->read_buf = t->rx_buf;
				dd->read_len = t->len;
				dd->read_xfr_cnt = 0;
				dd->cur_rx_transfer = t;
			}
		}
	}
}

static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
{
	u32 spi_op = readl_relaxed(dd->base + SPI_STATE);

	return spi_op & SPI_OP_STATE_VALID;
}

static inline void msm_spi_udelay(unsigned long delay_usecs)
{
	/*
	 * For smaller values of delay, context switch time
	 * would negate the usage of usleep
	 */
	if (delay_usecs > 20)
		usleep_range(delay_usecs, delay_usecs);
	else if (delay_usecs)
		udelay(delay_usecs);
}
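
/*
 * Poll the SPI_STATE register until the operational state becomes valid,
 * scaling the polling interval and timeout with the current clock speed.
 */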
static inline int msm_spi_wait_valid(struct msm_spi *dd)
{
	unsigned long delay = 0;
	unsigned long timeout = 0;

	if (dd->clock_speed == 0)
		return -EINVAL;
	/*
	 * Based on the SPI clock speed, sufficient time
	 * should be given for the SPI state transition
	 * to occur
	 */
	delay = (10 * USEC_PER_SEC) / dd->clock_speed;
	/*
	 * For small delay values, the default timeout would
	 * be one jiffy
	 */
	if (delay < SPI_DELAY_THRESHOLD)
		delay = SPI_DELAY_THRESHOLD;

	/* Adding one to round off to the nearest jiffy */
	timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
	while (!msm_spi_is_valid_state(dd)) {
		if (time_after(jiffies, timeout)) {
			if (!msm_spi_is_valid_state(dd)) {
				if (dd->cur_msg)
					dd->cur_msg->status = -EIO;
				dev_err(dd->dev, "%s: SPI operational state "
					"not valid\n", __func__);
				return -ETIMEDOUT;
			} else
				return 0;
		}
		msm_spi_udelay(delay);
	}
	return 0;
}
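
/*
 * Request a QUP state transition and wait for it to take effect; moving
 * from PAUSE to RESET needs the documented double write of the clear bits.
 */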
static inline int msm_spi_set_state(struct msm_spi *dd,
					enum msm_spi_state state)
{
	enum msm_spi_state cur_state;

	if (msm_spi_wait_valid(dd))
		return -EIO;
	cur_state = readl_relaxed(dd->base + SPI_STATE);
	/* Per spec:
	   For PAUSE_STATE to RESET_STATE, two writes of (10) are required */
	if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
			(state == SPI_OP_STATE_RESET)) {
		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
	} else {
		writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
					dd->base + SPI_STATE);
	}
	if (msm_spi_wait_valid(dd))
		return -EIO;

	return 0;
}

/**
 * msm_spi_set_bpw_and_no_io_flags: configure N, and no-input/no-output flags
 */
static inline void
msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd, u32 *config, int n)
{
	*config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);

	if (n != (*config & SPI_CFG_N))
		*config = (*config & ~SPI_CFG_N) | n;

	if (((dd->mode == SPI_DMOV_MODE) && (!dd->read_len))
		|| (dd->mode == SPI_BAM_MODE)) {
		if (dd->read_buf == NULL)
			*config |= SPI_NO_INPUT;
		if (dd->write_buf == NULL)
			*config |= SPI_NO_OUTPUT;
	}
}

/**
 * msm_spi_calc_spi_config_loopback_and_input_first: Calculate the values that
 * should be updated into SPI_CONFIG's LOOPBACK and INPUT_FIRST flags
 * @return calculated value for SPI_CONFIG
 */
static u32
msm_spi_calc_spi_config_loopback_and_input_first(u32 spi_config, u8 mode)
{
	if (mode & SPI_LOOP)
		spi_config |= SPI_CFG_LOOPBACK;
	else
		spi_config &= ~SPI_CFG_LOOPBACK;

	if (mode & SPI_CPHA)
		spi_config &= ~SPI_CFG_INPUT_FIRST;
	else
		spi_config |= SPI_CFG_INPUT_FIRST;

	return spi_config;
}

/**
 * msm_spi_set_spi_config: prepares register SPI_CONFIG to process the
 * next transfer
 */
static void msm_spi_set_spi_config(struct msm_spi *dd, int bpw)
{
	u32 spi_config = readl_relaxed(dd->base + SPI_CONFIG);

	spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
					spi_config, dd->cur_msg->spi->mode);

	if (dd->qup_ver == SPI_QUP_VERSION_NONE)
		/* flags removed from SPI_CONFIG in QUP version-2 */
		msm_spi_set_bpw_and_no_io_flags(dd, &spi_config, bpw-1);

	/*
	 * HS_MODE improves signal stability for spi-clk high rates
	 * but is invalid in LOOPBACK mode.
	 */
	if ((dd->clock_speed >= SPI_HS_MIN_RATE) &&
	    !(dd->cur_msg->spi->mode & SPI_LOOP))
		spi_config |= SPI_CFG_HS_MODE;
	else
		spi_config &= ~SPI_CFG_HS_MODE;

	writel_relaxed(spi_config, dd->base + SPI_CONFIG);
}

/**
 * msm_spi_set_mx_counts: set SPI_MX_READ_COUNT and SPI_MX_WRITE_COUNT
 * for FIFO-mode. Set SPI_MX_INPUT_COUNT and SPI_MX_OUTPUT_COUNT for
 * BAM and DMOV modes.
 * @n_words The number of reads/writes of size N.
 */
static void msm_spi_set_mx_counts(struct msm_spi *dd, u32 n_words)
{
	/*
	 * n_words cannot exceed fifo_size, and only one READ COUNT
	 * interrupt is generated per transaction, so for transactions
	 * larger than fifo size READ COUNT must be disabled.
	 * For those transactions we usually move to Data Mover mode.
	 */
	if (dd->mode == SPI_FIFO_MODE) {
		if (n_words <= dd->input_fifo_size) {
			writel_relaxed(n_words,
					dd->base + SPI_MX_READ_COUNT);
			msm_spi_set_write_count(dd, n_words);
		} else {
			writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
			msm_spi_set_write_count(dd, 0);
		}
		if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
			/* must be zero for FIFO */
			writel_relaxed(0, dd->base + SPI_MX_INPUT_COUNT);
			writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
		}
	} else {
		/* must be zero for BAM and DMOV */
		writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
		msm_spi_set_write_count(dd, 0);

		/*
		 * for DMA transfers, both QUP_MX_INPUT_COUNT and
		 * QUP_MX_OUTPUT_COUNT must be zero in all cases but one.
		 * That case is a non-balanced transfer when there is
		 * only a read_buf.
		 */
		if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
			if (dd->write_buf)
				writel_relaxed(0,
						dd->base + SPI_MX_INPUT_COUNT);
			else
				writel_relaxed(n_words,
						dd->base + SPI_MX_INPUT_COUNT);

			writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
		}
	}
}

static int msm_spi_bam_pipe_disconnect(struct msm_spi *dd,
					struct msm_spi_bam_pipe *pipe)
{
	int ret = sps_disconnect(pipe->handle);

	if (ret) {
		dev_dbg(dd->dev, "%s disconnect bam %s pipe failed\n",
			__func__, pipe->name);
		return ret;
	}
	return 0;
}
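
/*
 * Connect a BAM pipe and register for its EOT event so that
 * dd->transfer_complete is signalled when the descriptor chain finishes.
 */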
static int msm_spi_bam_pipe_connect(struct msm_spi *dd,
		struct msm_spi_bam_pipe *pipe, struct sps_connect *config)
{
	int ret;
	struct sps_register_event event = {
		.mode = SPS_TRIGGER_WAIT,
		.options = SPS_O_EOT,
		.xfer_done = &dd->transfer_complete,
	};

	ret = sps_connect(pipe->handle, config);
	if (ret) {
		dev_err(dd->dev, "%s: sps_connect(%s:0x%p):%d",
			__func__, pipe->name, pipe->handle, ret);
		return ret;
	}

	ret = sps_register_event(pipe->handle, &event);
	if (ret) {
		dev_err(dd->dev, "%s sps_register_event(hndl:0x%p %s):%d",
			__func__, pipe->handle, pipe->name, ret);
		msm_spi_bam_pipe_disconnect(dd, pipe);
		return ret;
	}

	pipe->teardown_required = true;
	return 0;
}

static void msm_spi_bam_pipe_flush(struct msm_spi *dd,
					enum msm_spi_pipe_direction pipe_dir)
{
	struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
					(&dd->bam.prod) : (&dd->bam.cons);
	struct sps_connect config = pipe->config;
	int ret;

	ret = msm_spi_bam_pipe_disconnect(dd, pipe);
	if (ret)
		return;

	ret = msm_spi_bam_pipe_connect(dd, pipe, &config);
	if (ret)
		return;
}

static void msm_spi_bam_flush(struct msm_spi *dd)
{
	dev_dbg(dd->dev, "%s flushing bam for recovery\n", __func__);

	msm_spi_bam_pipe_flush(dd, SPI_BAM_CONSUMER_PIPE);
	msm_spi_bam_pipe_flush(dd, SPI_BAM_PRODUCER_PIPE);
}

static int
msm_spi_bam_process_rx(struct msm_spi *dd, u32 *bytes_to_send, u32 desc_cnt)
{
	int ret = 0;
	u32 data_xfr_size = 0, rem_bc = 0;
	u32 prod_flags = 0;

	rem_bc = dd->cur_rx_transfer->len - dd->bam.curr_rx_bytes_recvd;
	data_xfr_size = (rem_bc < *bytes_to_send) ? rem_bc : *bytes_to_send;

	/*
	 * set flags for last descriptor only
	 */
	if ((desc_cnt == 1)
		|| (*bytes_to_send == data_xfr_size))
		prod_flags = (dd->write_buf)
			? 0 : (SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD);

	/*
	 * enqueue read buffer in BAM
	 */
	ret = sps_transfer_one(dd->bam.prod.handle,
			dd->cur_rx_transfer->rx_dma
				+ dd->bam.curr_rx_bytes_recvd,
			data_xfr_size, dd, prod_flags);
	if (ret < 0) {
		dev_err(dd->dev,
			"%s: Failed to queue producer BAM transfer",
			__func__);
		return ret;
	}

	dd->bam.curr_rx_bytes_recvd += data_xfr_size;
	*bytes_to_send -= data_xfr_size;
	dd->bam.bam_rx_len -= data_xfr_size;
	return data_xfr_size;
}

static int
msm_spi_bam_process_tx(struct msm_spi *dd, u32 *bytes_to_send, u32 desc_cnt)
{
	int ret = 0;
	u32 data_xfr_size = 0, rem_bc = 0;
	u32 cons_flags = 0;

	rem_bc = dd->cur_tx_transfer->len - dd->bam.curr_tx_bytes_sent;
	data_xfr_size = (rem_bc < *bytes_to_send) ? rem_bc : *bytes_to_send;

	/*
	 * set flags for last descriptor only
	 */
	if ((desc_cnt == 1)
		|| (*bytes_to_send == data_xfr_size))
		cons_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD;

	/*
	 * enqueue write buffer in BAM
	 */
	ret = sps_transfer_one(dd->bam.cons.handle,
			dd->cur_tx_transfer->tx_dma
				+ dd->bam.curr_tx_bytes_sent,
			data_xfr_size, dd, cons_flags);
	if (ret < 0) {
		dev_err(dd->dev,
			"%s: Failed to queue consumer BAM transfer",
			__func__);
		return ret;
	}

	dd->bam.curr_tx_bytes_sent += data_xfr_size;
	*bytes_to_send -= data_xfr_size;
	dd->bam.bam_tx_len -= data_xfr_size;
	return data_xfr_size;
}

/**
 * msm_spi_bam_begin_transfer: transfer dd->tx_bytes_remaining bytes
 * using BAM.
 * @brief BAM can transfer SPI_MAX_TRFR_BTWN_RESETS bytes in a single
 * transfer. Between transfers the QUP must change to reset state. A loop
 * issues a single BAM transfer at a time.
 * @return zero on success
 */
static int
msm_spi_bam_begin_transfer(struct msm_spi *dd)
{
	u32 tx_bytes_to_send = 0, rx_bytes_to_recv = 0;
	u32 n_words_xfr;
	s32 ret = 0;
	u32 prod_desc_cnt = SPI_BAM_MAX_DESC_NUM - 1;
	u32 cons_desc_cnt = SPI_BAM_MAX_DESC_NUM - 1;
	u32 byte_count = 0;

	rx_bytes_to_recv = min_t(u32, dd->bam.bam_rx_len,
				SPI_MAX_TRFR_BTWN_RESETS);
	tx_bytes_to_send = min_t(u32, dd->bam.bam_tx_len,
				SPI_MAX_TRFR_BTWN_RESETS);
	n_words_xfr = DIV_ROUND_UP(rx_bytes_to_recv,
				dd->bytes_per_word);

	msm_spi_set_mx_counts(dd, n_words_xfr);
	ret = msm_spi_set_state(dd, SPI_OP_STATE_RUN);
	if (ret < 0) {
		dev_err(dd->dev,
			"%s: Failed to set QUP state to run",
			__func__);
		goto xfr_err;
	}

	while ((rx_bytes_to_recv + tx_bytes_to_send) &&
		((cons_desc_cnt + prod_desc_cnt) > 0)) {
		struct spi_transfer *t = NULL, *next;

		if (dd->read_buf && (prod_desc_cnt > 0)) {
			ret = msm_spi_bam_process_rx(dd, &rx_bytes_to_recv,
							prod_desc_cnt);
			if (ret < 0)
				goto xfr_err;

			if (!(dd->cur_rx_transfer->len
				- dd->bam.curr_rx_bytes_recvd))
				t = dd->cur_rx_transfer;
			prod_desc_cnt--;
		}

		if (dd->write_buf && (cons_desc_cnt > 0)) {
			ret = msm_spi_bam_process_tx(dd, &tx_bytes_to_send,
							cons_desc_cnt);
			if (ret < 0)
				goto xfr_err;

			if (!(dd->cur_tx_transfer->len
				- dd->bam.curr_tx_bytes_sent))
				t = dd->cur_tx_transfer;
			cons_desc_cnt--;
		}

		if (t && (t->transfer_list.next != &dd->cur_msg->transfers)) {
			next = list_entry(t->transfer_list.next,
					struct spi_transfer,
					transfer_list);
			dd->read_buf = next->rx_buf;
			dd->write_buf = next->tx_buf;
			dd->cur_rx_transfer = next;
			dd->cur_tx_transfer = next;
			dd->bam.curr_rx_bytes_recvd = 0;
			dd->bam.curr_tx_bytes_sent = 0;
		}

		byte_count += ret;
	}

	dd->tx_bytes_remaining -= min_t(u32, byte_count,
					SPI_MAX_TRFR_BTWN_RESETS);
	return 0;
xfr_err:
	return ret;
}
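
/*
 * Kick off the next BAM chunk if bytes remain; returns 1 when another
 * transfer was started and 0 when the message is done or setup failed.
 */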
static int
msm_spi_bam_next_transfer(struct msm_spi *dd)
{
	if (dd->mode != SPI_BAM_MODE)
		return 0;

	if (dd->tx_bytes_remaining > 0) {
		init_completion(&dd->transfer_complete);
		if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
			return 0;
		if ((msm_spi_bam_begin_transfer(dd)) < 0) {
			dev_err(dd->dev, "%s: BAM transfer setup failed\n",
				__func__);
			return 0;
		}
		return 1;
	}
	return 0;
}
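
/*
 * Build the Data Mover descriptors for the next chunk: whole bursts go
 * through the box command, the unaligned tail goes through the padded
 * single descriptor, and the MX input/output counts are programmed to
 * match.
 */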
static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
{
	dmov_box *box;
	int bytes_to_send, bytes_sent;
	int tx_num_rows, rx_num_rows;
	u32 num_transfers;

	atomic_set(&dd->rx_irq_called, 0);
	atomic_set(&dd->tx_irq_called, 0);

	if (dd->write_len && !dd->read_len) {
		/* WR-WR transfer */
		bytes_sent = dd->cur_msg_len - dd->tx_bytes_remaining;
		dd->write_buf = dd->temp_buf;
	} else {
		bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
		/* For WR-RD transfer, bytes_sent can be negative */
		if (bytes_sent < 0)
			bytes_sent = 0;
	}

	/* We'll send in chunks of SPI_MAX_LEN if larger than
	 * 4K bytes for targets that have only 12 bits in
	 * QUP_MAX_OUTPUT_CNT register. If the target supports
	 * more than 12bits then we send the data in chunks of
	 * the infinite_mode value that is defined in the
	 * corresponding board file.
	 */
	if (!dd->pdata->infinite_mode)
		dd->max_trfr_len = SPI_MAX_LEN;
	else
		dd->max_trfr_len = (dd->pdata->infinite_mode) *
					(dd->bytes_per_word);

	bytes_to_send = min_t(u32, dd->tx_bytes_remaining,
				dd->max_trfr_len);

	num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
	dd->tx_unaligned_len = bytes_to_send % dd->output_burst_size;
	dd->rx_unaligned_len = bytes_to_send % dd->input_burst_size;
	tx_num_rows = bytes_to_send / dd->output_burst_size;
	rx_num_rows = bytes_to_send / dd->input_burst_size;

	dd->mode = SPI_DMOV_MODE;

	if (tx_num_rows) {
		/* src in 16 MSB, dst in 16 LSB */
		box = &dd->tx_dmov_cmd->box;
		box->src_row_addr = dd->cur_transfer->tx_dma + bytes_sent;
		box->src_dst_len = (dd->output_burst_size << 16) |
					dd->output_burst_size;
		box->num_rows = (tx_num_rows << 16) | tx_num_rows;
		box->row_offset = (dd->output_burst_size << 16) | 0;

		dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
				offsetof(struct spi_dmov_cmd, box));
	} else {
		dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
				offsetof(struct spi_dmov_cmd, single_pad));
	}

	if (rx_num_rows) {
		/* src in 16 MSB, dst in 16 LSB */
		box = &dd->rx_dmov_cmd->box;
		box->dst_row_addr = dd->cur_transfer->rx_dma + bytes_sent;
		box->src_dst_len = (dd->input_burst_size << 16) |
					dd->input_burst_size;
		box->num_rows = (rx_num_rows << 16) | rx_num_rows;
		box->row_offset = (0 << 16) | dd->input_burst_size;

		dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
				offsetof(struct spi_dmov_cmd, box));
	} else {
		dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
				offsetof(struct spi_dmov_cmd, single_pad));
	}

	if (!dd->tx_unaligned_len) {
		dd->tx_dmov_cmd->box.cmd |= CMD_LC;
	} else {
		dmov_s *tx_cmd = &(dd->tx_dmov_cmd->single_pad);
		u32 tx_offset = dd->cur_transfer->len - dd->tx_unaligned_len;

		if ((dd->multi_xfr) && (dd->read_len <= 0))
			tx_offset = dd->cur_msg_len - dd->tx_unaligned_len;

		dd->tx_dmov_cmd->box.cmd &= ~CMD_LC;

		memset(dd->tx_padding, 0, dd->output_burst_size);
		if (dd->write_buf)
			memcpy(dd->tx_padding, dd->write_buf + tx_offset,
				dd->tx_unaligned_len);

		tx_cmd->src = dd->tx_padding_dma;
		tx_cmd->len = dd->output_burst_size;
	}

	if (!dd->rx_unaligned_len) {
		dd->rx_dmov_cmd->box.cmd |= CMD_LC;
	} else {
		dmov_s *rx_cmd = &(dd->rx_dmov_cmd->single_pad);

		dd->rx_dmov_cmd->box.cmd &= ~CMD_LC;

		memset(dd->rx_padding, 0, dd->input_burst_size);
		rx_cmd->dst = dd->rx_padding_dma;
		rx_cmd->len = dd->input_burst_size;
	}

	/* This also takes care of the padding dummy buf
	   Since this is set to the correct length, the
	   dummy bytes won't be actually sent */
	if (dd->multi_xfr) {
		u32 write_transfers = 0;
		u32 read_transfers = 0;

		if (dd->write_len > 0) {
			write_transfers = DIV_ROUND_UP(dd->write_len,
						dd->bytes_per_word);
			writel_relaxed(write_transfers,
					dd->base + SPI_MX_OUTPUT_COUNT);
		}
		if (dd->read_len > 0) {
			/*
			 * The read following a write transfer must take
			 * into account, that the bytes pertaining to
			 * the write transfer needs to be discarded,
			 * before the actual read begins.
			 */
			read_transfers = DIV_ROUND_UP(dd->read_len +
						dd->write_len,
						dd->bytes_per_word);
			writel_relaxed(read_transfers,
					dd->base + SPI_MX_INPUT_COUNT);
		}
	} else {
		if (dd->write_buf)
			writel_relaxed(num_transfers,
					dd->base + SPI_MX_OUTPUT_COUNT);
		if (dd->read_buf)
			writel_relaxed(num_transfers,
					dd->base + SPI_MX_INPUT_COUNT);
	}
}
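
/* Queue the prepared TX/RX command lists on the Data Mover channels. */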
static void msm_spi_enqueue_dm_commands(struct msm_spi *dd)
{
	dma_coherent_pre_ops();
	if (dd->write_buf)
		msm_dmov_enqueue_cmd(dd->tx_dma_chan, &dd->tx_hdr);
	if (dd->read_buf)
		msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
}

/* The SPI core on targets that do not support infinite mode can send a
   maximum of 4K or 64K transfers, depending on the size of the
   MAX_OUTPUT_COUNT register. Therefore, we send the data in several
   chunks. Upon completion we send the next chunk, or complete the
   transfer if everything is finished. On targets that support
   infinite mode, we send all the bytes in a single chunk.
*/
static int msm_spi_dm_send_next(struct msm_spi *dd)
{
	/* By now we should have sent all the bytes in FIFO mode;
	 * however, to make things right, we'll check anyway.
	 */
	if (dd->mode != SPI_DMOV_MODE)
		return 0;

	/* On targets which do not support infinite mode,
	   we need to send more chunks if we sent the max last time */
	if (dd->tx_bytes_remaining > dd->max_trfr_len) {
		dd->tx_bytes_remaining -= dd->max_trfr_len;
		if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
			return 0;
		dd->read_len = dd->write_len = 0;
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
			return 0;
		return 1;
	} else if (dd->read_len && dd->write_len) {
		dd->tx_bytes_remaining -= dd->cur_transfer->len;
		if (list_is_last(&dd->cur_transfer->transfer_list,
					&dd->cur_msg->transfers))
			return 0;
		get_next_transfer(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
			return 0;
		dd->tx_bytes_remaining = dd->read_len + dd->write_len;
		dd->read_buf = dd->temp_buf;
		dd->read_len = dd->write_len = -1;
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
			return 0;
		return 1;
	}
	return 0;
}

static int msm_spi_dma_send_next(struct msm_spi *dd)
{
	int ret = 0;

	if (dd->mode == SPI_DMOV_MODE)
		ret = msm_spi_dm_send_next(dd);
	if (dd->mode == SPI_BAM_MODE)
		ret = msm_spi_bam_next_transfer(dd);
	return ret;
}

static inline void msm_spi_ack_transfer(struct msm_spi *dd)
{
	writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
		       SPI_OP_MAX_OUTPUT_DONE_FLAG,
		       dd->base + SPI_OPERATIONAL);
	/* Ensure done flag was cleared before proceeding further */
	mb();
}

/* Figure out which irq occurred and call the relevant functions */
static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
{
	u32 op, ret = IRQ_NONE;
	struct msm_spi *dd = dev_id;

	if (pm_runtime_suspended(dd->dev)) {
		dev_warn(dd->dev, "QUP: pm runtime suspend, irq:%d\n", irq);
		return ret;
	}
	if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
	    readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
		struct spi_master *master = dev_get_drvdata(dd->dev);

		ret |= msm_spi_error_irq(irq, master);
	}

	op = readl_relaxed(dd->base + SPI_OPERATIONAL);
	if (op & SPI_OP_INPUT_SERVICE_FLAG) {
		writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
			       dd->base + SPI_OPERATIONAL);
		/*
		 * Ensure service flag was cleared before further
		 * processing of interrupt.
		 */
		mb();
		ret |= msm_spi_input_irq(irq, dev_id);
	}

	if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
		writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
			       dd->base + SPI_OPERATIONAL);
		/*
		 * Ensure service flag was cleared before further
		 * processing of interrupt.
		 */
		mb();
		ret |= msm_spi_output_irq(irq, dev_id);
	}

	if (dd->done) {
		complete(&dd->transfer_complete);
		dd->done = 0;
	}
	return ret;
}
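
/*
 * Input-service interrupt: in DMOV mode acknowledge and complete the
 * transfer once both directions are done; in FIFO mode drain the input
 * FIFO word by word.
 */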
static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
{
	struct msm_spi *dd = dev_id;

	dd->stat_rx++;

	if (dd->mode == SPI_MODE_NONE)
		return IRQ_HANDLED;

	if (dd->mode == SPI_DMOV_MODE) {
		u32 op = readl_relaxed(dd->base + SPI_OPERATIONAL);

		if ((!dd->read_buf || op & SPI_OP_MAX_INPUT_DONE_FLAG) &&
		    (!dd->write_buf || op & SPI_OP_MAX_OUTPUT_DONE_FLAG)) {
			msm_spi_ack_transfer(dd);
			if (atomic_inc_return(&dd->rx_irq_called) == 1)
				return IRQ_HANDLED;
			msm_spi_complete(dd);
			return IRQ_HANDLED;
		}
		return IRQ_NONE;
	}

	if (dd->mode == SPI_FIFO_MODE) {
		while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
			SPI_OP_IP_FIFO_NOT_EMPTY) &&
			(dd->rx_bytes_remaining > 0)) {
			msm_spi_read_word_from_fifo(dd);
		}
		if (dd->rx_bytes_remaining == 0)
			msm_spi_complete(dd);
	}

	return IRQ_HANDLED;
}
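
/*
 * Pack up to bytes_per_word bytes (MSB first) into one word and push it
 * to the output FIFO, advancing to the next transfer of a multi-transfer
 * message when write_len bytes have been sent.
 */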
static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
{
	u32 word;
	u8 byte;
	int i;

	word = 0;
	if (dd->write_buf) {
		for (i = 0; (i < dd->bytes_per_word) &&
			dd->tx_bytes_remaining; i++) {
			dd->tx_bytes_remaining--;
			byte = *dd->write_buf++;
			word |= (byte << (BITS_PER_BYTE * (3 - i)));
		}
	} else
		if (dd->tx_bytes_remaining > dd->bytes_per_word)
			dd->tx_bytes_remaining -= dd->bytes_per_word;
		else
			dd->tx_bytes_remaining = 0;

	dd->write_xfr_cnt++;
	if (dd->multi_xfr) {
		if (!dd->tx_bytes_remaining)
			dd->write_xfr_cnt = 0;
		else if ((dd->write_xfr_cnt * dd->bytes_per_word) ==
						dd->write_len) {
			struct spi_transfer *t = dd->cur_tx_transfer;

			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				t = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
				dd->write_buf = t->tx_buf;
				dd->write_len = t->len;
				dd->write_xfr_cnt = 0;
				dd->cur_tx_transfer = t;
			}
		}
	}
	writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
}

static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
{
	int count = 0;

	while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
	       !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
		SPI_OP_OUTPUT_FIFO_FULL)) {
		msm_spi_write_word_to_fifo(dd);
		count++;
	}
}
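
/*
 * Output-service interrupt: completes TX-only DMOV transactions and
 * refills the output FIFO in FIFO mode.
 */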
static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
{
	struct msm_spi *dd = dev_id;

	dd->stat_tx++;

	if (dd->mode == SPI_MODE_NONE)
		return IRQ_HANDLED;

	if (dd->mode == SPI_DMOV_MODE) {
		/* TX_ONLY transaction is handled here
		   This is the only place we send complete at tx and not rx */
		if (dd->read_buf == NULL &&
		    readl_relaxed(dd->base + SPI_OPERATIONAL) &
		    SPI_OP_MAX_OUTPUT_DONE_FLAG) {
			msm_spi_ack_transfer(dd);
			if (atomic_inc_return(&dd->tx_irq_called) == 1)
				return IRQ_HANDLED;
			msm_spi_complete(dd);
			return IRQ_HANDLED;
		}
		return IRQ_NONE;
	}

	/* Output FIFO is empty. Transmit any outstanding write data. */
	if (dd->mode == SPI_FIFO_MODE)
		msm_spi_write_rmn_to_fifo(dd);

	return IRQ_HANDLED;
}
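
/* Report and clear QUP/SPI error flags (over/underruns and clock errors). */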
static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct msm_spi *dd = spi_master_get_devdata(master);
	u32 spi_err;

	spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
	if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI output overrun error\n");
	if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI input underrun error\n");
	if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI output underrun error\n");
	msm_spi_get_clk_err(dd, &spi_err);
	if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI clock overrun error\n");
	if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI clock underrun error\n");
	msm_spi_clear_error_flags(dd);
	msm_spi_ack_clk_err(dd);
	/* Ensure clearing of QUP_ERROR_FLAGS was completed */
	mb();
	return IRQ_HANDLED;
}
  1237. /**
  1238. * msm_spi_dmov_map_buffers: prepares buffer for DMA transfer
  1239. * @return zero on success or negative error code
  1240. *
  1241. * calls dma_map_single() on the read/write buffers, effectively invalidating
1242. * their cache entries. For WR-WR and WR-RD transfers, allocates a temporary
1243. * buffer and copies the data to/from the client buffers
  1244. */
  1245. static int msm_spi_dmov_map_buffers(struct msm_spi *dd)
  1246. {
  1247. struct device *dev;
  1248. struct spi_transfer *first_xfr;
  1249. struct spi_transfer *nxt_xfr = NULL;
  1250. void *tx_buf, *rx_buf;
  1251. unsigned tx_len, rx_len;
  1252. int ret = -EINVAL;
  1253. dev = &dd->cur_msg->spi->dev;
  1254. first_xfr = dd->cur_transfer;
  1255. tx_buf = (void *)first_xfr->tx_buf;
  1256. rx_buf = first_xfr->rx_buf;
  1257. tx_len = rx_len = first_xfr->len;
  1258. /*
  1259. * For WR-WR and WR-RD transfers, we allocate our own temporary
  1260. * buffer and copy the data to/from the client buffers.
  1261. */
  1262. if (!dd->qup_ver && dd->multi_xfr) {
  1263. dd->temp_buf = kzalloc(dd->cur_msg_len,
  1264. GFP_KERNEL | __GFP_DMA);
  1265. if (!dd->temp_buf)
  1266. return -ENOMEM;
  1267. nxt_xfr = list_entry(first_xfr->transfer_list.next,
  1268. struct spi_transfer, transfer_list);
  1269. if (dd->write_len && !dd->read_len) {
  1270. if (!first_xfr->tx_buf || !nxt_xfr->tx_buf)
  1271. goto error;
  1272. memcpy(dd->temp_buf, first_xfr->tx_buf, first_xfr->len);
  1273. memcpy(dd->temp_buf + first_xfr->len, nxt_xfr->tx_buf,
  1274. nxt_xfr->len);
  1275. tx_buf = dd->temp_buf;
  1276. tx_len = dd->cur_msg_len;
  1277. } else {
  1278. if (!first_xfr->tx_buf || !nxt_xfr->rx_buf)
  1279. goto error;
  1280. rx_buf = dd->temp_buf;
  1281. rx_len = dd->cur_msg_len;
  1282. }
  1283. }
  1284. if (tx_buf != NULL) {
  1285. first_xfr->tx_dma = dma_map_single(dev, tx_buf,
  1286. tx_len, DMA_TO_DEVICE);
  1287. if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
  1288. dev_err(dev, "dma %cX %d bytes error\n",
  1289. 'T', tx_len);
  1290. ret = -ENOMEM;
  1291. goto error;
  1292. }
  1293. }
  1294. if (rx_buf != NULL) {
  1295. dma_addr_t dma_handle;
  1296. dma_handle = dma_map_single(dev, rx_buf,
  1297. rx_len, DMA_FROM_DEVICE);
  1298. if (dma_mapping_error(NULL, dma_handle)) {
  1299. dev_err(dev, "dma %cX %d bytes error\n",
  1300. 'R', rx_len);
  1301. if (tx_buf != NULL)
  1302. dma_unmap_single(NULL, first_xfr->tx_dma,
  1303. tx_len, DMA_TO_DEVICE);
  1304. ret = -ENOMEM;
  1305. goto error;
  1306. }
  1307. if (dd->multi_xfr)
  1308. nxt_xfr->rx_dma = dma_handle;
  1309. else
  1310. first_xfr->rx_dma = dma_handle;
  1311. }
  1312. return 0;
  1313. error:
  1314. kfree(dd->temp_buf);
  1315. dd->temp_buf = NULL;
  1316. return ret;
  1317. }
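/*
 * DMA-map the TX/RX buffers of every transfer grouped into the current BAM
 * transaction; on failure, any mappings made so far are undone.
 */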
  1318. static int msm_spi_bam_map_buffers(struct msm_spi *dd)
  1319. {
  1320. int ret = -EINVAL;
  1321. struct device *dev;
  1322. struct spi_transfer *first_xfr;
  1323. struct spi_transfer *nxt_xfr;
  1324. void *tx_buf, *rx_buf;
  1325. u32 tx_len, rx_len;
  1326. int num_xfrs_grped = dd->num_xfrs_grped;
  1327. dev = dd->dev;
  1328. first_xfr = dd->cur_transfer;
  1329. do {
  1330. tx_buf = (void *)first_xfr->tx_buf;
  1331. rx_buf = first_xfr->rx_buf;
  1332. tx_len = rx_len = first_xfr->len;
  1333. if (tx_buf != NULL) {
  1334. first_xfr->tx_dma = dma_map_single(dev, tx_buf,
  1335. tx_len, DMA_TO_DEVICE);
  1336. if (dma_mapping_error(dev, first_xfr->tx_dma)) {
  1337. ret = -ENOMEM;
  1338. goto error;
  1339. }
  1340. }
  1341. if (rx_buf != NULL) {
  1342. first_xfr->rx_dma = dma_map_single(dev, rx_buf, rx_len,
  1343. DMA_FROM_DEVICE);
  1344. if (dma_mapping_error(dev, first_xfr->rx_dma)) {
  1345. if (tx_buf != NULL)
  1346. dma_unmap_single(dev,
  1347. first_xfr->tx_dma,
  1348. tx_len, DMA_TO_DEVICE);
  1349. ret = -ENOMEM;
  1350. goto error;
  1351. }
  1352. }
  1353. nxt_xfr = list_entry(first_xfr->transfer_list.next,
  1354. struct spi_transfer, transfer_list);
  1355. if (nxt_xfr == NULL)
  1356. break;
  1357. num_xfrs_grped--;
  1358. first_xfr = nxt_xfr;
  1359. } while (num_xfrs_grped > 0);
  1360. return 0;
  1361. error:
  1362. msm_spi_dma_unmap_buffers(dd);
  1363. return ret;
  1364. }
  1365. static int msm_spi_dma_map_buffers(struct msm_spi *dd)
  1366. {
  1367. int ret = 0;
  1368. if (dd->mode == SPI_DMOV_MODE)
  1369. ret = msm_spi_dmov_map_buffers(dd);
  1370. else if (dd->mode == SPI_BAM_MODE)
  1371. ret = msm_spi_bam_map_buffers(dd);
  1372. return ret;
  1373. }
  1374. static void msm_spi_dmov_unmap_buffers(struct msm_spi *dd)
  1375. {
  1376. struct device *dev;
  1377. u32 offset;
  1378. dev = &dd->cur_msg->spi->dev;
  1379. if (dd->cur_msg->is_dma_mapped)
  1380. goto unmap_end;
  1381. if (dd->multi_xfr) {
  1382. if (dd->write_len && !dd->read_len) {
  1383. dma_unmap_single(dev,
  1384. dd->cur_transfer->tx_dma,
  1385. dd->cur_msg_len,
  1386. DMA_TO_DEVICE);
  1387. } else {
  1388. struct spi_transfer *prev_xfr;
  1389. prev_xfr = list_entry(
  1390. dd->cur_transfer->transfer_list.prev,
  1391. struct spi_transfer,
  1392. transfer_list);
  1393. if (dd->cur_transfer->rx_buf) {
  1394. dma_unmap_single(dev,
  1395. dd->cur_transfer->rx_dma,
  1396. dd->cur_msg_len,
  1397. DMA_FROM_DEVICE);
  1398. }
  1399. if (prev_xfr->tx_buf) {
  1400. dma_unmap_single(dev,
  1401. prev_xfr->tx_dma,
  1402. prev_xfr->len,
  1403. DMA_TO_DEVICE);
  1404. }
  1405. if (dd->rx_unaligned_len && dd->read_buf) {
  1406. offset = dd->cur_msg_len - dd->rx_unaligned_len;
  1407. dma_coherent_post_ops();
  1408. memcpy(dd->read_buf + offset, dd->rx_padding,
  1409. dd->rx_unaligned_len);
  1410. if (dd->cur_transfer->rx_buf)
  1411. memcpy(dd->cur_transfer->rx_buf,
  1412. dd->read_buf + prev_xfr->len,
  1413. dd->cur_transfer->len);
  1414. }
  1415. }
  1416. kfree(dd->temp_buf);
  1417. dd->temp_buf = NULL;
  1418. return;
  1419. } else {
  1420. if (dd->cur_transfer->rx_buf)
  1421. dma_unmap_single(dev, dd->cur_transfer->rx_dma,
  1422. dd->cur_transfer->len,
  1423. DMA_FROM_DEVICE);
  1424. if (dd->cur_transfer->tx_buf)
  1425. dma_unmap_single(dev, dd->cur_transfer->tx_dma,
  1426. dd->cur_transfer->len,
  1427. DMA_TO_DEVICE);
  1428. }
  1429. unmap_end:
  1430. /* If we padded the transfer, we copy it from the padding buf */
  1431. if (dd->rx_unaligned_len && dd->read_buf) {
  1432. offset = dd->cur_transfer->len - dd->rx_unaligned_len;
  1433. dma_coherent_post_ops();
  1434. memcpy(dd->read_buf + offset, dd->rx_padding,
  1435. dd->rx_unaligned_len);
  1436. }
  1437. }
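/*
 * Undo the dma_map_single() mappings created for the grouped BAM transfers,
 * unless the client already provided DMA-mapped buffers.
 */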
  1438. static void msm_spi_bam_unmap_buffers(struct msm_spi *dd)
  1439. {
  1440. struct device *dev;
  1441. int num_xfrs_grped = dd->num_xfrs_grped;
  1442. struct spi_transfer *first_xfr;
  1443. struct spi_transfer *nxt_xfr;
  1444. void *tx_buf, *rx_buf;
  1445. u32 tx_len, rx_len;
  1446. dev = &dd->cur_msg->spi->dev;
  1447. first_xfr = dd->cur_transfer;
  1448. /* mapped by client */
  1449. if (dd->cur_msg->is_dma_mapped)
  1450. return;
  1451. do {
  1452. tx_buf = (void *)first_xfr->tx_buf;
  1453. rx_buf = first_xfr->rx_buf;
  1454. tx_len = rx_len = first_xfr->len;
  1455. if (tx_buf != NULL)
  1456. dma_unmap_single(dev, first_xfr->tx_dma,
  1457. tx_len, DMA_TO_DEVICE);
  1458. if (rx_buf != NULL)
  1459. dma_unmap_single(dev, first_xfr->rx_dma,
  1460. rx_len, DMA_FROM_DEVICE);
  1461. nxt_xfr = list_entry(first_xfr->transfer_list.next,
  1462. struct spi_transfer, transfer_list);
  1463. if (nxt_xfr == NULL)
  1464. break;
  1465. num_xfrs_grped--;
  1466. first_xfr = nxt_xfr;
  1467. } while (num_xfrs_grped > 0);
  1468. }
  1469. static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd)
  1470. {
  1471. if (dd->mode == SPI_DMOV_MODE)
  1472. msm_spi_dmov_unmap_buffers(dd);
  1473. else if (dd->mode == SPI_BAM_MODE)
  1474. msm_spi_bam_unmap_buffers(dd);
  1475. }
  1476. /**
  1477. * msm_spi_use_dma - decides whether to use Data-Mover or BAM for
  1478. * the given transfer
  1479. * @dd: device
  1480. * @tr: transfer
  1481. *
  1482. * Start using DMA if:
  1483. * 1. Is supported by HW
1484. * 2. Is not disabled by platform data
  1485. * 3. Transfer size is greater than 3*block size.
  1486. * 4. Buffers are aligned to cache line.
1487. * 5. Bits-per-word is 8, 16, or 32.
  1488. */
  1489. static inline bool
  1490. msm_spi_use_dma(struct msm_spi *dd, struct spi_transfer *tr, u8 bpw)
  1491. {
  1492. if (!dd->use_dma)
  1493. return false;
  1494. /* check constraints from platform data */
  1495. if ((dd->qup_ver == SPI_QUP_VERSION_BFAM) && !dd->pdata->use_bam)
  1496. return false;
  1497. if (dd->cur_msg_len < 3*dd->input_block_size)
  1498. return false;
  1499. if ((dd->qup_ver != SPI_QUP_VERSION_BFAM) &&
  1500. dd->multi_xfr && !dd->read_len && !dd->write_len)
  1501. return false;
  1502. if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
  1503. u32 cache_line = dma_get_cache_alignment();
  1504. if (tr->tx_buf) {
  1505. if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
1506. return false;
  1507. }
  1508. if (tr->rx_buf) {
  1509. if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
  1510. return false;
  1511. }
  1512. if (tr->cs_change &&
  1513. ((bpw != 8) && (bpw != 16) && (bpw != 32)))
  1514. return false;
  1515. }
  1516. return true;
  1517. }
  1518. /**
  1519. * msm_spi_set_transfer_mode: Chooses optimal transfer mode. Sets dd->mode and
  1520. * prepares to process a transfer.
  1521. */
  1522. static void
  1523. msm_spi_set_transfer_mode(struct msm_spi *dd, u8 bpw, u32 read_count)
  1524. {
  1525. if (msm_spi_use_dma(dd, dd->cur_transfer, bpw)) {
  1526. if (dd->qup_ver) {
  1527. dd->mode = SPI_BAM_MODE;
  1528. } else {
  1529. dd->mode = SPI_DMOV_MODE;
  1530. if (dd->write_len && dd->read_len) {
  1531. dd->tx_bytes_remaining = dd->write_len;
  1532. dd->rx_bytes_remaining = dd->read_len;
  1533. }
  1534. }
  1535. } else {
  1536. dd->mode = SPI_FIFO_MODE;
  1537. if (dd->multi_xfr) {
  1538. dd->read_len = dd->cur_transfer->len;
  1539. dd->write_len = dd->cur_transfer->len;
  1540. }
  1541. }
  1542. }
  1543. /**
  1544. * msm_spi_set_qup_io_modes: prepares register QUP_IO_MODES to process a
  1545. * transfer
  1546. */
  1547. static void msm_spi_set_qup_io_modes(struct msm_spi *dd)
  1548. {
  1549. u32 spi_iom;
  1550. spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
  1551. /* Set input and output transfer mode: FIFO, DMOV, or BAM */
  1552. spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
  1553. spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
  1554. spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
  1555. /* Turn on packing for data mover */
  1556. if ((dd->mode == SPI_DMOV_MODE) || (dd->mode == SPI_BAM_MODE))
  1557. spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
  1558. else
  1559. spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
  1560. /*if (dd->mode == SPI_BAM_MODE) {
  1561. spi_iom |= SPI_IO_C_NO_TRI_STATE;
  1562. spi_iom &= ~(SPI_IO_C_CS_SELECT | SPI_IO_C_CS_N_POLARITY);
  1563. }*/
  1564. writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
  1565. }
  1566. static u32 msm_spi_calc_spi_ioc_clk_polarity(u32 spi_ioc, u8 mode)
  1567. {
  1568. if (mode & SPI_CPOL)
  1569. spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
  1570. else
  1571. spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
  1572. return spi_ioc;
  1573. }
  1574. /**
  1575. * msm_spi_set_spi_io_control: prepares register SPI_IO_CONTROL to process the
  1576. * next transfer
  1577. * @return the new set value of SPI_IO_CONTROL
  1578. */
  1579. static u32 msm_spi_set_spi_io_control(struct msm_spi *dd)
  1580. {
  1581. u32 spi_ioc, spi_ioc_orig, chip_select;
  1582. spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
  1583. spi_ioc_orig = spi_ioc;
  1584. spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc
  1585. , dd->cur_msg->spi->mode);
  1586. /* Set chip-select */
  1587. chip_select = dd->cur_msg->spi->chip_select << 2;
  1588. if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
  1589. spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
  1590. if (!dd->cur_transfer->cs_change)
  1591. spi_ioc |= SPI_IO_C_MX_CS_MODE;
  1592. if (spi_ioc != spi_ioc_orig)
  1593. writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
  1594. return spi_ioc;
  1595. }
  1596. /**
  1597. * msm_spi_set_qup_op_mask: prepares register QUP_OPERATIONAL_MASK to process
  1598. * the next transfer
  1599. */
  1600. static void msm_spi_set_qup_op_mask(struct msm_spi *dd)
  1601. {
1602. /* Mask the INPUT and OUTPUT service flags to prevent IRQs on FIFO status
1603. * changes in BAM mode */
  1604. u32 mask = (dd->mode == SPI_BAM_MODE) ?
  1605. QUP_OP_MASK_OUTPUT_SERVICE_FLAG | QUP_OP_MASK_INPUT_SERVICE_FLAG
  1606. : 0;
  1607. writel_relaxed(mask, dd->base + QUP_OPERATIONAL_MASK);
  1608. }
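/*
 * Run a single transfer (or a grouped set in BAM mode): pick word size and
 * clock speed, choose FIFO/DMOV/BAM mode, program the QUP/SPI registers,
 * start the transfer and wait for completion with a timeout scaled to the
 * transfer length.
 */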
  1609. static void msm_spi_process_transfer(struct msm_spi *dd)
  1610. {
  1611. u8 bpw;
  1612. u32 max_speed;
  1613. u32 read_count;
  1614. u32 timeout;
  1615. u32 spi_ioc;
  1616. u32 int_loopback = 0;
  1617. int ret;
  1618. dd->tx_bytes_remaining = dd->cur_msg_len;
  1619. dd->rx_bytes_remaining = dd->cur_msg_len;
  1620. dd->read_buf = dd->cur_transfer->rx_buf;
  1621. dd->write_buf = dd->cur_transfer->tx_buf;
  1622. init_completion(&dd->transfer_complete);
  1623. if (dd->cur_transfer->bits_per_word)
  1624. bpw = dd->cur_transfer->bits_per_word;
  1625. else
  1626. if (dd->cur_msg->spi->bits_per_word)
  1627. bpw = dd->cur_msg->spi->bits_per_word;
  1628. else
  1629. bpw = 8;
  1630. dd->bytes_per_word = (bpw + 7) / 8;
  1631. if (dd->cur_transfer->speed_hz)
  1632. max_speed = dd->cur_transfer->speed_hz;
  1633. else
  1634. max_speed = dd->cur_msg->spi->max_speed_hz;
  1635. if (!dd->clock_speed || max_speed != dd->clock_speed)
  1636. msm_spi_clock_set(dd, max_speed);
  1637. timeout = 100 * msecs_to_jiffies(
  1638. DIV_ROUND_UP(dd->cur_msg_len * 8,
  1639. DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));
  1640. read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
  1641. if (dd->cur_msg->spi->mode & SPI_LOOP)
  1642. int_loopback = 1;
  1643. if (int_loopback && dd->multi_xfr &&
  1644. (read_count > dd->input_fifo_size)) {
  1645. if (dd->read_len && dd->write_len)
  1646. pr_err(
  1647. "%s:Internal Loopback does not support > fifo size"
  1648. "for write-then-read transactions\n",
  1649. __func__);
  1650. else if (dd->write_len && !dd->read_len)
  1651. pr_err(
  1652. "%s:Internal Loopback does not support > fifo size"
  1653. "for write-then-write transactions\n",
  1654. __func__);
  1655. return;
  1656. }
  1657. if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
  1658. dev_err(dd->dev,
  1659. "%s: Error setting QUP to reset-state",
  1660. __func__);
  1661. msm_spi_set_transfer_mode(dd, bpw, read_count);
  1662. msm_spi_set_mx_counts(dd, read_count);
  1663. if (dd->mode == SPI_DMOV_MODE) {
  1664. ret = msm_spi_dma_map_buffers(dd);
  1665. if (ret < 0) {
  1666. pr_err("Mapping DMA buffers\n");
  1667. dd->cur_msg->status = ret;
  1668. return;
  1669. }
  1670. } else if (dd->mode == SPI_BAM_MODE) {
  1671. if (msm_spi_dma_map_buffers(dd) < 0) {
  1672. pr_err("Mapping DMA buffers\n");
  1673. return;
  1674. }
  1675. }
  1676. msm_spi_set_qup_io_modes(dd);
  1677. msm_spi_set_spi_config(dd, bpw);
  1678. msm_spi_set_qup_config(dd, bpw);
  1679. spi_ioc = msm_spi_set_spi_io_control(dd);
  1680. msm_spi_set_qup_op_mask(dd);
  1681. if (dd->mode == SPI_DMOV_MODE) {
  1682. msm_spi_setup_dm_transfer(dd);
  1683. msm_spi_enqueue_dm_commands(dd);
  1684. }
  1685. /* The output fifo interrupt handler will handle all writes after
  1686. the first. Restricting this to one write avoids contention
  1687. issues and race conditions between this thread and the int handler
  1688. */
  1689. else if (dd->mode == SPI_FIFO_MODE) {
  1690. if (msm_spi_prepare_for_write(dd))
  1691. goto transfer_end;
  1692. msm_spi_start_write(dd, read_count);
  1693. } else if (dd->mode == SPI_BAM_MODE) {
  1694. if ((msm_spi_bam_begin_transfer(dd)) < 0) {
  1695. dev_err(dd->dev, "%s: BAM transfer setup failed\n",
  1696. __func__);
  1697. dd->cur_msg->status = -EIO;
  1698. goto transfer_end;
  1699. }
  1700. }
  1701. /*
1702. * In BAM mode, the core is already in the RUN state at this point.
  1703. * Only enter the RUN state after the first word is written into
  1704. * the output FIFO. Otherwise, the output FIFO EMPTY interrupt
  1705. * might fire before the first word is written resulting in a
  1706. * possible race condition.
  1707. */
  1708. if (dd->mode != SPI_BAM_MODE)
  1709. if (msm_spi_set_state(dd, SPI_OP_STATE_RUN)) {
  1710. dev_warn(dd->dev,
  1711. "%s: Failed to set QUP to run-state. Mode:%d",
  1712. __func__, dd->mode);
  1713. goto transfer_end;
  1714. }
  1715. /* Assume success, this might change later upon transaction result */
  1716. dd->cur_msg->status = 0;
  1717. do {
  1718. if (!wait_for_completion_timeout(&dd->transfer_complete,
  1719. timeout)) {
  1720. dev_err(dd->dev,
  1721. "%s: SPI transaction timeout\n",
  1722. __func__);
  1723. dd->cur_msg->status = -EIO;
  1724. if (dd->mode == SPI_DMOV_MODE) {
  1725. msm_dmov_flush(dd->tx_dma_chan, 1);
  1726. msm_dmov_flush(dd->rx_dma_chan, 1);
  1727. }
  1728. if (dd->mode == SPI_BAM_MODE)
  1729. msm_spi_bam_flush(dd);
  1730. break;
  1731. }
  1732. } while (msm_spi_dma_send_next(dd));
  1733. msm_spi_udelay(dd->xfrs_delay_usec);
  1734. transfer_end:
  1735. if (dd->mode == SPI_BAM_MODE)
  1736. msm_spi_bam_flush(dd);
  1737. msm_spi_dma_unmap_buffers(dd);
  1738. dd->mode = SPI_MODE_NONE;
  1739. msm_spi_set_state(dd, SPI_OP_STATE_RESET);
  1740. writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
  1741. dd->base + SPI_IO_CONTROL);
  1742. }
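/*
 * Walk the message and record the total byte count, whether it contains
 * multiple transfers, and the aggregate read/write lengths for the
 * two-transfer WR-WR and WR-RD cases.
 */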
  1743. static void get_transfer_length(struct msm_spi *dd)
  1744. {
  1745. struct spi_transfer *tr;
  1746. int num_xfrs = 0;
  1747. int readlen = 0;
  1748. int writelen = 0;
  1749. dd->cur_msg_len = 0;
  1750. dd->multi_xfr = 0;
  1751. dd->read_len = dd->write_len = 0;
  1752. list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) {
  1753. if (tr->tx_buf)
  1754. writelen += tr->len;
  1755. if (tr->rx_buf)
  1756. readlen += tr->len;
  1757. dd->cur_msg_len += tr->len;
  1758. num_xfrs++;
  1759. }
  1760. if (num_xfrs == 2) {
  1761. struct spi_transfer *first_xfr = dd->cur_transfer;
  1762. dd->multi_xfr = 1;
  1763. tr = list_entry(first_xfr->transfer_list.next,
  1764. struct spi_transfer,
  1765. transfer_list);
  1766. /*
  1767. * We update dd->read_len and dd->write_len only
  1768. * for WR-WR and WR-RD transfers.
  1769. */
  1770. if ((first_xfr->tx_buf) && (!first_xfr->rx_buf)) {
  1771. if (((tr->tx_buf) && (!tr->rx_buf)) ||
  1772. ((!tr->tx_buf) && (tr->rx_buf))) {
  1773. dd->read_len = readlen;
  1774. dd->write_len = writelen;
  1775. }
  1776. }
  1777. } else if (num_xfrs > 1)
  1778. dd->multi_xfr = 1;
  1779. }
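/*
 * Assert or deassert the FORCE_CS bit so chip-select can be held across
 * grouped transfers.
 */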
  1780. static inline void write_force_cs(struct msm_spi *dd, bool set_flag)
  1781. {
  1782. u32 spi_ioc;
  1783. u32 spi_ioc_orig;
  1784. spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
  1785. spi_ioc_orig = spi_ioc;
  1786. if (set_flag)
  1787. spi_ioc |= SPI_IO_C_FORCE_CS;
  1788. else
  1789. spi_ioc &= ~SPI_IO_C_FORCE_CS;
  1790. if (spi_ioc != spi_ioc_orig)
  1791. writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
  1792. }
  1793. static inline int combine_transfers(struct msm_spi *dd)
  1794. {
  1795. int xfrs_grped = 1;
  1796. dd->xfrs_delay_usec = 0;
  1797. dd->bam.bam_rx_len = dd->bam.bam_tx_len = 0;
  1798. dd->cur_msg_len = dd->cur_transfer->len;
  1799. if (dd->cur_transfer->tx_buf)
  1800. dd->bam.bam_tx_len += dd->cur_transfer->len;
  1801. if (dd->cur_transfer->rx_buf)
  1802. dd->bam.bam_rx_len += dd->cur_transfer->len;
  1803. dd->xfrs_delay_usec = dd->cur_transfer->delay_usecs;
  1804. return xfrs_grped;
  1805. }
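/*
 * Process one SPI message: request the chip-select GPIO, classify the
 * transfer list, and hand each transfer (or WR-WR/WR-RD pair) to
 * msm_spi_process_transfer(), toggling forced chip-select on QUP v2.
 */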
  1806. static void msm_spi_process_message(struct msm_spi *dd)
  1807. {
  1808. int xfrs_grped = 0;
  1809. int rc;
  1810. dd->num_xfrs_grped = 0;
  1811. dd->bam.curr_rx_bytes_recvd = dd->bam.curr_tx_bytes_sent = 0;
  1812. dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
  1813. rc = msm_spi_request_cs_gpio(dd);
  1814. if (rc)
  1815. return;
  1816. dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
  1817. struct spi_transfer,
  1818. transfer_list);
  1819. get_transfer_length(dd);
  1820. if (dd->qup_ver || (dd->multi_xfr && !dd->read_len && !dd->write_len)) {
  1821. if (dd->qup_ver)
  1822. write_force_cs(dd, 0);
  1823. /*
  1824. * Handling of multi-transfers.
  1825. * FIFO mode is used by default
  1826. */
  1827. list_for_each_entry(dd->cur_transfer,
  1828. &dd->cur_msg->transfers,
  1829. transfer_list) {
  1830. if (!dd->cur_transfer->len)
  1831. goto error;
  1832. if (xfrs_grped) {
  1833. xfrs_grped--;
  1834. continue;
  1835. } else {
  1836. dd->read_len = dd->write_len = 0;
  1837. xfrs_grped = combine_transfers(dd);
  1838. dd->num_xfrs_grped = xfrs_grped;
  1839. if (dd->qup_ver)
  1840. write_force_cs(dd, 1);
  1841. }
  1842. dd->cur_tx_transfer = dd->cur_transfer;
  1843. dd->cur_rx_transfer = dd->cur_transfer;
  1844. msm_spi_process_transfer(dd);
  1845. if (dd->qup_ver && dd->cur_transfer->cs_change)
  1846. write_force_cs(dd, 0);
  1847. xfrs_grped--;
  1848. }
  1849. } else {
  1850. /* Handling of a single transfer or
  1851. * WR-WR or WR-RD transfers
  1852. */
  1853. if ((!dd->cur_msg->is_dma_mapped) &&
  1854. (msm_spi_use_dma(dd, dd->cur_transfer,
  1855. dd->cur_transfer->bits_per_word))) {
  1856. /* Mapping of DMA buffers */
  1857. int ret = msm_spi_dma_map_buffers(dd);
  1858. if (ret < 0) {
  1859. dd->cur_msg->status = ret;
  1860. goto error;
  1861. }
  1862. }
  1863. dd->cur_tx_transfer = dd->cur_transfer;
  1864. dd->cur_rx_transfer = dd->cur_transfer;
  1865. dd->num_xfrs_grped = 1;
  1866. msm_spi_process_transfer(dd);
  1867. }
  1868. if (dd->qup_ver)
  1869. write_force_cs(dd, 0);
  1870. return;
  1871. error:
  1872. msm_spi_free_cs_gpio(dd);
  1873. }
  1874. static void reset_core(struct msm_spi *dd)
  1875. {
  1876. msm_spi_register_init(dd);
  1877. /*
  1878. * The SPI core generates a bogus input overrun error on some targets,
  1879. * when a transition from run to reset state occurs and if the FIFO has
  1880. * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
  1881. * bit.
  1882. */
  1883. msm_spi_enable_error_flags(dd);
  1884. writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
  1885. msm_spi_set_state(dd, SPI_OP_STATE_RESET);
  1886. }
  1887. static void put_local_resources(struct msm_spi *dd)
  1888. {
  1889. msm_spi_disable_irqs(dd);
  1890. clk_disable_unprepare(dd->clk);
  1891. clk_disable_unprepare(dd->pclk);
  1892. /* Free the spi clk, miso, mosi, cs gpio */
  1893. if (dd->pdata && dd->pdata->gpio_release)
  1894. dd->pdata->gpio_release();
  1895. msm_spi_free_gpios(dd);
  1896. }
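/*
 * Acquire the GPIOs, core/iface clocks and interrupts needed to drive the
 * controller; taken per message when the controller is shared.
 */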
  1897. static int get_local_resources(struct msm_spi *dd)
  1898. {
  1899. int ret = -EINVAL;
  1900. /* Configure the spi clk, miso, mosi and cs gpio */
  1901. if (dd->pdata->gpio_config) {
  1902. ret = dd->pdata->gpio_config();
  1903. if (ret) {
  1904. dev_err(dd->dev,
  1905. "%s: error configuring GPIOs\n",
  1906. __func__);
  1907. return ret;
  1908. }
  1909. }
  1910. ret = msm_spi_request_gpios(dd);
  1911. if (ret)
  1912. return ret;
  1913. ret = clk_prepare_enable(dd->clk);
  1914. if (ret)
  1915. goto clk0_err;
  1916. ret = clk_prepare_enable(dd->pclk);
  1917. if (ret)
  1918. goto clk1_err;
  1919. msm_spi_enable_irqs(dd);
  1920. return 0;
  1921. clk1_err:
  1922. clk_disable_unprepare(dd->clk);
  1923. clk0_err:
  1924. msm_spi_free_gpios(dd);
  1925. return ret;
  1926. }
  1927. /**
  1928. * msm_spi_transfer_one_message: To process one spi message at a time
  1929. * @master: spi master controller reference
  1930. * @msg: one multi-segment SPI transaction
  1931. * @return zero on success or negative error value
  1932. *
  1933. */
  1934. static int msm_spi_transfer_one_message(struct spi_master *master,
  1935. struct spi_message *msg)
  1936. {
  1937. struct msm_spi *dd;
  1938. struct spi_transfer *tr;
  1939. unsigned long flags;
  1940. u32 status_error = 0;
  1941. dd = spi_master_get_devdata(master);
  1942. if (list_empty(&msg->transfers) || !msg->complete)
  1943. return -EINVAL;
  1944. list_for_each_entry(tr, &msg->transfers, transfer_list) {
  1945. /* Check message parameters */
  1946. if (tr->speed_hz > dd->pdata->max_clock_speed ||
  1947. (tr->bits_per_word &&
  1948. (tr->bits_per_word < 4 || tr->bits_per_word > 32)) ||
  1949. (tr->tx_buf == NULL && tr->rx_buf == NULL)) {
  1950. dev_err(dd->dev,
  1951. "Invalid transfer: %d Hz, %d bpw tx=%p, rx=%p\n",
  1952. tr->speed_hz, tr->bits_per_word,
  1953. tr->tx_buf, tr->rx_buf);
  1954. status_error = -EINVAL;
  1955. msg->status = status_error;
  1956. spi_finalize_current_message(master);
  1957. put_local_resources(dd);
  1958. return 0;
  1959. }
  1960. }
  1961. mutex_lock(&dd->core_lock);
  1962. /*
  1963. * Counter-part of system-suspend when runtime-pm is not enabled.
  1964. * This way, resume can be left empty and device will be put in
  1965. * active mode only if client requests anything on the bus
  1966. */
  1967. if (!pm_runtime_enabled(dd->dev))
  1968. msm_spi_pm_resume_runtime(dd->dev);
  1969. if (dd->use_rlock)
  1970. remote_mutex_lock(&dd->r_lock);
  1971. spin_lock_irqsave(&dd->queue_lock, flags);
  1972. dd->transfer_pending = 1;
  1973. spin_unlock_irqrestore(&dd->queue_lock, flags);
  1974. /*
  1975. * get local resources for each transfer to ensure we're in a good
  1976. * state and not interfering with other EE's using this device
  1977. */
  1978. if (dd->pdata->is_shared) {
  1979. if (get_local_resources(dd)) {
  1980. mutex_unlock(&dd->core_lock);
  1981. return -EINVAL;
  1982. }
  1983. reset_core(dd);
  1984. if (dd->use_dma) {
  1985. msm_spi_bam_pipe_connect(dd, &dd->bam.prod,
  1986. &dd->bam.prod.config);
  1987. msm_spi_bam_pipe_connect(dd, &dd->bam.cons,
  1988. &dd->bam.cons.config);
  1989. }
  1990. }
  1991. if (dd->suspended || !msm_spi_is_valid_state(dd)) {
  1992. dev_err(dd->dev, "%s: SPI operational state not valid\n",
  1993. __func__);
  1994. status_error = 1;
  1995. }
  1996. spin_lock_irqsave(&dd->queue_lock, flags);
  1997. dd->transfer_pending = 1;
  1998. dd->cur_msg = msg;
  1999. spin_unlock_irqrestore(&dd->queue_lock, flags);
  2000. if (status_error)
  2001. dd->cur_msg->status = -EIO;
  2002. else
  2003. msm_spi_process_message(dd);
  2004. spin_lock_irqsave(&dd->queue_lock, flags);
  2005. dd->transfer_pending = 0;
  2006. spin_unlock_irqrestore(&dd->queue_lock, flags);
2007. if (dd->use_rlock)
2008. remote_mutex_unlock(&dd->r_lock);
  2010. /*
  2011. * If needed, this can be done after the current message is complete,
  2012. * and work can be continued upon resume. No motivation for now.
  2013. */
  2014. if (dd->suspended)
  2015. wake_up_interruptible(&dd->continue_suspend);
  2016. /*
  2017. * Put local resources prior to calling finalize to ensure the hw
  2018. * is in a known state before notifying the calling thread (which is a
  2019. * different context since we're running in the spi kthread here) to
  2020. * prevent race conditions between us and any other EE's using this hw.
  2021. */
  2022. if (dd->pdata->is_shared) {
  2023. if (dd->use_dma) {
  2024. msm_spi_bam_pipe_disconnect(dd, &dd->bam.prod);
  2025. msm_spi_bam_pipe_disconnect(dd, &dd->bam.cons);
  2026. }
  2027. put_local_resources(dd);
  2028. }
  2029. mutex_unlock(&dd->core_lock);
  2030. if (dd->suspended)
  2031. wake_up_interruptible(&dd->continue_suspend);
  2032. status_error = dd->cur_msg->status;
  2033. spi_finalize_current_message(master);
  2034. return status_error;
  2035. }
  2036. static int msm_spi_prepare_transfer_hardware(struct spi_master *master)
  2037. {
  2038. struct msm_spi *dd = spi_master_get_devdata(master);
  2039. pm_runtime_get_sync(dd->dev);
  2040. return 0;
  2041. }
  2042. static int msm_spi_unprepare_transfer_hardware(struct spi_master *master)
  2043. {
  2044. struct msm_spi *dd = spi_master_get_devdata(master);
  2045. pm_runtime_mark_last_busy(dd->dev);
  2046. pm_runtime_put_autosuspend(dd->dev);
  2047. return 0;
  2048. }
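/*
 * spi_master setup() callback: validate bits-per-word and chip-select, then
 * program chip-select polarity, clock polarity and loopback bits while the
 * controller is briefly powered up.
 */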
  2049. static int msm_spi_setup(struct spi_device *spi)
  2050. {
  2051. struct msm_spi *dd;
  2052. int rc = 0;
  2053. u32 spi_ioc;
  2054. u32 spi_config;
  2055. u32 mask;
  2056. if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
  2057. dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
  2058. __func__, spi->bits_per_word);
  2059. rc = -EINVAL;
  2060. }
  2061. if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
  2062. dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
  2063. __func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
  2064. rc = -EINVAL;
  2065. }
  2066. if (rc)
  2067. goto err_setup_exit;
  2068. dd = spi_master_get_devdata(spi->master);
  2069. pm_runtime_get_sync(dd->dev);
  2070. mutex_lock(&dd->core_lock);
  2071. /* Counter-part of system-suspend when runtime-pm is not enabled. */
  2072. if (!pm_runtime_enabled(dd->dev))
  2073. msm_spi_pm_resume_runtime(dd->dev);
  2074. if (dd->suspended) {
  2075. mutex_unlock(&dd->core_lock);
  2076. return -EBUSY;
  2077. }
  2078. if (dd->pdata->is_shared) {
  2079. rc = get_local_resources(dd);
  2080. if (rc)
  2081. goto no_resources;
  2082. }
  2083. spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
  2084. mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
  2085. if (spi->mode & SPI_CS_HIGH)
  2086. spi_ioc |= mask;
  2087. else
  2088. spi_ioc &= ~mask;
  2089. spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc, spi->mode);
  2090. writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
  2091. spi_config = readl_relaxed(dd->base + SPI_CONFIG);
  2092. spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
  2093. spi_config, spi->mode);
  2094. writel_relaxed(spi_config, dd->base + SPI_CONFIG);
  2095. /* Ensure previous write completed before disabling the clocks */
  2096. mb();
  2097. if (dd->pdata->is_shared)
  2098. put_local_resources(dd);
  2099. /* Counter-part of system-resume when runtime-pm is not enabled. */
  2100. if (!pm_runtime_enabled(dd->dev))
  2101. msm_spi_pm_suspend_runtime(dd->dev);
  2102. no_resources:
  2103. mutex_unlock(&dd->core_lock);
  2104. pm_runtime_mark_last_busy(dd->dev);
  2105. pm_runtime_put_autosuspend(dd->dev);
  2106. err_setup_exit:
  2107. return rc;
  2108. }
  2109. #ifdef CONFIG_DEBUG_FS
  2110. static int debugfs_iomem_x32_set(void *data, u64 val)
  2111. {
  2112. writel_relaxed(val, data);
  2113. /* Ensure the previous write completed. */
  2114. mb();
  2115. return 0;
  2116. }
  2117. static int debugfs_iomem_x32_get(void *data, u64 *val)
  2118. {
  2119. *val = readl_relaxed(data);
  2120. /* Ensure the previous read completed. */
  2121. mb();
  2122. return 0;
  2123. }
  2124. DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
  2125. debugfs_iomem_x32_set, "0x%08llx\n");
  2126. static void spi_debugfs_init(struct msm_spi *dd)
  2127. {
  2128. dd->dent_spi = debugfs_create_dir(dev_name(dd->dev), NULL);
  2129. if (dd->dent_spi) {
  2130. int i;
  2131. for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
  2132. dd->debugfs_spi_regs[i] =
  2133. debugfs_create_file(
  2134. debugfs_spi_regs[i].name,
  2135. debugfs_spi_regs[i].mode,
  2136. dd->dent_spi,
  2137. dd->base + debugfs_spi_regs[i].offset,
  2138. &fops_iomem_x32);
  2139. }
  2140. }
  2141. }
  2142. static void spi_debugfs_exit(struct msm_spi *dd)
  2143. {
  2144. if (dd->dent_spi) {
  2145. int i;
  2146. debugfs_remove_recursive(dd->dent_spi);
  2147. dd->dent_spi = NULL;
  2148. for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
  2149. dd->debugfs_spi_regs[i] = NULL;
  2150. }
  2151. }
  2152. #else
  2153. static void spi_debugfs_init(struct msm_spi *dd) {}
  2154. static void spi_debugfs_exit(struct msm_spi *dd) {}
  2155. #endif
  2156. /* ===Device attributes begin=== */
  2157. static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
  2158. char *buf)
  2159. {
  2160. struct spi_master *master = dev_get_drvdata(dev);
  2161. struct msm_spi *dd = spi_master_get_devdata(master);
  2162. return snprintf(buf, PAGE_SIZE,
  2163. "Device %s\n"
  2164. "rx fifo_size = %d spi words\n"
  2165. "tx fifo_size = %d spi words\n"
  2166. "use_dma ? %s\n"
  2167. "rx block size = %d bytes\n"
  2168. "tx block size = %d bytes\n"
  2169. "input burst size = %d bytes\n"
  2170. "output burst size = %d bytes\n"
  2171. "DMA configuration:\n"
  2172. "tx_ch=%d, rx_ch=%d, tx_crci= %d, rx_crci=%d\n"
  2173. "--statistics--\n"
  2174. "Rx isrs = %d\n"
  2175. "Tx isrs = %d\n"
  2176. "DMA error = %d\n"
  2177. "--debug--\n"
  2178. "NA yet\n",
  2179. dev_name(dev),
  2180. dd->input_fifo_size,
  2181. dd->output_fifo_size,
  2182. dd->use_dma ? "yes" : "no",
  2183. dd->input_block_size,
  2184. dd->output_block_size,
  2185. dd->input_burst_size,
  2186. dd->output_burst_size,
  2187. dd->tx_dma_chan,
  2188. dd->rx_dma_chan,
  2189. dd->tx_dma_crci,
  2190. dd->rx_dma_crci,
  2191. dd->stat_rx + dd->stat_dmov_rx,
  2192. dd->stat_tx + dd->stat_dmov_tx,
  2193. dd->stat_dmov_tx_err + dd->stat_dmov_rx_err
  2194. );
  2195. }
  2196. /* Reset statistics on write */
  2197. static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
  2198. const char *buf, size_t count)
  2199. {
  2200. struct msm_spi *dd = dev_get_drvdata(dev);
  2201. dd->stat_rx = 0;
  2202. dd->stat_tx = 0;
  2203. dd->stat_dmov_rx = 0;
  2204. dd->stat_dmov_tx = 0;
  2205. dd->stat_dmov_rx_err = 0;
  2206. dd->stat_dmov_tx_err = 0;
  2207. return count;
  2208. }
  2209. static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
  2210. static struct attribute *dev_attrs[] = {
  2211. &dev_attr_stats.attr,
  2212. NULL,
  2213. };
  2214. static struct attribute_group dev_attr_grp = {
  2215. .attrs = dev_attrs,
  2216. };
  2217. /* ===Device attributes end=== */
  2218. /**
  2219. * spi_dmov_tx_complete_func - DataMover tx completion callback
  2220. *
  2221. * Executed in IRQ context (Data Mover's IRQ) DataMover's
  2222. * spinlock @msm_dmov_lock held.
  2223. */
  2224. static void spi_dmov_tx_complete_func(struct msm_dmov_cmd *cmd,
  2225. unsigned int result,
  2226. struct msm_dmov_errdata *err)
  2227. {
  2228. struct msm_spi *dd;
  2229. if (!(result & DMOV_RSLT_VALID)) {
  2230. pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
  2231. return;
  2232. }
  2233. /* restore original context */
  2234. dd = container_of(cmd, struct msm_spi, tx_hdr);
  2235. if (result & DMOV_RSLT_DONE) {
  2236. dd->stat_dmov_tx++;
  2237. if ((atomic_inc_return(&dd->tx_irq_called) == 1))
  2238. return;
  2239. complete(&dd->transfer_complete);
  2240. } else {
  2241. /* Error or flush */
  2242. if (result & DMOV_RSLT_ERROR) {
  2243. dev_err(dd->dev, "DMA error (0x%08x)\n", result);
  2244. dd->stat_dmov_tx_err++;
  2245. }
  2246. if (result & DMOV_RSLT_FLUSH) {
  2247. /*
  2248. * Flushing normally happens in process of
  2249. * removing, when we are waiting for outstanding
  2250. * DMA commands to be flushed.
  2251. */
  2252. dev_info(dd->dev,
  2253. "DMA channel flushed (0x%08x)\n", result);
  2254. }
  2255. if (err)
  2256. dev_err(dd->dev,
  2257. "Flush data(%08x %08x %08x %08x %08x %08x)\n",
  2258. err->flush[0], err->flush[1], err->flush[2],
  2259. err->flush[3], err->flush[4], err->flush[5]);
  2260. dd->cur_msg->status = -EIO;
  2261. complete(&dd->transfer_complete);
  2262. }
  2263. }
  2264. /**
  2265. * spi_dmov_rx_complete_func - DataMover rx completion callback
  2266. *
  2267. * Executed in IRQ context (Data Mover's IRQ)
  2268. * DataMover's spinlock @msm_dmov_lock held.
  2269. */
  2270. static void spi_dmov_rx_complete_func(struct msm_dmov_cmd *cmd,
  2271. unsigned int result,
  2272. struct msm_dmov_errdata *err)
  2273. {
  2274. struct msm_spi *dd;
  2275. if (!(result & DMOV_RSLT_VALID)) {
  2276. pr_err("Invalid DMOV result(rc = 0x%08x, cmd = %p)",
  2277. result, cmd);
  2278. return;
  2279. }
  2280. /* restore original context */
  2281. dd = container_of(cmd, struct msm_spi, rx_hdr);
  2282. if (result & DMOV_RSLT_DONE) {
  2283. dd->stat_dmov_rx++;
  2284. if (atomic_inc_return(&dd->rx_irq_called) == 1)
  2285. return;
  2286. complete(&dd->transfer_complete);
  2287. } else {
2288. /* Error or flush */
  2289. if (result & DMOV_RSLT_ERROR) {
  2290. dev_err(dd->dev, "DMA error(0x%08x)\n", result);
  2291. dd->stat_dmov_rx_err++;
  2292. }
  2293. if (result & DMOV_RSLT_FLUSH) {
  2294. dev_info(dd->dev,
  2295. "DMA channel flushed(0x%08x)\n", result);
  2296. }
  2297. if (err)
  2298. dev_err(dd->dev,
  2299. "Flush data(%08x %08x %08x %08x %08x %08x)\n",
  2300. err->flush[0], err->flush[1], err->flush[2],
  2301. err->flush[3], err->flush[4], err->flush[5]);
  2302. dd->cur_msg->status = -EIO;
  2303. complete(&dd->transfer_complete);
  2304. }
  2305. }
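/*
 * Size of the single DMA-coherent allocation that holds both DMOV command
 * structures plus the cache-line-aligned TX/RX padding buffers.
 */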
  2306. static inline u32 get_chunk_size(struct msm_spi *dd, int input_burst_size,
  2307. int output_burst_size)
  2308. {
  2309. u32 cache_line = dma_get_cache_alignment();
  2310. int burst_size = (input_burst_size > output_burst_size) ?
  2311. input_burst_size : output_burst_size;
  2312. return (roundup(sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN) +
  2313. roundup(burst_size, cache_line))*2;
  2314. }
  2315. static void msm_spi_dmov_teardown(struct msm_spi *dd)
  2316. {
  2317. int limit = 0;
  2318. if (!dd->use_dma)
  2319. return;
  2320. while (dd->mode == SPI_DMOV_MODE && limit++ < 50) {
  2321. msm_dmov_flush(dd->tx_dma_chan, 1);
  2322. msm_dmov_flush(dd->rx_dma_chan, 1);
  2323. msleep(10);
  2324. }
  2325. dma_free_coherent(NULL,
  2326. get_chunk_size(dd, dd->input_burst_size, dd->output_burst_size),
  2327. dd->tx_dmov_cmd,
  2328. dd->tx_dmov_cmd_dma);
  2329. dd->tx_dmov_cmd = dd->rx_dmov_cmd = NULL;
  2330. dd->tx_padding = dd->rx_padding = NULL;
  2331. }
  2332. static void msm_spi_bam_pipe_teardown(struct msm_spi *dd,
  2333. enum msm_spi_pipe_direction pipe_dir)
  2334. {
  2335. struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
  2336. (&dd->bam.prod) : (&dd->bam.cons);
  2337. if (!pipe->teardown_required)
  2338. return;
  2339. msm_spi_bam_pipe_disconnect(dd, pipe);
  2340. dma_free_coherent(dd->dev, pipe->config.desc.size,
  2341. pipe->config.desc.base, pipe->config.desc.phys_base);
  2342. sps_free_endpoint(pipe->handle);
  2343. pipe->handle = 0;
  2344. pipe->teardown_required = false;
  2345. }
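/*
 * Allocate an SPS endpoint for one BAM pipe, configure its direction and
 * descriptor FIFO, and stash the handle for later connect/disconnect.
 */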
  2346. static int msm_spi_bam_pipe_init(struct msm_spi *dd,
  2347. enum msm_spi_pipe_direction pipe_dir)
  2348. {
  2349. int rc = 0;
  2350. struct sps_pipe *pipe_handle;
  2351. struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
  2352. (&dd->bam.prod) : (&dd->bam.cons);
  2353. struct sps_connect *pipe_conf = &pipe->config;
  2354. pipe->name = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ? "cons" : "prod";
  2355. pipe->handle = 0;
  2356. pipe_handle = sps_alloc_endpoint();
  2357. if (!pipe_handle) {
  2358. dev_err(dd->dev, "%s: Failed to allocate BAM endpoint\n"
  2359. , __func__);
  2360. return -ENOMEM;
  2361. }
  2362. memset(pipe_conf, 0, sizeof(*pipe_conf));
  2363. rc = sps_get_config(pipe_handle, pipe_conf);
  2364. if (rc) {
  2365. dev_err(dd->dev, "%s: Failed to get BAM pipe config\n"
  2366. , __func__);
  2367. goto config_err;
  2368. }
  2369. if (pipe_dir == SPI_BAM_CONSUMER_PIPE) {
  2370. pipe_conf->source = dd->bam.handle;
  2371. pipe_conf->destination = SPS_DEV_HANDLE_MEM;
  2372. pipe_conf->mode = SPS_MODE_SRC;
  2373. pipe_conf->src_pipe_index =
  2374. dd->pdata->bam_producer_pipe_index;
  2375. pipe_conf->dest_pipe_index = 0;
  2376. } else {
  2377. pipe_conf->source = SPS_DEV_HANDLE_MEM;
  2378. pipe_conf->destination = dd->bam.handle;
  2379. pipe_conf->mode = SPS_MODE_DEST;
  2380. pipe_conf->src_pipe_index = 0;
  2381. pipe_conf->dest_pipe_index =
  2382. dd->pdata->bam_consumer_pipe_index;
  2383. }
  2384. pipe_conf->options = SPS_O_EOT | SPS_O_AUTO_ENABLE;
  2385. pipe_conf->desc.size = SPI_BAM_MAX_DESC_NUM * sizeof(struct sps_iovec);
  2386. pipe_conf->desc.base = dma_alloc_coherent(dd->dev,
  2387. pipe_conf->desc.size,
  2388. &pipe_conf->desc.phys_base,
  2389. GFP_KERNEL);
  2390. if (!pipe_conf->desc.base) {
  2391. dev_err(dd->dev, "%s: Failed allocate BAM pipe memory"
  2392. , __func__);
  2393. rc = -ENOMEM;
  2394. goto config_err;
  2395. }
  2396. /* zero descriptor FIFO for convenient debugging of first descs */
  2397. memset(pipe_conf->desc.base, 0x00, pipe_conf->desc.size);
  2398. pipe->handle = pipe_handle;
  2399. return 0;
  2400. config_err:
  2401. sps_free_endpoint(pipe_handle);
  2402. return rc;
  2403. }
  2404. static void msm_spi_bam_teardown(struct msm_spi *dd)
  2405. {
  2406. msm_spi_bam_pipe_teardown(dd, SPI_BAM_PRODUCER_PIPE);
  2407. msm_spi_bam_pipe_teardown(dd, SPI_BAM_CONSUMER_PIPE);
  2408. if (dd->bam.deregister_required) {
  2409. sps_deregister_bam_device(dd->bam.handle);
  2410. dd->bam.deregister_required = false;
  2411. }
  2412. }
  2413. static int msm_spi_bam_init(struct msm_spi *dd)
  2414. {
  2415. struct sps_bam_props bam_props = {0};
  2416. u32 bam_handle;
  2417. int rc = 0;
  2418. rc = sps_phy2h(dd->bam.phys_addr, &bam_handle);
  2419. if (rc || !bam_handle) {
  2420. bam_props.phys_addr = dd->bam.phys_addr;
  2421. bam_props.virt_addr = dd->bam.base;
  2422. bam_props.irq = dd->bam.irq;
  2423. bam_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
  2424. bam_props.summing_threshold = 0x10;
  2425. rc = sps_register_bam_device(&bam_props, &bam_handle);
  2426. if (rc) {
  2427. dev_err(dd->dev,
  2428. "%s: Failed to register BAM device",
  2429. __func__);
  2430. return rc;
  2431. }
  2432. dd->bam.deregister_required = true;
  2433. }
  2434. dd->bam.handle = bam_handle;
  2435. rc = msm_spi_bam_pipe_init(dd, SPI_BAM_PRODUCER_PIPE);
  2436. if (rc) {
  2437. dev_err(dd->dev,
  2438. "%s: Failed to init producer BAM-pipe",
  2439. __func__);
  2440. goto bam_init_error;
  2441. }
  2442. rc = msm_spi_bam_pipe_init(dd, SPI_BAM_CONSUMER_PIPE);
  2443. if (rc) {
  2444. dev_err(dd->dev,
  2445. "%s: Failed to init consumer BAM-pipe",
  2446. __func__);
  2447. goto bam_init_error;
  2448. }
  2449. return 0;
  2450. bam_init_error:
  2451. msm_spi_bam_teardown(dd);
  2452. return rc;
  2453. }
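/*
 * One-time data mover setup: carve the coherent chunk into RX/TX command
 * structures and padding buffers, pre-build the box-mode command headers,
 * and flush any stale activity on both DMA channels.
 */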
  2454. static __init int msm_spi_dmov_init(struct msm_spi *dd)
  2455. {
  2456. dmov_box *box;
  2457. u32 cache_line = dma_get_cache_alignment();
  2458. /* Allocate all as one chunk, since all is smaller than page size */
2459. /* We pass a NULL device, since it requires coherent_dma_mask in the
2460. device definition; we're okay with using the system pool */
  2461. dd->tx_dmov_cmd
  2462. = dma_alloc_coherent(NULL,
  2463. get_chunk_size(dd, dd->input_burst_size,
  2464. dd->output_burst_size),
  2465. &dd->tx_dmov_cmd_dma, GFP_KERNEL);
  2466. if (dd->tx_dmov_cmd == NULL)
  2467. return -ENOMEM;
2468. /* DMA addresses should be 64-bit aligned */
  2469. dd->rx_dmov_cmd = (struct spi_dmov_cmd *)
  2470. ALIGN((size_t)&dd->tx_dmov_cmd[1], DM_BYTE_ALIGN);
  2471. dd->rx_dmov_cmd_dma = ALIGN(dd->tx_dmov_cmd_dma +
  2472. sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN);
  2473. /* Buffers should be aligned to cache line */
  2474. dd->tx_padding = (u8 *)ALIGN((size_t)&dd->rx_dmov_cmd[1], cache_line);
  2475. dd->tx_padding_dma = ALIGN(dd->rx_dmov_cmd_dma +
  2476. sizeof(struct spi_dmov_cmd), cache_line);
  2477. dd->rx_padding = (u8 *)ALIGN((size_t)(dd->tx_padding +
  2478. dd->output_burst_size), cache_line);
  2479. dd->rx_padding_dma = ALIGN(dd->tx_padding_dma + dd->output_burst_size,
  2480. cache_line);
  2481. /* Setup DM commands */
  2482. box = &(dd->rx_dmov_cmd->box);
  2483. box->cmd = CMD_MODE_BOX | CMD_SRC_CRCI(dd->rx_dma_crci);
  2484. box->src_row_addr = (uint32_t)dd->mem_phys_addr + SPI_INPUT_FIFO;
  2485. dd->rx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
  2486. DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
  2487. offsetof(struct spi_dmov_cmd, cmd_ptr));
  2488. dd->rx_hdr.complete_func = spi_dmov_rx_complete_func;
  2489. box = &(dd->tx_dmov_cmd->box);
  2490. box->cmd = CMD_MODE_BOX | CMD_DST_CRCI(dd->tx_dma_crci);
  2491. box->dst_row_addr = (uint32_t)dd->mem_phys_addr + SPI_OUTPUT_FIFO;
  2492. dd->tx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
  2493. DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
  2494. offsetof(struct spi_dmov_cmd, cmd_ptr));
  2495. dd->tx_hdr.complete_func = spi_dmov_tx_complete_func;
  2496. dd->tx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
  2497. CMD_DST_CRCI(dd->tx_dma_crci);
  2498. dd->tx_dmov_cmd->single_pad.dst = (uint32_t)dd->mem_phys_addr +
  2499. SPI_OUTPUT_FIFO;
  2500. dd->rx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
  2501. CMD_SRC_CRCI(dd->rx_dma_crci);
  2502. dd->rx_dmov_cmd->single_pad.src = (uint32_t)dd->mem_phys_addr +
  2503. SPI_INPUT_FIFO;
  2504. /* Clear remaining activities on channel */
  2505. msm_dmov_flush(dd->tx_dma_chan, 1);
  2506. msm_dmov_flush(dd->rx_dma_chan, 1);
  2507. return 0;
  2508. }
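/*
 * Device-tree parsing helpers: each table entry below maps one DT property
 * to a pdata/driver field, with a requirement level and a default value.
 */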
  2509. enum msm_spi_dt_entry_status {
  2510. DT_REQ, /* Required: fail if missing */
  2511. DT_SGST, /* Suggested: warn if missing */
  2512. DT_OPT, /* Optional: don't warn if missing */
  2513. };
  2514. enum msm_spi_dt_entry_type {
  2515. DT_U32,
  2516. DT_GPIO,
  2517. DT_BOOL,
  2518. };
  2519. struct msm_spi_dt_to_pdata_map {
  2520. const char *dt_name;
  2521. void *ptr_data;
  2522. enum msm_spi_dt_entry_status status;
  2523. enum msm_spi_dt_entry_type type;
  2524. int default_val;
  2525. };
  2526. static int __init msm_spi_dt_to_pdata_populate(struct platform_device *pdev,
  2527. struct msm_spi_platform_data *pdata,
  2528. struct msm_spi_dt_to_pdata_map *itr)
  2529. {
  2530. int ret, err = 0;
  2531. struct device_node *node = pdev->dev.of_node;
  2532. for (; itr->dt_name ; ++itr) {
  2533. switch (itr->type) {
  2534. case DT_GPIO:
  2535. ret = of_get_named_gpio(node, itr->dt_name, 0);
  2536. if (ret >= 0) {
  2537. *((int *) itr->ptr_data) = ret;
  2538. ret = 0;
  2539. }
  2540. break;
  2541. case DT_U32:
  2542. ret = of_property_read_u32(node, itr->dt_name,
  2543. (u32 *) itr->ptr_data);
  2544. break;
  2545. case DT_BOOL:
  2546. *((bool *) itr->ptr_data) =
  2547. of_property_read_bool(node, itr->dt_name);
  2548. ret = 0;
  2549. break;
  2550. default:
  2551. dev_err(&pdev->dev, "%d is an unknown DT entry type\n",
  2552. itr->type);
  2553. ret = -EBADE;
  2554. }
  2555. dev_dbg(&pdev->dev, "DT entry ret:%d name:%s val:%d\n",
  2556. ret, itr->dt_name, *((int *)itr->ptr_data));
  2557. if (ret) {
  2558. *((int *)itr->ptr_data) = itr->default_val;
  2559. if (itr->status < DT_OPT) {
  2560. dev_err(&pdev->dev, "Missing '%s' DT entry\n",
  2561. itr->dt_name);
  2562. /* cont on err to dump all missing entries */
  2563. if (itr->status == DT_REQ && !err)
  2564. err = ret;
  2565. }
  2566. }
  2567. }
  2568. return err;
  2569. }
  2570. /**
  2571. * msm_spi_dt_to_pdata: create pdata and read gpio config from device tree
  2572. */
  2573. struct msm_spi_platform_data * __init msm_spi_dt_to_pdata(
  2574. struct platform_device *pdev, struct msm_spi *dd)
  2575. {
  2576. struct msm_spi_platform_data *pdata;
  2577. pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
  2578. if (!pdata) {
  2579. pr_err("Unable to allocate platform data\n");
  2580. return NULL;
  2581. } else {
  2582. struct msm_spi_dt_to_pdata_map map[] = {
  2583. {"spi-max-frequency",
  2584. &pdata->max_clock_speed, DT_SGST, DT_U32, 0},
  2585. {"qcom,infinite-mode",
  2586. &pdata->infinite_mode, DT_OPT, DT_U32, 0},
  2587. {"qcom,active-only",
  2588. &pdata->active_only, DT_OPT, DT_BOOL, 0},
  2589. {"qcom,master-id",
  2590. &pdata->master_id, DT_SGST, DT_U32, 0},
  2591. {"qcom,ver-reg-exists",
  2592. &pdata->ver_reg_exists, DT_OPT, DT_BOOL, 0},
  2593. {"qcom,use-bam",
  2594. &pdata->use_bam, DT_OPT, DT_BOOL, 0},
  2595. {"qcom,bam-consumer-pipe-index",
  2596. &pdata->bam_consumer_pipe_index, DT_OPT, DT_U32, 0},
  2597. {"qcom,bam-producer-pipe-index",
  2598. &pdata->bam_producer_pipe_index, DT_OPT, DT_U32, 0},
  2599. {"qcom,gpio-clk",
  2600. &dd->spi_gpios[0], DT_OPT, DT_GPIO, -1},
  2601. {"qcom,gpio-miso",
  2602. &dd->spi_gpios[1], DT_OPT, DT_GPIO, -1},
  2603. {"qcom,gpio-mosi",
  2604. &dd->spi_gpios[2], DT_OPT, DT_GPIO, -1},
  2605. {"qcom,gpio-cs0",
  2606. &dd->cs_gpios[0].gpio_num, DT_OPT, DT_GPIO, -1},
  2607. {"qcom,gpio-cs1",
  2608. &dd->cs_gpios[1].gpio_num, DT_OPT, DT_GPIO, -1},
  2609. {"qcom,gpio-cs2",
  2610. &dd->cs_gpios[2].gpio_num, DT_OPT, DT_GPIO, -1},
  2611. {"qcom,gpio-cs3",
  2612. &dd->cs_gpios[3].gpio_num, DT_OPT, DT_GPIO, -1},
  2613. {"qcom,rt-priority",
  2614. &pdata->rt_priority, DT_OPT, DT_BOOL, 0},
  2615. {"qcom,shared",
  2616. &pdata->is_shared, DT_OPT, DT_BOOL, 0},
  2617. {NULL, NULL, 0, 0, 0},
  2618. };
  2619. if (msm_spi_dt_to_pdata_populate(pdev, pdata, map)) {
  2620. devm_kfree(&pdev->dev, pdata);
  2621. return NULL;
  2622. }
  2623. }
  2624. #ifdef ENABLE_SENSORS_FPRINT_SECURE
2625. /* Even if the BAM setting is enabled, */
2626. /* BAM cannot be accessed when tzspi is in use */
  2627. if ((dd->cs_gpios[0].gpio_num) == FP_SPI_CS) {
  2628. pdata->use_bam = false;
  2629. pr_info("%s: disable bam for BLSP5 tzspi\n", __func__);
  2630. }
  2631. #endif
  2632. dev_warn(&pdev->dev,
  2633. "%s pdata->use_bam: %d", __func__, pdata->use_bam);
  2634. if (pdata->use_bam) {
  2635. if (!pdata->bam_consumer_pipe_index) {
  2636. dev_warn(&pdev->dev,
  2637. "missing qcom,bam-consumer-pipe-index entry in device-tree\n");
  2638. pdata->use_bam = false;
  2639. }
  2640. if (!pdata->bam_producer_pipe_index) {
  2641. dev_warn(&pdev->dev,
  2642. "missing qcom,bam-producer-pipe-index entry in device-tree\n");
  2643. pdata->use_bam = false;
  2644. }
  2645. }
  2646. return pdata;
  2647. }
  2648. static int __init msm_spi_get_qup_hw_ver(struct device *dev, struct msm_spi *dd)
  2649. {
  2650. u32 data = readl_relaxed(dd->base + QUP_HARDWARE_VER);
  2651. return (data >= QUP_HARDWARE_VER_2_1_1) ? SPI_QUP_VERSION_BFAM
  2652. : SPI_QUP_VERSION_NONE;
  2653. }
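/*
 * Fetch the BAM register space and IRQ from the platform resources and
 * select the BAM init/teardown callbacks.
 */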
  2654. static int __init msm_spi_bam_get_resources(struct msm_spi *dd,
  2655. struct platform_device *pdev, struct spi_master *master)
  2656. {
  2657. struct resource *resource;
  2658. size_t bam_mem_size;
  2659. resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  2660. "spi_bam_physical");
  2661. if (!resource) {
  2662. dev_warn(&pdev->dev,
  2663. "%s: Missing spi_bam_physical entry in DT",
  2664. __func__);
  2665. return -ENXIO;
  2666. }
  2667. dd->bam.phys_addr = resource->start;
  2668. bam_mem_size = resource_size(resource);
  2669. dd->bam.base = devm_ioremap(&pdev->dev, dd->bam.phys_addr,
  2670. bam_mem_size);
  2671. if (!dd->bam.base) {
  2672. dev_warn(&pdev->dev,
  2673. "%s: Failed to ioremap(spi_bam_physical)",
  2674. __func__);
  2675. return -ENXIO;
  2676. }
  2677. dd->bam.irq = platform_get_irq_byname(pdev, "spi_bam_irq");
  2678. if (dd->bam.irq < 0) {
  2679. dev_warn(&pdev->dev, "%s: Missing spi_bam_irq entry in DT",
  2680. __func__);
  2681. return -EINVAL;
  2682. }
  2683. dd->dma_init = msm_spi_bam_init;
  2684. dd->dma_teardown = msm_spi_bam_teardown;
  2685. return 0;
  2686. }
  2687. #ifdef ENABLE_SENSORS_FPRINT_SECURE
  2688. int fp_spi_clock_set_rate(struct spi_device *spidev)
  2689. {
  2690. struct msm_spi *dd;
  2691. if (!spidev) {
  2692. pr_err("%s: spidev pointer is NULL\n", __func__);
  2693. return -EFAULT;
  2694. }
  2695. dd = spi_master_get_devdata(spidev->master);
  2696. if (!dd) {
  2697. pr_err("%s: spi master pointer is NULL\n", __func__);
  2698. return -EFAULT;
  2699. }
  2700. msm_spi_clock_set(dd, spidev->max_speed_hz);
  2701. pr_info("%s sucess\n", __func__);
  2702. return 0;
  2703. }
  2704. EXPORT_SYMBOL_GPL(fp_spi_clock_set_rate);
  2705. int fp_spi_clock_enable(struct spi_device *spidev)
  2706. {
  2707. struct msm_spi *dd;
  2708. int rc;
  2709. if (!spidev) {
  2710. pr_err("%s: spidev pointer is NULL\n", __func__);
  2711. return -EFAULT;
  2712. }
  2713. dd = spi_master_get_devdata(spidev->master);
  2714. if (!dd) {
  2715. pr_err("%s: spi master pointer is NULL\n", __func__);
  2716. return -EFAULT;
  2717. }
  2718. rc = clk_prepare_enable(dd->clk);
  2719. if (rc) {
  2720. pr_err("%s: unable to enable core_clk\n", __func__);
  2721. return rc;
  2722. }
  2723. rc = clk_prepare_enable(dd->pclk);
  2724. if (rc) {
  2725. pr_err("%s: unable to enable iface_clk\n", __func__);
  2726. return rc;
  2727. }
  2728. pr_info("%s sucess\n", __func__);
  2729. return 0;
  2730. }
  2731. EXPORT_SYMBOL_GPL(fp_spi_clock_enable);
  2732. int fp_spi_clock_disable(struct spi_device *spidev)
  2733. {
  2734. struct msm_spi *dd;
  2735. if (!spidev) {
  2736. pr_err("%s: spidev pointer is NULL\n", __func__);
  2737. return -EFAULT;
  2738. }
  2739. dd = spi_master_get_devdata(spidev->master);
  2740. if (!dd) {
  2741. pr_err("%s: spi master pointer is NULL\n", __func__);
  2742. return -EFAULT;
  2743. }
  2744. clk_disable_unprepare(dd->clk);
  2745. clk_disable_unprepare(dd->pclk);
  2746. pr_info("%s sucess\n", __func__);
  2747. return 0;
  2748. }
  2749. EXPORT_SYMBOL_GPL(fp_spi_clock_disable);
  2750. #endif
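/*
 * Probe: allocate the spi_master, set up its callbacks, build platform data
 * (from DT on B-family targets), and claim register space, DMA resources,
 * clocks and the optional remote spinlock.
 */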
  2751. static int __init msm_spi_probe(struct platform_device *pdev)
  2752. {
  2753. struct spi_master *master;
  2754. struct msm_spi *dd;
  2755. struct resource *resource;
  2756. int rc = -ENXIO;
  2757. int locked = 0;
  2758. int i = 0;
  2759. int clk_enabled = 0;
  2760. int pclk_enabled = 0;
  2761. struct msm_spi_platform_data *pdata;
  2762. master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
  2763. if (!master) {
  2764. rc = -ENOMEM;
  2765. dev_err(&pdev->dev, "master allocation failed\n");
  2766. goto err_probe_exit;
  2767. }
  2768. master->bus_num = pdev->id;
  2769. master->mode_bits = SPI_SUPPORTED_MODES;
  2770. master->num_chipselect = SPI_NUM_CHIPSELECTS;
  2771. master->setup = msm_spi_setup;
  2772. master->prepare_transfer_hardware = msm_spi_prepare_transfer_hardware;
  2773. master->transfer_one_message = msm_spi_transfer_one_message;
  2774. master->unprepare_transfer_hardware
  2775. = msm_spi_unprepare_transfer_hardware;
  2776. platform_set_drvdata(pdev, master);
  2777. dd = spi_master_get_devdata(master);
  2778. if (pdev->dev.of_node) {
  2779. dd->qup_ver = SPI_QUP_VERSION_BFAM;
  2780. master->dev.of_node = pdev->dev.of_node;
  2781. pdata = msm_spi_dt_to_pdata(pdev, dd);
  2782. if (!pdata) {
  2783. rc = -ENOMEM;
  2784. goto err_probe_exit;
  2785. }
  2786. rc = of_alias_get_id(pdev->dev.of_node, "spi");
  2787. if (rc < 0)
  2788. dev_warn(&pdev->dev,
  2789. "using default bus_num %d\n", pdev->id);
  2790. else
  2791. master->bus_num = pdev->id = rc;
  2792. } else {
  2793. pdata = pdev->dev.platform_data;
  2794. dd->qup_ver = SPI_QUP_VERSION_NONE;
  2795. for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
  2796. resource = platform_get_resource(pdev, IORESOURCE_IO,
  2797. i);
  2798. dd->spi_gpios[i] = resource ? resource->start : -1;
  2799. }
  2800. for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
  2801. resource = platform_get_resource(pdev, IORESOURCE_IO,
  2802. i + ARRAY_SIZE(spi_rsrcs));
  2803. dd->cs_gpios[i].gpio_num = resource ?
  2804. resource->start : -1;
  2805. }
  2806. }
  2807. for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i)
  2808. dd->cs_gpios[i].valid = 0;
  2809. master->rt = pdata->rt_priority;
  2810. dd->pdata = pdata;
	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!resource) {
		rc = -ENXIO;
		goto err_probe_res;
	}

	dd->mem_phys_addr = resource->start;
	dd->mem_size = resource_size(resource);

	if (pdata) {
		if (pdata->dma_config) {
			rc = pdata->dma_config();
			if (rc) {
				dev_warn(&pdev->dev,
					"%s: DM mode not supported\n",
					__func__);
				dd->use_dma = 0;
				goto skip_dma_resources;
			}
		}

		if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
			resource = platform_get_resource(pdev,
							IORESOURCE_DMA, 0);
			if (resource) {
				dd->rx_dma_chan = resource->start;
				dd->tx_dma_chan = resource->end;
				resource = platform_get_resource(pdev,
							IORESOURCE_DMA, 1);
				if (!resource) {
					rc = -ENXIO;
					goto err_probe_res;
				}

				dd->rx_dma_crci = resource->start;
				dd->tx_dma_crci = resource->end;
				dd->use_dma = 1;
				master->dma_alignment =
						dma_get_cache_alignment();
				dd->dma_init = msm_spi_dmov_init;
				dd->dma_teardown = msm_spi_dmov_teardown;
			}
		} else {
			if (!dd->pdata->use_bam)
				goto skip_dma_resources;

			rc = msm_spi_bam_get_resources(dd, pdev, master);
			if (rc) {
				dev_warn(dd->dev,
					"%s: failed to get BAM resources\n",
					__func__);
				goto skip_dma_resources;
			}
			dd->use_dma = 1;
		}
	}
skip_dma_resources:

	spin_lock_init(&dd->queue_lock);
	mutex_init(&dd->core_lock);
	init_waitqueue_head(&dd->continue_suspend);

	if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
					dd->mem_size, SPI_DRV_NAME)) {
		rc = -ENXIO;
		goto err_probe_reqmem;
	}

	dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size);
	if (!dd->base) {
		rc = -ENOMEM;
		goto err_probe_reqmem;
	}

	if (pdata && pdata->rsl_id) {
		struct remote_mutex_id rmid;
		rmid.r_spinlock_id = pdata->rsl_id;
		rmid.delay_us = SPI_TRYLOCK_DELAY;

		rc = remote_mutex_init(&dd->r_lock, &rmid);
		if (rc) {
			dev_err(&pdev->dev,
				"%s: unable to init remote_mutex (%s), (rc=%d)\n",
				__func__, rmid.r_spinlock_id, rc);
			goto err_probe_rlock_init;
		}
		dd->use_rlock = 1;
		dd->pm_lat = pdata->pm_lat;
		pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
					PM_QOS_DEFAULT_VALUE);
	}
	mutex_lock(&dd->core_lock);
	if (dd->use_rlock)
		remote_mutex_lock(&dd->r_lock);
	locked = 1;

	dd->dev = &pdev->dev;
	dd->clk = clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(dd->clk)) {
		dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
		rc = PTR_ERR(dd->clk);
		goto err_probe_clk_get;
	}

	dd->pclk = clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(dd->pclk)) {
		dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
		rc = PTR_ERR(dd->pclk);
		goto err_probe_pclk_get;
	}

	if (pdata && pdata->max_clock_speed)
		msm_spi_clock_set(dd, dd->pdata->max_clock_speed);

	rc = clk_prepare_enable(dd->clk);
	if (rc) {
		dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
			__func__);
		goto err_probe_clk_enable;
	}
	clk_enabled = 1;

	rc = clk_prepare_enable(dd->pclk);
	if (rc) {
		dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
			__func__);
		goto err_probe_pclk_enable;
	}
	pclk_enabled = 1;

	if (pdata && pdata->ver_reg_exists) {
		enum msm_spi_qup_version ver =
				msm_spi_get_qup_hw_ver(&pdev->dev, dd);
		if (dd->qup_ver != ver)
			dev_warn(&pdev->dev,
				"%s: HW version different than initially assumed by probe\n",
				__func__);
	}
	/* GSBI does not exist on B-family MSM chips */
	if (dd->qup_ver != SPI_QUP_VERSION_BFAM) {
		rc = msm_spi_configure_gsbi(dd, pdev);
		if (rc)
			goto err_probe_gsbi;
	}
	msm_spi_calculate_fifo_size(dd);
	if (dd->use_dma) {
		rc = dd->dma_init(dd);
		if (rc)
			goto err_probe_dma;
	}

	msm_spi_register_init(dd);
	/*
	 * The SPI core generates a bogus input overrun error on some targets,
	 * when a transition from run to reset state occurs and if the FIFO has
	 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
	 * bit.
	 */
	msm_spi_enable_error_flags(dd);

	writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
	rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
	if (rc)
		goto err_probe_state;

	clk_disable_unprepare(dd->clk);
	clk_disable_unprepare(dd->pclk);
	clk_enabled = 0;
	pclk_enabled = 0;

	dd->suspended = 1;
	dd->transfer_pending = 0;
	dd->multi_xfr = 0;
	dd->mode = SPI_MODE_NONE;

	rc = msm_spi_request_irq(dd, pdev, master);
	if (rc)
		goto err_probe_irq;

	msm_spi_disable_irqs(dd);

	if (dd->use_rlock)
		remote_mutex_unlock(&dd->r_lock);
	mutex_unlock(&dd->core_lock);
	locked = 0;

	pm_runtime_set_autosuspend_delay(&pdev->dev, MSEC_PER_SEC);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	rc = spi_register_master(master);
	if (rc)
		goto err_probe_reg_master;

	rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
	if (rc) {
		dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
		goto err_attrs;
	}

	spi_debugfs_init(dd);

	return 0;

err_attrs:
	spi_unregister_master(master);
err_probe_reg_master:
	pm_runtime_disable(&pdev->dev);
err_probe_irq:
err_probe_state:
	if (dd->dma_teardown)
		dd->dma_teardown(dd);
err_probe_dma:
err_probe_gsbi:
	if (pclk_enabled)
		clk_disable_unprepare(dd->pclk);
err_probe_pclk_enable:
	if (clk_enabled)
		clk_disable_unprepare(dd->clk);
err_probe_clk_enable:
	clk_put(dd->pclk);
err_probe_pclk_get:
	clk_put(dd->clk);
err_probe_clk_get:
	if (locked) {
		if (dd->use_rlock)
			remote_mutex_unlock(&dd->r_lock);
		mutex_unlock(&dd->core_lock);
	}
err_probe_rlock_init:
err_probe_reqmem:
err_probe_res:
	spi_master_put(master);
err_probe_exit:
	return rc;
}
#ifdef CONFIG_PM
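/*
 * Runtime PM: the runtime-suspend handler marks the controller suspended
 * under queue_lock, waits for any in-flight transfer to finish, tears down
 * BAM pipes and local resources when the bus is not shared, and drops the
 * clock-path bus vote.  The runtime-resume handler undoes this in reverse
 * order.
 */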
static int msm_spi_pm_suspend_runtime(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);
	struct spi_master *master = platform_get_drvdata(pdev);
	struct msm_spi *dd;
	unsigned long flags;

	dev_dbg(device, "pm_runtime: suspending...\n");
	if (!master)
		goto suspend_exit;
	dd = spi_master_get_devdata(master);
	if (!dd)
		goto suspend_exit;

	if (dd->suspended)
		return 0;

	/*
	 * Make sure nothing is added to the queue while we're
	 * suspending
	 */
	spin_lock_irqsave(&dd->queue_lock, flags);
	dd->suspended = 1;
	spin_unlock_irqrestore(&dd->queue_lock, flags);

	/* Wait for transactions to end, or time out */
	wait_event_interruptible(dd->continue_suspend,
		!dd->transfer_pending);

	if (dd->pdata && !dd->pdata->is_shared && dd->use_dma) {
		msm_spi_bam_pipe_disconnect(dd, &dd->bam.prod);
		msm_spi_bam_pipe_disconnect(dd, &dd->bam.cons);
	}
	if (dd->pdata && !dd->pdata->active_only)
		msm_spi_clk_path_unvote(dd);
	if (dd->pdata && !dd->pdata->is_shared)
		put_local_resources(dd);

suspend_exit:
	return 0;
}
static int msm_spi_pm_resume_runtime(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);
	struct spi_master *master = platform_get_drvdata(pdev);
	struct msm_spi *dd;

	dev_dbg(device, "pm_runtime: resuming...\n");
	if (!master)
		goto resume_exit;
	dd = spi_master_get_devdata(master);
	if (!dd)
		goto resume_exit;

	if (!dd->suspended)
		return 0;

	if (!dd->pdata->is_shared)
		get_local_resources(dd);

	msm_spi_clk_path_init(dd);
	if (!dd->pdata->active_only)
		msm_spi_clk_path_vote(dd);

	if (!dd->pdata->is_shared && dd->use_dma) {
		msm_spi_bam_pipe_connect(dd, &dd->bam.prod,
				&dd->bam.prod.config);
		msm_spi_bam_pipe_connect(dd, &dd->bam.cons,
				&dd->bam.cons.config);
	}

	dd->suspended = 0;

resume_exit:
	return 0;
}
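/*
 * System sleep: if the controller has not already been runtime-suspended,
 * force the runtime-suspend path here and re-sync the runtime PM status to
 * 'suspended', so the first client transaction after resume triggers a
 * runtime resume.
 */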
static int msm_spi_suspend(struct device *device)
{
	if (!pm_runtime_enabled(device) || !pm_runtime_suspended(device)) {
		struct platform_device *pdev = to_platform_device(device);
		struct spi_master *master = platform_get_drvdata(pdev);
		struct msm_spi *dd;

		dev_dbg(device, "system suspend");
		if (!master)
			goto suspend_exit;
		dd = spi_master_get_devdata(master);
		if (!dd)
			goto suspend_exit;
		msm_spi_pm_suspend_runtime(device);

		/*
		 * set the device's runtime PM status to 'suspended'
		 */
		pm_runtime_disable(device);
		pm_runtime_set_suspended(device);
		pm_runtime_enable(device);
	}
suspend_exit:
	return 0;
}
static int msm_spi_resume(struct device *device)
{
	/*
	 * Rely on runtime-PM to call resume in case it is enabled.
	 * Even if it's not enabled, rely on the 1st client transaction to do
	 * clock ON and gpio configuration.
	 */
	dev_dbg(device, "system resume");
	return 0;
}
#else
#define msm_spi_suspend NULL
#define msm_spi_resume NULL
#define msm_spi_pm_suspend_runtime NULL
#define msm_spi_pm_resume_runtime NULL
#endif /* CONFIG_PM */
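/*
 * Remove path: release the PM QoS request, tear down debugfs and sysfs
 * attributes, free DMA resources, disable runtime PM, drop the clock
 * references and bus votes, and finally unregister and release the master.
 */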
static int __devexit msm_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct msm_spi *dd = spi_master_get_devdata(master);

	pm_qos_remove_request(&qos_req_list);
	spi_debugfs_exit(dd);
	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);

	if (dd->dma_teardown)
		dd->dma_teardown(dd);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	clk_put(dd->clk);
	clk_put(dd->pclk);
	msm_spi_clk_path_teardown(dd);
	platform_set_drvdata(pdev, NULL);
	spi_unregister_master(master);
	spi_master_put(master);

	return 0;
}
static struct of_device_id msm_spi_dt_match[] = {
	{
		.compatible = "qcom,spi-qup-v2",
	},
	{}
};

static const struct dev_pm_ops msm_spi_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_spi_suspend, msm_spi_resume)
	SET_RUNTIME_PM_OPS(msm_spi_pm_suspend_runtime,
			   msm_spi_pm_resume_runtime, NULL)
};
static struct platform_driver msm_spi_driver = {
	.driver = {
		.name = SPI_DRV_NAME,
		.owner = THIS_MODULE,
		.pm = &msm_spi_dev_pm_ops,
		.of_match_table = msm_spi_dt_match,
	},
	.remove = __exit_p(msm_spi_remove),
};
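/*
 * msm_spi_probe() is marked __init and is therefore not listed in the
 * platform_driver above; registration goes through platform_driver_probe()
 * below, which probes exactly once at init time and does not support
 * deferred probing or later hotplug rebinding.
 */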
static int __init msm_spi_init(void)
{
	return platform_driver_probe(&msm_spi_driver, msm_spi_probe);
}
module_init(msm_spi_init);

static void __exit msm_spi_exit(void)
{
	platform_driver_unregister(&msm_spi_driver);
}
module_exit(msm_spi_exit);

MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.4");
MODULE_ALIAS("platform:"SPI_DRV_NAME);